VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 95466

Last change on this file since 95466 was 95421, checked in by vboxsync on 2022-06-29

VMM/IEM: fld, fbld and fstp are 8 byte aligned as far as #AC is concerned on an 10980xe. bugref:9898

1/* $Id: IEMAll.cpp 95421 2022-06-29 02:41:48Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed, as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow) : Basic enter/exit IEM state info.
65 * - Level 2 (Log2) : ?
66 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
67 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5) : Decoding details.
69 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7) : iret++ execution logging.
71 * - Level 8 (Log8) : Memory writes.
72 * - Level 9 (Log9) : Memory reads.
73 * - Level 10 (Log10): TLBs.
74 */
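/*
 * Illustrative sketch (not from the original file, kept disabled): how the level
 * conventions above map onto the Log*() macros from VBox/log.h that are used
 * throughout this file. The function and messages are made up; only the macro
 * names and the level assignments are taken from the list above.
 */
#if 0 /* example only */
static void iemLogLevelExample(PVMCPUCC pVCpu)
{
    Log(("iemExample: raising #GP(0)\n"));                      /* level 1: exceptions & major events */
    LogFlow(("IEMExecOne: enter\n"));                           /* flow: basic enter/exit state info */
    Log4(("decode: %04x:%08RX64 xor eax, eax\n",
          pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));  /* level 4: decoded mnemonics w/ EIP */
    Log8(("memwrite: ...\n"));                                  /* level 8: memory writes */
    Log10(("IEMTlbInvalidateAll\n"));                           /* level 10: TLBs */
}
#endif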
75
76/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
77#ifdef _MSC_VER
78# pragma warning(disable:4505)
79#endif
80
81
82/*********************************************************************************************************************************
83* Header Files *
84*********************************************************************************************************************************/
85#define LOG_GROUP LOG_GROUP_IEM
86#define VMCPU_INCL_CPUM_GST_CTX
87#include <VBox/vmm/iem.h>
88#include <VBox/vmm/cpum.h>
89#include <VBox/vmm/apic.h>
90#include <VBox/vmm/pdm.h>
91#include <VBox/vmm/pgm.h>
92#include <VBox/vmm/iom.h>
93#include <VBox/vmm/em.h>
94#include <VBox/vmm/hm.h>
95#include <VBox/vmm/nem.h>
96#include <VBox/vmm/gim.h>
97#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
98# include <VBox/vmm/em.h>
99# include <VBox/vmm/hm_svm.h>
100#endif
101#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
102# include <VBox/vmm/hmvmxinline.h>
103#endif
104#include <VBox/vmm/tm.h>
105#include <VBox/vmm/dbgf.h>
106#include <VBox/vmm/dbgftrace.h>
107#include "IEMInternal.h"
108#include <VBox/vmm/vmcc.h>
109#include <VBox/log.h>
110#include <VBox/err.h>
111#include <VBox/param.h>
112#include <VBox/dis.h>
113#include <VBox/disopcode.h>
114#include <iprt/asm-math.h>
115#include <iprt/assert.h>
116#include <iprt/string.h>
117#include <iprt/x86.h>
118
119#include "IEMInline.h"
120
121
122/*********************************************************************************************************************************
123* Structures and Typedefs *
124*********************************************************************************************************************************/
125/**
126 * CPU exception classes.
127 */
128typedef enum IEMXCPTCLASS
129{
130 IEMXCPTCLASS_BENIGN,
131 IEMXCPTCLASS_CONTRIBUTORY,
132 IEMXCPTCLASS_PAGE_FAULT,
133 IEMXCPTCLASS_DOUBLE_FAULT
134} IEMXCPTCLASS;
135
136
137/*********************************************************************************************************************************
138* Global Variables *
139*********************************************************************************************************************************/
140#if defined(IEM_LOG_MEMORY_WRITES)
141/** What IEM just wrote. */
142uint8_t g_abIemWrote[256];
143/** How much IEM just wrote. */
144size_t g_cbIemWrote;
145#endif
146
147
148/*********************************************************************************************************************************
149* Internal Functions *
150*********************************************************************************************************************************/
151static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
152 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
153
154
155/**
156 * Initializes the decoder state.
157 *
158 * iemReInitDecoder is mostly a copy of this function.
159 *
160 * @param pVCpu The cross context virtual CPU structure of the
161 * calling thread.
162 * @param fBypassHandlers Whether to bypass access handlers.
163 * @param fDisregardLock Whether to disregard the LOCK prefix.
164 */
165DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
166{
167 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
168 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
169 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
170 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
171 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
172 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
173 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
174 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
175 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
176 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
177
178 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
179 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
180 pVCpu->iem.s.enmCpuMode = enmMode;
181 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
182 pVCpu->iem.s.enmEffAddrMode = enmMode;
183 if (enmMode != IEMMODE_64BIT)
184 {
185 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
186 pVCpu->iem.s.enmEffOpSize = enmMode;
187 }
188 else
189 {
190 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
191 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
192 }
193 pVCpu->iem.s.fPrefixes = 0;
194 pVCpu->iem.s.uRexReg = 0;
195 pVCpu->iem.s.uRexB = 0;
196 pVCpu->iem.s.uRexIndex = 0;
197 pVCpu->iem.s.idxPrefix = 0;
198 pVCpu->iem.s.uVex3rdReg = 0;
199 pVCpu->iem.s.uVexLength = 0;
200 pVCpu->iem.s.fEvexStuff = 0;
201 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
202#ifdef IEM_WITH_CODE_TLB
203 pVCpu->iem.s.pbInstrBuf = NULL;
204 pVCpu->iem.s.offInstrNextByte = 0;
205 pVCpu->iem.s.offCurInstrStart = 0;
206# ifdef VBOX_STRICT
207 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
208 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
209 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
210# endif
211#else
212 pVCpu->iem.s.offOpcode = 0;
213 pVCpu->iem.s.cbOpcode = 0;
214#endif
215 pVCpu->iem.s.offModRm = 0;
216 pVCpu->iem.s.cActiveMappings = 0;
217 pVCpu->iem.s.iNextMapping = 0;
218 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
219 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
220 pVCpu->iem.s.fDisregardLock = fDisregardLock;
221
222#ifdef DBGFTRACE_ENABLED
223 switch (enmMode)
224 {
225 case IEMMODE_64BIT:
226 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
227 break;
228 case IEMMODE_32BIT:
229 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
230 break;
231 case IEMMODE_16BIT:
232 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
233 break;
234 }
235#endif
236}
237
238
239/**
240 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
241 *
242 * This is mostly a copy of iemInitDecoder.
243 *
244 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
245 */
246DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
247{
248 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
249 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
250 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
251 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
252 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
253 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
254 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
255 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
256 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
257
258 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
259 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
260 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
261 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
262 pVCpu->iem.s.enmEffAddrMode = enmMode;
263 if (enmMode != IEMMODE_64BIT)
264 {
265 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
266 pVCpu->iem.s.enmEffOpSize = enmMode;
267 }
268 else
269 {
270 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
271 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
272 }
273 pVCpu->iem.s.fPrefixes = 0;
274 pVCpu->iem.s.uRexReg = 0;
275 pVCpu->iem.s.uRexB = 0;
276 pVCpu->iem.s.uRexIndex = 0;
277 pVCpu->iem.s.idxPrefix = 0;
278 pVCpu->iem.s.uVex3rdReg = 0;
279 pVCpu->iem.s.uVexLength = 0;
280 pVCpu->iem.s.fEvexStuff = 0;
281 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
282#ifdef IEM_WITH_CODE_TLB
283 if (pVCpu->iem.s.pbInstrBuf)
284 {
285 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
286 - pVCpu->iem.s.uInstrBufPc;
287 if (off < pVCpu->iem.s.cbInstrBufTotal)
288 {
289 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
290 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
291 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
292 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
293 else
294 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
295 }
296 else
297 {
298 pVCpu->iem.s.pbInstrBuf = NULL;
299 pVCpu->iem.s.offInstrNextByte = 0;
300 pVCpu->iem.s.offCurInstrStart = 0;
301 pVCpu->iem.s.cbInstrBuf = 0;
302 pVCpu->iem.s.cbInstrBufTotal = 0;
303 }
304 }
305 else
306 {
307 pVCpu->iem.s.offInstrNextByte = 0;
308 pVCpu->iem.s.offCurInstrStart = 0;
309 pVCpu->iem.s.cbInstrBuf = 0;
310 pVCpu->iem.s.cbInstrBufTotal = 0;
311 }
312#else
313 pVCpu->iem.s.cbOpcode = 0;
314 pVCpu->iem.s.offOpcode = 0;
315#endif
316 pVCpu->iem.s.offModRm = 0;
317 Assert(pVCpu->iem.s.cActiveMappings == 0);
318 pVCpu->iem.s.iNextMapping = 0;
319 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
320 Assert(pVCpu->iem.s.fBypassHandlers == false);
321
322#ifdef DBGFTRACE_ENABLED
323 switch (enmMode)
324 {
325 case IEMMODE_64BIT:
326 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
327 break;
328 case IEMMODE_32BIT:
329 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
330 break;
331 case IEMMODE_16BIT:
332 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
333 break;
334 }
335#endif
336}
337
338
339
340/**
341 * Prefetches opcodes the first time execution is started.
342 *
343 * @returns Strict VBox status code.
344 * @param pVCpu The cross context virtual CPU structure of the
345 * calling thread.
346 * @param fBypassHandlers Whether to bypass access handlers.
347 * @param fDisregardLock Whether to disregard LOCK prefixes.
348 *
349 * @todo Combine fDisregardLock and fBypassHandlers into a flag parameter and
350 * store them as such.
351 */
352static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock) RT_NOEXCEPT
353{
354 iemInitDecoder(pVCpu, fBypassHandlers, fDisregardLock);
355
356#ifdef IEM_WITH_CODE_TLB
357 /** @todo Do ITLB lookup here. */
358
359#else /* !IEM_WITH_CODE_TLB */
360
361 /*
362 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
363 *
364 * First translate CS:rIP to a physical address.
365 */
366 uint32_t cbToTryRead;
367 RTGCPTR GCPtrPC;
368 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
369 {
370 cbToTryRead = GUEST_PAGE_SIZE;
371 GCPtrPC = pVCpu->cpum.GstCtx.rip;
372 if (IEM_IS_CANONICAL(GCPtrPC))
373 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
374 else
375 return iemRaiseGeneralProtectionFault0(pVCpu);
376 }
377 else
378 {
379 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
380 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
381 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
382 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
383 else
384 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
385 if (cbToTryRead) { /* likely */ }
386 else /* overflowed */
387 {
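/* The 'u32Limit - GCPtrPC32 + 1' computation above can only wrap to zero when
   GCPtrPC32 is 0 and the limit is the maximum 0xffffffff (flat 4 GiB segment),
   which is what the assertions below check. */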
388 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
389 cbToTryRead = UINT32_MAX;
390 }
391 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
392 Assert(GCPtrPC <= UINT32_MAX);
393 }
394
395 PGMPTWALK Walk;
396 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
397 if (RT_SUCCESS(rc))
398 Assert(Walk.fSucceeded); /* probable. */
399 else
400 {
401 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
402#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
403 if (Walk.fFailed & PGM_WALKFAIL_EPT)
404 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
405#endif
406 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
407 }
408 if ((Walk.fEffective & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
409 else
410 {
411 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
412#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
413 if (Walk.fFailed & PGM_WALKFAIL_EPT)
414 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
415#endif
416 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
417 }
418 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
419 else
420 {
421 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
422#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
423 if (Walk.fFailed & PGM_WALKFAIL_EPT)
424 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
425#endif
426 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
427 }
428 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
429 /** @todo Check reserved bits and such stuff. PGM is better at doing
430 * that, so do it when implementing the guest virtual address
431 * TLB... */
432
433 /*
434 * Read the bytes at this address.
435 */
436 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
437 if (cbToTryRead > cbLeftOnPage)
438 cbToTryRead = cbLeftOnPage;
439 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
440 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
441
442 if (!pVCpu->iem.s.fBypassHandlers)
443 {
444 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
445 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
446 { /* likely */ }
447 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
448 {
449 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
450 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
451 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
452 }
453 else
454 {
455 Log((RT_SUCCESS(rcStrict)
456 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
457 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
458 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
459 return rcStrict;
460 }
461 }
462 else
463 {
464 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
465 if (RT_SUCCESS(rc))
466 { /* likely */ }
467 else
468 {
469 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
470 GCPtrPC, GCPhys, rc, cbToTryRead));
471 return rc;
472 }
473 }
474 pVCpu->iem.s.cbOpcode = cbToTryRead;
475#endif /* !IEM_WITH_CODE_TLB */
476 return VINF_SUCCESS;
477}
478
479
480/**
481 * Invalidates the IEM TLBs.
482 *
483 * This is called internally as well as by PGM when moving GC mappings.
484 *
485 *
486 * @param pVCpu The cross context virtual CPU structure of the calling
487 * thread.
488 */
489VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
490{
491#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
492 Log10(("IEMTlbInvalidateAll\n"));
493# ifdef IEM_WITH_CODE_TLB
494 pVCpu->iem.s.cbInstrBufTotal = 0;
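/* Invalidation works by bumping the revision embedded in every entry's uTag:
   entries stamped with an older revision can no longer match, so the array only
   needs explicit scrubbing on the (rare) revision wrap-around handled below. */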
495 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
496 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
497 { /* very likely */ }
498 else
499 {
500 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
501 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
502 while (i-- > 0)
503 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
504 }
505# endif
506
507# ifdef IEM_WITH_DATA_TLB
508 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
509 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
510 { /* very likely */ }
511 else
512 {
513 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
514 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
515 while (i-- > 0)
516 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
517 }
518# endif
519#else
520 RT_NOREF(pVCpu);
521#endif
522}
523
524
525/**
526 * Invalidates a page in the TLBs.
527 *
528 * @param pVCpu The cross context virtual CPU structure of the calling
529 * thread.
530 * @param GCPtr The address of the page to invalidate.
531 * @thread EMT(pVCpu)
532 */
533VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
534{
535#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
536 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
537 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
538 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
539
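/* The lookups below compare against the page tag combined with the current TLB
   revision, so entries left over from before the last IEMTlbInvalidateAll never
   match and need no clearing here. */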
540# ifdef IEM_WITH_CODE_TLB
541 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
542 {
543 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
544 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
545 pVCpu->iem.s.cbInstrBufTotal = 0;
546 }
547# endif
548
549# ifdef IEM_WITH_DATA_TLB
550 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
551 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
552# endif
553#else
554 NOREF(pVCpu); NOREF(GCPtr);
555#endif
556}
557
558
559#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
560/**
561 * Invalidates both TLBs the slow way following a revision rollover.
562 *
563 * Worker for IEMTlbInvalidateAllPhysical,
564 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
565 * iemMemMapJmp and others.
566 *
567 * @thread EMT(pVCpu)
568 */
569static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
570{
571 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
572 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
573 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
574
575 unsigned i;
576# ifdef IEM_WITH_CODE_TLB
577 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
578 while (i-- > 0)
579 {
580 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
581 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
582 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
583 }
584# endif
585# ifdef IEM_WITH_DATA_TLB
586 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
587 while (i-- > 0)
588 {
589 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
590 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
591 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
592 }
593# endif
594
595}
596#endif
597
598
599/**
600 * Invalidates the host physical aspects of the IEM TLBs.
601 *
602 * This is called internally as well as by PGM when moving GC mappings.
603 *
604 * @param pVCpu The cross context virtual CPU structure of the calling
605 * thread.
606 * @note Currently not used.
607 */
608VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
609{
610#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
611 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
612 Log10(("IEMTlbInvalidateAllPhysical\n"));
613
614# ifdef IEM_WITH_CODE_TLB
615 pVCpu->iem.s.cbInstrBufTotal = 0;
616# endif
617 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
618 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
619 {
620 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
621 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
622 }
623 else
624 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
625#else
626 NOREF(pVCpu);
627#endif
628}
629
630
631/**
632 * Invalidates the host physical aspects of the IEM TLBs.
633 *
634 * This is called internally as well as by PGM when moving GC mappings.
635 *
636 * @param pVM The cross context VM structure.
637 * @param idCpuCaller The ID of the calling EMT if available to the caller,
638 * otherwise NIL_VMCPUID.
639 *
640 * @remarks Caller holds the PGM lock.
641 */
642VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller)
643{
644#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
645 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
646 if (pVCpuCaller)
647 VMCPU_ASSERT_EMT(pVCpuCaller);
648 Log10(("IEMTlbInvalidateAllPhysicalAllCpus\n"));
649
650 VMCC_FOR_EACH_VMCPU(pVM)
651 {
652# ifdef IEM_WITH_CODE_TLB
653 if (pVCpuCaller == pVCpu)
654 pVCpu->iem.s.cbInstrBufTotal = 0;
655# endif
656
657 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
658 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
659 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
660 { /* likely */}
661 else if (pVCpuCaller == pVCpu)
662 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
663 else
664 {
665 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
666 continue;
667 }
668 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
669 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
670 }
671 VMCC_FOR_EACH_VMCPU_END(pVM);
672
673#else
674 RT_NOREF(pVM, idCpuCaller);
675#endif
676}
677
678#ifdef IEM_WITH_CODE_TLB
679
680/**
681 * Tries to fetch @a cbDst opcode bytes; raises the appropriate exception and
682 * longjmps on failure.
683 *
684 * We end up here for a number of reasons:
685 * - pbInstrBuf isn't yet initialized.
686 * - Advancing beyond the buffer boundary (e.g. crossing a page).
687 * - Advancing beyond the CS segment limit.
688 * - Fetching from non-mappable page (e.g. MMIO).
689 *
690 * @param pVCpu The cross context virtual CPU structure of the
691 * calling thread.
692 * @param pvDst Where to return the bytes.
693 * @param cbDst Number of bytes to read.
694 *
695 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
696 */
697void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) RT_NOEXCEPT
698{
699#ifdef IN_RING3
700 for (;;)
701 {
702 Assert(cbDst <= 8);
703 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
704
705 /*
706 * We might have a partial buffer match, deal with that first to make the
707 * rest simpler. This is the first part of the cross page/buffer case.
708 */
709 if (pVCpu->iem.s.pbInstrBuf != NULL)
710 {
711 if (offBuf < pVCpu->iem.s.cbInstrBuf)
712 {
713 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
714 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
715 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
716
717 cbDst -= cbCopy;
718 pvDst = (uint8_t *)pvDst + cbCopy;
719 offBuf += cbCopy;
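/* Advance the decoder's read position by the number of bytes just copied. */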
720 pVCpu->iem.s.offInstrNextByte += cbCopy;
721 }
722 }
723
724 /*
725 * Check segment limit, figuring how much we're allowed to access at this point.
726 *
727 * We will fault immediately if RIP is past the segment limit / in non-canonical
728 * territory. If we do continue, there are one or more bytes to read before we
729 * end up in trouble and we need to do that first before faulting.
730 */
731 RTGCPTR GCPtrFirst;
732 uint32_t cbMaxRead;
733 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
734 {
735 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
736 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
737 { /* likely */ }
738 else
739 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
740 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
741 }
742 else
743 {
744 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
745 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
746 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
747 { /* likely */ }
748 else
749 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
750 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
751 if (cbMaxRead != 0)
752 { /* likely */ }
753 else
754 {
755 /* Overflowed because address is 0 and limit is max. */
756 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
757 cbMaxRead = X86_PAGE_SIZE;
758 }
759 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
760 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
761 if (cbMaxRead2 < cbMaxRead)
762 cbMaxRead = cbMaxRead2;
763 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
764 }
765
766 /*
767 * Get the TLB entry for this piece of code.
768 */
769 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
770 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
771 if (pTlbe->uTag == uTag)
772 {
773 /* likely when executing lots of code, otherwise unlikely */
774# ifdef VBOX_WITH_STATISTICS
775 pVCpu->iem.s.CodeTlb.cTlbHits++;
776# endif
777 }
778 else
779 {
780 pVCpu->iem.s.CodeTlb.cTlbMisses++;
781 PGMPTWALK Walk;
782 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
783 if (RT_FAILURE(rc))
784 {
785#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
786 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
787 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
788#endif
789 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
790 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
791 }
792
793 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
794 Assert(Walk.fSucceeded);
795 pTlbe->uTag = uTag;
796 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
797 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
798 pTlbe->GCPhys = Walk.GCPhys;
799 pTlbe->pbMappingR3 = NULL;
800 }
801
802 /*
803 * Check TLB page table level access flags.
804 */
805 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
806 {
807 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
808 {
809 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
810 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
811 }
812 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
813 {
814 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
815 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
816 }
817 }
818
819 /*
820 * Look up the physical page info if necessary.
821 */
822 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
823 { /* not necessary */ }
824 else
825 {
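/* The cached physical-page info (host mapping and IEMTLBE_F_PG_* flags) is stale
   relative to the current physical revision; clear it and re-query PGM below. */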
826 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
827 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
828 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
829 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
830 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
831 { /* likely */ }
832 else
833 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
834 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
835 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
836 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
837 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
838 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
839 }
840
841# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
842 /*
843 * Try do a direct read using the pbMappingR3 pointer.
844 */
845 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
846 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
847 {
848 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
849 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
850 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
851 {
852 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
853 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
854 }
855 else
856 {
857 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
858 Assert(cbInstr < cbMaxRead);
859 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
860 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
861 }
862 if (cbDst <= cbMaxRead)
863 {
864 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
865 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
866 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
867 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
868 return;
869 }
870 pVCpu->iem.s.pbInstrBuf = NULL;
871
872 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
873 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
874 }
875 else
876# endif
877#if 0
878 /*
878 * If there is no special read handling, we can read a bit more and
880 * put it in the prefetch buffer.
881 */
882 if ( cbDst < cbMaxRead
883 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
884 {
885 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
886 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
887 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
888 { /* likely */ }
889 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
890 {
891 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
892 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
893 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
894 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
895 }
896 else
897 {
898 Log((RT_SUCCESS(rcStrict)
899 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
900 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
901 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
902 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
903 }
904 }
905 /*
906 * Special read handling, so only read exactly what's needed.
907 * This is a highly unlikely scenario.
908 */
909 else
910#endif
911 {
912 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
913 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
914 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
915 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
916 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
917 { /* likely */ }
918 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
919 {
920 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
921 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
922 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
923 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
924 }
925 else
926 {
927 Log((RT_SUCCESS(rcStrict)
928 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
929 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
930 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
931 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
932 }
933 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
934 if (cbToRead == cbDst)
935 return;
936 }
937
938 /*
939 * More to read, loop.
940 */
941 cbDst -= cbMaxRead;
942 pvDst = (uint8_t *)pvDst + cbMaxRead;
943 }
944#else
945 RT_NOREF(pvDst, cbDst);
946 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
947#endif
948}
949
950#else
951
952/**
953 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
954 * exception if it fails.
955 *
956 * @returns Strict VBox status code.
957 * @param pVCpu The cross context virtual CPU structure of the
958 * calling thread.
959 * @param cbMin The minimum number of bytes relative to offOpcode
960 * that must be read.
961 */
962VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
963{
964 /*
965 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
966 *
967 * First translate CS:rIP to a physical address.
968 */
969 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
970 uint32_t cbToTryRead;
971 RTGCPTR GCPtrNext;
972 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
973 {
974 cbToTryRead = GUEST_PAGE_SIZE;
975 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
976 if (!IEM_IS_CANONICAL(GCPtrNext))
977 return iemRaiseGeneralProtectionFault0(pVCpu);
978 }
979 else
980 {
981 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
982 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
983 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
984 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
985 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
986 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
987 if (!cbToTryRead) /* overflowed */
988 {
989 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
990 cbToTryRead = UINT32_MAX;
991 /** @todo check out wrapping around the code segment. */
992 }
993 if (cbToTryRead < cbMin - cbLeft)
994 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
995 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
996 }
997
998 /* Only read up to the end of the page, and make sure we don't read more
999 than the opcode buffer can hold. */
1000 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1001 if (cbToTryRead > cbLeftOnPage)
1002 cbToTryRead = cbLeftOnPage;
1003 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1004 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1005/** @todo r=bird: Convert assertion into undefined opcode exception? */
1006 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1007
1008 PGMPTWALK Walk;
1009 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1010 if (RT_FAILURE(rc))
1011 {
1012 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1013#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1014 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1015 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1016#endif
1017 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1018 }
1019 if (!(Walk.fEffective & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1020 {
1021 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1022#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1023 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1024 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1025#endif
1026 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1027 }
1028 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1029 {
1030 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1031#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1032 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1033 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1034#endif
1035 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1036 }
1037 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1038 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1039 /** @todo Check reserved bits and such stuff. PGM is better at doing
1040 * that, so do it when implementing the guest virtual address
1041 * TLB... */
1042
1043 /*
1044 * Read the bytes at this address.
1045 *
1046 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1047 * and since PATM should only patch the start of an instruction there
1048 * should be no need to check again here.
1049 */
1050 if (!pVCpu->iem.s.fBypassHandlers)
1051 {
1052 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1053 cbToTryRead, PGMACCESSORIGIN_IEM);
1054 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1055 { /* likely */ }
1056 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1057 {
1058 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1059 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1060 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1061 }
1062 else
1063 {
1064 Log((RT_SUCCESS(rcStrict)
1065 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1066 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1067 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1068 return rcStrict;
1069 }
1070 }
1071 else
1072 {
1073 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1074 if (RT_SUCCESS(rc))
1075 { /* likely */ }
1076 else
1077 {
1078 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1079 return rc;
1080 }
1081 }
1082 pVCpu->iem.s.cbOpcode += cbToTryRead;
1083 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1084
1085 return VINF_SUCCESS;
1086}
1087
1088#endif /* !IEM_WITH_CODE_TLB */
1089#ifndef IEM_WITH_SETJMP
1090
1091/**
1092 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1093 *
1094 * @returns Strict VBox status code.
1095 * @param pVCpu The cross context virtual CPU structure of the
1096 * calling thread.
1097 * @param pb Where to return the opcode byte.
1098 */
1099VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1100{
1101 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1102 if (rcStrict == VINF_SUCCESS)
1103 {
1104 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1105 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1106 pVCpu->iem.s.offOpcode = offOpcode + 1;
1107 }
1108 else
1109 *pb = 0;
1110 return rcStrict;
1111}
1112
1113#else /* IEM_WITH_SETJMP */
1114
1115/**
1116 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1117 *
1118 * @returns The opcode byte.
1119 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1120 */
1121uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1122{
1123# ifdef IEM_WITH_CODE_TLB
1124 uint8_t u8;
1125 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1126 return u8;
1127# else
1128 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1129 if (rcStrict == VINF_SUCCESS)
1130 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1131 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1132# endif
1133}
1134
1135#endif /* IEM_WITH_SETJMP */
1136
1137#ifndef IEM_WITH_SETJMP
1138
1139/**
1140 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1141 *
1142 * @returns Strict VBox status code.
1143 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1144 * @param pu16 Where to return the opcode word.
1145 */
1146VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1147{
1148 uint8_t u8;
1149 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1150 if (rcStrict == VINF_SUCCESS)
1151 *pu16 = (int8_t)u8;
1152 return rcStrict;
1153}
1154
1155
1156/**
1157 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1158 *
1159 * @returns Strict VBox status code.
1160 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1161 * @param pu32 Where to return the opcode dword.
1162 */
1163VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1164{
1165 uint8_t u8;
1166 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1167 if (rcStrict == VINF_SUCCESS)
1168 *pu32 = (int8_t)u8;
1169 return rcStrict;
1170}
1171
1172
1173/**
1174 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1175 *
1176 * @returns Strict VBox status code.
1177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1178 * @param pu64 Where to return the opcode qword.
1179 */
1180VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1181{
1182 uint8_t u8;
1183 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1184 if (rcStrict == VINF_SUCCESS)
1185 *pu64 = (int8_t)u8;
1186 return rcStrict;
1187}
1188
1189#endif /* !IEM_WITH_SETJMP */
1190
1191
1192#ifndef IEM_WITH_SETJMP
1193
1194/**
1195 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1196 *
1197 * @returns Strict VBox status code.
1198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1199 * @param pu16 Where to return the opcode word.
1200 */
1201VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1202{
1203 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1204 if (rcStrict == VINF_SUCCESS)
1205 {
1206 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1207# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1208 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1209# else
1210 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1211# endif
1212 pVCpu->iem.s.offOpcode = offOpcode + 2;
1213 }
1214 else
1215 *pu16 = 0;
1216 return rcStrict;
1217}
1218
1219#else /* IEM_WITH_SETJMP */
1220
1221/**
1222 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1223 *
1224 * @returns The opcode word.
1225 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1226 */
1227uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1228{
1229# ifdef IEM_WITH_CODE_TLB
1230 uint16_t u16;
1231 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1232 return u16;
1233# else
1234 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1235 if (rcStrict == VINF_SUCCESS)
1236 {
1237 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1238 pVCpu->iem.s.offOpcode += 2;
1239# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1240 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1241# else
1242 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1243# endif
1244 }
1245 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1246# endif
1247}
1248
1249#endif /* IEM_WITH_SETJMP */
1250
1251#ifndef IEM_WITH_SETJMP
1252
1253/**
1254 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1255 *
1256 * @returns Strict VBox status code.
1257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1258 * @param pu32 Where to return the opcode double word.
1259 */
1260VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1261{
1262 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1263 if (rcStrict == VINF_SUCCESS)
1264 {
1265 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1266 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1267 pVCpu->iem.s.offOpcode = offOpcode + 2;
1268 }
1269 else
1270 *pu32 = 0;
1271 return rcStrict;
1272}
1273
1274
1275/**
1276 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1277 *
1278 * @returns Strict VBox status code.
1279 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1280 * @param pu64 Where to return the opcode quad word.
1281 */
1282VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1283{
1284 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1285 if (rcStrict == VINF_SUCCESS)
1286 {
1287 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1288 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1289 pVCpu->iem.s.offOpcode = offOpcode + 2;
1290 }
1291 else
1292 *pu64 = 0;
1293 return rcStrict;
1294}
1295
1296#endif /* !IEM_WITH_SETJMP */
1297
1298#ifndef IEM_WITH_SETJMP
1299
1300/**
1301 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1302 *
1303 * @returns Strict VBox status code.
1304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1305 * @param pu32 Where to return the opcode dword.
1306 */
1307VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1308{
1309 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1310 if (rcStrict == VINF_SUCCESS)
1311 {
1312 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1313# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1314 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1315# else
1316 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1317 pVCpu->iem.s.abOpcode[offOpcode + 1],
1318 pVCpu->iem.s.abOpcode[offOpcode + 2],
1319 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1320# endif
1321 pVCpu->iem.s.offOpcode = offOpcode + 4;
1322 }
1323 else
1324 *pu32 = 0;
1325 return rcStrict;
1326}
1327
1328#else /* IEM_WITH_SETJMP */
1329
1330/**
1331 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1332 *
1333 * @returns The opcode dword.
1334 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1335 */
1336uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1337{
1338# ifdef IEM_WITH_CODE_TLB
1339 uint32_t u32;
1340 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1341 return u32;
1342# else
1343 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1344 if (rcStrict == VINF_SUCCESS)
1345 {
1346 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1347 pVCpu->iem.s.offOpcode = offOpcode + 4;
1348# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1349 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1350# else
1351 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1352 pVCpu->iem.s.abOpcode[offOpcode + 1],
1353 pVCpu->iem.s.abOpcode[offOpcode + 2],
1354 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1355# endif
1356 }
1357 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1358# endif
1359}
1360
1361#endif /* IEM_WITH_SETJMP */
1362
1363#ifndef IEM_WITH_SETJMP
1364
1365/**
1366 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1367 *
1368 * @returns Strict VBox status code.
1369 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1370 * @param pu64 Where to return the opcode qword.
1371 */
1372VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1373{
1374 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1375 if (rcStrict == VINF_SUCCESS)
1376 {
1377 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1378 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1379 pVCpu->iem.s.abOpcode[offOpcode + 1],
1380 pVCpu->iem.s.abOpcode[offOpcode + 2],
1381 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1382 pVCpu->iem.s.offOpcode = offOpcode + 4;
1383 }
1384 else
1385 *pu64 = 0;
1386 return rcStrict;
1387}
1388
1389
1390/**
1391 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1392 *
1393 * @returns Strict VBox status code.
1394 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1395 * @param pu64 Where to return the opcode qword.
1396 */
1397VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1398{
1399 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1400 if (rcStrict == VINF_SUCCESS)
1401 {
1402 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1403 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1404 pVCpu->iem.s.abOpcode[offOpcode + 1],
1405 pVCpu->iem.s.abOpcode[offOpcode + 2],
1406 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1407 pVCpu->iem.s.offOpcode = offOpcode + 4;
1408 }
1409 else
1410 *pu64 = 0;
1411 return rcStrict;
1412}
1413
1414#endif /* !IEM_WITH_SETJMP */
1415
1416#ifndef IEM_WITH_SETJMP
1417
1418/**
1419 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1420 *
1421 * @returns Strict VBox status code.
1422 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1423 * @param pu64 Where to return the opcode qword.
1424 */
1425VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1426{
1427 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1428 if (rcStrict == VINF_SUCCESS)
1429 {
1430 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1431# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1432 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1433# else
1434 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1435 pVCpu->iem.s.abOpcode[offOpcode + 1],
1436 pVCpu->iem.s.abOpcode[offOpcode + 2],
1437 pVCpu->iem.s.abOpcode[offOpcode + 3],
1438 pVCpu->iem.s.abOpcode[offOpcode + 4],
1439 pVCpu->iem.s.abOpcode[offOpcode + 5],
1440 pVCpu->iem.s.abOpcode[offOpcode + 6],
1441 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1442# endif
1443 pVCpu->iem.s.offOpcode = offOpcode + 8;
1444 }
1445 else
1446 *pu64 = 0;
1447 return rcStrict;
1448}
1449
1450#else /* IEM_WITH_SETJMP */
1451
1452/**
1453 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1454 *
1455 * @returns The opcode qword.
1456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1457 */
1458uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1459{
1460# ifdef IEM_WITH_CODE_TLB
1461 uint64_t u64;
1462 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1463 return u64;
1464# else
1465 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1466 if (rcStrict == VINF_SUCCESS)
1467 {
1468 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1469 pVCpu->iem.s.offOpcode = offOpcode + 8;
1470# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1471 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1472# else
1473 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1474 pVCpu->iem.s.abOpcode[offOpcode + 1],
1475 pVCpu->iem.s.abOpcode[offOpcode + 2],
1476 pVCpu->iem.s.abOpcode[offOpcode + 3],
1477 pVCpu->iem.s.abOpcode[offOpcode + 4],
1478 pVCpu->iem.s.abOpcode[offOpcode + 5],
1479 pVCpu->iem.s.abOpcode[offOpcode + 6],
1480 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1481# endif
1482 }
1483 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1484# endif
1485}
1486
1487#endif /* IEM_WITH_SETJMP */
1488
1489
1490
1491/** @name Misc Worker Functions.
1492 * @{
1493 */
1494
1495/**
1496 * Gets the exception class for the specified exception vector.
1497 *
1498 * @returns The class of the specified exception.
1499 * @param uVector The exception vector.
1500 */
1501static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1502{
1503 Assert(uVector <= X86_XCPT_LAST);
1504 switch (uVector)
1505 {
1506 case X86_XCPT_DE:
1507 case X86_XCPT_TS:
1508 case X86_XCPT_NP:
1509 case X86_XCPT_SS:
1510 case X86_XCPT_GP:
1511 case X86_XCPT_SX: /* AMD only */
1512 return IEMXCPTCLASS_CONTRIBUTORY;
1513
1514 case X86_XCPT_PF:
1515 case X86_XCPT_VE: /* Intel only */
1516 return IEMXCPTCLASS_PAGE_FAULT;
1517
1518 case X86_XCPT_DF:
1519 return IEMXCPTCLASS_DOUBLE_FAULT;
1520 }
1521 return IEMXCPTCLASS_BENIGN;
1522}
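/* Quick illustration (example values, not from the original source) of the
 * classification above as consumed by the double/triple fault logic further down:
 *      iemGetXcptClass(X86_XCPT_GP) -> IEMXCPTCLASS_CONTRIBUTORY
 *      iemGetXcptClass(X86_XCPT_PF) -> IEMXCPTCLASS_PAGE_FAULT
 *      iemGetXcptClass(X86_XCPT_UD) -> IEMXCPTCLASS_BENIGN
 */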
1523
1524
1525/**
1526 * Evaluates how to handle an exception caused during delivery of another event
1527 * (exception / interrupt).
1528 *
1529 * @returns How to handle the recursive exception.
1530 * @param pVCpu The cross context virtual CPU structure of the
1531 * calling thread.
1532 * @param fPrevFlags The flags of the previous event.
1533 * @param uPrevVector The vector of the previous event.
1534 * @param fCurFlags The flags of the current exception.
1535 * @param uCurVector The vector of the current exception.
1536 * @param pfXcptRaiseInfo Where to store additional information about the
1537 * exception condition. Optional.
1538 */
1539VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1540 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1541{
1542 /*
1543 * Only CPU exceptions can be raised while delivering other events, software interrupt
1544 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1545 */
1546 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1547 Assert(pVCpu); RT_NOREF(pVCpu);
1548 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1549
1550 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1551 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1552 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1553 {
1554 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1555 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1556 {
1557 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1558 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1559 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1560 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1561 {
1562 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1563 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1564 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1565 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1566 uCurVector, pVCpu->cpum.GstCtx.cr2));
1567 }
1568 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1569 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1570 {
1571 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1572 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1573 }
1574 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1575 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1576 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1577 {
1578 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1579 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1580 }
1581 }
1582 else
1583 {
1584 if (uPrevVector == X86_XCPT_NMI)
1585 {
1586 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1587 if (uCurVector == X86_XCPT_PF)
1588 {
1589 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1590 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1591 }
1592 }
1593 else if ( uPrevVector == X86_XCPT_AC
1594 && uCurVector == X86_XCPT_AC)
1595 {
1596 enmRaise = IEMXCPTRAISE_CPU_HANG;
1597 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1598 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1599 }
1600 }
1601 }
1602 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1603 {
1604 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1605 if (uCurVector == X86_XCPT_PF)
1606 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1607 }
1608 else
1609 {
1610 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1611 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1612 }
1613
1614 if (pfXcptRaiseInfo)
1615 *pfXcptRaiseInfo = fRaiseInfo;
1616 return enmRaise;
1617}
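/* Rough usage sketch (hypothetical caller, for illustration only): a #GP raised
 * while delivering a #PF is folded into a #DF per the table further up:
 * @code
 *     IEMXCPTRAISEINFO fRaiseInfo;
 *     IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                      &fRaiseInfo);
 *     Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT);
 *     Assert(fRaiseInfo & IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT);
 * @endcode */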
1618
1619
1620/**
1621 * Enters the CPU shutdown state initiated by a triple fault or other
1622 * unrecoverable conditions.
1623 *
1624 * @returns Strict VBox status code.
1625 * @param pVCpu The cross context virtual CPU structure of the
1626 * calling thread.
1627 */
1628static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1629{
1630 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1631 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1632
1633 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1634 {
1635 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1636 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1637 }
1638
1639 RT_NOREF(pVCpu);
1640 return VINF_EM_TRIPLE_FAULT;
1641}
1642
1643
1644/**
1645 * Validates a new SS segment.
1646 *
1647 * @returns VBox strict status code.
1648 * @param pVCpu The cross context virtual CPU structure of the
1649 * calling thread.
1650 * @param NewSS The new SS selector.
1651 * @param uCpl The CPL to load the stack for.
1652 * @param pDesc Where to return the descriptor.
1653 */
1654static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1655{
1656 /* Null selectors are not allowed (we're not called for dispatching
1657 interrupts with SS=0 in long mode). */
1658 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1659 {
1660 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1661 return iemRaiseTaskSwitchFault0(pVCpu);
1662 }
1663
1664 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1665 if ((NewSS & X86_SEL_RPL) != uCpl)
1666 {
1667 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1668 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1669 }
1670
1671 /*
1672 * Read the descriptor.
1673 */
1674 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1675 if (rcStrict != VINF_SUCCESS)
1676 return rcStrict;
1677
1678 /*
1679 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1680 */
1681 if (!pDesc->Legacy.Gen.u1DescType)
1682 {
1683 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1684 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1685 }
1686
1687 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1688 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1689 {
1690 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1691 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1692 }
1693 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1694 {
1695 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1696 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1697 }
1698
1699 /* Is it there? */
1700 /** @todo testcase: Is this checked before the canonical / limit check below? */
1701 if (!pDesc->Legacy.Gen.u1Present)
1702 {
1703 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1704 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1705 }
1706
1707 return VINF_SUCCESS;
1708}
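/* In short: for the new stack segment to be accepted here, SS.RPL == CPL == SS.DPL
 * must hold and the descriptor must denote a present, writable data segment; any
 * violation raises #TS (or #NP if the segment is merely not present). */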
1709
1710/** @} */
1711
1712
1713/** @name Raising Exceptions.
1714 *
1715 * @{
1716 */
1717
1718
1719/**
1720 * Loads the specified stack far pointer from the TSS.
1721 *
1722 * @returns VBox strict status code.
1723 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1724 * @param uCpl The CPL to load the stack for.
1725 * @param pSelSS Where to return the new stack segment.
1726 * @param puEsp Where to return the new stack pointer.
1727 */
1728static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1729{
1730 VBOXSTRICTRC rcStrict;
1731 Assert(uCpl < 4);
1732
1733 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1734 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1735 {
1736 /*
1737 * 16-bit TSS (X86TSS16).
1738 */
1739 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1740 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1741 {
1742 uint32_t off = uCpl * 4 + 2;
1743 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1744 {
1745 /** @todo check actual access pattern here. */
1746 uint32_t u32Tmp = 0; /* gcc maybe... */
1747 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1748 if (rcStrict == VINF_SUCCESS)
1749 {
1750 *puEsp = RT_LOWORD(u32Tmp);
1751 *pSelSS = RT_HIWORD(u32Tmp);
1752 return VINF_SUCCESS;
1753 }
1754 }
1755 else
1756 {
1757 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1758 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1759 }
1760 break;
1761 }
1762
1763 /*
1764 * 32-bit TSS (X86TSS32).
1765 */
1766 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1767 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1768 {
1769 uint32_t off = uCpl * 8 + 4;
1770 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1771 {
1772/** @todo check actual access pattern here. */
1773 uint64_t u64Tmp;
1774 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1775 if (rcStrict == VINF_SUCCESS)
1776 {
1777 *puEsp = u64Tmp & UINT32_MAX;
1778 *pSelSS = (RTSEL)(u64Tmp >> 32);
1779 return VINF_SUCCESS;
1780 }
1781 }
1782 else
1783 {
1784 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1785 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1786 }
1787 break;
1788 }
1789
1790 default:
1791 AssertFailed();
1792 rcStrict = VERR_IEM_IPE_4;
1793 break;
1794 }
1795
1796 *puEsp = 0; /* make gcc happy */
1797 *pSelSS = 0; /* make gcc happy */
1798 return rcStrict;
1799}
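/* Worked example (illustration only): for a 32-bit TSS and uCpl=1 the offset is
 * 1*8 + 4 = 12, i.e. X86TSS32::esp1, and the 8-byte read returns ESP1 in the low
 * dword and SS1 in the low word of the high dword. For a 16-bit TSS and uCpl=0
 * the offset is 0*4 + 2 = 2, i.e. X86TSS16::sp0 followed by X86TSS16::ss0. */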
1800
1801
1802/**
1803 * Loads the specified stack pointer from the 64-bit TSS.
1804 *
1805 * @returns VBox strict status code.
1806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1807 * @param uCpl The CPL to load the stack for.
1808 * @param uIst The interrupt stack table index, 0 if to use uCpl.
1809 * @param puRsp Where to return the new stack pointer.
1810 */
1811static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
1812{
1813 Assert(uCpl < 4);
1814 Assert(uIst < 8);
1815 *puRsp = 0; /* make gcc happy */
1816
1817 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1818 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
1819
1820 uint32_t off;
1821 if (uIst)
1822 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
1823 else
1824 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
1825 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
1826 {
1827 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
1828 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1829 }
1830
1831 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1832}
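/* Worked example (illustration only): with uIst=0 and uCpl=2 the offset is
 * 2*8 + RT_UOFFSETOF(X86TSS64, rsp0) = 20, i.e. X86TSS64::rsp2; with uIst=3 it is
 * (3-1)*8 + RT_UOFFSETOF(X86TSS64, ist1) = 52, i.e. X86TSS64::ist3. */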
1833
1834
1835/**
1836 * Adjust the CPU state according to the exception being raised.
1837 *
1838 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1839 * @param u8Vector The exception that has been raised.
1840 */
1841DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
1842{
1843 switch (u8Vector)
1844 {
1845 case X86_XCPT_DB:
1846 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
1847 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
1848 break;
1849 /** @todo Read the AMD and Intel exception reference... */
1850 }
1851}
1852
1853
1854/**
1855 * Implements exceptions and interrupts for real mode.
1856 *
1857 * @returns VBox strict status code.
1858 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1859 * @param cbInstr The number of bytes to offset rIP by in the return
1860 * address.
1861 * @param u8Vector The interrupt / exception vector number.
1862 * @param fFlags The flags.
1863 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1864 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1865 */
1866static VBOXSTRICTRC
1867iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
1868 uint8_t cbInstr,
1869 uint8_t u8Vector,
1870 uint32_t fFlags,
1871 uint16_t uErr,
1872 uint64_t uCr2) RT_NOEXCEPT
1873{
1874 NOREF(uErr); NOREF(uCr2);
1875 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1876
1877 /*
1878 * Read the IDT entry.
1879 */
1880 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
1881 {
1882 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
1883 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1884 }
1885 RTFAR16 Idte;
1886 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
1887 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1888 {
1889 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
1890 return rcStrict;
1891 }
1892
1893 /*
1894 * Push the stack frame.
1895 */
1896 uint16_t *pu16Frame;
1897 uint64_t uNewRsp;
1898 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
1899 if (rcStrict != VINF_SUCCESS)
1900 return rcStrict;
1901
1902 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
1903#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
1904 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
1905 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
1906 fEfl |= UINT16_C(0xf000);
1907#endif
1908 pu16Frame[2] = (uint16_t)fEfl;
1909 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
1910 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
1911 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
1912 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1913 return rcStrict;
1914
1915 /*
1916 * Load the vector address into cs:ip and make exception specific state
1917 * adjustments.
1918 */
1919 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
1920 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
1921 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1922 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
1923 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
1924 pVCpu->cpum.GstCtx.rip = Idte.off;
1925 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
1926 IEMMISC_SET_EFL(pVCpu, fEfl);
1927
1928 /** @todo do we actually do this in real mode? */
1929 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1930 iemRaiseXcptAdjustState(pVCpu, u8Vector);
1931
1932 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
1933}
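/* Illustration only: in real mode the IDT is the classic IVT of 4-byte IP:CS far
 * pointers, so e.g. vector 0x10 is read from idtr.pIdt + 0x40 and its last byte
 * must lie within idtr.cbIdt. The 6-byte frame pushed above ends up as IP at
 * [sp], CS at [sp+2] and FLAGS at [sp+4], which is what IRET expects. */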
1934
1935
1936/**
1937 * Loads a NULL data selector into a segment register when coming from V8086 mode.
1938 *
1939 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1940 * @param pSReg Pointer to the segment register.
1941 */
1942DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
1943{
1944 pSReg->Sel = 0;
1945 pSReg->ValidSel = 0;
1946 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
1947 {
1948 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes: */
1949 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
1950 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
1951 }
1952 else
1953 {
1954 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1955 /** @todo check this on AMD-V */
1956 pSReg->u64Base = 0;
1957 pSReg->u32Limit = 0;
1958 }
1959}
1960
1961
1962/**
1963 * Loads a segment selector during a task switch in V8086 mode.
1964 *
1965 * @param pSReg Pointer to the segment register.
1966 * @param uSel The selector value to load.
1967 */
1968DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
1969{
1970 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
1971 pSReg->Sel = uSel;
1972 pSReg->ValidSel = uSel;
1973 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1974 pSReg->u64Base = uSel << 4;
1975 pSReg->u32Limit = 0xffff;
1976 pSReg->Attr.u = 0xf3;
1977}
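/* The 0xf3 attribute value above decodes to: present (P=1), DPL=3, code/data
 * (S=1), type 3 = read/write data, accessed - i.e. the fixed attributes
 * virtual-8086 mode assigns to all segment registers. */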
1978
1979
1980/**
1981 * Loads a segment selector during a task switch in protected mode.
1982 *
1983 * In this task switch scenario, we would throw \#TS exceptions rather than
1984 * \#GPs.
1985 *
1986 * @returns VBox strict status code.
1987 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1988 * @param pSReg Pointer to the segment register.
1989 * @param uSel The new selector value.
1990 *
1991 * @remarks This does _not_ handle CS or SS.
1992 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
1993 */
1994static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
1995{
1996 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
1997
1998 /* Null data selector. */
1999 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2000 {
2001 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2002 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2003 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2004 return VINF_SUCCESS;
2005 }
2006
2007 /* Fetch the descriptor. */
2008 IEMSELDESC Desc;
2009 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2010 if (rcStrict != VINF_SUCCESS)
2011 {
2012 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2013 VBOXSTRICTRC_VAL(rcStrict)));
2014 return rcStrict;
2015 }
2016
2017 /* Must be a data segment or readable code segment. */
2018 if ( !Desc.Legacy.Gen.u1DescType
2019 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2020 {
2021 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2022 Desc.Legacy.Gen.u4Type));
2023 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2024 }
2025
2026 /* Check privileges for data segments and non-conforming code segments. */
2027 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2028 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2029 {
2030 /* The RPL and the new CPL must be less than or equal to the DPL. */
2031 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2032 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
2033 {
2034 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2035 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2036 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2037 }
2038 }
2039
2040 /* Is it there? */
2041 if (!Desc.Legacy.Gen.u1Present)
2042 {
2043 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2044 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2045 }
2046
2047 /* The base and limit. */
2048 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2049 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2050
2051 /*
2052 * Ok, everything checked out fine. Now set the accessed bit before
2053 * committing the result into the registers.
2054 */
2055 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2056 {
2057 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2058 if (rcStrict != VINF_SUCCESS)
2059 return rcStrict;
2060 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2061 }
2062
2063 /* Commit */
2064 pSReg->Sel = uSel;
2065 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2066 pSReg->u32Limit = cbLimit;
2067 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2068 pSReg->ValidSel = uSel;
2069 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2070 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2071 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2072
2073 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2074 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2075 return VINF_SUCCESS;
2076}
2077
2078
2079/**
2080 * Performs a task switch.
2081 *
2082 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2083 * caller is responsible for performing the necessary checks (like DPL, TSS
2084 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2085 * reference for JMP, CALL, IRET.
2086 *
2087 * If the task switch is due to a software interrupt or hardware exception,
2088 * the caller is responsible for validating the TSS selector and descriptor. See
2089 * Intel Instruction reference for INT n.
2090 *
2091 * @returns VBox strict status code.
2092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2093 * @param enmTaskSwitch The cause of the task switch.
2094 * @param uNextEip The EIP effective after the task switch.
2095 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2096 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2097 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2098 * @param SelTSS The TSS selector of the new task.
2099 * @param pNewDescTSS Pointer to the new TSS descriptor.
2100 */
2101VBOXSTRICTRC
2102iemTaskSwitch(PVMCPUCC pVCpu,
2103 IEMTASKSWITCH enmTaskSwitch,
2104 uint32_t uNextEip,
2105 uint32_t fFlags,
2106 uint16_t uErr,
2107 uint64_t uCr2,
2108 RTSEL SelTSS,
2109 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2110{
2111 Assert(!IEM_IS_REAL_MODE(pVCpu));
2112 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
2113 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2114
2115 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2116 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2117 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2118 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2119 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2120
2121 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2122 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2123
2124 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2125 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2126
2127 /* Update CR2 in case it's a page-fault. */
2128 /** @todo This should probably be done much earlier in IEM/PGM. See
2129 * @bugref{5653#c49}. */
2130 if (fFlags & IEM_XCPT_FLAGS_CR2)
2131 pVCpu->cpum.GstCtx.cr2 = uCr2;
2132
2133 /*
2134 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2135 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2136 */
2137 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2138 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
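/* For reference: X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN is 0x67 (a 104 byte minimum
 * TSS, cf. the AssertCompile on sizeof(X86TSS32) below) and
 * X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN is 0x2b (44 bytes); anything smaller
 * faults just below with #TS. */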
2139 if (uNewTSSLimit < uNewTSSLimitMin)
2140 {
2141 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2142 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2143 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2144 }
2145
2146 /*
2147 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2148 * The new TSS must have been read and validated (DPL, limits etc.) before a
2149 * task-switch VM-exit commences.
2150 *
2151 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2152 */
2153 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2154 {
2155 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2156 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2157 }
2158
2159 /*
2160 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2161 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2162 */
2163 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2164 {
2165 uint32_t const uExitInfo1 = SelTSS;
2166 uint32_t uExitInfo2 = uErr;
2167 switch (enmTaskSwitch)
2168 {
2169 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2170 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2171 default: break;
2172 }
2173 if (fFlags & IEM_XCPT_FLAGS_ERR)
2174 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2175 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2176 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2177
2178 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2179 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2180 RT_NOREF2(uExitInfo1, uExitInfo2);
2181 }
2182
2183 /*
2184 * Check the current TSS limit. The last written byte to the current TSS during the
2185 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2186 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2187 *
2188 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2189 * end up with smaller than "legal" TSS limits.
2190 */
2191 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2192 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2193 if (uCurTSSLimit < uCurTSSLimitMin)
2194 {
2195 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2196 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2197 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2198 }
2199
2200 /*
2201 * Verify that the new TSS can be accessed and map it. Map only the required contents
2202 * and not the entire TSS.
2203 */
2204 void *pvNewTSS;
2205 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2206 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2207 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2208 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2209 * not perform correct translation if this happens. See Intel spec. 7.2.1
2210 * "Task-State Segment". */
2211 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
2212 if (rcStrict != VINF_SUCCESS)
2213 {
2214 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2215 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2216 return rcStrict;
2217 }
2218
2219 /*
2220 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2221 */
2222 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
2223 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2224 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2225 {
2226 PX86DESC pDescCurTSS;
2227 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2228 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2229 if (rcStrict != VINF_SUCCESS)
2230 {
2231 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2232 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2233 return rcStrict;
2234 }
2235
2236 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2237 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2238 if (rcStrict != VINF_SUCCESS)
2239 {
2240 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2241 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2242 return rcStrict;
2243 }
2244
2245 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2246 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2247 {
2248 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2249 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2250 u32EFlags &= ~X86_EFL_NT;
2251 }
2252 }
2253
2254 /*
2255 * Save the CPU state into the current TSS.
2256 */
2257 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2258 if (GCPtrNewTSS == GCPtrCurTSS)
2259 {
2260 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2261 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2262 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
2263 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2264 pVCpu->cpum.GstCtx.ldtr.Sel));
2265 }
2266 if (fIsNewTSS386)
2267 {
2268 /*
2269 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2270 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2271 */
2272 void *pvCurTSS32;
2273 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2274 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2275 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2276 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2277 if (rcStrict != VINF_SUCCESS)
2278 {
2279 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2280 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2281 return rcStrict;
2282 }
2283
2284 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2285 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2286 pCurTSS32->eip = uNextEip;
2287 pCurTSS32->eflags = u32EFlags;
2288 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2289 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2290 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2291 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2292 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2293 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2294 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2295 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2296 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2297 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2298 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2299 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2300 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2301 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2302
2303 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2304 if (rcStrict != VINF_SUCCESS)
2305 {
2306 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2307 VBOXSTRICTRC_VAL(rcStrict)));
2308 return rcStrict;
2309 }
2310 }
2311 else
2312 {
2313 /*
2314 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2315 */
2316 void *pvCurTSS16;
2317 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2318 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2319 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2320 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2321 if (rcStrict != VINF_SUCCESS)
2322 {
2323 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2324 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2325 return rcStrict;
2326 }
2327
2328 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2329 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2330 pCurTSS16->ip = uNextEip;
2331 pCurTSS16->flags = u32EFlags;
2332 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2333 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2334 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2335 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2336 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2337 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2338 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2339 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2340 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2341 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2342 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2343 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2344
2345 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2346 if (rcStrict != VINF_SUCCESS)
2347 {
2348 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2349 VBOXSTRICTRC_VAL(rcStrict)));
2350 return rcStrict;
2351 }
2352 }
2353
2354 /*
2355 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2356 */
2357 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2358 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2359 {
2360 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2361 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2362 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2363 }
2364
2365 /*
2366 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2367 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2368 */
2369 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2370 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2371 bool fNewDebugTrap;
2372 if (fIsNewTSS386)
2373 {
2374 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2375 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2376 uNewEip = pNewTSS32->eip;
2377 uNewEflags = pNewTSS32->eflags;
2378 uNewEax = pNewTSS32->eax;
2379 uNewEcx = pNewTSS32->ecx;
2380 uNewEdx = pNewTSS32->edx;
2381 uNewEbx = pNewTSS32->ebx;
2382 uNewEsp = pNewTSS32->esp;
2383 uNewEbp = pNewTSS32->ebp;
2384 uNewEsi = pNewTSS32->esi;
2385 uNewEdi = pNewTSS32->edi;
2386 uNewES = pNewTSS32->es;
2387 uNewCS = pNewTSS32->cs;
2388 uNewSS = pNewTSS32->ss;
2389 uNewDS = pNewTSS32->ds;
2390 uNewFS = pNewTSS32->fs;
2391 uNewGS = pNewTSS32->gs;
2392 uNewLdt = pNewTSS32->selLdt;
2393 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2394 }
2395 else
2396 {
2397 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2398 uNewCr3 = 0;
2399 uNewEip = pNewTSS16->ip;
2400 uNewEflags = pNewTSS16->flags;
2401 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2402 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2403 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2404 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2405 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2406 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2407 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2408 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2409 uNewES = pNewTSS16->es;
2410 uNewCS = pNewTSS16->cs;
2411 uNewSS = pNewTSS16->ss;
2412 uNewDS = pNewTSS16->ds;
2413 uNewFS = 0;
2414 uNewGS = 0;
2415 uNewLdt = pNewTSS16->selLdt;
2416 fNewDebugTrap = false;
2417 }
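/* Note: a 16-bit TSS only provides the low 16 bits of the general registers;
 * the 0xffff0000 fill above is simply the value this implementation picks for
 * the undefined upper halves (no claim is made about what real CPUs leave
 * there). */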
2418
2419 if (GCPtrNewTSS == GCPtrCurTSS)
2420 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2421 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2422
2423 /*
2424 * We're done accessing the new TSS.
2425 */
2426 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2427 if (rcStrict != VINF_SUCCESS)
2428 {
2429 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2430 return rcStrict;
2431 }
2432
2433 /*
2434 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2435 */
2436 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2437 {
2438 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2439 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2440 if (rcStrict != VINF_SUCCESS)
2441 {
2442 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2443 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2444 return rcStrict;
2445 }
2446
2447 /* Check that the descriptor indicates the new TSS is available (not busy). */
2448 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2449 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2450 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2451
2452 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2453 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2454 if (rcStrict != VINF_SUCCESS)
2455 {
2456 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2457 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2458 return rcStrict;
2459 }
2460 }
2461
2462 /*
2463 * From this point on, we're technically in the new task. We will defer exceptions
2464 * until the completion of the task switch but before executing any instructions in the new task.
2465 */
2466 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2467 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2468 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2469 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2470 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2471 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2472 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2473
2474 /* Set the busy bit in TR. */
2475 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2476
2477 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2478 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2479 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2480 {
2481 uNewEflags |= X86_EFL_NT;
2482 }
2483
2484 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2485 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2486 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2487
2488 pVCpu->cpum.GstCtx.eip = uNewEip;
2489 pVCpu->cpum.GstCtx.eax = uNewEax;
2490 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2491 pVCpu->cpum.GstCtx.edx = uNewEdx;
2492 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2493 pVCpu->cpum.GstCtx.esp = uNewEsp;
2494 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2495 pVCpu->cpum.GstCtx.esi = uNewEsi;
2496 pVCpu->cpum.GstCtx.edi = uNewEdi;
2497
2498 uNewEflags &= X86_EFL_LIVE_MASK;
2499 uNewEflags |= X86_EFL_RA1_MASK;
2500 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2501
2502 /*
2503 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2504 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2505 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2506 */
2507 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2508 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2509
2510 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2511 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2512
2513 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2514 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2515
2516 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2517 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2518
2519 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2520 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2521
2522 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2523 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2524 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2525
2526 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2527 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2528 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2529 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2530
2531 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2532 {
2533 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2534 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2535 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2536 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2537 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2538 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2539 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2540 }
2541
2542 /*
2543 * Switch CR3 for the new task.
2544 */
2545 if ( fIsNewTSS386
2546 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2547 {
2548 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2549 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2550 AssertRCSuccessReturn(rc, rc);
2551
2552 /* Inform PGM. */
2553 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2554 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2555 AssertRCReturn(rc, rc);
2556 /* ignore informational status codes */
2557
2558 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2559 }
2560
2561 /*
2562 * Switch LDTR for the new task.
2563 */
2564 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2565 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2566 else
2567 {
2568 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2569
2570 IEMSELDESC DescNewLdt;
2571 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2572 if (rcStrict != VINF_SUCCESS)
2573 {
2574 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2575 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2576 return rcStrict;
2577 }
2578 if ( !DescNewLdt.Legacy.Gen.u1Present
2579 || DescNewLdt.Legacy.Gen.u1DescType
2580 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2581 {
2582 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2583 uNewLdt, DescNewLdt.Legacy.u));
2584 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2585 }
2586
2587 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2588 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2589 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2590 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2591 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2592 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2593 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2594 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2595 }
2596
2597 IEMSELDESC DescSS;
2598 if (IEM_IS_V86_MODE(pVCpu))
2599 {
2600 pVCpu->iem.s.uCpl = 3;
2601 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2602 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2603 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2604 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2605 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2606 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2607
2608 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2609 DescSS.Legacy.u = 0;
2610 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2611 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2612 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2613 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2614 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2615 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2616 DescSS.Legacy.Gen.u2Dpl = 3;
2617 }
2618 else
2619 {
2620 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2621
2622 /*
2623 * Load the stack segment for the new task.
2624 */
2625 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2626 {
2627 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2628 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2629 }
2630
2631 /* Fetch the descriptor. */
2632 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2633 if (rcStrict != VINF_SUCCESS)
2634 {
2635 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2636 VBOXSTRICTRC_VAL(rcStrict)));
2637 return rcStrict;
2638 }
2639
2640 /* SS must be a data segment and writable. */
2641 if ( !DescSS.Legacy.Gen.u1DescType
2642 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2643 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2644 {
2645 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2646 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2647 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2648 }
2649
2650 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2651 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2652 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2653 {
2654 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2655 uNewCpl));
2656 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2657 }
2658
2659 /* Is it there? */
2660 if (!DescSS.Legacy.Gen.u1Present)
2661 {
2662 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2663 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2664 }
2665
2666 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2667 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2668
2669 /* Set the accessed bit before committing the result into SS. */
2670 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2671 {
2672 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2673 if (rcStrict != VINF_SUCCESS)
2674 return rcStrict;
2675 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2676 }
2677
2678 /* Commit SS. */
2679 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2680 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2681 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2682 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2683 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2684 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2685 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2686
2687 /* CPL has changed, update IEM before loading rest of segments. */
2688 pVCpu->iem.s.uCpl = uNewCpl;
2689
2690 /*
2691 * Load the data segments for the new task.
2692 */
2693 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2694 if (rcStrict != VINF_SUCCESS)
2695 return rcStrict;
2696 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2697 if (rcStrict != VINF_SUCCESS)
2698 return rcStrict;
2699 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2700 if (rcStrict != VINF_SUCCESS)
2701 return rcStrict;
2702 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2703 if (rcStrict != VINF_SUCCESS)
2704 return rcStrict;
2705
2706 /*
2707 * Load the code segment for the new task.
2708 */
2709 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2710 {
2711 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2712 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2713 }
2714
2715 /* Fetch the descriptor. */
2716 IEMSELDESC DescCS;
2717 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2718 if (rcStrict != VINF_SUCCESS)
2719 {
2720 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2721 return rcStrict;
2722 }
2723
2724 /* CS must be a code segment. */
2725 if ( !DescCS.Legacy.Gen.u1DescType
2726 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2727 {
2728 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2729 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2730 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2731 }
2732
2733 /* For conforming CS, DPL must be less than or equal to the RPL. */
2734 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2735 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2736 {
2737 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2738 DescCS.Legacy.Gen.u2Dpl));
2739 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2740 }
2741
2742 /* For non-conforming CS, DPL must match RPL. */
2743 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2744 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2745 {
2746 Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2747 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2748 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2749 }
2750
2751 /* Is it there? */
2752 if (!DescCS.Legacy.Gen.u1Present)
2753 {
2754 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2755 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2756 }
2757
2758 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2759 u64Base = X86DESC_BASE(&DescCS.Legacy);
2760
2761 /* Set the accessed bit before committing the result into CS. */
2762 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2763 {
2764 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2765 if (rcStrict != VINF_SUCCESS)
2766 return rcStrict;
2767 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2768 }
2769
2770 /* Commit CS. */
2771 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2772 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2773 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2774 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2775 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2776 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2777 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2778 }
2779
2780 /** @todo Debug trap. */
2781 if (fIsNewTSS386 && fNewDebugTrap)
2782 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
2783
2784 /*
2785 * Construct the error code masks based on what caused this task switch.
2786 * See Intel Instruction reference for INT.
2787 */
2788 uint16_t uExt;
2789 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
2790 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2791 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
2792 {
2793 uExt = 1;
2794 }
2795 else
2796 uExt = 0;
2797
2798 /*
2799 * Push any error code on to the new stack.
2800 */
2801 if (fFlags & IEM_XCPT_FLAGS_ERR)
2802 {
2803 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
2804 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2805 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
2806
2807 /* Check that there is sufficient space on the stack. */
2808 /** @todo Factor out segment limit checking for normal/expand down segments
2809 * into a separate function. */
2810 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
2811 {
2812 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
2813 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
2814 {
2815 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2816 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
2817 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2818 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2819 }
2820 }
2821 else
2822 {
2823 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
2824 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
2825 {
2826 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
2827 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2828 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2829 }
2830 }
2831
2832
2833 if (fIsNewTSS386)
2834 rcStrict = iemMemStackPushU32(pVCpu, uErr);
2835 else
2836 rcStrict = iemMemStackPushU16(pVCpu, uErr);
2837 if (rcStrict != VINF_SUCCESS)
2838 {
2839 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
2840 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
2841 return rcStrict;
2842 }
2843 }
2844
2845 /* Check the new EIP against the new CS limit. */
2846 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
2847 {
2848 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
2849 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
2850 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2851 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
2852 }
2853
2854 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
2855 pVCpu->cpum.GstCtx.ss.Sel));
2856 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2857}
2858
2859
2860/**
2861 * Implements exceptions and interrupts for protected mode.
2862 *
2863 * @returns VBox strict status code.
2864 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2865 * @param cbInstr The number of bytes to offset rIP by in the return
2866 * address.
2867 * @param u8Vector The interrupt / exception vector number.
2868 * @param fFlags The flags.
2869 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2870 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2871 */
2872static VBOXSTRICTRC
2873iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
2874 uint8_t cbInstr,
2875 uint8_t u8Vector,
2876 uint32_t fFlags,
2877 uint16_t uErr,
2878 uint64_t uCr2) RT_NOEXCEPT
2879{
2880 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2881
2882 /*
2883 * Read the IDT entry.
2884 */
2885 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
2886 {
2887 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2888 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2889 }
2890 X86DESC Idte;
2891 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
2892 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
2893 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2894 {
2895 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2896 return rcStrict;
2897 }
2898 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
2899 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2900 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2901
2902 /*
2903 * Check the descriptor type, DPL and such.
2904 * ASSUMES this is done in the same order as described for call-gate calls.
2905 */
2906 if (Idte.Gate.u1DescType)
2907 {
2908 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2909 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2910 }
2911 bool fTaskGate = false;
2912 uint8_t f32BitGate = true;
2913 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
2914 switch (Idte.Gate.u4Type)
2915 {
2916 case X86_SEL_TYPE_SYS_UNDEFINED:
2917 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
2918 case X86_SEL_TYPE_SYS_LDT:
2919 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2920 case X86_SEL_TYPE_SYS_286_CALL_GATE:
2921 case X86_SEL_TYPE_SYS_UNDEFINED2:
2922 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
2923 case X86_SEL_TYPE_SYS_UNDEFINED3:
2924 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2925 case X86_SEL_TYPE_SYS_386_CALL_GATE:
2926 case X86_SEL_TYPE_SYS_UNDEFINED4:
2927 {
2928 /** @todo check what actually happens when the type is wrong...
2929 * esp. call gates. */
2930 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2931 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2932 }
2933
2934 case X86_SEL_TYPE_SYS_286_INT_GATE:
2935 f32BitGate = false;
2936 RT_FALL_THRU();
2937 case X86_SEL_TYPE_SYS_386_INT_GATE:
2938 fEflToClear |= X86_EFL_IF;
2939 break;
2940
2941 case X86_SEL_TYPE_SYS_TASK_GATE:
2942 fTaskGate = true;
2943#ifndef IEM_IMPLEMENTS_TASKSWITCH
2944 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
2945#endif
2946 break;
2947
2948 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
2949 f32BitGate = false;
RT_FALL_THRU();
2950 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
2951 break;
2952
2953 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2954 }
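/* Summary of the gate handling above: interrupt gates additionally clear
 * EFLAGS.IF on entry while trap gates do not, the 286 gate variants push
 * 16-bit frames (f32BitGate = false), task gates divert to iemTaskSwitch
 * further down, and every other descriptor type is rejected with #GP. */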
2955
2956 /* Check DPL against CPL if applicable. */
2957 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
2958 {
2959 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
2960 {
2961 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
2962 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2963 }
2964 }
2965
2966 /* Is it there? */
2967 if (!Idte.Gate.u1Present)
2968 {
2969 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
2970 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2971 }
2972
2973 /* Is it a task-gate? */
2974 if (fTaskGate)
2975 {
2976 /*
2977 * Construct the error code masks based on what caused this task switch.
2978 * See Intel Instruction reference for INT.
2979 */
2980 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2981 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
2982 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
2983 RTSEL SelTSS = Idte.Gate.u16Sel;
2984
2985 /*
2986 * Fetch the TSS descriptor in the GDT.
2987 */
2988 IEMSELDESC DescTSS;
2989 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
2990 if (rcStrict != VINF_SUCCESS)
2991 {
2992 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
2993 VBOXSTRICTRC_VAL(rcStrict)));
2994 return rcStrict;
2995 }
2996
2997 /* The TSS descriptor must be a system segment and be available (not busy). */
2998 if ( DescTSS.Legacy.Gen.u1DescType
2999 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3000 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3001 {
3002 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3003 u8Vector, SelTSS, DescTSS.Legacy.au64));
3004 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3005 }
3006
3007 /* The TSS must be present. */
3008 if (!DescTSS.Legacy.Gen.u1Present)
3009 {
3010 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3011 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3012 }
3013
3014 /* Do the actual task switch. */
3015 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3016 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3017 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3018 }
3019
3020 /* A null CS is bad. */
3021 RTSEL NewCS = Idte.Gate.u16Sel;
3022 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3023 {
3024 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3025 return iemRaiseGeneralProtectionFault0(pVCpu);
3026 }
3027
3028 /* Fetch the descriptor for the new CS. */
3029 IEMSELDESC DescCS;
3030 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3031 if (rcStrict != VINF_SUCCESS)
3032 {
3033 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3034 return rcStrict;
3035 }
3036
3037 /* Must be a code segment. */
3038 if (!DescCS.Legacy.Gen.u1DescType)
3039 {
3040 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3041 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3042 }
3043 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3044 {
3045 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3046 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3047 }
3048
3049 /* Don't allow lowering the privilege level. */
3050 /** @todo Does the lowering of privileges apply to software interrupts
3051 * only? This has bearings on the more-privileged or
3052 * same-privilege stack behavior further down. A testcase would
3053 * be nice. */
3054 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3055 {
3056 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3057 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3058 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3059 }
3060
3061 /* Make sure the selector is present. */
3062 if (!DescCS.Legacy.Gen.u1Present)
3063 {
3064 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3065 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3066 }
3067
3068 /* Check the new EIP against the new CS limit. */
3069 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3070 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3071 ? Idte.Gate.u16OffsetLow
3072 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3073 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3074 if (uNewEip > cbLimitCS)
3075 {
3076 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3077 u8Vector, uNewEip, cbLimitCS, NewCS));
3078 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3079 }
3080 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3081
3082 /* Calc the flag image to push. */
3083 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3084 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3085 fEfl &= ~X86_EFL_RF;
3086 else
3087 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3088
3089 /* From V8086 mode only go to CPL 0. */
3090 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3091 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3092 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3093 {
3094 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3095 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3096 }
3097
3098 /*
3099 * If the privilege level changes, we need to get a new stack from the TSS.
3100 * This in turn means validating the new SS and ESP...
3101 */
3102 if (uNewCpl != pVCpu->iem.s.uCpl)
3103 {
3104 RTSEL NewSS;
3105 uint32_t uNewEsp;
3106 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3107 if (rcStrict != VINF_SUCCESS)
3108 return rcStrict;
3109
3110 IEMSELDESC DescSS;
3111 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3112 if (rcStrict != VINF_SUCCESS)
3113 return rcStrict;
3114 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3115 if (!DescSS.Legacy.Gen.u1DefBig)
3116 {
3117 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3118 uNewEsp = (uint16_t)uNewEsp;
3119 }
3120
3121 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3122
3123 /* Check that there is sufficient space for the stack frame. */
3124 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3125 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3126 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3127 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
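        /* Note: given the flag math above this works out to 10 or 12 bytes (5/6 words)
           for a 16-bit gate and 20 or 24 bytes (5/6 dwords) for a 32-bit gate, without
           or with an error code; interrupting V8086 code adds the ES, DS, FS and GS
           pushes, giving 18/20 resp. 36/40 bytes. */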
3128
3129 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3130 {
3131 if ( uNewEsp - 1 > cbLimitSS
3132 || uNewEsp < cbStackFrame)
3133 {
3134 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3135 u8Vector, NewSS, uNewEsp, cbStackFrame));
3136 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3137 }
3138 }
3139 else
3140 {
3141 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3142 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3143 {
3144 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3145 u8Vector, NewSS, uNewEsp, cbStackFrame));
3146 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3147 }
3148 }
3149
3150 /*
3151 * Start making changes.
3152 */
3153
3154 /* Set the new CPL so that stack accesses use it. */
3155 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3156 pVCpu->iem.s.uCpl = uNewCpl;
3157
3158 /* Create the stack frame. */
3159 RTPTRUNION uStackFrame;
3160 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3161 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3162 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3163 if (rcStrict != VINF_SUCCESS)
3164 return rcStrict;
3165 void * const pvStackFrame = uStackFrame.pv;
3166 if (f32BitGate)
3167 {
3168 if (fFlags & IEM_XCPT_FLAGS_ERR)
3169 *uStackFrame.pu32++ = uErr;
3170 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3171 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3172 uStackFrame.pu32[2] = fEfl;
3173 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3174 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3175 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3176 if (fEfl & X86_EFL_VM)
3177 {
3178 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3179 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3180 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3181 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3182 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3183 }
3184 }
3185 else
3186 {
3187 if (fFlags & IEM_XCPT_FLAGS_ERR)
3188 *uStackFrame.pu16++ = uErr;
3189 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3190 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3191 uStackFrame.pu16[2] = fEfl;
3192 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3193 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3194 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3195 if (fEfl & X86_EFL_VM)
3196 {
3197 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3198 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3199 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3200 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3201 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3202 }
3203 }
3204 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3205 if (rcStrict != VINF_SUCCESS)
3206 return rcStrict;
3207
3208 /* Mark the selectors 'accessed' (hope this is the correct time). */
3209 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3210 * after pushing the stack frame? (Write protect the gdt + stack to
3211 * find out.) */
3212 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3213 {
3214 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3215 if (rcStrict != VINF_SUCCESS)
3216 return rcStrict;
3217 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3218 }
3219
3220 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3221 {
3222 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3223 if (rcStrict != VINF_SUCCESS)
3224 return rcStrict;
3225 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3226 }
3227
3228 /*
3229 * Start committing the register changes (joins with the DPL=CPL branch).
3230 */
3231 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3232 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3233 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3234 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3235 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3236 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3237 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3238 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3239 * SP is loaded).
3240 * Need to check the other combinations too:
3241 * - 16-bit TSS, 32-bit handler
3242 * - 32-bit TSS, 16-bit handler */
3243 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3244 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3245 else
3246 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3247
3248 if (fEfl & X86_EFL_VM)
3249 {
3250 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3251 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3252 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3253 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3254 }
3255 }
3256 /*
3257 * Same privilege, no stack change and smaller stack frame.
3258 */
3259 else
3260 {
3261 uint64_t uNewRsp;
3262 RTPTRUNION uStackFrame;
3263 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
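        /* I.e. 6 or 8 bytes (IP, CS, FLAGS plus an optional error code) for a 16-bit
           gate and 12 or 16 bytes for a 32-bit gate; SS:ESP is not pushed since the
           stack does not change. */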
3264 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
3265 if (rcStrict != VINF_SUCCESS)
3266 return rcStrict;
3267 void * const pvStackFrame = uStackFrame.pv;
3268
3269 if (f32BitGate)
3270 {
3271 if (fFlags & IEM_XCPT_FLAGS_ERR)
3272 *uStackFrame.pu32++ = uErr;
3273 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3274 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3275 uStackFrame.pu32[2] = fEfl;
3276 }
3277 else
3278 {
3279 if (fFlags & IEM_XCPT_FLAGS_ERR)
3280 *uStackFrame.pu16++ = uErr;
3281 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3282 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3283 uStackFrame.pu16[2] = fEfl;
3284 }
3285 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3286 if (rcStrict != VINF_SUCCESS)
3287 return rcStrict;
3288
3289 /* Mark the CS selector as 'accessed'. */
3290 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3291 {
3292 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3293 if (rcStrict != VINF_SUCCESS)
3294 return rcStrict;
3295 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3296 }
3297
3298 /*
3299 * Start committing the register changes (joins with the other branch).
3300 */
3301 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3302 }
3303
3304 /* ... register committing continues. */
3305 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3306 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3307 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3308 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3309 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3310 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3311
3312 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3313 fEfl &= ~fEflToClear;
3314 IEMMISC_SET_EFL(pVCpu, fEfl);
3315
3316 if (fFlags & IEM_XCPT_FLAGS_CR2)
3317 pVCpu->cpum.GstCtx.cr2 = uCr2;
3318
3319 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3320 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3321
3322 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3323}
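
/* For reference, a rough sketch of the inner-ring stack frame the protected mode code
 * above builds for a 32-bit gate when not interrupting V8086 code, in ascending address
 * order (i.e. reverse push order).  The struct name is purely illustrative and is not
 * used anywhere in IEM:
 *
 *      typedef struct XCPTFRAME32SKETCH
 *      {
 *          uint32_t    uErrCd;     // Only present when IEM_XCPT_FLAGS_ERR is set.
 *          uint32_t    uEip;       // Return EIP; advanced past the instruction for software ints.
 *          uint32_t    uCs;        // Old CS with the RPL bits replaced by the old CPL.
 *          uint32_t    fEfl;       // The EFLAGS image calculated above.
 *          uint32_t    uEsp;       // The outer (old) ESP.
 *          uint32_t    uSs;        // The outer (old) SS.
 *      } XCPTFRAME32SKETCH;
 *
 * Interrupting V8086 code additionally pushes ES, DS, FS and GS above SS, and the data
 * segment registers are then loaded with null selectors (iemHlpLoadNullDataSelectorOnV86Xcpt).
 */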
3324
3325
3326/**
3327 * Implements exceptions and interrupts for long mode.
3328 *
3329 * @returns VBox strict status code.
3330 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3331 * @param cbInstr The number of bytes to offset rIP by in the return
3332 * address.
3333 * @param u8Vector The interrupt / exception vector number.
3334 * @param fFlags The flags.
3335 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3336 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3337 */
3338static VBOXSTRICTRC
3339iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3340 uint8_t cbInstr,
3341 uint8_t u8Vector,
3342 uint32_t fFlags,
3343 uint16_t uErr,
3344 uint64_t uCr2) RT_NOEXCEPT
3345{
3346 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3347
3348 /*
3349 * Read the IDT entry.
3350 */
3351 uint16_t offIdt = (uint16_t)u8Vector << 4;
3352 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3353 {
3354 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3355 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3356 }
3357 X86DESC64 Idte;
3358#ifdef _MSC_VER /* Shut up silly compiler warning. */
3359 Idte.au64[0] = 0;
3360 Idte.au64[1] = 0;
3361#endif
3362 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3363 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3364 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3365 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3366 {
3367 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3368 return rcStrict;
3369 }
3370 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3371 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3372 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3373
3374 /*
3375 * Check the descriptor type, DPL and such.
3376 * ASSUMES this is done in the same order as described for call-gate calls.
3377 */
3378 if (Idte.Gate.u1DescType)
3379 {
3380 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3381 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3382 }
3383 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3384 switch (Idte.Gate.u4Type)
3385 {
3386 case AMD64_SEL_TYPE_SYS_INT_GATE:
3387 fEflToClear |= X86_EFL_IF;
3388 break;
3389 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3390 break;
3391
3392 default:
3393 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3394 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3395 }
3396
3397 /* Check DPL against CPL if applicable. */
3398 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3399 {
3400 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
3401 {
3402 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
3403 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3404 }
3405 }
3406
3407 /* Is it there? */
3408 if (!Idte.Gate.u1Present)
3409 {
3410 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3411 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3412 }
3413
3414 /* A null CS is bad. */
3415 RTSEL NewCS = Idte.Gate.u16Sel;
3416 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3417 {
3418 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3419 return iemRaiseGeneralProtectionFault0(pVCpu);
3420 }
3421
3422 /* Fetch the descriptor for the new CS. */
3423 IEMSELDESC DescCS;
3424 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3425 if (rcStrict != VINF_SUCCESS)
3426 {
3427 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3428 return rcStrict;
3429 }
3430
3431 /* Must be a 64-bit code segment. */
3432 if (!DescCS.Long.Gen.u1DescType)
3433 {
3434 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3435 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3436 }
3437 if ( !DescCS.Long.Gen.u1Long
3438 || DescCS.Long.Gen.u1DefBig
3439 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3440 {
3441 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3442 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3443 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3444 }
3445
3446 /* Don't allow lowering the privilege level. For non-conforming CS
3447 selectors, the CS.DPL sets the privilege level the trap/interrupt
3448 handler runs at. For conforming CS selectors, the CPL remains
3449 unchanged, but the CS.DPL must be <= CPL. */
3450 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3451 * when CPU in Ring-0. Result \#GP? */
3452 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3453 {
3454 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3455 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3456 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3457 }
3458
3459
3460 /* Make sure the selector is present. */
3461 if (!DescCS.Legacy.Gen.u1Present)
3462 {
3463 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3464 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3465 }
3466
3467 /* Check that the new RIP is canonical. */
3468 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3469 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3470 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3471 if (!IEM_IS_CANONICAL(uNewRip))
3472 {
3473 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3474 return iemRaiseGeneralProtectionFault0(pVCpu);
3475 }
3476
3477 /*
3478 * If the privilege level changes or if the IST isn't zero, we need to get
3479 * a new stack from the TSS.
3480 */
3481 uint64_t uNewRsp;
3482 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3483 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3484 if ( uNewCpl != pVCpu->iem.s.uCpl
3485 || Idte.Gate.u3IST != 0)
3486 {
3487 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3488 if (rcStrict != VINF_SUCCESS)
3489 return rcStrict;
3490 }
3491 else
3492 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3493 uNewRsp &= ~(uint64_t)0xf;
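    /* Note: in long mode the CPU aligns the new RSP down to a 16 byte boundary before
       pushing the interrupt stack frame, whether it came from the TSS/IST or is the
       current RSP. */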
3494
3495 /*
3496 * Calc the flag image to push.
3497 */
3498 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3499 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3500 fEfl &= ~X86_EFL_RF;
3501 else
3502 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3503
3504 /*
3505 * Start making changes.
3506 */
3507 /* Set the new CPL so that stack accesses use it. */
3508 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3509 pVCpu->iem.s.uCpl = uNewCpl;
3510
3511 /* Create the stack frame. */
3512 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3513 RTPTRUNION uStackFrame;
3514 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3515 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3516 if (rcStrict != VINF_SUCCESS)
3517 return rcStrict;
3518 void * const pvStackFrame = uStackFrame.pv;
3519
3520 if (fFlags & IEM_XCPT_FLAGS_ERR)
3521 *uStackFrame.pu64++ = uErr;
3522 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3523 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3524 uStackFrame.pu64[2] = fEfl;
3525 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3526 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3527 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3528 if (rcStrict != VINF_SUCCESS)
3529 return rcStrict;
3530
3531 /* Mark the CS selector as 'accessed' (hope this is the correct time). */
3532 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3533 * after pushing the stack frame? (Write protect the gdt + stack to
3534 * find out.) */
3535 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3536 {
3537 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3538 if (rcStrict != VINF_SUCCESS)
3539 return rcStrict;
3540 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3541 }
3542
3543 /*
3544 * Start committing the register changes.
3545 */
3546 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3547 * hidden registers when interrupting 32-bit or 16-bit code! */
3548 if (uNewCpl != uOldCpl)
3549 {
3550 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3551 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3552 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3553 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3554 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3555 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3556 }
3557 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3558 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3559 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3560 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3561 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3562 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3563 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3564 pVCpu->cpum.GstCtx.rip = uNewRip;
3565
3566 fEfl &= ~fEflToClear;
3567 IEMMISC_SET_EFL(pVCpu, fEfl);
3568
3569 if (fFlags & IEM_XCPT_FLAGS_CR2)
3570 pVCpu->cpum.GstCtx.cr2 = uCr2;
3571
3572 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3573 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3574
3575 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3576}
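
/* For reference, a rough sketch of the 64-bit frame built above, in ascending address
 * order (i.e. reverse push order); illustrative only:
 *
 *      [uint64_t uErrCd]   - only when IEM_XCPT_FLAGS_ERR is set
 *       uint64_t uRip      - return RIP; advanced past the instruction for software ints
 *       uint64_t uCs       - old CS with the RPL bits replaced by the old CPL
 *       uint64_t fRFlags   - the RFLAGS image calculated above
 *       uint64_t uRsp      - the old RSP
 *       uint64_t uSs       - the old SS
 *
 * Unlike the legacy protected mode path, SS:RSP is pushed unconditionally here, even
 * when the privilege level does not change.
 */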
3577
3578
3579/**
3580 * Implements exceptions and interrupts.
3581 *
3582 * All exceptions and interrupts go through this function!
3583 *
3584 * @returns VBox strict status code.
3585 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3586 * @param cbInstr The number of bytes to offset rIP by in the return
3587 * address.
3588 * @param u8Vector The interrupt / exception vector number.
3589 * @param fFlags The flags.
3590 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3591 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3592 */
3593VBOXSTRICTRC
3594iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3595 uint8_t cbInstr,
3596 uint8_t u8Vector,
3597 uint32_t fFlags,
3598 uint16_t uErr,
3599 uint64_t uCr2) RT_NOEXCEPT
3600{
3601 /*
3602 * Get all the state that we might need here.
3603 */
3604 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3605 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3606
3607#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3608 /*
3609 * Flush prefetch buffer
3610 */
3611 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3612#endif
3613
3614 /*
3615 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3616 */
3617 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3618 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3619 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3620 | IEM_XCPT_FLAGS_BP_INSTR
3621 | IEM_XCPT_FLAGS_ICEBP_INSTR
3622 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3623 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3624 {
3625 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3626 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3627 u8Vector = X86_XCPT_GP;
3628 uErr = 0;
3629 }
3630#ifdef DBGFTRACE_ENABLED
3631 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3632 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3633 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3634#endif
3635
3636 /*
3637 * Evaluate whether NMI blocking should be in effect.
3638 * Normally, NMI blocking is in effect whenever we inject an NMI.
3639 */
3640 bool fBlockNmi;
3641 if ( u8Vector == X86_XCPT_NMI
3642 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
3643 fBlockNmi = true;
3644 else
3645 fBlockNmi = false;
3646
3647#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3648 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3649 {
3650 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3651 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3652 return rcStrict0;
3653
3654 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3655 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3656 {
3657 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3658 fBlockNmi = false;
3659 }
3660 }
3661#endif
3662
3663#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3664 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3665 {
3666 /*
3667 * If the event is being injected as part of VMRUN, it isn't subject to event
3668 * intercepts in the nested-guest. However, secondary exceptions that occur
3669 * during injection of any event -are- subject to exception intercepts.
3670 *
3671 * See AMD spec. 15.20 "Event Injection".
3672 */
3673 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3674 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3675 else
3676 {
3677 /*
3678 * Check and handle if the event being raised is intercepted.
3679 */
3680 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
3681 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3682 return rcStrict0;
3683 }
3684 }
3685#endif
3686
3687 /*
3688 * Set NMI blocking if necessary.
3689 */
3690 if ( fBlockNmi
3691 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
3692 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
3693
3694 /*
3695 * Do recursion accounting.
3696 */
3697 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3698 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3699 if (pVCpu->iem.s.cXcptRecursions == 0)
3700 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3701 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3702 else
3703 {
3704 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3705 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3706 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3707
3708 if (pVCpu->iem.s.cXcptRecursions >= 4)
3709 {
3710#ifdef DEBUG_bird
3711 AssertFailed();
3712#endif
3713 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3714 }
3715
3716 /*
3717 * Evaluate the sequence of recurring events.
3718 */
3719 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3720 NULL /* pXcptRaiseInfo */);
3721 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3722 { /* likely */ }
3723 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3724 {
3725 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3726 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3727 u8Vector = X86_XCPT_DF;
3728 uErr = 0;
3729#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3730 /* VMX nested-guest #DF intercept needs to be checked here. */
3731 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3732 {
3733 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3734 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3735 return rcStrict0;
3736 }
3737#endif
3738 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3739 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3740 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3741 }
3742 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3743 {
3744 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3745 return iemInitiateCpuShutdown(pVCpu);
3746 }
3747 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3748 {
3749 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3750 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3751 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3752 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3753 return VERR_EM_GUEST_CPU_HANG;
3754 }
3755 else
3756 {
3757 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3758 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3759 return VERR_IEM_IPE_9;
3760 }
3761
3762 /*
3763 * The 'EXT' bit is set when an exception occurs during delivery of an external
3764 * event (such as an interrupt or an earlier exception)[1]. Privileged software
3765 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
3766 * interrupts and the INTO and INT3 instructions, the 'EXT' bit will not be set.
3767 *
3768 * [1] - Intel spec. 6.13 "Error Code"
3769 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
3770 * [3] - Intel Instruction reference for INT n.
3771 */
3772 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
3773 && (fFlags & IEM_XCPT_FLAGS_ERR)
3774 && u8Vector != X86_XCPT_PF
3775 && u8Vector != X86_XCPT_DF)
3776 {
3777 uErr |= X86_TRAP_ERR_EXTERNAL;
3778 }
3779 }
3780
3781 pVCpu->iem.s.cXcptRecursions++;
3782 pVCpu->iem.s.uCurXcpt = u8Vector;
3783 pVCpu->iem.s.fCurXcpt = fFlags;
3784 pVCpu->iem.s.uCurXcptErr = uErr;
3785 pVCpu->iem.s.uCurXcptCr2 = uCr2;
3786
3787 /*
3788 * Extensive logging.
3789 */
3790#if defined(LOG_ENABLED) && defined(IN_RING3)
3791 if (LogIs3Enabled())
3792 {
3793 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
3794 PVM pVM = pVCpu->CTX_SUFF(pVM);
3795 char szRegs[4096];
3796 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3797 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3798 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3799 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3800 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3801 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3802 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3803 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3804 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3805 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3806 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3807 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3808 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3809 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3810 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3811 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3812 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3813 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3814 " efer=%016VR{efer}\n"
3815 " pat=%016VR{pat}\n"
3816 " sf_mask=%016VR{sf_mask}\n"
3817 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3818 " lstar=%016VR{lstar}\n"
3819 " star=%016VR{star} cstar=%016VR{cstar}\n"
3820 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3821 );
3822
3823 char szInstr[256];
3824 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3825 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3826 szInstr, sizeof(szInstr), NULL);
3827 Log3(("%s%s\n", szRegs, szInstr));
3828 }
3829#endif /* LOG_ENABLED */
3830
3831 /*
3832 * Call the mode specific worker function.
3833 */
3834 VBOXSTRICTRC rcStrict;
3835 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
3836 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3837 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
3838 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3839 else
3840 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3841
3842 /* Flush the prefetch buffer. */
3843#ifdef IEM_WITH_CODE_TLB
3844 pVCpu->iem.s.pbInstrBuf = NULL;
3845#else
3846 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
3847#endif
3848
3849 /*
3850 * Unwind.
3851 */
3852 pVCpu->iem.s.cXcptRecursions--;
3853 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
3854 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
3855 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
3856 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
3857 pVCpu->iem.s.cXcptRecursions + 1));
3858 return rcStrict;
3859}
3860
3861#ifdef IEM_WITH_SETJMP
3862/**
3863 * See iemRaiseXcptOrInt. Will not return.
3864 */
3865DECL_NO_RETURN(void)
3866iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
3867 uint8_t cbInstr,
3868 uint8_t u8Vector,
3869 uint32_t fFlags,
3870 uint16_t uErr,
3871 uint64_t uCr2) RT_NOEXCEPT
3872{
3873 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3874 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3875}
3876#endif
3877
3878
3879/** \#DE - 00. */
3880VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
3881{
3882 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3883}
3884
3885
3886/** \#DB - 01.
3887 * @note This automatically clears DR7.GD. */
3888VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
3889{
3890 /** @todo set/clear RF. */
3891 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3892 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3893}
3894
3895
3896/** \#BR - 05. */
3897VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
3898{
3899 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3900}
3901
3902
3903/** \#UD - 06. */
3904VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
3905{
3906 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3907}
3908
3909
3910/** \#NM - 07. */
3911VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
3912{
3913 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3914}
3915
3916
3917/** \#TS(err) - 0a. */
3918VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3919{
3920 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3921}
3922
3923
3924/** \#TS(tr) - 0a. */
3925VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
3926{
3927 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3928 pVCpu->cpum.GstCtx.tr.Sel, 0);
3929}
3930
3931
3932/** \#TS(0) - 0a. */
3933VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
3934{
3935 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3936 0, 0);
3937}
3938
3939
3940/** \#TS(err) - 0a. */
3941VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3942{
3943 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3944 uSel & X86_SEL_MASK_OFF_RPL, 0);
3945}
3946
3947
3948/** \#NP(err) - 0b. */
3949VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3950{
3951 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3952}
3953
3954
3955/** \#NP(sel) - 0b. */
3956VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3957{
3958 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3959 uSel & ~X86_SEL_RPL, 0);
3960}
3961
3962
3963/** \#SS(seg) - 0c. */
3964VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3965{
3966 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3967 uSel & ~X86_SEL_RPL, 0);
3968}
3969
3970
3971/** \#SS(err) - 0c. */
3972VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3973{
3974 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3975}
3976
3977
3978/** \#GP(n) - 0d. */
3979VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3980{
3981 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3982}
3983
3984
3985/** \#GP(0) - 0d. */
3986VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
3987{
3988 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
3989}
3990
3991#ifdef IEM_WITH_SETJMP
3992/** \#GP(0) - 0d. */
3993DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) RT_NOEXCEPT
3994{
3995 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
3996}
3997#endif
3998
3999
4000/** \#GP(sel) - 0d. */
4001VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4002{
4003 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4004 Sel & ~X86_SEL_RPL, 0);
4005}
4006
4007
4008/** \#GP(0) - 0d. */
4009VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4010{
4011 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4012}
4013
4014
4015/** \#GP(sel) - 0d. */
4016VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4017{
4018 NOREF(iSegReg); NOREF(fAccess);
4019 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4020 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4021}
4022
4023#ifdef IEM_WITH_SETJMP
4024/** \#GP(sel) - 0d, longjmp. */
4025DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4026{
4027 NOREF(iSegReg); NOREF(fAccess);
4028 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4029 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4030}
4031#endif
4032
4033/** \#GP(sel) - 0d. */
4034VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4035{
4036 NOREF(Sel);
4037 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4038}
4039
4040#ifdef IEM_WITH_SETJMP
4041/** \#GP(sel) - 0d, longjmp. */
4042DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4043{
4044 NOREF(Sel);
4045 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4046}
4047#endif
4048
4049
4050/** \#GP(sel) - 0d. */
4051VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4052{
4053 NOREF(iSegReg); NOREF(fAccess);
4054 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4055}
4056
4057#ifdef IEM_WITH_SETJMP
4058/** \#GP(sel) - 0d, longjmp. */
4059DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4060{
4061 NOREF(iSegReg); NOREF(fAccess);
4062 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4063}
4064#endif
4065
4066
4067/** \#PF(n) - 0e. */
4068VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc) RT_NOEXCEPT
4069{
4070 uint16_t uErr;
4071 switch (rc)
4072 {
4073 case VERR_PAGE_NOT_PRESENT:
4074 case VERR_PAGE_TABLE_NOT_PRESENT:
4075 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4076 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4077 uErr = 0;
4078 break;
4079
4080 default:
4081 AssertMsgFailed(("%Rrc\n", rc));
4082 RT_FALL_THRU();
4083 case VERR_ACCESS_DENIED:
4084 uErr = X86_TRAP_PF_P;
4085 break;
4086
4087 /** @todo reserved */
4088 }
4089
4090 if (pVCpu->iem.s.uCpl == 3)
4091 uErr |= X86_TRAP_PF_US;
4092
4093 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4094 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4095 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4096 uErr |= X86_TRAP_PF_ID;
4097
4098#if 0 /* This is so much non-sense, really. Why was it done like that? */
4099 /* Note! RW access callers reporting a WRITE protection fault will clear
4100 the READ flag before calling. So, read-modify-write accesses (RW)
4101 can safely be reported as READ faults. */
4102 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4103 uErr |= X86_TRAP_PF_RW;
4104#else
4105 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4106 {
4107 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4108 /// (regardless of outcome of the comparison in the latter case).
4109 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4110 uErr |= X86_TRAP_PF_RW;
4111 }
4112#endif
4113
4114 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4115 uErr, GCPtrWhere);
4116}
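
/* A couple of worked examples of the error code assembly above, assuming the standard
 * X86_TRAP_PF_* bit layout (P=1, RW=2, US=4, ID=16):
 *      - Ring-3 write to a present but write-protected page (VERR_ACCESS_DENIED):
 *        uErr = P | US | RW = 0x07.
 *      - Ring-0 instruction fetch from a present no-execute page with CR4.PAE and
 *        EFER.NXE set: uErr = P | ID = 0x11.
 *      - Ring-3 read from a not-present page (VERR_PAGE_NOT_PRESENT): uErr = US = 0x04.
 */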
4117
4118#ifdef IEM_WITH_SETJMP
4119/** \#PF(n) - 0e, longjmp. */
4120DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc) RT_NOEXCEPT
4121{
4122 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
4123}
4124#endif
4125
4126
4127/** \#MF(0) - 10. */
4128VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu)
4129{
4130 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4131}
4132
4133
4134/** \#AC(0) - 11. */
4135VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu)
4136{
4137 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4138}
4139
4140#ifdef IEM_WITH_SETJMP
4141/** \#AC(0) - 11, longjmp. */
4142DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
4143{
4144 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4145}
4146#endif
4147
4148
4149/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4150IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4151{
4152 NOREF(cbInstr);
4153 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4154}
4155
4156
4157/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4158IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4159{
4160 NOREF(cbInstr);
4161 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4162}
4163
4164
4165/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4166IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4167{
4168 NOREF(cbInstr);
4169 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4170}
4171
4172
4173/** @} */
4174
4175/** @name Common opcode decoders.
4176 * @{
4177 */
4178//#include <iprt/mem.h>
4179
4180/**
4181 * Used to add extra details about a stub case.
4182 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4183 */
4184void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4185{
4186#if defined(LOG_ENABLED) && defined(IN_RING3)
4187 PVM pVM = pVCpu->CTX_SUFF(pVM);
4188 char szRegs[4096];
4189 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4190 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4191 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4192 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4193 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4194 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4195 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4196 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4197 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4198 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4199 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4200 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4201 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4202 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4203 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4204 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4205 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4206 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4207 " efer=%016VR{efer}\n"
4208 " pat=%016VR{pat}\n"
4209 " sf_mask=%016VR{sf_mask}\n"
4210 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4211 " lstar=%016VR{lstar}\n"
4212 " star=%016VR{star} cstar=%016VR{cstar}\n"
4213 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4214 );
4215
4216 char szInstr[256];
4217 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4218 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4219 szInstr, sizeof(szInstr), NULL);
4220
4221 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4222#else
4223 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4224#endif
4225}
4226
4227/** @} */
4228
4229
4230
4231/** @name Register Access.
4232 * @{
4233 */
4234
4235/**
4236 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4237 *
4238 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4239 * segment limit.
4240 *
4241 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4242 * @param offNextInstr The offset of the next instruction.
4243 */
4244VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPUCC pVCpu, int8_t offNextInstr) RT_NOEXCEPT
4245{
4246 switch (pVCpu->iem.s.enmEffOpSize)
4247 {
4248 case IEMMODE_16BIT:
4249 {
4250 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4251 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
4252 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4253 return iemRaiseGeneralProtectionFault0(pVCpu);
4254 pVCpu->cpum.GstCtx.rip = uNewIp;
4255 break;
4256 }
4257
4258 case IEMMODE_32BIT:
4259 {
4260 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4261 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4262
4263 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4264 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
4265 return iemRaiseGeneralProtectionFault0(pVCpu);
4266 pVCpu->cpum.GstCtx.rip = uNewEip;
4267 break;
4268 }
4269
4270 case IEMMODE_64BIT:
4271 {
4272 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4273
4274 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4275 if (!IEM_IS_CANONICAL(uNewRip))
4276 return iemRaiseGeneralProtectionFault0(pVCpu);
4277 pVCpu->cpum.GstCtx.rip = uNewRip;
4278 break;
4279 }
4280
4281 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4282 }
4283
4284 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4285
4286#ifndef IEM_WITH_CODE_TLB
4287 /* Flush the prefetch buffer. */
4288 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4289#endif
4290
4291 return VINF_SUCCESS;
4292}
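
/* Quick worked example of the 16-bit case above: a two byte 'jmp short $' (EB FE) at
 * CS:IP=0x1234 gives uNewIp = 0x1234 + (-2) + 2 = 0x1234, i.e. a tight self jump; the
 * uint16_t arithmetic wraps modulo 64K before the CS limit check is applied. */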
4293
4294
4295/**
4296 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4297 *
4298 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4299 * segment limit.
4300 *
4301 * @returns Strict VBox status code.
4302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4303 * @param offNextInstr The offset of the next instruction.
4304 */
4305VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPUCC pVCpu, int16_t offNextInstr) RT_NOEXCEPT
4306{
4307 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4308
4309 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4310 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
4311 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4312 return iemRaiseGeneralProtectionFault0(pVCpu);
4313 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4314 pVCpu->cpum.GstCtx.rip = uNewIp;
4315 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4316
4317#ifndef IEM_WITH_CODE_TLB
4318 /* Flush the prefetch buffer. */
4319 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4320#endif
4321
4322 return VINF_SUCCESS;
4323}
4324
4325
4326/**
4327 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4328 *
4329 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4330 * segment limit.
4331 *
4332 * @returns Strict VBox status code.
4333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4334 * @param offNextInstr The offset of the next instruction.
4335 */
4336VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPUCC pVCpu, int32_t offNextInstr) RT_NOEXCEPT
4337{
4338 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
4339
4340 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
4341 {
4342 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4343
4344 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4345 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
4346 return iemRaiseGeneralProtectionFault0(pVCpu);
4347 pVCpu->cpum.GstCtx.rip = uNewEip;
4348 }
4349 else
4350 {
4351 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4352
4353 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4354 if (!IEM_IS_CANONICAL(uNewRip))
4355 return iemRaiseGeneralProtectionFault0(pVCpu);
4356 pVCpu->cpum.GstCtx.rip = uNewRip;
4357 }
4358 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4359
4360#ifndef IEM_WITH_CODE_TLB
4361 /* Flush the prefetch buffer. */
4362 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4363#endif
4364
4365 return VINF_SUCCESS;
4366}
4367
4368
4369/**
4370 * Performs a near jump to the specified address.
4371 *
4372 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4373 * segment limit.
4374 *
4375 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4376 * @param uNewRip The new RIP value.
4377 */
4378VBOXSTRICTRC iemRegRipJump(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4379{
4380 switch (pVCpu->iem.s.enmEffOpSize)
4381 {
4382 case IEMMODE_16BIT:
4383 {
4384 Assert(uNewRip <= UINT16_MAX);
4385 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
4386 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4387 return iemRaiseGeneralProtectionFault0(pVCpu);
4388 /** @todo Test 16-bit jump in 64-bit mode. */
4389 pVCpu->cpum.GstCtx.rip = uNewRip;
4390 break;
4391 }
4392
4393 case IEMMODE_32BIT:
4394 {
4395 Assert(uNewRip <= UINT32_MAX);
4396 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4397 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4398
4399 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
4400 return iemRaiseGeneralProtectionFault0(pVCpu);
4401 pVCpu->cpum.GstCtx.rip = uNewRip;
4402 break;
4403 }
4404
4405 case IEMMODE_64BIT:
4406 {
4407 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4408
4409 if (!IEM_IS_CANONICAL(uNewRip))
4410 return iemRaiseGeneralProtectionFault0(pVCpu);
4411 pVCpu->cpum.GstCtx.rip = uNewRip;
4412 break;
4413 }
4414
4415 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4416 }
4417
4418 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4419
4420#ifndef IEM_WITH_CODE_TLB
4421 /* Flush the prefetch buffer. */
4422 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4423#endif
4424
4425 return VINF_SUCCESS;
4426}
4427
4428/** @} */
4429
4430
4431/** @name FPU access and helpers.
4432 *
4433 * @{
4434 */
4435
4436/**
4437 * Updates the x87.DS and FPUDP registers.
4438 *
4439 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4440 * @param pFpuCtx The FPU context.
4441 * @param iEffSeg The effective segment register.
4442 * @param GCPtrEff The effective address relative to @a iEffSeg.
4443 */
4444DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4445{
4446 RTSEL sel;
4447 switch (iEffSeg)
4448 {
4449 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4450 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4451 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4452 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4453 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4454 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4455 default:
4456 AssertMsgFailed(("%d\n", iEffSeg));
4457 sel = pVCpu->cpum.GstCtx.ds.Sel;
4458 }
4459 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4460 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4461 {
4462 pFpuCtx->DS = 0;
4463 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4464 }
4465 else if (!IEM_IS_LONG_MODE(pVCpu))
4466 {
4467 pFpuCtx->DS = sel;
4468 pFpuCtx->FPUDP = GCPtrEff;
4469 }
4470 else
4471 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4472}
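
/* Worked example of the real/V86 mode branch above: DS=0x1234 with an effective address
 * of 0x0010 yields FPUDP = 0x0010 + (0x1234 << 4) = 0x12350 and DS = 0, i.e. the linear
 * address is recorded; protected mode keeps the selector and offset separate, while long
 * mode stores the full 64-bit address across the FPUDP/DS fields. */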
4473
4474
4475/**
4476 * Rotates the stack registers in the push direction.
4477 *
4478 * @param pFpuCtx The FPU context.
4479 * @remarks This is a complete waste of time, but fxsave stores the registers in
4480 * stack order.
4481 */
4482DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4483{
4484 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4485 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4486 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4487 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4488 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4489 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4490 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4491 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4492 pFpuCtx->aRegs[0].r80 = r80Tmp;
4493}
4494
4495
4496/**
4497 * Rotates the stack registers in the pop direction.
4498 *
4499 * @param pFpuCtx The FPU context.
4500 * @remarks This is a complete waste of time, but fxsave stores the registers in
4501 * stack order.
4502 */
4503DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4504{
4505 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4506 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4507 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4508 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4509 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4510 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4511 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4512 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4513 pFpuCtx->aRegs[7].r80 = r80Tmp;
4514}
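
/*
 * Worked example (illustrative): aRegs[] mirrors the fxsave image, so
 * aRegs[i] always holds ST(i), which lives in physical register (TOP + i) % 8.
 * When TOP moves on a push or pop, the array contents therefore have to be
 * rotated as above to keep that invariant.  The modulo-8 tricks used elsewhere
 * in this file follow from the same 3-bit TOP field: adding 7 decrements TOP
 * (push), adding 1 or 9 increments it (pop).
 *
 *      // Mapping an ST(i) operand to its physical register number, as done by
 *      // iemFpuStoreResultOnly() below when setting the FTW bit:
 *      uint16_t const iPhysReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
 *      // ...while the value itself is simply pFpuCtx->aRegs[iStReg].r80.
 */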
4515
4516
4517/**
4518 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4519 * exception prevents it.
4520 *
4521 * @param pResult The FPU operation result to push.
4522 * @param pFpuCtx The FPU context.
4523 */
4524static void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4525{
4526 /* Update FSW and bail if there are pending exceptions afterwards. */
4527 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4528 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4529 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4530 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4531 {
4532 pFpuCtx->FSW = fFsw;
4533 return;
4534 }
4535
4536 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4537 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4538 {
4539 /* All is fine, push the actual value. */
4540 pFpuCtx->FTW |= RT_BIT(iNewTop);
4541 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4542 }
4543 else if (pFpuCtx->FCW & X86_FCW_IM)
4544 {
4545 /* Masked stack overflow, push QNaN. */
4546 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4547 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4548 }
4549 else
4550 {
4551 /* Raise stack overflow, don't push anything. */
4552 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4553 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4554 return;
4555 }
4556
4557 fFsw &= ~X86_FSW_TOP_MASK;
4558 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4559 pFpuCtx->FSW = fFsw;
4560
4561 iemFpuRotateStackPush(pFpuCtx);
4562}
4563
4564
4565/**
4566 * Stores a result in a FPU register and updates the FSW and FTW.
4567 *
4568 * @param pFpuCtx The FPU context.
4569 * @param pResult The result to store.
4570 * @param iStReg Which FPU register to store it in.
4571 */
4572static void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4573{
4574 Assert(iStReg < 8);
4575 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
4576 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4577 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
4578 pFpuCtx->FTW |= RT_BIT(iReg);
4579 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4580}
4581
4582
4583/**
4584 * Only updates the FPU status word (FSW) with the result of the current
4585 * instruction.
4586 *
4587 * @param pFpuCtx The FPU context.
4588 * @param u16FSW The FSW output of the current instruction.
4589 */
4590static void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4591{
4592 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4593 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
4594}
4595
4596
4597/**
4598 * Pops one item off the FPU stack if no pending exception prevents it.
4599 *
4600 * @param pFpuCtx The FPU context.
4601 */
4602static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4603{
4604 /* Check pending exceptions. */
4605 uint16_t uFSW = pFpuCtx->FSW;
4606 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4607 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4608 return;
4609
4610 /* TOP--. */
4611 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4612 uFSW &= ~X86_FSW_TOP_MASK;
4613 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4614 pFpuCtx->FSW = uFSW;
4615
4616 /* Mark the previous ST0 as empty. */
4617 iOldTop >>= X86_FSW_TOP_SHIFT;
4618 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4619
4620 /* Rotate the registers. */
4621 iemFpuRotateStackPop(pFpuCtx);
4622}
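
/*
 * Arithmetic note (illustrative): the "+ 9" above is simply TOP + 1 carried
 * out inside the 3-bit TOP field of FSW (bits 13:11); 9 is congruent to 1
 * modulo 8, so after masking the result is identical:
 *
 *      // TOP=3:  (3 + 9) & 7 = 4      (FSW bits 13:11 go from 011 to 100)
 *      // TOP=7:  (7 + 9) & 7 = 0      (wraps around as expected)
 */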
4623
4624
4625/**
4626 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4627 *
4628 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4629 * @param pResult The FPU operation result to push.
4630 */
4631void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult) RT_NOEXCEPT
4632{
4633 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4634 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4635 iemFpuMaybePushResult(pResult, pFpuCtx);
4636}
4637
4638
4639/**
4640 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4641 * and sets FPUDP and FPUDS.
4642 *
4643 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4644 * @param pResult The FPU operation result to push.
4645 * @param iEffSeg The effective segment register.
4646 * @param GCPtrEff The effective address relative to @a iEffSeg.
4647 */
4648void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4649{
4650 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4651 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4652 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4653 iemFpuMaybePushResult(pResult, pFpuCtx);
4654}
4655
4656
4657/**
4658 * Replace ST0 with the first value and push the second onto the FPU stack,
4659 * unless a pending exception prevents it.
4660 *
4661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4662 * @param pResult The FPU operation result to store and push.
4663 */
4664void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult) RT_NOEXCEPT
4665{
4666 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4667 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4668
4669 /* Update FSW and bail if there are pending exceptions afterwards. */
4670 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4671 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4672 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4673 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4674 {
4675 pFpuCtx->FSW = fFsw;
4676 return;
4677 }
4678
4679 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4680 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4681 {
4682 /* All is fine, push the actual value. */
4683 pFpuCtx->FTW |= RT_BIT(iNewTop);
4684 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
4685 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
4686 }
4687 else if (pFpuCtx->FCW & X86_FCW_IM)
4688 {
4689 /* Masked stack overflow, push QNaN. */
4690 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4691 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
4692 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4693 }
4694 else
4695 {
4696 /* Raise stack overflow, don't push anything. */
4697 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4698 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4699 return;
4700 }
4701
4702 fFsw &= ~X86_FSW_TOP_MASK;
4703 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4704 pFpuCtx->FSW = fFsw;
4705
4706 iemFpuRotateStackPush(pFpuCtx);
4707}
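
/*
 * Illustrative note: this is the shape needed by x87 instructions that produce
 * two results (FSINCOS/FPTAN/FXTRACT style operations).  After a successful
 * push, r80Result1 (which replaced the old ST0) ends up in ST(1) and
 * r80Result2, the pushed value, in ST(0):
 *
 *      // Before:  ST(0) = x
 *      // After:   ST(1) = r80Result1,  ST(0) = r80Result2
 */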
4708
4709
4710/**
4711 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4712 * FOP.
4713 *
4714 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4715 * @param pResult The result to store.
4716 * @param iStReg Which FPU register to store it in.
4717 */
4718void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4719{
4720 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4721 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4722 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
4723}
4724
4725
4726/**
4727 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4728 * FOP, and then pops the stack.
4729 *
4730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4731 * @param pResult The result to store.
4732 * @param iStReg Which FPU register to store it in.
4733 */
4734void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4735{
4736 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4737 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4738 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
4739 iemFpuMaybePopOne(pFpuCtx);
4740}
4741
4742
4743/**
4744 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4745 * FPUDP, and FPUDS.
4746 *
4747 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4748 * @param pResult The result to store.
4749 * @param iStReg Which FPU register to store it in.
4750 * @param iEffSeg The effective memory operand selector register.
4751 * @param GCPtrEff The effective memory operand offset.
4752 */
4753void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
4754 uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4755{
4756 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4757 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4758 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4759 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
4760}
4761
4762
4763/**
4764 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4765 * FPUDP, and FPUDS, and then pops the stack.
4766 *
4767 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4768 * @param pResult The result to store.
4769 * @param iStReg Which FPU register to store it in.
4770 * @param iEffSeg The effective memory operand selector register.
4771 * @param GCPtrEff The effective memory operand offset.
4772 */
4773void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
4774 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4775{
4776 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4777 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4778 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4779 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
4780 iemFpuMaybePopOne(pFpuCtx);
4781}
4782
4783
4784/**
4785 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
4786 *
4787 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4788 */
4789void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu) RT_NOEXCEPT
4790{
4791 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4792 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4793}
4794
4795
4796/**
4797 * Updates the FSW, FOP, FPUIP, and FPUCS.
4798 *
4799 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4800 * @param u16FSW The FSW from the current instruction.
4801 */
4802void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4803{
4804 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4805 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4806 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4807}
4808
4809
4810/**
4811 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
4812 *
4813 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4814 * @param u16FSW The FSW from the current instruction.
4815 */
4816void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4817{
4818 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4819 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4820 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4821 iemFpuMaybePopOne(pFpuCtx);
4822}
4823
4824
4825/**
4826 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
4827 *
4828 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4829 * @param u16FSW The FSW from the current instruction.
4830 * @param iEffSeg The effective memory operand selector register.
4831 * @param GCPtrEff The effective memory operand offset.
4832 */
4833void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4834{
4835 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4836 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4837 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4838 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4839}
4840
4841
4842/**
4843 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
4844 *
4845 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4846 * @param u16FSW The FSW from the current instruction.
4847 */
4848void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4849{
4850 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4851 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4852 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4853 iemFpuMaybePopOne(pFpuCtx);
4854 iemFpuMaybePopOne(pFpuCtx);
4855}
4856
4857
4858/**
4859 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
4860 *
4861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4862 * @param u16FSW The FSW from the current instruction.
4863 * @param iEffSeg The effective memory operand selector register.
4864 * @param GCPtrEff The effective memory operand offset.
4865 */
4866void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4867{
4868 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4869 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4870 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4871 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4872 iemFpuMaybePopOne(pFpuCtx);
4873}
4874
4875
4876/**
4877 * Worker routine for raising an FPU stack underflow exception.
4878 *
4879 * @param pFpuCtx The FPU context.
4880 * @param iStReg The stack register being accessed.
4881 */
4882static void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
4883{
4884 Assert(iStReg < 8 || iStReg == UINT8_MAX);
4885 if (pFpuCtx->FCW & X86_FCW_IM)
4886 {
4887 /* Masked underflow. */
4888 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4889 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
4890 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
4891 if (iStReg != UINT8_MAX)
4892 {
4893 pFpuCtx->FTW |= RT_BIT(iReg);
4894 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
4895 }
4896 }
4897 else
4898 {
4899 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4900 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4901 }
4902}
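
/*
 * Summary of the two paths above (illustrative):
 *
 *      // FCW.IM set (masked):     FSW gets IE+SF with the C bits cleared, and
 *      //                          unless iStReg == UINT8_MAX the addressed
 *      //                          register is marked valid in FTW and loaded
 *      //                          with a QNaN.
 *      // FCW.IM clear (unmasked): FSW gets IE+SF+ES+B and nothing is stored;
 *      //                          the pending exception surfaces on the next
 *      //                          waiting FPU instruction.
 */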
4903
4904
4905/**
4906 * Raises a FPU stack underflow exception.
4907 *
4908 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4909 * @param iStReg The destination register that should be loaded
4910 *                      with QNaN if \#IS is masked. Specify
4911 * UINT8_MAX if none (like for fcom).
4912 */
4913void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
4914{
4915 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4916 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4917 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
4918}
4919
4920
4921void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4922{
4923 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4924 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4925 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4926 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
4927}
4928
4929
4930void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
4931{
4932 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4933 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4934 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
4935 iemFpuMaybePopOne(pFpuCtx);
4936}
4937
4938
4939void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4940{
4941 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4942 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4943 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4944 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
4945 iemFpuMaybePopOne(pFpuCtx);
4946}
4947
4948
4949void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu) RT_NOEXCEPT
4950{
4951 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4952 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4953 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
4954 iemFpuMaybePopOne(pFpuCtx);
4955 iemFpuMaybePopOne(pFpuCtx);
4956}
4957
4958
4959void iemFpuStackPushUnderflow(PVMCPUCC pVCpu) RT_NOEXCEPT
4960{
4961 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4962 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4963
4964 if (pFpuCtx->FCW & X86_FCW_IM)
4965 {
4966        /* Masked underflow - Push QNaN. */
4967 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
4968 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4969 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
4970 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4971 pFpuCtx->FTW |= RT_BIT(iNewTop);
4972 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4973 iemFpuRotateStackPush(pFpuCtx);
4974 }
4975 else
4976 {
4977 /* Exception pending - don't change TOP or the register stack. */
4978 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4979 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4980 }
4981}
4982
4983
4984void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu) RT_NOEXCEPT
4985{
4986 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4987 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4988
4989 if (pFpuCtx->FCW & X86_FCW_IM)
4990 {
4991        /* Masked underflow - Push QNaN. */
4992 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
4993 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4994 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
4995 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4996 pFpuCtx->FTW |= RT_BIT(iNewTop);
4997 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
4998 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4999 iemFpuRotateStackPush(pFpuCtx);
5000 }
5001 else
5002 {
5003 /* Exception pending - don't change TOP or the register stack. */
5004 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5005 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5006 }
5007}
5008
5009
5010/**
5011 * Worker routine for raising an FPU stack overflow exception on a push.
5012 *
5013 * @param pFpuCtx The FPU context.
5014 */
5015static void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5016{
5017 if (pFpuCtx->FCW & X86_FCW_IM)
5018 {
5019 /* Masked overflow. */
5020 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5021 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5022 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5023 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5024 pFpuCtx->FTW |= RT_BIT(iNewTop);
5025 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5026 iemFpuRotateStackPush(pFpuCtx);
5027 }
5028 else
5029 {
5030 /* Exception pending - don't change TOP or the register stack. */
5031 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5032 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5033 }
5034}
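
/*
 * Illustrative note: stack overflow and stack underflow are distinguished by
 * C1 in FSW - the overflow workers above set C1 together with IE+SF, while the
 * underflow workers leave C1 clear.  In both cases the masked reaction is to
 * provide a QNaN result (in the addressed register, or in the register about
 * to become ST(0) for pushes), and the unmasked reaction is to set ES+B and
 * leave TOP and the register stack unchanged.
 */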
5035
5036
5037/**
5038 * Raises a FPU stack overflow exception on a push.
5039 *
5040 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5041 */
5042void iemFpuStackPushOverflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5043{
5044 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5045 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5046 iemFpuStackPushOverflowOnly(pFpuCtx);
5047}
5048
5049
5050/**
5051 * Raises a FPU stack overflow exception on a push with a memory operand.
5052 *
5053 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5054 * @param iEffSeg The effective memory operand selector register.
5055 * @param GCPtrEff The effective memory operand offset.
5056 */
5057void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5058{
5059 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5060 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5061 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5062 iemFpuStackPushOverflowOnly(pFpuCtx);
5063}
5064
5065/** @} */
5066
5067
5068/** @name Memory access.
5069 *
5070 * @{
5071 */
5072
5073
5074/**
5075 * Updates the IEMCPU::cbWritten counter if applicable.
5076 *
5077 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5078 * @param fAccess The access being accounted for.
5079 * @param cbMem The access size.
5080 */
5081DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5082{
5083 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5084 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5085 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5086}
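
/*
 * Example (illustrative): only data and stack *writes* are counted, e.g.
 *
 *      iemMemUpdateWrittenCounter(pVCpu, IEM_ACCESS_WHAT_DATA  | IEM_ACCESS_TYPE_WRITE, 4); // counted
 *      iemMemUpdateWrittenCounter(pVCpu, IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE, 8); // counted
 *      iemMemUpdateWrittenCounter(pVCpu, IEM_ACCESS_WHAT_DATA  | IEM_ACCESS_TYPE_READ,  4); // not counted
 *
 * A combined read+write data access still has IEM_ACCESS_TYPE_WRITE set and is
 * therefore counted as well.
 */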
5087
5088
5089/**
5090 * Applies the segment limit, base and attributes.
5091 *
5092 * This may raise a \#GP or \#SS.
5093 *
5094 * @returns VBox strict status code.
5095 *
5096 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5097 * @param fAccess The kind of access which is being performed.
5098 * @param iSegReg The index of the segment register to apply.
5099 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5100 * TSS, ++).
5101 * @param cbMem The access size.
5102 * @param pGCPtrMem Pointer to the guest memory address to apply
5103 * segmentation to. Input and output parameter.
5104 */
5105VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5106{
5107 if (iSegReg == UINT8_MAX)
5108 return VINF_SUCCESS;
5109
5110 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5111 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5112 switch (pVCpu->iem.s.enmCpuMode)
5113 {
5114 case IEMMODE_16BIT:
5115 case IEMMODE_32BIT:
5116 {
5117 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5118 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5119
5120 if ( pSel->Attr.n.u1Present
5121 && !pSel->Attr.n.u1Unusable)
5122 {
5123 Assert(pSel->Attr.n.u1DescType);
5124 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5125 {
5126 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5127 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5128 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5129
5130 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5131 {
5132 /** @todo CPL check. */
5133 }
5134
5135 /*
5136 * There are two kinds of data selectors, normal and expand down.
5137 */
5138 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5139 {
5140 if ( GCPtrFirst32 > pSel->u32Limit
5141 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5142 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5143 }
5144 else
5145 {
5146 /*
5147 * The upper boundary is defined by the B bit, not the G bit!
5148 */
5149 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5150 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5151 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5152 }
5153 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5154 }
5155 else
5156 {
5157 /*
5158 * Code selector and usually be used to read thru, writing is
5159                     * Code selectors can usually be used to read through; writing is
5160                     * only permitted in real and V8086 mode.
5161 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5162 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5163 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5164 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5165 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5166
5167 if ( GCPtrFirst32 > pSel->u32Limit
5168 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5169 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5170
5171 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5172 {
5173 /** @todo CPL check. */
5174 }
5175
5176 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5177 }
5178 }
5179 else
5180 return iemRaiseGeneralProtectionFault0(pVCpu);
5181 return VINF_SUCCESS;
5182 }
5183
5184 case IEMMODE_64BIT:
5185 {
5186 RTGCPTR GCPtrMem = *pGCPtrMem;
5187 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5188 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5189
5190 Assert(cbMem >= 1);
5191 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5192 return VINF_SUCCESS;
5193 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5194 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5195 return iemRaiseGeneralProtectionFault0(pVCpu);
5196 }
5197
5198 default:
5199 AssertFailedReturn(VERR_IEM_IPE_7);
5200 }
5201}
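
/*
 * Worked example (illustrative) for the expand-down case above: with an
 * expand-down data segment whose limit is 0x0fff and whose B bit is set, the
 * valid offset range is 0x1000 thru 0xffffffff.  A 4 byte access at offset
 * 0x0800 therefore fails via iemRaiseSelectorBounds, while one at 0x1000
 * (last byte 0x1003) passes the check and gets the segment base added to
 * *pGCPtrMem.
 */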
5202
5203
5204/**
5205 * Translates a virtual address to a physical address and checks if we
5206 * can access the page as specified.
5207 *
5208 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5209 * @param GCPtrMem The virtual address.
5210 * @param fAccess The intended access.
5211 * @param pGCPhysMem Where to return the physical address.
5212 */
5213VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5214{
5215 /** @todo Need a different PGM interface here. We're currently using
5216 *        generic / REM interfaces. This won't cut it for R0. */
5217 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5218 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5219 * here. */
5220 PGMPTWALK Walk;
5221 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5222 if (RT_FAILURE(rc))
5223 {
5224 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5225 /** @todo Check unassigned memory in unpaged mode. */
5226 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5227#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5228 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5229 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5230#endif
5231 *pGCPhysMem = NIL_RTGCPHYS;
5232 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
5233 }
5234
5235 /* If the page is writable and does not have the no-exec bit set, all
5236 access is allowed. Otherwise we'll have to check more carefully... */
5237 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5238 {
5239 /* Write to read only memory? */
5240 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5241 && !(Walk.fEffective & X86_PTE_RW)
5242 && ( ( pVCpu->iem.s.uCpl == 3
5243 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5244 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5245 {
5246 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5247 *pGCPhysMem = NIL_RTGCPHYS;
5248#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5249 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5250 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5251#endif
5252 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5253 }
5254
5255 /* Kernel memory accessed by userland? */
5256 if ( !(Walk.fEffective & X86_PTE_US)
5257 && pVCpu->iem.s.uCpl == 3
5258 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5259 {
5260 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5261 *pGCPhysMem = NIL_RTGCPHYS;
5262#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5263 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5264 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5265#endif
5266 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
5267 }
5268
5269 /* Executing non-executable memory? */
5270 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5271 && (Walk.fEffective & X86_PTE_PAE_NX)
5272 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5273 {
5274 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5275 *pGCPhysMem = NIL_RTGCPHYS;
5276#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5277 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5278 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5279#endif
5280 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5281 VERR_ACCESS_DENIED);
5282 }
5283 }
5284
5285 /*
5286 * Set the dirty / access flags.
5287     * ASSUMES this is set when the address is translated rather than on commit...
5288 */
5289 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5290 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5291 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5292 {
5293 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5294 AssertRC(rc2);
5295 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5296 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5297 }
5298
5299 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5300 *pGCPhysMem = GCPhys;
5301 return VINF_SUCCESS;
5302}
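
/*
 * Usage sketch (illustrative, not part of the build): translating a linear
 * address for a 4 byte data read; the variable names are assumptions.
 *
 *      RTGCPHYS     GCPhysMem;
 *      VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem,
 *                                                                IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA,
 *                                                                &GCPhysMem);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;    // #PF (or a nested-paging VM-exit) has been taken care of.
 *      // GCPhysMem now holds the physical address including the page offset.
 */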
5303
5304
5305/**
5306 * Looks up a memory mapping entry.
5307 *
5308 * @returns The mapping index (non-negative) or VERR_NOT_FOUND (negative).
5309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5310 * @param pvMem The memory address.
5311 * @param   fAccess             The access (IEM_ACCESS_WHAT_XXX + IEM_ACCESS_TYPE_XXX) to match.
5312 */
5313DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5314{
5315 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5316 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5317 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5318 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5319 return 0;
5320 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5321 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5322 return 1;
5323 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5324 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5325 return 2;
5326 return VERR_NOT_FOUND;
5327}
5328
5329
5330/**
5331 * Finds a free memmap entry when using iNextMapping doesn't work.
5332 *
5333 * @returns Memory mapping index, 1024 on failure.
5334 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5335 */
5336static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5337{
5338 /*
5339 * The easy case.
5340 */
5341 if (pVCpu->iem.s.cActiveMappings == 0)
5342 {
5343 pVCpu->iem.s.iNextMapping = 1;
5344 return 0;
5345 }
5346
5347 /* There should be enough mappings for all instructions. */
5348 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5349
5350 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5351 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5352 return i;
5353
5354 AssertFailedReturn(1024);
5355}
5356
5357
5358/**
5359 * Commits a bounce buffer that needs writing back and unmaps it.
5360 *
5361 * @returns Strict VBox status code.
5362 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5363 * @param iMemMap The index of the buffer to commit.
5364 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5365 * Always false in ring-3, obviously.
5366 */
5367static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5368{
5369 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5370 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5371#ifdef IN_RING3
5372 Assert(!fPostponeFail);
5373 RT_NOREF_PV(fPostponeFail);
5374#endif
5375
5376 /*
5377 * Do the writing.
5378 */
5379 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5380 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5381 {
5382 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5383 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5384 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5385 if (!pVCpu->iem.s.fBypassHandlers)
5386 {
5387 /*
5388 * Carefully and efficiently dealing with access handler return
5389             * codes makes this a little bloated.
5390 */
5391 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5392 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5393 pbBuf,
5394 cbFirst,
5395 PGMACCESSORIGIN_IEM);
5396 if (rcStrict == VINF_SUCCESS)
5397 {
5398 if (cbSecond)
5399 {
5400 rcStrict = PGMPhysWrite(pVM,
5401 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5402 pbBuf + cbFirst,
5403 cbSecond,
5404 PGMACCESSORIGIN_IEM);
5405 if (rcStrict == VINF_SUCCESS)
5406 { /* nothing */ }
5407 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5408 {
5409 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5410 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5411 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5412 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5413 }
5414#ifndef IN_RING3
5415 else if (fPostponeFail)
5416 {
5417 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5418 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5419 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5420 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5421 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5422 return iemSetPassUpStatus(pVCpu, rcStrict);
5423 }
5424#endif
5425 else
5426 {
5427 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5428 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5429 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5430 return rcStrict;
5431 }
5432 }
5433 }
5434 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5435 {
5436 if (!cbSecond)
5437 {
5438 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5439 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5440 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5441 }
5442 else
5443 {
5444 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5445 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5446 pbBuf + cbFirst,
5447 cbSecond,
5448 PGMACCESSORIGIN_IEM);
5449 if (rcStrict2 == VINF_SUCCESS)
5450 {
5451 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5452 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5453 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5454 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5455 }
5456 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5457 {
5458 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5459 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5460 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5461 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5462 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5463 }
5464#ifndef IN_RING3
5465 else if (fPostponeFail)
5466 {
5467 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5468 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5469 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5470 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5471 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5472 return iemSetPassUpStatus(pVCpu, rcStrict);
5473 }
5474#endif
5475 else
5476 {
5477 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5478 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5479 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5480 return rcStrict2;
5481 }
5482 }
5483 }
5484#ifndef IN_RING3
5485 else if (fPostponeFail)
5486 {
5487 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5488 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5489 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5490 if (!cbSecond)
5491 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5492 else
5493 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5494 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5495 return iemSetPassUpStatus(pVCpu, rcStrict);
5496 }
5497#endif
5498 else
5499 {
5500 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5501 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5502 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5503 return rcStrict;
5504 }
5505 }
5506 else
5507 {
5508 /*
5509 * No access handlers, much simpler.
5510 */
5511 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5512 if (RT_SUCCESS(rc))
5513 {
5514 if (cbSecond)
5515 {
5516 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5517 if (RT_SUCCESS(rc))
5518 { /* likely */ }
5519 else
5520 {
5521 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5522 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5523 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5524 return rc;
5525 }
5526 }
5527 }
5528 else
5529 {
5530 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5531 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5532 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5533 return rc;
5534 }
5535 }
5536 }
5537
5538#if defined(IEM_LOG_MEMORY_WRITES)
5539 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5540 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5541 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5542 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5543 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5544 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5545
5546 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5547 g_cbIemWrote = cbWrote;
5548 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5549#endif
5550
5551 /*
5552 * Free the mapping entry.
5553 */
5554 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5555 Assert(pVCpu->iem.s.cActiveMappings != 0);
5556 pVCpu->iem.s.cActiveMappings--;
5557 return VINF_SUCCESS;
5558}
5559
5560
5561/**
5562 * iemMemMap worker that deals with a request crossing pages.
5563 */
5564static VBOXSTRICTRC
5565iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5566{
5567 /*
5568 * Do the address translations.
5569 */
5570 RTGCPHYS GCPhysFirst;
5571 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
5572 if (rcStrict != VINF_SUCCESS)
5573 return rcStrict;
5574
5575 RTGCPHYS GCPhysSecond;
5576 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5577 fAccess, &GCPhysSecond);
5578 if (rcStrict != VINF_SUCCESS)
5579 return rcStrict;
5580 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
5581
5582 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5583
5584 /*
5585 * Read in the current memory content if it's a read, execute or partial
5586 * write access.
5587 */
5588 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5589 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (GCPhysFirst & GUEST_PAGE_OFFSET_MASK);
5590 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
5591
5592 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5593 {
5594 if (!pVCpu->iem.s.fBypassHandlers)
5595 {
5596 /*
5597 * Must carefully deal with access handler status codes here,
5598             * which makes the code a bit bloated.
5599 */
5600 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
5601 if (rcStrict == VINF_SUCCESS)
5602 {
5603 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5604 if (rcStrict == VINF_SUCCESS)
5605 { /*likely */ }
5606 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5607 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5608 else
5609 {
5610 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
5611 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5612 return rcStrict;
5613 }
5614 }
5615 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5616 {
5617 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5618 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5619 {
5620 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5621 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5622 }
5623 else
5624 {
5625 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
5626                         GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
5627 return rcStrict2;
5628 }
5629 }
5630 else
5631 {
5632 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5633 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5634 return rcStrict;
5635 }
5636 }
5637 else
5638 {
5639 /*
5640             * No informational status codes here, much more straightforward.
5641 */
5642 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
5643 if (RT_SUCCESS(rc))
5644 {
5645 Assert(rc == VINF_SUCCESS);
5646 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
5647 if (RT_SUCCESS(rc))
5648 Assert(rc == VINF_SUCCESS);
5649 else
5650 {
5651 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5652 return rc;
5653 }
5654 }
5655 else
5656 {
5657 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5658 return rc;
5659 }
5660 }
5661 }
5662#ifdef VBOX_STRICT
5663 else
5664 memset(pbBuf, 0xcc, cbMem);
5665 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
5666 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
5667#endif
5668
5669 /*
5670 * Commit the bounce buffer entry.
5671 */
5672 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5673 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
5674 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
5675 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
5676 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
5677 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
5678 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5679 pVCpu->iem.s.iNextMapping = iMemMap + 1;
5680 pVCpu->iem.s.cActiveMappings++;
5681
5682 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
5683 *ppvMem = pbBuf;
5684 return VINF_SUCCESS;
5685}
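
/*
 * Worked example (illustrative): an 8 byte access at a linear address whose
 * page offset is 0xffc (assuming 4 KiB guest pages) is split into
 * cbFirstPage = 4 bytes on the first page and cbSecondPage = 4 bytes on the
 * second.  Both halves are read into the bounce buffer up front when the
 * access is a read, execute or partial write, and a write access is later
 * written back as two PGMPhysWrite (or PGMPhysSimpleWriteGCPhys) calls by
 * iemMemBounceBufferCommitAndUnmap() when the mapping is committed.
 */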
5686
5687
5688/**
5689 * iemMemMap worker that deals with iemMemPageMap failures.
5690 */
5691static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
5692 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
5693{
5694 /*
5695 * Filter out conditions we can handle and the ones which shouldn't happen.
5696 */
5697 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
5698 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
5699 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
5700 {
5701 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
5702 return rcMap;
5703 }
5704 pVCpu->iem.s.cPotentialExits++;
5705
5706 /*
5707 * Read in the current memory content if it's a read, execute or partial
5708 * write access.
5709 */
5710 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5711 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5712 {
5713 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
5714 memset(pbBuf, 0xff, cbMem);
5715 else
5716 {
5717 int rc;
5718 if (!pVCpu->iem.s.fBypassHandlers)
5719 {
5720 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
5721 if (rcStrict == VINF_SUCCESS)
5722 { /* nothing */ }
5723 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5724 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5725 else
5726 {
5727 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5728 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5729 return rcStrict;
5730 }
5731 }
5732 else
5733 {
5734 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
5735 if (RT_SUCCESS(rc))
5736 { /* likely */ }
5737 else
5738 {
5739 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5740 GCPhysFirst, rc));
5741 return rc;
5742 }
5743 }
5744 }
5745 }
5746#ifdef VBOX_STRICT
5747 else
5748 memset(pbBuf, 0xcc, cbMem);
5751 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
5752 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
5753#endif
5754
5755 /*
5756 * Commit the bounce buffer entry.
5757 */
5758 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5759 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
5760 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
5761 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
5762 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
5763 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
5764 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5765 pVCpu->iem.s.iNextMapping = iMemMap + 1;
5766 pVCpu->iem.s.cActiveMappings++;
5767
5768 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
5769 *ppvMem = pbBuf;
5770 return VINF_SUCCESS;
5771}
5772
5773
5774
5775/**
5776 * Maps the specified guest memory for the given kind of access.
5777 *
5778 * This may use bounce buffering of the memory if the access crosses a page
5779 * boundary or if there is an access handler installed for any of it. Because
5780 * of lock prefix guarantees, we're in for some extra clutter when this
5781 * happens.
5782 *
5783 * This may raise a \#GP, \#SS, \#PF or \#AC.
5784 *
5785 * @returns VBox strict status code.
5786 *
5787 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5788 * @param ppvMem Where to return the pointer to the mapped memory.
5789 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
5790 * 8, 12, 16, 32 or 512. When used by string operations
5791 * it can be up to a page.
5792 * @param iSegReg The index of the segment register to use for this
5793 * access. The base and limits are checked. Use UINT8_MAX
5794 * to indicate that no segmentation is required (for IDT,
5795 * GDT and LDT accesses).
5796 * @param GCPtrMem The address of the guest memory.
5797 * @param fAccess How the memory is being accessed. The
5798 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
5799 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
5800 * when raising exceptions.
5801 * @param uAlignCtl Alignment control:
5802 * - Bits 15:0 is the alignment mask.
5803 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
5804 * IEM_MEMMAP_F_ALIGN_SSE, and
5805 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
5806 * Pass zero to skip alignment.
5807 */
5808VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
5809 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
5810{
5811 /*
5812 * Check the input and figure out which mapping entry to use.
5813 */
5814 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
5815    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
5816 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5817
5818 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
5819 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
5820 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
5821 {
5822 iMemMap = iemMemMapFindFree(pVCpu);
5823 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
5824 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
5825 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
5826 pVCpu->iem.s.aMemMappings[2].fAccess),
5827 VERR_IEM_IPE_9);
5828 }
5829
5830 /*
5831 * Map the memory, checking that we can actually access it. If something
5832 * slightly complicated happens, fall back on bounce buffering.
5833 */
5834 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
5835 if (rcStrict == VINF_SUCCESS)
5836 { /* likely */ }
5837 else
5838 return rcStrict;
5839
5840 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
5841 { /* likely */ }
5842 else
5843 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
5844
5845 /*
5846 * Alignment check.
5847 */
5848 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
5849 { /* likelyish */ }
5850 else
5851 {
5852 /* Misaligned access. */
5853 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
5854 {
5855 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
5856 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
5857 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
5858 {
5859 AssertCompile(X86_CR0_AM == X86_EFL_AC);
5860
5861 if (iemMemAreAlignmentChecksEnabled(pVCpu))
5862 return iemRaiseAlignmentCheckException(pVCpu);
5863 }
5864 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
5865 && iemMemAreAlignmentChecksEnabled(pVCpu)
5866/** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
5867 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. */
5868 )
5869 return iemRaiseAlignmentCheckException(pVCpu);
5870 else
5871 return iemRaiseGeneralProtectionFault0(pVCpu);
5872 }
5873 }
5874
5875#ifdef IEM_WITH_DATA_TLB
5876 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
5877
5878 /*
5879 * Get the TLB entry for this page.
5880 */
5881 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
5882 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
5883 if (pTlbe->uTag == uTag)
5884 {
5885# ifdef VBOX_WITH_STATISTICS
5886 pVCpu->iem.s.DataTlb.cTlbHits++;
5887# endif
5888 }
5889 else
5890 {
5891 pVCpu->iem.s.DataTlb.cTlbMisses++;
5892 PGMPTWALK Walk;
5893 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5894 if (RT_FAILURE(rc))
5895 {
5896 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5897# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5898 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5899 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5900# endif
5901 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, rc);
5902 }
5903
5904 Assert(Walk.fSucceeded);
5905 pTlbe->uTag = uTag;
5906 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
5907 pTlbe->GCPhys = Walk.GCPhys;
5908 pTlbe->pbMappingR3 = NULL;
5909 }
5910
5911 /*
5912 * Check TLB page table level access flags.
5913 */
5914 /* If the page is either supervisor only or non-writable, we need to do
5915 more careful access checks. */
5916 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
5917 {
5918 /* Write to read only memory? */
5919 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
5920 && (fAccess & IEM_ACCESS_TYPE_WRITE)
5921 && ( ( pVCpu->iem.s.uCpl == 3
5922 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5923 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5924 {
5925 Log(("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5926# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5927 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5928 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5929# endif
5930 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5931 }
5932
5933 /* Kernel memory accessed by userland? */
5934 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
5935 && pVCpu->iem.s.uCpl == 3
5936 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5937 {
5938 Log(("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5939# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5940 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5941 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5942# endif
5943 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
5944 }
5945 }
5946
5947 /*
5948 * Set the dirty / access flags.
5949 * ASSUMES this is set when the address is translated rather than on commit...
5950 */
5951 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5952 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
5953 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
5954 {
5955 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5956 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5957 AssertRC(rc2);
5958 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5959 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5960 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
5961 }
5962
5963 /*
5964 * Look up the physical page info if necessary.
5965 */
5966 uint8_t *pbMem = NULL;
5967 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
5968# ifdef IN_RING3
5969 pbMem = pTlbe->pbMappingR3;
5970# else
5971 pbMem = NULL;
5972# endif
5973 else
5974 {
5975 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
5976 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
5977 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
5978 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
5979 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
5980 { /* likely */ }
5981 else
5982 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
5983 pTlbe->pbMappingR3 = NULL;
5984 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
5985 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
5986 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
5987 &pbMem, &pTlbe->fFlagsAndPhysRev);
5988 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
5989# ifdef IN_RING3
5990 pTlbe->pbMappingR3 = pbMem;
5991# endif
5992 }
5993
5994 /*
5995 * Check the physical page level access and mapping.
5996 */
5997 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
5998 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
5999 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6000 { /* probably likely */ }
6001 else
6002 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
6003 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6004 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6005 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6006 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6007 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6008
6009 if (pbMem)
6010 {
6011 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6012 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6013 fAccess |= IEM_ACCESS_NOT_LOCKED;
6014 }
6015 else
6016 {
6017 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6018 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6019 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6020 if (rcStrict != VINF_SUCCESS)
6021 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6022 }
6023
6024 void * const pvMem = pbMem;
6025
6026 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6027 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6028 if (fAccess & IEM_ACCESS_TYPE_READ)
6029 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6030
6031#else /* !IEM_WITH_DATA_TLB */
6032
6033 RTGCPHYS GCPhysFirst;
6034 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
6035 if (rcStrict != VINF_SUCCESS)
6036 return rcStrict;
6037
6038 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6039 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6040 if (fAccess & IEM_ACCESS_TYPE_READ)
6041 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6042
6043 void *pvMem;
6044 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6045 if (rcStrict != VINF_SUCCESS)
6046 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6047
6048#endif /* !IEM_WITH_DATA_TLB */
6049
6050 /*
6051 * Fill in the mapping table entry.
6052 */
6053 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6054 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6055 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6056 pVCpu->iem.s.cActiveMappings += 1;
6057
6058 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6059 *ppvMem = pvMem;
6060
6061 return VINF_SUCCESS;
6062}
6063
6064
6065/**
6066 * Commits the guest memory if bounce buffered and unmaps it.
6067 *
6068 * @returns Strict VBox status code.
6069 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6070 * @param pvMem The mapping.
6071 * @param fAccess The kind of access.
6072 */
6073VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6074{
6075 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6076 AssertReturn(iMemMap >= 0, iMemMap);
6077
6078 /* If it's bounce buffered, we may need to write back the buffer. */
6079 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6080 {
6081 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6082 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6083 }
6084 /* Otherwise unlock it. */
6085 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6086 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6087
6088 /* Free the entry. */
6089 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6090 Assert(pVCpu->iem.s.cActiveMappings != 0);
6091 pVCpu->iem.s.cActiveMappings--;
6092 return VINF_SUCCESS;
6093}
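
/*
 * Illustrative sketch (not part of the build): the typical read-modify-write
 * pattern pairing iemMemMap with iemMemCommitAndUnmap.  The helper name
 * iemExampleIncU32 is invented for illustration, and IEM_ACCESS_DATA_RW is
 * assumed to be the read+write data access constant from IEMInternal.h; real
 * callers are generated through the IEM_MC_* macros rather than written like
 * this.
 *
 *      static VBOXSTRICTRC iemExampleIncU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
 *      {
 *          uint32_t    *pu32Dst;
 *          VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
 *                                            IEM_ACCESS_DATA_RW, sizeof(*pu32Dst) - 1);
 *          if (rcStrict == VINF_SUCCESS)
 *          {
 *              *pu32Dst += 1;      // modify the mapped guest dword in place
 *              rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_RW);
 *          }
 *          return rcStrict;        // bounce buffering and faults are dealt with inside the helpers
 *      }
 */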
6094
6095#ifdef IEM_WITH_SETJMP
6096
6097/**
6098 * Maps the specified guest memory for the given kind of access, longjmp on
6099 * error.
6100 *
6101 * This may be using bounce buffering of the memory if it's crossing a page
6102 * boundary or if there is an access handler installed for any of it. Because
6103 * of lock prefix guarantees, we're in for some extra clutter when this
6104 * happens.
6105 *
6106 * This may raise a \#GP, \#SS, \#PF or \#AC.
6107 *
6108 * @returns Pointer to the mapped memory.
6109 *
6110 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6111 * @param cbMem The number of bytes to map. This is usually 1,
6112 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6113 * string operations it can be up to a page.
6114 * @param iSegReg The index of the segment register to use for
6115 * this access. The base and limits are checked.
6116 * Use UINT8_MAX to indicate that no segmentation
6117 * is required (for IDT, GDT and LDT accesses).
6118 * @param GCPtrMem The address of the guest memory.
6119 * @param fAccess How the memory is being accessed. The
6120 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6121 * how to map the memory, while the
6122 * IEM_ACCESS_WHAT_XXX bit is used when raising
6123 * exceptions.
6124 * @param uAlignCtl Alignment control:
6125 * - Bits 15:0 is the alignment mask.
6126 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6127 * IEM_MEMMAP_F_ALIGN_SSE, and
6128 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6129 * Pass zero to skip alignment.
6130 */
6131void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6132 uint32_t uAlignCtl) RT_NOEXCEPT
6133{
6134 /*
6135 * Check the input, check segment access and adjust address
6136 * with segment base.
6137 */
6138 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6139 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6140 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6141
6142 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6143 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6144 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6145
6146 /*
6147 * Alignment check.
6148 */
6149 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6150 { /* likelyish */ }
6151 else
6152 {
6153 /* Misaligned access. */
6154 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6155 {
6156 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6157 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6158 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6159 {
6160 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6161
6162 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6163 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6164 }
6165 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6166 && iemMemAreAlignmentChecksEnabled(pVCpu)
6167/** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6168 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. */
6169 )
6170 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6171 else
6172 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6173 }
6174 }
6175
6176 /*
6177 * Figure out which mapping entry to use.
6178 */
6179 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6180 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6181 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6182 {
6183 iMemMap = iemMemMapFindFree(pVCpu);
6184 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6185 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6186 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6187 pVCpu->iem.s.aMemMappings[2].fAccess),
6188 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
6189 }
6190
6191 /*
6192 * Crossing a page boundary?
6193 */
6194 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6195 { /* No (likely). */ }
6196 else
6197 {
6198 void *pvMem;
6199 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6200 if (rcStrict == VINF_SUCCESS)
6201 return pvMem;
6202 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6203 }
6204
6205#ifdef IEM_WITH_DATA_TLB
6206 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6207
6208 /*
6209 * Get the TLB entry for this page.
6210 */
6211 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6212 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6213 if (pTlbe->uTag == uTag)
6214 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6215 else
6216 {
6217 pVCpu->iem.s.DataTlb.cTlbMisses++;
6218 PGMPTWALK Walk;
6219 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6220 if (RT_FAILURE(rc))
6221 {
6222 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6223# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6224 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6225 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6226# endif
6227 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, rc);
6228 }
6229
6230 Assert(Walk.fSucceeded);
6231 pTlbe->uTag = uTag;
6232 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6233 pTlbe->GCPhys = Walk.GCPhys;
6234 pTlbe->pbMappingR3 = NULL;
6235 }
6236
6237 /*
6238 * Check the flags and physical revision.
6239 */
6240 /** @todo make the caller pass these in with fAccess. */
6241 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && pVCpu->iem.s.uCpl == 3
6242 ? IEMTLBE_F_PT_NO_USER : 0;
6243 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6244 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6245 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6246 || (pVCpu->iem.s.uCpl == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6247 ? IEMTLBE_F_PT_NO_WRITE : 0)
6248 : 0;
6249 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6250 uint8_t *pbMem = NULL;
6251 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6252 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6253# ifdef IN_RING3
6254 pbMem = pTlbe->pbMappingR3;
6255# else
6256 pbMem = NULL;
6257# endif
6258 else
6259 {
6260 /*
6261 * Okay, something isn't quite right or needs refreshing.
6262 */
6263 /* Write to read only memory? */
6264 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6265 {
6266 Log(("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6267# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6268 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6269 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6270# endif
6271 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6272 }
6273
6274 /* Kernel memory accessed by userland? */
6275 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6276 {
6277 Log(("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6278# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6279 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6280 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6281# endif
6282 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6283 }
6284
6285 /* Set the dirty / access flags.
6286 ASSUMES this is set when the address is translated rather than on commit... */
6287 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6288 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6289 {
6290 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6291 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6292 AssertRC(rc2);
6293 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6294 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6295 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6296 }
6297
6298 /*
6299 * Check if the physical page info needs updating.
6300 */
6301 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6302# ifdef IN_RING3
6303 pbMem = pTlbe->pbMappingR3;
6304# else
6305 pbMem = NULL;
6306# endif
6307 else
6308 {
6309 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6310 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6311 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6312 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6313 pTlbe->pbMappingR3 = NULL;
6314 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6315 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6316 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6317 &pbMem, &pTlbe->fFlagsAndPhysRev);
6318 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
6319# ifdef IN_RING3
6320 pTlbe->pbMappingR3 = pbMem;
6321# endif
6322 }
6323
6324 /*
6325 * Check the physical page level access and mapping.
6326 */
6327 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6328 { /* probably likely */ }
6329 else
6330 {
6331 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6332 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6333 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6334 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6335 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6336 if (rcStrict == VINF_SUCCESS)
6337 return pbMem;
6338 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6339 }
6340 }
6341 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6342
6343 if (pbMem)
6344 {
6345 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6346 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6347 fAccess |= IEM_ACCESS_NOT_LOCKED;
6348 }
6349 else
6350 {
6351 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6352 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6353 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6354 if (rcStrict == VINF_SUCCESS)
6355 return pbMem;
6356 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6357 }
6358
6359 void * const pvMem = pbMem;
6360
6361 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6362 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6363 if (fAccess & IEM_ACCESS_TYPE_READ)
6364 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6365
6366#else /* !IEM_WITH_DATA_TLB */
6367
6368
6369 RTGCPHYS GCPhysFirst;
6370 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
6371 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6372 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6373
6374 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6375 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6376 if (fAccess & IEM_ACCESS_TYPE_READ)
6377 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6378
6379 void *pvMem;
6380 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6381 if (rcStrict == VINF_SUCCESS)
6382 { /* likely */ }
6383 else
6384 {
6385 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6386 if (rcStrict == VINF_SUCCESS)
6387 return pvMem;
6388 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6389 }
6390
6391#endif /* !IEM_WITH_DATA_TLB */
6392
6393 /*
6394 * Fill in the mapping table entry.
6395 */
6396 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6397 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6398 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6399 pVCpu->iem.s.cActiveMappings++;
6400
6401 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6402 return pvMem;
6403}
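
/*
 * Illustrative sketch (not part of the build): composing the uAlignCtl
 * argument documented above.  Bits 15:0 carry the alignment mask and bits
 * 31:16 the IEM_MEMMAP_F_ALIGN_XXX flags, so a 16-byte SSE access that should
 * fault when misaligned (#GP(0), or #AC when MXCSR.MM and alignment checks
 * select it) is mapped like the SSE fetch helpers further down in this file:
 *
 *      PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
 *                                                         15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
 *      // ... read the 16 bytes via pu128Src ...
 *      iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
 */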
6404
6405
6406/**
6407 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6408 *
6409 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6410 * @param pvMem The mapping.
6411 * @param fAccess The kind of access.
6412 */
6413void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6414{
6415 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6416 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
6417
6418 /* If it's bounce buffered, we may need to write back the buffer. */
6419 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6420 {
6421 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6422 {
6423 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6424 if (rcStrict == VINF_SUCCESS)
6425 return;
6426 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6427 }
6428 }
6429 /* Otherwise unlock it. */
6430 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6431 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6432
6433 /* Free the entry. */
6434 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6435 Assert(pVCpu->iem.s.cActiveMappings != 0);
6436 pVCpu->iem.s.cActiveMappings--;
6437}
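
/*
 * Illustrative sketch (not part of the build): the longjmp-style accessors
 * assume the per-VCPU jump buffer (pVCpu->iem.s.pJmpBuf) has been armed by the
 * caller, so error handling disappears from the fast path.  The helper name
 * and flow below are invented for illustration only:
 *
 *      static void iemExampleCopyWordJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrSrc, RTGCPTR GCPtrDst)
 *      {
 *          uint16_t const uValue = iemMemFetchDataU16Jmp(pVCpu, iSegReg, GCPtrSrc);    // longjmps on #PF & friends
 *          iemMemStoreDataU16Jmp(pVCpu, iSegReg, GCPtrDst, uValue);                    // ditto
 *      }
 */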
6438
6439#endif /* IEM_WITH_SETJMP */
6440
6441#ifndef IN_RING3
6442/**
 6443 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
 6444 * buffer part runs into trouble, the write is postponed to ring-3 (sets VMCPU_FF_IEM).
6445 *
6446 * Allows the instruction to be completed and retired, while the IEM user will
6447 * return to ring-3 immediately afterwards and do the postponed writes there.
6448 *
6449 * @returns VBox status code (no strict statuses). Caller must check
6450 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6451 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6452 * @param pvMem The mapping.
6453 * @param fAccess The kind of access.
6454 */
6455VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6456{
6457 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6458 AssertReturn(iMemMap >= 0, iMemMap);
6459
6460 /* If it's bounce buffered, we may need to write back the buffer. */
6461 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6462 {
6463 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6464 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6465 }
6466 /* Otherwise unlock it. */
6467 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6468 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6469
6470 /* Free the entry. */
6471 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6472 Assert(pVCpu->iem.s.cActiveMappings != 0);
6473 pVCpu->iem.s.cActiveMappings--;
6474 return VINF_SUCCESS;
6475}
6476#endif
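
/*
 * Illustrative sketch (not part of the build): after using the postponing
 * variant above, the caller is expected to check VMCPU_FF_IEM before iterating
 * a string instruction again, roughly along these lines (the surrounding loop
 * is hypothetical):
 *
 *      rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pvMem, IEM_ACCESS_DATA_W);
 *      if (   rcStrict == VINF_SUCCESS
 *          && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *      {
 *          // safe to do the next iteration here
 *      }
 *      else
 *      {
 *          // leave the rest to ring-3, where the postponed writes are carried out
 *      }
 */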
6477
6478
6479/**
 6480 * Rolls back mappings, releasing page locks and such.
6481 *
6482 * The caller shall only call this after checking cActiveMappings.
6483 *
6485 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6486 */
6487void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6488{
6489 Assert(pVCpu->iem.s.cActiveMappings > 0);
6490
6491 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6492 while (iMemMap-- > 0)
6493 {
6494 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6495 if (fAccess != IEM_ACCESS_INVALID)
6496 {
6497 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6498 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6499 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6500 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6501 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6502 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6503 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6504 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6505 pVCpu->iem.s.cActiveMappings--;
6506 }
6507 }
6508}
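
/*
 * Illustrative sketch (not part of the build): per the note above, callers
 * check cActiveMappings before rolling back, typically when an instruction
 * bails out with mappings still active:
 *
 *      if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
 *          iemMemRollback(pVCpu);
 */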
6509
6510
6511/**
6512 * Fetches a data byte.
6513 *
6514 * @returns Strict VBox status code.
6515 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6516 * @param pu8Dst Where to return the byte.
6517 * @param iSegReg The index of the segment register to use for
6518 * this access. The base and limits are checked.
6519 * @param GCPtrMem The address of the guest memory.
6520 */
6521VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6522{
6523 /* The lazy approach for now... */
6524 uint8_t const *pu8Src;
6525 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6526 if (rc == VINF_SUCCESS)
6527 {
6528 *pu8Dst = *pu8Src;
6529 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6530 }
6531 return rc;
6532}
6533
6534
6535#ifdef IEM_WITH_SETJMP
6536/**
6537 * Fetches a data byte, longjmp on error.
6538 *
6539 * @returns The byte.
6540 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6541 * @param iSegReg The index of the segment register to use for
6542 * this access. The base and limits are checked.
6543 * @param GCPtrMem The address of the guest memory.
6544 */
6545uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6546{
6547 /* The lazy approach for now... */
6548 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6549 uint8_t const bRet = *pu8Src;
6550 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6551 return bRet;
6552}
6553#endif /* IEM_WITH_SETJMP */
6554
6555
6556/**
6557 * Fetches a data word.
6558 *
6559 * @returns Strict VBox status code.
6560 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6561 * @param pu16Dst Where to return the word.
6562 * @param iSegReg The index of the segment register to use for
6563 * this access. The base and limits are checked.
6564 * @param GCPtrMem The address of the guest memory.
6565 */
6566VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6567{
6568 /* The lazy approach for now... */
6569 uint16_t const *pu16Src;
6570 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem,
6571 IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
6572 if (rc == VINF_SUCCESS)
6573 {
6574 *pu16Dst = *pu16Src;
6575 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6576 }
6577 return rc;
6578}
6579
6580
6581#ifdef IEM_WITH_SETJMP
6582/**
6583 * Fetches a data word, longjmp on error.
6584 *
 6585 * @returns The word.
6586 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6587 * @param iSegReg The index of the segment register to use for
6588 * this access. The base and limits are checked.
6589 * @param GCPtrMem The address of the guest memory.
6590 */
6591uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6592{
6593 /* The lazy approach for now... */
6594 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6595 sizeof(*pu16Src) - 1);
6596 uint16_t const u16Ret = *pu16Src;
6597 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6598 return u16Ret;
6599}
6600#endif
6601
6602
6603/**
6604 * Fetches a data dword.
6605 *
6606 * @returns Strict VBox status code.
6607 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6608 * @param pu32Dst Where to return the dword.
6609 * @param iSegReg The index of the segment register to use for
6610 * this access. The base and limits are checked.
6611 * @param GCPtrMem The address of the guest memory.
6612 */
6613VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6614{
6615 /* The lazy approach for now... */
6616 uint32_t const *pu32Src;
6617 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6618 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6619 if (rc == VINF_SUCCESS)
6620 {
6621 *pu32Dst = *pu32Src;
6622 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6623 }
6624 return rc;
6625}
6626
6627
6628/**
6629 * Fetches a data dword and zero extends it to a qword.
6630 *
6631 * @returns Strict VBox status code.
6632 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6633 * @param pu64Dst Where to return the qword.
6634 * @param iSegReg The index of the segment register to use for
6635 * this access. The base and limits are checked.
6636 * @param GCPtrMem The address of the guest memory.
6637 */
6638VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6639{
6640 /* The lazy approach for now... */
6641 uint32_t const *pu32Src;
6642 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6643 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6644 if (rc == VINF_SUCCESS)
6645 {
6646 *pu64Dst = *pu32Src;
6647 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6648 }
6649 return rc;
6650}
6651
6652
6653#ifdef IEM_WITH_SETJMP
6654
6655/**
6656 * Fetches a data dword, longjmp on error, fallback/safe version.
6657 *
 6658 * @returns The dword.
6659 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6660 * @param iSegReg The index of the segment register to use for
6661 * this access. The base and limits are checked.
6662 * @param GCPtrMem The address of the guest memory.
6663 */
6664uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6665{
6666 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6667 sizeof(*pu32Src) - 1);
6668 uint32_t const u32Ret = *pu32Src;
6669 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6670 return u32Ret;
6671}
6672
6673
6674/**
6675 * Fetches a data dword, longjmp on error.
6676 *
 6677 * @returns The dword.
6678 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6679 * @param iSegReg The index of the segment register to use for
6680 * this access. The base and limits are checked.
6681 * @param GCPtrMem The address of the guest memory.
6682 */
6683uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6684{
6685# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
6686 /*
 6687 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
6688 */
6689 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
6690 if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
6691 {
6692 /*
6693 * TLB lookup.
6694 */
6695 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
6696 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6697 if (pTlbe->uTag == uTag)
6698 {
6699 /*
6700 * Check TLB page table level access flags.
6701 */
6702 uint64_t const fNoUser = pVCpu->iem.s.uCpl == 3 ? IEMTLBE_F_PT_NO_USER : 0;
6703 if ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
6704 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
6705 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6706 {
6707 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6708
6709 /*
6710 * Alignment check:
6711 */
6712 /** @todo check priority \#AC vs \#PF */
6713 if ( !(GCPtrEff & (sizeof(uint32_t) - 1))
6714 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6715 || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
6716 || pVCpu->iem.s.uCpl != 3)
6717 {
6718 /*
6719 * Fetch and return the dword
6720 */
6721 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
6722 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
6723 return *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
6724 }
6725 Log10(("iemMemFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
6726 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6727 }
6728 }
6729 }
6730
 6731 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
 6732 outdated page pointer, or other troubles. */
6733 Log10(("iemMemFetchDataU32Jmp: %u:%RGv fallback\n", iSegReg, GCPtrMem));
6734 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
6735
6736# else
6737 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem,
6738 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6739 uint32_t const u32Ret = *pu32Src;
6740 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6741 return u32Ret;
6742# endif
6743}
6744#endif
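
/*
 * Illustrative note (sketch): the inline fast path above folds all the "must
 * not be set" page-table and physical flags together with the physical
 * revision, so a single 64-bit compare against uTlbPhysRev decides whether the
 * direct ring-3 mapping can be used.  Conceptually:
 *
 *      uint64_t const fDisallowed = IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
 *                                 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3
 *                                 | (pVCpu->iem.s.uCpl == 3 ? IEMTLBE_F_PT_NO_USER : 0);
 *      bool const fUseDirectMapping =    (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | fDisallowed))
 *                                     == pVCpu->iem.s.DataTlb.uTlbPhysRev;
 */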
6745
6746
6747#ifdef SOME_UNUSED_FUNCTION
6748/**
6749 * Fetches a data dword and sign extends it to a qword.
6750 *
6751 * @returns Strict VBox status code.
6752 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6753 * @param pu64Dst Where to return the sign extended value.
6754 * @param iSegReg The index of the segment register to use for
6755 * this access. The base and limits are checked.
6756 * @param GCPtrMem The address of the guest memory.
6757 */
6758VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6759{
6760 /* The lazy approach for now... */
6761 int32_t const *pi32Src;
6762 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
6763 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
6764 if (rc == VINF_SUCCESS)
6765 {
6766 *pu64Dst = *pi32Src;
6767 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
6768 }
6769#ifdef __GNUC__ /* warning: GCC may be a royal pain */
6770 else
6771 *pu64Dst = 0;
6772#endif
6773 return rc;
6774}
6775#endif
6776
6777
6778/**
6779 * Fetches a data qword.
6780 *
6781 * @returns Strict VBox status code.
6782 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6783 * @param pu64Dst Where to return the qword.
6784 * @param iSegReg The index of the segment register to use for
6785 * this access. The base and limits are checked.
6786 * @param GCPtrMem The address of the guest memory.
6787 */
6788VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6789{
6790 /* The lazy approach for now... */
6791 uint64_t const *pu64Src;
6792 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
6793 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
6794 if (rc == VINF_SUCCESS)
6795 {
6796 *pu64Dst = *pu64Src;
6797 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6798 }
6799 return rc;
6800}
6801
6802
6803#ifdef IEM_WITH_SETJMP
6804/**
6805 * Fetches a data qword, longjmp on error.
6806 *
6807 * @returns The qword.
6808 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6809 * @param iSegReg The index of the segment register to use for
6810 * this access. The base and limits are checked.
6811 * @param GCPtrMem The address of the guest memory.
6812 */
6813uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6814{
6815 /* The lazy approach for now... */
6816 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem,
6817 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
6818 uint64_t const u64Ret = *pu64Src;
6819 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6820 return u64Ret;
6821}
6822#endif
6823
6824
6825/**
 6826 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
6827 *
6828 * @returns Strict VBox status code.
6829 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6830 * @param pu64Dst Where to return the qword.
6831 * @param iSegReg The index of the segment register to use for
6832 * this access. The base and limits are checked.
6833 * @param GCPtrMem The address of the guest memory.
6834 */
6835VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6836{
6837 /* The lazy approach for now... */
6838 uint64_t const *pu64Src;
6839 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
6840 IEM_ACCESS_DATA_R, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
6841 if (rc == VINF_SUCCESS)
6842 {
6843 *pu64Dst = *pu64Src;
6844 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6845 }
6846 return rc;
6847}
6848
6849
6850#ifdef IEM_WITH_SETJMP
6851/**
 6852 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
6853 *
6854 * @returns The qword.
6855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6856 * @param iSegReg The index of the segment register to use for
6857 * this access. The base and limits are checked.
6858 * @param GCPtrMem The address of the guest memory.
6859 */
6860uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6861{
6862 /* The lazy approach for now... */
6863 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6864 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
6865 uint64_t const u64Ret = *pu64Src;
6866 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6867 return u64Ret;
6868}
6869#endif
6870
6871
6872/**
6873 * Fetches a data tword.
6874 *
6875 * @returns Strict VBox status code.
6876 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6877 * @param pr80Dst Where to return the tword.
6878 * @param iSegReg The index of the segment register to use for
6879 * this access. The base and limits are checked.
6880 * @param GCPtrMem The address of the guest memory.
6881 */
6882VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6883{
6884 /* The lazy approach for now... */
6885 PCRTFLOAT80U pr80Src;
6886 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
6887 if (rc == VINF_SUCCESS)
6888 {
6889 *pr80Dst = *pr80Src;
6890 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
6891 }
6892 return rc;
6893}
6894
6895
6896#ifdef IEM_WITH_SETJMP
6897/**
6898 * Fetches a data tword, longjmp on error.
6899 *
6900 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6901 * @param pr80Dst Where to return the tword.
6902 * @param iSegReg The index of the segment register to use for
6903 * this access. The base and limits are checked.
6904 * @param GCPtrMem The address of the guest memory.
6905 */
6906void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6907{
6908 /* The lazy approach for now... */
6909 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
6910 *pr80Dst = *pr80Src;
6911 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
6912}
6913#endif
6914
6915
6916/**
6917 * Fetches a data decimal tword.
6918 *
6919 * @returns Strict VBox status code.
6920 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6921 * @param pd80Dst Where to return the tword.
6922 * @param iSegReg The index of the segment register to use for
6923 * this access. The base and limits are checked.
6924 * @param GCPtrMem The address of the guest memory.
6925 */
6926VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6927{
6928 /* The lazy approach for now... */
6929 PCRTPBCD80U pd80Src;
6930 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem,
6931 IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
6932 if (rc == VINF_SUCCESS)
6933 {
6934 *pd80Dst = *pd80Src;
6935 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
6936 }
6937 return rc;
6938}
6939
6940
6941#ifdef IEM_WITH_SETJMP
6942/**
6943 * Fetches a data decimal tword, longjmp on error.
6944 *
6945 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6946 * @param pd80Dst Where to return the tword.
6947 * @param iSegReg The index of the segment register to use for
6948 * this access. The base and limits are checked.
6949 * @param GCPtrMem The address of the guest memory.
6950 */
6951void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6952{
6953 /* The lazy approach for now... */
6954 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem,
6955 IEM_ACCESS_DATA_R, 7 /** @todo FBSTP alignment check */);
6956 *pd80Dst = *pd80Src;
6957 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
6958}
6959#endif
6960
6961
6962/**
6963 * Fetches a data dqword (double qword), generally SSE related.
6964 *
6965 * @returns Strict VBox status code.
6966 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 6967 * @param pu128Dst Where to return the dqword.
6968 * @param iSegReg The index of the segment register to use for
6969 * this access. The base and limits are checked.
6970 * @param GCPtrMem The address of the guest memory.
6971 */
6972VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6973{
6974 /* The lazy approach for now... */
6975 PCRTUINT128U pu128Src;
6976 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
6977 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
6978 if (rc == VINF_SUCCESS)
6979 {
6980 pu128Dst->au64[0] = pu128Src->au64[0];
6981 pu128Dst->au64[1] = pu128Src->au64[1];
6982 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
6983 }
6984 return rc;
6985}
6986
6987
6988#ifdef IEM_WITH_SETJMP
6989/**
6990 * Fetches a data dqword (double qword), generally SSE related.
6991 *
6992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 6993 * @param pu128Dst Where to return the dqword.
6994 * @param iSegReg The index of the segment register to use for
6995 * this access. The base and limits are checked.
6996 * @param GCPtrMem The address of the guest memory.
6997 */
6998void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6999{
7000 /* The lazy approach for now... */
7001 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
7002 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7003 pu128Dst->au64[0] = pu128Src->au64[0];
7004 pu128Dst->au64[1] = pu128Src->au64[1];
7005 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7006}
7007#endif
7008
7009
7010/**
7011 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7012 * related.
7013 *
7014 * Raises \#GP(0) if not aligned.
7015 *
7016 * @returns Strict VBox status code.
7017 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 7018 * @param pu128Dst Where to return the dqword.
7019 * @param iSegReg The index of the segment register to use for
7020 * this access. The base and limits are checked.
7021 * @param GCPtrMem The address of the guest memory.
7022 */
7023VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7024{
7025 /* The lazy approach for now... */
7026 PCRTUINT128U pu128Src;
7027 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7028 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7029 if (rc == VINF_SUCCESS)
7030 {
7031 pu128Dst->au64[0] = pu128Src->au64[0];
7032 pu128Dst->au64[1] = pu128Src->au64[1];
7033 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7034 }
7035 return rc;
7036}
7037
7038
7039#ifdef IEM_WITH_SETJMP
7040/**
7041 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7042 * related, longjmp on error.
7043 *
7044 * Raises \#GP(0) if not aligned.
7045 *
7046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 7047 * @param pu128Dst Where to return the dqword.
7048 * @param iSegReg The index of the segment register to use for
7049 * this access. The base and limits are checked.
7050 * @param GCPtrMem The address of the guest memory.
7051 */
7052void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7053{
7054 /* The lazy approach for now... */
7055 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7056 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7057 pu128Dst->au64[0] = pu128Src->au64[0];
7058 pu128Dst->au64[1] = pu128Src->au64[1];
7059 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7060}
7061#endif
7062
7063
7064/**
7065 * Fetches a data oword (octo word), generally AVX related.
7066 *
7067 * @returns Strict VBox status code.
7068 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 7069 * @param pu256Dst Where to return the oword.
7070 * @param iSegReg The index of the segment register to use for
7071 * this access. The base and limits are checked.
7072 * @param GCPtrMem The address of the guest memory.
7073 */
7074VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7075{
7076 /* The lazy approach for now... */
7077 PCRTUINT256U pu256Src;
7078 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7079 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7080 if (rc == VINF_SUCCESS)
7081 {
7082 pu256Dst->au64[0] = pu256Src->au64[0];
7083 pu256Dst->au64[1] = pu256Src->au64[1];
7084 pu256Dst->au64[2] = pu256Src->au64[2];
7085 pu256Dst->au64[3] = pu256Src->au64[3];
7086 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7087 }
7088 return rc;
7089}
7090
7091
7092#ifdef IEM_WITH_SETJMP
7093/**
7094 * Fetches a data oword (octo word), generally AVX related.
7095 *
7096 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 7097 * @param pu256Dst Where to return the oword.
7098 * @param iSegReg The index of the segment register to use for
7099 * this access. The base and limits are checked.
7100 * @param GCPtrMem The address of the guest memory.
7101 */
7102void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7103{
7104 /* The lazy approach for now... */
7105 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
7106 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7107 pu256Dst->au64[0] = pu256Src->au64[0];
7108 pu256Dst->au64[1] = pu256Src->au64[1];
7109 pu256Dst->au64[2] = pu256Src->au64[2];
7110 pu256Dst->au64[3] = pu256Src->au64[3];
7111 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7112}
7113#endif
7114
7115
7116/**
7117 * Fetches a data oword (octo word) at an aligned address, generally AVX
7118 * related.
7119 *
7120 * Raises \#GP(0) if not aligned.
7121 *
7122 * @returns Strict VBox status code.
7123 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 7124 * @param pu256Dst Where to return the oword.
7125 * @param iSegReg The index of the segment register to use for
7126 * this access. The base and limits are checked.
7127 * @param GCPtrMem The address of the guest memory.
7128 */
7129VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7130{
7131 /* The lazy approach for now... */
7132 PCRTUINT256U pu256Src;
7133 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7134 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7135 if (rc == VINF_SUCCESS)
7136 {
7137 pu256Dst->au64[0] = pu256Src->au64[0];
7138 pu256Dst->au64[1] = pu256Src->au64[1];
7139 pu256Dst->au64[2] = pu256Src->au64[2];
7140 pu256Dst->au64[3] = pu256Src->au64[3];
7141 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7142 }
7143 return rc;
7144}
7145
7146
7147#ifdef IEM_WITH_SETJMP
7148/**
7149 * Fetches a data oword (octo word) at an aligned address, generally AVX
7150 * related, longjmp on error.
7151 *
7152 * Raises \#GP(0) if not aligned.
7153 *
7154 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 7155 * @param pu256Dst Where to return the oword.
7156 * @param iSegReg The index of the segment register to use for
7157 * this access. The base and limits are checked.
7158 * @param GCPtrMem The address of the guest memory.
7159 */
7160void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7161{
7162 /* The lazy approach for now... */
7163 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7164 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7165 pu256Dst->au64[0] = pu256Src->au64[0];
7166 pu256Dst->au64[1] = pu256Src->au64[1];
7167 pu256Dst->au64[2] = pu256Src->au64[2];
7168 pu256Dst->au64[3] = pu256Src->au64[3];
7169 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7170}
7171#endif
7172
7173
7174
7175/**
7176 * Fetches a descriptor register (lgdt, lidt).
7177 *
7178 * @returns Strict VBox status code.
7179 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7180 * @param pcbLimit Where to return the limit.
7181 * @param pGCPtrBase Where to return the base.
7182 * @param iSegReg The index of the segment register to use for
7183 * this access. The base and limits are checked.
7184 * @param GCPtrMem The address of the guest memory.
7185 * @param enmOpSize The effective operand size.
7186 */
7187VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7188 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7189{
7190 /*
7191 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7192 * little special:
7193 * - The two reads are done separately.
 7194 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
 7195 * - We suspect the 386 actually commits the limit before the base in
 7196 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
 7197 * don't try to emulate this eccentric behavior, because it's not well
 7198 * enough understood and is rather hard to trigger.
7199 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7200 */
7201 VBOXSTRICTRC rcStrict;
7202 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7203 {
7204 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7205 if (rcStrict == VINF_SUCCESS)
7206 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7207 }
7208 else
7209 {
 7210 uint32_t uTmp = 0; /* (Otherwise Visual C++ may warn about it being used uninitialized.) */
7211 if (enmOpSize == IEMMODE_32BIT)
7212 {
7213 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7214 {
7215 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7216 if (rcStrict == VINF_SUCCESS)
7217 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7218 }
7219 else
7220 {
7221 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7222 if (rcStrict == VINF_SUCCESS)
7223 {
7224 *pcbLimit = (uint16_t)uTmp;
7225 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7226 }
7227 }
7228 if (rcStrict == VINF_SUCCESS)
7229 *pGCPtrBase = uTmp;
7230 }
7231 else
7232 {
7233 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7234 if (rcStrict == VINF_SUCCESS)
7235 {
7236 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7237 if (rcStrict == VINF_SUCCESS)
7238 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7239 }
7240 }
7241 }
7242 return rcStrict;
7243}
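
/*
 * Illustrative sketch (not part of the build): the lgdt/lidt memory operand is
 * a 16-bit limit followed by the base.  For a hypothetical 32-bit guest with
 * the bytes ff 03 00 10 40 12 at GCPtrMem, the fetch above returns
 * *pcbLimit = 0x03ff and, with a 32-bit operand size, *pGCPtrBase = 0x12401000;
 * with a 16-bit operand size the base is masked with 0x00ffffff and comes out
 * as 0x00401000.
 */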
7244
7245
7246
7247/**
7248 * Stores a data byte.
7249 *
7250 * @returns Strict VBox status code.
7251 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7252 * @param iSegReg The index of the segment register to use for
7253 * this access. The base and limits are checked.
7254 * @param GCPtrMem The address of the guest memory.
7255 * @param u8Value The value to store.
7256 */
7257VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7258{
7259 /* The lazy approach for now... */
7260 uint8_t *pu8Dst;
7261 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7262 if (rc == VINF_SUCCESS)
7263 {
7264 *pu8Dst = u8Value;
7265 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7266 }
7267 return rc;
7268}
7269
7270
7271#ifdef IEM_WITH_SETJMP
7272/**
7273 * Stores a data byte, longjmp on error.
7274 *
7275 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7276 * @param iSegReg The index of the segment register to use for
7277 * this access. The base and limits are checked.
7278 * @param GCPtrMem The address of the guest memory.
7279 * @param u8Value The value to store.
7280 */
7281void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7282{
7283 /* The lazy approach for now... */
7284 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7285 *pu8Dst = u8Value;
7286 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7287}
7288#endif
7289
7290
7291/**
7292 * Stores a data word.
7293 *
7294 * @returns Strict VBox status code.
7295 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7296 * @param iSegReg The index of the segment register to use for
7297 * this access. The base and limits are checked.
7298 * @param GCPtrMem The address of the guest memory.
7299 * @param u16Value The value to store.
7300 */
7301VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7302{
7303 /* The lazy approach for now... */
7304 uint16_t *pu16Dst;
7305 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7306 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7307 if (rc == VINF_SUCCESS)
7308 {
7309 *pu16Dst = u16Value;
7310 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7311 }
7312 return rc;
7313}
7314
7315
7316#ifdef IEM_WITH_SETJMP
7317/**
7318 * Stores a data word, longjmp on error.
7319 *
7320 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7321 * @param iSegReg The index of the segment register to use for
7322 * this access. The base and limits are checked.
7323 * @param GCPtrMem The address of the guest memory.
7324 * @param u16Value The value to store.
7325 */
7326void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7327{
7328 /* The lazy approach for now... */
7329 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7330 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7331 *pu16Dst = u16Value;
7332 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7333}
7334#endif
7335
7336
7337/**
7338 * Stores a data dword.
7339 *
7340 * @returns Strict VBox status code.
7341 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7342 * @param iSegReg The index of the segment register to use for
7343 * this access. The base and limits are checked.
7344 * @param GCPtrMem The address of the guest memory.
7345 * @param u32Value The value to store.
7346 */
7347VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7348{
7349 /* The lazy approach for now... */
7350 uint32_t *pu32Dst;
7351 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7352 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7353 if (rc == VINF_SUCCESS)
7354 {
7355 *pu32Dst = u32Value;
7356 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7357 }
7358 return rc;
7359}
7360
7361
7362#ifdef IEM_WITH_SETJMP
7363/**
 7364 * Stores a data dword, longjmp on error.
 7365 *
7367 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7368 * @param iSegReg The index of the segment register to use for
7369 * this access. The base and limits are checked.
7370 * @param GCPtrMem The address of the guest memory.
7371 * @param u32Value The value to store.
7372 */
7373void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7374{
7375 /* The lazy approach for now... */
7376 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7377 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7378 *pu32Dst = u32Value;
7379 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7380}
7381#endif
7382
7383
7384/**
7385 * Stores a data qword.
7386 *
7387 * @returns Strict VBox status code.
7388 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7389 * @param iSegReg The index of the segment register to use for
7390 * this access. The base and limits are checked.
7391 * @param GCPtrMem The address of the guest memory.
7392 * @param u64Value The value to store.
7393 */
7394VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7395{
7396 /* The lazy approach for now... */
7397 uint64_t *pu64Dst;
7398 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7399 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7400 if (rc == VINF_SUCCESS)
7401 {
7402 *pu64Dst = u64Value;
7403 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7404 }
7405 return rc;
7406}
7407
7408
7409#ifdef IEM_WITH_SETJMP
7410/**
7411 * Stores a data qword, longjmp on error.
7412 *
7413 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7414 * @param iSegReg The index of the segment register to use for
7415 * this access. The base and limits are checked.
7416 * @param GCPtrMem The address of the guest memory.
7417 * @param u64Value The value to store.
7418 */
7419void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7420{
7421 /* The lazy approach for now... */
7422 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7423 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7424 *pu64Dst = u64Value;
7425 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7426}
7427#endif
7428
7429
7430/**
7431 * Stores a data dqword.
7432 *
7433 * @returns Strict VBox status code.
7434 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7435 * @param iSegReg The index of the segment register to use for
7436 * this access. The base and limits are checked.
7437 * @param GCPtrMem The address of the guest memory.
7438 * @param u128Value The value to store.
7439 */
7440VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7441{
7442 /* The lazy approach for now... */
7443 PRTUINT128U pu128Dst;
7444 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7445 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7446 if (rc == VINF_SUCCESS)
7447 {
7448 pu128Dst->au64[0] = u128Value.au64[0];
7449 pu128Dst->au64[1] = u128Value.au64[1];
7450 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7451 }
7452 return rc;
7453}
7454
7455
7456#ifdef IEM_WITH_SETJMP
7457/**
7458 * Stores a data dqword, longjmp on error.
7459 *
7460 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7461 * @param iSegReg The index of the segment register to use for
7462 * this access. The base and limits are checked.
7463 * @param GCPtrMem The address of the guest memory.
7464 * @param u128Value The value to store.
7465 */
7466void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7467{
7468 /* The lazy approach for now... */
7469 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7470 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7471 pu128Dst->au64[0] = u128Value.au64[0];
7472 pu128Dst->au64[1] = u128Value.au64[1];
7473 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7474}
7475#endif
7476
7477
7478/**
7479 * Stores a data dqword, SSE aligned.
7480 *
7481 * @returns Strict VBox status code.
7482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7483 * @param iSegReg The index of the segment register to use for
7484 * this access. The base and limits are checked.
7485 * @param GCPtrMem The address of the guest memory.
7486 * @param u128Value The value to store.
7487 */
7488VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7489{
7490 /* The lazy approach for now... */
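/* Unlike the plain U128 store above, this variant enforces 16-byte alignment via IEM_MEMMAP_F_ALIGN_GP + IEM_MEMMAP_F_ALIGN_SSE. */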
7491 PRTUINT128U pu128Dst;
7492 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7493 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7494 if (rc == VINF_SUCCESS)
7495 {
7496 pu128Dst->au64[0] = u128Value.au64[0];
7497 pu128Dst->au64[1] = u128Value.au64[1];
7498 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7499 }
7500 return rc;
7501}
7502
7503
7504#ifdef IEM_WITH_SETJMP
7505/**
7506 * Stores a data dqword, SSE aligned, longjmp on error.
7507 *
7509 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7510 * @param iSegReg The index of the segment register to use for
7511 * this access. The base and limits are checked.
7512 * @param GCPtrMem The address of the guest memory.
7513 * @param u128Value The value to store.
7514 */
7515void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7516{
7517 /* The lazy approach for now... */
7518 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7519 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7520 pu128Dst->au64[0] = u128Value.au64[0];
7521 pu128Dst->au64[1] = u128Value.au64[1];
7522 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7523}
7524#endif
7525
7526
7527/**
7528 * Stores a data qqword (256 bits).
7529 *
7530 * @returns Strict VBox status code.
7531 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7532 * @param iSegReg The index of the segment register to use for
7533 * this access. The base and limits are checked.
7534 * @param GCPtrMem The address of the guest memory.
7535 * @param pu256Value Pointer to the value to store.
7536 */
7537VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7538{
7539 /* The lazy approach for now... */
7540 PRTUINT256U pu256Dst;
7541 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7542 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7543 if (rc == VINF_SUCCESS)
7544 {
7545 pu256Dst->au64[0] = pu256Value->au64[0];
7546 pu256Dst->au64[1] = pu256Value->au64[1];
7547 pu256Dst->au64[2] = pu256Value->au64[2];
7548 pu256Dst->au64[3] = pu256Value->au64[3];
7549 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7550 }
7551 return rc;
7552}
7553
7554
7555#ifdef IEM_WITH_SETJMP
7556/**
7557 * Stores a data qqword (256 bits), longjmp on error.
7558 *
7559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7560 * @param iSegReg The index of the segment register to use for
7561 * this access. The base and limits are checked.
7562 * @param GCPtrMem The address of the guest memory.
7563 * @param pu256Value Pointer to the value to store.
7564 */
7565void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7566{
7567 /* The lazy approach for now... */
7568 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7569 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7570 pu256Dst->au64[0] = pu256Value->au64[0];
7571 pu256Dst->au64[1] = pu256Value->au64[1];
7572 pu256Dst->au64[2] = pu256Value->au64[2];
7573 pu256Dst->au64[3] = pu256Value->au64[3];
7574 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7575}
7576#endif
7577
7578
7579/**
7580 * Stores a data qqword (256 bits), AVX \#GP(0) aligned.
7581 *
7582 * @returns Strict VBox status code.
7583 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7584 * @param iSegReg The index of the segment register to use for
7585 * this access. The base and limits are checked.
7586 * @param GCPtrMem The address of the guest memory.
7587 * @param pu256Value Pointer to the value to store.
7588 */
7589VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7590{
7591 /* The lazy approach for now... */
7592 PRTUINT256U pu256Dst;
7593 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7594 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7595 if (rc == VINF_SUCCESS)
7596 {
7597 pu256Dst->au64[0] = pu256Value->au64[0];
7598 pu256Dst->au64[1] = pu256Value->au64[1];
7599 pu256Dst->au64[2] = pu256Value->au64[2];
7600 pu256Dst->au64[3] = pu256Value->au64[3];
7601 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7602 }
7603 return rc;
7604}
7605
7606
7607#ifdef IEM_WITH_SETJMP
7608/**
7609 * Stores a data qqword (256 bits), AVX aligned, longjmp on error.
7610 *
7612 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7613 * @param iSegReg The index of the segment register to use for
7614 * this access. The base and limits are checked.
7615 * @param GCPtrMem The address of the guest memory.
7616 * @param pu256Value Pointer to the value to store.
7617 */
7618void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7619{
7620 /* The lazy approach for now... */
7621 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7622 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7623 pu256Dst->au64[0] = pu256Value->au64[0];
7624 pu256Dst->au64[1] = pu256Value->au64[1];
7625 pu256Dst->au64[2] = pu256Value->au64[2];
7626 pu256Dst->au64[3] = pu256Value->au64[3];
7627 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7628}
7629#endif
7630
7631
7632/**
7633 * Stores a descriptor register (sgdt, sidt).
7634 *
7635 * @returns Strict VBox status code.
7636 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7637 * @param cbLimit The limit.
7638 * @param GCPtrBase The base address.
7639 * @param iSegReg The index of the segment register to use for
7640 * this access. The base and limits are checked.
7641 * @param GCPtrMem The address of the guest memory.
7642 */
7643VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7644{
7645 /*
7646 * The SIDT and SGDT instructions actually store the data using two
7647 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7648 * do not respond to opsize prefixes.
7649 */
7650 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7651 if (rcStrict == VINF_SUCCESS)
7652 {
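/* The base is written as 32 bits in 16-bit and 32-bit mode (with the top byte forced to 0xFF for 286-or-older target CPUs in 16-bit mode) and as the full 64 bits in 64-bit mode. */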
7653 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
7654 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7655 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7656 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7657 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
7658 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7659 else
7660 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7661 }
7662 return rcStrict;
7663}
7664
7665
7666/**
7667 * Pushes a word onto the stack.
7668 *
7669 * @returns Strict VBox status code.
7670 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7671 * @param u16Value The value to push.
7672 */
7673VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT
7674{
7675 /* Decrement the stack pointer. */
7676 uint64_t uNewRsp;
7677 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
7678
7679 /* Write the word the lazy way. */
7680 uint16_t *pu16Dst;
7681 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
7682 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
7683 if (rc == VINF_SUCCESS)
7684 {
7685 *pu16Dst = u16Value;
7686 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
7687 }
7688
7689 /* Commit the new RSP value unless an access handler made trouble. */
7690 if (rc == VINF_SUCCESS)
7691 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7692
7693 return rc;
7694}
7695
7696
7697/**
7698 * Pushes a dword onto the stack.
7699 *
7700 * @returns Strict VBox status code.
7701 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7702 * @param u32Value The value to push.
7703 */
7704VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
7705{
7706 /* Decrement the stack pointer. */
7707 uint64_t uNewRsp;
7708 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
7709
7710 /* Write the dword the lazy way. */
7711 uint32_t *pu32Dst;
7712 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
7713 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
7714 if (rc == VINF_SUCCESS)
7715 {
7716 *pu32Dst = u32Value;
7717 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
7718 }
7719
7720 /* Commit the new RSP value unless an access handler made trouble. */
7721 if (rc == VINF_SUCCESS)
7722 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7723
7724 return rc;
7725}
7726
7727
7728/**
7729 * Pushes a dword segment register value onto the stack.
7730 *
7731 * @returns Strict VBox status code.
7732 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7733 * @param u32Value The value to push.
7734 */
7735VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
7736{
7737 /* Decrement the stack pointer. */
7738 uint64_t uNewRsp;
7739 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
7740
7741 /* The Intel docs talk about zero extending the selector register
7742 value. My actual Intel CPU here might be zero extending the value,
7743 but it still only writes the lower word... */
7744 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7745 * happens when crossing an electric page boundary, is the high word checked
7746 * for write accessibility or not? Probably it is. What about segment limits?
7747 * It appears this behavior is also shared with trap error codes.
7748 *
7749 * Docs indicate the behavior changed, maybe with the Pentium or Pentium Pro. Check
7750 * on ancient hardware to find out when it actually changed. */
7751 uint16_t *pu16Dst;
7752 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop,
7753 IEM_ACCESS_STACK_RW, sizeof(*pu16Dst) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
7754 if (rc == VINF_SUCCESS)
7755 {
7756 *pu16Dst = (uint16_t)u32Value;
7757 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7758 }
7759
7760 /* Commit the new RSP value unless an access handler made trouble. */
7761 if (rc == VINF_SUCCESS)
7762 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7763
7764 return rc;
7765}
7766
7767
7768/**
7769 * Pushes a qword onto the stack.
7770 *
7771 * @returns Strict VBox status code.
7772 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7773 * @param u64Value The value to push.
7774 */
7775VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT
7776{
7777 /* Decrement the stack pointer. */
7778 uint64_t uNewRsp;
7779 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
7780
7781 /* Write the qword the lazy way. */
7782 uint64_t *pu64Dst;
7783 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
7784 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
7785 if (rc == VINF_SUCCESS)
7786 {
7787 *pu64Dst = u64Value;
7788 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
7789 }
7790
7791 /* Commit the new RSP value unless an access handler made trouble. */
7792 if (rc == VINF_SUCCESS)
7793 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7794
7795 return rc;
7796}
7797
7798
7799/**
7800 * Pops a word from the stack.
7801 *
7802 * @returns Strict VBox status code.
7803 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7804 * @param pu16Value Where to store the popped value.
7805 */
7806VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT
7807{
7808 /* Increment the stack pointer. */
7809 uint64_t uNewRsp;
7810 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
7811
7812 /* Read the word the lazy way. */
7813 uint16_t const *pu16Src;
7814 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
7815 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
7816 if (rc == VINF_SUCCESS)
7817 {
7818 *pu16Value = *pu16Src;
7819 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7820
7821 /* Commit the new RSP value. */
7822 if (rc == VINF_SUCCESS)
7823 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7824 }
7825
7826 return rc;
7827}
7828
7829
7830/**
7831 * Pops a dword from the stack.
7832 *
7833 * @returns Strict VBox status code.
7834 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7835 * @param pu32Value Where to store the popped value.
7836 */
7837VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT
7838{
7839 /* Increment the stack pointer. */
7840 uint64_t uNewRsp;
7841 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
7842
7843 /* Read the dword the lazy way. */
7844 uint32_t const *pu32Src;
7845 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
7846 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
7847 if (rc == VINF_SUCCESS)
7848 {
7849 *pu32Value = *pu32Src;
7850 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7851
7852 /* Commit the new RSP value. */
7853 if (rc == VINF_SUCCESS)
7854 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7855 }
7856
7857 return rc;
7858}
7859
7860
7861/**
7862 * Pops a qword from the stack.
7863 *
7864 * @returns Strict VBox status code.
7865 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7866 * @param pu64Value Where to store the popped value.
7867 */
7868VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT
7869{
7870 /* Increment the stack pointer. */
7871 uint64_t uNewRsp;
7872 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
7873
7874 /* Read the qword the lazy way. */
7875 uint64_t const *pu64Src;
7876 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
7877 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
7878 if (rc == VINF_SUCCESS)
7879 {
7880 *pu64Value = *pu64Src;
7881 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7882
7883 /* Commit the new RSP value. */
7884 if (rc == VINF_SUCCESS)
7885 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7886 }
7887
7888 return rc;
7889}
7890
7891
7892/**
7893 * Pushes a word onto the stack, using a temporary stack pointer.
7894 *
7895 * @returns Strict VBox status code.
7896 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7897 * @param u16Value The value to push.
7898 * @param pTmpRsp Pointer to the temporary stack pointer.
7899 */
7900VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7901{
7902 /* Decrement the stack pointer. */
7903 RTUINT64U NewRsp = *pTmpRsp;
7904 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
7905
7906 /* Write the word the lazy way. */
7907 uint16_t *pu16Dst;
7908 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
7909 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
7910 if (rc == VINF_SUCCESS)
7911 {
7912 *pu16Dst = u16Value;
7913 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
7914 }
7915
7916 /* Commit the new RSP value unless an access handler made trouble. */
7917 if (rc == VINF_SUCCESS)
7918 *pTmpRsp = NewRsp;
7919
7920 return rc;
7921}
7922
7923
7924/**
7925 * Pushes a dword onto the stack, using a temporary stack pointer.
7926 *
7927 * @returns Strict VBox status code.
7928 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7929 * @param u32Value The value to push.
7930 * @param pTmpRsp Pointer to the temporary stack pointer.
7931 */
7932VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7933{
7934 /* Decrement the stack pointer. */
7935 RTUINT64U NewRsp = *pTmpRsp;
7936 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
7937
7938 /* Write the dword the lazy way. */
7939 uint32_t *pu32Dst;
7940 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
7941 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
7942 if (rc == VINF_SUCCESS)
7943 {
7944 *pu32Dst = u32Value;
7945 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
7946 }
7947
7948 /* Commit the new RSP value unless an access handler made trouble. */
7949 if (rc == VINF_SUCCESS)
7950 *pTmpRsp = NewRsp;
7951
7952 return rc;
7953}
7954
7955
7956/**
7957 * Pushes a qword onto the stack, using a temporary stack pointer.
7958 *
7959 * @returns Strict VBox status code.
7960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7961 * @param u64Value The value to push.
7962 * @param pTmpRsp Pointer to the temporary stack pointer.
7963 */
7964VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7965{
7966 /* Decrement the stack pointer. */
7967 RTUINT64U NewRsp = *pTmpRsp;
7968 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
7969
7970 /* Write the qword the lazy way. */
7971 uint64_t *pu64Dst;
7972 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
7973 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
7974 if (rc == VINF_SUCCESS)
7975 {
7976 *pu64Dst = u64Value;
7977 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
7978 }
7979
7980 /* Commit the new RSP value unless an access handler made trouble. */
7981 if (rc == VINF_SUCCESS)
7982 *pTmpRsp = NewRsp;
7983
7984 return rc;
7985}
7986
7987
7988/**
7989 * Pops a word from the stack, using a temporary stack pointer.
7990 *
7991 * @returns Strict VBox status code.
7992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7993 * @param pu16Value Where to store the popped value.
7994 * @param pTmpRsp Pointer to the temporary stack pointer.
7995 */
7996VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7997{
7998 /* Increment the stack pointer. */
7999 RTUINT64U NewRsp = *pTmpRsp;
8000 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
8001
8002 /* Read the word the lazy way. */
8003 uint16_t const *pu16Src;
8004 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8005 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8006 if (rc == VINF_SUCCESS)
8007 {
8008 *pu16Value = *pu16Src;
8009 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8010
8011 /* Commit the new RSP value. */
8012 if (rc == VINF_SUCCESS)
8013 *pTmpRsp = NewRsp;
8014 }
8015
8016 return rc;
8017}
8018
8019
8020/**
8021 * Pops a dword from the stack, using a temporary stack pointer.
8022 *
8023 * @returns Strict VBox status code.
8024 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8025 * @param pu32Value Where to store the popped value.
8026 * @param pTmpRsp Pointer to the temporary stack pointer.
8027 */
8028VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8029{
8030 /* Increment the stack pointer. */
8031 RTUINT64U NewRsp = *pTmpRsp;
8032 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
8033
8034 /* Read the dword the lazy way. */
8035 uint32_t const *pu32Src;
8036 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8037 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8038 if (rc == VINF_SUCCESS)
8039 {
8040 *pu32Value = *pu32Src;
8041 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8042
8043 /* Commit the new RSP value. */
8044 if (rc == VINF_SUCCESS)
8045 *pTmpRsp = NewRsp;
8046 }
8047
8048 return rc;
8049}
8050
8051
8052/**
8053 * Pops a qword from the stack, using a temporary stack pointer.
8054 *
8055 * @returns Strict VBox status code.
8056 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8057 * @param pu64Value Where to store the popped value.
8058 * @param pTmpRsp Pointer to the temporary stack pointer.
8059 */
8060VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8061{
8062 /* Increment the stack pointer. */
8063 RTUINT64U NewRsp = *pTmpRsp;
8064 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8065
8066 /* Read the qword the lazy way. */
8067 uint64_t const *pu64Src;
8068 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8069 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8070 if (rcStrict == VINF_SUCCESS)
8071 {
8072 *pu64Value = *pu64Src;
8073 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8074
8075 /* Commit the new RSP value. */
8076 if (rcStrict == VINF_SUCCESS)
8077 *pTmpRsp = NewRsp;
8078 }
8079
8080 return rcStrict;
8081}
8082
8083
8084/**
8085 * Begin a special stack push (used by interrupts, exceptions and such).
8086 *
8087 * This will raise \#SS or \#PF if appropriate.
8088 *
8089 * @returns Strict VBox status code.
8090 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8091 * @param cbMem The number of bytes to push onto the stack.
8092 * @param cbAlign The alignment mask (7, 3, 1).
8093 * @param ppvMem Where to return the pointer to the stack memory.
8094 * As with the other memory functions this could be
8095 * direct access or bounce buffered access, so
8096 * don't commit the register update until the commit call
8097 * succeeds.
8098 * @param puNewRsp Where to return the new RSP value. This must be
8099 * passed unchanged to
8100 * iemMemStackPushCommitSpecial().
8101 */
8102VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8103 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8104{
8105 Assert(cbMem < UINT8_MAX);
8106 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8107 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
8108 IEM_ACCESS_STACK_W, cbAlign);
8109}
8110
8111
8112/**
8113 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8114 *
8115 * This will update the rSP.
8116 *
8117 * @returns Strict VBox status code.
8118 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8119 * @param pvMem The pointer returned by
8120 * iemMemStackPushBeginSpecial().
8121 * @param uNewRsp The new RSP value returned by
8122 * iemMemStackPushBeginSpecial().
8123 */
8124VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
8125{
8126 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
8127 if (rcStrict == VINF_SUCCESS)
8128 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8129 return rcStrict;
8130}
8131
8132
8133/**
8134 * Begin a special stack pop (used by iret, retf and such).
8135 *
8136 * This will raise \#SS or \#PF if appropriate.
8137 *
8138 * @returns Strict VBox status code.
8139 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8140 * @param cbMem The number of bytes to pop from the stack.
8141 * @param cbAlign The alignment mask (7, 3, 1).
8142 * @param ppvMem Where to return the pointer to the stack memory.
8143 * @param puNewRsp Where to return the new RSP value. This must be
8144 * assigned to CPUMCTX::rsp manually some time
8145 * after iemMemStackPopDoneSpecial() has been
8146 * called.
8147 */
8148VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8149 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8150{
8151 Assert(cbMem < UINT8_MAX);
8152 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8153 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8154}
8155
8156
8157/**
8158 * Continue a special stack pop (used by iret and retf).
8159 *
8160 * This will raise \#SS or \#PF if appropriate.
8161 *
8162 * @returns Strict VBox status code.
8163 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8164 * @param cbMem The number of bytes to pop from the stack.
8165 * @param ppvMem Where to return the pointer to the stack memory.
8166 * @param puNewRsp Where to return the new RSP value. This must be
8167 * assigned to CPUMCTX::rsp manually some time
8168 * after iemMemStackPopDoneSpecial() has been
8169 * called.
8170 */
8171VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8172{
8173 Assert(cbMem < UINT8_MAX);
8174 RTUINT64U NewRsp;
8175 NewRsp.u = *puNewRsp;
8176 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8177 *puNewRsp = NewRsp.u;
8178 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R,
8179 0 /* checked in iemMemStackPopBeginSpecial */);
8180}
8181
8182
8183/**
8184 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8185 * iemMemStackPopContinueSpecial).
8186 *
8187 * The caller will manually commit the rSP.
8188 *
8189 * @returns Strict VBox status code.
8190 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8191 * @param pvMem The pointer returned by
8192 * iemMemStackPopBeginSpecial() or
8193 * iemMemStackPopContinueSpecial().
8194 */
8195VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
8196{
8197 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8198}
8199
8200
8201/**
8202 * Fetches a system table byte.
8203 *
8204 * @returns Strict VBox status code.
8205 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8206 * @param pbDst Where to return the byte.
8207 * @param iSegReg The index of the segment register to use for
8208 * this access. The base and limits are checked.
8209 * @param GCPtrMem The address of the guest memory.
8210 */
8211VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8212{
8213 /* The lazy approach for now... */
8214 uint8_t const *pbSrc;
8215 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8216 if (rc == VINF_SUCCESS)
8217 {
8218 *pbDst = *pbSrc;
8219 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8220 }
8221 return rc;
8222}
8223
8224
8225/**
8226 * Fetches a system table word.
8227 *
8228 * @returns Strict VBox status code.
8229 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8230 * @param pu16Dst Where to return the word.
8231 * @param iSegReg The index of the segment register to use for
8232 * this access. The base and limits are checked.
8233 * @param GCPtrMem The address of the guest memory.
8234 */
8235VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8236{
8237 /* The lazy approach for now... */
8238 uint16_t const *pu16Src;
8239 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8240 if (rc == VINF_SUCCESS)
8241 {
8242 *pu16Dst = *pu16Src;
8243 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8244 }
8245 return rc;
8246}
8247
8248
8249/**
8250 * Fetches a system table dword.
8251 *
8252 * @returns Strict VBox status code.
8253 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8254 * @param pu32Dst Where to return the dword.
8255 * @param iSegReg The index of the segment register to use for
8256 * this access. The base and limits are checked.
8257 * @param GCPtrMem The address of the guest memory.
8258 */
8259VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8260{
8261 /* The lazy approach for now... */
8262 uint32_t const *pu32Src;
8263 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8264 if (rc == VINF_SUCCESS)
8265 {
8266 *pu32Dst = *pu32Src;
8267 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8268 }
8269 return rc;
8270}
8271
8272
8273/**
8274 * Fetches a system table qword.
8275 *
8276 * @returns Strict VBox status code.
8277 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8278 * @param pu64Dst Where to return the qword.
8279 * @param iSegReg The index of the segment register to use for
8280 * this access. The base and limits are checked.
8281 * @param GCPtrMem The address of the guest memory.
8282 */
8283VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8284{
8285 /* The lazy approach for now... */
8286 uint64_t const *pu64Src;
8287 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8288 if (rc == VINF_SUCCESS)
8289 {
8290 *pu64Dst = *pu64Src;
8291 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8292 }
8293 return rc;
8294}
8295
8296
8297/**
8298 * Fetches a descriptor table entry with caller specified error code.
8299 *
8300 * @returns Strict VBox status code.
8301 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8302 * @param pDesc Where to return the descriptor table entry.
8303 * @param uSel The selector which table entry to fetch.
8304 * @param uXcpt The exception to raise on table lookup error.
8305 * @param uErrorCode The error code associated with the exception.
8306 */
8307static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8308 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8309{
8310 AssertPtr(pDesc);
8311 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8312
8313 /** @todo did the 286 require all 8 bytes to be accessible? */
8314 /*
8315 * Get the selector table base and check bounds.
8316 */
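/* Note: uSel | X86_SEL_RPL_LDT is the offset of the last byte of the 8-byte entry, so comparing it against the table limit validates the whole descriptor. */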
8317 RTGCPTR GCPtrBase;
8318 if (uSel & X86_SEL_LDT)
8319 {
8320 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8321 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8322 {
8323 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8324 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8325 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8326 uErrorCode, 0);
8327 }
8328
8329 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8330 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8331 }
8332 else
8333 {
8334 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8335 {
8336 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8337 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8338 uErrorCode, 0);
8339 }
8340 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8341 }
8342
8343 /*
8344 * Read the legacy descriptor and maybe the long mode extensions if
8345 * required.
8346 */
8347 VBOXSTRICTRC rcStrict;
8348 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8349 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8350 else
8351 {
8352 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8353 if (rcStrict == VINF_SUCCESS)
8354 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8355 if (rcStrict == VINF_SUCCESS)
8356 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8357 if (rcStrict == VINF_SUCCESS)
8358 pDesc->Legacy.au16[3] = 0;
8359 else
8360 return rcStrict;
8361 }
8362
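/* In long mode, system descriptors are 16 bytes, so the upper half must be fetched as well; note that (uSel | X86_SEL_RPL_LDT) + 1 equals (uSel & X86_SEL_MASK) + 8. */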
8363 if (rcStrict == VINF_SUCCESS)
8364 {
8365 if ( !IEM_IS_LONG_MODE(pVCpu)
8366 || pDesc->Legacy.Gen.u1DescType)
8367 pDesc->Long.au64[1] = 0;
8368 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8369 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8370 else
8371 {
8372 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8373 /** @todo is this the right exception? */
8374 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8375 }
8376 }
8377 return rcStrict;
8378}
8379
8380
8381/**
8382 * Fetches a descriptor table entry.
8383 *
8384 * @returns Strict VBox status code.
8385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8386 * @param pDesc Where to return the descriptor table entry.
8387 * @param uSel The selector which table entry to fetch.
8388 * @param uXcpt The exception to raise on table lookup error.
8389 */
8390VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8391{
8392 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8393}
8394
8395
8396/**
8397 * Marks the selector descriptor as accessed (only non-system descriptors).
8398 *
8399 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8400 * will therefore skip the limit checks.
8401 *
8402 * @returns Strict VBox status code.
8403 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8404 * @param uSel The selector.
8405 */
8406VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8407{
8408 /*
8409 * Get the selector table base and calculate the entry address.
8410 */
8411 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8412 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8413 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8414 GCPtr += uSel & X86_SEL_MASK;
8415
8416 /*
8417 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8418 * ugly stuff to avoid this. This will make sure it's an atomic access
8419 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8420 */
8421 VBOXSTRICTRC rcStrict;
8422 uint32_t volatile *pu32;
8423 if ((GCPtr & 3) == 0)
8424 {
8425 /* The normal case: map the 32 bits surrounding the accessed bit (bit 40). */
8426 GCPtr += 2 + 2;
8427 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8428 if (rcStrict != VINF_SUCCESS)
8429 return rcStrict;
8430 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8431 }
8432 else
8433 {
8434 /* The misaligned GDT/LDT case, map the whole thing. */
8435 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8436 if (rcStrict != VINF_SUCCESS)
8437 return rcStrict;
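/* Advance the pointer to the next 32-bit aligned address and adjust the bit index (bit 40 is the accessed bit) by the number of bits skipped, keeping ASMAtomicBitSet happy. */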
8438 switch ((uintptr_t)pu32 & 3)
8439 {
8440 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8441 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8442 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8443 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8444 }
8445 }
8446
8447 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8448}
8449
8450/** @} */
8451
8452/** @name Opcode Helpers.
8453 * @{
8454 */
8455
8456/**
8457 * Calculates the effective address of a ModR/M memory operand.
8458 *
8459 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8460 *
8461 * @return Strict VBox status code.
8462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8463 * @param bRm The ModRM byte.
8464 * @param cbImm The size of any immediate following the
8465 * effective address opcode bytes. Important for
8466 * RIP relative addressing.
8467 * @param pGCPtrEff Where to return the effective address.
8468 */
8469VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8470{
8471 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8472# define SET_SS_DEF() \
8473 do \
8474 { \
8475 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8476 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8477 } while (0)
8478
8479 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8480 {
8481/** @todo Check the effective address size crap! */
8482 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8483 {
8484 uint16_t u16EffAddr;
8485
8486 /* Handle the disp16 form with no registers first. */
8487 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8488 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8489 else
8490 {
8491 /* Get the displacement. */
8492 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8493 {
8494 case 0: u16EffAddr = 0; break;
8495 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8496 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8497 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8498 }
8499
8500 /* Add the base and index registers to the disp. */
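/* R/M 0-7: [BX+SI], [BX+DI], [BP+SI], [BP+DI], [SI], [DI], [BP], [BX]; the BP-based forms default to the SS segment. */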
8501 switch (bRm & X86_MODRM_RM_MASK)
8502 {
8503 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8504 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8505 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8506 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8507 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8508 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8509 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8510 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8511 }
8512 }
8513
8514 *pGCPtrEff = u16EffAddr;
8515 }
8516 else
8517 {
8518 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8519 uint32_t u32EffAddr;
8520
8521 /* Handle the disp32 form with no registers first. */
8522 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8523 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8524 else
8525 {
8526 /* Get the register (or SIB) value. */
8527 switch ((bRm & X86_MODRM_RM_MASK))
8528 {
8529 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8530 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8531 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8532 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8533 case 4: /* SIB */
8534 {
8535 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8536
8537 /* Get the index and scale it. */
8538 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8539 {
8540 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8541 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8542 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8543 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8544 case 4: u32EffAddr = 0; /*none */ break;
8545 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8546 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8547 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8548 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8549 }
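/* Scale the index by 1, 2, 4 or 8 (the SIB scale field is a shift count). */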
8550 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8551
8552 /* add base */
8553 switch (bSib & X86_SIB_BASE_MASK)
8554 {
8555 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8556 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8557 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8558 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8559 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
8560 case 5:
8561 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8562 {
8563 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8564 SET_SS_DEF();
8565 }
8566 else
8567 {
8568 uint32_t u32Disp;
8569 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8570 u32EffAddr += u32Disp;
8571 }
8572 break;
8573 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8574 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8575 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8576 }
8577 break;
8578 }
8579 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8580 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8581 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8582 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8583 }
8584
8585 /* Get and add the displacement. */
8586 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8587 {
8588 case 0:
8589 break;
8590 case 1:
8591 {
8592 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8593 u32EffAddr += i8Disp;
8594 break;
8595 }
8596 case 2:
8597 {
8598 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8599 u32EffAddr += u32Disp;
8600 break;
8601 }
8602 default:
8603 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8604 }
8605
8606 }
8607 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8608 *pGCPtrEff = u32EffAddr;
8609 else
8610 {
8611 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8612 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8613 }
8614 }
8615 }
8616 else
8617 {
8618 uint64_t u64EffAddr;
8619
8620 /* Handle the rip+disp32 form with no registers first. */
8621 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8622 {
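/* RIP-relative addressing is relative to the end of the instruction, hence the opcode bytes decoded so far plus the size of the trailing immediate (cbImm) are added to RIP. */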
8623 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8624 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
8625 }
8626 else
8627 {
8628 /* Get the register (or SIB) value. */
8629 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8630 {
8631 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8632 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8633 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8634 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8635 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8636 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8637 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8638 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8639 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8640 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8641 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8642 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8643 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8644 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8645 /* SIB */
8646 case 4:
8647 case 12:
8648 {
8649 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8650
8651 /* Get the index and scale it. */
8652 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8653 {
8654 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8655 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8656 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8657 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8658 case 4: u64EffAddr = 0; /*none */ break;
8659 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8660 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8661 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8662 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8663 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8664 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8665 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8666 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8667 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8668 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8669 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8670 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8671 }
8672 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8673
8674 /* add base */
8675 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8676 {
8677 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8678 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8679 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8680 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8681 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
8682 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8683 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8684 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8685 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8686 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8687 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8688 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8689 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8690 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8691 /* complicated encodings */
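/* Base register 5/13 with mod == 0 means disp32 with no base register; otherwise it is rBP (with SS as the default segment) or r13. */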
8692 case 5:
8693 case 13:
8694 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8695 {
8696 if (!pVCpu->iem.s.uRexB)
8697 {
8698 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8699 SET_SS_DEF();
8700 }
8701 else
8702 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8703 }
8704 else
8705 {
8706 uint32_t u32Disp;
8707 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8708 u64EffAddr += (int32_t)u32Disp;
8709 }
8710 break;
8711 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8712 }
8713 break;
8714 }
8715 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8716 }
8717
8718 /* Get and add the displacement. */
8719 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8720 {
8721 case 0:
8722 break;
8723 case 1:
8724 {
8725 int8_t i8Disp;
8726 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8727 u64EffAddr += i8Disp;
8728 break;
8729 }
8730 case 2:
8731 {
8732 uint32_t u32Disp;
8733 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8734 u64EffAddr += (int32_t)u32Disp;
8735 break;
8736 }
8737 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8738 }
8739
8740 }
8741
8742 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8743 *pGCPtrEff = u64EffAddr;
8744 else
8745 {
8746 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8747 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8748 }
8749 }
8750
8751 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8752 return VINF_SUCCESS;
8753}
8754
8755
8756/**
8757 * Calculates the effective address of a ModR/M memory operand.
8758 *
8759 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8760 *
8761 * @return Strict VBox status code.
8762 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8763 * @param bRm The ModRM byte.
8764 * @param cbImm The size of any immediate following the
8765 * effective address opcode bytes. Important for
8766 * RIP relative addressing.
8767 * @param pGCPtrEff Where to return the effective address.
8768 * @param offRsp RSP displacement.
8769 */
8770VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp) RT_NOEXCEPT
8771{
8772 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8773# define SET_SS_DEF() \
8774 do \
8775 { \
8776 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8777 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8778 } while (0)
8779
8780 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8781 {
8782/** @todo Check the effective address size crap! */
8783 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8784 {
8785 uint16_t u16EffAddr;
8786
8787 /* Handle the disp16 form with no registers first. */
8788 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8789 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8790 else
8791 {
8792 /* Get the displacement. */
8793 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8794 {
8795 case 0: u16EffAddr = 0; break;
8796 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8797 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8798 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8799 }
8800
8801 /* Add the base and index registers to the disp. */
8802 switch (bRm & X86_MODRM_RM_MASK)
8803 {
8804 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8805 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8806 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8807 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8808 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8809 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8810 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8811 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8812 }
8813 }
8814
8815 *pGCPtrEff = u16EffAddr;
8816 }
8817 else
8818 {
8819 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8820 uint32_t u32EffAddr;
8821
8822 /* Handle the disp32 form with no registers first. */
8823 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8824 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8825 else
8826 {
8827 /* Get the register (or SIB) value. */
8828 switch ((bRm & X86_MODRM_RM_MASK))
8829 {
8830 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8831 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8832 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8833 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8834 case 4: /* SIB */
8835 {
8836 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8837
8838 /* Get the index and scale it. */
8839 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8840 {
8841 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8842 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8843 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8844 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8845 case 4: u32EffAddr = 0; /*none */ break;
8846 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8847 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8848 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8849 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8850 }
8851 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8852
8853 /* add base */
8854 switch (bSib & X86_SIB_BASE_MASK)
8855 {
8856 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8857 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8858 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8859 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8860 case 4:
8861 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
8862 SET_SS_DEF();
8863 break;
8864 case 5:
8865 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8866 {
8867 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8868 SET_SS_DEF();
8869 }
8870 else
8871 {
8872 uint32_t u32Disp;
8873 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8874 u32EffAddr += u32Disp;
8875 }
8876 break;
8877 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8878 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8879 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8880 }
8881 break;
8882 }
8883 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8884 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8885 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8886 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8887 }
8888
8889 /* Get and add the displacement. */
8890 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8891 {
8892 case 0:
8893 break;
8894 case 1:
8895 {
8896 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8897 u32EffAddr += i8Disp;
8898 break;
8899 }
8900 case 2:
8901 {
8902 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8903 u32EffAddr += u32Disp;
8904 break;
8905 }
8906 default:
8907 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8908 }
8909
8910 }
8911 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8912 *pGCPtrEff = u32EffAddr;
8913 else
8914 {
8915 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8916 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8917 }
8918 }
8919 }
8920 else
8921 {
8922 uint64_t u64EffAddr;
8923
8924 /* Handle the rip+disp32 form with no registers first. */
8925 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8926 {
8927 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8928 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
8929 }
8930 else
8931 {
8932 /* Get the register (or SIB) value. */
8933 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8934 {
8935 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8936 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8937 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8938 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8939 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8940 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8941 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8942 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8943 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8944 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8945 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8946 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8947 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8948 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8949 /* SIB */
8950 case 4:
8951 case 12:
8952 {
8953 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8954
8955 /* Get the index and scale it. */
8956 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8957 {
8958 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8959 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8960 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8961 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8962 case 4: u64EffAddr = 0; /*none */ break;
8963 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8964 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8965 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8966 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8967 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8968 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8969 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8970 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8971 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8972 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8973 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8974 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8975 }
8976 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8977
8978 /* add base */
8979 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8980 {
8981 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8982 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8983 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8984 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8985 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
8986 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8987 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8988 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8989 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8990 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8991 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8992 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8993 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8994 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8995 /* complicated encodings */
8996 case 5:
8997 case 13:
8998 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8999 {
9000 if (!pVCpu->iem.s.uRexB)
9001 {
9002 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9003 SET_SS_DEF();
9004 }
9005 else
9006 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9007 }
9008 else
9009 {
9010 uint32_t u32Disp;
9011 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9012 u64EffAddr += (int32_t)u32Disp;
9013 }
9014 break;
9015 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9016 }
9017 break;
9018 }
9019 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9020 }
9021
9022 /* Get and add the displacement. */
9023 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9024 {
9025 case 0:
9026 break;
9027 case 1:
9028 {
9029 int8_t i8Disp;
9030 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9031 u64EffAddr += i8Disp;
9032 break;
9033 }
9034 case 2:
9035 {
9036 uint32_t u32Disp;
9037 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9038 u64EffAddr += (int32_t)u32Disp;
9039 break;
9040 }
9041 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9042 }
9043
9044 }
9045
9046 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9047 *pGCPtrEff = u64EffAddr;
9048 else
9049 {
9050 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9051 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9052 }
9053 }
9054
9055 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9056 return VINF_SUCCESS;
9057}
9058
9059
9060#ifdef IEM_WITH_SETJMP
9061/**
9062 * Calculates the effective address of a ModR/M memory operand.
9063 *
9064 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9065 *
9066 * May longjmp on internal error.
9067 *
9068 * @return The effective address.
9069 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9070 * @param bRm The ModRM byte.
9071 * @param cbImm The size of any immediate following the
9072 * effective address opcode bytes. Important for
9073 * RIP relative addressing.
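 *
 * @note  Illustrative only (assumption, not taken from this file): with
 *        IEM_WITH_SETJMP defined, IEM_MC_CALC_RM_EFF_ADDR is expected to boil
 *        down to something like
 *            GCPtrEff = iemOpHlpCalcRmEffAddrJmp(pVCpu, bRm, 0 /*cbImm*/);
 *        for an instruction without a trailing immediate.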
9074 */
9075RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm) RT_NOEXCEPT
9076{
9077 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
9078# define SET_SS_DEF() \
9079 do \
9080 { \
9081 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9082 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9083 } while (0)
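/* Added note: x86 defaults to the SS segment whenever the effective address is
   based on xBP or xSP; SET_SS_DEF() applies that default unless an explicit
   segment override prefix was decoded. */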
9084
9085 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
9086 {
9087/** @todo Check the effective address size crap! */
9088 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9089 {
9090 uint16_t u16EffAddr;
9091
9092 /* Handle the disp16 form with no registers first. */
9093 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9094 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9095 else
9096 {
9097                /* Get the displacement. */
9098 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9099 {
9100 case 0: u16EffAddr = 0; break;
9101 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9102 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9103 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
9104 }
9105
9106 /* Add the base and index registers to the disp. */
9107 switch (bRm & X86_MODRM_RM_MASK)
9108 {
9109 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9110 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9111 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9112 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9113 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9114 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9115 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9116 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9117 }
9118 }
9119
9120 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
9121 return u16EffAddr;
9122 }
9123
9124 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9125 uint32_t u32EffAddr;
9126
9127 /* Handle the disp32 form with no registers first. */
9128 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9129 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9130 else
9131 {
9132 /* Get the register (or SIB) value. */
9133 switch ((bRm & X86_MODRM_RM_MASK))
9134 {
9135 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9136 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9137 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9138 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9139 case 4: /* SIB */
9140 {
9141 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9142
9143 /* Get the index and scale it. */
9144 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9145 {
9146 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9147 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9148 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9149 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9150                            case 4: u32EffAddr = 0; /* none */ break;
9151 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9152 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9153 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9154 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9155 }
9156 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9157
9158 /* add base */
9159 switch (bSib & X86_SIB_BASE_MASK)
9160 {
9161 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9162 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9163 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9164 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9165 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
9166 case 5:
9167 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9168 {
9169 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9170 SET_SS_DEF();
9171 }
9172 else
9173 {
9174 uint32_t u32Disp;
9175 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9176 u32EffAddr += u32Disp;
9177 }
9178 break;
9179 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9180 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9181 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9182 }
9183 break;
9184 }
9185 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9186 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9187 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9188 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9189 }
9190
9191 /* Get and add the displacement. */
9192 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9193 {
9194 case 0:
9195 break;
9196 case 1:
9197 {
9198 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9199 u32EffAddr += i8Disp;
9200 break;
9201 }
9202 case 2:
9203 {
9204 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9205 u32EffAddr += u32Disp;
9206 break;
9207 }
9208 default:
9209 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
9210 }
9211 }
9212
9213 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9214 {
9215 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9216 return u32EffAddr;
9217 }
9218 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9219 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
9220 return u32EffAddr & UINT16_MAX;
9221 }
9222
9223 uint64_t u64EffAddr;
9224
9225 /* Handle the rip+disp32 form with no registers first. */
9226 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9227 {
9228 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9229 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
9230 }
9231 else
9232 {
9233 /* Get the register (or SIB) value. */
9234 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9235 {
9236 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9237 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9238 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9239 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9240 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9241 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9242 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9243 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9244 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9245 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9246 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9247 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9248 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9249 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9250 /* SIB */
9251 case 4:
9252 case 12:
9253 {
9254 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9255
9256 /* Get the index and scale it. */
9257 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9258 {
9259 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9260 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9261 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9262 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9263                            case 4: u64EffAddr = 0; /* none */ break;
9264 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9265 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9266 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9267 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9268 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9269 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9270 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9271 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9272 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9273 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9274 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9275 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9276 }
9277 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9278
9279 /* add base */
9280 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9281 {
9282 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9283 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9284 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9285 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9286 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
9287 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9288 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9289 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9290 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9291 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9292 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9293 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9294 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9295 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9296 /* complicated encodings */
9297 case 5:
9298 case 13:
9299 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9300 {
9301 if (!pVCpu->iem.s.uRexB)
9302 {
9303 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9304 SET_SS_DEF();
9305 }
9306 else
9307 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9308 }
9309 else
9310 {
9311 uint32_t u32Disp;
9312 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9313 u64EffAddr += (int32_t)u32Disp;
9314 }
9315 break;
9316 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9317 }
9318 break;
9319 }
9320 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9321 }
9322
9323 /* Get and add the displacement. */
9324 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9325 {
9326 case 0:
9327 break;
9328 case 1:
9329 {
9330 int8_t i8Disp;
9331 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9332 u64EffAddr += i8Disp;
9333 break;
9334 }
9335 case 2:
9336 {
9337 uint32_t u32Disp;
9338 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9339 u64EffAddr += (int32_t)u32Disp;
9340 break;
9341 }
9342 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9343 }
9344
9345 }
9346
9347 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9348 {
9349 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9350 return u64EffAddr;
9351 }
9352 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9353 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9354 return u64EffAddr & UINT32_MAX;
9355}
9356#endif /* IEM_WITH_SETJMP */
9357
9358/** @} */
9359
9360
9361#ifdef LOG_ENABLED
9362/**
9363 * Logs the current instruction.
9364 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9365 * @param fSameCtx Set if we have the same context information as the VMM,
9366 * clear if we may have already executed an instruction in
9367 * our debug context. When clear, we assume IEMCPU holds
9368 * valid CPU mode info.
9369 *
9370 * The @a fSameCtx parameter is now misleading and obsolete.
9371 * @param pszFunction The IEM function doing the execution.
9372 */
9373static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9374{
9375# ifdef IN_RING3
9376 if (LogIs2Enabled())
9377 {
9378 char szInstr[256];
9379 uint32_t cbInstr = 0;
9380 if (fSameCtx)
9381 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9382 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9383 szInstr, sizeof(szInstr), &cbInstr);
9384 else
9385 {
9386 uint32_t fFlags = 0;
9387 switch (pVCpu->iem.s.enmCpuMode)
9388 {
9389 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9390 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9391 case IEMMODE_16BIT:
9392 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9393 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9394 else
9395 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9396 break;
9397 }
9398 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9399 szInstr, sizeof(szInstr), &cbInstr);
9400 }
9401
9402 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9403 Log2(("**** %s\n"
9404 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9405 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9406 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9407 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9408 " %s\n"
9409 , pszFunction,
9410 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9411 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9412 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9413 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9414 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9415 szInstr));
9416
9417 if (LogIs3Enabled())
9418 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9419 }
9420 else
9421# endif
9422 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9423 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9424 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9425}
9426#endif /* LOG_ENABLED */
9427
9428
9429#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9430/**
9431 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9432 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9433 *
9434 * @returns Modified rcStrict.
9435 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9436 * @param rcStrict The instruction execution status.
9437 */
9438static VBOXSTRICTRC iemHandleNestedInstructionBoundraryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9439{
9440 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
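    /* Added note: the checks below are ordered so that APIC-write emulation and
       MTF take precedence, followed by the VMX-preemption timer and finally the
       NMI-window / interrupt-window exits, matching the priority comments given
       with each branch. */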
9441 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9442 {
9443 /* VMX preemption timer takes priority over NMI-window exits. */
9444 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9445 {
9446 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9447 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9448 }
9449 /*
9450 * Check remaining intercepts.
9451 *
9452 * NMI-window and Interrupt-window VM-exits.
9453 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9454 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9455 *
9456 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9457 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9458 */
9459 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9460 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
9461 && !TRPMHasTrap(pVCpu))
9462 {
9463 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9464 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9465 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9466 {
9467 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9468 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9469 }
9470 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9471 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9472 {
9473 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9474 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9475 }
9476 }
9477 }
9478 /* TPR-below threshold/APIC write has the highest priority. */
9479 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9480 {
9481 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9482 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
9483 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9484 }
9485 /* MTF takes priority over VMX-preemption timer. */
9486 else
9487 {
9488 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9489 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
9490 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9491 }
9492 return rcStrict;
9493}
9494#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9495
9496
9497/**
9498 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9499 * IEMExecOneWithPrefetchedByPC.
9500 *
9501 * Similar code is found in IEMExecLots.
9502 *
9503 * @return Strict VBox status code.
9504 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9505 * @param fExecuteInhibit If set, execute the instruction following CLI,
9506 * POP SS and MOV SS,GR.
9507 * @param pszFunction The calling function name.
9508 * @param   pszFunction The calling function name.
 */
9509DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9510{
9511 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9512 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9513 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9514 RT_NOREF_PV(pszFunction);
9515
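
    /* Added note: the block below decodes and executes exactly one instruction.
       With IEM_WITH_SETJMP defined, failures inside the decode/execute path are
       reported by longjmp'ing back here with the status code, which setjmp()
       then delivers in rcStrict; the previous jump buffer is restored afterwards
       so nested IEM invocations keep working. */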
9516#ifdef IEM_WITH_SETJMP
9517 VBOXSTRICTRC rcStrict;
9518 jmp_buf JmpBuf;
9519 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
9520 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9521 if ((rcStrict = setjmp(JmpBuf)) == 0)
9522 {
9523 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9524 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9525 }
9526 else
9527 pVCpu->iem.s.cLongJumps++;
9528 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9529#else
9530 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9531 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9532#endif
9533 if (rcStrict == VINF_SUCCESS)
9534 pVCpu->iem.s.cInstructions++;
9535 if (pVCpu->iem.s.cActiveMappings > 0)
9536 {
9537 Assert(rcStrict != VINF_SUCCESS);
9538 iemMemRollback(pVCpu);
9539 }
9540 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9541 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9542 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9543
9544//#ifdef DEBUG
9545// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9546//#endif
9547
9548#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9549 /*
9550 * Perform any VMX nested-guest instruction boundary actions.
9551 *
9552 * If any of these causes a VM-exit, we must skip executing the next
9553 * instruction (would run into stale page tables). A VM-exit makes sure
9554 * there is no interrupt-inhibition, so that should ensure we don't go
9555 * to try execute the next instruction. Clearing fExecuteInhibit is
9556 * problematic because of the setjmp/longjmp clobbering above.
9557 */
9558 if ( rcStrict == VINF_SUCCESS
9559 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9560 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9561 rcStrict = iemHandleNestedInstructionBoundraryFFs(pVCpu, rcStrict);
9562#endif
9563
9564 /* Execute the next instruction as well if a cli, pop ss or
9565 mov ss, Gr has just completed successfully. */
9566 if ( fExecuteInhibit
9567 && rcStrict == VINF_SUCCESS
9568 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
9569 && EMIsInhibitInterruptsActive(pVCpu))
9570 {
9571 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers, pVCpu->iem.s.fDisregardLock);
9572 if (rcStrict == VINF_SUCCESS)
9573 {
9574#ifdef LOG_ENABLED
9575 iemLogCurInstr(pVCpu, false, pszFunction);
9576#endif
9577#ifdef IEM_WITH_SETJMP
9578 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9579 if ((rcStrict = setjmp(JmpBuf)) == 0)
9580 {
9581 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9582 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9583 }
9584 else
9585 pVCpu->iem.s.cLongJumps++;
9586 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9587#else
9588 IEM_OPCODE_GET_NEXT_U8(&b);
9589 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9590#endif
9591 if (rcStrict == VINF_SUCCESS)
9592 pVCpu->iem.s.cInstructions++;
9593 if (pVCpu->iem.s.cActiveMappings > 0)
9594 {
9595 Assert(rcStrict != VINF_SUCCESS);
9596 iemMemRollback(pVCpu);
9597 }
9598 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9599 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9600 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9601 }
9602 else if (pVCpu->iem.s.cActiveMappings > 0)
9603 iemMemRollback(pVCpu);
9604 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); /* hope this is correct for all exceptional cases... */
9605 }
9606
9607 /*
9608 * Return value fiddling, statistics and sanity assertions.
9609 */
9610 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9611
9612 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9613 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9614 return rcStrict;
9615}
9616
9617
9618/**
9619 * Execute one instruction.
9620 *
9621 * @return Strict VBox status code.
9622 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
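 *
 * @note  Illustrative only (assumed caller pattern, not taken from this file):
 *            VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *        Any status other than VINF_SUCCESS is simply propagated back to the
 *        caller's execution loop.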
9623 */
9624VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9625{
9626    AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9627#ifdef LOG_ENABLED
9628 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9629#endif
9630
9631 /*
9632 * Do the decoding and emulation.
9633 */
9634 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9635 if (rcStrict == VINF_SUCCESS)
9636 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9637 else if (pVCpu->iem.s.cActiveMappings > 0)
9638 iemMemRollback(pVCpu);
9639
9640 if (rcStrict != VINF_SUCCESS)
9641 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9642 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9643 return rcStrict;
9644}
9645
9646
9647VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9648{
9649 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9650
9651 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9652 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9653 if (rcStrict == VINF_SUCCESS)
9654 {
9655 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9656 if (pcbWritten)
9657 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9658 }
9659 else if (pVCpu->iem.s.cActiveMappings > 0)
9660 iemMemRollback(pVCpu);
9661
9662 return rcStrict;
9663}
9664
9665
9666VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9667 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9668{
9669 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9670
9671 VBOXSTRICTRC rcStrict;
9672 if ( cbOpcodeBytes
9673 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9674 {
9675 iemInitDecoder(pVCpu, false, false);
9676#ifdef IEM_WITH_CODE_TLB
9677 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9678 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9679 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9680 pVCpu->iem.s.offCurInstrStart = 0;
9681 pVCpu->iem.s.offInstrNextByte = 0;
9682#else
9683 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9684 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9685#endif
9686 rcStrict = VINF_SUCCESS;
9687 }
9688 else
9689 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9690 if (rcStrict == VINF_SUCCESS)
9691 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9692 else if (pVCpu->iem.s.cActiveMappings > 0)
9693 iemMemRollback(pVCpu);
9694
9695 return rcStrict;
9696}
9697
9698
9699VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9700{
9701 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9702
9703 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9704 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9705 if (rcStrict == VINF_SUCCESS)
9706 {
9707 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9708 if (pcbWritten)
9709 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9710 }
9711 else if (pVCpu->iem.s.cActiveMappings > 0)
9712 iemMemRollback(pVCpu);
9713
9714 return rcStrict;
9715}
9716
9717
9718VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9719 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9720{
9721 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9722
9723 VBOXSTRICTRC rcStrict;
9724 if ( cbOpcodeBytes
9725 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9726 {
9727 iemInitDecoder(pVCpu, true, false);
9728#ifdef IEM_WITH_CODE_TLB
9729 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9730 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9731 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9732 pVCpu->iem.s.offCurInstrStart = 0;
9733 pVCpu->iem.s.offInstrNextByte = 0;
9734#else
9735 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9736 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9737#endif
9738 rcStrict = VINF_SUCCESS;
9739 }
9740 else
9741 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9742 if (rcStrict == VINF_SUCCESS)
9743 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9744 else if (pVCpu->iem.s.cActiveMappings > 0)
9745 iemMemRollback(pVCpu);
9746
9747 return rcStrict;
9748}
9749
9750
9751/**
9752 * For debugging DISGetParamSize, may come in handy.
9753 *
9754 * @returns Strict VBox status code.
9755 * @param pVCpu The cross context virtual CPU structure of the
9756 * calling EMT.
9757 * @param pCtxCore The context core structure.
9758 * @param OpcodeBytesPC The PC of the opcode bytes.
9759 * @param   pvOpcodeBytes   Prefetched opcode bytes.
9760 * @param cbOpcodeBytes Number of prefetched bytes.
9761 * @param pcbWritten Where to return the number of bytes written.
9762 * Optional.
9763 */
9764VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9765 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
9766 uint32_t *pcbWritten)
9767{
9768 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9769
9770 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9771 VBOXSTRICTRC rcStrict;
9772 if ( cbOpcodeBytes
9773 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9774 {
9775 iemInitDecoder(pVCpu, true, false);
9776#ifdef IEM_WITH_CODE_TLB
9777 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9778 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9779 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9780 pVCpu->iem.s.offCurInstrStart = 0;
9781 pVCpu->iem.s.offInstrNextByte = 0;
9782#else
9783 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9784 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9785#endif
9786 rcStrict = VINF_SUCCESS;
9787 }
9788 else
9789 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9790 if (rcStrict == VINF_SUCCESS)
9791 {
9792 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
9793 if (pcbWritten)
9794 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9795 }
9796 else if (pVCpu->iem.s.cActiveMappings > 0)
9797 iemMemRollback(pVCpu);
9798
9799 return rcStrict;
9800}
9801
9802
9803/**
9804 * For handling split cacheline lock operations when the host has split-lock
9805 * detection enabled.
9806 *
9807 * This will cause the interpreter to disregard the lock prefix and implicit
9808 * locking (xchg).
9809 *
9810 * @returns Strict VBox status code.
9811 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9812 */
9813VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9814{
9815 /*
9816 * Do the decoding and emulation.
9817 */
9818 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, true /*fDisregardLock*/);
9819 if (rcStrict == VINF_SUCCESS)
9820 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9821 else if (pVCpu->iem.s.cActiveMappings > 0)
9822 iemMemRollback(pVCpu);
9823
9824 if (rcStrict != VINF_SUCCESS)
9825 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9826 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9827 return rcStrict;
9828}
9829
9830
9831VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9832{
9833 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9834 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9835
9836 /*
9837 * See if there is an interrupt pending in TRPM, inject it if we can.
9838 */
9839 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9840#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9841 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9842 if (fIntrEnabled)
9843 {
9844 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9845 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9846 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9847 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9848 else
9849 {
9850 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9851 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9852 }
9853 }
9854#else
9855 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9856#endif
9857
9858 /** @todo What if we are injecting an exception and not an interrupt? Is that
9859 * possible here? For now we assert it is indeed only an interrupt. */
9860 if ( fIntrEnabled
9861 && TRPMHasTrap(pVCpu)
9862 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
9863 {
9864 uint8_t u8TrapNo;
9865 TRPMEVENT enmType;
9866 uint32_t uErrCode;
9867 RTGCPTR uCr2;
9868 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */, NULL /* fIcebp */);
9869 AssertRC(rc2);
9870 Assert(enmType == TRPM_HARDWARE_INT);
9871 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
9872 TRPMResetTrap(pVCpu);
9873#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9874 /* Injecting an event may cause a VM-exit. */
9875 if ( rcStrict != VINF_SUCCESS
9876 && rcStrict != VINF_IEM_RAISED_XCPT)
9877 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9878#else
9879 NOREF(rcStrict);
9880#endif
9881 }
9882
9883 /*
9884 * Initial decoder init w/ prefetch, then setup setjmp.
9885 */
9886 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9887 if (rcStrict == VINF_SUCCESS)
9888 {
9889#ifdef IEM_WITH_SETJMP
9890 jmp_buf JmpBuf;
9891 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
9892 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9893 pVCpu->iem.s.cActiveMappings = 0;
9894 if ((rcStrict = setjmp(JmpBuf)) == 0)
9895#endif
9896 {
9897 /*
9898 * The run loop. We limit ourselves to 4096 instructions right now.
9899 */
9900 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9901 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9902 for (;;)
9903 {
9904 /*
9905 * Log the state.
9906 */
9907#ifdef LOG_ENABLED
9908 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9909#endif
9910
9911 /*
9912 * Do the decoding and emulation.
9913 */
9914 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9915 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9916 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9917 {
9918 Assert(pVCpu->iem.s.cActiveMappings == 0);
9919 pVCpu->iem.s.cInstructions++;
9920 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9921 {
9922 uint64_t fCpu = pVCpu->fLocalForcedActions
9923 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9924 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9925 | VMCPU_FF_TLB_FLUSH
9926 | VMCPU_FF_INHIBIT_INTERRUPTS
9927 | VMCPU_FF_BLOCK_NMIS
9928 | VMCPU_FF_UNHALT ));
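                        /* Added note: the masking above deliberately ignores force flags that
                           IEM can leave pending while it keeps interpreting (CR3 sync, TLB
                           flush, interrupt inhibition, NMI blocking, unhalt); any remaining
                           flag forces a break out of the inner loop below. */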
9929
9930 if (RT_LIKELY( ( !fCpu
9931 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9932 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9933 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9934 {
9935 if (cMaxInstructionsGccStupidity-- > 0)
9936 {
9937                            /* Poll timers every now and then according to the caller's specs. */
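                            /* Added note: cPollRate is asserted to be a power of two minus one
                               at the top of this function, so the AND below lets the timer poll
                               run roughly once every cPollRate + 1 instructions. */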
9938 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9939 || !TMTimerPollBool(pVM, pVCpu))
9940 {
9941 Assert(pVCpu->iem.s.cActiveMappings == 0);
9942 iemReInitDecoder(pVCpu);
9943 continue;
9944 }
9945 }
9946 }
9947 }
9948 Assert(pVCpu->iem.s.cActiveMappings == 0);
9949 }
9950 else if (pVCpu->iem.s.cActiveMappings > 0)
9951 iemMemRollback(pVCpu);
9952 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9953 break;
9954 }
9955 }
9956#ifdef IEM_WITH_SETJMP
9957 else
9958 {
9959 if (pVCpu->iem.s.cActiveMappings > 0)
9960 iemMemRollback(pVCpu);
9961# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9962 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9963# endif
9964 pVCpu->iem.s.cLongJumps++;
9965 }
9966 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9967#endif
9968
9969 /*
9970 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9971 */
9972 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9973 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9974 }
9975 else
9976 {
9977 if (pVCpu->iem.s.cActiveMappings > 0)
9978 iemMemRollback(pVCpu);
9979
9980#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9981 /*
9982 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9983 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9984 */
9985 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9986#endif
9987 }
9988
9989 /*
9990 * Maybe re-enter raw-mode and log.
9991 */
9992 if (rcStrict != VINF_SUCCESS)
9993 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9994 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9995 if (pcInstructions)
9996 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9997 return rcStrict;
9998}
9999
10000
10001/**
10002 * Interface used by EMExecuteExec, does exit statistics and limits.
10003 *
10004 * @returns Strict VBox status code.
10005 * @param pVCpu The cross context virtual CPU structure.
10006 * @param fWillExit To be defined.
10007 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
10008 * @param cMaxInstructions Maximum number of instructions to execute.
10009 * @param cMaxInstructionsWithoutExits
10010 * The max number of instructions without exits.
10011 * @param pStats Where to return statistics.
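 *
 * @note  Illustrative only (hypothetical parameter values): EM might invoke this
 *        along the lines of
 *            IEMEXECFOREXITSTATS Stats;
 *            VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 1, 4096, 2048, &Stats);
 *        and then use Stats.cExits and Stats.cMaxExitDistance to decide whether
 *        to keep interpreting or return to hardware-assisted execution.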
10012 */
10013VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
10014 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
10015{
10016 NOREF(fWillExit); /** @todo define flexible exit crits */
10017
10018 /*
10019 * Initialize return stats.
10020 */
10021 pStats->cInstructions = 0;
10022 pStats->cExits = 0;
10023 pStats->cMaxExitDistance = 0;
10024 pStats->cReserved = 0;
10025
10026 /*
10027 * Initial decoder init w/ prefetch, then setup setjmp.
10028 */
10029 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10030 if (rcStrict == VINF_SUCCESS)
10031 {
10032#ifdef IEM_WITH_SETJMP
10033 jmp_buf JmpBuf;
10034 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
10035 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
10036 pVCpu->iem.s.cActiveMappings = 0;
10037 if ((rcStrict = setjmp(JmpBuf)) == 0)
10038#endif
10039 {
10040#ifdef IN_RING0
10041 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
10042#endif
10043 uint32_t cInstructionSinceLastExit = 0;
10044
10045 /*
10046 * The run loop. We limit ourselves to 4096 instructions right now.
10047 */
10048 PVM pVM = pVCpu->CTX_SUFF(pVM);
10049 for (;;)
10050 {
10051 /*
10052 * Log the state.
10053 */
10054#ifdef LOG_ENABLED
10055 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
10056#endif
10057
10058 /*
10059 * Do the decoding and emulation.
10060 */
10061 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
10062
10063 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10064 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10065
10066 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
10067 && cInstructionSinceLastExit > 0 /* don't count the first */ )
10068 {
10069 pStats->cExits += 1;
10070 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
10071 pStats->cMaxExitDistance = cInstructionSinceLastExit;
10072 cInstructionSinceLastExit = 0;
10073 }
10074
10075 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10076 {
10077 Assert(pVCpu->iem.s.cActiveMappings == 0);
10078 pVCpu->iem.s.cInstructions++;
10079 pStats->cInstructions++;
10080 cInstructionSinceLastExit++;
10081 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10082 {
10083 uint64_t fCpu = pVCpu->fLocalForcedActions
10084 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10085 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10086 | VMCPU_FF_TLB_FLUSH
10087 | VMCPU_FF_INHIBIT_INTERRUPTS
10088 | VMCPU_FF_BLOCK_NMIS
10089 | VMCPU_FF_UNHALT ));
10090
10091 if (RT_LIKELY( ( ( !fCpu
10092 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10093 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10094 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10095 || pStats->cInstructions < cMinInstructions))
10096 {
10097 if (pStats->cInstructions < cMaxInstructions)
10098 {
10099 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10100 {
10101#ifdef IN_RING0
10102 if ( !fCheckPreemptionPending
10103 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10104#endif
10105 {
10106 Assert(pVCpu->iem.s.cActiveMappings == 0);
10107 iemReInitDecoder(pVCpu);
10108 continue;
10109 }
10110#ifdef IN_RING0
10111 rcStrict = VINF_EM_RAW_INTERRUPT;
10112 break;
10113#endif
10114 }
10115 }
10116 }
10117 Assert(!(fCpu & VMCPU_FF_IEM));
10118 }
10119 Assert(pVCpu->iem.s.cActiveMappings == 0);
10120 }
10121 else if (pVCpu->iem.s.cActiveMappings > 0)
10122 iemMemRollback(pVCpu);
10123 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10124 break;
10125 }
10126 }
10127#ifdef IEM_WITH_SETJMP
10128 else
10129 {
10130 if (pVCpu->iem.s.cActiveMappings > 0)
10131 iemMemRollback(pVCpu);
10132 pVCpu->iem.s.cLongJumps++;
10133 }
10134 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
10135#endif
10136
10137 /*
10138 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10139 */
10140 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10141 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10142 }
10143 else
10144 {
10145 if (pVCpu->iem.s.cActiveMappings > 0)
10146 iemMemRollback(pVCpu);
10147
10148#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10149 /*
10150 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10151 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10152 */
10153 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10154#endif
10155 }
10156
10157 /*
10158 * Maybe re-enter raw-mode and log.
10159 */
10160 if (rcStrict != VINF_SUCCESS)
10161 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10162 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10163 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10164 return rcStrict;
10165}
10166
10167
10168/**
10169 * Injects a trap, fault, abort, software interrupt or external interrupt.
10170 *
10171 * The parameter list matches TRPMQueryTrapAll pretty closely.
10172 *
10173 * @returns Strict VBox status code.
10174 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10175 * @param u8TrapNo The trap number.
10176 * @param enmType What type is it (trap/fault/abort), software
10177 * interrupt or hardware interrupt.
10178 * @param uErrCode The error code if applicable.
10179 * @param uCr2 The CR2 value if applicable.
10180 * @param cbInstr The instruction length (only relevant for
10181 * software interrupts).
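 *
 * @note  Illustrative only: a caller forwarding a pending TRPM event is assumed
 *        to do roughly
 *            VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
 *        as IEMInjectTrpmEvent below does.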
10182 */
10183VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10184 uint8_t cbInstr)
10185{
10186 iemInitDecoder(pVCpu, false, false);
10187#ifdef DBGFTRACE_ENABLED
10188 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10189 u8TrapNo, enmType, uErrCode, uCr2);
10190#endif
10191
10192 uint32_t fFlags;
10193 switch (enmType)
10194 {
10195 case TRPM_HARDWARE_INT:
10196 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10197 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10198 uErrCode = uCr2 = 0;
10199 break;
10200
10201 case TRPM_SOFTWARE_INT:
10202 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10203 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10204 uErrCode = uCr2 = 0;
10205 break;
10206
10207 case TRPM_TRAP:
10208 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10209 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10210 if (u8TrapNo == X86_XCPT_PF)
10211 fFlags |= IEM_XCPT_FLAGS_CR2;
10212 switch (u8TrapNo)
10213 {
10214 case X86_XCPT_DF:
10215 case X86_XCPT_TS:
10216 case X86_XCPT_NP:
10217 case X86_XCPT_SS:
10218 case X86_XCPT_PF:
10219 case X86_XCPT_AC:
10220 case X86_XCPT_GP:
10221 fFlags |= IEM_XCPT_FLAGS_ERR;
10222 break;
10223 }
10224 break;
10225
10226 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10227 }
10228
10229 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10230
10231 if (pVCpu->iem.s.cActiveMappings > 0)
10232 iemMemRollback(pVCpu);
10233
10234 return rcStrict;
10235}
10236
10237
10238/**
10239 * Injects the active TRPM event.
10240 *
10241 * @returns Strict VBox status code.
10242 * @param pVCpu The cross context virtual CPU structure.
10243 */
10244VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10245{
10246#ifndef IEM_IMPLEMENTS_TASKSWITCH
10247 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10248#else
10249 uint8_t u8TrapNo;
10250 TRPMEVENT enmType;
10251 uint32_t uErrCode;
10252 RTGCUINTPTR uCr2;
10253 uint8_t cbInstr;
10254 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10255 if (RT_FAILURE(rc))
10256 return rc;
10257
10258 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10259 * ICEBP \#DB injection as a special case. */
10260 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10261#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10262 if (rcStrict == VINF_SVM_VMEXIT)
10263 rcStrict = VINF_SUCCESS;
10264#endif
10265#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10266 if (rcStrict == VINF_VMX_VMEXIT)
10267 rcStrict = VINF_SUCCESS;
10268#endif
10269 /** @todo Are there any other codes that imply the event was successfully
10270 * delivered to the guest? See @bugref{6607}. */
10271 if ( rcStrict == VINF_SUCCESS
10272 || rcStrict == VINF_IEM_RAISED_XCPT)
10273 TRPMResetTrap(pVCpu);
10274
10275 return rcStrict;
10276#endif
10277}
10278
10279
10280VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10281{
10282 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10283 return VERR_NOT_IMPLEMENTED;
10284}
10285
10286
10287VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10288{
10289 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10290 return VERR_NOT_IMPLEMENTED;
10291}
10292
10293
10294#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
10295/**
10296 * Executes a IRET instruction with default operand size.
10297 *
10298 * This is for PATM.
10299 *
10300 * @returns VBox status code.
10301 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10302 * @param pCtxCore The register frame.
10303 */
10304VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore)
10305{
10306 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10307
10308 iemCtxCoreToCtx(pCtx, pCtxCore);
10309 iemInitDecoder(pVCpu);
10310 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
10311 if (rcStrict == VINF_SUCCESS)
10312 iemCtxToCtxCore(pCtxCore, pCtx);
10313 else
10314 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10315 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10316 return rcStrict;
10317}
10318#endif
10319
10320
10321/**
10322 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10323 *
10324 * This API ASSUMES that the caller has already verified that the guest code is
10325 * allowed to access the I/O port. (The I/O port is in the DX register in the
10326 * guest state.)
10327 *
10328 * @returns Strict VBox status code.
10329 * @param pVCpu The cross context virtual CPU structure.
10330 * @param cbValue The size of the I/O port access (1, 2, or 4).
10331 * @param enmAddrMode The addressing mode.
10332 * @param fRepPrefix Indicates whether a repeat prefix is used
10333 * (doesn't matter which for this instruction).
10334 * @param cbInstr The instruction length in bytes.
10335 * @param   iEffSeg     The effective segment register (index).
10336 * @param fIoChecked Whether the access to the I/O port has been
10337 * checked or not. It's typically checked in the
10338 * HM scenario.
10339 */
10340VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10341 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10342{
10343 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10344 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10345
10346 /*
10347 * State init.
10348 */
10349 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10350
10351 /*
10352 * Switch orgy for getting to the right handler.
10353 */
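    /* Added note: the workers below follow the naming scheme
       iemCImpl_[rep_]outs_op<8|16|32>_addr<16|32|64>, i.e. one handler per
       operand-size / address-size combination. */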
10354 VBOXSTRICTRC rcStrict;
10355 if (fRepPrefix)
10356 {
10357 switch (enmAddrMode)
10358 {
10359 case IEMMODE_16BIT:
10360 switch (cbValue)
10361 {
10362 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10363 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10364 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10365 default:
10366 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10367 }
10368 break;
10369
10370 case IEMMODE_32BIT:
10371 switch (cbValue)
10372 {
10373 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10374 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10375 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10376 default:
10377 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10378 }
10379 break;
10380
10381 case IEMMODE_64BIT:
10382 switch (cbValue)
10383 {
10384 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10385 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10386 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10387 default:
10388 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10389 }
10390 break;
10391
10392 default:
10393 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10394 }
10395 }
10396 else
10397 {
10398 switch (enmAddrMode)
10399 {
10400 case IEMMODE_16BIT:
10401 switch (cbValue)
10402 {
10403 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10404 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10405 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10406 default:
10407 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10408 }
10409 break;
10410
10411 case IEMMODE_32BIT:
10412 switch (cbValue)
10413 {
10414 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10415 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10416 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10417 default:
10418 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10419 }
10420 break;
10421
10422 case IEMMODE_64BIT:
10423 switch (cbValue)
10424 {
10425 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10426 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10427 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10428 default:
10429 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10430 }
10431 break;
10432
10433 default:
10434 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10435 }
10436 }
10437
10438 if (pVCpu->iem.s.cActiveMappings)
10439 iemMemRollback(pVCpu);
10440
10441 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10442}
10443
10444
10445/**
10446 * Interface for HM and EM for executing string I/O IN (read) instructions.
10447 *
10448 * This API ASSUMES that the caller has already verified that the guest code is
10449 * allowed to access the I/O port. (The I/O port is in the DX register in the
10450 * guest state.)
10451 *
10452 * @returns Strict VBox status code.
10453 * @param pVCpu The cross context virtual CPU structure.
10454 * @param cbValue The size of the I/O port access (1, 2, or 4).
10455 * @param enmAddrMode The addressing mode.
10456 * @param fRepPrefix Indicates whether a repeat prefix is used
10457 * (doesn't matter which for this instruction).
10458 * @param cbInstr The instruction length in bytes.
10459 * @param fIoChecked Whether the access to the I/O port has been
10460 * checked or not. It's typically checked in the
10461 * HM scenario.
10462 */
10463VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10464 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10465{
10466 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10467
10468 /*
10469 * State init.
10470 */
10471 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10472
10473 /*
10474 * Switch orgy for getting to the right handler.
10475 */
10476 VBOXSTRICTRC rcStrict;
10477 if (fRepPrefix)
10478 {
10479 switch (enmAddrMode)
10480 {
10481 case IEMMODE_16BIT:
10482 switch (cbValue)
10483 {
10484 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10485 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10486 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10487 default:
10488 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10489 }
10490 break;
10491
10492 case IEMMODE_32BIT:
10493 switch (cbValue)
10494 {
10495 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10496 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10497 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10498 default:
10499 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10500 }
10501 break;
10502
10503 case IEMMODE_64BIT:
10504 switch (cbValue)
10505 {
10506 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10507 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10508 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10509 default:
10510 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10511 }
10512 break;
10513
10514 default:
10515 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10516 }
10517 }
10518 else
10519 {
10520 switch (enmAddrMode)
10521 {
10522 case IEMMODE_16BIT:
10523 switch (cbValue)
10524 {
10525 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10526 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10527 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10528 default:
10529 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10530 }
10531 break;
10532
10533 case IEMMODE_32BIT:
10534 switch (cbValue)
10535 {
10536 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10537 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10538 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10539 default:
10540 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10541 }
10542 break;
10543
10544 case IEMMODE_64BIT:
10545 switch (cbValue)
10546 {
10547 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10548 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10549 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10550 default:
10551 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10552 }
10553 break;
10554
10555 default:
10556 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10557 }
10558 }
10559
10560 if ( pVCpu->iem.s.cActiveMappings == 0
10561 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10562 { /* likely */ }
10563 else
10564 {
10565 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10566 iemMemRollback(pVCpu);
10567 }
10568 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10569}
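
/*
 * Illustrative only: a minimal sketch of how a hardware-assisted execution
 * (HM) I/O exit handler might forward an INS/REP INS it cannot handle on its
 * own.  The function name and the way the exit information (operand size,
 * address mode, REP prefix, instruction length) is obtained are assumptions
 * for the example rather than actual VBox code.
 */
static VBOXSTRICTRC exampleForwardStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
                                               bool fRepPrefix, uint8_t cbInstr)
{
    /* The HM exit handler has typically already checked the I/O port access,
       so fIoChecked=true tells IEM it can skip its own permission check. */
    return IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRepPrefix, cbInstr, true /*fIoChecked*/);
}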
10570
10571
10572/**
10573 * Interface for rawmode to execute an OUT instruction.
10574 *
10575 * @returns Strict VBox status code.
10576 * @param pVCpu The cross context virtual CPU structure.
10577 * @param cbInstr The instruction length in bytes.
10578 * @param u16Port The port to write to.
10579 * @param fImm Whether the port is specified using an immediate operand or
10580 * using the implicit DX register.
10581 * @param cbReg The register size.
10582 *
10583 * @remarks In ring-0 not all of the state needs to be synced in.
10584 */
10585VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10586{
10587 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10588 Assert(cbReg <= 4 && cbReg != 3);
10589
10590 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10591 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
10592 Assert(!pVCpu->iem.s.cActiveMappings);
10593 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10594}
10595
10596
10597/**
10598 * Interface for rawmode to execute an IN instruction.
10599 *
10600 * @returns Strict VBox status code.
10601 * @param pVCpu The cross context virtual CPU structure.
10602 * @param cbInstr The instruction length in bytes.
10603 * @param u16Port The port to read from.
10604 * @param fImm Whether the port is specified using an immediate operand or
10605 * using the implicit DX register.
10606 * @param cbReg The register size.
10607 */
10608VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10609{
10610 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10611 Assert(cbReg <= 4 && cbReg != 3);
10612
10613 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10614 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
10615 Assert(!pVCpu->iem.s.cActiveMappings);
10616 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10617}
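
/*
 * Illustrative only: a minimal sketch of dispatching an already-decoded port
 * I/O instruction to the two interfaces above.  The function name and the
 * fWrite flag are assumptions for the example.
 */
static VBOXSTRICTRC exampleDispatchPortIo(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port,
                                          bool fImm, uint8_t cbReg, bool fWrite)
{
    /* cbReg is the operand size in bytes: 1, 2 or 4. */
    if (fWrite)
        return IEMExecDecodedOut(pVCpu, cbInstr, u16Port, fImm, cbReg); /* OUT: guest register -> port */
    return IEMExecDecodedIn(pVCpu, cbInstr, u16Port, fImm, cbReg);      /* IN:  port -> guest register */
}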
10618
10619
10620/**
10621 * Interface for HM and EM to write to a CRx register.
10622 *
10623 * @returns Strict VBox status code.
10624 * @param pVCpu The cross context virtual CPU structure.
10625 * @param cbInstr The instruction length in bytes.
10626 * @param iCrReg The control register number (destination).
10627 * @param iGReg The general purpose register number (source).
10628 *
10629 * @remarks In ring-0 not all of the state needs to be synced in.
10630 */
10631VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10632{
10633 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10634 Assert(iCrReg < 16);
10635 Assert(iGReg < 16);
10636
10637 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10638 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10639 Assert(!pVCpu->iem.s.cActiveMappings);
10640 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10641}
10642
10643
10644/**
10645 * Interface for HM and EM to read from a CRx register.
10646 *
10647 * @returns Strict VBox status code.
10648 * @param pVCpu The cross context virtual CPU structure.
10649 * @param cbInstr The instruction length in bytes.
10650 * @param iGReg The general purpose register number (destination).
10651 * @param iCrReg The control register number (source).
10652 *
10653 * @remarks In ring-0 not all of the state needs to be synced in.
10654 */
10655VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10656{
10657 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10658 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10659 | CPUMCTX_EXTRN_APIC_TPR);
10660 Assert(iCrReg < 16);
10661 Assert(iGReg < 16);
10662
10663 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10664 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10665 Assert(!pVCpu->iem.s.cActiveMappings);
10666 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10667}
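
/*
 * Illustrative only: a minimal sketch of routing a MOV CRx exit to the two
 * interfaces above.  The function name and the fRead/iCrReg/iGReg inputs are
 * assumptions for the example; real callers derive them from the VM-exit
 * information.
 */
static VBOXSTRICTRC exampleHandleMovCRx(PVMCPUCC pVCpu, uint8_t cbInstr, bool fRead,
                                        uint8_t iCrReg, uint8_t iGReg)
{
    if (fRead)
        return IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);  /* CRx -> general purpose register */
    return IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);     /* general purpose register -> CRx */
}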
10668
10669
10670/**
10671 * Interface for HM and EM to clear the CR0[TS] bit.
10672 *
10673 * @returns Strict VBox status code.
10674 * @param pVCpu The cross context virtual CPU structure.
10675 * @param cbInstr The instruction length in bytes.
10676 *
10677 * @remarks In ring-0 not all of the state needs to be synced in.
10678 */
10679VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10680{
10681 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10682
10683 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10684 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10685 Assert(!pVCpu->iem.s.cActiveMappings);
10686 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10687}
10688
10689
10690/**
10691 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10692 *
10693 * @returns Strict VBox status code.
10694 * @param pVCpu The cross context virtual CPU structure.
10695 * @param cbInstr The instruction length in bytes.
10696 * @param uValue The value to load into CR0.
10697 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10698 * memory operand. Otherwise pass NIL_RTGCPTR.
10699 *
10700 * @remarks In ring-0 not all of the state needs to be synced in.
10701 */
10702VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10703{
10704 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10705
10706 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10707 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10708 Assert(!pVCpu->iem.s.cActiveMappings);
10709 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10710}
10711
10712
10713/**
10714 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10715 *
10716 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10717 *
10718 * @returns Strict VBox status code.
10719 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10720 * @param cbInstr The instruction length in bytes.
10721 * @remarks In ring-0 not all of the state needs to be synced in.
10722 * @thread EMT(pVCpu)
10723 */
10724VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10725{
10726 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10727
10728 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10729 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10730 Assert(!pVCpu->iem.s.cActiveMappings);
10731 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10732}
10733
10734
10735/**
10736 * Interface for HM and EM to emulate the WBINVD instruction.
10737 *
10738 * @returns Strict VBox status code.
10739 * @param pVCpu The cross context virtual CPU structure.
10740 * @param cbInstr The instruction length in bytes.
10741 *
10742 * @remarks In ring-0 not all of the state needs to be synced in.
10743 */
10744VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10745{
10746 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10747
10748 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10749 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10750 Assert(!pVCpu->iem.s.cActiveMappings);
10751 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10752}
10753
10754
10755/**
10756 * Interface for HM and EM to emulate the INVD instruction.
10757 *
10758 * @returns Strict VBox status code.
10759 * @param pVCpu The cross context virtual CPU structure.
10760 * @param cbInstr The instruction length in bytes.
10761 *
10762 * @remarks In ring-0 not all of the state needs to be synced in.
10763 */
10764VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10765{
10766 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10767
10768 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10769 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10770 Assert(!pVCpu->iem.s.cActiveMappings);
10771 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10772}
10773
10774
10775/**
10776 * Interface for HM and EM to emulate the INVLPG instruction.
10777 *
10778 * @returns Strict VBox status code.
10779 * @retval VINF_PGM_SYNC_CR3
10780 *
10781 * @param pVCpu The cross context virtual CPU structure.
10782 * @param cbInstr The instruction length in bytes.
10783 * @param GCPtrPage The effective address of the page to invalidate.
10784 *
10785 * @remarks In ring-0 not all of the state needs to be synced in.
10786 */
10787VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10788{
10789 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10790
10791 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10792 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10793 Assert(!pVCpu->iem.s.cActiveMappings);
10794 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10795}
10796
10797
10798/**
10799 * Interface for HM and EM to emulate the INVPCID instruction.
10800 *
10801 * @returns Strict VBox status code.
10802 * @retval VINF_PGM_SYNC_CR3
10803 *
10804 * @param pVCpu The cross context virtual CPU structure.
10805 * @param cbInstr The instruction length in bytes.
10806 * @param iEffSeg The effective segment register.
10807 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10808 * @param uType The invalidation type.
10809 *
10810 * @remarks In ring-0 not all of the state needs to be synced in.
10811 */
10812VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10813 uint64_t uType)
10814{
10815 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10816
10817 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10818 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10819 Assert(!pVCpu->iem.s.cActiveMappings);
10820 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10821}
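
/*
 * Illustrative only: a minimal sketch of forwarding an INVLPG exit.  The
 * function name is an assumption for the example.  Note that both this
 * interface and the INVPCID one above document VINF_PGM_SYNC_CR3 as a
 * possible informational status.
 */
static VBOXSTRICTRC exampleHandleInvlpgExit(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
{
    /* Return the status unmodified; VINF_PGM_SYNC_CR3 indicates the caller
       should let PGM resynchronize the (shadow) paging structures before
       resuming the guest. */
    return IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage);
}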
10822
10823
10824/**
10825 * Interface for HM and EM to emulate the CPUID instruction.
10826 *
10827 * @returns Strict VBox status code.
10828 *
10829 * @param pVCpu The cross context virtual CPU structure.
10830 * @param cbInstr The instruction length in bytes.
10831 *
10832 * @remarks Not all of the state needs to be synced in; just the usual plus RAX and RCX.
10833 */
10834VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10835{
10836 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10837 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10838
10839 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10840 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10841 Assert(!pVCpu->iem.s.cActiveMappings);
10842 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10843}
10844
10845
10846/**
10847 * Interface for HM and EM to emulate the RDPMC instruction.
10848 *
10849 * @returns Strict VBox status code.
10850 *
10851 * @param pVCpu The cross context virtual CPU structure.
10852 * @param cbInstr The instruction length in bytes.
10853 *
10854 * @remarks Not all of the state needs to be synced in.
10855 */
10856VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10857{
10858 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10859 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10860
10861 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10862 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10863 Assert(!pVCpu->iem.s.cActiveMappings);
10864 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10865}
10866
10867
10868/**
10869 * Interface for HM and EM to emulate the RDTSC instruction.
10870 *
10871 * @returns Strict VBox status code.
10872 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10873 *
10874 * @param pVCpu The cross context virtual CPU structure.
10875 * @param cbInstr The instruction length in bytes.
10876 *
10877 * @remarks Not all of the state needs to be synced in.
10878 */
10879VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10880{
10881 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10882 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10883
10884 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10885 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10886 Assert(!pVCpu->iem.s.cActiveMappings);
10887 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10888}
10889
10890
10891/**
10892 * Interface for HM and EM to emulate the RDTSCP instruction.
10893 *
10894 * @returns Strict VBox status code.
10895 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10896 *
10897 * @param pVCpu The cross context virtual CPU structure.
10898 * @param cbInstr The instruction length in bytes.
10899 *
10900 * @remarks Not all of the state needs to be synced in. It is recommended
10901 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10902 */
10903VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10904{
10905 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10906 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10907
10908 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10909 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10910 Assert(!pVCpu->iem.s.cActiveMappings);
10911 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10912}
10913
10914
10915/**
10916 * Interface for HM and EM to emulate the RDMSR instruction.
10917 *
10918 * @returns Strict VBox status code.
10919 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10920 *
10921 * @param pVCpu The cross context virtual CPU structure.
10922 * @param cbInstr The instruction length in bytes.
10923 *
10924 * @remarks Not all of the state needs to be synced in. Requires RCX and
10925 * (currently) all MSRs.
10926 */
10927VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10928{
10929 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10930 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10931
10932 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10933 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10934 Assert(!pVCpu->iem.s.cActiveMappings);
10935 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10936}
10937
10938
10939/**
10940 * Interface for HM and EM to emulate the WRMSR instruction.
10941 *
10942 * @returns Strict VBox status code.
10943 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10944 *
10945 * @param pVCpu The cross context virtual CPU structure.
10946 * @param cbInstr The instruction length in bytes.
10947 *
10948 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10949 * and (currently) all MSRs.
10950 */
10951VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10952{
10953 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10954 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10955 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10956
10957 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10958 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10959 Assert(!pVCpu->iem.s.cActiveMappings);
10960 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10961}
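
/*
 * Illustrative only: a minimal sketch of routing an MSR-access exit to the
 * two interfaces above.  The function name and the fWrite flag are
 * assumptions for the example; IEM takes ECX (and EDX:EAX for writes) from
 * the guest context itself.
 */
static VBOXSTRICTRC exampleHandleMsrExit(PVMCPUCC pVCpu, uint8_t cbInstr, bool fWrite)
{
    if (fWrite)
        return IEMExecDecodedWrmsr(pVCpu, cbInstr);  /* WRMSR: EDX:EAX -> MSR[ECX] */
    return IEMExecDecodedRdmsr(pVCpu, cbInstr);      /* RDMSR: MSR[ECX] -> EDX:EAX */
}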
10962
10963
10964/**
10965 * Interface for HM and EM to emulate the MONITOR instruction.
10966 *
10967 * @returns Strict VBox status code.
10968 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10969 *
10970 * @param pVCpu The cross context virtual CPU structure.
10971 * @param cbInstr The instruction length in bytes.
10972 *
10973 * @remarks Not all of the state needs to be synced in.
10974 * @remarks ASSUMES the default DS segment is used and that no segment
10975 * override prefixes are present.
10976 */
10977VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10978{
10979 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10980 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10981
10982 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10983 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10984 Assert(!pVCpu->iem.s.cActiveMappings);
10985 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10986}
10987
10988
10989/**
10990 * Interface for HM and EM to emulate the MWAIT instruction.
10991 *
10992 * @returns Strict VBox status code.
10993 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10994 *
10995 * @param pVCpu The cross context virtual CPU structure.
10996 * @param cbInstr The instruction length in bytes.
10997 *
10998 * @remarks Not all of the state needs to be synced in.
10999 */
11000VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
11001{
11002 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11003 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
11004
11005 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11006 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
11007 Assert(!pVCpu->iem.s.cActiveMappings);
11008 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11009}
11010
11011
11012/**
11013 * Interface for HM and EM to emulate the HLT instruction.
11014 *
11015 * @returns Strict VBox status code.
11016 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11017 *
11018 * @param pVCpu The cross context virtual CPU structure.
11019 * @param cbInstr The instruction length in bytes.
11020 *
11021 * @remarks Not all of the state needs to be synced in.
11022 */
11023VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
11024{
11025 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11026
11027 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11028 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
11029 Assert(!pVCpu->iem.s.cActiveMappings);
11030 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11031}
11032
11033
11034/**
11035 * Checks if IEM is in the process of delivering an event (interrupt or
11036 * exception).
11037 *
11038 * @returns true if we're in the process of raising an interrupt or exception,
11039 * false otherwise.
11040 * @param pVCpu The cross context virtual CPU structure.
11041 * @param puVector Where to store the vector associated with the
11042 * currently delivered event, optional.
11043 * @param pfFlags Where to store the event delivery flags (see
11044 * IEM_XCPT_FLAGS_XXX), optional.
11045 * @param puErr Where to store the error code associated with the
11046 * event, optional.
11047 * @param puCr2 Where to store the CR2 associated with the event,
11048 * optional.
11049 * @remarks The caller should check the flags to determine if the error code and
11050 * CR2 are valid for the event.
11051 */
11052VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
11053{
11054 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
11055 if (fRaisingXcpt)
11056 {
11057 if (puVector)
11058 *puVector = pVCpu->iem.s.uCurXcpt;
11059 if (pfFlags)
11060 *pfFlags = pVCpu->iem.s.fCurXcpt;
11061 if (puErr)
11062 *puErr = pVCpu->iem.s.uCurXcptErr;
11063 if (puCr2)
11064 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
11065 }
11066 return fRaisingXcpt;
11067}
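
/*
 * Illustrative only: a minimal sketch of querying IEM for an event that is
 * currently being delivered, e.g. when assembling event-injection state for
 * the next VM-entry.  The function name is an assumption for the example.
 */
static bool exampleQueryPendingXcpt(PVMCPUCC pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErrCd  = 0;
    uint64_t uCr2    = 0;
    if (!IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErrCd, &uCr2))
        return false; /* nothing is being delivered right now */
    /* Check fFlags (IEM_XCPT_FLAGS_XXX) before relying on uErrCd and uCr2, as
       not every event has an error code or a faulting address. */
    Log(("Pending event: uVector=%#x fFlags=%#x uErrCd=%#x uCr2=%RX64\n", uVector, fFlags, uErrCd, uCr2));
    return true;
}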
11068
11069#ifdef IN_RING3
11070
11071/**
11072 * Handles the unlikely and probably fatal merge cases.
11073 *
11074 * @returns Merged status code.
11075 * @param rcStrict Current EM status code.
11076 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11077 * with @a rcStrict.
11078 * @param iMemMap The memory mapping index. For error reporting only.
11079 * @param pVCpu The cross context virtual CPU structure of the calling
11080 * thread, for error reporting only.
11081 */
11082DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11083 unsigned iMemMap, PVMCPUCC pVCpu)
11084{
11085 if (RT_FAILURE_NP(rcStrict))
11086 return rcStrict;
11087
11088 if (RT_FAILURE_NP(rcStrictCommit))
11089 return rcStrictCommit;
11090
11091 if (rcStrict == rcStrictCommit)
11092 return rcStrictCommit;
11093
11094 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11095 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11096 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11097 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11098 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11099 return VERR_IOM_FF_STATUS_IPE;
11100}
11101
11102
11103/**
11104 * Helper for IOMR3ProcessForceFlag.
11105 *
11106 * @returns Merged status code.
11107 * @param rcStrict Current EM status code.
11108 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11109 * with @a rcStrict.
11110 * @param iMemMap The memory mapping index. For error reporting only.
11111 * @param pVCpu The cross context virtual CPU structure of the calling
11112 * thread, for error reporting only.
11113 */
11114DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11115{
11116 /* Simple. */
11117 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11118 return rcStrictCommit;
11119
11120 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11121 return rcStrict;
11122
11123 /* EM scheduling status codes. */
11124 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11125 && rcStrict <= VINF_EM_LAST))
11126 {
11127 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11128 && rcStrictCommit <= VINF_EM_LAST))
11129 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11130 }
11131
11132 /* Unlikely */
11133 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11134}
11135
11136
11137/**
11138 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11139 *
11140 * @returns Merge between @a rcStrict and what the commit operation returned.
11141 * @param pVM The cross context VM structure.
11142 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11143 * @param rcStrict The status code returned by ring-0 or raw-mode.
11144 */
11145VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11146{
11147 /*
11148 * Reset the pending commit.
11149 */
11150 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11151 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11152 ("%#x %#x %#x\n",
11153 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11154 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11155
11156 /*
11157 * Commit the pending bounce buffers (usually just one).
11158 */
11159 unsigned cBufs = 0;
11160 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11161 while (iMemMap-- > 0)
11162 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11163 {
11164 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11165 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11166 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11167
11168 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11169 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11170 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11171
11172 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11173 {
11174 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11175 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11176 pbBuf,
11177 cbFirst,
11178 PGMACCESSORIGIN_IEM);
11179 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11180 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11181 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11182 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11183 }
11184
11185 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11186 {
11187 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11188 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11189 pbBuf + cbFirst,
11190 cbSecond,
11191 PGMACCESSORIGIN_IEM);
11192 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11193 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11194 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11195 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11196 }
11197 cBufs++;
11198 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11199 }
11200
11201 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11202 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11203 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11204 pVCpu->iem.s.cActiveMappings = 0;
11205 return rcStrict;
11206}
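
/*
 * Illustrative only: a minimal sketch of how ring-3 force-flag processing
 * might invoke the function above when VMCPU_FF_IEM is pending.  The function
 * name and its placement in the EM loop are assumptions for the example.
 */
static VBOXSTRICTRC exampleProcessIemForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    /* Only call IEMR3ProcessForceFlag when the flag is actually set; it
       asserts that a bounce buffer commit is pending. */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}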
11207
11208#endif /* IN_RING3 */
11209