VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@94866

Last change on this file since 94866 was 94845, checked in by vboxsync, 3 years ago

VMM/IEM: More TLB work. bugref:9898

1/* $Id: IEMAll.cpp 94845 2022-05-05 11:32:46Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed, as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow) : Basic enter/exit IEM state info.
65 * - Level 2 (Log2) : ?
66 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
67 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5) : Decoding details.
69 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7) : iret++ execution logging.
71 * - Level 8 (Log8) : Memory writes.
72 * - Level 9 (Log9) : Memory reads.
73 * - Level 10 (Log10): TLBs.
74 */
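/* Editor's note: an illustrative sketch (not part of the original file) of how the
 * log levels listed above are typically emitted via the LOG_GROUP_IEM macros from
 * VBox/log.h.  The message texts, variables and values here are made up. */
#if 0 /* illustrative sample only, never compiled */
    Log(("iemRaiseXcptOrInt: raising #GP(0)\n"));                 /* level 1: exceptions and other major events */
    LogFlow(("IEMExecOne: enter\n"));                             /* flow: basic enter/exit state info */
    Log4(("decode: %04x:%RX64 mov eax, ebx\n", uCsSel, uRip));    /* level 4: decoded mnemonics w/ EIP */
    Log8(("memwrite: %RGp LB %#x\n", GCPhysDst, cbToWrite));      /* level 8: memory writes */
    Log10(("IEMTlbInvalidateAll\n"));                             /* level 10: TLB activity */
#endif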
75
76/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
77#ifdef _MSC_VER
78# pragma warning(disable:4505)
79#endif
80
81
82/*********************************************************************************************************************************
83* Header Files *
84*********************************************************************************************************************************/
85#define LOG_GROUP LOG_GROUP_IEM
86#define VMCPU_INCL_CPUM_GST_CTX
87#include <VBox/vmm/iem.h>
88#include <VBox/vmm/cpum.h>
89#include <VBox/vmm/apic.h>
90#include <VBox/vmm/pdm.h>
91#include <VBox/vmm/pgm.h>
92#include <VBox/vmm/iom.h>
93#include <VBox/vmm/em.h>
94#include <VBox/vmm/hm.h>
95#include <VBox/vmm/nem.h>
96#include <VBox/vmm/gim.h>
97#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
98# include <VBox/vmm/em.h>
99# include <VBox/vmm/hm_svm.h>
100#endif
101#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
102# include <VBox/vmm/hmvmxinline.h>
103#endif
104#include <VBox/vmm/tm.h>
105#include <VBox/vmm/dbgf.h>
106#include <VBox/vmm/dbgftrace.h>
107#include "IEMInternal.h"
108#include <VBox/vmm/vmcc.h>
109#include <VBox/log.h>
110#include <VBox/err.h>
111#include <VBox/param.h>
112#include <VBox/dis.h>
113#include <VBox/disopcode.h>
114#include <iprt/asm-math.h>
115#include <iprt/assert.h>
116#include <iprt/string.h>
117#include <iprt/x86.h>
118
119#include "IEMInline.h"
120
121
122/*********************************************************************************************************************************
123* Structures and Typedefs *
124*********************************************************************************************************************************/
125/**
126 * CPU exception classes.
127 */
128typedef enum IEMXCPTCLASS
129{
130 IEMXCPTCLASS_BENIGN,
131 IEMXCPTCLASS_CONTRIBUTORY,
132 IEMXCPTCLASS_PAGE_FAULT,
133 IEMXCPTCLASS_DOUBLE_FAULT
134} IEMXCPTCLASS;
135
136
137/*********************************************************************************************************************************
138* Global Variables *
139*********************************************************************************************************************************/
140#if defined(IEM_LOG_MEMORY_WRITES)
141/** What IEM just wrote. */
142uint8_t g_abIemWrote[256];
143/** How much IEM just wrote. */
144size_t g_cbIemWrote;
145#endif
146
147
148/*********************************************************************************************************************************
149* Internal Functions *
150*********************************************************************************************************************************/
151static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
152 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
153
154
155/**
156 * Initializes the decoder state.
157 *
158 * iemReInitDecoder is mostly a copy of this function.
159 *
160 * @param pVCpu The cross context virtual CPU structure of the
161 * calling thread.
162 * @param fBypassHandlers Whether to bypass access handlers.
163 * @param fDisregardLock Whether to disregard the LOCK prefix.
164 */
165DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
166{
167 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
168 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
169 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
170 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
171 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
172 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
173 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
174 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
175 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
176 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
177
178 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
179 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
180 pVCpu->iem.s.enmCpuMode = enmMode;
181 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
182 pVCpu->iem.s.enmEffAddrMode = enmMode;
183 if (enmMode != IEMMODE_64BIT)
184 {
185 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
186 pVCpu->iem.s.enmEffOpSize = enmMode;
187 }
188 else
189 {
190 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
191 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
192 }
193 pVCpu->iem.s.fPrefixes = 0;
194 pVCpu->iem.s.uRexReg = 0;
195 pVCpu->iem.s.uRexB = 0;
196 pVCpu->iem.s.uRexIndex = 0;
197 pVCpu->iem.s.idxPrefix = 0;
198 pVCpu->iem.s.uVex3rdReg = 0;
199 pVCpu->iem.s.uVexLength = 0;
200 pVCpu->iem.s.fEvexStuff = 0;
201 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
202#ifdef IEM_WITH_CODE_TLB
203 pVCpu->iem.s.pbInstrBuf = NULL;
204 pVCpu->iem.s.offInstrNextByte = 0;
205 pVCpu->iem.s.offCurInstrStart = 0;
206# ifdef VBOX_STRICT
207 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
208 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
209 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
210# endif
211#else
212 pVCpu->iem.s.offOpcode = 0;
213 pVCpu->iem.s.cbOpcode = 0;
214#endif
215 pVCpu->iem.s.offModRm = 0;
216 pVCpu->iem.s.cActiveMappings = 0;
217 pVCpu->iem.s.iNextMapping = 0;
218 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
219 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
220 pVCpu->iem.s.fDisregardLock = fDisregardLock;
221
222#ifdef DBGFTRACE_ENABLED
223 switch (enmMode)
224 {
225 case IEMMODE_64BIT:
226 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
227 break;
228 case IEMMODE_32BIT:
229 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
230 break;
231 case IEMMODE_16BIT:
232 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
233 break;
234 }
235#endif
236}
237
238
239/**
240 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
241 *
242 * This is mostly a copy of iemInitDecoder.
243 *
244 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
245 */
246DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
247{
248 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
249 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
250 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
251 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
252 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
253 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
254 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
255 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
256 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
257
258 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
259 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
260 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
261 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
262 pVCpu->iem.s.enmEffAddrMode = enmMode;
263 if (enmMode != IEMMODE_64BIT)
264 {
265 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
266 pVCpu->iem.s.enmEffOpSize = enmMode;
267 }
268 else
269 {
270 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
271 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
272 }
273 pVCpu->iem.s.fPrefixes = 0;
274 pVCpu->iem.s.uRexReg = 0;
275 pVCpu->iem.s.uRexB = 0;
276 pVCpu->iem.s.uRexIndex = 0;
277 pVCpu->iem.s.idxPrefix = 0;
278 pVCpu->iem.s.uVex3rdReg = 0;
279 pVCpu->iem.s.uVexLength = 0;
280 pVCpu->iem.s.fEvexStuff = 0;
281 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
282#ifdef IEM_WITH_CODE_TLB
283 if (pVCpu->iem.s.pbInstrBuf)
284 {
285 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
286 - pVCpu->iem.s.uInstrBufPc;
287 if (off < pVCpu->iem.s.cbInstrBufTotal)
288 {
289 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
290 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
291 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
292 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
293 else
294 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
295 }
296 else
297 {
298 pVCpu->iem.s.pbInstrBuf = NULL;
299 pVCpu->iem.s.offInstrNextByte = 0;
300 pVCpu->iem.s.offCurInstrStart = 0;
301 pVCpu->iem.s.cbInstrBuf = 0;
302 pVCpu->iem.s.cbInstrBufTotal = 0;
303 }
304 }
305 else
306 {
307 pVCpu->iem.s.offInstrNextByte = 0;
308 pVCpu->iem.s.offCurInstrStart = 0;
309 pVCpu->iem.s.cbInstrBuf = 0;
310 pVCpu->iem.s.cbInstrBufTotal = 0;
311 }
312#else
313 pVCpu->iem.s.cbOpcode = 0;
314 pVCpu->iem.s.offOpcode = 0;
315#endif
316 pVCpu->iem.s.offModRm = 0;
317 Assert(pVCpu->iem.s.cActiveMappings == 0);
318 pVCpu->iem.s.iNextMapping = 0;
319 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
320 Assert(pVCpu->iem.s.fBypassHandlers == false);
321
322#ifdef DBGFTRACE_ENABLED
323 switch (enmMode)
324 {
325 case IEMMODE_64BIT:
326 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
327 break;
328 case IEMMODE_32BIT:
329 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
330 break;
331 case IEMMODE_16BIT:
332 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
333 break;
334 }
335#endif
336}
337
338
339
340/**
341 * Prefetch opcodes the first time when starting execution.
342 *
343 * @returns Strict VBox status code.
344 * @param pVCpu The cross context virtual CPU structure of the
345 * calling thread.
346 * @param fBypassHandlers Whether to bypass access handlers.
347 * @param fDisregardLock Whether to disregard LOCK prefixes.
348 *
349 * @todo Combine fDisregardLock and fBypassHandlers into a flag parameter and
350 * store them as such.
351 */
352static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock) RT_NOEXCEPT
353{
354 iemInitDecoder(pVCpu, fBypassHandlers, fDisregardLock);
355
356#ifdef IEM_WITH_CODE_TLB
357 /** @todo Do ITLB lookup here. */
358
359#else /* !IEM_WITH_CODE_TLB */
360
361 /*
362 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
363 *
364 * First translate CS:rIP to a physical address.
365 */
366 uint32_t cbToTryRead;
367 RTGCPTR GCPtrPC;
368 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
369 {
370 cbToTryRead = GUEST_PAGE_SIZE;
371 GCPtrPC = pVCpu->cpum.GstCtx.rip;
372 if (IEM_IS_CANONICAL(GCPtrPC))
373 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
374 else
375 return iemRaiseGeneralProtectionFault0(pVCpu);
376 }
377 else
378 {
379 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
380 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
381 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
382 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
383 else
384 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
385 if (cbToTryRead) { /* likely */ }
386 else /* overflowed */
387 {
388 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
389 cbToTryRead = UINT32_MAX;
390 }
391 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
392 Assert(GCPtrPC <= UINT32_MAX);
393 }
394
395 PGMPTWALK Walk;
396 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
397 if (RT_SUCCESS(rc))
398 Assert(Walk.fSucceeded); /* probable. */
399 else
400 {
401 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
402#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
403 if (Walk.fFailed & PGM_WALKFAIL_EPT)
404 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
405#endif
406 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
407 }
408 if ((Walk.fEffective & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
409 else
410 {
411 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
412#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
413 if (Walk.fFailed & PGM_WALKFAIL_EPT)
414 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
415#endif
416 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
417 }
418 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
419 else
420 {
421 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
422#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
423 if (Walk.fFailed & PGM_WALKFAIL_EPT)
424 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
425#endif
426 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
427 }
428 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
429 /** @todo Check reserved bits and such stuff. PGM is better at doing
430 * that, so do it when implementing the guest virtual address
431 * TLB... */
432
433 /*
434 * Read the bytes at this address.
435 */
436 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
437 if (cbToTryRead > cbLeftOnPage)
438 cbToTryRead = cbLeftOnPage;
439 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
440 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
441
442 if (!pVCpu->iem.s.fBypassHandlers)
443 {
444 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
445 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
446 { /* likely */ }
447 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
448 {
449 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
450 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
451 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
452 }
453 else
454 {
455 Log((RT_SUCCESS(rcStrict)
456 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
457 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
458 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
459 return rcStrict;
460 }
461 }
462 else
463 {
464 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
465 if (RT_SUCCESS(rc))
466 { /* likely */ }
467 else
468 {
469 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
470 GCPtrPC, GCPhys, cbToTryRead, rc));
471 return rc;
472 }
473 }
474 pVCpu->iem.s.cbOpcode = cbToTryRead;
475#endif /* !IEM_WITH_CODE_TLB */
476 return VINF_SUCCESS;
477}
478
479
480/**
481 * Invalidates the IEM TLBs.
482 *
483 * This is called internally as well as by PGM when moving GC mappings.
484 *
485 *
486 * @param pVCpu The cross context virtual CPU structure of the calling
487 * thread.
488 */
489VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
490{
491#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
492 Log10(("IEMTlbInvalidateAll\n"));
493# ifdef IEM_WITH_CODE_TLB
494 pVCpu->iem.s.cbInstrBufTotal = 0;
495 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
496 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
497 { /* very likely */ }
498 else
499 {
500 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
501 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
502 while (i-- > 0)
503 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
504 }
505# endif
506
507# ifdef IEM_WITH_DATA_TLB
508 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
509 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
510 { /* very likely */ }
511 else
512 {
513 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
514 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
515 while (i-- > 0)
516 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
517 }
518# endif
519#else
520 RT_NOREF(pVCpu);
521#endif
522}
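/* Editor's note: an illustrative sketch (not part of the original file) of why the
 * revision bump above invalidates everything: entry tags are the page number combined
 * with the revision current at insertion time, so after the bump no existing tag can
 * match a fresh lookup.  A rough lookup-side sketch, reusing the same helpers the
 * code-TLB fetch path uses further down; GCPtrMem is a made-up variable. */
#if 0 /* illustrative sample only, never compiled */
    uint64_t const     uTag  = IEMTLB_CALC_TAG(&pVCpu->iem.s.DataTlb, GCPtrMem);
    PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
    if (pTlbe->uTag == uTag)
    {   /* hit: entry was inserted under the current uTlbRevision */ }
    else
    {   /* miss: never filled, or made stale by IEMTlbInvalidateAll/IEMTlbInvalidatePage */ }
#endif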
523
524
525/**
526 * Invalidates a page in the TLBs.
527 *
528 * @param pVCpu The cross context virtual CPU structure of the calling
529 * thread.
530 * @param GCPtr The address of the page to invalidate.
531 * @thread EMT(pVCpu)
532 */
533VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
534{
535#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
536 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
537 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
538 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
539
540# ifdef IEM_WITH_CODE_TLB
541 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
542 {
543 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
544 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
545 pVCpu->iem.s.cbInstrBufTotal = 0;
546 }
547# endif
548
549# ifdef IEM_WITH_DATA_TLB
550 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
551 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
552# endif
553#else
554 NOREF(pVCpu); NOREF(GCPtr);
555#endif
556}
557
558
559#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
560/**
561 * Invalidates both TLBs the slow way following a rollover.
562 *
563 * Worker for IEMTlbInvalidateAllPhysical,
564 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
565 * iemMemMapJmp and others.
566 *
567 * @thread EMT(pVCpu)
568 */
569static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
570{
571 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
572 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
573 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
574
575 unsigned i;
576# ifdef IEM_WITH_CODE_TLB
577 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
578 while (i-- > 0)
579 {
580 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
581 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
582 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
583 }
584# endif
585# ifdef IEM_WITH_DATA_TLB
586 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
587 while (i-- > 0)
588 {
589 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
590 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
591 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
592 }
593# endif
594
595}
596#endif
597
598
599/**
600 * Invalidates the host physical aspects of the IEM TLBs.
601 *
602 * This is called internally as well as by PGM when moving GC mappings.
603 *
604 * @param pVCpu The cross context virtual CPU structure of the calling
605 * thread.
606 * @note Currently not used.
607 */
608VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
609{
610#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
611 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
612 Log10(("IEMTlbInvalidateAllPhysical\n"));
613
614# ifdef IEM_WITH_CODE_TLB
615 pVCpu->iem.s.cbInstrBufTotal = 0;
616# endif
617 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
618 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
619 {
620 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
621 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
622 }
623 else
624 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
625#else
626 NOREF(pVCpu);
627#endif
628}
629
630
631/**
632 * Invalidates the host physical aspects of the IEM TLBs.
633 *
634 * This is called internally as well as by PGM when moving GC mappings.
635 *
636 * @param pVM The cross context VM structure.
637 * @param idCpuCaller The ID of the calling EMT if available to the caller,
638 * otherwise NIL_VMCPUID.
639 *
640 * @remarks Caller holds the PGM lock.
641 */
642VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller)
643{
644#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
645 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
646 if (pVCpuCaller)
647 VMCPU_ASSERT_EMT(pVCpuCaller);
648 Log10(("IEMTlbInvalidateAllPhysicalAllCpus\n"));
649
650 VMCC_FOR_EACH_VMCPU(pVM)
651 {
652# ifdef IEM_WITH_CODE_TLB
653 if (pVCpuCaller == pVCpu)
654 pVCpu->iem.s.cbInstrBufTotal = 0;
655# endif
656
657 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
658 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
659 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
660 { /* likely */}
661 else if (pVCpuCaller == pVCpu)
662 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
663 else
664 {
665 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
666 continue;
667 }
668 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
669 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
670 }
671 VMCC_FOR_EACH_VMCPU_END(pVM);
672
673#else
674 RT_NOREF(pVM, idCpuCaller);
675#endif
676}
677
678#ifdef IEM_WITH_CODE_TLB
679
680/**
681 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
682 * failure and jumps.
683 *
684 * We end up here for a number of reasons:
685 * - pbInstrBuf isn't yet initialized.
686 * - Advancing beyond the buffer boundary (e.g. cross page).
687 * - Advancing beyond the CS segment limit.
688 * - Fetching from non-mappable page (e.g. MMIO).
689 *
690 * @param pVCpu The cross context virtual CPU structure of the
691 * calling thread.
692 * @param pvDst Where to return the bytes.
693 * @param cbDst Number of bytes to read.
694 *
695 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
696 */
697void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) RT_NOEXCEPT
698{
699#ifdef IN_RING3
700 for (;;)
701 {
702 Assert(cbDst <= 8);
703 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
704
705 /*
706 * We might have a partial buffer match, deal with that first to make the
707 * rest simpler. This is the first part of the cross page/buffer case.
708 */
709 if (pVCpu->iem.s.pbInstrBuf != NULL)
710 {
711 if (offBuf < pVCpu->iem.s.cbInstrBuf)
712 {
713 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
714 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
715 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
716
717 cbDst -= cbCopy;
718 pvDst = (uint8_t *)pvDst + cbCopy;
719 offBuf += cbCopy;
720 pVCpu->iem.s.offInstrNextByte += offBuf;
721 }
722 }
723
724 /*
725 * Check segment limit, figuring how much we're allowed to access at this point.
726 *
727 * We will fault immediately if RIP is past the segment limit / in non-canonical
728 * territory. If we do continue, there are one or more bytes to read before we
729 * end up in trouble and we need to do that first before faulting.
730 */
731 RTGCPTR GCPtrFirst;
732 uint32_t cbMaxRead;
733 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
734 {
735 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
736 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
737 { /* likely */ }
738 else
739 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
740 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
741 }
742 else
743 {
744 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
745 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
746 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
747 { /* likely */ }
748 else
749 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
750 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
751 if (cbMaxRead != 0)
752 { /* likely */ }
753 else
754 {
755 /* Overflowed because address is 0 and limit is max. */
756 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
757 cbMaxRead = X86_PAGE_SIZE;
758 }
759 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
760 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
761 if (cbMaxRead2 < cbMaxRead)
762 cbMaxRead = cbMaxRead2;
763 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
764 }
765
766 /*
767 * Get the TLB entry for this piece of code.
768 */
769 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
770 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
771 if (pTlbe->uTag == uTag)
772 {
773 /* likely when executing lots of code, otherwise unlikely */
774# ifdef VBOX_WITH_STATISTICS
775 pVCpu->iem.s.CodeTlb.cTlbHits++;
776# endif
777 }
778 else
779 {
780 pVCpu->iem.s.CodeTlb.cTlbMisses++;
781 PGMPTWALK Walk;
782 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
783 if (RT_FAILURE(rc))
784 {
785#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
786 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
787 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
788#endif
789 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
790 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
791 }
792
793 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
794 Assert(Walk.fSucceeded);
795 pTlbe->uTag = uTag;
796 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
797 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
798 pTlbe->GCPhys = Walk.GCPhys;
799 pTlbe->pbMappingR3 = NULL;
800 }
801
802 /*
803 * Check TLB page table level access flags.
804 */
805 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
806 {
807 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
808 {
809 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
810 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
811 }
812 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
813 {
814 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
815 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
816 }
817 }
818
819 /*
820 * Look up the physical page info if necessary.
821 */
822 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
823 { /* not necessary */ }
824 else
825 {
826 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
827 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
828 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
829 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
830 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
831 { /* likely */ }
832 else
833 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
834 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
835 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
836 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
837 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
838 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
839 }
840
841# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
842 /*
843 * Try do a direct read using the pbMappingR3 pointer.
844 */
845 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
846 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
847 {
848 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
849 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
850 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
851 {
852 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
853 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
854 }
855 else
856 {
857 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
858 Assert(cbInstr < cbMaxRead);
859 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
860 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
861 }
862 if (cbDst <= cbMaxRead)
863 {
864 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
865 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
866 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
867 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
868 return;
869 }
870 pVCpu->iem.s.pbInstrBuf = NULL;
871
872 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
873 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
874 }
875 else
876# endif
877#if 0
878 /*
878 * If there is no special read handling, we can read a bit more and
880 * put it in the prefetch buffer.
881 */
882 if ( cbDst < cbMaxRead
883 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
884 {
885 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
886 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
887 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
888 { /* likely */ }
889 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
890 {
891 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
892 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
893 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
894 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
895 }
896 else
897 {
898 Log((RT_SUCCESS(rcStrict)
899 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
900 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
901 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
902 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
903 }
904 }
905 /*
906 * Special read handling, so only read exactly what's needed.
907 * This is a highly unlikely scenario.
908 */
909 else
910#endif
911 {
912 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
913 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
914 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
915 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
916 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
917 { /* likely */ }
918 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
919 {
920 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
921 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
922 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
923 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
924 }
925 else
926 {
927 Log((RT_SUCCESS(rcStrict)
928 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
929 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
930 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
931 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
932 }
933 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
934 if (cbToRead == cbDst)
935 return;
936 }
937
938 /*
939 * More to read, loop.
940 */
941 cbDst -= cbMaxRead;
942 pvDst = (uint8_t *)pvDst + cbMaxRead;
943 }
944#else
945 RT_NOREF(pvDst, cbDst);
946 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
947#endif
948}
949
950#else
951
952/**
953 * Try fetch at least @a cbMin bytes more opcodes, raise the appropriate
954 * exception if it fails.
955 *
956 * @returns Strict VBox status code.
957 * @param pVCpu The cross context virtual CPU structure of the
958 * calling thread.
959 * @param cbMin The minimum number of bytes relative to offOpcode
960 * that must be read.
961 */
962VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
963{
964 /*
965 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
966 *
967 * First translate CS:rIP to a physical address.
968 */
969 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
970 uint32_t cbToTryRead;
971 RTGCPTR GCPtrNext;
972 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
973 {
974 cbToTryRead = GUEST_PAGE_SIZE;
975 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
976 if (!IEM_IS_CANONICAL(GCPtrNext))
977 return iemRaiseGeneralProtectionFault0(pVCpu);
978 }
979 else
980 {
981 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
982 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
983 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
984 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
985 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
986 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
987 if (!cbToTryRead) /* overflowed */
988 {
989 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
990 cbToTryRead = UINT32_MAX;
991 /** @todo check out wrapping around the code segment. */
992 }
993 if (cbToTryRead < cbMin - cbLeft)
994 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
995 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
996 }
997
998 /* Only read up to the end of the page, and make sure we don't read more
999 than the opcode buffer can hold. */
1000 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1001 if (cbToTryRead > cbLeftOnPage)
1002 cbToTryRead = cbLeftOnPage;
1003 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1004 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1005/** @todo r=bird: Convert assertion into undefined opcode exception? */
1006 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1007
1008 PGMPTWALK Walk;
1009 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1010 if (RT_FAILURE(rc))
1011 {
1012 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1013#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1014 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1015 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1016#endif
1017 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1018 }
1019 if (!(Walk.fEffective & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1020 {
1021 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1022#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1023 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1024 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1025#endif
1026 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1027 }
1028 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1029 {
1030 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1031#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1032 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1033 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1034#endif
1035 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1036 }
1037 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1038 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1039 /** @todo Check reserved bits and such stuff. PGM is better at doing
1040 * that, so do it when implementing the guest virtual address
1041 * TLB... */
1042
1043 /*
1044 * Read the bytes at this address.
1045 *
1046 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1047 * and since PATM should only patch the start of an instruction there
1048 * should be no need to check again here.
1049 */
1050 if (!pVCpu->iem.s.fBypassHandlers)
1051 {
1052 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1053 cbToTryRead, PGMACCESSORIGIN_IEM);
1054 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1055 { /* likely */ }
1056 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1057 {
1058 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1059 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1060 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1061 }
1062 else
1063 {
1064 Log((RT_SUCCESS(rcStrict)
1065 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1066 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1067 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1068 return rcStrict;
1069 }
1070 }
1071 else
1072 {
1073 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1074 if (RT_SUCCESS(rc))
1075 { /* likely */ }
1076 else
1077 {
1078 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1079 return rc;
1080 }
1081 }
1082 pVCpu->iem.s.cbOpcode += cbToTryRead;
1083 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1084
1085 return VINF_SUCCESS;
1086}
1087
1088#endif /* !IEM_WITH_CODE_TLB */
1089#ifndef IEM_WITH_SETJMP
1090
1091/**
1092 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1093 *
1094 * @returns Strict VBox status code.
1095 * @param pVCpu The cross context virtual CPU structure of the
1096 * calling thread.
1097 * @param pb Where to return the opcode byte.
1098 */
1099VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1100{
1101 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1102 if (rcStrict == VINF_SUCCESS)
1103 {
1104 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1105 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1106 pVCpu->iem.s.offOpcode = offOpcode + 1;
1107 }
1108 else
1109 *pb = 0;
1110 return rcStrict;
1111}
1112
1113#else /* IEM_WITH_SETJMP */
1114
1115/**
1116 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1117 *
1118 * @returns The opcode byte.
1119 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1120 */
1121uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1122{
1123# ifdef IEM_WITH_CODE_TLB
1124 uint8_t u8;
1125 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1126 return u8;
1127# else
1128 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1129 if (rcStrict == VINF_SUCCESS)
1130 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1131 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1132# endif
1133}
1134
1135#endif /* IEM_WITH_SETJMP */
1136
1137#ifndef IEM_WITH_SETJMP
1138
1139/**
1140 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1141 *
1142 * @returns Strict VBox status code.
1143 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1144 * @param pu16 Where to return the opcode word.
1145 */
1146VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1147{
1148 uint8_t u8;
1149 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1150 if (rcStrict == VINF_SUCCESS)
1151 *pu16 = (int8_t)u8;
1152 return rcStrict;
1153}
1154
1155
1156/**
1157 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1158 *
1159 * @returns Strict VBox status code.
1160 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1161 * @param pu32 Where to return the opcode dword.
1162 */
1163VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1164{
1165 uint8_t u8;
1166 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1167 if (rcStrict == VINF_SUCCESS)
1168 *pu32 = (int8_t)u8;
1169 return rcStrict;
1170}
1171
1172
1173/**
1174 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1175 *
1176 * @returns Strict VBox status code.
1177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1178 * @param pu64 Where to return the opcode qword.
1179 */
1180VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1181{
1182 uint8_t u8;
1183 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1184 if (rcStrict == VINF_SUCCESS)
1185 *pu64 = (int8_t)u8;
1186 return rcStrict;
1187}
1188
1189#endif /* !IEM_WITH_SETJMP */
1190
1191
1192#ifndef IEM_WITH_SETJMP
1193
1194/**
1195 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1196 *
1197 * @returns Strict VBox status code.
1198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1199 * @param pu16 Where to return the opcode word.
1200 */
1201VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1202{
1203 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1204 if (rcStrict == VINF_SUCCESS)
1205 {
1206 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1207# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1208 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1209# else
1210 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1211# endif
1212 pVCpu->iem.s.offOpcode = offOpcode + 2;
1213 }
1214 else
1215 *pu16 = 0;
1216 return rcStrict;
1217}
1218
1219#else /* IEM_WITH_SETJMP */
1220
1221/**
1222 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
1223 *
1224 * @returns The opcode word.
1225 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1226 */
1227uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1228{
1229# ifdef IEM_WITH_CODE_TLB
1230 uint16_t u16;
1231 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1232 return u16;
1233# else
1234 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1235 if (rcStrict == VINF_SUCCESS)
1236 {
1237 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1238 pVCpu->iem.s.offOpcode += 2;
1239# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1240 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1241# else
1242 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1243# endif
1244 }
1245 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1246# endif
1247}
1248
1249#endif /* IEM_WITH_SETJMP */
1250
1251#ifndef IEM_WITH_SETJMP
1252
1253/**
1254 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1255 *
1256 * @returns Strict VBox status code.
1257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1258 * @param pu32 Where to return the opcode double word.
1259 */
1260VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1261{
1262 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1263 if (rcStrict == VINF_SUCCESS)
1264 {
1265 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1266 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1267 pVCpu->iem.s.offOpcode = offOpcode + 2;
1268 }
1269 else
1270 *pu32 = 0;
1271 return rcStrict;
1272}
1273
1274
1275/**
1276 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1277 *
1278 * @returns Strict VBox status code.
1279 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1280 * @param pu64 Where to return the opcode quad word.
1281 */
1282VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1283{
1284 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1285 if (rcStrict == VINF_SUCCESS)
1286 {
1287 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1288 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1289 pVCpu->iem.s.offOpcode = offOpcode + 2;
1290 }
1291 else
1292 *pu64 = 0;
1293 return rcStrict;
1294}
1295
1296#endif /* !IEM_WITH_SETJMP */
1297
1298#ifndef IEM_WITH_SETJMP
1299
1300/**
1301 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1302 *
1303 * @returns Strict VBox status code.
1304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1305 * @param pu32 Where to return the opcode dword.
1306 */
1307VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1308{
1309 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1310 if (rcStrict == VINF_SUCCESS)
1311 {
1312 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1313# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1314 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1315# else
1316 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1317 pVCpu->iem.s.abOpcode[offOpcode + 1],
1318 pVCpu->iem.s.abOpcode[offOpcode + 2],
1319 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1320# endif
1321 pVCpu->iem.s.offOpcode = offOpcode + 4;
1322 }
1323 else
1324 *pu32 = 0;
1325 return rcStrict;
1326}
1327
1328#else /* IEM_WITH_SETJMP */
1329
1330/**
1331 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1332 *
1333 * @returns The opcode dword.
1334 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1335 */
1336uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1337{
1338# ifdef IEM_WITH_CODE_TLB
1339 uint32_t u32;
1340 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1341 return u32;
1342# else
1343 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1344 if (rcStrict == VINF_SUCCESS)
1345 {
1346 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1347 pVCpu->iem.s.offOpcode = offOpcode + 4;
1348# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1349 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1350# else
1351 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1352 pVCpu->iem.s.abOpcode[offOpcode + 1],
1353 pVCpu->iem.s.abOpcode[offOpcode + 2],
1354 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1355# endif
1356 }
1357 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1358# endif
1359}
1360
1361#endif /* IEM_WITH_SETJMP */
1362
1363#ifndef IEM_WITH_SETJMP
1364
1365/**
1366 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1367 *
1368 * @returns Strict VBox status code.
1369 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1370 * @param pu64 Where to return the opcode qword.
1371 */
1372VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1373{
1374 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1375 if (rcStrict == VINF_SUCCESS)
1376 {
1377 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1378 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1379 pVCpu->iem.s.abOpcode[offOpcode + 1],
1380 pVCpu->iem.s.abOpcode[offOpcode + 2],
1381 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1382 pVCpu->iem.s.offOpcode = offOpcode + 4;
1383 }
1384 else
1385 *pu64 = 0;
1386 return rcStrict;
1387}
1388
1389
1390/**
1391 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1392 *
1393 * @returns Strict VBox status code.
1394 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1395 * @param pu64 Where to return the opcode qword.
1396 */
1397VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1398{
1399 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1400 if (rcStrict == VINF_SUCCESS)
1401 {
1402 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1403 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1404 pVCpu->iem.s.abOpcode[offOpcode + 1],
1405 pVCpu->iem.s.abOpcode[offOpcode + 2],
1406 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1407 pVCpu->iem.s.offOpcode = offOpcode + 4;
1408 }
1409 else
1410 *pu64 = 0;
1411 return rcStrict;
1412}
1413
1414#endif /* !IEM_WITH_SETJMP */
1415
1416#ifndef IEM_WITH_SETJMP
1417
1418/**
1419 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1420 *
1421 * @returns Strict VBox status code.
1422 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1423 * @param pu64 Where to return the opcode qword.
1424 */
1425VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1426{
1427 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1428 if (rcStrict == VINF_SUCCESS)
1429 {
1430 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1431# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1432 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1433# else
1434 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1435 pVCpu->iem.s.abOpcode[offOpcode + 1],
1436 pVCpu->iem.s.abOpcode[offOpcode + 2],
1437 pVCpu->iem.s.abOpcode[offOpcode + 3],
1438 pVCpu->iem.s.abOpcode[offOpcode + 4],
1439 pVCpu->iem.s.abOpcode[offOpcode + 5],
1440 pVCpu->iem.s.abOpcode[offOpcode + 6],
1441 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1442# endif
1443 pVCpu->iem.s.offOpcode = offOpcode + 8;
1444 }
1445 else
1446 *pu64 = 0;
1447 return rcStrict;
1448}
1449
1450#else /* IEM_WITH_SETJMP */
1451
1452/**
1453 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1454 *
1455 * @returns The opcode qword.
1456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1457 */
1458uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1459{
1460# ifdef IEM_WITH_CODE_TLB
1461 uint64_t u64;
1462 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1463 return u64;
1464# else
1465 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1466 if (rcStrict == VINF_SUCCESS)
1467 {
1468 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1469 pVCpu->iem.s.offOpcode = offOpcode + 8;
1470# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1471 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1472# else
1473 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1474 pVCpu->iem.s.abOpcode[offOpcode + 1],
1475 pVCpu->iem.s.abOpcode[offOpcode + 2],
1476 pVCpu->iem.s.abOpcode[offOpcode + 3],
1477 pVCpu->iem.s.abOpcode[offOpcode + 4],
1478 pVCpu->iem.s.abOpcode[offOpcode + 5],
1479 pVCpu->iem.s.abOpcode[offOpcode + 6],
1480 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1481# endif
1482 }
1483 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1484# endif
1485}
1486
1487#endif /* IEM_WITH_SETJMP */
1488
1489
1490
1491/** @name Misc Worker Functions.
1492 * @{
1493 */
1494
1495/**
1496 * Gets the exception class for the specified exception vector.
1497 *
1498 * @returns The class of the specified exception.
1499 * @param uVector The exception vector.
1500 */
1501static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1502{
1503 Assert(uVector <= X86_XCPT_LAST);
1504 switch (uVector)
1505 {
1506 case X86_XCPT_DE:
1507 case X86_XCPT_TS:
1508 case X86_XCPT_NP:
1509 case X86_XCPT_SS:
1510 case X86_XCPT_GP:
1511 case X86_XCPT_SX: /* AMD only */
1512 return IEMXCPTCLASS_CONTRIBUTORY;
1513
1514 case X86_XCPT_PF:
1515 case X86_XCPT_VE: /* Intel only */
1516 return IEMXCPTCLASS_PAGE_FAULT;
1517
1518 case X86_XCPT_DF:
1519 return IEMXCPTCLASS_DOUBLE_FAULT;
1520 }
1521 return IEMXCPTCLASS_BENIGN;
1522}
1523
1524
1525/**
1526 * Evaluates how to handle an exception caused during delivery of another event
1527 * (exception / interrupt).
1528 *
1529 * @returns How to handle the recursive exception.
1530 * @param pVCpu The cross context virtual CPU structure of the
1531 * calling thread.
1532 * @param fPrevFlags The flags of the previous event.
1533 * @param uPrevVector The vector of the previous event.
1534 * @param fCurFlags The flags of the current exception.
1535 * @param uCurVector The vector of the current exception.
1536 * @param pfXcptRaiseInfo Where to store additional information about the
1537 * exception condition. Optional.
1538 */
1539VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1540 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1541{
1542 /*
1543 * Only CPU exceptions can be raised while delivering other events; software interrupt
1544 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1545 */
1546 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1547 Assert(pVCpu); RT_NOREF(pVCpu);
1548 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1549
1550 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1551 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1552 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1553 {
1554 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1555 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1556 {
1557 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1558 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1559 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1560 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1561 {
1562 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1563 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1564 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1565 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1566 uCurVector, pVCpu->cpum.GstCtx.cr2));
1567 }
1568 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1569 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1570 {
1571 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1572 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1573 }
1574 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1575 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1576 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1577 {
1578 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1579 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1580 }
1581 }
1582 else
1583 {
1584 if (uPrevVector == X86_XCPT_NMI)
1585 {
1586 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1587 if (uCurVector == X86_XCPT_PF)
1588 {
1589 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1590 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1591 }
1592 }
1593 else if ( uPrevVector == X86_XCPT_AC
1594 && uCurVector == X86_XCPT_AC)
1595 {
1596 enmRaise = IEMXCPTRAISE_CPU_HANG;
1597 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1598 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1599 }
1600 }
1601 }
1602 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1603 {
1604 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1605 if (uCurVector == X86_XCPT_PF)
1606 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1607 }
1608 else
1609 {
1610 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1611 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1612 }
1613
1614 if (pfXcptRaiseInfo)
1615 *pfXcptRaiseInfo = fRaiseInfo;
1616 return enmRaise;
1617}
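/* Illustrative use only (not an actual call site; the values are made up for
 * the example): a #GP raised while delivering a #PF is a contributory
 * exception on top of a page fault and must be promoted to #DF:
 *
 *      IEMXCPTRAISEINFO fInfo = IEMXCPTRAISEINFO_NONE;
 *      IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                       IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                       IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                       &fInfo);
 *      Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT);
 *      Assert(fInfo == IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT);
 */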
1618
1619
1620/**
1621 * Enters the CPU shutdown state initiated by a triple fault or other
1622 * unrecoverable conditions.
1623 *
1624 * @returns Strict VBox status code.
1625 * @param pVCpu The cross context virtual CPU structure of the
1626 * calling thread.
1627 */
1628static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1629{
1630 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1631 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1632
1633 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1634 {
1635 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1636 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1637 }
1638
1639 RT_NOREF(pVCpu);
1640 return VINF_EM_TRIPLE_FAULT;
1641}
1642
1643
1644/**
1645 * Validates a new SS segment.
1646 *
1647 * @returns VBox strict status code.
1648 * @param pVCpu The cross context virtual CPU structure of the
1649 * calling thread.
1650 * @param NewSS The new SS selector.
1651 * @param uCpl The CPL to load the stack for.
1652 * @param pDesc Where to return the descriptor.
1653 */
1654static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1655{
1656 /* Null selectors are not allowed (we're not called for dispatching
1657 interrupts with SS=0 in long mode). */
1658 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1659 {
1660 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1661 return iemRaiseTaskSwitchFault0(pVCpu);
1662 }
1663
1664 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1665 if ((NewSS & X86_SEL_RPL) != uCpl)
1666 {
1667 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1668 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1669 }
1670
1671 /*
1672 * Read the descriptor.
1673 */
1674 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1675 if (rcStrict != VINF_SUCCESS)
1676 return rcStrict;
1677
1678 /*
1679 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1680 */
1681 if (!pDesc->Legacy.Gen.u1DescType)
1682 {
1683 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1684 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1685 }
1686
1687 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1688 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1689 {
1690 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1691 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1692 }
1693 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1694 {
1695 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1696 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1697 }
1698
1699 /* Is it there? */
1700 /** @todo testcase: Is this checked before the canonical / limit check below? */
1701 if (!pDesc->Legacy.Gen.u1Present)
1702 {
1703 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1704 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1705 }
1706
1707 return VINF_SUCCESS;
1708}
1709
1710/** @} */
1711
1712
1713/** @name Raising Exceptions.
1714 *
1715 * @{
1716 */
1717
1718
1719/**
1720 * Loads the specified stack far pointer from the TSS.
1721 *
1722 * @returns VBox strict status code.
1723 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1724 * @param uCpl The CPL to load the stack for.
1725 * @param pSelSS Where to return the new stack segment.
1726 * @param puEsp Where to return the new stack pointer.
1727 */
1728static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1729{
1730 VBOXSTRICTRC rcStrict;
1731 Assert(uCpl < 4);
1732
1733 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1734 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1735 {
1736 /*
1737 * 16-bit TSS (X86TSS16).
1738 */
1739 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1740 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1741 {
1742 uint32_t off = uCpl * 4 + 2;
1743 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1744 {
1745 /** @todo check actual access pattern here. */
1746 uint32_t u32Tmp = 0; /* gcc maybe... */
1747 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1748 if (rcStrict == VINF_SUCCESS)
1749 {
1750 *puEsp = RT_LOWORD(u32Tmp);
1751 *pSelSS = RT_HIWORD(u32Tmp);
1752 return VINF_SUCCESS;
1753 }
1754 }
1755 else
1756 {
1757 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1758 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1759 }
1760 break;
1761 }
1762
1763 /*
1764 * 32-bit TSS (X86TSS32).
1765 */
1766 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1767 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1768 {
1769 uint32_t off = uCpl * 8 + 4;
1770 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1771 {
1772 /** @todo check actual access pattern here. */
1773 uint64_t u64Tmp;
1774 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1775 if (rcStrict == VINF_SUCCESS)
1776 {
1777 *puEsp = u64Tmp & UINT32_MAX;
1778 *pSelSS = (RTSEL)(u64Tmp >> 32);
1779 return VINF_SUCCESS;
1780 }
1781 }
1782 else
1783 {
1784 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1785 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1786 }
1787 break;
1788 }
1789
1790 default:
1791 AssertFailed();
1792 rcStrict = VERR_IEM_IPE_4;
1793 break;
1794 }
1795
1796 *puEsp = 0; /* make gcc happy */
1797 *pSelSS = 0; /* make gcc happy */
1798 return rcStrict;
1799}
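/* Layout note for the offsets used above (see X86TSS16 / X86TSS32): in the
 * 16-bit TSS the {sp,ss} pairs for rings 0-2 are 2+2 bytes each and start at
 * offset 2, right after the previous-task link, hence uCpl * 4 + 2; in the
 * 32-bit TSS the {esp,ss} pairs are 4+4 bytes each and start at offset 4,
 * hence uCpl * 8 + 4. */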
1800
1801
1802/**
1803 * Loads the specified stack pointer from the 64-bit TSS.
1804 *
1805 * @returns VBox strict status code.
1806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1807 * @param uCpl The CPL to load the stack for.
1808 * @param uIst The interrupt stack table index, 0 if to use uCpl.
1809 * @param puRsp Where to return the new stack pointer.
1810 */
1811static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
1812{
1813 Assert(uCpl < 4);
1814 Assert(uIst < 8);
1815 *puRsp = 0; /* make gcc happy */
1816
1817 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1818 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
1819
1820 uint32_t off;
1821 if (uIst)
1822 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
1823 else
1824 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
1825 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
1826 {
1827 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
1828 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1829 }
1830
1831 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1832}
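/* Layout note: in the 64-bit TSS the RSP0..RSP2 fields start at offset 4 and
 * IST1..IST7 at offset 0x24 (see X86TSS64), which is what the RT_UOFFSETOF
 * expressions above resolve to; uIst == 0 means "no IST", i.e. use the RSP
 * field selected by the target CPL. */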
1833
1834
1835/**
1836 * Adjust the CPU state according to the exception being raised.
1837 *
1838 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1839 * @param u8Vector The exception that has been raised.
1840 */
1841DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
1842{
1843 switch (u8Vector)
1844 {
1845 case X86_XCPT_DB:
1846 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
1847 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
1848 break;
1849 /** @todo Read the AMD and Intel exception reference... */
1850 }
1851}
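/* Note! Clearing DR7.GD mirrors real CPU behaviour when raising #DB: the
 * general-detect condition is disarmed on entering the handler so it can
 * access the debug registers without immediately re-faulting. */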
1852
1853
1854/**
1855 * Implements exceptions and interrupts for real mode.
1856 *
1857 * @returns VBox strict status code.
1858 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1859 * @param cbInstr The number of bytes to offset rIP by in the return
1860 * address.
1861 * @param u8Vector The interrupt / exception vector number.
1862 * @param fFlags The flags.
1863 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1864 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1865 */
1866static VBOXSTRICTRC
1867iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
1868 uint8_t cbInstr,
1869 uint8_t u8Vector,
1870 uint32_t fFlags,
1871 uint16_t uErr,
1872 uint64_t uCr2) RT_NOEXCEPT
1873{
1874 NOREF(uErr); NOREF(uCr2);
1875 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1876
1877 /*
1878 * Read the IDT entry.
1879 */
1880 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
1881 {
1882 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
1883 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1884 }
1885 RTFAR16 Idte;
1886 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
1887 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1888 {
1889 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
1890 return rcStrict;
1891 }
1892
1893 /*
1894 * Push the stack frame.
1895 */
1896 uint16_t *pu16Frame;
1897 uint64_t uNewRsp;
1898 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
1899 if (rcStrict != VINF_SUCCESS)
1900 return rcStrict;
1901
1902 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
1903#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
1904 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
1905 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
1906 fEfl |= UINT16_C(0xf000);
1907#endif
1908 pu16Frame[2] = (uint16_t)fEfl;
1909 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
1910 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
1911 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
1912 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1913 return rcStrict;
1914
1915 /*
1916 * Load the vector address into cs:ip and make exception specific state
1917 * adjustments.
1918 */
1919 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
1920 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
1921 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1922 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
1923 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
1924 pVCpu->cpum.GstCtx.rip = Idte.off;
1925 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
1926 IEMMISC_SET_EFL(pVCpu, fEfl);
1927
1928 /** @todo do we actually do this in real mode? */
1929 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1930 iemRaiseXcptAdjustState(pVCpu, u8Vector);
1931
1932 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
1933}
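/* Summary of the real-mode delivery done above: the IVT entry at
 * IDTR.base + vector * 4 is a 16:16 far pointer (offset word first, then the
 * segment word), and the 6-byte frame pushed on the stack holds IP at the
 * lowest address, then CS, then FLAGS. */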
1934
1935
1936/**
1937 * Loads a NULL data selector into a segment register when coming from V8086 mode.
1938 *
1939 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1940 * @param pSReg Pointer to the segment register.
1941 */
1942DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
1943{
1944 pSReg->Sel = 0;
1945 pSReg->ValidSel = 0;
1946 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
1947 {
1948 /* VT-x (Intel 3960x) doesn't change the base and limit; it only clears and sets the following attributes. */
1949 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
1950 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
1951 }
1952 else
1953 {
1954 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1955 /** @todo check this on AMD-V */
1956 pSReg->u64Base = 0;
1957 pSReg->u32Limit = 0;
1958 }
1959}
1960
1961
1962/**
1963 * Loads a segment selector during a task switch in V8086 mode.
1964 *
1965 * @param pSReg Pointer to the segment register.
1966 * @param uSel The selector value to load.
1967 */
1968DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
1969{
1970 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
1971 pSReg->Sel = uSel;
1972 pSReg->ValidSel = uSel;
1973 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1974 pSReg->u64Base = uSel << 4;
1975 pSReg->u32Limit = 0xffff;
1976 pSReg->Attr.u = 0xf3;
1977}
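/* Note! 0xf3 decodes as: type 3 (read/write data, accessed), S=1 (code/data),
 * DPL=3, present - the fixed attributes every segment register has while in
 * virtual-8086 mode. */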
1978
1979
1980/**
1981 * Loads a segment selector during a task switch in protected mode.
1982 *
1983 * In this task switch scenario, we would throw \#TS exceptions rather than
1984 * \#GPs.
1985 *
1986 * @returns VBox strict status code.
1987 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1988 * @param pSReg Pointer to the segment register.
1989 * @param uSel The new selector value.
1990 *
1991 * @remarks This does _not_ handle CS or SS.
1992 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
1993 */
1994static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
1995{
1996 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
1997
1998 /* Null data selector. */
1999 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2000 {
2001 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2002 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2003 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2004 return VINF_SUCCESS;
2005 }
2006
2007 /* Fetch the descriptor. */
2008 IEMSELDESC Desc;
2009 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2010 if (rcStrict != VINF_SUCCESS)
2011 {
2012 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2013 VBOXSTRICTRC_VAL(rcStrict)));
2014 return rcStrict;
2015 }
2016
2017 /* Must be a data segment or readable code segment. */
2018 if ( !Desc.Legacy.Gen.u1DescType
2019 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2020 {
2021 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2022 Desc.Legacy.Gen.u4Type));
2023 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2024 }
2025
2026 /* Check privileges for data segments and non-conforming code segments. */
2027 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2028 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2029 {
2030 /* The RPL and the new CPL must be less than or equal to the DPL. */
2031 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2032 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
2033 {
2034 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2035 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2036 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2037 }
2038 }
2039
2040 /* Is it there? */
2041 if (!Desc.Legacy.Gen.u1Present)
2042 {
2043 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2044 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2045 }
2046
2047 /* The base and limit. */
2048 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2049 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2050
2051 /*
2052 * Ok, everything checked out fine. Now set the accessed bit before
2053 * committing the result into the registers.
2054 */
2055 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2056 {
2057 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2058 if (rcStrict != VINF_SUCCESS)
2059 return rcStrict;
2060 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2061 }
2062
2063 /* Commit */
2064 pSReg->Sel = uSel;
2065 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2066 pSReg->u32Limit = cbLimit;
2067 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2068 pSReg->ValidSel = uSel;
2069 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2070 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2071 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2072
2073 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2074 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2075 return VINF_SUCCESS;
2076}
2077
2078
2079/**
2080 * Performs a task switch.
2081 *
2082 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2083 * caller is responsible for performing the necessary checks (like DPL, TSS
2084 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2085 * reference for JMP, CALL, IRET.
2086 *
2087 * If the task switch is due to a software interrupt or hardware exception,
2088 * the caller is responsible for validating the TSS selector and descriptor. See
2089 * Intel Instruction reference for INT n.
2090 *
2091 * @returns VBox strict status code.
2092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2093 * @param enmTaskSwitch The cause of the task switch.
2094 * @param uNextEip The EIP effective after the task switch.
2095 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2096 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2097 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2098 * @param SelTSS The TSS selector of the new task.
2099 * @param pNewDescTSS Pointer to the new TSS descriptor.
2100 */
2101VBOXSTRICTRC
2102iemTaskSwitch(PVMCPUCC pVCpu,
2103 IEMTASKSWITCH enmTaskSwitch,
2104 uint32_t uNextEip,
2105 uint32_t fFlags,
2106 uint16_t uErr,
2107 uint64_t uCr2,
2108 RTSEL SelTSS,
2109 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2110{
2111 Assert(!IEM_IS_REAL_MODE(pVCpu));
2112 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
2113 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2114
2115 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2116 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2117 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2118 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2119 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2120
2121 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2122 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2123
2124 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2125 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2126
2127 /* Update CR2 in case it's a page-fault. */
2128 /** @todo This should probably be done much earlier in IEM/PGM. See
2129 * @bugref{5653#c49}. */
2130 if (fFlags & IEM_XCPT_FLAGS_CR2)
2131 pVCpu->cpum.GstCtx.cr2 = uCr2;
2132
2133 /*
2134 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2135 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2136 */
2137 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2138 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2139 if (uNewTSSLimit < uNewTSSLimitMin)
2140 {
2141 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2142 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2143 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2144 }
2145
2146 /*
2147 * Task switches in VMX non-root mode always cause task-switch VM-exits.
2148 * The new TSS must have been read and validated (DPL, limits etc.) before a
2149 * task-switch VM-exit commences.
2150 *
2151 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2152 */
2153 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2154 {
2155 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2156 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2157 }
2158
2159 /*
2160 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2161 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2162 */
2163 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2164 {
2165 uint32_t const uExitInfo1 = SelTSS;
2166 uint32_t uExitInfo2 = uErr;
2167 switch (enmTaskSwitch)
2168 {
2169 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2170 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2171 default: break;
2172 }
2173 if (fFlags & IEM_XCPT_FLAGS_ERR)
2174 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2175 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2176 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2177
2178 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2179 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2180 RT_NOREF2(uExitInfo1, uExitInfo2);
2181 }
2182
2183 /*
2184 * Check the current TSS limit. The last written byte to the current TSS during the
2185 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2186 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2187 *
2188 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2189 * end up with smaller than "legal" TSS limits.
2190 */
2191 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2192 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2193 if (uCurTSSLimit < uCurTSSLimitMin)
2194 {
2195 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2196 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2197 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2198 }
2199
2200 /*
2201 * Verify that the new TSS can be accessed and map it. Map only the required contents
2202 * and not the entire TSS.
2203 */
2204 void *pvNewTSS;
2205 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2206 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2207 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2208 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2209 * not perform correct translation if this happens. See Intel spec. 7.2.1
2210 * "Task-State Segment". */
2211 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
2212 if (rcStrict != VINF_SUCCESS)
2213 {
2214 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2215 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2216 return rcStrict;
2217 }
2218
2219 /*
2220 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2221 */
2222 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
2223 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2224 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2225 {
2226 PX86DESC pDescCurTSS;
2227 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2228 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2229 if (rcStrict != VINF_SUCCESS)
2230 {
2231 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2232 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2233 return rcStrict;
2234 }
2235
2236 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2237 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2238 if (rcStrict != VINF_SUCCESS)
2239 {
2240 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2241 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2242 return rcStrict;
2243 }
2244
2245 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2246 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2247 {
2248 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2249 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2250 u32EFlags &= ~X86_EFL_NT;
2251 }
2252 }
2253
2254 /*
2255 * Save the CPU state into the current TSS.
2256 */
2257 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2258 if (GCPtrNewTSS == GCPtrCurTSS)
2259 {
2260 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2261 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2262 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
2263 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2264 pVCpu->cpum.GstCtx.ldtr.Sel));
2265 }
2266 if (fIsNewTSS386)
2267 {
2268 /*
2269 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2270 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2271 */
2272 void *pvCurTSS32;
2273 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2274 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2275 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2276 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2277 if (rcStrict != VINF_SUCCESS)
2278 {
2279 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2280 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2281 return rcStrict;
2282 }
2283
2284 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2285 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2286 pCurTSS32->eip = uNextEip;
2287 pCurTSS32->eflags = u32EFlags;
2288 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2289 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2290 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2291 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2292 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2293 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2294 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2295 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2296 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2297 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2298 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2299 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2300 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2301 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2302
2303 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2304 if (rcStrict != VINF_SUCCESS)
2305 {
2306 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2307 VBOXSTRICTRC_VAL(rcStrict)));
2308 return rcStrict;
2309 }
2310 }
2311 else
2312 {
2313 /*
2314 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2315 */
2316 void *pvCurTSS16;
2317 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2318 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2319 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2320 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2321 if (rcStrict != VINF_SUCCESS)
2322 {
2323 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2324 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2325 return rcStrict;
2326 }
2327
2328 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2329 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2330 pCurTSS16->ip = uNextEip;
2331 pCurTSS16->flags = u32EFlags;
2332 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2333 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2334 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2335 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2336 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2337 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2338 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2339 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2340 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2341 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2342 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2343 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2344
2345 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2346 if (rcStrict != VINF_SUCCESS)
2347 {
2348 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2349 VBOXSTRICTRC_VAL(rcStrict)));
2350 return rcStrict;
2351 }
2352 }
2353
2354 /*
2355 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2356 */
2357 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2358 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2359 {
2360 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2361 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2362 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2363 }
2364
2365 /*
2366 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2367 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2368 */
2369 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2370 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2371 bool fNewDebugTrap;
2372 if (fIsNewTSS386)
2373 {
2374 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2375 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2376 uNewEip = pNewTSS32->eip;
2377 uNewEflags = pNewTSS32->eflags;
2378 uNewEax = pNewTSS32->eax;
2379 uNewEcx = pNewTSS32->ecx;
2380 uNewEdx = pNewTSS32->edx;
2381 uNewEbx = pNewTSS32->ebx;
2382 uNewEsp = pNewTSS32->esp;
2383 uNewEbp = pNewTSS32->ebp;
2384 uNewEsi = pNewTSS32->esi;
2385 uNewEdi = pNewTSS32->edi;
2386 uNewES = pNewTSS32->es;
2387 uNewCS = pNewTSS32->cs;
2388 uNewSS = pNewTSS32->ss;
2389 uNewDS = pNewTSS32->ds;
2390 uNewFS = pNewTSS32->fs;
2391 uNewGS = pNewTSS32->gs;
2392 uNewLdt = pNewTSS32->selLdt;
2393 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2394 }
2395 else
2396 {
2397 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2398 uNewCr3 = 0;
2399 uNewEip = pNewTSS16->ip;
2400 uNewEflags = pNewTSS16->flags;
2401 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2402 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2403 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2404 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2405 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2406 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2407 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2408 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2409 uNewES = pNewTSS16->es;
2410 uNewCS = pNewTSS16->cs;
2411 uNewSS = pNewTSS16->ss;
2412 uNewDS = pNewTSS16->ds;
2413 uNewFS = 0;
2414 uNewGS = 0;
2415 uNewLdt = pNewTSS16->selLdt;
2416 fNewDebugTrap = false;
2417 }
2418
2419 if (GCPtrNewTSS == GCPtrCurTSS)
2420 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2421 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2422
2423 /*
2424 * We're done accessing the new TSS.
2425 */
2426 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2427 if (rcStrict != VINF_SUCCESS)
2428 {
2429 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2430 return rcStrict;
2431 }
2432
2433 /*
2434 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2435 */
2436 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2437 {
2438 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2439 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2440 if (rcStrict != VINF_SUCCESS)
2441 {
2442 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2443 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2444 return rcStrict;
2445 }
2446
2447 /* Check that the descriptor indicates the new TSS is available (not busy). */
2448 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2449 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2450 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2451
2452 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2453 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2454 if (rcStrict != VINF_SUCCESS)
2455 {
2456 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2457 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2458 return rcStrict;
2459 }
2460 }
2461
2462 /*
2463 * From this point on, we're technically in the new task. We will defer exceptions
2464 * until the completion of the task switch but before executing any instructions in the new task.
2465 */
2466 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2467 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2468 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2469 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2470 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2471 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2472 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2473
2474 /* Set the busy bit in TR. */
2475 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2476
2477 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2478 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2479 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2480 {
2481 uNewEflags |= X86_EFL_NT;
2482 }
2483
2484 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2485 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2486 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2487
2488 pVCpu->cpum.GstCtx.eip = uNewEip;
2489 pVCpu->cpum.GstCtx.eax = uNewEax;
2490 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2491 pVCpu->cpum.GstCtx.edx = uNewEdx;
2492 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2493 pVCpu->cpum.GstCtx.esp = uNewEsp;
2494 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2495 pVCpu->cpum.GstCtx.esi = uNewEsi;
2496 pVCpu->cpum.GstCtx.edi = uNewEdi;
2497
2498 uNewEflags &= X86_EFL_LIVE_MASK;
2499 uNewEflags |= X86_EFL_RA1_MASK;
2500 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2501
2502 /*
2503 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2504 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2505 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2506 */
2507 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2508 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2509
2510 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2511 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2512
2513 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2514 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2515
2516 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2517 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2518
2519 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2520 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2521
2522 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2523 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2524 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2525
2526 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2527 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2528 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2529 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2530
2531 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2532 {
2533 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2534 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2535 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2536 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2537 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2538 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2539 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2540 }
2541
2542 /*
2543 * Switch CR3 for the new task.
2544 */
2545 if ( fIsNewTSS386
2546 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2547 {
2548 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2549 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2550 AssertRCSuccessReturn(rc, rc);
2551
2552 /* Inform PGM. */
2553 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2554 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2555 AssertRCReturn(rc, rc);
2556 /* ignore informational status codes */
2557
2558 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2559 }
2560
2561 /*
2562 * Switch LDTR for the new task.
2563 */
2564 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2565 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2566 else
2567 {
2568 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2569
2570 IEMSELDESC DescNewLdt;
2571 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2572 if (rcStrict != VINF_SUCCESS)
2573 {
2574 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2575 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2576 return rcStrict;
2577 }
2578 if ( !DescNewLdt.Legacy.Gen.u1Present
2579 || DescNewLdt.Legacy.Gen.u1DescType
2580 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2581 {
2582 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2583 uNewLdt, DescNewLdt.Legacy.u));
2584 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2585 }
2586
2587 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2588 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2589 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2590 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2591 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2592 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2593 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2594 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2595 }
2596
2597 IEMSELDESC DescSS;
2598 if (IEM_IS_V86_MODE(pVCpu))
2599 {
2600 pVCpu->iem.s.uCpl = 3;
2601 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2602 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2603 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2604 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2605 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2606 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2607
2608 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2609 DescSS.Legacy.u = 0;
2610 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2611 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2612 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2613 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2614 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2615 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2616 DescSS.Legacy.Gen.u2Dpl = 3;
2617 }
2618 else
2619 {
2620 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2621
2622 /*
2623 * Load the stack segment for the new task.
2624 */
2625 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2626 {
2627 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2628 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2629 }
2630
2631 /* Fetch the descriptor. */
2632 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2633 if (rcStrict != VINF_SUCCESS)
2634 {
2635 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2636 VBOXSTRICTRC_VAL(rcStrict)));
2637 return rcStrict;
2638 }
2639
2640 /* SS must be a data segment and writable. */
2641 if ( !DescSS.Legacy.Gen.u1DescType
2642 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2643 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2644 {
2645 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2646 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2647 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2648 }
2649
2650 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2651 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2652 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2653 {
2654 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2655 uNewCpl));
2656 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2657 }
2658
2659 /* Is it there? */
2660 if (!DescSS.Legacy.Gen.u1Present)
2661 {
2662 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2663 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2664 }
2665
2666 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2667 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2668
2669 /* Set the accessed bit before committing the result into SS. */
2670 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2671 {
2672 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2673 if (rcStrict != VINF_SUCCESS)
2674 return rcStrict;
2675 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2676 }
2677
2678 /* Commit SS. */
2679 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2680 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2681 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2682 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2683 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2684 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2685 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2686
2687 /* CPL has changed, update IEM before loading rest of segments. */
2688 pVCpu->iem.s.uCpl = uNewCpl;
2689
2690 /*
2691 * Load the data segments for the new task.
2692 */
2693 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2694 if (rcStrict != VINF_SUCCESS)
2695 return rcStrict;
2696 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2697 if (rcStrict != VINF_SUCCESS)
2698 return rcStrict;
2699 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2700 if (rcStrict != VINF_SUCCESS)
2701 return rcStrict;
2702 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2703 if (rcStrict != VINF_SUCCESS)
2704 return rcStrict;
2705
2706 /*
2707 * Load the code segment for the new task.
2708 */
2709 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2710 {
2711 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2712 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2713 }
2714
2715 /* Fetch the descriptor. */
2716 IEMSELDESC DescCS;
2717 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2718 if (rcStrict != VINF_SUCCESS)
2719 {
2720 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2721 return rcStrict;
2722 }
2723
2724 /* CS must be a code segment. */
2725 if ( !DescCS.Legacy.Gen.u1DescType
2726 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2727 {
2728 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2729 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2730 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2731 }
2732
2733 /* For conforming CS, DPL must be less than or equal to the RPL. */
2734 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2735 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2736 {
2737 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2738 DescCS.Legacy.Gen.u2Dpl));
2739 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2740 }
2741
2742 /* For non-conforming CS, DPL must match RPL. */
2743 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2744 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2745 {
2746 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2747 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2748 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2749 }
2750
2751 /* Is it there? */
2752 if (!DescCS.Legacy.Gen.u1Present)
2753 {
2754 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2755 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2756 }
2757
2758 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2759 u64Base = X86DESC_BASE(&DescCS.Legacy);
2760
2761 /* Set the accessed bit before committing the result into CS. */
2762 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2763 {
2764 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2765 if (rcStrict != VINF_SUCCESS)
2766 return rcStrict;
2767 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2768 }
2769
2770 /* Commit CS. */
2771 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2772 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2773 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2774 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2775 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2776 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2777 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2778 }
2779
2780 /** @todo Debug trap. */
2781 if (fIsNewTSS386 && fNewDebugTrap)
2782 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
2783
2784 /*
2785 * Construct the error code masks based on what caused this task switch.
2786 * See Intel Instruction reference for INT.
2787 */
2788 uint16_t uExt;
2789 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
2790 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2791 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
2792 {
2793 uExt = 1;
2794 }
2795 else
2796 uExt = 0;
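 /* Note! uExt is the EXT bit used for the #SS/#GP error codes raised below:
 * it is set unless the event was a software interrupt (and not ICEBP),
 * i.e. it flags faults that occur while delivering an event external to
 * the interrupted program. */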
2797
2798 /*
2799 * Push any error code on to the new stack.
2800 */
2801 if (fFlags & IEM_XCPT_FLAGS_ERR)
2802 {
2803 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
2804 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2805 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
2806
2807 /* Check that there is sufficient space on the stack. */
2808 /** @todo Factor out segment limit checking for normal/expand down segments
2809 * into a separate function. */
2810 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
2811 {
2812 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
2813 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
2814 {
2815 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2816 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
2817 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2818 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2819 }
2820 }
2821 else
2822 {
2823 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
2824 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
2825 {
2826 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
2827 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2828 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2829 }
2830 }
2831
2832
2833 if (fIsNewTSS386)
2834 rcStrict = iemMemStackPushU32(pVCpu, uErr);
2835 else
2836 rcStrict = iemMemStackPushU16(pVCpu, uErr);
2837 if (rcStrict != VINF_SUCCESS)
2838 {
2839 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
2840 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
2841 return rcStrict;
2842 }
2843 }
2844
2845 /* Check the new EIP against the new CS limit. */
2846 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
2847 {
2848 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
2849 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
2850 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2851 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
2852 }
2853
2854 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
2855 pVCpu->cpum.GstCtx.ss.Sel));
2856 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2857}
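/* Rough order of the task switch implemented above (useful when comparing
 * against the Intel SDM "Task Management" chapter): TSS limit checks and
 * nested-guest intercepts, save the dynamic state into the outgoing TSS,
 * update the previous-task link and busy bits, load TR, CR3 and LDTR, then
 * SS and the new CPL, the data segments and CS, push any error code onto the
 * new stack, and finally check EIP against the new CS limit. */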
2858
2859
2860/**
2861 * Implements exceptions and interrupts for protected mode.
2862 *
2863 * @returns VBox strict status code.
2864 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2865 * @param cbInstr The number of bytes to offset rIP by in the return
2866 * address.
2867 * @param u8Vector The interrupt / exception vector number.
2868 * @param fFlags The flags.
2869 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2870 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2871 */
2872static VBOXSTRICTRC
2873iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
2874 uint8_t cbInstr,
2875 uint8_t u8Vector,
2876 uint32_t fFlags,
2877 uint16_t uErr,
2878 uint64_t uCr2) RT_NOEXCEPT
2879{
2880 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2881
2882 /*
2883 * Read the IDT entry.
2884 */
2885 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
2886 {
2887 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2888 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2889 }
2890 X86DESC Idte;
2891 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
2892 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
2893 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2894 {
2895 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2896 return rcStrict;
2897 }
2898 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
2899 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2900 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2901
2902 /*
2903 * Check the descriptor type, DPL and such.
2904 * ASSUMES this is done in the same order as described for call-gate calls.
2905 */
2906 if (Idte.Gate.u1DescType)
2907 {
2908 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2909 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2910 }
2911 bool fTaskGate = false;
2912 uint8_t f32BitGate = true;
2913 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
2914 switch (Idte.Gate.u4Type)
2915 {
2916 case X86_SEL_TYPE_SYS_UNDEFINED:
2917 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
2918 case X86_SEL_TYPE_SYS_LDT:
2919 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2920 case X86_SEL_TYPE_SYS_286_CALL_GATE:
2921 case X86_SEL_TYPE_SYS_UNDEFINED2:
2922 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
2923 case X86_SEL_TYPE_SYS_UNDEFINED3:
2924 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2925 case X86_SEL_TYPE_SYS_386_CALL_GATE:
2926 case X86_SEL_TYPE_SYS_UNDEFINED4:
2927 {
2928 /** @todo check what actually happens when the type is wrong...
2929 * esp. call gates. */
2930 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2931 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2932 }
2933
2934 case X86_SEL_TYPE_SYS_286_INT_GATE:
2935 f32BitGate = false;
2936 RT_FALL_THRU();
2937 case X86_SEL_TYPE_SYS_386_INT_GATE:
2938 fEflToClear |= X86_EFL_IF;
2939 break;
2940
2941 case X86_SEL_TYPE_SYS_TASK_GATE:
2942 fTaskGate = true;
2943#ifndef IEM_IMPLEMENTS_TASKSWITCH
2944 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
2945#endif
2946 break;
2947
2948 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
2949 f32BitGate = false;
 RT_FALL_THRU();
2950 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
2951 break;
2952
2953 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2954 }
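 /* Note! Summary of the gate dispatch selected above: interrupt gates also
 * clear EFLAGS.IF on entry (the fEflToClear update), trap gates do not,
 * task gates hand over to iemTaskSwitch further down, and the 286 gate
 * variants merely force a 16-bit frame and offset (f32BitGate). */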
2955
2956 /* Check DPL against CPL if applicable. */
2957 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
2958 {
2959 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
2960 {
2961 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
2962 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2963 }
2964 }
2965
2966 /* Is it there? */
2967 if (!Idte.Gate.u1Present)
2968 {
2969 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
2970 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2971 }
2972
2973 /* Is it a task-gate? */
2974 if (fTaskGate)
2975 {
2976 /*
2977 * Construct the error code masks based on what caused this task switch.
2978 * See Intel Instruction reference for INT.
2979 */
2980 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2981 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
2982 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
2983 RTSEL SelTSS = Idte.Gate.u16Sel;
2984
2985 /*
2986 * Fetch the TSS descriptor in the GDT.
2987 */
2988 IEMSELDESC DescTSS;
2989 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
2990 if (rcStrict != VINF_SUCCESS)
2991 {
2992 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
2993 VBOXSTRICTRC_VAL(rcStrict)));
2994 return rcStrict;
2995 }
2996
2997 /* The TSS descriptor must be a system segment and be available (not busy). */
2998 if ( DescTSS.Legacy.Gen.u1DescType
2999 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3000 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3001 {
3002 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3003 u8Vector, SelTSS, DescTSS.Legacy.au64));
3004 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3005 }
3006
3007 /* The TSS must be present. */
3008 if (!DescTSS.Legacy.Gen.u1Present)
3009 {
3010 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3011 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3012 }
3013
3014 /* Do the actual task switch. */
3015 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3016 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3017 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3018 }
3019
3020 /* A null CS is bad. */
3021 RTSEL NewCS = Idte.Gate.u16Sel;
3022 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3023 {
3024 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3025 return iemRaiseGeneralProtectionFault0(pVCpu);
3026 }
3027
3028 /* Fetch the descriptor for the new CS. */
3029 IEMSELDESC DescCS;
3030 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3031 if (rcStrict != VINF_SUCCESS)
3032 {
3033 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3034 return rcStrict;
3035 }
3036
3037 /* Must be a code segment. */
3038 if (!DescCS.Legacy.Gen.u1DescType)
3039 {
3040 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3041 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3042 }
3043 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3044 {
3045 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3046 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3047 }
3048
3049 /* Don't allow lowering the privilege level. */
3050 /** @todo Does the lowering of privileges apply to software interrupts
3051 * only? This has bearings on the more-privileged or
3052 * same-privilege stack behavior further down. A testcase would
3053 * be nice. */
3054 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3055 {
3056 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3057 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3058 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3059 }
3060
3061 /* Make sure the selector is present. */
3062 if (!DescCS.Legacy.Gen.u1Present)
3063 {
3064 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3065 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3066 }
3067
3068 /* Check the new EIP against the new CS limit. */
3069 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3070 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3071 ? Idte.Gate.u16OffsetLow
3072 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
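 /* (A 286 gate only supplies the low 16 offset bits; a 386 gate combines both offset words.) */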
3073 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3074 if (uNewEip > cbLimitCS)
3075 {
3076 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3077 u8Vector, uNewEip, cbLimitCS, NewCS));
3078 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3079 }
3080 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3081
3082 /* Calc the flag image to push. */
3083 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3084 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3085 fEfl &= ~X86_EFL_RF;
3086 else
3087 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3088
3089 /* From V8086 mode only go to CPL 0. */
3090 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3091 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3092 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3093 {
3094 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3095 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3096 }
3097
3098 /*
3099 * If the privilege level changes, we need to get a new stack from the TSS.
3100 * This in turns means validating the new SS and ESP...
3101 */
3102 if (uNewCpl != pVCpu->iem.s.uCpl)
3103 {
3104 RTSEL NewSS;
3105 uint32_t uNewEsp;
3106 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3107 if (rcStrict != VINF_SUCCESS)
3108 return rcStrict;
3109
3110 IEMSELDESC DescSS;
3111 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3112 if (rcStrict != VINF_SUCCESS)
3113 return rcStrict;
3114 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3115 if (!DescSS.Legacy.Gen.u1DefBig)
3116 {
3117 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3118 uNewEsp = (uint16_t)uNewEsp;
3119 }
3120
3121 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3122
3123 /* Check that there is sufficient space for the stack frame. */
3124 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3125 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3126 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3127 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3128
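 /* Rough frame sketch for the pushes further down (lowest address first, 4-byte
    entries for a 32-bit gate, 2-byte entries for a 16-bit one):
        [error code] EIP CS EFLAGS ESP SS               - 20/24 bytes (32-bit, non-V86)
        [error code] EIP CS EFLAGS ESP SS ES DS FS GS   - 36/40 bytes (32-bit, from V86)
    A normal (expand-up) stack must have room for this frame below the limit; for an
    expand-down stack valid addresses lie above the limit, hence the inverted checks. */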
3129 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3130 {
3131 if ( uNewEsp - 1 > cbLimitSS
3132 || uNewEsp < cbStackFrame)
3133 {
3134 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3135 u8Vector, NewSS, uNewEsp, cbStackFrame));
3136 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3137 }
3138 }
3139 else
3140 {
3141 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3142 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3143 {
3144 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3145 u8Vector, NewSS, uNewEsp, cbStackFrame));
3146 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3147 }
3148 }
3149
3150 /*
3151 * Start making changes.
3152 */
3153
3154 /* Set the new CPL so that stack accesses use it. */
3155 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3156 pVCpu->iem.s.uCpl = uNewCpl;
3157
3158 /* Create the stack frame. */
3159 RTPTRUNION uStackFrame;
3160 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3161 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3162 if (rcStrict != VINF_SUCCESS)
3163 return rcStrict;
3164 void * const pvStackFrame = uStackFrame.pv;
3165 if (f32BitGate)
3166 {
3167 if (fFlags & IEM_XCPT_FLAGS_ERR)
3168 *uStackFrame.pu32++ = uErr;
3169 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3170 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3171 uStackFrame.pu32[2] = fEfl;
3172 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3173 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3174 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3175 if (fEfl & X86_EFL_VM)
3176 {
3177 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3178 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3179 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3180 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3181 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3182 }
3183 }
3184 else
3185 {
3186 if (fFlags & IEM_XCPT_FLAGS_ERR)
3187 *uStackFrame.pu16++ = uErr;
3188 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3189 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3190 uStackFrame.pu16[2] = fEfl;
3191 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3192 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3193 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3194 if (fEfl & X86_EFL_VM)
3195 {
3196 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3197 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3198 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3199 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3200 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3201 }
3202 }
3203 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3204 if (rcStrict != VINF_SUCCESS)
3205 return rcStrict;
3206
3207 /* Mark the selectors 'accessed' (hope this is the correct time). */
 3208 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3209 * after pushing the stack frame? (Write protect the gdt + stack to
3210 * find out.) */
3211 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3212 {
3213 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3214 if (rcStrict != VINF_SUCCESS)
3215 return rcStrict;
3216 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3217 }
3218
3219 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3220 {
3221 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3222 if (rcStrict != VINF_SUCCESS)
3223 return rcStrict;
3224 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3225 }
3226
3227 /*
 3228 * Start committing the register changes (joins with the DPL=CPL branch).
3229 */
3230 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3231 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3232 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3233 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3234 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3235 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3236 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3237 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3238 * SP is loaded).
3239 * Need to check the other combinations too:
3240 * - 16-bit TSS, 32-bit handler
3241 * - 32-bit TSS, 16-bit handler */
3242 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3243 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3244 else
3245 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3246
3247 if (fEfl & X86_EFL_VM)
3248 {
3249 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3250 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3251 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3252 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3253 }
3254 }
3255 /*
3256 * Same privilege, no stack change and smaller stack frame.
3257 */
3258 else
3259 {
3260 uint64_t uNewRsp;
3261 RTPTRUNION uStackFrame;
3262 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
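 /* Same-CPL frame sketch: [error code] EIP CS EFLAGS only - no SS:ESP is pushed since
    there is no stack switch; entries are 2 or 4 bytes depending on the gate size. */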
3263 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
3264 if (rcStrict != VINF_SUCCESS)
3265 return rcStrict;
3266 void * const pvStackFrame = uStackFrame.pv;
3267
3268 if (f32BitGate)
3269 {
3270 if (fFlags & IEM_XCPT_FLAGS_ERR)
3271 *uStackFrame.pu32++ = uErr;
3272 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3273 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3274 uStackFrame.pu32[2] = fEfl;
3275 }
3276 else
3277 {
3278 if (fFlags & IEM_XCPT_FLAGS_ERR)
3279 *uStackFrame.pu16++ = uErr;
3280 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3281 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3282 uStackFrame.pu16[2] = fEfl;
3283 }
3284 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3285 if (rcStrict != VINF_SUCCESS)
3286 return rcStrict;
3287
3288 /* Mark the CS selector as 'accessed'. */
3289 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3290 {
3291 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3292 if (rcStrict != VINF_SUCCESS)
3293 return rcStrict;
3294 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3295 }
3296
3297 /*
3298 * Start committing the register changes (joins with the other branch).
3299 */
3300 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3301 }
3302
3303 /* ... register committing continues. */
3304 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3305 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3306 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3307 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3308 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3309 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3310
3311 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3312 fEfl &= ~fEflToClear;
3313 IEMMISC_SET_EFL(pVCpu, fEfl);
3314
3315 if (fFlags & IEM_XCPT_FLAGS_CR2)
3316 pVCpu->cpum.GstCtx.cr2 = uCr2;
3317
3318 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3319 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3320
3321 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3322}
3323
3324
3325/**
3326 * Implements exceptions and interrupts for long mode.
3327 *
3328 * @returns VBox strict status code.
3329 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3330 * @param cbInstr The number of bytes to offset rIP by in the return
3331 * address.
3332 * @param u8Vector The interrupt / exception vector number.
3333 * @param fFlags The flags.
3334 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3335 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3336 */
3337static VBOXSTRICTRC
3338iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3339 uint8_t cbInstr,
3340 uint8_t u8Vector,
3341 uint32_t fFlags,
3342 uint16_t uErr,
3343 uint64_t uCr2) RT_NOEXCEPT
3344{
3345 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3346
3347 /*
3348 * Read the IDT entry.
3349 */
3350 uint16_t offIdt = (uint16_t)u8Vector << 4;
3351 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3352 {
3353 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3354 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3355 }
3356 X86DESC64 Idte;
3357#ifdef _MSC_VER /* Shut up silly compiler warning. */
3358 Idte.au64[0] = 0;
3359 Idte.au64[1] = 0;
3360#endif
3361 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3362 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3363 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3364 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3365 {
3366 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3367 return rcStrict;
3368 }
3369 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3370 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3371 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3372
3373 /*
3374 * Check the descriptor type, DPL and such.
3375 * ASSUMES this is done in the same order as described for call-gate calls.
3376 */
3377 if (Idte.Gate.u1DescType)
3378 {
3379 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3380 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3381 }
3382 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3383 switch (Idte.Gate.u4Type)
3384 {
3385 case AMD64_SEL_TYPE_SYS_INT_GATE:
3386 fEflToClear |= X86_EFL_IF;
3387 break;
3388 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3389 break;
3390
3391 default:
3392 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3393 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3394 }
3395
3396 /* Check DPL against CPL if applicable. */
3397 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3398 {
3399 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
3400 {
3401 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
3402 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3403 }
3404 }
3405
3406 /* Is it there? */
3407 if (!Idte.Gate.u1Present)
3408 {
3409 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3410 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3411 }
3412
3413 /* A null CS is bad. */
3414 RTSEL NewCS = Idte.Gate.u16Sel;
3415 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3416 {
3417 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3418 return iemRaiseGeneralProtectionFault0(pVCpu);
3419 }
3420
3421 /* Fetch the descriptor for the new CS. */
3422 IEMSELDESC DescCS;
3423 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3424 if (rcStrict != VINF_SUCCESS)
3425 {
3426 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3427 return rcStrict;
3428 }
3429
3430 /* Must be a 64-bit code segment. */
3431 if (!DescCS.Long.Gen.u1DescType)
3432 {
3433 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3434 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3435 }
3436 if ( !DescCS.Long.Gen.u1Long
3437 || DescCS.Long.Gen.u1DefBig
3438 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3439 {
3440 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3441 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3442 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3443 }
3444
3445 /* Don't allow lowering the privilege level. For non-conforming CS
3446 selectors, the CS.DPL sets the privilege level the trap/interrupt
3447 handler runs at. For conforming CS selectors, the CPL remains
3448 unchanged, but the CS.DPL must be <= CPL. */
3449 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3450 * when CPU in Ring-0. Result \#GP? */
3451 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3452 {
3453 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3454 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3455 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3456 }
3457
3458
3459 /* Make sure the selector is present. */
3460 if (!DescCS.Legacy.Gen.u1Present)
3461 {
3462 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3463 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3464 }
3465
3466 /* Check that the new RIP is canonical. */
3467 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3468 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3469 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3470 if (!IEM_IS_CANONICAL(uNewRip))
3471 {
3472 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3473 return iemRaiseGeneralProtectionFault0(pVCpu);
3474 }
3475
3476 /*
3477 * If the privilege level changes or if the IST isn't zero, we need to get
3478 * a new stack from the TSS.
3479 */
3480 uint64_t uNewRsp;
3481 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3482 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3483 if ( uNewCpl != pVCpu->iem.s.uCpl
3484 || Idte.Gate.u3IST != 0)
3485 {
3486 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3487 if (rcStrict != VINF_SUCCESS)
3488 return rcStrict;
3489 }
3490 else
3491 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3492 uNewRsp &= ~(uint64_t)0xf;
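 /* Note: a non-zero IST forces a stack switch even when the CPL does not change,
    while IST=0 uses the RSPn field for the target CPL from the 64-bit TSS (or keeps
    the current stack when staying at the same CPL).  In long mode the new RSP is
    always aligned down to 16 bytes and SS:RSP is pushed unconditionally, so the
    frame created below is 5 qwords (6 with an error code). */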
3493
3494 /*
3495 * Calc the flag image to push.
3496 */
3497 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3498 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3499 fEfl &= ~X86_EFL_RF;
3500 else
3501 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3502
3503 /*
3504 * Start making changes.
3505 */
3506 /* Set the new CPL so that stack accesses use it. */
3507 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3508 pVCpu->iem.s.uCpl = uNewCpl;
3509
3510 /* Create the stack frame. */
3511 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3512 RTPTRUNION uStackFrame;
3513 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3514 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3515 if (rcStrict != VINF_SUCCESS)
3516 return rcStrict;
3517 void * const pvStackFrame = uStackFrame.pv;
3518
3519 if (fFlags & IEM_XCPT_FLAGS_ERR)
3520 *uStackFrame.pu64++ = uErr;
3521 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3522 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3523 uStackFrame.pu64[2] = fEfl;
3524 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3525 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3526 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3527 if (rcStrict != VINF_SUCCESS)
3528 return rcStrict;
3529
 3530 /* Mark the CS selector 'accessed' (hope this is the correct time). */
 3531 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3532 * after pushing the stack frame? (Write protect the gdt + stack to
3533 * find out.) */
3534 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3535 {
3536 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3537 if (rcStrict != VINF_SUCCESS)
3538 return rcStrict;
3539 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3540 }
3541
3542 /*
 3543 * Start committing the register changes.
3544 */
3545 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3546 * hidden registers when interrupting 32-bit or 16-bit code! */
3547 if (uNewCpl != uOldCpl)
3548 {
3549 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3550 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3551 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3552 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3553 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3554 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3555 }
3556 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3557 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3558 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3559 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3560 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3561 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3562 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3563 pVCpu->cpum.GstCtx.rip = uNewRip;
3564
3565 fEfl &= ~fEflToClear;
3566 IEMMISC_SET_EFL(pVCpu, fEfl);
3567
3568 if (fFlags & IEM_XCPT_FLAGS_CR2)
3569 pVCpu->cpum.GstCtx.cr2 = uCr2;
3570
3571 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3572 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3573
3574 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3575}
3576
3577
3578/**
3579 * Implements exceptions and interrupts.
3580 *
 3581 * All exceptions and interrupts go through this function!
3582 *
3583 * @returns VBox strict status code.
3584 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3585 * @param cbInstr The number of bytes to offset rIP by in the return
3586 * address.
3587 * @param u8Vector The interrupt / exception vector number.
3588 * @param fFlags The flags.
3589 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3590 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3591 */
3592VBOXSTRICTRC
3593iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3594 uint8_t cbInstr,
3595 uint8_t u8Vector,
3596 uint32_t fFlags,
3597 uint16_t uErr,
3598 uint64_t uCr2) RT_NOEXCEPT
3599{
3600 /*
3601 * Get all the state that we might need here.
3602 */
3603 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3604 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3605
3606#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3607 /*
3608 * Flush prefetch buffer
3609 */
3610 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3611#endif
3612
3613 /*
3614 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3615 */
3616 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3617 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3618 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3619 | IEM_XCPT_FLAGS_BP_INSTR
3620 | IEM_XCPT_FLAGS_ICEBP_INSTR
3621 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3622 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3623 {
3624 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3625 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3626 u8Vector = X86_XCPT_GP;
3627 uErr = 0;
3628 }
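 /* I.e. a software INT n executed in V8086 mode with IOPL < 3 is converted to a
    #GP(0) right here, before any IDT lookup takes place. */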
3629#ifdef DBGFTRACE_ENABLED
3630 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3631 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3632 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3633#endif
3634
3635 /*
3636 * Evaluate whether NMI blocking should be in effect.
3637 * Normally, NMI blocking is in effect whenever we inject an NMI.
3638 */
3639 bool fBlockNmi;
3640 if ( u8Vector == X86_XCPT_NMI
3641 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
3642 fBlockNmi = true;
3643 else
3644 fBlockNmi = false;
3645
3646#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3647 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3648 {
3649 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3650 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3651 return rcStrict0;
3652
3653 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3654 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3655 {
3656 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3657 fBlockNmi = false;
3658 }
3659 }
3660#endif
3661
3662#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3663 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3664 {
3665 /*
3666 * If the event is being injected as part of VMRUN, it isn't subject to event
3667 * intercepts in the nested-guest. However, secondary exceptions that occur
3668 * during injection of any event -are- subject to exception intercepts.
3669 *
3670 * See AMD spec. 15.20 "Event Injection".
3671 */
3672 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3673 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3674 else
3675 {
3676 /*
3677 * Check and handle if the event being raised is intercepted.
3678 */
3679 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
3680 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3681 return rcStrict0;
3682 }
3683 }
3684#endif
3685
3686 /*
3687 * Set NMI blocking if necessary.
3688 */
3689 if ( fBlockNmi
3690 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
3691 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
3692
3693 /*
3694 * Do recursion accounting.
3695 */
3696 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3697 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3698 if (pVCpu->iem.s.cXcptRecursions == 0)
3699 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3700 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3701 else
3702 {
3703 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3704 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3705 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3706
3707 if (pVCpu->iem.s.cXcptRecursions >= 4)
3708 {
3709#ifdef DEBUG_bird
3710 AssertFailed();
3711#endif
3712 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3713 }
3714
3715 /*
3716 * Evaluate the sequence of recurring events.
3717 */
3718 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3719 NULL /* pXcptRaiseInfo */);
3720 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3721 { /* likely */ }
3722 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3723 {
3724 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3725 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3726 u8Vector = X86_XCPT_DF;
3727 uErr = 0;
3728#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3729 /* VMX nested-guest #DF intercept needs to be checked here. */
3730 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3731 {
3732 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3733 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3734 return rcStrict0;
3735 }
3736#endif
3737 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3738 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3739 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3740 }
3741 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3742 {
3743 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3744 return iemInitiateCpuShutdown(pVCpu);
3745 }
3746 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3747 {
3748 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3749 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3750 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3751 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3752 return VERR_EM_GUEST_CPU_HANG;
3753 }
3754 else
3755 {
3756 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3757 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3758 return VERR_IEM_IPE_9;
3759 }
3760
3761 /*
 3762 * The 'EXT' bit is set when an exception occurs during delivery of an external
 3763 * event (such as an interrupt or an earlier exception)[1]. The privileged software
 3764 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
 3765 * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
3766 *
3767 * [1] - Intel spec. 6.13 "Error Code"
3768 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
3769 * [3] - Intel Instruction reference for INT n.
3770 */
3771 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
3772 && (fFlags & IEM_XCPT_FLAGS_ERR)
3773 && u8Vector != X86_XCPT_PF
3774 && u8Vector != X86_XCPT_DF)
3775 {
3776 uErr |= X86_TRAP_ERR_EXTERNAL;
3777 }
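 /* Example: a #GP raised while delivering an external interrupt gets EXT=1 in its
    error code, whereas a #GP raised directly by an INT n instruction keeps EXT=0. */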
3778 }
3779
3780 pVCpu->iem.s.cXcptRecursions++;
3781 pVCpu->iem.s.uCurXcpt = u8Vector;
3782 pVCpu->iem.s.fCurXcpt = fFlags;
3783 pVCpu->iem.s.uCurXcptErr = uErr;
3784 pVCpu->iem.s.uCurXcptCr2 = uCr2;
3785
3786 /*
3787 * Extensive logging.
3788 */
3789#if defined(LOG_ENABLED) && defined(IN_RING3)
3790 if (LogIs3Enabled())
3791 {
3792 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
3793 PVM pVM = pVCpu->CTX_SUFF(pVM);
3794 char szRegs[4096];
3795 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3796 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3797 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3798 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3799 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3800 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3801 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3802 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3803 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3804 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3805 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3806 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3807 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3808 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3809 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3810 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3811 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3812 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3813 " efer=%016VR{efer}\n"
3814 " pat=%016VR{pat}\n"
3815 " sf_mask=%016VR{sf_mask}\n"
3816 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3817 " lstar=%016VR{lstar}\n"
3818 " star=%016VR{star} cstar=%016VR{cstar}\n"
3819 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3820 );
3821
3822 char szInstr[256];
3823 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3824 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3825 szInstr, sizeof(szInstr), NULL);
3826 Log3(("%s%s\n", szRegs, szInstr));
3827 }
3828#endif /* LOG_ENABLED */
3829
3830 /*
3831 * Call the mode specific worker function.
3832 */
3833 VBOXSTRICTRC rcStrict;
3834 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
3835 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3836 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
3837 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3838 else
3839 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3840
3841 /* Flush the prefetch buffer. */
3842#ifdef IEM_WITH_CODE_TLB
3843 pVCpu->iem.s.pbInstrBuf = NULL;
3844#else
3845 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
3846#endif
3847
3848 /*
3849 * Unwind.
3850 */
3851 pVCpu->iem.s.cXcptRecursions--;
3852 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
3853 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
3854 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
3855 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
3856 pVCpu->iem.s.cXcptRecursions + 1));
3857 return rcStrict;
3858}
3859
3860#ifdef IEM_WITH_SETJMP
3861/**
3862 * See iemRaiseXcptOrInt. Will not return.
3863 */
3864DECL_NO_RETURN(void)
3865iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
3866 uint8_t cbInstr,
3867 uint8_t u8Vector,
3868 uint32_t fFlags,
3869 uint16_t uErr,
3870 uint64_t uCr2) RT_NOEXCEPT
3871{
3872 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3873 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3874}
3875#endif
3876
3877
3878/** \#DE - 00. */
3879VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
3880{
3881 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3882}
3883
3884
3885/** \#DB - 01.
 3886 * @note This automatically clears DR7.GD. */
3887VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
3888{
3889 /** @todo set/clear RF. */
3890 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3891 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3892}
3893
3894
3895/** \#BR - 05. */
3896VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
3897{
3898 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3899}
3900
3901
3902/** \#UD - 06. */
3903VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
3904{
3905 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3906}
3907
3908
3909/** \#NM - 07. */
3910VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
3911{
3912 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3913}
3914
3915
3916/** \#TS(err) - 0a. */
3917VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3918{
3919 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3920}
3921
3922
3923/** \#TS(tr) - 0a. */
3924VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
3925{
3926 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3927 pVCpu->cpum.GstCtx.tr.Sel, 0);
3928}
3929
3930
3931/** \#TS(0) - 0a. */
3932VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
3933{
3934 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3935 0, 0);
3936}
3937
3938
3939/** \#TS(err) - 0a. */
3940VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3941{
3942 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3943 uSel & X86_SEL_MASK_OFF_RPL, 0);
3944}
3945
3946
3947/** \#NP(err) - 0b. */
3948VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3949{
3950 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3951}
3952
3953
3954/** \#NP(sel) - 0b. */
3955VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3956{
3957 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3958 uSel & ~X86_SEL_RPL, 0);
3959}
3960
3961
3962/** \#SS(seg) - 0c. */
3963VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3964{
3965 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3966 uSel & ~X86_SEL_RPL, 0);
3967}
3968
3969
3970/** \#SS(err) - 0c. */
3971VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3972{
3973 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3974}
3975
3976
3977/** \#GP(n) - 0d. */
3978VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3979{
3980 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3981}
3982
3983
3984/** \#GP(0) - 0d. */
3985VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
3986{
3987 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
3988}
3989
3990#ifdef IEM_WITH_SETJMP
3991/** \#GP(0) - 0d. */
3992DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) RT_NOEXCEPT
3993{
3994 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
3995}
3996#endif
3997
3998
3999/** \#GP(sel) - 0d. */
4000VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4001{
4002 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4003 Sel & ~X86_SEL_RPL, 0);
4004}
4005
4006
4007/** \#GP(0) - 0d. */
4008VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4009{
4010 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4011}
4012
4013
4014/** \#GP(sel) - 0d. */
4015VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4016{
4017 NOREF(iSegReg); NOREF(fAccess);
4018 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4019 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4020}
4021
4022#ifdef IEM_WITH_SETJMP
4023/** \#GP(sel) - 0d, longjmp. */
4024DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4025{
4026 NOREF(iSegReg); NOREF(fAccess);
4027 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4028 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4029}
4030#endif
4031
4032/** \#GP(sel) - 0d. */
4033VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4034{
4035 NOREF(Sel);
4036 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4037}
4038
4039#ifdef IEM_WITH_SETJMP
4040/** \#GP(sel) - 0d, longjmp. */
4041DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4042{
4043 NOREF(Sel);
4044 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4045}
4046#endif
4047
4048
4049/** \#GP(sel) - 0d. */
4050VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4051{
4052 NOREF(iSegReg); NOREF(fAccess);
4053 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4054}
4055
4056#ifdef IEM_WITH_SETJMP
4057/** \#GP(sel) - 0d, longjmp. */
4058DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4059{
4060 NOREF(iSegReg); NOREF(fAccess);
4061 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4062}
4063#endif
4064
4065
4066/** \#PF(n) - 0e. */
4067VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc) RT_NOEXCEPT
4068{
4069 uint16_t uErr;
4070 switch (rc)
4071 {
4072 case VERR_PAGE_NOT_PRESENT:
4073 case VERR_PAGE_TABLE_NOT_PRESENT:
4074 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4075 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4076 uErr = 0;
4077 break;
4078
4079 default:
4080 AssertMsgFailed(("%Rrc\n", rc));
4081 RT_FALL_THRU();
4082 case VERR_ACCESS_DENIED:
4083 uErr = X86_TRAP_PF_P;
4084 break;
4085
4086 /** @todo reserved */
4087 }
4088
4089 if (pVCpu->iem.s.uCpl == 3)
4090 uErr |= X86_TRAP_PF_US;
4091
4092 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4093 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4094 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4095 uErr |= X86_TRAP_PF_ID;
4096
4097#if 0 /* This is so much non-sense, really. Why was it done like that? */
4098 /* Note! RW access callers reporting a WRITE protection fault, will clear
4099 the READ flag before calling. So, read-modify-write accesses (RW)
4100 can safely be reported as READ faults. */
4101 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4102 uErr |= X86_TRAP_PF_RW;
4103#else
4104 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4105 {
4106 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4107 /// (regardless of outcome of the comparison in the latter case).
4108 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4109 uErr |= X86_TRAP_PF_RW;
4110 }
4111#endif
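 /* For reference, the #PF error code bits assembled in this function:
      bit 0 (P)   - set for protection violations, clear for not-present pages,
      bit 1 (W/R) - set for write accesses,
      bit 2 (U/S) - set when the access was made from CPL 3,
      bit 4 (I/D) - set for instruction fetches when no-execute paging is in effect.
    E.g. a user-mode write to a present, read-only page yields uErr = P|RW|US = 7. */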
4112
4113 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4114 uErr, GCPtrWhere);
4115}
4116
4117#ifdef IEM_WITH_SETJMP
4118/** \#PF(n) - 0e, longjmp. */
4119DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc) RT_NOEXCEPT
4120{
4121 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
4122}
4123#endif
4124
4125
4126/** \#MF(0) - 10. */
4127VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu)
4128{
4129 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4130}
4131
4132
4133/** \#AC(0) - 11. */
4134VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu)
4135{
4136 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4137}
4138
4139#ifdef IEM_WITH_SETJMP
4140/** \#AC(0) - 11, longjmp. */
4141DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
4142{
4143 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4144}
4145#endif
4146
4147
4148/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4149IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4150{
4151 NOREF(cbInstr);
4152 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4153}
4154
4155
4156/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4157IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4158{
4159 NOREF(cbInstr);
4160 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4161}
4162
4163
4164/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4165IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4166{
4167 NOREF(cbInstr);
4168 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4169}
4170
4171
4172/** @} */
4173
4174/** @name Common opcode decoders.
4175 * @{
4176 */
4177//#include <iprt/mem.h>
4178
4179/**
4180 * Used to add extra details about a stub case.
4181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4182 */
4183void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4184{
4185#if defined(LOG_ENABLED) && defined(IN_RING3)
4186 PVM pVM = pVCpu->CTX_SUFF(pVM);
4187 char szRegs[4096];
4188 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4189 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4190 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4191 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4192 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4193 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4194 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4195 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4196 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4197 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4198 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4199 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4200 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4201 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4202 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4203 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4204 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4205 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4206 " efer=%016VR{efer}\n"
4207 " pat=%016VR{pat}\n"
4208 " sf_mask=%016VR{sf_mask}\n"
4209 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4210 " lstar=%016VR{lstar}\n"
4211 " star=%016VR{star} cstar=%016VR{cstar}\n"
4212 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4213 );
4214
4215 char szInstr[256];
4216 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4217 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4218 szInstr, sizeof(szInstr), NULL);
4219
4220 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4221#else
 4222 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4223#endif
4224}
4225
4226/** @} */
4227
4228
4229
4230/** @name Register Access.
4231 * @{
4232 */
4233
4234/**
 4235 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4236 *
4237 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4238 * segment limit.
4239 *
4240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4241 * @param offNextInstr The offset of the next instruction.
4242 */
4243VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPUCC pVCpu, int8_t offNextInstr) RT_NOEXCEPT
4244{
4245 switch (pVCpu->iem.s.enmEffOpSize)
4246 {
4247 case IEMMODE_16BIT:
4248 {
4249 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
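 /* Note: the uint16_t arithmetic wraps IP at 64K (e.g. IP=0xFFF0 with a net forward
    displacement of 0x20 lands on 0x0010); only the CS limit check below can reject
    the result. */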
4250 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
4251 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4252 return iemRaiseGeneralProtectionFault0(pVCpu);
4253 pVCpu->cpum.GstCtx.rip = uNewIp;
4254 break;
4255 }
4256
4257 case IEMMODE_32BIT:
4258 {
4259 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4260 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4261
4262 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4263 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
4264 return iemRaiseGeneralProtectionFault0(pVCpu);
4265 pVCpu->cpum.GstCtx.rip = uNewEip;
4266 break;
4267 }
4268
4269 case IEMMODE_64BIT:
4270 {
4271 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4272
4273 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4274 if (!IEM_IS_CANONICAL(uNewRip))
4275 return iemRaiseGeneralProtectionFault0(pVCpu);
4276 pVCpu->cpum.GstCtx.rip = uNewRip;
4277 break;
4278 }
4279
4280 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4281 }
4282
4283 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4284
4285#ifndef IEM_WITH_CODE_TLB
4286 /* Flush the prefetch buffer. */
4287 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4288#endif
4289
4290 return VINF_SUCCESS;
4291}
4292
4293
4294/**
4295 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4296 *
4297 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4298 * segment limit.
4299 *
4300 * @returns Strict VBox status code.
4301 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4302 * @param offNextInstr The offset of the next instruction.
4303 */
4304VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPUCC pVCpu, int16_t offNextInstr) RT_NOEXCEPT
4305{
4306 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4307
4308 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4309 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
4310 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4311 return iemRaiseGeneralProtectionFault0(pVCpu);
4312 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4313 pVCpu->cpum.GstCtx.rip = uNewIp;
4314 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4315
4316#ifndef IEM_WITH_CODE_TLB
4317 /* Flush the prefetch buffer. */
4318 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4319#endif
4320
4321 return VINF_SUCCESS;
4322}
4323
4324
4325/**
4326 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4327 *
4328 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4329 * segment limit.
4330 *
4331 * @returns Strict VBox status code.
4332 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4333 * @param offNextInstr The offset of the next instruction.
4334 */
4335VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPUCC pVCpu, int32_t offNextInstr) RT_NOEXCEPT
4336{
4337 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
4338
4339 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
4340 {
4341 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4342
4343 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4344 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
4345 return iemRaiseGeneralProtectionFault0(pVCpu);
4346 pVCpu->cpum.GstCtx.rip = uNewEip;
4347 }
4348 else
4349 {
4350 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4351
4352 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4353 if (!IEM_IS_CANONICAL(uNewRip))
4354 return iemRaiseGeneralProtectionFault0(pVCpu);
4355 pVCpu->cpum.GstCtx.rip = uNewRip;
4356 }
4357 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4358
4359#ifndef IEM_WITH_CODE_TLB
4360 /* Flush the prefetch buffer. */
4361 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4362#endif
4363
4364 return VINF_SUCCESS;
4365}
4366
4367
4368/**
4369 * Performs a near jump to the specified address.
4370 *
4371 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4372 * segment limit.
4373 *
4374 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4375 * @param uNewRip The new RIP value.
4376 */
4377VBOXSTRICTRC iemRegRipJump(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4378{
4379 switch (pVCpu->iem.s.enmEffOpSize)
4380 {
4381 case IEMMODE_16BIT:
4382 {
4383 Assert(uNewRip <= UINT16_MAX);
4384 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
4385 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4386 return iemRaiseGeneralProtectionFault0(pVCpu);
4387 /** @todo Test 16-bit jump in 64-bit mode. */
4388 pVCpu->cpum.GstCtx.rip = uNewRip;
4389 break;
4390 }
4391
4392 case IEMMODE_32BIT:
4393 {
4394 Assert(uNewRip <= UINT32_MAX);
4395 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4396 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4397
4398 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
4399 return iemRaiseGeneralProtectionFault0(pVCpu);
4400 pVCpu->cpum.GstCtx.rip = uNewRip;
4401 break;
4402 }
4403
4404 case IEMMODE_64BIT:
4405 {
4406 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4407
4408 if (!IEM_IS_CANONICAL(uNewRip))
4409 return iemRaiseGeneralProtectionFault0(pVCpu);
4410 pVCpu->cpum.GstCtx.rip = uNewRip;
4411 break;
4412 }
4413
4414 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4415 }
4416
4417 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4418
4419#ifndef IEM_WITH_CODE_TLB
4420 /* Flush the prefetch buffer. */
4421 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4422#endif
4423
4424 return VINF_SUCCESS;
4425}
4426
4427/** @} */
4428
4429
4430/** @name FPU access and helpers.
4431 *
4432 * @{
4433 */
4434
4435/**
4436 * Updates the x87.DS and FPUDP registers.
4437 *
4438 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4439 * @param pFpuCtx The FPU context.
4440 * @param iEffSeg The effective segment register.
4441 * @param GCPtrEff The effective address relative to @a iEffSeg.
4442 */
4443DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4444{
4445 RTSEL sel;
4446 switch (iEffSeg)
4447 {
4448 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4449 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4450 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4451 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4452 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4453 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4454 default:
4455 AssertMsgFailed(("%d\n", iEffSeg));
4456 sel = pVCpu->cpum.GstCtx.ds.Sel;
4457 }
 4458 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
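 /* Rough sketch of what the branches below record:
      real/V86 mode:  FPUDP = 32-bit linear address (sel * 16 + offset), DS = 0,
      protected mode: FPUDP = 32-bit effective address, DS = selector,
      long mode:      the 64-bit effective address overlays the FPUDP/DS pair. */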
4459 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4460 {
4461 pFpuCtx->DS = 0;
4462 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4463 }
4464 else if (!IEM_IS_LONG_MODE(pVCpu))
4465 {
4466 pFpuCtx->DS = sel;
4467 pFpuCtx->FPUDP = GCPtrEff;
4468 }
4469 else
4470 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4471}
4472
4473
4474/**
4475 * Rotates the stack registers in the push direction.
4476 *
4477 * @param pFpuCtx The FPU context.
4478 * @remarks This is a complete waste of time, but fxsave stores the registers in
4479 * stack order.
4480 */
4481DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4482{
4483 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4484 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4485 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4486 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4487 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4488 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4489 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4490 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4491 pFpuCtx->aRegs[0].r80 = r80Tmp;
4492}
4493
4494
4495/**
4496 * Rotates the stack registers in the pop direction.
4497 *
4498 * @param pFpuCtx The FPU context.
4499 * @remarks This is a complete waste of time, but fxsave stores the registers in
4500 * stack order.
4501 */
4502DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4503{
4504 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4505 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4506 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4507 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4508 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4509 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4510 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4511 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4512 pFpuCtx->aRegs[7].r80 = r80Tmp;
4513}
4514
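/*
 * Illustrative equivalent (under #if 0): the pop rotation above is just a one
 * step rotate of aRegs[], unrolled for speed.  A loop form would look like this:
 */
#if 0
static void iemFpuRotateStackPopLoopExample(PX86FXSTATE pFpuCtx)
{
    RTFLOAT80U const r80Tmp = pFpuCtx->aRegs[0].r80;
    for (unsigned i = 0; i < 7; i++)
        pFpuCtx->aRegs[i].r80 = pFpuCtx->aRegs[i + 1].r80; /* shift every register one slot down */
    pFpuCtx->aRegs[7].r80 = r80Tmp;                        /* old ST(0) wraps around to the end */
}
#endif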
4515
4516/**
4517 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4518 * exception prevents it.
4519 *
4520 * @param pResult The FPU operation result to push.
4521 * @param pFpuCtx The FPU context.
4522 */
4523static void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4524{
4525 /* Update FSW and bail if there are pending exceptions afterwards. */
4526 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4527 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4528 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4529 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4530 {
4531 pFpuCtx->FSW = fFsw;
4532 return;
4533 }
4534
4535 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4536 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4537 {
4538 /* All is fine, push the actual value. */
4539 pFpuCtx->FTW |= RT_BIT(iNewTop);
4540 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4541 }
4542 else if (pFpuCtx->FCW & X86_FCW_IM)
4543 {
4544 /* Masked stack overflow, push QNaN. */
4545 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4546 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4547 }
4548 else
4549 {
4550 /* Raise stack overflow, don't push anything. */
4551 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4552 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4553 return;
4554 }
4555
4556 fFsw &= ~X86_FSW_TOP_MASK;
4557 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4558 pFpuCtx->FSW = fFsw;
4559
4560 iemFpuRotateStackPush(pFpuCtx);
4561}
4562
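/*
 * Note on the TOP arithmetic above (illustrative): X86_FSW_TOP_SMASK is the
 * unshifted 3-bit TOP mask, so (TOP + 7) & X86_FSW_TOP_SMASK is TOP - 1 modulo
 * 8.  E.g. TOP=0 gives iNewTop=7 and TOP=3 gives 2, which is the pre-decrement
 * an x87 push performs before storing into the new ST(0).
 */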
4563
4564/**
4565 * Stores a result in a FPU register and updates the FSW and FTW.
4566 *
4567 * @param pFpuCtx The FPU context.
4568 * @param pResult The result to store.
4569 * @param iStReg Which FPU register to store it in.
4570 */
4571static void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4572{
4573 Assert(iStReg < 8);
4574 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
4575 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4576 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
4577 pFpuCtx->FTW |= RT_BIT(iReg);
4578 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4579}
4580
4581
4582/**
4583 * Only updates the FPU status word (FSW) with the result of the current
4584 * instruction.
4585 *
4586 * @param pFpuCtx The FPU context.
4587 * @param u16FSW The FSW output of the current instruction.
4588 */
4589static void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4590{
4591 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4592 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
4593}
4594
4595
4596/**
4597 * Pops one item off the FPU stack if no pending exception prevents it.
4598 *
4599 * @param pFpuCtx The FPU context.
4600 */
4601static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4602{
4603 /* Check pending exceptions. */
4604 uint16_t uFSW = pFpuCtx->FSW;
4605 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4606 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4607 return;
4608
4609 /* TOP--. */
4610 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4611 uFSW &= ~X86_FSW_TOP_MASK;
4612 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4613 pFpuCtx->FSW = uFSW;
4614
4615 /* Mark the previous ST0 as empty. */
4616 iOldTop >>= X86_FSW_TOP_SHIFT;
4617 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4618
4619 /* Rotate the registers. */
4620 iemFpuRotateStackPop(pFpuCtx);
4621}
4622
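/*
 * Note on the TOP arithmetic above (illustrative): adding 9 << X86_FSW_TOP_SHIFT
 * and masking with X86_FSW_TOP_MASK increments the 3-bit TOP field by one
 * modulo 8 (9 mod 8 = 1), e.g. TOP=7 wraps to 0, which is the post-increment
 * an x87 pop performs.
 */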
4623
4624/**
4625 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4626 *
4627 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4628 * @param pResult The FPU operation result to push.
4629 */
4630void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult) RT_NOEXCEPT
4631{
4632 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4633 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4634 iemFpuMaybePushResult(pResult, pFpuCtx);
4635}
4636
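/*
 * Illustrative caller sketch (under #if 0, not an actual instruction body):
 * a push style FPU instruction fills an IEMFPURESULT from its arithmetic
 * worker and hands it to iemFpuPushResult, which deals with TOP, FTW and the
 * pending exception checks.  The worker call is elided here.
 */
#if 0
static void iemExampleFpuPush(PVMCPUCC pVCpu)
{
    IEMFPURESULT FpuRes;
    RT_ZERO(FpuRes);
    /* ... the arithmetic worker fills FpuRes.r80Result and FpuRes.FSW here ... */
    iemFpuPushResult(pVCpu, &FpuRes);
}
#endif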
4637
4638/**
4639 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4640 * and sets FPUDP and FPUDS.
4641 *
4642 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4643 * @param pResult The FPU operation result to push.
4644 * @param iEffSeg The effective segment register.
4645 * @param GCPtrEff The effective address relative to @a iEffSeg.
4646 */
4647void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4648{
4649 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4650 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4651 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4652 iemFpuMaybePushResult(pResult, pFpuCtx);
4653}
4654
4655
4656/**
4657 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
4658 * unless a pending exception prevents it.
4659 *
4660 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4661 * @param pResult The FPU operation result to store and push.
4662 */
4663void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult) RT_NOEXCEPT
4664{
4665 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4666 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4667
4668 /* Update FSW and bail if there are pending exceptions afterwards. */
4669 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4670 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4671 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4672 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4673 {
4674 pFpuCtx->FSW = fFsw;
4675 return;
4676 }
4677
4678 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4679 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4680 {
4681 /* All is fine, push the actual value. */
4682 pFpuCtx->FTW |= RT_BIT(iNewTop);
4683 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
4684 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
4685 }
4686 else if (pFpuCtx->FCW & X86_FCW_IM)
4687 {
4688 /* Masked stack overflow, push QNaN. */
4689 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4690 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
4691 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4692 }
4693 else
4694 {
4695 /* Raise stack overflow, don't push anything. */
4696 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4697 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4698 return;
4699 }
4700
4701 fFsw &= ~X86_FSW_TOP_MASK;
4702 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4703 pFpuCtx->FSW = fFsw;
4704
4705 iemFpuRotateStackPush(pFpuCtx);
4706}
4707
4708
4709/**
4710 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4711 * FOP.
4712 *
4713 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4714 * @param pResult The result to store.
4715 * @param iStReg Which FPU register to store it in.
4716 */
4717void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4718{
4719 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4720 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4721 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
4722}
4723
4724
4725/**
4726 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4727 * FOP, and then pops the stack.
4728 *
4729 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4730 * @param pResult The result to store.
4731 * @param iStReg Which FPU register to store it in.
4732 */
4733void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4734{
4735 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4736 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4737 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
4738 iemFpuMaybePopOne(pFpuCtx);
4739}
4740
4741
4742/**
4743 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4744 * FPUDP, and FPUDS.
4745 *
4746 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4747 * @param pResult The result to store.
4748 * @param iStReg Which FPU register to store it in.
4749 * @param iEffSeg The effective memory operand selector register.
4750 * @param GCPtrEff The effective memory operand offset.
4751 */
4752void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
4753 uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4754{
4755 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4756 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4757 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4758 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
4759}
4760
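/*
 * Illustrative note: an arithmetic instruction with a memory source would
 * typically call the variant above as
 * iemFpuStoreResultWithMemOp(pVCpu, &FpuRes, 0, iEffSeg, GCPtrEff) so that
 * FPUDP/FPUDS point at the memory operand while the result lands in ST(0);
 * FpuRes, iEffSeg and GCPtrEff are assumed locals of the caller.
 */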
4761
4762/**
4763 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4764 * FPUDP, and FPUDS, and then pops the stack.
4765 *
4766 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4767 * @param pResult The result to store.
4768 * @param iStReg Which FPU register to store it in.
4769 * @param iEffSeg The effective memory operand selector register.
4770 * @param GCPtrEff The effective memory operand offset.
4771 */
4772void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
4773 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4774{
4775 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4776 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4777 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4778 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
4779 iemFpuMaybePopOne(pFpuCtx);
4780}
4781
4782
4783/**
4784 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
4785 *
4786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4787 */
4788void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu) RT_NOEXCEPT
4789{
4790 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4791 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4792}
4793
4794
4795/**
4796 * Updates the FSW, FOP, FPUIP, and FPUCS.
4797 *
4798 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4799 * @param u16FSW The FSW from the current instruction.
4800 */
4801void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4802{
4803 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4804 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4805 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4806}
4807
4808
4809/**
4810 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
4811 *
4812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4813 * @param u16FSW The FSW from the current instruction.
4814 */
4815void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4816{
4817 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4818 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4819 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4820 iemFpuMaybePopOne(pFpuCtx);
4821}
4822
4823
4824/**
4825 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
4826 *
4827 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4828 * @param u16FSW The FSW from the current instruction.
4829 * @param iEffSeg The effective memory operand selector register.
4830 * @param GCPtrEff The effective memory operand offset.
4831 */
4832void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4833{
4834 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4835 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4836 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4837 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4838}
4839
4840
4841/**
4842 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
4843 *
4844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4845 * @param u16FSW The FSW from the current instruction.
4846 */
4847void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4848{
4849 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4850 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4851 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4852 iemFpuMaybePopOne(pFpuCtx);
4853 iemFpuMaybePopOne(pFpuCtx);
4854}
4855
4856
4857/**
4858 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
4859 *
4860 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4861 * @param u16FSW The FSW from the current instruction.
4862 * @param iEffSeg The effective memory operand selector register.
4863 * @param GCPtrEff The effective memory operand offset.
4864 */
4865void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4866{
4867 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4868 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4869 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4870 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4871 iemFpuMaybePopOne(pFpuCtx);
4872}
4873
4874
4875/**
4876 * Worker routine for raising an FPU stack underflow exception.
4877 *
4878 * @param pFpuCtx The FPU context.
4879 * @param iStReg The stack register being accessed.
4880 */
4881static void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
4882{
4883 Assert(iStReg < 8 || iStReg == UINT8_MAX);
4884 if (pFpuCtx->FCW & X86_FCW_IM)
4885 {
4886 /* Masked underflow. */
4887 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4888 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
4889 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
4890 if (iStReg != UINT8_MAX)
4891 {
4892 pFpuCtx->FTW |= RT_BIT(iReg);
4893 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
4894 }
4895 }
4896 else
4897 {
4898 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4899 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4900 }
4901}
4902
4903
4904/**
4905 * Raises a FPU stack underflow exception.
4906 *
4907 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4908 * @param iStReg The destination register that should be loaded
4909 * with QNaN if \#IS is not masked. Specify
4910 * UINT8_MAX if none (like for fcom).
4911 */
4912void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
4913{
4914 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4915 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4916 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
4917}
4918
4919
4920void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4921{
4922 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4923 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4924 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4925 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
4926}
4927
4928
4929void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
4930{
4931 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4932 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4933 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
4934 iemFpuMaybePopOne(pFpuCtx);
4935}
4936
4937
4938void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4939{
4940 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4941 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4942 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4943 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
4944 iemFpuMaybePopOne(pFpuCtx);
4945}
4946
4947
4948void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu) RT_NOEXCEPT
4949{
4950 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4951 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4952 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
4953 iemFpuMaybePopOne(pFpuCtx);
4954 iemFpuMaybePopOne(pFpuCtx);
4955}
4956
4957
4958void iemFpuStackPushUnderflow(PVMCPUCC pVCpu) RT_NOEXCEPT
4959{
4960 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4961 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4962
4963 if (pFpuCtx->FCW & X86_FCW_IM)
4964 {
4965 /* Masked underflow - Push QNaN. */
4966 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
4967 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4968 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
4969 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4970 pFpuCtx->FTW |= RT_BIT(iNewTop);
4971 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4972 iemFpuRotateStackPush(pFpuCtx);
4973 }
4974 else
4975 {
4976 /* Exception pending - don't change TOP or the register stack. */
4977 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4978 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4979 }
4980}
4981
4982
4983void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu) RT_NOEXCEPT
4984{
4985 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4986 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4987
4988 if (pFpuCtx->FCW & X86_FCW_IM)
4989 {
4990 /* Masked underflow - Push QNaN. */
4991 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
4992 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4993 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
4994 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4995 pFpuCtx->FTW |= RT_BIT(iNewTop);
4996 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
4997 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4998 iemFpuRotateStackPush(pFpuCtx);
4999 }
5000 else
5001 {
5002 /* Exception pending - don't change TOP or the register stack. */
5003 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5004 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5005 }
5006}
5007
5008
5009/**
5010 * Worker routine for raising an FPU stack overflow exception on a push.
5011 *
5012 * @param pFpuCtx The FPU context.
5013 */
5014static void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5015{
5016 if (pFpuCtx->FCW & X86_FCW_IM)
5017 {
5018 /* Masked overflow. */
5019 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5020 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5021 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5022 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5023 pFpuCtx->FTW |= RT_BIT(iNewTop);
5024 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5025 iemFpuRotateStackPush(pFpuCtx);
5026 }
5027 else
5028 {
5029 /* Exception pending - don't change TOP or the register stack. */
5030 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5031 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5032 }
5033}
5034
5035
5036/**
5037 * Raises a FPU stack overflow exception on a push.
5038 *
5039 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5040 */
5041void iemFpuStackPushOverflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5042{
5043 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5044 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5045 iemFpuStackPushOverflowOnly(pFpuCtx);
5046}
5047
5048
5049/**
5050 * Raises a FPU stack overflow exception on a push with a memory operand.
5051 *
5052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5053 * @param iEffSeg The effective memory operand selector register.
5054 * @param GCPtrEff The effective memory operand offset.
5055 */
5056void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5057{
5058 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5059 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5060 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5061 iemFpuStackPushOverflowOnly(pFpuCtx);
5062}
5063
5064/** @} */
5065
5066
5067/** @name Memory access.
5068 *
5069 * @{
5070 */
5071
5072
5073/**
5074 * Updates the IEMCPU::cbWritten counter if applicable.
5075 *
5076 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5077 * @param fAccess The access being accounted for.
5078 * @param cbMem The access size.
5079 */
5080DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5081{
5082 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5083 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5084 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5085}
5086
5087
5088/**
5089 * Applies the segment limit, base and attributes.
5090 *
5091 * This may raise a \#GP or \#SS.
5092 *
5093 * @returns VBox strict status code.
5094 *
5095 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5096 * @param fAccess The kind of access which is being performed.
5097 * @param iSegReg The index of the segment register to apply.
5098 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5099 * TSS, ++).
5100 * @param cbMem The access size.
5101 * @param pGCPtrMem Pointer to the guest memory address to apply
5102 * segmentation to. Input and output parameter.
5103 */
5104VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5105{
5106 if (iSegReg == UINT8_MAX)
5107 return VINF_SUCCESS;
5108
5109 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5110 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5111 switch (pVCpu->iem.s.enmCpuMode)
5112 {
5113 case IEMMODE_16BIT:
5114 case IEMMODE_32BIT:
5115 {
5116 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5117 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5118
5119 if ( pSel->Attr.n.u1Present
5120 && !pSel->Attr.n.u1Unusable)
5121 {
5122 Assert(pSel->Attr.n.u1DescType);
5123 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5124 {
5125 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5126 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5127 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5128
5129 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5130 {
5131 /** @todo CPL check. */
5132 }
5133
5134 /*
5135 * There are two kinds of data selectors, normal and expand down.
5136 */
5137 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5138 {
5139 if ( GCPtrFirst32 > pSel->u32Limit
5140 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5141 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5142 }
5143 else
5144 {
5145 /*
5146 * The upper boundary is defined by the B bit, not the G bit!
5147 */
5148 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5149 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5150 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5151 }
5152 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5153 }
5154 else
5155 {
5156 /*
5157 * Code selectors can usually be used to read through; writing is
5158 * only permitted in real and V8086 mode.
5159 */
5160 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5161 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5162 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5163 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5164 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5165
5166 if ( GCPtrFirst32 > pSel->u32Limit
5167 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5168 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5169
5170 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5171 {
5172 /** @todo CPL check. */
5173 }
5174
5175 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5176 }
5177 }
5178 else
5179 return iemRaiseGeneralProtectionFault0(pVCpu);
5180 return VINF_SUCCESS;
5181 }
5182
5183 case IEMMODE_64BIT:
5184 {
5185 RTGCPTR GCPtrMem = *pGCPtrMem;
5186 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5187 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5188
5189 Assert(cbMem >= 1);
5190 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5191 return VINF_SUCCESS;
5192 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5193 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5194 return iemRaiseGeneralProtectionFault0(pVCpu);
5195 }
5196
5197 default:
5198 AssertFailedReturn(VERR_IEM_IPE_7);
5199 }
5200}
5201
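/*
 * Illustrative sketch (under #if 0): callers pass the raw effective address
 * and let iemMemApplySegment add the segment base and enforce the limit and
 * attribute checks before the linear address is translated any further.  The
 * helper name and the DS read access below are made up for illustration.
 */
#if 0
static VBOXSTRICTRC iemExampleSegmentedAddress(PVMCPUCC pVCpu, RTGCPTR GCPtrEff, size_t cbMem, PRTGCPTR pGCPtrLinear)
{
    *pGCPtrLinear = GCPtrEff; /* in/out: the segment base is added on success */
    return iemMemApplySegment(pVCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA,
                              X86_SREG_DS, cbMem, pGCPtrLinear);
}
#endif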
5202
5203/**
5204 * Translates a virtual address to a physical address and checks if we
5205 * can access the page as specified.
5206 *
5207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5208 * @param GCPtrMem The virtual address.
5209 * @param fAccess The intended access.
5210 * @param pGCPhysMem Where to return the physical address.
5211 */
5212VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5213{
5214 /** @todo Need a different PGM interface here. We're currently using
5215 * generic / REM interfaces. This won't cut it for R0. */
5216 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5217 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5218 * here. */
5219 PGMPTWALK Walk;
5220 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5221 if (RT_FAILURE(rc))
5222 {
5223 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5224 /** @todo Check unassigned memory in unpaged mode. */
5225 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5226#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5227 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5228 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5229#endif
5230 *pGCPhysMem = NIL_RTGCPHYS;
5231 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
5232 }
5233
5234 /* If the page is writable and does not have the no-exec bit set, all
5235 access is allowed. Otherwise we'll have to check more carefully... */
5236 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5237 {
5238 /* Write to read only memory? */
5239 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5240 && !(Walk.fEffective & X86_PTE_RW)
5241 && ( ( pVCpu->iem.s.uCpl == 3
5242 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5243 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5244 {
5245 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5246 *pGCPhysMem = NIL_RTGCPHYS;
5247#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5248 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5249 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5250#endif
5251 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5252 }
5253
5254 /* Kernel memory accessed by userland? */
5255 if ( !(Walk.fEffective & X86_PTE_US)
5256 && pVCpu->iem.s.uCpl == 3
5257 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5258 {
5259 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5260 *pGCPhysMem = NIL_RTGCPHYS;
5261#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5262 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5263 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5264#endif
5265 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
5266 }
5267
5268 /* Executing non-executable memory? */
5269 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5270 && (Walk.fEffective & X86_PTE_PAE_NX)
5271 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5272 {
5273 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5274 *pGCPhysMem = NIL_RTGCPHYS;
5275#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5276 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5277 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5278#endif
5279 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5280 VERR_ACCESS_DENIED);
5281 }
5282 }
5283
5284 /*
5285 * Set the dirty / access flags.
5286 * ASSUMES this is set when the address is translated rather than on commit...
5287 */
5288 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5289 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5290 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5291 {
5292 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5293 AssertRC(rc2);
5294 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5295 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5296 }
5297
5298 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5299 *pGCPhysMem = GCPhys;
5300 return VINF_SUCCESS;
5301}
5302
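/*
 * Worked example (illustrative) of the write check above: a CPL 3 data write
 * to a page whose PTE lacks X86_PTE_RW faults regardless of CR0.WP, whereas a
 * supervisor (CPL < 3) or IEM_ACCESS_WHAT_SYS write to the same read-only page
 * only faults when CR0.WP is set - exactly the condition encoded in the first
 * if statement of the careful-check block.
 */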
5303
5304/**
5305 * Looks up a memory mapping entry.
5306 *
5307 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5308 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5309 * @param pvMem The memory address.
5310 * @param fAccess The kind of access to match (IEM_ACCESS_TYPE_XXX and IEM_ACCESS_WHAT_XXX).
5311 */
5312DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5313{
5314 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5315 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5316 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5317 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5318 return 0;
5319 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5320 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5321 return 1;
5322 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5323 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5324 return 2;
5325 return VERR_NOT_FOUND;
5326}
5327
5328
5329/**
5330 * Finds a free memmap entry when using iNextMapping doesn't work.
5331 *
5332 * @returns Memory mapping index, 1024 on failure.
5333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5334 */
5335static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5336{
5337 /*
5338 * The easy case.
5339 */
5340 if (pVCpu->iem.s.cActiveMappings == 0)
5341 {
5342 pVCpu->iem.s.iNextMapping = 1;
5343 return 0;
5344 }
5345
5346 /* There should be enough mappings for all instructions. */
5347 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5348
5349 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5350 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5351 return i;
5352
5353 AssertFailedReturn(1024);
5354}
5355
5356
5357/**
5358 * Commits a bounce buffer that needs writing back and unmaps it.
5359 *
5360 * @returns Strict VBox status code.
5361 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5362 * @param iMemMap The index of the buffer to commit.
5363 * @param fPostponeFail Whether we can postpone write failures to ring-3.
5364 * Always false in ring-3, obviously.
5365 */
5366static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5367{
5368 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5369 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5370#ifdef IN_RING3
5371 Assert(!fPostponeFail);
5372 RT_NOREF_PV(fPostponeFail);
5373#endif
5374
5375 /*
5376 * Do the writing.
5377 */
5378 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5379 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5380 {
5381 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5382 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5383 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5384 if (!pVCpu->iem.s.fBypassHandlers)
5385 {
5386 /*
5387 * Carefully and efficiently dealing with access handler return
5388 * codes makes this a little bloated.
5389 */
5390 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5391 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5392 pbBuf,
5393 cbFirst,
5394 PGMACCESSORIGIN_IEM);
5395 if (rcStrict == VINF_SUCCESS)
5396 {
5397 if (cbSecond)
5398 {
5399 rcStrict = PGMPhysWrite(pVM,
5400 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5401 pbBuf + cbFirst,
5402 cbSecond,
5403 PGMACCESSORIGIN_IEM);
5404 if (rcStrict == VINF_SUCCESS)
5405 { /* nothing */ }
5406 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5407 {
5408 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5409 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5410 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5411 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5412 }
5413#ifndef IN_RING3
5414 else if (fPostponeFail)
5415 {
5416 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5417 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5418 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5419 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5420 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5421 return iemSetPassUpStatus(pVCpu, rcStrict);
5422 }
5423#endif
5424 else
5425 {
5426 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5427 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5428 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5429 return rcStrict;
5430 }
5431 }
5432 }
5433 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5434 {
5435 if (!cbSecond)
5436 {
5437 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5438 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5439 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5440 }
5441 else
5442 {
5443 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5444 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5445 pbBuf + cbFirst,
5446 cbSecond,
5447 PGMACCESSORIGIN_IEM);
5448 if (rcStrict2 == VINF_SUCCESS)
5449 {
5450 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5451 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5452 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5453 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5454 }
5455 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5456 {
5457 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5458 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5459 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5460 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5461 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5462 }
5463#ifndef IN_RING3
5464 else if (fPostponeFail)
5465 {
5466 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5467 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5468 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5469 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5470 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5471 return iemSetPassUpStatus(pVCpu, rcStrict);
5472 }
5473#endif
5474 else
5475 {
5476 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5477 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5478 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5479 return rcStrict2;
5480 }
5481 }
5482 }
5483#ifndef IN_RING3
5484 else if (fPostponeFail)
5485 {
5486 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5487 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5488 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5489 if (!cbSecond)
5490 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5491 else
5492 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5493 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5494 return iemSetPassUpStatus(pVCpu, rcStrict);
5495 }
5496#endif
5497 else
5498 {
5499 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5500 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5501 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5502 return rcStrict;
5503 }
5504 }
5505 else
5506 {
5507 /*
5508 * No access handlers, much simpler.
5509 */
5510 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5511 if (RT_SUCCESS(rc))
5512 {
5513 if (cbSecond)
5514 {
5515 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5516 if (RT_SUCCESS(rc))
5517 { /* likely */ }
5518 else
5519 {
5520 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5521 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5522 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5523 return rc;
5524 }
5525 }
5526 }
5527 else
5528 {
5529 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5530 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5531 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5532 return rc;
5533 }
5534 }
5535 }
5536
5537#if defined(IEM_LOG_MEMORY_WRITES)
5538 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5539 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5540 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5541 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5542 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5543 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5544
5545 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5546 g_cbIemWrote = cbWrote;
5547 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5548#endif
5549
5550 /*
5551 * Free the mapping entry.
5552 */
5553 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5554 Assert(pVCpu->iem.s.cActiveMappings != 0);
5555 pVCpu->iem.s.cActiveMappings--;
5556 return VINF_SUCCESS;
5557}
5558
5559
5560/**
5561 * iemMemMap worker that deals with a request crossing pages.
5562 */
5563static VBOXSTRICTRC
5564iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5565{
5566 /*
5567 * Do the address translations.
5568 */
5569 RTGCPHYS GCPhysFirst;
5570 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
5571 if (rcStrict != VINF_SUCCESS)
5572 return rcStrict;
5573
5574 RTGCPHYS GCPhysSecond;
5575 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5576 fAccess, &GCPhysSecond);
5577 if (rcStrict != VINF_SUCCESS)
5578 return rcStrict;
5579 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
5580
5581 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5582
5583 /*
5584 * Read in the current memory content if it's a read, execute or partial
5585 * write access.
5586 */
5587 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5588 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (GCPhysFirst & GUEST_PAGE_OFFSET_MASK);
5589 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
5590
5591 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5592 {
5593 if (!pVCpu->iem.s.fBypassHandlers)
5594 {
5595 /*
5596 * Must carefully deal with access handler status codes here,
5597 * which makes the code a bit bloated.
5598 */
5599 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
5600 if (rcStrict == VINF_SUCCESS)
5601 {
5602 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5603 if (rcStrict == VINF_SUCCESS)
5604 { /*likely */ }
5605 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5606 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5607 else
5608 {
5609 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
5610 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5611 return rcStrict;
5612 }
5613 }
5614 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5615 {
5616 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5617 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5618 {
5619 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5620 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5621 }
5622 else
5623 {
5624 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
5625 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
5626 return rcStrict2;
5627 }
5628 }
5629 else
5630 {
5631 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5632 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5633 return rcStrict;
5634 }
5635 }
5636 else
5637 {
5638 /*
5639 * No informational status codes here, much more straightforward.
5640 */
5641 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
5642 if (RT_SUCCESS(rc))
5643 {
5644 Assert(rc == VINF_SUCCESS);
5645 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
5646 if (RT_SUCCESS(rc))
5647 Assert(rc == VINF_SUCCESS);
5648 else
5649 {
5650 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5651 return rc;
5652 }
5653 }
5654 else
5655 {
5656 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5657 return rc;
5658 }
5659 }
5660 }
5661#ifdef VBOX_STRICT
5662 else
5663 memset(pbBuf, 0xcc, cbMem);
5664 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
5665 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
5666#endif
5667
5668 /*
5669 * Commit the bounce buffer entry.
5670 */
5671 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5672 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
5673 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
5674 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
5675 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
5676 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
5677 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5678 pVCpu->iem.s.iNextMapping = iMemMap + 1;
5679 pVCpu->iem.s.cActiveMappings++;
5680
5681 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
5682 *ppvMem = pbBuf;
5683 return VINF_SUCCESS;
5684}
5685
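/*
 * Worked example (illustrative): with 4 KiB guest pages, an 8 byte access
 * whose first byte sits at page offset 0xffe gives cbFirstPage = 0x1000 -
 * 0xffe = 2 and cbSecondPage = 8 - 2 = 6, so the bounce buffer holds 2 bytes
 * from the first page followed by 6 bytes from the second.
 */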
5686
5687/**
5688 * iemMemMap worker that deals with iemMemPageMap failures.
5689 */
5690static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
5691 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
5692{
5693 /*
5694 * Filter out conditions we can handle and the ones which shouldn't happen.
5695 */
5696 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
5697 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
5698 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
5699 {
5700 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
5701 return rcMap;
5702 }
5703 pVCpu->iem.s.cPotentialExits++;
5704
5705 /*
5706 * Read in the current memory content if it's a read, execute or partial
5707 * write access.
5708 */
5709 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5710 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5711 {
5712 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
5713 memset(pbBuf, 0xff, cbMem);
5714 else
5715 {
5716 int rc;
5717 if (!pVCpu->iem.s.fBypassHandlers)
5718 {
5719 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
5720 if (rcStrict == VINF_SUCCESS)
5721 { /* nothing */ }
5722 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5723 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5724 else
5725 {
5726 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5727 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5728 return rcStrict;
5729 }
5730 }
5731 else
5732 {
5733 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
5734 if (RT_SUCCESS(rc))
5735 { /* likely */ }
5736 else
5737 {
5738 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5739 GCPhysFirst, rc));
5740 return rc;
5741 }
5742 }
5743 }
5744 }
5745#ifdef VBOX_STRICT
5746 else
5747 memset(pbBuf, 0xcc, cbMem);
5748#endif
5749#ifdef VBOX_STRICT
5750 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
5751 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
5752#endif
5753
5754 /*
5755 * Commit the bounce buffer entry.
5756 */
5757 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5758 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
5759 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
5760 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
5761 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
5762 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
5763 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5764 pVCpu->iem.s.iNextMapping = iMemMap + 1;
5765 pVCpu->iem.s.cActiveMappings++;
5766
5767 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
5768 *ppvMem = pbBuf;
5769 return VINF_SUCCESS;
5770}
5771
5772
5773
5774/**
5775 * Maps the specified guest memory for the given kind of access.
5776 *
5777 * This may be using bounce buffering of the memory if it's crossing a page
5778 * boundary or if there is an access handler installed for any of it. Because
5779 * of lock prefix guarantees, we're in for some extra clutter when this
5780 * happens.
5781 *
5782 * This may raise a \#GP, \#SS, \#PF or \#AC.
5783 *
5784 * @returns VBox strict status code.
5785 *
5786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5787 * @param ppvMem Where to return the pointer to the mapped
5788 * memory.
5789 * @param cbMem The number of bytes to map. This is usually 1,
5790 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
5791 * string operations it can be up to a page.
5792 * @param iSegReg The index of the segment register to use for
5793 * this access. The base and limits are checked.
5794 * Use UINT8_MAX to indicate that no segmentation
5795 * is required (for IDT, GDT and LDT accesses).
5796 * @param GCPtrMem The address of the guest memory.
5797 * @param fAccess How the memory is being accessed. The
5798 * IEM_ACCESS_TYPE_XXX bit is used to figure out
5799 * how to map the memory, while the
5800 * IEM_ACCESS_WHAT_XXX bit is used when raising
5801 * exceptions.
5802 */
5803VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess) RT_NOEXCEPT
5804{
5805 /*
5806 * Check the input and figure out which mapping entry to use.
5807 */
5808 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
5809 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
5810 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5811
5812 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
5813 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
5814 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
5815 {
5816 iMemMap = iemMemMapFindFree(pVCpu);
5817 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
5818 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
5819 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
5820 pVCpu->iem.s.aMemMappings[2].fAccess),
5821 VERR_IEM_IPE_9);
5822 }
5823
5824 /*
5825 * Map the memory, checking that we can actually access it. If something
5826 * slightly complicated happens, fall back on bounce buffering.
5827 */
5828 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
5829 if (rcStrict == VINF_SUCCESS)
5830 { /* likely */ }
5831 else
5832 return rcStrict;
5833
5834 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
5835 { /* likely */ }
5836 else
5837 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
5838
5839#ifdef IEM_WITH_DATA_TLB
5840 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
5841
5842 /*
5843 * Get the TLB entry for this page.
5844 */
5845 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
5846 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
5847 if (pTlbe->uTag == uTag)
5848 {
5849# ifdef VBOX_WITH_STATISTICS
5850 pVCpu->iem.s.DataTlb.cTlbHits++;
5851# endif
5852 }
5853 else
5854 {
5855 pVCpu->iem.s.DataTlb.cTlbMisses++;
5856 PGMPTWALK Walk;
5857 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5858 if (RT_FAILURE(rc))
5859 {
5860 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5861# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5862 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5863 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5864# endif
5865 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, rc);
5866 }
5867
5868 Assert(Walk.fSucceeded);
5869 pTlbe->uTag = uTag;
5870 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
5871 pTlbe->GCPhys = Walk.GCPhys;
5872 pTlbe->pbMappingR3 = NULL;
5873 }
5874
5875 /*
5876 * Check TLB page table level access flags.
5877 */
5878 /* If the page is either supervisor only or non-writable, we need to do
5879 more careful access checks. */
5880 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
5881 {
5882 /* Write to read only memory? */
5883 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
5884 && (fAccess & IEM_ACCESS_TYPE_WRITE)
5885 && ( ( pVCpu->iem.s.uCpl == 3
5886 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5887 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5888 {
5889 Log(("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5890# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5891 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5892 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5893# endif
5894 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5895 }
5896
5897 /* Kernel memory accessed by userland? */
5898 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
5899 && pVCpu->iem.s.uCpl == 3
5900 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5901 {
5902 Log(("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5903# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5904 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5905 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5906# endif
5907 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
5908 }
5909 }
5910
5911 /*
5912 * Set the dirty / access flags.
5913 * ASSUMES this is set when the address is translated rather than on commit...
5914 */
5915 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5916 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
5917 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
5918 {
5919 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5920 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5921 AssertRC(rc2);
5922 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5923 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5924 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
5925 }
5926
5927 /*
5928 * Look up the physical page info if necessary.
5929 */
5930 uint8_t *pbMem = NULL;
5931 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
5932# ifdef IN_RING3
5933 pbMem = pTlbe->pbMappingR3;
5934# else
5935 pbMem = NULL;
5936# endif
5937 else
5938 {
5939 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
5940 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
5941 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
5942 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
5943 if (RT_LIKELY(pVCpu->iem.s.DataTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
5944 { /* likely */ }
5945 else
5946 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
5947 pTlbe->pbMappingR3 = NULL;
5948 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
5949 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
5950 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
5951 &pbMem, &pTlbe->fFlagsAndPhysRev);
5952 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
5953# ifdef IN_RING3
5954 pTlbe->pbMappingR3 = pbMem;
5955# endif
5956 }
5957
5958 /*
5959 * Check the physical page level access and mapping.
5960 */
5961 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
5962 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
5963 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
5964 { /* probably likely */ }
5965 else
5966 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
5967 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
5968 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
5969 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
5970 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
5971 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
5972
5973 if (pbMem)
5974 {
5975 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
5976 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5977 fAccess |= IEM_ACCESS_NOT_LOCKED;
5978 }
5979 else
5980 {
5981 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
5982 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5983 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
5984 if (rcStrict != VINF_SUCCESS)
5985 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
5986 }
5987
5988 void * const pvMem = pbMem;
5989
5990 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5991 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
5992 if (fAccess & IEM_ACCESS_TYPE_READ)
5993 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
5994
5995#else /* !IEM_WITH_DATA_TLB */
5996
5997 RTGCPHYS GCPhysFirst;
5998 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
5999 if (rcStrict != VINF_SUCCESS)
6000 return rcStrict;
6001
6002 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6003 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6004 if (fAccess & IEM_ACCESS_TYPE_READ)
6005 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6006
6007 void *pvMem;
6008 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6009 if (rcStrict != VINF_SUCCESS)
6010 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6011
6012#endif /* !IEM_WITH_DATA_TLB */
6013
6014 /*
6015 * Fill in the mapping table entry.
6016 */
6017 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6018 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6019 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6020 pVCpu->iem.s.cActiveMappings += 1;
6021
6022 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6023 *ppvMem = pvMem;
6024
6025 return VINF_SUCCESS;
6026}
6027
6028
6029/**
6030 * Commits the guest memory if bounce buffered and unmaps it.
6031 *
6032 * @returns Strict VBox status code.
6033 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6034 * @param pvMem The mapping.
6035 * @param fAccess The kind of access.
6036 */
6037VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6038{
6039 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6040 AssertReturn(iMemMap >= 0, iMemMap);
6041
6042 /* If it's bounce buffered, we may need to write back the buffer. */
6043 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6044 {
6045 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6046 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6047 }
6048 /* Otherwise unlock it. */
6049 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6050 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6051
6052 /* Free the entry. */
6053 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6054 Assert(pVCpu->iem.s.cActiveMappings != 0);
6055 pVCpu->iem.s.cActiveMappings--;
6056 return VINF_SUCCESS;
6057}
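
/* Illustrative sketch (not from the original source): the basic
 * iemMemMap / modify / iemMemCommitAndUnmap round trip that the data fetch
 * and store helpers further down are built on. The segment constant
 * X86_SREG_DS and the locals GCPtrEff / u32Value are assumptions made for
 * this example only.
 *
 *      uint32_t *pu32Dst;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                                        X86_SREG_DS, GCPtrEff, IEM_ACCESS_DATA_W);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu32Dst = u32Value;
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
 *      }
 */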
6058
6059#ifdef IEM_WITH_SETJMP
6060
6061/**
6062 * Maps the specified guest memory for the given kind of access, longjmp on
6063 * error.
6064 *
6065 * This may be using bounce buffering of the memory if it's crossing a page
6066 * boundary or if there is an access handler installed for any of it. Because
6067 * of lock prefix guarantees, we're in for some extra clutter when this
6068 * happens.
6069 *
6070 * This may raise a \#GP, \#SS, \#PF or \#AC.
6071 *
6072 * @returns Pointer to the mapped memory.
6073 *
6074 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6075 * @param cbMem The number of bytes to map. This is usually 1,
6076 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6077 * string operations it can be up to a page.
6078 * @param iSegReg The index of the segment register to use for
6079 * this access. The base and limits are checked.
6080 * Use UINT8_MAX to indicate that no segmentation
6081 * is required (for IDT, GDT and LDT accesses).
6082 * @param GCPtrMem The address of the guest memory.
6083 * @param fAccess How the memory is being accessed. The
6084 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6085 * how to map the memory, while the
6086 * IEM_ACCESS_WHAT_XXX bit is used when raising
6087 * exceptions.
6088 */
6089void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess) RT_NOEXCEPT
6090{
6091 /*
6092 * Check the input and figure out which mapping entry to use.
6093 */
6094 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6095 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6096 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6097
6098 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6099 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6100 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6101 {
6102 iMemMap = iemMemMapFindFree(pVCpu);
6103 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6104 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6105 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6106 pVCpu->iem.s.aMemMappings[2].fAccess),
6107 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
6108 }
6109
6110 /*
6111 * Map the memory, checking that we can actually access it. If something
6112 * slightly complicated happens, fall back on bounce buffering.
6113 */
6114 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6115 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6116 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6117
6118 /* Crossing a page boundary? */
6119 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6120 { /* No (likely). */ }
6121 else
6122 {
6123 void *pvMem;
6124 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6125 if (rcStrict == VINF_SUCCESS)
6126 return pvMem;
6127 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6128 }
6129
6130#ifdef IEM_WITH_DATA_TLB
6131 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6132
6133 /*
6134 * Get the TLB entry for this page.
6135 */
6136 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6137 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6138 if (pTlbe->uTag == uTag)
6139 {
6140# ifdef VBOX_WITH_STATISTICS
6141 pVCpu->iem.s.DataTlb.cTlbHits++;
6142# endif
6143 }
6144 else
6145 {
6146 pVCpu->iem.s.DataTlb.cTlbMisses++;
6147 PGMPTWALK Walk;
6148 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6149 if (RT_FAILURE(rc))
6150 {
6151 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6152# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6153 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6154 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6155# endif
6156 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, rc);
6157 }
6158
6159 Assert(Walk.fSucceeded);
6160 pTlbe->uTag = uTag;
6161 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6162 pTlbe->GCPhys = Walk.GCPhys;
6163 pTlbe->pbMappingR3 = NULL;
6164 }
6165
6166 /*
6167 * Check TLB page table level access flags.
6168 */
6169 /* If the page is either supervisor only or non-writable, we need to do
6170 more careful access checks. */
6171 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6172 {
6173 /* Write to read only memory? */
6174 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6175 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6176 && ( ( pVCpu->iem.s.uCpl == 3
6177 && !(fAccess & IEM_ACCESS_WHAT_SYS)) /** @todo check this. Not sure WP applies to all SYS writes... */
6178 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6179 {
6180 Log(("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6181# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6182 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6183 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6184# endif
6185 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6186 }
6187
6188 /* Kernel memory accessed by userland? */
6189 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6190 && pVCpu->iem.s.uCpl == 3
6191 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6192 {
6193 Log(("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6194# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6195 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6196 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6197# endif
6198 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6199 }
6200 }
6201
6202 /*
6203 * Set the dirty / access flags.
6204 * ASSUMES this is set when the address is translated rather than on commit...
6205 */
6206 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6207 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6208 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6209 {
6210 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6211 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6212 AssertRC(rc2);
6213 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6214 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6215 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6216 }
6217
6218 /*
6219 * Look up the physical page info if necessary.
6220 */
6221 uint8_t *pbMem = NULL;
6222 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6223# ifdef IN_RING3
6224 pbMem = pTlbe->pbMappingR3;
6225# else
6226 pbMem = NULL;
6227# endif
6228 else
6229 {
6230 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6231 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6232 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6233 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6234 pTlbe->pbMappingR3 = NULL;
6235 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6236 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6237 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6238 &pbMem, &pTlbe->fFlagsAndPhysRev);
6239 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
6240# ifdef IN_RING3
6241 pTlbe->pbMappingR3 = pbMem;
6242# endif
6243 }
6244
6245 /*
6246 * Check the physical page level access and mapping.
6247 */
6248 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6249 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6250 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6251 { /* probably likely */ }
6252 else
6253 {
6254 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6255 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6256 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6257 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6258 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6259 if (rcStrict == VINF_SUCCESS)
6260 return pbMem;
6261 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6262 }
6263 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6264
6265 if (pbMem)
6266 {
6267 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6268 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6269 fAccess |= IEM_ACCESS_NOT_LOCKED;
6270 }
6271 else
6272 {
6273 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6274 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6275 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6276 if (rcStrict == VINF_SUCCESS)
6277 return pbMem;
6278 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6279 }
6280
6281 void * const pvMem = pbMem;
6282
6283 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6284 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6285 if (fAccess & IEM_ACCESS_TYPE_READ)
6286 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6287
6288#else /* !IEM_WITH_DATA_TLB */
6289
6290
6291 RTGCPHYS GCPhysFirst;
6292 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
6293 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6294 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6295
6296 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6297 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6298 if (fAccess & IEM_ACCESS_TYPE_READ)
6299 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6300
6301 void *pvMem;
6302 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6303 if (rcStrict == VINF_SUCCESS)
6304 { /* likely */ }
6305 else
6306 {
6307 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6308 if (rcStrict == VINF_SUCCESS)
6309 return pvMem;
6310 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6311 }
6312
6313#endif /* !IEM_WITH_DATA_TLB */
6314
6315 /*
6316 * Fill in the mapping table entry.
6317 */
6318 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6319 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6320 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6321 pVCpu->iem.s.cActiveMappings++;
6322
6323 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6324 return pvMem;
6325}
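
/* A minimal usage sketch, assuming the caller runs under the IEM setjmp
 * frame: the longjmp flavoured variant of the map/commit pattern, as used by
 * the *Jmp store helpers below. Errors never return here; the helpers
 * longjmp instead. X86_SREG_SS, GCPtrStack and u64Value are assumed names
 * for the example.
 *
 *      uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst),
 *                                                   X86_SREG_SS, GCPtrStack, IEM_ACCESS_DATA_W);
 *      *pu64Dst = u64Value;
 *      iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
 */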
6326
6327
6328/**
6329 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6330 *
6331 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6332 * @param pvMem The mapping.
6333 * @param fAccess The kind of access.
6334 */
6335void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6336{
6337 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6338 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
6339
6340 /* If it's bounce buffered, we may need to write back the buffer. */
6341 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6342 {
6343 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6344 {
6345 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6346 if (rcStrict == VINF_SUCCESS)
6347 return;
6348 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6349 }
6350 }
6351 /* Otherwise unlock it. */
6352 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6353 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6354
6355 /* Free the entry. */
6356 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6357 Assert(pVCpu->iem.s.cActiveMappings != 0);
6358 pVCpu->iem.s.cActiveMappings--;
6359}
6360
6361#endif /* IEM_WITH_SETJMP */
6362
6363#ifndef IN_RING3
6364/**
6365 * Commits the guest memory if bounce buffered and unmaps it, if any bounce
6366 * buffer part shows trouble it will be postponed to ring-3 (sets FF and stuff).
6367 *
6368 * Allows the instruction to be completed and retired, while the IEM user will
6369 * return to ring-3 immediately afterwards and do the postponed writes there.
6370 *
6371 * @returns VBox status code (no strict statuses). Caller must check
6372 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6373 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6374 * @param pvMem The mapping.
6375 * @param fAccess The kind of access.
6376 */
6377VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6378{
6379 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6380 AssertReturn(iMemMap >= 0, iMemMap);
6381
6382 /* If it's bounce buffered, we may need to write back the buffer. */
6383 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6384 {
6385 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6386 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6387 }
6388 /* Otherwise unlock it. */
6389 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6390 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6391
6392 /* Free the entry. */
6393 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6394 Assert(pVCpu->iem.s.cActiveMappings != 0);
6395 pVCpu->iem.s.cActiveMappings--;
6396 return VINF_SUCCESS;
6397}
6398#endif
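
/* Illustrative sketch (assumed caller code, not from the original source):
 * a ring-0 caller may prefer this variant so a troublesome bounce-buffer
 * commit does not block instruction retirement; the caller then checks
 * VMCPU_FF_IEM before repeating string instructions. pvMem and the access
 * flags are assumptions for the example.
 *
 * #ifndef IN_RING3
 *      rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pvMem, IEM_ACCESS_DATA_W);
 * #else
 *      rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_DATA_W);
 * #endif
 */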
6399
6400
6401/**
6402 * Rolls back mappings, releasing page locks and such.
6403 *
6404 * The caller shall only call this after checking cActiveMappings.
6405 *
6407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6408 */
6409void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6410{
6411 Assert(pVCpu->iem.s.cActiveMappings > 0);
6412
6413 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6414 while (iMemMap-- > 0)
6415 {
6416 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6417 if (fAccess != IEM_ACCESS_INVALID)
6418 {
6419 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6420 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6421 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6422 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6423 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6424 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6425 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6426 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6427 pVCpu->iem.s.cActiveMappings--;
6428 }
6429 }
6430}
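
/* A minimal sketch of how an outer execution loop might use this after a
 * failed instruction, honouring the cActiveMappings precondition above.
 * rcStrict is an assumed local for the example; this is not taken from the
 * surrounding code.
 *
 *      if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
 *          iemMemRollback(pVCpu);
 */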
6431
6432
6433/**
6434 * Fetches a data byte.
6435 *
6436 * @returns Strict VBox status code.
6437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6438 * @param pu8Dst Where to return the byte.
6439 * @param iSegReg The index of the segment register to use for
6440 * this access. The base and limits are checked.
6441 * @param GCPtrMem The address of the guest memory.
6442 */
6443VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6444{
6445 /* The lazy approach for now... */
6446 uint8_t const *pu8Src;
6447 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6448 if (rc == VINF_SUCCESS)
6449 {
6450 *pu8Dst = *pu8Src;
6451 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6452 }
6453 return rc;
6454}
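
/* Illustrative sketch (not from the original source): a typical caller in an
 * instruction emulation simply propagates the strict status. X86_SREG_DS and
 * GCPtrSrc are assumptions for the example.
 *
 *      uint8_t bValue;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU8(pVCpu, &bValue, X86_SREG_DS, GCPtrSrc);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */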
6455
6456
6457#ifdef IEM_WITH_SETJMP
6458/**
6459 * Fetches a data byte, longjmp on error.
6460 *
6461 * @returns The byte.
6462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6463 * @param iSegReg The index of the segment register to use for
6464 * this access. The base and limits are checked.
6465 * @param GCPtrMem The address of the guest memory.
6466 */
6467uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6468{
6469 /* The lazy approach for now... */
6470 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6471 uint8_t const bRet = *pu8Src;
6472 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6473 return bRet;
6474}
6475#endif /* IEM_WITH_SETJMP */
6476
6477
6478/**
6479 * Fetches a data word.
6480 *
6481 * @returns Strict VBox status code.
6482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6483 * @param pu16Dst Where to return the word.
6484 * @param iSegReg The index of the segment register to use for
6485 * this access. The base and limits are checked.
6486 * @param GCPtrMem The address of the guest memory.
6487 */
6488VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6489{
6490 /* The lazy approach for now... */
6491 uint16_t const *pu16Src;
6492 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6493 if (rc == VINF_SUCCESS)
6494 {
6495 *pu16Dst = *pu16Src;
6496 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6497 }
6498 return rc;
6499}
6500
6501
6502#ifdef IEM_WITH_SETJMP
6503/**
6504 * Fetches a data word, longjmp on error.
6505 *
6506 * @returns The word.
6507 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6508 * @param iSegReg The index of the segment register to use for
6509 * this access. The base and limits are checked.
6510 * @param GCPtrMem The address of the guest memory.
6511 */
6512uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6513{
6514 /* The lazy approach for now... */
6515 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6516 uint16_t const u16Ret = *pu16Src;
6517 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6518 return u16Ret;
6519}
6520#endif
6521
6522
6523/**
6524 * Fetches a data dword.
6525 *
6526 * @returns Strict VBox status code.
6527 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6528 * @param pu32Dst Where to return the dword.
6529 * @param iSegReg The index of the segment register to use for
6530 * this access. The base and limits are checked.
6531 * @param GCPtrMem The address of the guest memory.
6532 */
6533VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6534{
6535 /* The lazy approach for now... */
6536 uint32_t const *pu32Src;
6537 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6538 if (rc == VINF_SUCCESS)
6539 {
6540 *pu32Dst = *pu32Src;
6541 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6542 }
6543 return rc;
6544}
6545
6546
6547/**
6548 * Fetches a data dword and zero extends it to a qword.
6549 *
6550 * @returns Strict VBox status code.
6551 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6552 * @param pu64Dst Where to return the qword.
6553 * @param iSegReg The index of the segment register to use for
6554 * this access. The base and limits are checked.
6555 * @param GCPtrMem The address of the guest memory.
6556 */
6557VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6558{
6559 /* The lazy approach for now... */
6560 uint32_t const *pu32Src;
6561 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6562 if (rc == VINF_SUCCESS)
6563 {
6564 *pu64Dst = *pu32Src;
6565 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6566 }
6567 return rc;
6568}
6569
6570
6571#ifdef IEM_WITH_SETJMP
6572
6573/**
6574 * Fetches a data dword, longjmp on error, fallback/safe version.
6575 *
6576 * @returns The dword.
6577 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6578 * @param iSegReg The index of the segment register to use for
6579 * this access. The base and limits are checked.
6580 * @param GCPtrMem The address of the guest memory.
6581 */
6582static uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6583{
6584 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6585 uint32_t const u32Ret = *pu32Src;
6586 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6587 return u32Ret;
6588}
6589
6590
6591/**
6592 * Fetches a data dword, longjmp on error.
6593 *
6594 * @returns The dword.
6595 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6596 * @param iSegReg The index of the segment register to use for
6597 * this access. The base and limits are checked.
6598 * @param GCPtrMem The address of the guest memory.
6599 */
6600uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6601{
6602# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
6603 /*
6604 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
6605 */
6606 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
6607 if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
6608 {
6609 /*
6610 * TLB lookup.
6611 */
6612 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
6613 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6614 if (pTlbe->uTag == uTag)
6615 {
6616 /*
6617 * Check TLB page table level access flags.
6618 */
6619 uint64_t const fNoUser = pVCpu->iem.s.uCpl == 3 ? IEMTLBE_F_PT_NO_USER : 0;
6620 if ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
6621 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
6622 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6623 {
6624 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6625
6626 /*
6627 * Alignment check:
6628 */
6629 /** @todo check priority \#AC vs \#PF */
6630 if ( !(GCPtrEff & (sizeof(uint32_t) - 1))
6631 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6632 || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
6633 || pVCpu->iem.s.uCpl != 3)
6634 {
6635 /*
6636 * Fetch and return the dword
6637 */
6638 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
6639 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
6640 return *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
6641 }
6642 Log10(("iemMemFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
6643 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6644 }
6645 }
6646 }
6647
6648 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
6649 outdated page pointer, or other troubles. */
6650 Log10(("iemMemFetchDataU32Jmp: %u:%RGv fallback\n", iSegReg, GCPtrMem));
6651# endif
6652 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
6653}
6654#endif
6655
6656
6657#ifdef SOME_UNUSED_FUNCTION
6658/**
6659 * Fetches a data dword and sign extends it to a qword.
6660 *
6661 * @returns Strict VBox status code.
6662 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6663 * @param pu64Dst Where to return the sign extended value.
6664 * @param iSegReg The index of the segment register to use for
6665 * this access. The base and limits are checked.
6666 * @param GCPtrMem The address of the guest memory.
6667 */
6668VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6669{
6670 /* The lazy approach for now... */
6671 int32_t const *pi32Src;
6672 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6673 if (rc == VINF_SUCCESS)
6674 {
6675 *pu64Dst = *pi32Src;
6676 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
6677 }
6678#ifdef __GNUC__ /* warning: GCC may be a royal pain */
6679 else
6680 *pu64Dst = 0;
6681#endif
6682 return rc;
6683}
6684#endif
6685
6686
6687/**
6688 * Fetches a data qword.
6689 *
6690 * @returns Strict VBox status code.
6691 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6692 * @param pu64Dst Where to return the qword.
6693 * @param iSegReg The index of the segment register to use for
6694 * this access. The base and limits are checked.
6695 * @param GCPtrMem The address of the guest memory.
6696 */
6697VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6698{
6699 /* The lazy approach for now... */
6700 uint64_t const *pu64Src;
6701 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6702 if (rc == VINF_SUCCESS)
6703 {
6704 *pu64Dst = *pu64Src;
6705 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6706 }
6707 return rc;
6708}
6709
6710
6711#ifdef IEM_WITH_SETJMP
6712/**
6713 * Fetches a data qword, longjmp on error.
6714 *
6715 * @returns The qword.
6716 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6717 * @param iSegReg The index of the segment register to use for
6718 * this access. The base and limits are checked.
6719 * @param GCPtrMem The address of the guest memory.
6720 */
6721uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6722{
6723 /* The lazy approach for now... */
6724 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6725 uint64_t const u64Ret = *pu64Src;
6726 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6727 return u64Ret;
6728}
6729#endif
6730
6731
6732/**
6733 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
6734 *
6735 * @returns Strict VBox status code.
6736 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6737 * @param pu64Dst Where to return the qword.
6738 * @param iSegReg The index of the segment register to use for
6739 * this access. The base and limits are checked.
6740 * @param GCPtrMem The address of the guest memory.
6741 */
6742VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6743{
6744 /* The lazy approach for now... */
6745 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
6746 if (RT_UNLIKELY(GCPtrMem & 15))
6747 return iemRaiseGeneralProtectionFault0(pVCpu);
6748
6749 uint64_t const *pu64Src;
6750 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6751 if (rc == VINF_SUCCESS)
6752 {
6753 *pu64Dst = *pu64Src;
6754 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6755 }
6756 return rc;
6757}
6758
6759
6760#ifdef IEM_WITH_SETJMP
6761/**
6762 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
6763 *
6764 * @returns The qword.
6765 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6766 * @param iSegReg The index of the segment register to use for
6767 * this access. The base and limits are checked.
6768 * @param GCPtrMem The address of the guest memory.
6769 */
6770uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6771{
6772 /* The lazy approach for now... */
6773 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
6774 if (RT_LIKELY(!(GCPtrMem & 15)))
6775 {
6776 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6777 uint64_t const u64Ret = *pu64Src;
6778 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6779 return u64Ret;
6780 }
6781
6782 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
6783 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
6784}
6785#endif
6786
6787
6788/**
6789 * Fetches a data tword.
6790 *
6791 * @returns Strict VBox status code.
6792 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6793 * @param pr80Dst Where to return the tword.
6794 * @param iSegReg The index of the segment register to use for
6795 * this access. The base and limits are checked.
6796 * @param GCPtrMem The address of the guest memory.
6797 */
6798VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6799{
6800 /* The lazy approach for now... */
6801 PCRTFLOAT80U pr80Src;
6802 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6803 if (rc == VINF_SUCCESS)
6804 {
6805 *pr80Dst = *pr80Src;
6806 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
6807 }
6808 return rc;
6809}
6810
6811
6812#ifdef IEM_WITH_SETJMP
6813/**
6814 * Fetches a data tword, longjmp on error.
6815 *
6816 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6817 * @param pr80Dst Where to return the tword.
6818 * @param iSegReg The index of the segment register to use for
6819 * this access. The base and limits are checked.
6820 * @param GCPtrMem The address of the guest memory.
6821 */
6822void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6823{
6824 /* The lazy approach for now... */
6825 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6826 *pr80Dst = *pr80Src;
6827 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
6828}
6829#endif
6830
6831
6832/**
6833 * Fetches a data tword.
6834 *
6835 * @returns Strict VBox status code.
6836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6837 * @param pd80Dst Where to return the tword.
6838 * @param iSegReg The index of the segment register to use for
6839 * this access. The base and limits are checked.
6840 * @param GCPtrMem The address of the guest memory.
6841 */
6842VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6843{
6844 /* The lazy approach for now... */
6845 PCRTPBCD80U pd80Src;
6846 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6847 if (rc == VINF_SUCCESS)
6848 {
6849 *pd80Dst = *pd80Src;
6850 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
6851 }
6852 return rc;
6853}
6854
6855
6856#ifdef IEM_WITH_SETJMP
6857/**
6858 * Fetches a data tword, longjmp on error.
6859 *
6860 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6861 * @param pd80Dst Where to return the tword.
6862 * @param iSegReg The index of the segment register to use for
6863 * this access. The base and limits are checked.
6864 * @param GCPtrMem The address of the guest memory.
6865 */
6866void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6867{
6868 /* The lazy approach for now... */
6869 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6870 *pd80Dst = *pd80Src;
6871 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
6872}
6873#endif
6874
6875
6876/**
6877 * Fetches a data dqword (double qword), generally SSE related.
6878 *
6879 * @returns Strict VBox status code.
6880 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6881 * @param pu128Dst Where to return the dqword.
6882 * @param iSegReg The index of the segment register to use for
6883 * this access. The base and limits are checked.
6884 * @param GCPtrMem The address of the guest memory.
6885 */
6886VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6887{
6888 /* The lazy approach for now... */
6889 PCRTUINT128U pu128Src;
6890 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6891 if (rc == VINF_SUCCESS)
6892 {
6893 pu128Dst->au64[0] = pu128Src->au64[0];
6894 pu128Dst->au64[1] = pu128Src->au64[1];
6895 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
6896 }
6897 return rc;
6898}
6899
6900
6901#ifdef IEM_WITH_SETJMP
6902/**
6903 * Fetches a data dqword (double qword), generally SSE related.
6904 *
6905 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6906 * @param pu128Dst Where to return the dqword.
6907 * @param iSegReg The index of the segment register to use for
6908 * this access. The base and limits are checked.
6909 * @param GCPtrMem The address of the guest memory.
6910 */
6911void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6912{
6913 /* The lazy approach for now... */
6914 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6915 pu128Dst->au64[0] = pu128Src->au64[0];
6916 pu128Dst->au64[1] = pu128Src->au64[1];
6917 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
6918}
6919#endif
6920
6921
6922/**
6923 * Fetches a data dqword (double qword) at an aligned address, generally SSE
6924 * related.
6925 *
6926 * Raises \#GP(0) if not aligned.
6927 *
6928 * @returns Strict VBox status code.
6929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6930 * @param pu128Dst Where to return the dqword.
6931 * @param iSegReg The index of the segment register to use for
6932 * this access. The base and limits are checked.
6933 * @param GCPtrMem The address of the guest memory.
6934 */
6935VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6936{
6937 /* The lazy approach for now... */
6938 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
6939 if ( (GCPtrMem & 15)
6940 && !(pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
6941 return iemRaiseGeneralProtectionFault0(pVCpu);
6942
6943 PCRTUINT128U pu128Src;
6944 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6945 if (rc == VINF_SUCCESS)
6946 {
6947 pu128Dst->au64[0] = pu128Src->au64[0];
6948 pu128Dst->au64[1] = pu128Src->au64[1];
6949 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
6950 }
6951 return rc;
6952}
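
/* Illustrative sketch (not from the original source): the kind of helper a
 * movaps/movdqa style load might use; an address that is not 16-byte aligned
 * raises \#GP(0) unless MXCSR.MM (AMD's misaligned SSE mode) lifts the check,
 * as coded above. iEffSeg and GCPtrEffSrc are assumed names for the example.
 *
 *      RTUINT128U uSrc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU128AlignedSse(pVCpu, &uSrc, iEffSeg, GCPtrEffSrc);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */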
6953
6954
6955#ifdef IEM_WITH_SETJMP
6956/**
6957 * Fetches a data dqword (double qword) at an aligned address, generally SSE
6958 * related, longjmp on error.
6959 *
6960 * Raises \#GP(0) if not aligned.
6961 *
6962 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6963 * @param pu128Dst Where to return the dqword.
6964 * @param iSegReg The index of the segment register to use for
6965 * this access. The base and limits are checked.
6966 * @param GCPtrMem The address of the guest memory.
6967 */
6968void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6969{
6970 /* The lazy approach for now... */
6971 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
6972 if ( (GCPtrMem & 15) == 0
6973 || (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
6974 {
6975 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6976 pu128Dst->au64[0] = pu128Src->au64[0];
6977 pu128Dst->au64[1] = pu128Src->au64[1];
6978 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
6979 return;
6980 }
6981
6982 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
6983 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6984}
6985#endif
6986
6987
6988/**
6989 * Fetches a data oword (octo word), generally AVX related.
6990 *
6991 * @returns Strict VBox status code.
6992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6993 * @param pu256Dst Where to return the oword.
6994 * @param iSegReg The index of the segment register to use for
6995 * this access. The base and limits are checked.
6996 * @param GCPtrMem The address of the guest memory.
6997 */
6998VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6999{
7000 /* The lazy approach for now... */
7001 PCRTUINT256U pu256Src;
7002 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7003 if (rc == VINF_SUCCESS)
7004 {
7005 pu256Dst->au64[0] = pu256Src->au64[0];
7006 pu256Dst->au64[1] = pu256Src->au64[1];
7007 pu256Dst->au64[2] = pu256Src->au64[2];
7008 pu256Dst->au64[3] = pu256Src->au64[3];
7009 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7010 }
7011 return rc;
7012}
7013
7014
7015#ifdef IEM_WITH_SETJMP
7016/**
7017 * Fetches a data oword (octo word), generally AVX related.
7018 *
7019 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7020 * @param pu256Dst Where to return the oword.
7021 * @param iSegReg The index of the segment register to use for
7022 * this access. The base and limits are checked.
7023 * @param GCPtrMem The address of the guest memory.
7024 */
7025void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7026{
7027 /* The lazy approach for now... */
7028 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7029 pu256Dst->au64[0] = pu256Src->au64[0];
7030 pu256Dst->au64[1] = pu256Src->au64[1];
7031 pu256Dst->au64[2] = pu256Src->au64[2];
7032 pu256Dst->au64[3] = pu256Src->au64[3];
7033 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7034}
7035#endif
7036
7037
7038/**
7039 * Fetches a data oword (octo word) at an aligned address, generally AVX
7040 * related.
7041 *
7042 * Raises \#GP(0) if not aligned.
7043 *
7044 * @returns Strict VBox status code.
7045 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7046 * @param pu256Dst Where to return the oword.
7047 * @param iSegReg The index of the segment register to use for
7048 * this access. The base and limits are checked.
7049 * @param GCPtrMem The address of the guest memory.
7050 */
7051VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7052{
7053 /* The lazy approach for now... */
7054 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
7055 if (GCPtrMem & 31)
7056 return iemRaiseGeneralProtectionFault0(pVCpu);
7057
7058 PCRTUINT256U pu256Src;
7059 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7060 if (rc == VINF_SUCCESS)
7061 {
7062 pu256Dst->au64[0] = pu256Src->au64[0];
7063 pu256Dst->au64[1] = pu256Src->au64[1];
7064 pu256Dst->au64[2] = pu256Src->au64[2];
7065 pu256Dst->au64[3] = pu256Src->au64[3];
7066 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7067 }
7068 return rc;
7069}
7070
7071
7072#ifdef IEM_WITH_SETJMP
7073/**
7074 * Fetches a data oword (octo word) at an aligned address, generally AVX
7075 * related, longjmp on error.
7076 *
7077 * Raises \#GP(0) if not aligned.
7078 *
7079 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7080 * @param pu256Dst Where to return the oword.
7081 * @param iSegReg The index of the segment register to use for
7082 * this access. The base and limits are checked.
7083 * @param GCPtrMem The address of the guest memory.
7084 */
7085void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7086{
7087 /* The lazy approach for now... */
7088 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
7089 if ((GCPtrMem & 31) == 0)
7090 {
7091 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7092 pu256Dst->au64[0] = pu256Src->au64[0];
7093 pu256Dst->au64[1] = pu256Src->au64[1];
7094 pu256Dst->au64[2] = pu256Src->au64[2];
7095 pu256Dst->au64[3] = pu256Src->au64[3];
7096 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7097 return;
7098 }
7099
7100 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
7101 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
7102}
7103#endif
7104
7105
7106
7107/**
7108 * Fetches a descriptor register (lgdt, lidt).
7109 *
7110 * @returns Strict VBox status code.
7111 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7112 * @param pcbLimit Where to return the limit.
7113 * @param pGCPtrBase Where to return the base.
7114 * @param iSegReg The index of the segment register to use for
7115 * this access. The base and limits are checked.
7116 * @param GCPtrMem The address of the guest memory.
7117 * @param enmOpSize The effective operand size.
7118 */
7119VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7120 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7121{
7122 /*
7123 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7124 * little special:
7125 * - The two reads are done separately.
7126 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
7127 * - We suspect the 386 to actually commit the limit before the base in
7128 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7129 * don't try to emulate this eccentric behavior, because it's not well
7130 * enough understood and rather hard to trigger.
7131 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7132 */
7133 VBOXSTRICTRC rcStrict;
7134 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7135 {
7136 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7137 if (rcStrict == VINF_SUCCESS)
7138 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7139 }
7140 else
7141 {
7142 uint32_t uTmp = 0; /* (Visual C++ may otherwise warn that it is used uninitialized.) */
7143 if (enmOpSize == IEMMODE_32BIT)
7144 {
7145 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7146 {
7147 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7148 if (rcStrict == VINF_SUCCESS)
7149 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7150 }
7151 else
7152 {
7153 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7154 if (rcStrict == VINF_SUCCESS)
7155 {
7156 *pcbLimit = (uint16_t)uTmp;
7157 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7158 }
7159 }
7160 if (rcStrict == VINF_SUCCESS)
7161 *pGCPtrBase = uTmp;
7162 }
7163 else
7164 {
7165 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7166 if (rcStrict == VINF_SUCCESS)
7167 {
7168 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7169 if (rcStrict == VINF_SUCCESS)
7170 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7171 }
7172 }
7173 }
7174 return rcStrict;
7175}
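
/* Illustrative sketch (not from the original source): an lgdt/lidt style
 * caller fetches the 16-bit limit and the base as a pair and then hands them
 * on to the descriptor-table load path. iEffSeg, GCPtrEffSrc and
 * enmEffOpSize are assumed names for the example.
 *
 *      uint16_t cbLimit;
 *      RTGCPTR  GCPtrBase;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase,
 *                                                  iEffSeg, GCPtrEffSrc, enmEffOpSize);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */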
7176
7177
7178
7179/**
7180 * Stores a data byte.
7181 *
7182 * @returns Strict VBox status code.
7183 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7184 * @param iSegReg The index of the segment register to use for
7185 * this access. The base and limits are checked.
7186 * @param GCPtrMem The address of the guest memory.
7187 * @param u8Value The value to store.
7188 */
7189VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7190{
7191 /* The lazy approach for now... */
7192 uint8_t *pu8Dst;
7193 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7194 if (rc == VINF_SUCCESS)
7195 {
7196 *pu8Dst = u8Value;
7197 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7198 }
7199 return rc;
7200}
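
/* A minimal sketch, assuming a mov-to-memory style emulation: the computed
 * result is stored in one call and the strict status forwarded. iEffSeg,
 * GCPtrEffDst and u8Result are assumed names; this is not taken from the
 * surrounding code.
 *
 *      VBOXSTRICTRC rcStrict = iemMemStoreDataU8(pVCpu, iEffSeg, GCPtrEffDst, u8Result);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */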
7201
7202
7203#ifdef IEM_WITH_SETJMP
7204/**
7205 * Stores a data byte, longjmp on error.
7206 *
7207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7208 * @param iSegReg The index of the segment register to use for
7209 * this access. The base and limits are checked.
7210 * @param GCPtrMem The address of the guest memory.
7211 * @param u8Value The value to store.
7212 */
7213void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7214{
7215 /* The lazy approach for now... */
7216 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7217 *pu8Dst = u8Value;
7218 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7219}
7220#endif
7221
7222
7223/**
7224 * Stores a data word.
7225 *
7226 * @returns Strict VBox status code.
7227 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7228 * @param iSegReg The index of the segment register to use for
7229 * this access. The base and limits are checked.
7230 * @param GCPtrMem The address of the guest memory.
7231 * @param u16Value The value to store.
7232 */
7233VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7234{
7235 /* The lazy approach for now... */
7236 uint16_t *pu16Dst;
7237 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7238 if (rc == VINF_SUCCESS)
7239 {
7240 *pu16Dst = u16Value;
7241 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7242 }
7243 return rc;
7244}
7245
7246
7247#ifdef IEM_WITH_SETJMP
7248/**
7249 * Stores a data word, longjmp on error.
7250 *
7251 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7252 * @param iSegReg The index of the segment register to use for
7253 * this access. The base and limits are checked.
7254 * @param GCPtrMem The address of the guest memory.
7255 * @param u16Value The value to store.
7256 */
7257void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7258{
7259 /* The lazy approach for now... */
7260 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7261 *pu16Dst = u16Value;
7262 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7263}
7264#endif
7265
7266
7267/**
7268 * Stores a data dword.
7269 *
7270 * @returns Strict VBox status code.
7271 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7272 * @param iSegReg The index of the segment register to use for
7273 * this access. The base and limits are checked.
7274 * @param GCPtrMem The address of the guest memory.
7275 * @param u32Value The value to store.
7276 */
7277VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7278{
7279 /* The lazy approach for now... */
7280 uint32_t *pu32Dst;
7281 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7282 if (rc == VINF_SUCCESS)
7283 {
7284 *pu32Dst = u32Value;
7285 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7286 }
7287 return rc;
7288}
7289
7290
7291#ifdef IEM_WITH_SETJMP
7292/**
7293 * Stores a data dword, longjmp on error.
7294 *
7296 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7297 * @param iSegReg The index of the segment register to use for
7298 * this access. The base and limits are checked.
7299 * @param GCPtrMem The address of the guest memory.
7300 * @param u32Value The value to store.
7301 */
7302void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7303{
7304 /* The lazy approach for now... */
7305 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7306 *pu32Dst = u32Value;
7307 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7308}
7309#endif
7310
7311
7312/**
7313 * Stores a data qword.
7314 *
7315 * @returns Strict VBox status code.
7316 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7317 * @param iSegReg The index of the segment register to use for
7318 * this access. The base and limits are checked.
7319 * @param GCPtrMem The address of the guest memory.
7320 * @param u64Value The value to store.
7321 */
7322VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7323{
7324 /* The lazy approach for now... */
7325 uint64_t *pu64Dst;
7326 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7327 if (rc == VINF_SUCCESS)
7328 {
7329 *pu64Dst = u64Value;
7330 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7331 }
7332 return rc;
7333}
7334
7335
7336#ifdef IEM_WITH_SETJMP
7337/**
7338 * Stores a data qword, longjmp on error.
7339 *
7340 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7341 * @param iSegReg The index of the segment register to use for
7342 * this access. The base and limits are checked.
7343 * @param GCPtrMem The address of the guest memory.
7344 * @param u64Value The value to store.
7345 */
7346void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7347{
7348 /* The lazy approach for now... */
7349 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7350 *pu64Dst = u64Value;
7351 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7352}
7353#endif
7354
7355
7356/**
7357 * Stores a data dqword.
7358 *
7359 * @returns Strict VBox status code.
7360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7361 * @param iSegReg The index of the segment register to use for
7362 * this access. The base and limits are checked.
7363 * @param GCPtrMem The address of the guest memory.
7364 * @param u128Value The value to store.
7365 */
7366VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7367{
7368 /* The lazy approach for now... */
7369 PRTUINT128U pu128Dst;
7370 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7371 if (rc == VINF_SUCCESS)
7372 {
7373 pu128Dst->au64[0] = u128Value.au64[0];
7374 pu128Dst->au64[1] = u128Value.au64[1];
7375 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7376 }
7377 return rc;
7378}
7379
7380
7381#ifdef IEM_WITH_SETJMP
7382/**
7383 * Stores a data dqword, longjmp on error.
7384 *
7385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7386 * @param iSegReg The index of the segment register to use for
7387 * this access. The base and limits are checked.
7388 * @param GCPtrMem The address of the guest memory.
7389 * @param u128Value The value to store.
7390 */
7391void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7392{
7393 /* The lazy approach for now... */
7394 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7395 pu128Dst->au64[0] = u128Value.au64[0];
7396 pu128Dst->au64[1] = u128Value.au64[1];
7397 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7398}
7399#endif
7400
7401
7402/**
7403 * Stores a data dqword, SSE aligned.
7404 *
7405 * @returns Strict VBox status code.
7406 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7407 * @param iSegReg The index of the segment register to use for
7408 * this access. The base and limits are checked.
7409 * @param GCPtrMem The address of the guest memory.
7410 * @param u128Value The value to store.
7411 */
7412VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7413{
7414 /* The lazy approach for now... */
7415 if ( (GCPtrMem & 15)
7416 && !(pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7417 return iemRaiseGeneralProtectionFault0(pVCpu);
7418
7419 PRTUINT128U pu128Dst;
7420 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7421 if (rc == VINF_SUCCESS)
7422 {
7423 pu128Dst->au64[0] = u128Value.au64[0];
7424 pu128Dst->au64[1] = u128Value.au64[1];
7425 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7426 }
7427 return rc;
7428}
7429
7430
7431#ifdef IEM_WITH_SETJMP
7432/**
7433 * Stores a data dqword, SSE aligned, longjmp on error.
7434 *
7436 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7437 * @param iSegReg The index of the segment register to use for
7438 * this access. The base and limits are checked.
7439 * @param GCPtrMem The address of the guest memory.
7440 * @param u128Value The value to store.
7441 */
7442void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7443{
7444 /* The lazy approach for now... */
7445 if ( (GCPtrMem & 15) == 0
7446 || (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7447 {
7448 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7449 pu128Dst->au64[0] = u128Value.au64[0];
7450 pu128Dst->au64[1] = u128Value.au64[1];
7451 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7452 return;
7453 }
7454
7455 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
7456 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
7457}
7458#endif
7459
7460
7461/**
7462 * Stores a data qqword (256 bits).
7463 *
7464 * @returns Strict VBox status code.
7465 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7466 * @param iSegReg The index of the segment register to use for
7467 * this access. The base and limits are checked.
7468 * @param GCPtrMem The address of the guest memory.
7469 * @param pu256Value Pointer to the value to store.
7470 */
7471VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7472{
7473 /* The lazy approach for now... */
7474 PRTUINT256U pu256Dst;
7475 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7476 if (rc == VINF_SUCCESS)
7477 {
7478 pu256Dst->au64[0] = pu256Value->au64[0];
7479 pu256Dst->au64[1] = pu256Value->au64[1];
7480 pu256Dst->au64[2] = pu256Value->au64[2];
7481 pu256Dst->au64[3] = pu256Value->au64[3];
7482 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7483 }
7484 return rc;
7485}
7486
7487
7488#ifdef IEM_WITH_SETJMP
7489/**
7490 * Stores a data qqword (256 bits), longjmp on error.
7491 *
7492 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7493 * @param iSegReg The index of the segment register to use for
7494 * this access. The base and limits are checked.
7495 * @param GCPtrMem The address of the guest memory.
7496 * @param pu256Value Pointer to the value to store.
7497 */
7498void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7499{
7500 /* The lazy approach for now... */
7501 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7502 pu256Dst->au64[0] = pu256Value->au64[0];
7503 pu256Dst->au64[1] = pu256Value->au64[1];
7504 pu256Dst->au64[2] = pu256Value->au64[2];
7505 pu256Dst->au64[3] = pu256Value->au64[3];
7506 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7507}
7508#endif
7509
7510
7511/**
7512 * Stores a data qqword (256 bits), AVX aligned.
7513 *
7514 * @returns Strict VBox status code.
7515 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7516 * @param iSegReg The index of the segment register to use for
7517 * this access. The base and limits are checked.
7518 * @param GCPtrMem The address of the guest memory.
7519 * @param pu256Value Pointer to the value to store.
7520 */
7521VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7522{
7523 /* The lazy approach for now... */
7524 if (GCPtrMem & 31)
7525 return iemRaiseGeneralProtectionFault0(pVCpu);
7526
7527 PRTUINT256U pu256Dst;
7528 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7529 if (rc == VINF_SUCCESS)
7530 {
7531 pu256Dst->au64[0] = pu256Value->au64[0];
7532 pu256Dst->au64[1] = pu256Value->au64[1];
7533 pu256Dst->au64[2] = pu256Value->au64[2];
7534 pu256Dst->au64[3] = pu256Value->au64[3];
7535 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7536 }
7537 return rc;
7538}
7539
7540
7541#ifdef IEM_WITH_SETJMP
7542/**
7543 * Stores a data qqword (256 bits), AVX aligned, longjmp on error.
7544 *
7546 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7547 * @param iSegReg The index of the segment register to use for
7548 * this access. The base and limits are checked.
7549 * @param GCPtrMem The address of the guest memory.
7550 * @param pu256Value Pointer to the value to store.
7551 */
7552void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7553{
7554 /* The lazy approach for now... */
7555 if ((GCPtrMem & 31) == 0)
7556 {
7557 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7558 pu256Dst->au64[0] = pu256Value->au64[0];
7559 pu256Dst->au64[1] = pu256Value->au64[1];
7560 pu256Dst->au64[2] = pu256Value->au64[2];
7561 pu256Dst->au64[3] = pu256Value->au64[3];
7562 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7563 return;
7564 }
7565
7566 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
7567 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
7568}
7569#endif
7570
7571
7572/**
7573 * Stores a descriptor register (sgdt, sidt).
7574 *
7575 * @returns Strict VBox status code.
7576 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7577 * @param cbLimit The limit.
7578 * @param GCPtrBase The base address.
7579 * @param iSegReg The index of the segment register to use for
7580 * this access. The base and limits are checked.
7581 * @param GCPtrMem The address of the guest memory.
7582 */
7583VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7584{
7585 /*
7586 * The SIDT and SGDT instructions actually store the data using two
7587 * independent writes. The instructions do not respond to opsize prefixes.
7588 */
7589 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7590 if (rcStrict == VINF_SUCCESS)
7591 {
7592 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
7593 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7594 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7595 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7596 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
7597 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7598 else
7599 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7600 }
7601 return rcStrict;
7602}
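
/*
 * Resulting guest memory layout (illustrative recap of the code above): the
 * 16-bit limit goes at GCPtrMem, followed by the base at GCPtrMem+2, which is
 * 64 bits wide in 64-bit mode and 32 bits otherwise, with the top base byte
 * forced to 0xff for the 286-target 16-bit case.
 */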
7603
7604
7605/**
7606 * Pushes a word onto the stack.
7607 *
7608 * @returns Strict VBox status code.
7609 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7610 * @param u16Value The value to push.
7611 */
7612VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT
7613{
7614 /* Decrement the stack pointer. */
7615 uint64_t uNewRsp;
7616 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
7617
7618 /* Write the word the lazy way. */
7619 uint16_t *pu16Dst;
7620 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7621 if (rc == VINF_SUCCESS)
7622 {
7623 *pu16Dst = u16Value;
7624 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
7625 }
7626
7627 /* Commit the new RSP value unless an access handler made trouble. */
7628 if (rc == VINF_SUCCESS)
7629 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7630
7631 return rc;
7632}
7633
7634
7635/**
7636 * Pushes a dword onto the stack.
7637 *
7638 * @returns Strict VBox status code.
7639 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7640 * @param u32Value The value to push.
7641 */
7642VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
7643{
7644 /* Decrement the stack pointer. */
7645 uint64_t uNewRsp;
7646 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
7647
7648 /* Write the dword the lazy way. */
7649 uint32_t *pu32Dst;
7650 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7651 if (rc == VINF_SUCCESS)
7652 {
7653 *pu32Dst = u32Value;
7654 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
7655 }
7656
7657 /* Commit the new RSP value unless an access handler made trouble. */
7658 if (rc == VINF_SUCCESS)
7659 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7660
7661 return rc;
7662}
7663
7664
7665/**
7666 * Pushes a dword segment register value onto the stack.
7667 *
7668 * @returns Strict VBox status code.
7669 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7670 * @param u32Value The value to push.
7671 */
7672VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
7673{
7674 /* Decrement the stack pointer. */
7675 uint64_t uNewRsp;
7676 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
7677
7678 /* The Intel docs talk about zero extending the selector register
7679 value. My actual Intel CPU here might be zero extending the value
7680 but it still only writes the lower word... */
7681 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7682 * happens when crossing an electric page boundary, is the high word checked
7683 * for write accessibility or not? Probably it is. What about segment limits?
7684 * It appears this behavior is also shared with trap error codes.
7685 *
7686 * Docs indicate the behavior may have changed around the Pentium or Pentium Pro. Check
7687 * on ancient hardware to see when it actually changed. */
7688 uint16_t *pu16Dst;
7689 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
7690 if (rc == VINF_SUCCESS)
7691 {
7692 *pu16Dst = (uint16_t)u32Value;
7693 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7694 }
7695
7696 /* Commit the new RSP value unless an access handler made trouble. */
7697 if (rc == VINF_SUCCESS)
7698 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7699
7700 return rc;
7701}
7702
7703
7704/**
7705 * Pushes a qword onto the stack.
7706 *
7707 * @returns Strict VBox status code.
7708 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7709 * @param u64Value The value to push.
7710 */
7711VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT
7712{
7713 /* Decrement the stack pointer. */
7714 uint64_t uNewRsp;
7715 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
7716
7717 /* Write the qword the lazy way. */
7718 uint64_t *pu64Dst;
7719 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7720 if (rc == VINF_SUCCESS)
7721 {
7722 *pu64Dst = u64Value;
7723 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
7724 }
7725
7726 /* Commit the new RSP value unless an access handler made trouble. */
7727 if (rc == VINF_SUCCESS)
7728 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7729
7730 return rc;
7731}
7732
7733
7734/**
7735 * Pops a word from the stack.
7736 *
7737 * @returns Strict VBox status code.
7738 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7739 * @param pu16Value Where to store the popped value.
7740 */
7741VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT
7742{
7743 /* Increment the stack pointer. */
7744 uint64_t uNewRsp;
7745 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
7746
7747 /* Read the word the lazy way. */
7748 uint16_t const *pu16Src;
7749 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7750 if (rc == VINF_SUCCESS)
7751 {
7752 *pu16Value = *pu16Src;
7753 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7754
7755 /* Commit the new RSP value. */
7756 if (rc == VINF_SUCCESS)
7757 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7758 }
7759
7760 return rc;
7761}
7762
7763
7764/**
7765 * Pops a dword from the stack.
7766 *
7767 * @returns Strict VBox status code.
7768 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7769 * @param pu32Value Where to store the popped value.
7770 */
7771VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT
7772{
7773 /* Increment the stack pointer. */
7774 uint64_t uNewRsp;
7775 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
7776
7777 /* Read the dword the lazy way. */
7778 uint32_t const *pu32Src;
7779 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7780 if (rc == VINF_SUCCESS)
7781 {
7782 *pu32Value = *pu32Src;
7783 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7784
7785 /* Commit the new RSP value. */
7786 if (rc == VINF_SUCCESS)
7787 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7788 }
7789
7790 return rc;
7791}
7792
7793
7794/**
7795 * Pops a qword from the stack.
7796 *
7797 * @returns Strict VBox status code.
7798 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7799 * @param pu64Value Where to store the popped value.
7800 */
7801VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT
7802{
7803 /* Increment the stack pointer. */
7804 uint64_t uNewRsp;
7805 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
7806
7807 /* Read the qword the lazy way. */
7808 uint64_t const *pu64Src;
7809 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7810 if (rc == VINF_SUCCESS)
7811 {
7812 *pu64Value = *pu64Src;
7813 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7814
7815 /* Commit the new RSP value. */
7816 if (rc == VINF_SUCCESS)
7817 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7818 }
7819
7820 return rc;
7821}
7822
7823
7824/**
7825 * Pushes a word onto the stack, using a temporary stack pointer.
7826 *
7827 * @returns Strict VBox status code.
7828 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7829 * @param u16Value The value to push.
7830 * @param pTmpRsp Pointer to the temporary stack pointer.
7831 */
7832VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7833{
7834 /* Decrement the stack pointer. */
7835 RTUINT64U NewRsp = *pTmpRsp;
7836 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
7837
7838 /* Write the word the lazy way. */
7839 uint16_t *pu16Dst;
7840 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7841 if (rc == VINF_SUCCESS)
7842 {
7843 *pu16Dst = u16Value;
7844 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
7845 }
7846
7847 /* Commit the new RSP value unless an access handler made trouble. */
7848 if (rc == VINF_SUCCESS)
7849 *pTmpRsp = NewRsp;
7850
7851 return rc;
7852}
7853
7854
7855/**
7856 * Pushes a dword onto the stack, using a temporary stack pointer.
7857 *
7858 * @returns Strict VBox status code.
7859 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7860 * @param u32Value The value to push.
7861 * @param pTmpRsp Pointer to the temporary stack pointer.
7862 */
7863VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7864{
7865 /* Decrement the stack pointer. */
7866 RTUINT64U NewRsp = *pTmpRsp;
7867 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
7868
7869 /* Write the dword the lazy way. */
7870 uint32_t *pu32Dst;
7871 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7872 if (rc == VINF_SUCCESS)
7873 {
7874 *pu32Dst = u32Value;
7875 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
7876 }
7877
7878 /* Commit the new RSP value unless an access handler made trouble. */
7879 if (rc == VINF_SUCCESS)
7880 *pTmpRsp = NewRsp;
7881
7882 return rc;
7883}
7884
7885
7886/**
7887 * Pushes a qword onto the stack, using a temporary stack pointer.
7888 *
7889 * @returns Strict VBox status code.
7890 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7891 * @param u64Value The value to push.
7892 * @param pTmpRsp Pointer to the temporary stack pointer.
7893 */
7894VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7895{
7896 /* Decrement the stack pointer. */
7897 RTUINT64U NewRsp = *pTmpRsp;
7898 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
7899
7900 /* Write the qword the lazy way. */
7901 uint64_t *pu64Dst;
7902 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7903 if (rc == VINF_SUCCESS)
7904 {
7905 *pu64Dst = u64Value;
7906 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
7907 }
7908
7909 /* Commit the new RSP value unless an access handler made trouble. */
7910 if (rc == VINF_SUCCESS)
7911 *pTmpRsp = NewRsp;
7912
7913 return rc;
7914}
7915
7916
7917/**
7918 * Pops a word from the stack, using a temporary stack pointer.
7919 *
7920 * @returns Strict VBox status code.
7921 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7922 * @param pu16Value Where to store the popped value.
7923 * @param pTmpRsp Pointer to the temporary stack pointer.
7924 */
7925VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7926{
7927 /* Increment the stack pointer. */
7928 RTUINT64U NewRsp = *pTmpRsp;
7929 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
7930
7931 /* Read the word the lazy way. */
7932 uint16_t const *pu16Src;
7933 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7934 if (rc == VINF_SUCCESS)
7935 {
7936 *pu16Value = *pu16Src;
7937 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7938
7939 /* Commit the new RSP value. */
7940 if (rc == VINF_SUCCESS)
7941 *pTmpRsp = NewRsp;
7942 }
7943
7944 return rc;
7945}
7946
7947
7948/**
7949 * Pops a dword from the stack, using a temporary stack pointer.
7950 *
7951 * @returns Strict VBox status code.
7952 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7953 * @param pu32Value Where to store the popped value.
7954 * @param pTmpRsp Pointer to the temporary stack pointer.
7955 */
7956VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7957{
7958 /* Increment the stack pointer. */
7959 RTUINT64U NewRsp = *pTmpRsp;
7960 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
7961
7962 /* Read the dword the lazy way. */
7963 uint32_t const *pu32Src;
7964 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7965 if (rc == VINF_SUCCESS)
7966 {
7967 *pu32Value = *pu32Src;
7968 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7969
7970 /* Commit the new RSP value. */
7971 if (rc == VINF_SUCCESS)
7972 *pTmpRsp = NewRsp;
7973 }
7974
7975 return rc;
7976}
7977
7978
7979/**
7980 * Pops a qword from the stack, using a temporary stack pointer.
7981 *
7982 * @returns Strict VBox status code.
7983 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7984 * @param pu64Value Where to store the popped value.
7985 * @param pTmpRsp Pointer to the temporary stack pointer.
7986 */
7987VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7988{
7989 /* Increment the stack pointer. */
7990 RTUINT64U NewRsp = *pTmpRsp;
7991 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
7992
7993 /* Read the qword the lazy way. */
7994 uint64_t const *pu64Src;
7995 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7996 if (rcStrict == VINF_SUCCESS)
7997 {
7998 *pu64Value = *pu64Src;
7999 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8000
8001 /* Commit the new RSP value. */
8002 if (rcStrict == VINF_SUCCESS)
8003 *pTmpRsp = NewRsp;
8004 }
8005
8006 return rcStrict;
8007}
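
/*
 * A minimal caller sketch for the *Ex pop variants (illustrative only; the
 * variable names and the two-word frame shape are made up for the example):
 * pop several values through a temporary RSP and only commit it once every
 * pop has succeeded.
 *
 *      RTUINT64U    TmpRsp;
 *      TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
 *      uint16_t     uNewIp = 0;
 *      uint16_t     uNewCs = 0;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopU16Ex(pVCpu, &uNewIp, &TmpRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *          rcStrict = iemMemStackPopU16Ex(pVCpu, &uNewCs, &TmpRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *          pVCpu->cpum.GstCtx.rsp = TmpRsp.u; // commit RSP only after all pops succeeded
 *      return rcStrict;
 */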
8008
8009
8010/**
8011 * Begin a special stack push (used by interrupts, exceptions and such).
8012 *
8013 * This will raise \#SS or \#PF if appropriate.
8014 *
8015 * @returns Strict VBox status code.
8016 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8017 * @param cbMem The number of bytes to push onto the stack.
8018 * @param ppvMem Where to return the pointer to the stack memory.
8019 * As with the other memory functions this could be
8020 * direct access or bounce buffered access, so
8021 * don't commit registers until the commit call
8022 * succeeds.
8023 * @param puNewRsp Where to return the new RSP value. This must be
8024 * passed unchanged to
8025 * iemMemStackPushCommitSpecial().
8026 */
8027VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8028{
8029 Assert(cbMem < UINT8_MAX);
8030 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8031 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
8032}
8033
8034
8035/**
8036 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8037 *
8038 * This will update the rSP.
8039 *
8040 * @returns Strict VBox status code.
8041 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8042 * @param pvMem The pointer returned by
8043 * iemMemStackPushBeginSpecial().
8044 * @param uNewRsp The new RSP value returned by
8045 * iemMemStackPushBeginSpecial().
8046 */
8047VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
8048{
8049 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
8050 if (rcStrict == VINF_SUCCESS)
8051 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8052 return rcStrict;
8053}
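
/*
 * A minimal caller sketch for the special push pair (illustrative only; the
 * three-dword frame and the uFirstValue/uSecondValue/uThirdValue inputs are
 * made-up placeholders):
 *
 *      void        *pvFrame;
 *      uint64_t     uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 3 * sizeof(uint32_t), &pvFrame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      uint32_t *pau32Frame = (uint32_t *)pvFrame;
 *      pau32Frame[0] = uFirstValue;    // fill in the whole frame first...
 *      pau32Frame[1] = uSecondValue;
 *      pau32Frame[2] = uThirdValue;
 *      rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvFrame, uNewRsp); // ...then commit the frame and RSP together
 */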
8054
8055
8056/**
8057 * Begin a special stack pop (used by iret, retf and such).
8058 *
8059 * This will raise \#SS or \#PF if appropriate.
8060 *
8061 * @returns Strict VBox status code.
8062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8063 * @param cbMem The number of bytes to pop from the stack.
8064 * @param ppvMem Where to return the pointer to the stack memory.
8065 * @param puNewRsp Where to return the new RSP value. This must be
8066 * assigned to CPUMCTX::rsp manually some time
8067 * after iemMemStackPopDoneSpecial() has been
8068 * called.
8069 */
8070VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8071{
8072 Assert(cbMem < UINT8_MAX);
8073 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8074 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8075}
8076
8077
8078/**
8079 * Continue a special stack pop (used by iret and retf).
8080 *
8081 * This will raise \#SS or \#PF if appropriate.
8082 *
8083 * @returns Strict VBox status code.
8084 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8085 * @param cbMem The number of bytes to pop from the stack.
8086 * @param ppvMem Where to return the pointer to the stack memory.
8087 * @param puNewRsp Where to return the new RSP value. This must be
8088 * assigned to CPUMCTX::rsp manually some time
8089 * after iemMemStackPopDoneSpecial() has been
8090 * called.
8091 */
8092VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8093{
8094 Assert(cbMem < UINT8_MAX);
8095 RTUINT64U NewRsp;
8096 NewRsp.u = *puNewRsp;
8097 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8098 *puNewRsp = NewRsp.u;
8099 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8100}
8101
8102
8103/**
8104 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8105 * iemMemStackPopContinueSpecial).
8106 *
8107 * The caller will manually commit the rSP.
8108 *
8109 * @returns Strict VBox status code.
8110 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8111 * @param pvMem The pointer returned by
8112 * iemMemStackPopBeginSpecial() or
8113 * iemMemStackPopContinueSpecial().
8114 */
8115VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
8116{
8117 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8118}
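
/*
 * A minimal caller sketch for the special pop helpers (illustrative only; the
 * three-dword frame and the validation step are made up for the example):
 *
 *      uint64_t        uNewRsp;
 *      uint32_t const *pau32Frame;
 *      VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pVCpu, 3 * sizeof(uint32_t),
 *                                                            (void const **)&pau32Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      uint32_t const uValue0 = pau32Frame[0];     // read the frame while it is mapped
 *      uint32_t const uValue1 = pau32Frame[1];
 *      uint32_t const uValue2 = pau32Frame[2];
 *      rcStrict = iemMemStackPopDoneSpecial(pVCpu, pau32Frame);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      // ... validate the popped values ...
 *      pVCpu->cpum.GstCtx.rsp = uNewRsp;           // committed manually, as documented above
 */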
8119
8120
8121/**
8122 * Fetches a system table byte.
8123 *
8124 * @returns Strict VBox status code.
8125 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8126 * @param pbDst Where to return the byte.
8127 * @param iSegReg The index of the segment register to use for
8128 * this access. The base and limits are checked.
8129 * @param GCPtrMem The address of the guest memory.
8130 */
8131VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8132{
8133 /* The lazy approach for now... */
8134 uint8_t const *pbSrc;
8135 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8136 if (rc == VINF_SUCCESS)
8137 {
8138 *pbDst = *pbSrc;
8139 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8140 }
8141 return rc;
8142}
8143
8144
8145/**
8146 * Fetches a system table word.
8147 *
8148 * @returns Strict VBox status code.
8149 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8150 * @param pu16Dst Where to return the word.
8151 * @param iSegReg The index of the segment register to use for
8152 * this access. The base and limits are checked.
8153 * @param GCPtrMem The address of the guest memory.
8154 */
8155VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8156{
8157 /* The lazy approach for now... */
8158 uint16_t const *pu16Src;
8159 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8160 if (rc == VINF_SUCCESS)
8161 {
8162 *pu16Dst = *pu16Src;
8163 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8164 }
8165 return rc;
8166}
8167
8168
8169/**
8170 * Fetches a system table dword.
8171 *
8172 * @returns Strict VBox status code.
8173 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8174 * @param pu32Dst Where to return the dword.
8175 * @param iSegReg The index of the segment register to use for
8176 * this access. The base and limits are checked.
8177 * @param GCPtrMem The address of the guest memory.
8178 */
8179VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8180{
8181 /* The lazy approach for now... */
8182 uint32_t const *pu32Src;
8183 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8184 if (rc == VINF_SUCCESS)
8185 {
8186 *pu32Dst = *pu32Src;
8187 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8188 }
8189 return rc;
8190}
8191
8192
8193/**
8194 * Fetches a system table qword.
8195 *
8196 * @returns Strict VBox status code.
8197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8198 * @param pu64Dst Where to return the qword.
8199 * @param iSegReg The index of the segment register to use for
8200 * this access. The base and limits are checked.
8201 * @param GCPtrMem The address of the guest memory.
8202 */
8203VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8204{
8205 /* The lazy approach for now... */
8206 uint64_t const *pu64Src;
8207 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8208 if (rc == VINF_SUCCESS)
8209 {
8210 *pu64Dst = *pu64Src;
8211 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8212 }
8213 return rc;
8214}
8215
8216
8217/**
8218 * Fetches a descriptor table entry with caller specified error code.
8219 *
8220 * @returns Strict VBox status code.
8221 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8222 * @param pDesc Where to return the descriptor table entry.
8223 * @param uSel The selector whose table entry to fetch.
8224 * @param uXcpt The exception to raise on table lookup error.
8225 * @param uErrorCode The error code associated with the exception.
8226 */
8227static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8228 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8229{
8230 AssertPtr(pDesc);
8231 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8232
8233 /** @todo did the 286 require all 8 bytes to be accessible? */
8234 /*
8235 * Get the selector table base and check bounds.
8236 */
8237 RTGCPTR GCPtrBase;
8238 if (uSel & X86_SEL_LDT)
8239 {
8240 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8241 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8242 {
8243 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8244 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8245 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8246 uErrorCode, 0);
8247 }
8248
8249 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8250 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8251 }
8252 else
8253 {
8254 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8255 {
8256 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8257 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8258 uErrorCode, 0);
8259 }
8260 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8261 }
8262
8263 /*
8264 * Read the legacy descriptor and maybe the long mode extensions if
8265 * required.
8266 */
8267 VBOXSTRICTRC rcStrict;
8268 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8269 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8270 else
8271 {
8272 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8273 if (rcStrict == VINF_SUCCESS)
8274 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8275 if (rcStrict == VINF_SUCCESS)
8276 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8277 if (rcStrict == VINF_SUCCESS)
8278 pDesc->Legacy.au16[3] = 0;
8279 else
8280 return rcStrict;
8281 }
8282
8283 if (rcStrict == VINF_SUCCESS)
8284 {
8285 if ( !IEM_IS_LONG_MODE(pVCpu)
8286 || pDesc->Legacy.Gen.u1DescType)
8287 pDesc->Long.au64[1] = 0;
8288 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8289 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8290 else
8291 {
8292 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8293 /** @todo is this the right exception? */
8294 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8295 }
8296 }
8297 return rcStrict;
8298}
8299
8300
8301/**
8302 * Fetches a descriptor table entry.
8303 *
8304 * @returns Strict VBox status code.
8305 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8306 * @param pDesc Where to return the descriptor table entry.
8307 * @param uSel The selector whose table entry to fetch.
8308 * @param uXcpt The exception to raise on table lookup error.
8309 */
8310VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8311{
8312 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8313}
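
/*
 * A minimal caller sketch (illustrative only; uSel is whatever selector the
 * caller is loading): fetch the descriptor, raising #GP on lookup trouble,
 * and set the accessed bit via the helper below when dealing with a
 * not-yet-accessed code or data descriptor.
 *
 *      IEMSELDESC   Desc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      if (   Desc.Legacy.Gen.u1DescType            // only code/data descriptors have an accessed bit
 *          && !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
 *          rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
 */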
8314
8315
8316/**
8317 * Marks the selector descriptor as accessed (only non-system descriptors).
8318 *
8319 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8320 * will therefore skip the limit checks.
8321 *
8322 * @returns Strict VBox status code.
8323 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8324 * @param uSel The selector.
8325 */
8326VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8327{
8328 /*
8329 * Get the selector table base and calculate the entry address.
8330 */
8331 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8332 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8333 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8334 GCPtr += uSel & X86_SEL_MASK;
8335
8336 /*
8337 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8338 * ugly stuff to avoid this. This will make sure it's an atomic access
8339 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8340 */
8341 VBOXSTRICTRC rcStrict;
8342 uint32_t volatile *pu32;
8343 if ((GCPtr & 3) == 0)
8344 {
8345 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
8346 GCPtr += 2 + 2;
8347 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8348 if (rcStrict != VINF_SUCCESS)
8349 return rcStrict;
8350 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8351 }
8352 else
8353 {
8354 /* The misaligned GDT/LDT case, map the whole thing. */
8355 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8356 if (rcStrict != VINF_SUCCESS)
8357 return rcStrict;
8358 switch ((uintptr_t)pu32 & 3)
8359 {
8360 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8361 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8362 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8363 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8364 }
8365 }
8366
8367 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8368}
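
/*
 * A quick recap of the bit arithmetic above: the accessed bit is bit 40 of the
 * 8-byte descriptor.  In the aligned case the dword at descriptor offset 4 is
 * mapped, so the bit to set is 40 - 32 = 8.  In the misaligned case the whole
 * descriptor is mapped and the base pointer is advanced by 3, 2 or 1 bytes to
 * the next 4-byte boundary, with the bit index reduced by 24, 16 or 8 bits to
 * match, so the same descriptor bit is targeted either way.
 */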
8369
8370/** @} */
8371
8372/** @name Opcode Helpers.
8373 * @{
8374 */
8375
8376/**
8377 * Calculates the effective address of a ModR/M memory operand.
8378 *
8379 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8380 *
8381 * @return Strict VBox status code.
8382 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8383 * @param bRm The ModRM byte.
8384 * @param cbImm The size of any immediate following the
8385 * effective address opcode bytes. Important for
8386 * RIP relative addressing.
8387 * @param pGCPtrEff Where to return the effective address.
8388 */
8389VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8390{
8391 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8392# define SET_SS_DEF() \
8393 do \
8394 { \
8395 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8396 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8397 } while (0)
8398
8399 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8400 {
8401/** @todo Check the effective address size crap! */
8402 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8403 {
8404 uint16_t u16EffAddr;
8405
8406 /* Handle the disp16 form with no registers first. */
8407 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8408 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8409 else
8410 {
8411 /* Get the displacement. */
8412 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8413 {
8414 case 0: u16EffAddr = 0; break;
8415 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8416 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8417 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8418 }
8419
8420 /* Add the base and index registers to the disp. */
8421 switch (bRm & X86_MODRM_RM_MASK)
8422 {
8423 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8424 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8425 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8426 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8427 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8428 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8429 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8430 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8431 }
8432 }
8433
8434 *pGCPtrEff = u16EffAddr;
8435 }
8436 else
8437 {
8438 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8439 uint32_t u32EffAddr;
8440
8441 /* Handle the disp32 form with no registers first. */
8442 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8443 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8444 else
8445 {
8446 /* Get the register (or SIB) value. */
8447 switch ((bRm & X86_MODRM_RM_MASK))
8448 {
8449 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8450 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8451 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8452 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8453 case 4: /* SIB */
8454 {
8455 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8456
8457 /* Get the index and scale it. */
8458 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8459 {
8460 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8461 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8462 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8463 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8464 case 4: u32EffAddr = 0; /*none */ break;
8465 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8466 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8467 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8468 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8469 }
8470 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8471
8472 /* add base */
8473 switch (bSib & X86_SIB_BASE_MASK)
8474 {
8475 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8476 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8477 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8478 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8479 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
8480 case 5:
8481 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8482 {
8483 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8484 SET_SS_DEF();
8485 }
8486 else
8487 {
8488 uint32_t u32Disp;
8489 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8490 u32EffAddr += u32Disp;
8491 }
8492 break;
8493 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8494 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8495 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8496 }
8497 break;
8498 }
8499 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8500 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8501 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8502 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8503 }
8504
8505 /* Get and add the displacement. */
8506 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8507 {
8508 case 0:
8509 break;
8510 case 1:
8511 {
8512 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8513 u32EffAddr += i8Disp;
8514 break;
8515 }
8516 case 2:
8517 {
8518 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8519 u32EffAddr += u32Disp;
8520 break;
8521 }
8522 default:
8523 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8524 }
8525
8526 }
8527 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8528 *pGCPtrEff = u32EffAddr;
8529 else
8530 {
8531 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8532 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8533 }
8534 }
8535 }
8536 else
8537 {
8538 uint64_t u64EffAddr;
8539
8540 /* Handle the rip+disp32 form with no registers first. */
8541 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8542 {
8543 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8544 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
8545 }
8546 else
8547 {
8548 /* Get the register (or SIB) value. */
8549 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8550 {
8551 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8552 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8553 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8554 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8555 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8556 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8557 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8558 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8559 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8560 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8561 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8562 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8563 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8564 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8565 /* SIB */
8566 case 4:
8567 case 12:
8568 {
8569 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8570
8571 /* Get the index and scale it. */
8572 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8573 {
8574 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8575 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8576 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8577 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8578 case 4: u64EffAddr = 0; /*none */ break;
8579 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8580 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8581 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8582 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8583 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8584 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8585 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8586 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8587 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8588 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8589 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8590 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8591 }
8592 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8593
8594 /* add base */
8595 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8596 {
8597 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8598 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8599 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8600 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8601 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
8602 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8603 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8604 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8605 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8606 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8607 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8608 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8609 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8610 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8611 /* complicated encodings */
8612 case 5:
8613 case 13:
8614 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8615 {
8616 if (!pVCpu->iem.s.uRexB)
8617 {
8618 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8619 SET_SS_DEF();
8620 }
8621 else
8622 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8623 }
8624 else
8625 {
8626 uint32_t u32Disp;
8627 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8628 u64EffAddr += (int32_t)u32Disp;
8629 }
8630 break;
8631 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8632 }
8633 break;
8634 }
8635 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8636 }
8637
8638 /* Get and add the displacement. */
8639 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8640 {
8641 case 0:
8642 break;
8643 case 1:
8644 {
8645 int8_t i8Disp;
8646 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8647 u64EffAddr += i8Disp;
8648 break;
8649 }
8650 case 2:
8651 {
8652 uint32_t u32Disp;
8653 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8654 u64EffAddr += (int32_t)u32Disp;
8655 break;
8656 }
8657 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8658 }
8659
8660 }
8661
8662 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8663 *pGCPtrEff = u64EffAddr;
8664 else
8665 {
8666 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8667 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8668 }
8669 }
8670
8671 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8672 return VINF_SUCCESS;
8673}
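
/*
 * A worked decoding example for the function above (32-bit addressing, made-up
 * encoding): bRm=0x84 gives mod=2 and rm=4, so a SIB byte and a disp32 follow;
 * bSib=0x88 gives scale=2 (factor 4), index=1 (ECX) and base=0 (EAX).  The
 * effective address thus ends up as EAX + ECX*4 + disp32, with the default
 * segment left at DS since neither EBP nor ESP enters the calculation.
 */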
8674
8675
8676/**
8677 * Calculates the effective address of a ModR/M memory operand.
8678 *
8679 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8680 *
8681 * @return Strict VBox status code.
8682 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8683 * @param bRm The ModRM byte.
8684 * @param cbImm The size of any immediate following the
8685 * effective address opcode bytes. Important for
8686 * RIP relative addressing.
8687 * @param pGCPtrEff Where to return the effective address.
8688 * @param offRsp RSP displacement.
8689 */
8690VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp) RT_NOEXCEPT
8691{
8692 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8693# define SET_SS_DEF() \
8694 do \
8695 { \
8696 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8697 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8698 } while (0)
8699
8700 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8701 {
8702/** @todo Check the effective address size crap! */
8703 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8704 {
8705 uint16_t u16EffAddr;
8706
8707 /* Handle the disp16 form with no registers first. */
8708 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8709 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8710 else
8711 {
8712 /* Get the displacement. */
8713 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8714 {
8715 case 0: u16EffAddr = 0; break;
8716 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8717 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8718 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8719 }
8720
8721 /* Add the base and index registers to the disp. */
8722 switch (bRm & X86_MODRM_RM_MASK)
8723 {
8724 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8725 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8726 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8727 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8728 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8729 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8730 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8731 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8732 }
8733 }
8734
8735 *pGCPtrEff = u16EffAddr;
8736 }
8737 else
8738 {
8739 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8740 uint32_t u32EffAddr;
8741
8742 /* Handle the disp32 form with no registers first. */
8743 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8744 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8745 else
8746 {
8747 /* Get the register (or SIB) value. */
8748 switch ((bRm & X86_MODRM_RM_MASK))
8749 {
8750 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8751 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8752 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8753 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8754 case 4: /* SIB */
8755 {
8756 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8757
8758 /* Get the index and scale it. */
8759 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8760 {
8761 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8762 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8763 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8764 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8765 case 4: u32EffAddr = 0; /*none */ break;
8766 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8767 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8768 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8769 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8770 }
8771 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8772
8773 /* add base */
8774 switch (bSib & X86_SIB_BASE_MASK)
8775 {
8776 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8777 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8778 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8779 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8780 case 4:
8781 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
8782 SET_SS_DEF();
8783 break;
8784 case 5:
8785 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8786 {
8787 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8788 SET_SS_DEF();
8789 }
8790 else
8791 {
8792 uint32_t u32Disp;
8793 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8794 u32EffAddr += u32Disp;
8795 }
8796 break;
8797 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8798 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8799 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8800 }
8801 break;
8802 }
8803 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8804 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8805 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8806 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8807 }
8808
8809 /* Get and add the displacement. */
8810 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8811 {
8812 case 0:
8813 break;
8814 case 1:
8815 {
8816 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8817 u32EffAddr += i8Disp;
8818 break;
8819 }
8820 case 2:
8821 {
8822 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8823 u32EffAddr += u32Disp;
8824 break;
8825 }
8826 default:
8827 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8828 }
8829
8830 }
8831 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8832 *pGCPtrEff = u32EffAddr;
8833 else
8834 {
8835 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8836 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8837 }
8838 }
8839 }
8840 else
8841 {
8842 uint64_t u64EffAddr;
8843
8844 /* Handle the rip+disp32 form with no registers first. */
8845 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8846 {
8847 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8848 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
8849 }
8850 else
8851 {
8852 /* Get the register (or SIB) value. */
8853 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8854 {
8855 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8856 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8857 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8858 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8859 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8860 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8861 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8862 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8863 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8864 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8865 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8866 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8867 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8868 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8869 /* SIB */
8870 case 4:
8871 case 12:
8872 {
8873 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8874
8875 /* Get the index and scale it. */
8876 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8877 {
8878 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8879 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8880 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8881 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8882 case 4: u64EffAddr = 0; /*none */ break;
8883 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8884 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8885 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8886 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8887 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8888 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8889 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8890 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8891 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8892 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8893 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8894 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8895 }
8896 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8897
8898 /* add base */
8899 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8900 {
8901 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8902 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8903 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8904 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8905 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
8906 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8907 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8908 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8909 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8910 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8911 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8912 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8913 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8914 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8915 /* complicated encodings */
8916 case 5:
8917 case 13:
8918 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8919 {
8920 if (!pVCpu->iem.s.uRexB)
8921 {
8922 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8923 SET_SS_DEF();
8924 }
8925 else
8926 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8927 }
8928 else
8929 {
8930 uint32_t u32Disp;
8931 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8932 u64EffAddr += (int32_t)u32Disp;
8933 }
8934 break;
8935 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8936 }
8937 break;
8938 }
8939 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8940 }
8941
8942 /* Get and add the displacement. */
8943 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8944 {
8945 case 0:
8946 break;
8947 case 1:
8948 {
8949 int8_t i8Disp;
8950 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8951 u64EffAddr += i8Disp;
8952 break;
8953 }
8954 case 2:
8955 {
8956 uint32_t u32Disp;
8957 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8958 u64EffAddr += (int32_t)u32Disp;
8959 break;
8960 }
8961 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8962 }
8963
8964 }
8965
8966 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8967 *pGCPtrEff = u64EffAddr;
8968 else
8969 {
8970 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8971 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8972 }
8973 }
8974
8975 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8976 return VINF_SUCCESS;
8977}
8978
8979
8980#ifdef IEM_WITH_SETJMP
8981/**
8982 * Calculates the effective address of a ModR/M memory operand.
8983 *
8984 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8985 *
8986 * May longjmp on internal error.
8987 *
8988 * @return The effective address.
8989 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8990 * @param bRm The ModRM byte.
8991 * @param cbImm The size of any immediate following the
8992 * effective address opcode bytes. Important for
8993 * RIP relative addressing.
8994 */
8995RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm) RT_NOEXCEPT
8996{
8997 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8998# define SET_SS_DEF() \
8999 do \
9000 { \
9001 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9002 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9003 } while (0)
9004
9005 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
9006 {
9007/** @todo Check the effective address size crap! */
9008 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9009 {
9010 uint16_t u16EffAddr;
9011
9012 /* Handle the disp16 form with no registers first. */
9013 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9014 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9015 else
9016 {
9017 /* Get the displacement. */
9018 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9019 {
9020 case 0: u16EffAddr = 0; break;
9021 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9022 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9023 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
9024 }
9025
9026 /* Add the base and index registers to the disp. */
9027 switch (bRm & X86_MODRM_RM_MASK)
9028 {
9029 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9030 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9031 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9032 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9033 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9034 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9035 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9036 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9037 }
9038 }
9039
9040 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
9041 return u16EffAddr;
9042 }
9043
9044 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9045 uint32_t u32EffAddr;
9046
9047 /* Handle the disp32 form with no registers first. */
9048 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9049 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9050 else
9051 {
9052 /* Get the register (or SIB) value. */
9053 switch ((bRm & X86_MODRM_RM_MASK))
9054 {
9055 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9056 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9057 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9058 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9059 case 4: /* SIB */
9060 {
9061 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9062
9063 /* Get the index and scale it. */
9064 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9065 {
9066 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9067 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9068 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9069 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9070 case 4: u32EffAddr = 0; /*none */ break;
9071 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9072 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9073 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9074 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9075 }
9076 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9077
9078 /* add base */
9079 switch (bSib & X86_SIB_BASE_MASK)
9080 {
9081 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9082 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9083 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9084 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9085 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
9086 case 5:
9087 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9088 {
9089 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9090 SET_SS_DEF();
9091 }
9092 else
9093 {
9094 uint32_t u32Disp;
9095 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9096 u32EffAddr += u32Disp;
9097 }
9098 break;
9099 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9100 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9101 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9102 }
9103 break;
9104 }
9105 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9106 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9107 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9108 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9109 }
9110
9111 /* Get and add the displacement. */
9112 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9113 {
9114 case 0:
9115 break;
9116 case 1:
9117 {
9118 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9119 u32EffAddr += i8Disp;
9120 break;
9121 }
9122 case 2:
9123 {
9124 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9125 u32EffAddr += u32Disp;
9126 break;
9127 }
9128 default:
9129 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
9130 }
9131 }
9132
9133 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9134 {
9135 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9136 return u32EffAddr;
9137 }
9138 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9139 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
9140 return u32EffAddr & UINT16_MAX;
9141 }
9142
9143 uint64_t u64EffAddr;
9144
9145 /* Handle the rip+disp32 form with no registers first. */
9146 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9147 {
9148 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9149 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
9150 }
9151 else
9152 {
9153 /* Get the register (or SIB) value. */
9154 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9155 {
9156 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9157 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9158 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9159 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9160 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9161 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9162 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9163 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9164 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9165 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9166 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9167 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9168 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9169 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9170 /* SIB */
9171 case 4:
9172 case 12:
9173 {
9174 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9175
9176 /* Get the index and scale it. */
9177 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9178 {
9179 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9180 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9181 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9182 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9183 case 4: u64EffAddr = 0; /*none */ break;
9184 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9185 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9186 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9187 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9188 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9189 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9190 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9191 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9192 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9193 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9194 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9195 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9196 }
9197 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9198
9199 /* add base */
9200 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9201 {
9202 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9203 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9204 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9205 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9206 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
9207 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9208 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9209 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9210 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9211 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9212 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9213 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9214 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9215 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9216 /* complicated encodings */
9217 case 5:
9218 case 13:
9219 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9220 {
9221 if (!pVCpu->iem.s.uRexB)
9222 {
9223 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9224 SET_SS_DEF();
9225 }
9226 else
9227 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9228 }
9229 else
9230 {
9231 uint32_t u32Disp;
9232 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9233 u64EffAddr += (int32_t)u32Disp;
9234 }
9235 break;
9236 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9237 }
9238 break;
9239 }
9240 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9241 }
9242
9243 /* Get and add the displacement. */
9244 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9245 {
9246 case 0:
9247 break;
9248 case 1:
9249 {
9250 int8_t i8Disp;
9251 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9252 u64EffAddr += i8Disp;
9253 break;
9254 }
9255 case 2:
9256 {
9257 uint32_t u32Disp;
9258 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9259 u64EffAddr += (int32_t)u32Disp;
9260 break;
9261 }
9262 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9263 }
9264
9265 }
9266
9267 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9268 {
9269 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9270 return u64EffAddr;
9271 }
9272 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9273 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9274 return u64EffAddr & UINT32_MAX;
9275}
9276#endif /* IEM_WITH_SETJMP */
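
/*
 * Worked example (illustrative only, not part of the original source): for a
 * 32-bit "mov eax, [ebx+esi*4+0x10]" the decoder sees ModR/M mod=01 reg=000
 * rm=100 (SIB follows) and SIB scale=10 index=110 (esi) base=011 (ebx), so
 * the helpers above effectively compute:
 *
 *      u32EffAddr  = esi;              // index register
 *      u32EffAddr <<= 2;               // scale factor of 4
 *      u32EffAddr += ebx;              // base register
 *      u32EffAddr += (int8_t)0x10;     // 8-bit displacement (mod=01)
 */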
9277
9278/** @} */
9279
9280
9281#ifdef LOG_ENABLED
9282/**
9283 * Logs the current instruction.
9284 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9285 * @param fSameCtx Set if we have the same context information as the VMM,
9286 * clear if we may have already executed an instruction in
9287 * our debug context. When clear, we assume IEMCPU holds
9288 * valid CPU mode info.
9291 * @param pszFunction The IEM function doing the execution.
9292 */
9293static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9294{
9295# ifdef IN_RING3
9296 if (LogIs2Enabled())
9297 {
9298 char szInstr[256];
9299 uint32_t cbInstr = 0;
9300 if (fSameCtx)
9301 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9302 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9303 szInstr, sizeof(szInstr), &cbInstr);
9304 else
9305 {
9306 uint32_t fFlags = 0;
9307 switch (pVCpu->iem.s.enmCpuMode)
9308 {
9309 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9310 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9311 case IEMMODE_16BIT:
9312 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9313 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9314 else
9315 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9316 break;
9317 }
9318 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9319 szInstr, sizeof(szInstr), &cbInstr);
9320 }
9321
9322 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9323 Log2(("**** %s\n"
9324 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9325 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9326 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9327 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9328 " %s\n"
9329 , pszFunction,
9330 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9331 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9332 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9333 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9334 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9335 szInstr));
9336
9337 if (LogIs3Enabled())
9338 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9339 }
9340 else
9341# endif
9342 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9343 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9344 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9345}
9346#endif /* LOG_ENABLED */
9347
9348
9349#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9350/**
9351 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9352 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9353 *
9354 * @returns Modified rcStrict.
9355 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9356 * @param rcStrict The instruction execution status.
9357 */
9358static VBOXSTRICTRC iemHandleNestedInstructionBoundraryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9359{
9360 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9361 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9362 {
9363 /* VMX preemption timer takes priority over NMI-window exits. */
9364 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9365 {
9366 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9367 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9368 }
9369 /*
9370 * Check remaining intercepts.
9371 *
9372 * NMI-window and Interrupt-window VM-exits.
9373 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9374 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9375 *
9376 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9377 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9378 */
9379 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9380 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
9381 && !TRPMHasTrap(pVCpu))
9382 {
9383 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9384 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9385 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9386 {
9387 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9388 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9389 }
9390 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9391 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9392 {
9393 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9394 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9395 }
9396 }
9397 }
9398 /* TPR-below threshold/APIC write has the highest priority. */
9399 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9400 {
9401 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9402 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
9403 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9404 }
9405 /* MTF takes priority over VMX-preemption timer. */
9406 else
9407 {
9408 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9409 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
9410 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9411 }
9412 return rcStrict;
9413}
9414#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
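
/*
 * Note (a summary of the checks above, not additional behaviour): the
 * effective priority order is APIC-write emulation first, then MTF, then the
 * VMX-preemption timer, then NMI-window and finally interrupt-window
 * VM-exits, where the two window exits are only taken when no interrupt
 * shadow is in effect and TRPM has no event pending.
 */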
9415
9416
9417/**
9418 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9419 * IEMExecOneWithPrefetchedByPC.
9420 *
9421 * Similar code is found in IEMExecLots.
9422 *
9423 * @return Strict VBox status code.
9424 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9425 * @param fExecuteInhibit If set, execute the instruction following CLI,
9426 * POP SS and MOV SS,GR.
9427 * @param pszFunction The calling function name.
9428 */
9429DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9430{
9431 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9432 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9433 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9434 RT_NOREF_PV(pszFunction);
9435
9436#ifdef IEM_WITH_SETJMP
9437 VBOXSTRICTRC rcStrict;
9438 jmp_buf JmpBuf;
9439 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
9440 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9441 if ((rcStrict = setjmp(JmpBuf)) == 0)
9442 {
9443 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9444 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9445 }
9446 else
9447 pVCpu->iem.s.cLongJumps++;
9448 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9449#else
9450 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9451 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9452#endif
9453 if (rcStrict == VINF_SUCCESS)
9454 pVCpu->iem.s.cInstructions++;
9455 if (pVCpu->iem.s.cActiveMappings > 0)
9456 {
9457 Assert(rcStrict != VINF_SUCCESS);
9458 iemMemRollback(pVCpu);
9459 }
9460 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9461 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9462 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9463
9464//#ifdef DEBUG
9465// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9466//#endif
9467
9468#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9469 /*
9470 * Perform any VMX nested-guest instruction boundary actions.
9471 *
9472 * If any of these causes a VM-exit, we must skip executing the next
9473 * instruction (would run into stale page tables). A VM-exit makes sure
9474 * there is no interrupt-inhibition, so that should ensure we don't go on
9475 * to try executing the next instruction. Clearing fExecuteInhibit is
9476 * problematic because of the setjmp/longjmp clobbering above.
9477 */
9478 if ( rcStrict == VINF_SUCCESS
9479 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9480 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9481 rcStrict = iemHandleNestedInstructionBoundraryFFs(pVCpu, rcStrict);
9482#endif
9483
9484 /* Execute the next instruction as well if a cli, pop ss or
9485 mov ss, Gr has just completed successfully. */
9486 if ( fExecuteInhibit
9487 && rcStrict == VINF_SUCCESS
9488 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
9489 && EMIsInhibitInterruptsActive(pVCpu))
9490 {
9491 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers, pVCpu->iem.s.fDisregardLock);
9492 if (rcStrict == VINF_SUCCESS)
9493 {
9494#ifdef LOG_ENABLED
9495 iemLogCurInstr(pVCpu, false, pszFunction);
9496#endif
9497#ifdef IEM_WITH_SETJMP
9498 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9499 if ((rcStrict = setjmp(JmpBuf)) == 0)
9500 {
9501 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9502 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9503 }
9504 else
9505 pVCpu->iem.s.cLongJumps++;
9506 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9507#else
9508 IEM_OPCODE_GET_NEXT_U8(&b);
9509 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9510#endif
9511 if (rcStrict == VINF_SUCCESS)
9512 pVCpu->iem.s.cInstructions++;
9513 if (pVCpu->iem.s.cActiveMappings > 0)
9514 {
9515 Assert(rcStrict != VINF_SUCCESS);
9516 iemMemRollback(pVCpu);
9517 }
9518 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9519 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9520 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9521 }
9522 else if (pVCpu->iem.s.cActiveMappings > 0)
9523 iemMemRollback(pVCpu);
9524 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); /* hope this is correct for all exceptional cases... */
9525 }
9526
9527 /*
9528 * Return value fiddling, statistics and sanity assertions.
9529 */
9530 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9531
9532 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9533 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9534 return rcStrict;
9535}
9536
9537
9538/**
9539 * Execute one instruction.
9540 *
9541 * @return Strict VBox status code.
9542 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9543 */
9544VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9545{
9546 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9547#ifdef LOG_ENABLED
9548 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9549#endif
9550
9551 /*
9552 * Do the decoding and emulation.
9553 */
9554 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9555 if (rcStrict == VINF_SUCCESS)
9556 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9557 else if (pVCpu->iem.s.cActiveMappings > 0)
9558 iemMemRollback(pVCpu);
9559
9560 if (rcStrict != VINF_SUCCESS)
9561 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9562 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9563 return rcStrict;
9564}
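
#if 0 /* Hypothetical usage sketch, not lifted from the actual callers. */
/* Minimal illustration of driving the single-instruction API above; real
   callers (EM and friends) do considerably more status code handling. */
static VBOXSTRICTRC ExampleInterpretOneInstruction(PVMCPUCC pVCpu)
{
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
    if (rcStrict != VINF_SUCCESS)
        LogFlow(("ExampleInterpretOneInstruction: rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif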
9565
9566
9567VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9568{
9569 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9570
9571 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9572 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9573 if (rcStrict == VINF_SUCCESS)
9574 {
9575 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9576 if (pcbWritten)
9577 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9578 }
9579 else if (pVCpu->iem.s.cActiveMappings > 0)
9580 iemMemRollback(pVCpu);
9581
9582 return rcStrict;
9583}
9584
9585
9586VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9587 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9588{
9589 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9590
9591 VBOXSTRICTRC rcStrict;
9592 if ( cbOpcodeBytes
9593 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9594 {
9595 iemInitDecoder(pVCpu, false, false);
9596#ifdef IEM_WITH_CODE_TLB
9597 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9598 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9599 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9600 pVCpu->iem.s.offCurInstrStart = 0;
9601 pVCpu->iem.s.offInstrNextByte = 0;
9602#else
9603 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9604 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9605#endif
9606 rcStrict = VINF_SUCCESS;
9607 }
9608 else
9609 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9610 if (rcStrict == VINF_SUCCESS)
9611 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9612 else if (pVCpu->iem.s.cActiveMappings > 0)
9613 iemMemRollback(pVCpu);
9614
9615 return rcStrict;
9616}
9617
9618
9619VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9620{
9621 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9622
9623 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9624 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9625 if (rcStrict == VINF_SUCCESS)
9626 {
9627 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9628 if (pcbWritten)
9629 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9630 }
9631 else if (pVCpu->iem.s.cActiveMappings > 0)
9632 iemMemRollback(pVCpu);
9633
9634 return rcStrict;
9635}
9636
9637
9638VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9639 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9640{
9641 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9642
9643 VBOXSTRICTRC rcStrict;
9644 if ( cbOpcodeBytes
9645 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9646 {
9647 iemInitDecoder(pVCpu, true, false);
9648#ifdef IEM_WITH_CODE_TLB
9649 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9650 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9651 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9652 pVCpu->iem.s.offCurInstrStart = 0;
9653 pVCpu->iem.s.offInstrNextByte = 0;
9654#else
9655 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9656 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9657#endif
9658 rcStrict = VINF_SUCCESS;
9659 }
9660 else
9661 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9662 if (rcStrict == VINF_SUCCESS)
9663 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9664 else if (pVCpu->iem.s.cActiveMappings > 0)
9665 iemMemRollback(pVCpu);
9666
9667 return rcStrict;
9668}
9669
9670
9671/**
9672 * For debugging DISGetParamSize; may come in handy.
9673 *
9674 * @returns Strict VBox status code.
9675 * @param pVCpu The cross context virtual CPU structure of the
9676 * calling EMT.
9677 * @param pCtxCore The context core structure.
9678 * @param OpcodeBytesPC The PC of the opcode bytes.
9679 * @param pvOpcodeBytes Prefetched opcode bytes.
9680 * @param cbOpcodeBytes Number of prefetched bytes.
9681 * @param pcbWritten Where to return the number of bytes written.
9682 * Optional.
9683 */
9684VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9685 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
9686 uint32_t *pcbWritten)
9687{
9688 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9689
9690 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9691 VBOXSTRICTRC rcStrict;
9692 if ( cbOpcodeBytes
9693 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9694 {
9695 iemInitDecoder(pVCpu, true, false);
9696#ifdef IEM_WITH_CODE_TLB
9697 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9698 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9699 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9700 pVCpu->iem.s.offCurInstrStart = 0;
9701 pVCpu->iem.s.offInstrNextByte = 0;
9702#else
9703 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9704 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9705#endif
9706 rcStrict = VINF_SUCCESS;
9707 }
9708 else
9709 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9710 if (rcStrict == VINF_SUCCESS)
9711 {
9712 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
9713 if (pcbWritten)
9714 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9715 }
9716 else if (pVCpu->iem.s.cActiveMappings > 0)
9717 iemMemRollback(pVCpu);
9718
9719 return rcStrict;
9720}
9721
9722
9723/**
9724 * For handling split cacheline lock operations when the host has split-lock
9725 * detection enabled.
9726 *
9727 * This will cause the interpreter to disregard the lock prefix and implicit
9728 * locking (xchg).
9729 *
9730 * @returns Strict VBox status code.
9731 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9732 */
9733VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9734{
9735 /*
9736 * Do the decoding and emulation.
9737 */
9738 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, true /*fDisregardLock*/);
9739 if (rcStrict == VINF_SUCCESS)
9740 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9741 else if (pVCpu->iem.s.cActiveMappings > 0)
9742 iemMemRollback(pVCpu);
9743
9744 if (rcStrict != VINF_SUCCESS)
9745 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9746 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9747 return rcStrict;
9748}
9749
9750
9751VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9752{
9753 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9754 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9755
9756 /*
9757 * See if there is an interrupt pending in TRPM, inject it if we can.
9758 */
9759 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9760#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9761 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9762 if (fIntrEnabled)
9763 {
9764 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9765 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9766 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9767 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9768 else
9769 {
9770 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9771 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9772 }
9773 }
9774#else
9775 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9776#endif
9777
9778 /** @todo What if we are injecting an exception and not an interrupt? Is that
9779 * possible here? For now we assert it is indeed only an interrupt. */
9780 if ( fIntrEnabled
9781 && TRPMHasTrap(pVCpu)
9782 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
9783 {
9784 uint8_t u8TrapNo;
9785 TRPMEVENT enmType;
9786 uint32_t uErrCode;
9787 RTGCPTR uCr2;
9788 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */, NULL /* fIcebp */);
9789 AssertRC(rc2);
9790 Assert(enmType == TRPM_HARDWARE_INT);
9791 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
9792 TRPMResetTrap(pVCpu);
9793#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9794 /* Injecting an event may cause a VM-exit. */
9795 if ( rcStrict != VINF_SUCCESS
9796 && rcStrict != VINF_IEM_RAISED_XCPT)
9797 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9798#else
9799 NOREF(rcStrict);
9800#endif
9801 }
9802
9803 /*
9804 * Initial decoder init w/ prefetch, then setup setjmp.
9805 */
9806 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9807 if (rcStrict == VINF_SUCCESS)
9808 {
9809#ifdef IEM_WITH_SETJMP
9810 jmp_buf JmpBuf;
9811 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
9812 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9813 pVCpu->iem.s.cActiveMappings = 0;
9814 if ((rcStrict = setjmp(JmpBuf)) == 0)
9815#endif
9816 {
9817 /*
9818 * The run loop. We limit ourselves to the caller-specified maximum instruction count.
9819 */
9820 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9821 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9822 for (;;)
9823 {
9824 /*
9825 * Log the state.
9826 */
9827#ifdef LOG_ENABLED
9828 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9829#endif
9830
9831 /*
9832 * Do the decoding and emulation.
9833 */
9834 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9835 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9836 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9837 {
9838 Assert(pVCpu->iem.s.cActiveMappings == 0);
9839 pVCpu->iem.s.cInstructions++;
9840 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9841 {
9842 uint64_t fCpu = pVCpu->fLocalForcedActions
9843 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9844 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9845 | VMCPU_FF_TLB_FLUSH
9846 | VMCPU_FF_INHIBIT_INTERRUPTS
9847 | VMCPU_FF_BLOCK_NMIS
9848 | VMCPU_FF_UNHALT ));
9849
9850 if (RT_LIKELY( ( !fCpu
9851 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9852 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9853 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9854 {
9855 if (cMaxInstructionsGccStupidity-- > 0)
9856 {
9857 /* Poll timers every now and then according to the caller's specs. */
9858 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9859 || !TMTimerPollBool(pVM, pVCpu))
9860 {
9861 Assert(pVCpu->iem.s.cActiveMappings == 0);
9862 iemReInitDecoder(pVCpu);
9863 continue;
9864 }
9865 }
9866 }
9867 }
9868 Assert(pVCpu->iem.s.cActiveMappings == 0);
9869 }
9870 else if (pVCpu->iem.s.cActiveMappings > 0)
9871 iemMemRollback(pVCpu);
9872 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9873 break;
9874 }
9875 }
9876#ifdef IEM_WITH_SETJMP
9877 else
9878 {
9879 if (pVCpu->iem.s.cActiveMappings > 0)
9880 iemMemRollback(pVCpu);
9881# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9882 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9883# endif
9884 pVCpu->iem.s.cLongJumps++;
9885 }
9886 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9887#endif
9888
9889 /*
9890 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9891 */
9892 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9893 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9894 }
9895 else
9896 {
9897 if (pVCpu->iem.s.cActiveMappings > 0)
9898 iemMemRollback(pVCpu);
9899
9900#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9901 /*
9902 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9903 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9904 */
9905 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9906#endif
9907 }
9908
9909 /*
9910 * Maybe re-enter raw-mode and log.
9911 */
9912 if (rcStrict != VINF_SUCCESS)
9913 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9914 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9915 if (pcInstructions)
9916 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9917 return rcStrict;
9918}
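
#if 0 /* Hypothetical usage sketch, not lifted from the actual callers. */
/* Illustrates the cPollRate contract of IEMExecLots above: the value is used
   as a mask on the remaining-instruction counter, so it must be 2^n - 1; with
   511 the timers get polled roughly once every 512 instructions. */
static VBOXSTRICTRC ExampleExecLots(PVMCPUCC pVCpu)
{
    uint32_t cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 511 /*cPollRate*/, &cInstructions);
    LogFlow(("ExampleExecLots: %u instructions, rcStrict=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif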
9919
9920
9921/**
9922 * Interface used by EMExecuteExec, does exit statistics and limits.
9923 *
9924 * @returns Strict VBox status code.
9925 * @param pVCpu The cross context virtual CPU structure.
9926 * @param fWillExit To be defined.
9927 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9928 * @param cMaxInstructions Maximum number of instructions to execute.
9929 * @param cMaxInstructionsWithoutExits
9930 * The max number of instructions without exits.
9931 * @param pStats Where to return statistics.
9932 */
9933VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9934 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9935{
9936 NOREF(fWillExit); /** @todo define flexible exit crits */
9937
9938 /*
9939 * Initialize return stats.
9940 */
9941 pStats->cInstructions = 0;
9942 pStats->cExits = 0;
9943 pStats->cMaxExitDistance = 0;
9944 pStats->cReserved = 0;
9945
9946 /*
9947 * Initial decoder init w/ prefetch, then setup setjmp.
9948 */
9949 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9950 if (rcStrict == VINF_SUCCESS)
9951 {
9952#ifdef IEM_WITH_SETJMP
9953 jmp_buf JmpBuf;
9954 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
9955 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9956 pVCpu->iem.s.cActiveMappings = 0;
9957 if ((rcStrict = setjmp(JmpBuf)) == 0)
9958#endif
9959 {
9960#ifdef IN_RING0
9961 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9962#endif
9963 uint32_t cInstructionSinceLastExit = 0;
9964
9965 /*
9966 * The run loop. We limit ourselves to the caller-specified maximum instruction count.
9967 */
9968 PVM pVM = pVCpu->CTX_SUFF(pVM);
9969 for (;;)
9970 {
9971 /*
9972 * Log the state.
9973 */
9974#ifdef LOG_ENABLED
9975 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9976#endif
9977
9978 /*
9979 * Do the decoding and emulation.
9980 */
9981 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9982
9983 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9984 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9985
9986 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9987 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9988 {
9989 pStats->cExits += 1;
9990 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9991 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9992 cInstructionSinceLastExit = 0;
9993 }
9994
9995 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9996 {
9997 Assert(pVCpu->iem.s.cActiveMappings == 0);
9998 pVCpu->iem.s.cInstructions++;
9999 pStats->cInstructions++;
10000 cInstructionSinceLastExit++;
10001 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10002 {
10003 uint64_t fCpu = pVCpu->fLocalForcedActions
10004 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10005 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10006 | VMCPU_FF_TLB_FLUSH
10007 | VMCPU_FF_INHIBIT_INTERRUPTS
10008 | VMCPU_FF_BLOCK_NMIS
10009 | VMCPU_FF_UNHALT ));
10010
10011 if (RT_LIKELY( ( ( !fCpu
10012 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10013 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10014 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10015 || pStats->cInstructions < cMinInstructions))
10016 {
10017 if (pStats->cInstructions < cMaxInstructions)
10018 {
10019 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10020 {
10021#ifdef IN_RING0
10022 if ( !fCheckPreemptionPending
10023 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10024#endif
10025 {
10026 Assert(pVCpu->iem.s.cActiveMappings == 0);
10027 iemReInitDecoder(pVCpu);
10028 continue;
10029 }
10030#ifdef IN_RING0
10031 rcStrict = VINF_EM_RAW_INTERRUPT;
10032 break;
10033#endif
10034 }
10035 }
10036 }
10037 Assert(!(fCpu & VMCPU_FF_IEM));
10038 }
10039 Assert(pVCpu->iem.s.cActiveMappings == 0);
10040 }
10041 else if (pVCpu->iem.s.cActiveMappings > 0)
10042 iemMemRollback(pVCpu);
10043 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10044 break;
10045 }
10046 }
10047#ifdef IEM_WITH_SETJMP
10048 else
10049 {
10050 if (pVCpu->iem.s.cActiveMappings > 0)
10051 iemMemRollback(pVCpu);
10052 pVCpu->iem.s.cLongJumps++;
10053 }
10054 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
10055#endif
10056
10057 /*
10058 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10059 */
10060 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10061 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10062 }
10063 else
10064 {
10065 if (pVCpu->iem.s.cActiveMappings > 0)
10066 iemMemRollback(pVCpu);
10067
10068#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10069 /*
10070 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10071 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10072 */
10073 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10074#endif
10075 }
10076
10077 /*
10078 * Maybe re-enter raw-mode and log.
10079 */
10080 if (rcStrict != VINF_SUCCESS)
10081 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10082 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10083 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10084 return rcStrict;
10085}
10086
10087
10088/**
10089 * Injects a trap, fault, abort, software interrupt or external interrupt.
10090 *
10091 * The parameter list matches TRPMQueryTrapAll pretty closely.
10092 *
10093 * @returns Strict VBox status code.
10094 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10095 * @param u8TrapNo The trap number.
10096 * @param enmType What type is it (trap/fault/abort), software
10097 * interrupt or hardware interrupt.
10098 * @param uErrCode The error code if applicable.
10099 * @param uCr2 The CR2 value if applicable.
10100 * @param cbInstr The instruction length (only relevant for
10101 * software interrupts).
10102 */
10103VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10104 uint8_t cbInstr)
10105{
10106 iemInitDecoder(pVCpu, false, false);
10107#ifdef DBGFTRACE_ENABLED
10108 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10109 u8TrapNo, enmType, uErrCode, uCr2);
10110#endif
10111
10112 uint32_t fFlags;
10113 switch (enmType)
10114 {
10115 case TRPM_HARDWARE_INT:
10116 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10117 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10118 uErrCode = uCr2 = 0;
10119 break;
10120
10121 case TRPM_SOFTWARE_INT:
10122 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10123 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10124 uErrCode = uCr2 = 0;
10125 break;
10126
10127 case TRPM_TRAP:
10128 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10129 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10130 if (u8TrapNo == X86_XCPT_PF)
10131 fFlags |= IEM_XCPT_FLAGS_CR2;
10132 switch (u8TrapNo)
10133 {
10134 case X86_XCPT_DF:
10135 case X86_XCPT_TS:
10136 case X86_XCPT_NP:
10137 case X86_XCPT_SS:
10138 case X86_XCPT_PF:
10139 case X86_XCPT_AC:
10140 case X86_XCPT_GP:
10141 fFlags |= IEM_XCPT_FLAGS_ERR;
10142 break;
10143 }
10144 break;
10145
10146 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10147 }
10148
10149 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10150
10151 if (pVCpu->iem.s.cActiveMappings > 0)
10152 iemMemRollback(pVCpu);
10153
10154 return rcStrict;
10155}
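
#if 0 /* Hypothetical usage sketch; mirrors the TRPM-driven call in IEMExecLots above. */
/* For an external hardware interrupt the error code, CR2 and instruction
   length are ignored by the function, so zeros are passed. */
static VBOXSTRICTRC ExampleInjectExternalInterrupt(PVMCPUCC pVCpu, uint8_t bVector)
{
    return IEMInjectTrap(pVCpu, bVector, TRPM_HARDWARE_INT, 0 /*uErrCode*/, 0 /*uCr2*/, 0 /*cbInstr*/);
}
#endif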
10156
10157
10158/**
10159 * Injects the active TRPM event.
10160 *
10161 * @returns Strict VBox status code.
10162 * @param pVCpu The cross context virtual CPU structure.
10163 */
10164VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10165{
10166#ifndef IEM_IMPLEMENTS_TASKSWITCH
10167 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10168#else
10169 uint8_t u8TrapNo;
10170 TRPMEVENT enmType;
10171 uint32_t uErrCode;
10172 RTGCUINTPTR uCr2;
10173 uint8_t cbInstr;
10174 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10175 if (RT_FAILURE(rc))
10176 return rc;
10177
10178 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10179 * ICEBP \#DB injection as a special case. */
10180 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10181#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10182 if (rcStrict == VINF_SVM_VMEXIT)
10183 rcStrict = VINF_SUCCESS;
10184#endif
10185#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10186 if (rcStrict == VINF_VMX_VMEXIT)
10187 rcStrict = VINF_SUCCESS;
10188#endif
10189 /** @todo Are there any other codes that imply the event was successfully
10190 * delivered to the guest? See @bugref{6607}. */
10191 if ( rcStrict == VINF_SUCCESS
10192 || rcStrict == VINF_IEM_RAISED_XCPT)
10193 TRPMResetTrap(pVCpu);
10194
10195 return rcStrict;
10196#endif
10197}
10198
10199
10200VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10201{
10202 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10203 return VERR_NOT_IMPLEMENTED;
10204}
10205
10206
10207VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10208{
10209 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10210 return VERR_NOT_IMPLEMENTED;
10211}
10212
10213
10214#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
10215/**
10216 * Executes an IRET instruction with default operand size.
10217 *
10218 * This is for PATM.
10219 *
10220 * @returns VBox status code.
10221 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10222 * @param pCtxCore The register frame.
10223 */
10224VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore)
10225{
10226 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10227
10228 iemCtxCoreToCtx(pCtx, pCtxCore);
10229 iemInitDecoder(pVCpu);
10230 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
10231 if (rcStrict == VINF_SUCCESS)
10232 iemCtxToCtxCore(pCtxCore, pCtx);
10233 else
10234 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10235 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10236 return rcStrict;
10237}
10238#endif
10239
10240
10241/**
10242 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10243 *
10244 * This API ASSUMES that the caller has already verified that the guest code is
10245 * allowed to access the I/O port. (The I/O port is in the DX register in the
10246 * guest state.)
10247 *
10248 * @returns Strict VBox status code.
10249 * @param pVCpu The cross context virtual CPU structure.
10250 * @param cbValue The size of the I/O port access (1, 2, or 4).
10251 * @param enmAddrMode The addressing mode.
10252 * @param fRepPrefix Indicates whether a repeat prefix is used
10253 * (doesn't matter which for this instruction).
10254 * @param cbInstr The instruction length in bytes.
10255 * @param iEffSeg The effective segment address.
10256 * @param fIoChecked Whether the access to the I/O port has been
10257 * checked or not. It's typically checked in the
10258 * HM scenario.
10259 */
10260VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10261 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10262{
10263 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10264 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10265
10266 /*
10267 * State init.
10268 */
10269 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10270
10271 /*
10272 * Switch orgy for getting to the right handler.
10273 */
10274 VBOXSTRICTRC rcStrict;
10275 if (fRepPrefix)
10276 {
10277 switch (enmAddrMode)
10278 {
10279 case IEMMODE_16BIT:
10280 switch (cbValue)
10281 {
10282 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10283 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10284 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10285 default:
10286 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10287 }
10288 break;
10289
10290 case IEMMODE_32BIT:
10291 switch (cbValue)
10292 {
10293 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10294 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10295 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10296 default:
10297 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10298 }
10299 break;
10300
10301 case IEMMODE_64BIT:
10302 switch (cbValue)
10303 {
10304 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10305 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10306 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10307 default:
10308 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10309 }
10310 break;
10311
10312 default:
10313 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10314 }
10315 }
10316 else
10317 {
10318 switch (enmAddrMode)
10319 {
10320 case IEMMODE_16BIT:
10321 switch (cbValue)
10322 {
10323 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10324 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10325 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10326 default:
10327 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10328 }
10329 break;
10330
10331 case IEMMODE_32BIT:
10332 switch (cbValue)
10333 {
10334 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10335 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10336 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10337 default:
10338 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10339 }
10340 break;
10341
10342 case IEMMODE_64BIT:
10343 switch (cbValue)
10344 {
10345 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10346 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10347 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10348 default:
10349 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10350 }
10351 break;
10352
10353 default:
10354 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10355 }
10356 }
10357
10358 if (pVCpu->iem.s.cActiveMappings)
10359 iemMemRollback(pVCpu);
10360
10361 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10362}
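
#if 0 /* Hypothetical usage sketch, assuming a 32-bit guest executing "rep outsb" (f3 6e, 2 bytes). */
static VBOXSTRICTRC ExampleEmulateRepOutsb(PVMCPUCC pVCpu)
{
    /* OUTS reads from DS:eSI by default, hence X86_SREG_DS as the effective segment. */
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                2 /*cbInstr*/, X86_SREG_DS /*iEffSeg*/, false /*fIoChecked*/);
}
#endif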
10363
10364
10365/**
10366 * Interface for HM and EM for executing string I/O IN (read) instructions.
10367 *
10368 * This API ASSUMES that the caller has already verified that the guest code is
10369 * allowed to access the I/O port. (The I/O port is in the DX register in the
10370 * guest state.)
10371 *
10372 * @returns Strict VBox status code.
10373 * @param pVCpu The cross context virtual CPU structure.
10374 * @param cbValue The size of the I/O port access (1, 2, or 4).
10375 * @param enmAddrMode The addressing mode.
10376 * @param fRepPrefix Indicates whether a repeat prefix is used
10377 * (doesn't matter which for this instruction).
10378 * @param cbInstr The instruction length in bytes.
10379 * @param fIoChecked Whether the access to the I/O port has been
10380 * checked or not. It's typically checked in the
10381 * HM scenario.
10382 */
10383VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10384 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10385{
10386 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10387
10388 /*
10389 * State init.
10390 */
10391 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10392
10393 /*
10394 * Switch orgy for getting to the right handler.
10395 */
10396 VBOXSTRICTRC rcStrict;
10397 if (fRepPrefix)
10398 {
10399 switch (enmAddrMode)
10400 {
10401 case IEMMODE_16BIT:
10402 switch (cbValue)
10403 {
10404 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10405 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10406 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10407 default:
10408 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10409 }
10410 break;
10411
10412 case IEMMODE_32BIT:
10413 switch (cbValue)
10414 {
10415 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10416 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10417 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10418 default:
10419 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10420 }
10421 break;
10422
10423 case IEMMODE_64BIT:
10424 switch (cbValue)
10425 {
10426 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10427 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10428 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10429 default:
10430 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10431 }
10432 break;
10433
10434 default:
10435 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10436 }
10437 }
10438 else
10439 {
10440 switch (enmAddrMode)
10441 {
10442 case IEMMODE_16BIT:
10443 switch (cbValue)
10444 {
10445 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10446 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10447 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10448 default:
10449 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10450 }
10451 break;
10452
10453 case IEMMODE_32BIT:
10454 switch (cbValue)
10455 {
10456 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10457 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10458 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10459 default:
10460 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10461 }
10462 break;
10463
10464 case IEMMODE_64BIT:
10465 switch (cbValue)
10466 {
10467 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10468 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10469 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10470 default:
10471 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10472 }
10473 break;
10474
10475 default:
10476 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10477 }
10478 }
10479
10480 if ( pVCpu->iem.s.cActiveMappings == 0
10481 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10482 { /* likely */ }
10483 else
10484 {
10485 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10486 iemMemRollback(pVCpu);
10487 }
10488 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10489}
10490
10491
10492/**
10493 * Interface for rawmode to execute an OUT instruction.
10494 *
10495 * @returns Strict VBox status code.
10496 * @param pVCpu The cross context virtual CPU structure.
10497 * @param cbInstr The instruction length in bytes.
10498 * @param u16Port The port to write to.
10499 * @param fImm Whether the port is specified using an immediate operand or
10500 * using the implicit DX register.
10501 * @param cbReg The register size.
10502 *
10503 * @remarks In ring-0 not all of the state needs to be synced in.
10504 */
10505VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10506{
10507 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10508 Assert(cbReg <= 4 && cbReg != 3);
10509
10510 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10511 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
10512 Assert(!pVCpu->iem.s.cActiveMappings);
10513 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10514}
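
#if 0 /* Hypothetical usage sketch for a decoded "out 80h, al" (e6 80: 2 bytes, immediate port, byte access). */
static VBOXSTRICTRC ExampleExecDecodedOutImm8(PVMCPUCC pVCpu)
{
    return IEMExecDecodedOut(pVCpu, 2 /*cbInstr*/, 0x80 /*u16Port*/, true /*fImm*/, 1 /*cbReg*/);
}
#endif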
10515
10516
10517/**
10518 * Interface for rawmode to execute an IN instruction.
10519 *
10520 * @returns Strict VBox status code.
10521 * @param pVCpu The cross context virtual CPU structure.
10522 * @param cbInstr The instruction length in bytes.
10523 * @param u16Port The port to read.
10524 * @param fImm Whether the port is specified using an immediate operand or
10525 * using the implicit DX.
10526 * @param cbReg The register size.
10527 */
10528VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10529{
10530 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10531 Assert(cbReg <= 4 && cbReg != 3);
10532
10533 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10534 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
10535 Assert(!pVCpu->iem.s.cActiveMappings);
10536 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10537}
10538
10539
10540/**
10541 * Interface for HM and EM to write to a CRx register.
10542 *
10543 * @returns Strict VBox status code.
10544 * @param pVCpu The cross context virtual CPU structure.
10545 * @param cbInstr The instruction length in bytes.
10546 * @param iCrReg The control register number (destination).
10547 * @param iGReg The general purpose register number (source).
10548 *
10549 * @remarks In ring-0 not all of the state needs to be synced in.
10550 */
10551VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10552{
10553 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10554 Assert(iCrReg < 16);
10555 Assert(iGReg < 16);
10556
10557 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10558 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10559 Assert(!pVCpu->iem.s.cActiveMappings);
10560 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10561}
10562
10563
10564/**
10565 * Interface for HM and EM to read from a CRx register.
10566 *
10567 * @returns Strict VBox status code.
10568 * @param pVCpu The cross context virtual CPU structure.
10569 * @param cbInstr The instruction length in bytes.
10570 * @param iGReg The general purpose register number (destination).
10571 * @param iCrReg The control register number (source).
10572 *
10573 * @remarks In ring-0 not all of the state needs to be synced in.
10574 */
10575VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10576{
10577 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10578 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10579 | CPUMCTX_EXTRN_APIC_TPR);
10580 Assert(iCrReg < 16);
10581 Assert(iGReg < 16);
10582
10583 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10584 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10585 Assert(!pVCpu->iem.s.cActiveMappings);
10586 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10587}
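
/*
 * Illustrative sketch (added for exposition, not in the original source): a
 * hypothetical dispatcher showing how an exit handler could use the MOV CRx
 * interfaces above once the direction, CR number and GPR number have been
 * decoded.  None of the names below belong to a real VirtualBox API.
 */
#if 0
static VBOXSTRICTRC hmExampleHandleMovCRx(PVMCPUCC pVCpu, uint8_t cbInstr,
                                          bool fWrite, uint8_t iCrReg, uint8_t iGReg)
{
    /* MOV CRx,GReg takes the write path; MOV GReg,CRx takes the read path. */
    return fWrite
         ? IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg)
         : IEMExecDecodedMovCRxRead( pVCpu, cbInstr, iGReg, iCrReg);
}
#endif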
10588
10589
10590/**
10591 * Interface for HM and EM to clear the CR0[TS] bit.
10592 *
10593 * @returns Strict VBox status code.
10594 * @param pVCpu The cross context virtual CPU structure.
10595 * @param cbInstr The instruction length in bytes.
10596 *
10597 * @remarks In ring-0 not all of the state needs to be synced in.
10598 */
10599VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10600{
10601 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10602
10603 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10604 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10605 Assert(!pVCpu->iem.s.cActiveMappings);
10606 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10607}
10608
10609
10610/**
10611 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10612 *
10613 * @returns Strict VBox status code.
10614 * @param pVCpu The cross context virtual CPU structure.
10615 * @param cbInstr The instruction length in bytes.
10616 * @param uValue The value to load into CR0.
10617 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10618 * memory operand. Otherwise pass NIL_RTGCPTR.
10619 *
10620 * @remarks In ring-0 not all of the state needs to be synced in.
10621 */
10622VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10623{
10624 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10625
10626 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10627 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10628 Assert(!pVCpu->iem.s.cActiveMappings);
10629 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10630}
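
/*
 * Illustrative sketch (not in the original source): forwarding LMSW with and
 * without a memory operand.  GCPtrLmswSrc and fMemOperand are hypothetical
 * decoded values, not fields of a real structure.
 */
#if 0
static VBOXSTRICTRC exampleForwardLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uMsw,
                                       bool fMemOperand, RTGCPTR GCPtrLmswSrc)
{
    /* Pass NIL_RTGCPTR for the register form, the decoded effective address otherwise. */
    return IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, fMemOperand ? GCPtrLmswSrc : NIL_RTGCPTR);
}
#endif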
10631
10632
10633/**
10634 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10635 *
10636 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10637 *
10638 * @returns Strict VBox status code.
10639 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10640 * @param cbInstr The instruction length in bytes.
10641 * @remarks In ring-0 not all of the state needs to be synced in.
10642 * @thread EMT(pVCpu)
10643 */
10644VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10645{
10646 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10647
10648 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10649 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10650 Assert(!pVCpu->iem.s.cActiveMappings);
10651 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10652}
10653
10654
10655/**
10656 * Interface for HM and EM to emulate the WBINVD instruction.
10657 *
10658 * @returns Strict VBox status code.
10659 * @param pVCpu The cross context virtual CPU structure.
10660 * @param cbInstr The instruction length in bytes.
10661 *
10662 * @remarks In ring-0 not all of the state needs to be synced in.
10663 */
10664VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10665{
10666 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10667
10668 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10669 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10670 Assert(!pVCpu->iem.s.cActiveMappings);
10671 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10672}
10673
10674
10675/**
10676 * Interface for HM and EM to emulate the INVD instruction.
10677 *
10678 * @returns Strict VBox status code.
10679 * @param pVCpu The cross context virtual CPU structure.
10680 * @param cbInstr The instruction length in bytes.
10681 *
10682 * @remarks In ring-0 not all of the state needs to be synced in.
10683 */
10684VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10685{
10686 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10687
10688 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10689 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10690 Assert(!pVCpu->iem.s.cActiveMappings);
10691 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10692}
10693
10694
10695/**
10696 * Interface for HM and EM to emulate the INVLPG instruction.
10697 *
10698 * @returns Strict VBox status code.
10699 * @retval VINF_PGM_SYNC_CR3
10700 *
10701 * @param pVCpu The cross context virtual CPU structure.
10702 * @param cbInstr The instruction length in bytes.
10703 * @param GCPtrPage The effective address of the page to invalidate.
10704 *
10705 * @remarks In ring-0 not all of the state needs to be synced in.
10706 */
10707VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10708{
10709 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10710
10711 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10712 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10713 Assert(!pVCpu->iem.s.cActiveMappings);
10714 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10715}
10716
10717
10718/**
10719 * Interface for HM and EM to emulate the INVPCID instruction.
10720 *
10721 * @returns Strict VBox status code.
10722 * @retval VINF_PGM_SYNC_CR3
10723 *
10724 * @param pVCpu The cross context virtual CPU structure.
10725 * @param cbInstr The instruction length in bytes.
10726 * @param iEffSeg The effective segment register.
10727 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10728 * @param uType The invalidation type.
10729 *
10730 * @remarks In ring-0 not all of the state needs to be synced in.
10731 */
10732VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10733 uint64_t uType)
10734{
10735 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10736
10737 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10738 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10739 Assert(!pVCpu->iem.s.cActiveMappings);
10740 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10741}
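
/*
 * Illustrative note (not part of the original file): GCPtrDesc above points to
 * the 128-bit INVPCID descriptor defined by the Intel SDM.  The structure below
 * only sketches that memory layout for the reader; it is not a type used by
 * this file.
 */
#if 0
typedef struct EXAMPLEINVPCIDDESC
{
    uint64_t uPcid;         /**< Bits 0..11: the PCID; bits 12..63 must be zero. */
    uint64_t GCPtrLinear;   /**< Linear address, used by the individual-address (type 0) invalidation. */
} EXAMPLEINVPCIDDESC;
#endif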
10742
10743
10744/**
10745 * Interface for HM and EM to emulate the CPUID instruction.
10746 *
10747 * @returns Strict VBox status code.
10748 *
10749 * @param pVCpu The cross context virtual CPU structure.
10750 * @param cbInstr The instruction length in bytes.
10751 *
10752 * @remarks Not all of the state needs to be synced in; the usual state plus RAX and RCX.
10753 */
10754VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10755{
10756 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10757 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10758
10759 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10760 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10761 Assert(!pVCpu->iem.s.cActiveMappings);
10762 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10763}
10764
10765
10766/**
10767 * Interface for HM and EM to emulate the RDPMC instruction.
10768 *
10769 * @returns Strict VBox status code.
10770 *
10771 * @param pVCpu The cross context virtual CPU structure.
10772 * @param cbInstr The instruction length in bytes.
10773 *
10774 * @remarks Not all of the state needs to be synced in.
10775 */
10776VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10777{
10778 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10779 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10780
10781 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10782 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10783 Assert(!pVCpu->iem.s.cActiveMappings);
10784 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10785}
10786
10787
10788/**
10789 * Interface for HM and EM to emulate the RDTSC instruction.
10790 *
10791 * @returns Strict VBox status code.
10792 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10793 *
10794 * @param pVCpu The cross context virtual CPU structure.
10795 * @param cbInstr The instruction length in bytes.
10796 *
10797 * @remarks Not all of the state needs to be synced in.
10798 */
10799VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10800{
10801 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10802 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10803
10804 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10805 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10806 Assert(!pVCpu->iem.s.cActiveMappings);
10807 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10808}
10809
10810
10811/**
10812 * Interface for HM and EM to emulate the RDTSCP instruction.
10813 *
10814 * @returns Strict VBox status code.
10815 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10816 *
10817 * @param pVCpu The cross context virtual CPU structure.
10818 * @param cbInstr The instruction length in bytes.
10819 *
10820 * @remarks Not all of the state needs to be synced in. Recommended
10821 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10822 */
10823VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10824{
10825 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10826 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10827
10828 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10829 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10830 Assert(!pVCpu->iem.s.cActiveMappings);
10831 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10832}
10833
10834
10835/**
10836 * Interface for HM and EM to emulate the RDMSR instruction.
10837 *
10838 * @returns Strict VBox status code.
10839 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10840 *
10841 * @param pVCpu The cross context virtual CPU structure.
10842 * @param cbInstr The instruction length in bytes.
10843 *
10844 * @remarks Not all of the state needs to be synced in. Requires RCX and
10845 * (currently) all MSRs.
10846 */
10847VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10848{
10849 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10850 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10851
10852 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10853 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10854 Assert(!pVCpu->iem.s.cActiveMappings);
10855 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10856}
10857
10858
10859/**
10860 * Interface for HM and EM to emulate the WRMSR instruction.
10861 *
10862 * @returns Strict VBox status code.
10863 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10864 *
10865 * @param pVCpu The cross context virtual CPU structure.
10866 * @param cbInstr The instruction length in bytes.
10867 *
10868 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10869 * and (currently) all MSRs.
10870 */
10871VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10872{
10873 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10874 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10875 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10876
10877 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10878 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10879 Assert(!pVCpu->iem.s.cActiveMappings);
10880 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10881}
10882
10883
10884/**
10885 * Interface for HM and EM to emulate the MONITOR instruction.
10886 *
10887 * @returns Strict VBox status code.
10888 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10889 *
10890 * @param pVCpu The cross context virtual CPU structure.
10891 * @param cbInstr The instruction length in bytes.
10892 *
10893 * @remarks Not all of the state needs to be synced in.
10894 * @remarks ASSUMES the default DS segment and that no segment override prefixes
10895 * are used.
10896 */
10897VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10898{
10899 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10900 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10901
10902 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10903 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10904 Assert(!pVCpu->iem.s.cActiveMappings);
10905 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10906}
10907
10908
10909/**
10910 * Interface for HM and EM to emulate the MWAIT instruction.
10911 *
10912 * @returns Strict VBox status code.
10913 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10914 *
10915 * @param pVCpu The cross context virtual CPU structure.
10916 * @param cbInstr The instruction length in bytes.
10917 *
10918 * @remarks Not all of the state needs to be synced in.
10919 */
10920VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
10921{
10922 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10923 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
10924
10925 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10926 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
10927 Assert(!pVCpu->iem.s.cActiveMappings);
10928 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10929}
10930
10931
10932/**
10933 * Interface for HM and EM to emulate the HLT instruction.
10934 *
10935 * @returns Strict VBox status code.
10936 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10937 *
10938 * @param pVCpu The cross context virtual CPU structure.
10939 * @param cbInstr The instruction length in bytes.
10940 *
10941 * @remarks Not all of the state needs to be synced in.
10942 */
10943VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
10944{
10945 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10946
10947 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10948 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
10949 Assert(!pVCpu->iem.s.cActiveMappings);
10950 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10951}
10952
10953
10954/**
10955 * Checks if IEM is in the process of delivering an event (interrupt or
10956 * exception).
10957 *
10958 * @returns true if we're in the process of raising an interrupt or exception,
10959 * false otherwise.
10960 * @param pVCpu The cross context virtual CPU structure.
10961 * @param puVector Where to store the vector associated with the
10962 * currently delivered event, optional.
10963 * @param pfFlags Where to store the event delivery flags (see
10964 * IEM_XCPT_FLAGS_XXX), optional.
10965 * @param puErr Where to store the error code associated with the
10966 * event, optional.
10967 * @param puCr2 Where to store the CR2 associated with the event,
10968 * optional.
10969 * @remarks The caller should check the flags to determine if the error code and
10970 * CR2 are valid for the event.
10971 */
10972VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
10973{
10974 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
10975 if (fRaisingXcpt)
10976 {
10977 if (puVector)
10978 *puVector = pVCpu->iem.s.uCurXcpt;
10979 if (pfFlags)
10980 *pfFlags = pVCpu->iem.s.fCurXcpt;
10981 if (puErr)
10982 *puErr = pVCpu->iem.s.uCurXcptErr;
10983 if (puCr2)
10984 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
10985 }
10986 return fRaisingXcpt;
10987}
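
/*
 * Illustrative sketch (added for exposition): querying the in-flight event and
 * checking the returned flags before trusting the error code and CR2 values.
 * The specific IEM_XCPT_FLAGS_ERR / IEM_XCPT_FLAGS_CR2 bits are assumed members
 * of the IEM_XCPT_FLAGS_XXX family referenced above.
 */
#if 0
static void exampleLogCurrentXcpt(PVMCPUCC pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
    {
        Log(("Delivering vector %#x fFlags=%#x\n", uVector, fFlags));
        if (fFlags & IEM_XCPT_FLAGS_ERR)    /* error code only valid when flagged */
            Log(("  uErr=%#x\n", uErr));
        if (fFlags & IEM_XCPT_FLAGS_CR2)    /* CR2 only valid for page faults */
            Log(("  uCr2=%RX64\n", uCr2));
    }
}
#endif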
10988
10989#ifdef IN_RING3
10990
10991/**
10992 * Handles the unlikely and probably fatal merge cases.
10993 *
10994 * @returns Merged status code.
10995 * @param rcStrict Current EM status code.
10996 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10997 * with @a rcStrict.
10998 * @param iMemMap The memory mapping index. For error reporting only.
10999 * @param pVCpu The cross context virtual CPU structure of the calling
11000 * thread, for error reporting only.
11001 */
11002DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11003 unsigned iMemMap, PVMCPUCC pVCpu)
11004{
11005 if (RT_FAILURE_NP(rcStrict))
11006 return rcStrict;
11007
11008 if (RT_FAILURE_NP(rcStrictCommit))
11009 return rcStrictCommit;
11010
11011 if (rcStrict == rcStrictCommit)
11012 return rcStrictCommit;
11013
11014 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11015 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11016 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11017 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11018 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11019 return VERR_IOM_FF_STATUS_IPE;
11020}
11021
11022
11023/**
11024 * Helper for IEMR3ProcessForceFlag.
11025 *
11026 * @returns Merged status code.
11027 * @param rcStrict Current EM status code.
11028 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11029 * with @a rcStrict.
11030 * @param iMemMap The memory mapping index. For error reporting only.
11031 * @param pVCpu The cross context virtual CPU structure of the calling
11032 * thread, for error reporting only.
11033 */
11034DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11035{
11036 /* Simple. */
11037 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11038 return rcStrictCommit;
11039
11040 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11041 return rcStrict;
11042
11043 /* EM scheduling status codes. */
11044 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11045 && rcStrict <= VINF_EM_LAST))
11046 {
11047 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11048 && rcStrictCommit <= VINF_EM_LAST))
11049 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11050 }
11051
11052 /* Unlikely */
11053 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11054}
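
/*
 * Worked example (added for exposition, not in the original source): with a
 * pending ring-3 return and a successful commit the commit status is simply
 * adopted, while two EM scheduling codes are merged by keeping the numerically
 * lower one, which EM treats as the higher-priority request.  The example
 * assumes VINF_EM_RESCHEDULE and VINF_EM_HALT both fall in the
 * VINF_EM_FIRST..VINF_EM_LAST range.
 */
#if 0
static void iemR3MergeStatusExample(PVMCPUCC pVCpu)
{
    /* VINF_EM_RAW_TO_R3 is swallowed; the commit result wins. */
    VBOXSTRICTRC rcStrict = iemR3MergeStatus(VINF_EM_RAW_TO_R3, VINF_SUCCESS, 0 /*iMemMap*/, pVCpu);
    Assert(rcStrict == VINF_SUCCESS);

    /* Two EM scheduling codes: the lower (higher-priority) value is returned. */
    rcStrict = iemR3MergeStatus(VINF_EM_RESCHEDULE, VINF_EM_HALT, 0 /*iMemMap*/, pVCpu);
    Assert(rcStrict == RT_MIN(VINF_EM_RESCHEDULE, VINF_EM_HALT));
}
#endif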
11055
11056
11057/**
11058 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11059 *
11060 * @returns Merge between @a rcStrict and what the commit operation returned.
11061 * @param pVM The cross context VM structure.
11062 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11063 * @param rcStrict The status code returned by ring-0 or raw-mode.
11064 */
11065VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11066{
11067 /*
11068 * Reset the pending commit.
11069 */
11070 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11071 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11072 ("%#x %#x %#x\n",
11073 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11074 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11075
11076 /*
11077 * Commit the pending bounce buffers (usually just one).
11078 */
11079 unsigned cBufs = 0;
11080 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11081 while (iMemMap-- > 0)
11082 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11083 {
11084 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11085 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11086 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11087
11088 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11089 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11090 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11091
11092 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11093 {
11094 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11095 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11096 pbBuf,
11097 cbFirst,
11098 PGMACCESSORIGIN_IEM);
11099 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11100 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11101 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11102 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11103 }
11104
11105 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11106 {
11107 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11108 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11109 pbBuf + cbFirst,
11110 cbSecond,
11111 PGMACCESSORIGIN_IEM);
11112 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11113 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11114 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11115 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11116 }
11117 cBufs++;
11118 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11119 }
11120
11121 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11122 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11123 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11124 pVCpu->iem.s.cActiveMappings = 0;
11125 return rcStrict;
11126}
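
/*
 * Illustrative sketch (not part of the original file): how a ring-3 caller
 * could fold a pending IEM write-commit into the status it got back from
 * ring-0.  The surrounding function name is made up; per the comment above,
 * the real call comes from the force-flag handling code when VMCPU_FF_IEM
 * is set.
 */
#if 0
static VBOXSTRICTRC exampleProcessPendingIemCommit(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    /* Only call this when the force flag is actually pending. */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif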
11127
11128#endif /* IN_RING3 */
11129