VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@36847

Last change on this file since 36847 was 36841, checked in by vboxsync, 14 years ago

IEM: CMOVcc, JMPF Ep.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 171.9 KB
1/* $Id: IEMAll.cpp 36841 2011-04-26 00:09:06Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, it is thought
36 * to conflict with the speed goal, as the disassembler chews things a bit too
37 * much while leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 */
43
44/*******************************************************************************
45* Header Files *
46*******************************************************************************/
47#define LOG_GROUP LOG_GROUP_EM /** @todo add log group */
48#include <VBox/vmm/iem.h>
49#include <VBox/vmm/pgm.h>
50#include <VBox/vmm/iom.h>
51#include <VBox/vmm/em.h>
52#include <VBox/vmm/dbgf.h>
53#ifdef IEM_VERIFICATION_MODE
54# include <VBox/vmm/rem.h>
55# include <VBox/vmm/mm.h>
56#endif
57#include "IEMInternal.h"
58#include <VBox/vmm/vm.h>
59#include <VBox/log.h>
60#include <VBox/err.h>
61#include <VBox/param.h>
62#include <VBox/x86.h>
63#include <iprt/assert.h>
64#include <iprt/string.h>
65
66
67/*******************************************************************************
68* Structures and Typedefs *
69*******************************************************************************/
70/** @typedef PFNIEMOP
71 * Pointer to an opcode decoder function.
72 */
73
74/** @def FNIEMOP_DEF
75 * Define an opcode decoder function.
76 *
77 * We're using macros for this so that adding and removing parameters as well as
78 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
79 *
80 * @param a_Name The function name.
81 */
82
83
84#if defined(__GNUC__) && defined(RT_ARCH_X86)
85typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
86# define FNIEMOP_DEF(a_Name) \
87 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name (PIEMCPU pIemCpu)
88# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
89 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
90# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
91 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
92
93#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
94typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
95# define FNIEMOP_DEF(a_Name) \
96 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
97# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
98 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
99# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
100 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
101
102#else
103typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
104# define FNIEMOP_DEF(a_Name) \
105 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
106# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
107 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
108# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
109 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
110
111#endif
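/*
 * Editor's illustration, not part of the original file: a minimal sketch of how
 * FNIEMOP_DEF and FNIEMOP_CALL are intended to pair up.  The handler name
 * iemOp_example, the helper iemExampleDispatch and the dispatch byte b are
 * made-up placeholders.
 */
#if 0 /* illustrative only */
FNIEMOP_DEF(iemOp_example)
{
    /* decode the instruction here ... */
    return VINF_SUCCESS;
}

static VBOXSTRICTRC iemExampleDispatch(PIEMCPU pIemCpu, uint8_t b)
{
    /* dispatch through the one-byte opcode map declared further down */
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
#endif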
112
113
114/**
115 * Function table for a binary operator providing implementation based on
116 * operand size.
117 */
118typedef struct IEMOPBINSIZES
119{
120 PFNIEMAIMPLBINU8 pfnNormalU8, pfnLockedU8;
121 PFNIEMAIMPLBINU16 pfnNormalU16, pfnLockedU16;
122 PFNIEMAIMPLBINU32 pfnNormalU32, pfnLockedU32;
123 PFNIEMAIMPLBINU64 pfnNormalU64, pfnLockedU64;
124} IEMOPBINSIZES;
125/** Pointer to a binary operator function table. */
126typedef IEMOPBINSIZES const *PCIEMOPBINSIZES;
127
128
129/**
130 * Function table for a unary operator providing implementation based on
131 * operand size.
132 */
133typedef struct IEMOPUNARYSIZES
134{
135 PFNIEMAIMPLUNARYU8 pfnNormalU8, pfnLockedU8;
136 PFNIEMAIMPLUNARYU16 pfnNormalU16, pfnLockedU16;
137 PFNIEMAIMPLUNARYU32 pfnNormalU32, pfnLockedU32;
138 PFNIEMAIMPLUNARYU64 pfnNormalU64, pfnLockedU64;
139} IEMOPUNARYSIZES;
140/** Pointer to a unary operator function table. */
141typedef IEMOPUNARYSIZES const *PCIEMOPUNARYSIZES;
142
143
144/**
145 * Function table for a shift operator providing implementation based on
146 * operand size.
147 */
148typedef struct IEMOPSHIFTSIZES
149{
150 PFNIEMAIMPLSHIFTU8 pfnNormalU8;
151 PFNIEMAIMPLSHIFTU16 pfnNormalU16;
152 PFNIEMAIMPLSHIFTU32 pfnNormalU32;
153 PFNIEMAIMPLSHIFTU64 pfnNormalU64;
154} IEMOPSHIFTSIZES;
155/** Pointer to a shift operator function table. */
156typedef IEMOPSHIFTSIZES const *PCIEMOPSHIFTSIZES;
157
158
159/**
160 * Function table for a multiplication or division operation.
161 */
162typedef struct IEMOPMULDIVSIZES
163{
164 PFNIEMAIMPLMULDIVU8 pfnU8;
165 PFNIEMAIMPLMULDIVU16 pfnU16;
166 PFNIEMAIMPLMULDIVU32 pfnU32;
167 PFNIEMAIMPLMULDIVU64 pfnU64;
168} IEMOPMULDIVSIZES;
169/** Pointer to a multiplication or division operation function table. */
170typedef IEMOPMULDIVSIZES const *PCIEMOPMULDIVSIZES;
171
172
173/**
174 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
175 */
176typedef union IEMSELDESC
177{
178 /** The legacy view. */
179 X86DESC Legacy;
180 /** The long mode view. */
181 X86DESC64 Long;
182} IEMSELDESC;
183/** Pointer to a selector descriptor table entry. */
184typedef IEMSELDESC *PIEMSELDESC;
185
186
187/*******************************************************************************
188* Defined Constants And Macros *
189*******************************************************************************/
190/** Temporary hack to disable the double execution. Will be removed in favor
191 * of a dedicated execution mode in EM. */
192//#define IEM_VERIFICATION_MODE_NO_REM
193
194/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
195 * due to GCC lacking knowledge about the value range of a switch. */
196#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_INTERNAL_ERROR_4)
197
198/**
199 * Call an opcode decoder function.
200 *
201 * We're using macros for this so that adding and removing parameters can be
202 * done as we please. See FNIEMOP_DEF.
203 */
204#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
205
206/**
207 * Call a common opcode decoder function taking one extra argument.
208 *
209 * We're using macros for this so that adding and removing parameters can be
210 * done as we please. See FNIEMOP_DEF_1.
211 */
212#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
213
214/**
215 * Call a common opcode decoder function taking two extra arguments.
216 *
217 * We're using macros for this so that adding and removing parameters can be
218 * done as we please. See FNIEMOP_DEF_2.
219 */
220#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
221
222/**
223 * Check if we're currently executing in real or virtual 8086 mode.
224 *
225 * @returns @c true if it is, @c false if not.
226 * @param a_pIemCpu The IEM state of the current CPU.
227 */
228#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
229
230/**
231 * Check if we're currently executing in long mode.
232 *
233 * @returns @c true if it is, @c false if not.
234 * @param a_pIemCpu The IEM state of the current CPU.
235 */
236#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
237
238/**
239 * Check if we're currently executing in real mode.
240 *
241 * @returns @c true if it is, @c false if not.
242 * @param a_pIemCpu The IEM state of the current CPU.
243 */
244#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
245
246/**
247 * Tests if an AMD CPUID feature (extended) is marked present - ECX.
248 */
249#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
250
251/**
252 * Check if the address is canonical.
253 */
254#define IEM_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000))
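/*
 * Editor's note, not part of the original file: the check above accepts any
 * address whose bits 63:47 are all copies of bit 47.  For example:
 */
#if 0 /* illustrative only */
    Assert( IEM_IS_CANONICAL(UINT64_C(0x00007fffffffffff)));   /* top of the lower half */
    Assert( IEM_IS_CANONICAL(UINT64_C(0xffff800000000000)));   /* bottom of the upper half */
    Assert(!IEM_IS_CANONICAL(UINT64_C(0x0000800000000000)));   /* hole between the halves */
#endif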
255
256
257/*******************************************************************************
258* Global Variables *
259*******************************************************************************/
260extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
261
262
263/** Function table for the ADD instruction. */
264static const IEMOPBINSIZES g_iemAImpl_add =
265{
266 iemAImpl_add_u8, iemAImpl_add_u8_locked,
267 iemAImpl_add_u16, iemAImpl_add_u16_locked,
268 iemAImpl_add_u32, iemAImpl_add_u32_locked,
269 iemAImpl_add_u64, iemAImpl_add_u64_locked
270};
271
272/** Function table for the ADC instruction. */
273static const IEMOPBINSIZES g_iemAImpl_adc =
274{
275 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
276 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
277 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
278 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
279};
280
281/** Function table for the SUB instruction. */
282static const IEMOPBINSIZES g_iemAImpl_sub =
283{
284 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
285 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
286 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
287 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
288};
289
290/** Function table for the SBB instruction. */
291static const IEMOPBINSIZES g_iemAImpl_sbb =
292{
293 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
294 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
295 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
296 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
297};
298
299/** Function table for the OR instruction. */
300static const IEMOPBINSIZES g_iemAImpl_or =
301{
302 iemAImpl_or_u8, iemAImpl_or_u8_locked,
303 iemAImpl_or_u16, iemAImpl_or_u16_locked,
304 iemAImpl_or_u32, iemAImpl_or_u32_locked,
305 iemAImpl_or_u64, iemAImpl_or_u64_locked
306};
307
308/** Function table for the XOR instruction. */
309static const IEMOPBINSIZES g_iemAImpl_xor =
310{
311 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
312 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
313 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
314 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
315};
316
317/** Function table for the AND instruction. */
318static const IEMOPBINSIZES g_iemAImpl_and =
319{
320 iemAImpl_and_u8, iemAImpl_and_u8_locked,
321 iemAImpl_and_u16, iemAImpl_and_u16_locked,
322 iemAImpl_and_u32, iemAImpl_and_u32_locked,
323 iemAImpl_and_u64, iemAImpl_and_u64_locked
324};
325
326/** Function table for the CMP instruction.
327 * @remarks Making operand order ASSUMPTIONS.
328 */
329static const IEMOPBINSIZES g_iemAImpl_cmp =
330{
331 iemAImpl_cmp_u8, NULL,
332 iemAImpl_cmp_u16, NULL,
333 iemAImpl_cmp_u32, NULL,
334 iemAImpl_cmp_u64, NULL
335};
336
337/** Function table for the TEST instruction.
338 * @remarks Making operand order ASSUMPTIONS.
339 */
340static const IEMOPBINSIZES g_iemAImpl_test =
341{
342 iemAImpl_test_u8, NULL,
343 iemAImpl_test_u16, NULL,
344 iemAImpl_test_u32, NULL,
345 iemAImpl_test_u64, NULL
346};
347
348/** Function table for the IMUL instruction. */
349static const IEMOPBINSIZES g_iemAImpl_imul_two =
350{
351 NULL, NULL,
352 iemAImpl_imul_two_u16, NULL,
353 iemAImpl_imul_two_u32, NULL,
354 iemAImpl_imul_two_u64, NULL
355};
356
357/** Group 1 /r lookup table. */
358static const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
359{
360 &g_iemAImpl_add,
361 &g_iemAImpl_or,
362 &g_iemAImpl_adc,
363 &g_iemAImpl_sbb,
364 &g_iemAImpl_and,
365 &g_iemAImpl_sub,
366 &g_iemAImpl_xor,
367 &g_iemAImpl_cmp
368};
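/*
 * Editor's sketch, not part of the original file: a group 1 opcode (80h..83h)
 * selects its implementation table via the reg field (bits 5:3) of the ModR/M
 * byte, so /0 is ADD, /5 is SUB and /7 is CMP.  The variable bRm is a
 * placeholder; IEM_OPCODE_GET_NEXT_BYTE is defined further down in this file.
 */
#if 0 /* illustrative only */
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
    PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
#endif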
369
370/** Function table for the INC instruction. */
371static const IEMOPUNARYSIZES g_iemAImpl_inc =
372{
373 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
374 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
375 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
376 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
377};
378
379/** Function table for the DEC instruction. */
380static const IEMOPUNARYSIZES g_iemAImpl_dec =
381{
382 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
383 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
384 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
385 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
386};
387
388/** Function table for the NEG instruction. */
389static const IEMOPUNARYSIZES g_iemAImpl_neg =
390{
391 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
392 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
393 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
394 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
395};
396
397/** Function table for the NOT instruction. */
398static const IEMOPUNARYSIZES g_iemAImpl_not =
399{
400 iemAImpl_not_u8, iemAImpl_not_u8_locked,
401 iemAImpl_not_u16, iemAImpl_not_u16_locked,
402 iemAImpl_not_u32, iemAImpl_not_u32_locked,
403 iemAImpl_not_u64, iemAImpl_not_u64_locked
404};
405
406
407/** Function table for the ROL instruction. */
408static const IEMOPSHIFTSIZES g_iemAImpl_rol =
409{
410 iemAImpl_rol_u8,
411 iemAImpl_rol_u16,
412 iemAImpl_rol_u32,
413 iemAImpl_rol_u64
414};
415
416/** Function table for the ROR instruction. */
417static const IEMOPSHIFTSIZES g_iemAImpl_ror =
418{
419 iemAImpl_ror_u8,
420 iemAImpl_ror_u16,
421 iemAImpl_ror_u32,
422 iemAImpl_ror_u64
423};
424
425/** Function table for the RCL instruction. */
426static const IEMOPSHIFTSIZES g_iemAImpl_rcl =
427{
428 iemAImpl_rcl_u8,
429 iemAImpl_rcl_u16,
430 iemAImpl_rcl_u32,
431 iemAImpl_rcl_u64
432};
433
434/** Function table for the RCR instruction. */
435static const IEMOPSHIFTSIZES g_iemAImpl_rcr =
436{
437 iemAImpl_rcr_u8,
438 iemAImpl_rcr_u16,
439 iemAImpl_rcr_u32,
440 iemAImpl_rcr_u64
441};
442
443/** Function table for the SHL instruction. */
444static const IEMOPSHIFTSIZES g_iemAImpl_shl =
445{
446 iemAImpl_shl_u8,
447 iemAImpl_shl_u16,
448 iemAImpl_shl_u32,
449 iemAImpl_shl_u64
450};
451
452/** Function table for the SHR instruction. */
453static const IEMOPSHIFTSIZES g_iemAImpl_shr =
454{
455 iemAImpl_shr_u8,
456 iemAImpl_shr_u16,
457 iemAImpl_shr_u32,
458 iemAImpl_shr_u64
459};
460
461/** Function table for the SAR instruction. */
462static const IEMOPSHIFTSIZES g_iemAImpl_sar =
463{
464 iemAImpl_sar_u8,
465 iemAImpl_sar_u16,
466 iemAImpl_sar_u32,
467 iemAImpl_sar_u64
468};
469
470
471/** Function table for the MUL instruction. */
472static const IEMOPMULDIVSIZES g_iemAImpl_mul =
473{
474 iemAImpl_mul_u8,
475 iemAImpl_mul_u16,
476 iemAImpl_mul_u32,
477 iemAImpl_mul_u64
478};
479
480/** Function table for the IMUL instruction working implicitly on rAX. */
481static const IEMOPMULDIVSIZES g_iemAImpl_imul =
482{
483 iemAImpl_imul_u8,
484 iemAImpl_imul_u16,
485 iemAImpl_imul_u32,
486 iemAImpl_imul_u64
487};
488
489/** Function table for the DIV instruction. */
490static const IEMOPMULDIVSIZES g_iemAImpl_div =
491{
492 iemAImpl_div_u8,
493 iemAImpl_div_u16,
494 iemAImpl_div_u32,
495 iemAImpl_div_u64
496};
497
498/** Function table for the IDIV instruction. */
499static const IEMOPMULDIVSIZES g_iemAImpl_idiv =
500{
501 iemAImpl_idiv_u8,
502 iemAImpl_idiv_u16,
503 iemAImpl_idiv_u32,
504 iemAImpl_idiv_u64
505};
506
507
508/*******************************************************************************
509* Internal Functions *
510*******************************************************************************/
511static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
512static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
513static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
514static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
515static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
516#ifdef IEM_VERIFICATION_MODE
517static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
518#endif
519static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
520static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
521
522
523/**
524 * Initializes the decoder state.
525 *
526 * @param pIemCpu The per CPU IEM state.
527 */
528DECLINLINE(void) iemInitDecode(PIEMCPU pIemCpu)
529{
530 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
531
532 pIemCpu->uCpl = CPUMGetGuestCPL(IEMCPU_TO_VMCPU(pIemCpu), CPUMCTX2CORE(pCtx));
533 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
534 ? IEMMODE_64BIT
535 : pCtx->csHid.Attr.n.u1DefBig /** @todo check if this is correct... */
536 ? IEMMODE_32BIT
537 : IEMMODE_16BIT;
538 pIemCpu->enmCpuMode = enmMode;
539 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
540 pIemCpu->enmEffAddrMode = enmMode;
541 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
542 pIemCpu->enmEffOpSize = enmMode;
543 pIemCpu->fPrefixes = 0;
544 pIemCpu->uRexReg = 0;
545 pIemCpu->uRexB = 0;
546 pIemCpu->uRexIndex = 0;
547 pIemCpu->iEffSeg = X86_SREG_DS;
548 pIemCpu->offOpcode = 0;
549 pIemCpu->cbOpcode = 0;
550 pIemCpu->cActiveMappings = 0;
551 pIemCpu->iNextMapping = 0;
552}
553
554
555/**
556 * Prefetches opcodes the first time execution starts.
557 *
558 * @returns Strict VBox status code.
559 * @param pIemCpu The IEM state.
560 */
561static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu)
562{
563#ifdef IEM_VERIFICATION_MODE
564 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
565#endif
566 iemInitDecode(pIemCpu);
567
568 /*
569 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
570 *
571 * First translate CS:rIP to a physical address.
572 */
573 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
574 uint32_t cbToTryRead;
575 RTGCPTR GCPtrPC;
576 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
577 {
578 cbToTryRead = PAGE_SIZE;
579 GCPtrPC = pCtx->rip;
580 if (!IEM_IS_CANONICAL(GCPtrPC))
581 return iemRaiseGeneralProtectionFault0(pIemCpu);
582 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
583 }
584 else
585 {
586 uint32_t GCPtrPC32 = pCtx->eip;
587 Assert(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
588 if (GCPtrPC32 > pCtx->csHid.u32Limit)
589 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
590 cbToTryRead = pCtx->csHid.u32Limit - GCPtrPC32 + 1;
591 GCPtrPC = pCtx->csHid.u64Base + GCPtrPC32;
592 }
593
594 RTGCPHYS GCPhys;
595 uint64_t fFlags;
596 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
597 if (RT_FAILURE(rc))
598 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
599 if ((fFlags & X86_PTE_US) && pIemCpu->uCpl == 2)
600 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
601 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
602 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
603 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
604 /** @todo Check reserved bits and such stuff. PGM is better at doing
605 * that, so do it when implementing the guest virtual address
606 * TLB... */
607
608#ifdef IEM_VERIFICATION_MODE
609 /*
610 * Optimistic optimization: Use unconsumed opcode bytes from the previous
611 * instruction.
612 */
613 /** @todo optimize this differently by not using PGMPhysRead. */
614 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
615 pIemCpu->GCPhysOpcodes = GCPhys;
616 if ( offPrevOpcodes < cbOldOpcodes
617 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
618 {
619 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
620 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
621 pIemCpu->cbOpcode = cbNew;
622 return VINF_SUCCESS;
623 }
624#endif
625
626 /*
627 * Read the bytes at this address.
628 */
629 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
630 if (cbToTryRead > cbLeftOnPage)
631 cbToTryRead = cbLeftOnPage;
632 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
633 cbToTryRead = sizeof(pIemCpu->abOpcode);
634 /** @todo patch manager */
635 if (!pIemCpu->fByPassHandlers)
636 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, pIemCpu->abOpcode, cbToTryRead);
637 else
638 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pIemCpu->abOpcode, GCPhys, cbToTryRead);
639 if (rc != VINF_SUCCESS)
640 return rc;
641 pIemCpu->cbOpcode = cbToTryRead;
642
643 return VINF_SUCCESS;
644}
645
646
647/**
648 * Try fetch at least @a cbMin bytes more opcodes, raise the appropriate
649 * exception if it fails.
650 *
651 * @returns Strict VBox status code.
652 * @param pIemCpu The IEM state.
653 * @param cbMin The minimum number of additional opcode bytes required.
654 */
655static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
656{
657 /*
658 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
659 *
660 * First translate CS:rIP to a physical address.
661 */
662 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
663 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
664 uint32_t cbToTryRead;
665 RTGCPTR GCPtrNext;
666 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
667 {
668 cbToTryRead = PAGE_SIZE;
669 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
670 if (!IEM_IS_CANONICAL(GCPtrNext))
671 return iemRaiseGeneralProtectionFault0(pIemCpu);
672 cbToTryRead = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
673 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
674 }
675 else
676 {
677 uint32_t GCPtrNext32 = pCtx->eip;
678 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
679 GCPtrNext32 += pIemCpu->cbOpcode;
680 if (GCPtrNext32 > pCtx->csHid.u32Limit)
681 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
682 cbToTryRead = pCtx->csHid.u32Limit - GCPtrNext32 + 1;
683 if (cbToTryRead < cbMin - cbLeft)
684 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
685 GCPtrNext = pCtx->csHid.u64Base + GCPtrNext32;
686 }
687
688 RTGCPHYS GCPhys;
689 uint64_t fFlags;
690 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
691 if (RT_FAILURE(rc))
692 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
693 if ((fFlags & X86_PTE_US) && pIemCpu->uCpl == 2)
694 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
695 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
696 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
697 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
698 //Log(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
699 /** @todo Check reserved bits and such stuff. PGM is better at doing
700 * that, so do it when implementing the guest virtual address
701 * TLB... */
702
703 /*
704 * Read the bytes at this address.
705 */
706 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
707 if (cbToTryRead > cbLeftOnPage)
708 cbToTryRead = cbLeftOnPage;
709 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
710 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
711 Assert(cbToTryRead >= cbMin - cbLeft);
712 if (!pIemCpu->fByPassHandlers)
713 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);
714 else
715 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
716 if (rc != VINF_SUCCESS)
717 return rc;
718 pIemCpu->cbOpcode += cbToTryRead;
719 //Log(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
720
721 return VINF_SUCCESS;
722}
723
724
725/**
726 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
727 *
728 * @returns Strict VBox status code.
729 * @param pIemCpu The IEM state.
730 * @param pb Where to return the opcode byte.
731 */
732static VBOXSTRICTRC iemOpcodeGetNextByteSlow(PIEMCPU pIemCpu, uint8_t *pb)
733{
734 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
735 if (rcStrict == VINF_SUCCESS)
736 {
737 uint8_t offOpcode = pIemCpu->offOpcode;
738 *pb = pIemCpu->abOpcode[offOpcode];
739 pIemCpu->offOpcode = offOpcode + 1;
740 }
741 else
742 *pb = 0;
743 return rcStrict;
744}
745
746
747/**
748 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
749 *
750 * @returns Strict VBox status code.
751 * @param pIemCpu The IEM state.
752 * @param pu16 Where to return the sign-extended opcode word.
753 */
754static VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
755{
756 uint8_t u8;
757 VBOXSTRICTRC rcStrict = iemOpcodeGetNextByteSlow(pIemCpu, &u8);
758 if (rcStrict == VINF_SUCCESS)
759 *pu16 = (int8_t)u8;
760 return rcStrict;
761}
762
763
764/**
765 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
766 *
767 * @returns Strict VBox status code.
768 * @param pIemCpu The IEM state.
769 * @param pu16 Where to return the opcode word.
770 */
771static VBOXSTRICTRC iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
772{
773 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
774 if (rcStrict == VINF_SUCCESS)
775 {
776 uint8_t offOpcode = pIemCpu->offOpcode;
777 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
778 pIemCpu->offOpcode = offOpcode + 2;
779 }
780 else
781 *pu16 = 0;
782 return rcStrict;
783}
784
785
786/**
787 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
788 *
789 * @returns Strict VBox status code.
790 * @param pIemCpu The IEM state.
791 * @param pu32 Where to return the opcode dword.
792 */
793static VBOXSTRICTRC iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
794{
795 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
796 if (rcStrict == VINF_SUCCESS)
797 {
798 uint8_t offOpcode = pIemCpu->offOpcode;
799 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
800 pIemCpu->abOpcode[offOpcode + 1],
801 pIemCpu->abOpcode[offOpcode + 2],
802 pIemCpu->abOpcode[offOpcode + 3]);
803 pIemCpu->offOpcode = offOpcode + 4;
804 }
805 else
806 *pu32 = 0;
807 return rcStrict;
808}
809
810
811/**
812 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
813 *
814 * @returns Strict VBox status code.
815 * @param pIemCpu The IEM state.
816 * @param pu64 Where to return the opcode qword.
817 */
818static VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
819{
820 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
821 if (rcStrict == VINF_SUCCESS)
822 {
823 uint8_t offOpcode = pIemCpu->offOpcode;
824 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
825 pIemCpu->abOpcode[offOpcode + 1],
826 pIemCpu->abOpcode[offOpcode + 2],
827 pIemCpu->abOpcode[offOpcode + 3]);
828 pIemCpu->offOpcode = offOpcode + 4;
829 }
830 else
831 *pu64 = 0;
832 return rcStrict;
833}
834
835
836/**
837 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
838 *
839 * @returns Strict VBox status code.
840 * @param pIemCpu The IEM state.
841 * @param pu64 Where to return the opcode qword.
842 */
843static VBOXSTRICTRC iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
844{
845 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
846 if (rcStrict == VINF_SUCCESS)
847 {
848 uint8_t offOpcode = pIemCpu->offOpcode;
849 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
850 pIemCpu->abOpcode[offOpcode + 1],
851 pIemCpu->abOpcode[offOpcode + 2],
852 pIemCpu->abOpcode[offOpcode + 3],
853 pIemCpu->abOpcode[offOpcode + 4],
854 pIemCpu->abOpcode[offOpcode + 5],
855 pIemCpu->abOpcode[offOpcode + 6],
856 pIemCpu->abOpcode[offOpcode + 7]);
857 pIemCpu->offOpcode = offOpcode + 8;
858 }
859 else
860 *pu64 = 0;
861 return rcStrict;
862}
863
864
865/**
866 * Fetches the next opcode byte.
867 *
868 * @returns Strict VBox status code.
869 * @param pIemCpu The IEM state.
870 * @param pu8 Where to return the opcode byte.
871 */
872DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
873{
874 uint8_t const offOpcode = pIemCpu->offOpcode;
875 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
876 return iemOpcodeGetNextByteSlow(pIemCpu, pu8);
877
878 *pu8 = pIemCpu->abOpcode[offOpcode];
879 pIemCpu->offOpcode = offOpcode + 1;
880 return VINF_SUCCESS;
881}
882
883/**
884 * Fetches the next opcode byte, returns automatically on failure.
885 *
886 * @param pIemCpu The IEM state.
887 * @param a_pu8 Where to return the opcode byte.
888 */
889#define IEM_OPCODE_GET_NEXT_BYTE(a_pIemCpu, a_pu8) \
890 do \
891 { \
892 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8((a_pIemCpu), (a_pu8)); \
893 if (rcStrict2 != VINF_SUCCESS) \
894 return rcStrict2; \
895 } while (0)
896
897
898/**
899 * Fetches the next signed byte from the opcode stream.
900 *
901 * @returns Strict VBox status code.
902 * @param pIemCpu The IEM state.
903 * @param pi8 Where to return the signed byte.
904 */
905DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
906{
907 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
908}
909
910/**
911 * Fetches the next signed byte from the opcode stream, returning automatically
912 * on failure.
913 *
914 * @param pIemCpu The IEM state.
915 * @param pi8 Where to return the signed byte.
916 */
917#define IEM_OPCODE_GET_NEXT_S8(a_pIemCpu, a_pi8) \
918 do \
919 { \
920 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8((a_pIemCpu), (a_pi8)); \
921 if (rcStrict2 != VINF_SUCCESS) \
922 return rcStrict2; \
923 } while (0)
924
925
926/**
927 * Fetches the next signed byte from the opcode stream, sign-extending it to
928 * an unsigned 16-bit value.
929 *
930 * @returns Strict VBox status code.
931 * @param pIemCpu The IEM state.
932 * @param pu16 Where to return the unsigned word.
933 */
934DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
935{
936 uint8_t const offOpcode = pIemCpu->offOpcode;
937 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
938 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
939
940 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
941 pIemCpu->offOpcode = offOpcode + 1;
942 return VINF_SUCCESS;
943}
944
945
946/**
947 * Fetches the next signed byte from the opcode stream, sign-extends it to
948 * a word, returning automatically on failure.
949 *
950 * @param pIemCpu The IEM state.
951 * @param pu16 Where to return the word.
952 */
953#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pIemCpu, a_pu16) \
954 do \
955 { \
956 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16((a_pIemCpu), (a_pu16)); \
957 if (rcStrict2 != VINF_SUCCESS) \
958 return rcStrict2; \
959 } while (0)
960
961
962/**
963 * Fetches the next opcode word.
964 *
965 * @returns Strict VBox status code.
966 * @param pIemCpu The IEM state.
967 * @param pu16 Where to return the opcode word.
968 */
969DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
970{
971 uint8_t const offOpcode = pIemCpu->offOpcode;
972 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
973 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
974
975 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
976 pIemCpu->offOpcode = offOpcode + 2;
977 return VINF_SUCCESS;
978}
979
980/**
981 * Fetches the next opcode word, returns automatically on failure.
982 *
983 * @param pIemCpu The IEM state.
984 * @param a_pu16 Where to return the opcode word.
985 */
986#define IEM_OPCODE_GET_NEXT_U16(a_pIemCpu, a_pu16) \
987 do \
988 { \
989 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16((a_pIemCpu), (a_pu16)); \
990 if (rcStrict2 != VINF_SUCCESS) \
991 return rcStrict2; \
992 } while (0)
993
994
995/**
996 * Fetches the next opcode dword.
997 *
998 * @returns Strict VBox status code.
999 * @param pIemCpu The IEM state.
1000 * @param pu32 Where to return the opcode double word.
1001 */
1002DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1003{
1004 uint8_t const offOpcode = pIemCpu->offOpcode;
1005 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1006 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1007
1008 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1009 pIemCpu->abOpcode[offOpcode + 1],
1010 pIemCpu->abOpcode[offOpcode + 2],
1011 pIemCpu->abOpcode[offOpcode + 3]);
1012 pIemCpu->offOpcode = offOpcode + 4;
1013 return VINF_SUCCESS;
1014}
1015
1016/**
1017 * Fetches the next opcode dword, returns automatically on failure.
1018 *
1019 * @param pIemCpu The IEM state.
1020 * @param a_pu32 Where to return the opcode dword.
1021 */
1022#define IEM_OPCODE_GET_NEXT_U32(a_pIemCpu, a_pu32) \
1023 do \
1024 { \
1025 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32((a_pIemCpu), (a_pu32)); \
1026 if (rcStrict2 != VINF_SUCCESS) \
1027 return rcStrict2; \
1028 } while (0)
1029
1030
1031/**
1032 * Fetches the next opcode dword, sign extending it into a quad word.
1033 *
1034 * @returns Strict VBox status code.
1035 * @param pIemCpu The IEM state.
1036 * @param pu64 Where to return the opcode quad word.
1037 */
1038DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1039{
1040 uint8_t const offOpcode = pIemCpu->offOpcode;
1041 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1042 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1043
1044 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1045 pIemCpu->abOpcode[offOpcode + 1],
1046 pIemCpu->abOpcode[offOpcode + 2],
1047 pIemCpu->abOpcode[offOpcode + 3]);
1048 *pu64 = i32;
1049 pIemCpu->offOpcode = offOpcode + 4;
1050 return VINF_SUCCESS;
1051}
1052
1053/**
1054 * Fetches the next opcode double word and sign extends it to a quad word,
1055 * returns automatically on failure.
1056 *
1057 * @param pIemCpu The IEM state.
1058 * @param a_pu64 Where to return the opcode quad word.
1059 */
1060#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pIemCpu, a_pu64) \
1061 do \
1062 { \
1063 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64((a_pIemCpu), (a_pu64)); \
1064 if (rcStrict2 != VINF_SUCCESS) \
1065 return rcStrict2; \
1066 } while (0)
1067
1068
1069/**
1070 * Fetches the next opcode qword.
1071 *
1072 * @returns Strict VBox status code.
1073 * @param pIemCpu The IEM state.
1074 * @param pu64 Where to return the opcode qword.
1075 */
1076DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1077{
1078 uint8_t const offOpcode = pIemCpu->offOpcode;
1079 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1080 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1081
1082 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1083 pIemCpu->abOpcode[offOpcode + 1],
1084 pIemCpu->abOpcode[offOpcode + 2],
1085 pIemCpu->abOpcode[offOpcode + 3],
1086 pIemCpu->abOpcode[offOpcode + 4],
1087 pIemCpu->abOpcode[offOpcode + 5],
1088 pIemCpu->abOpcode[offOpcode + 6],
1089 pIemCpu->abOpcode[offOpcode + 7]);
1090 pIemCpu->offOpcode = offOpcode + 8;
1091 return VINF_SUCCESS;
1092}
1093
1094/**
1095 * Fetches the next opcode qword, returns automatically on failure.
1096 *
1097 * @param pIemCpu The IEM state.
1098 * @param a_pu64 Where to return the opcode qword.
1099 */
1100#define IEM_OPCODE_GET_NEXT_U64(a_pIemCpu, a_pu64) \
1101 do \
1102 { \
1103 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64((a_pIemCpu), (a_pu64)); \
1104 if (rcStrict2 != VINF_SUCCESS) \
1105 return rcStrict2; \
1106 } while (0)
1107
1108
1109/** @name Raising Exceptions.
1110 *
1111 * @{
1112 */
1113
1114static VBOXSTRICTRC iemRaiseDivideError(PIEMCPU pIemCpu)
1115{
1116 AssertFailed(/** @todo implement this */);
1117 return VERR_NOT_IMPLEMENTED;
1118}
1119
1120
1121static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
1122{
1123 AssertFailed(/** @todo implement this */);
1124 return VERR_NOT_IMPLEMENTED;
1125}
1126
1127
1128static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
1129{
1130 AssertFailed(/** @todo implement this */);
1131 return VERR_NOT_IMPLEMENTED;
1132}
1133
1134
1135static VBOXSTRICTRC iemRaiseNotCanonical(PIEMCPU pIemCpu)
1136{
1137 AssertFailed(/** @todo implement this */);
1138 return VERR_NOT_IMPLEMENTED;
1139}
1140
1141
1142static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
1143{
1144 AssertFailed(/** @todo implement this */);
1145 return VERR_NOT_IMPLEMENTED;
1146}
1147
1148
1149static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
1150{
1151 AssertFailed(/** @todo implement this */);
1152 return VERR_NOT_IMPLEMENTED;
1153}
1154
1155
1156static VBOXSTRICTRC iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
1157{
1158 AssertFailed(/** @todo implement this */);
1159 return VERR_NOT_IMPLEMENTED;
1160}
1161
1162
1163static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
1164{
1165 AssertFailed(/** @todo implement this */);
1166 return VERR_NOT_IMPLEMENTED;
1167}
1168
1169
1170static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
1171{
1172 AssertFailed(/** @todo implement this */);
1173 return VERR_NOT_IMPLEMENTED;
1174}
1175
1176
1177/**
1178 * Macro for calling iemCImplRaiseInvalidLockPrefix().
1179 *
1180 * This enables us to add/remove arguments and force different levels of
1181 * inlining as we wish.
1182 *
1183 * @return Strict VBox status code.
1184 */
1185#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
1186IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
1187{
1188 AssertFailed();
1189 return VERR_NOT_IMPLEMENTED;
1190}
1191
1192
1193/**
1194 * Macro for calling iemCImplRaiseInvalidOpcode().
1195 *
1196 * This enables us to add/remove arguments and force different levels of
1197 * inlining as we wish.
1198 *
1199 * @return Strict VBox status code.
1200 */
1201#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
1202IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
1203{
1204 AssertFailed();
1205 return VERR_NOT_IMPLEMENTED;
1206}
1207
1208
1209/** @} */
1210
1211
1212/*
1213 *
1214 * Helper routines.
1215 * Helper routines.
1216 * Helper routines.
1217 *
1218 */
1219
1220/**
1221 * Recalculates the effective operand size.
1222 *
1223 * @param pIemCpu The IEM state.
1224 */
1225static void iemRecalEffOpSize(PIEMCPU pIemCpu)
1226{
1227 switch (pIemCpu->enmCpuMode)
1228 {
1229 case IEMMODE_16BIT:
1230 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
1231 break;
1232 case IEMMODE_32BIT:
1233 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
1234 break;
1235 case IEMMODE_64BIT:
1236 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
1237 {
1238 case 0:
1239 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
1240 break;
1241 case IEM_OP_PRF_SIZE_OP:
1242 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
1243 break;
1244 case IEM_OP_PRF_SIZE_REX_W:
1245 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
1246 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
1247 break;
1248 }
1249 break;
1250 default:
1251 AssertFailed();
1252 }
1253}
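/*
 * Editor's illustration, not part of the original file: how the effective
 * operand size falls out of the CPU mode and prefixes handled above.
 *   16-bit code: 16-bit by default, 32-bit with a 66h prefix.
 *   32-bit code: 32-bit by default, 16-bit with a 66h prefix.
 *   64-bit code: the default size (normally 32-bit), 16-bit with a 66h prefix,
 *                and 64-bit whenever REX.W is set (REX.W overrides 66h).
 */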
1254
1255
1256/**
1257 * Sets the default operand size to 64-bit and recalculates the effective
1258 * operand size.
1259 *
1260 * @param pIemCpu The IEM state.
1261 */
1262static void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
1263{
1264 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
1265 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
1266 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
1267 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
1268 else
1269 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
1270}
1271
1272
1273/*
1274 *
1275 * Common opcode decoders.
1276 * Common opcode decoders.
1277 * Common opcode decoders.
1278 *
1279 */
1280#include <iprt/mem.h>
1281
1282/**
1283 * Used to add extra details about a stub case.
1284 * @param pIemCpu The IEM per CPU state.
1285 */
1286static void iemOpStubMsg2(PIEMCPU pIemCpu)
1287{
1288 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1289 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
1290 char szRegs[4096];
1291 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
1292 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
1293 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
1294 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
1295 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
1296 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
1297 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
1298 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
1299 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
1300 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
1301 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
1302 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
1303 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
1304 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
1305 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
1306 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
1307 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
1308 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
1309 " efer=%016VR{efer}\n"
1310 " pat=%016VR{pat}\n"
1311 " sf_mask=%016VR{sf_mask}\n"
1312 "krnl_gs_base=%016VR{krnl_gs_base}\n"
1313 " lstar=%016VR{lstar}\n"
1314 " star=%016VR{star} cstar=%016VR{cstar}\n"
1315 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
1316 );
1317
1318 char szInstr[256];
1319 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
1320 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
1321 szInstr, sizeof(szInstr), NULL);
1322
1323 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
1324}
1325
1326
1327/** Stubs an opcode. */
1328#define FNIEMOP_STUB(a_Name) \
1329 FNIEMOP_DEF(a_Name) \
1330 { \
1331 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
1332 iemOpStubMsg2(pIemCpu); \
1333 RTAssertPanic(); \
1334 return VERR_NOT_IMPLEMENTED; \
1335 } \
1336 typedef int ignore_semicolon
1337
1338/** Stubs an opcode. */
1339#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
1340 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
1341 { \
1342 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
1343 iemOpStubMsg2(pIemCpu); \
1344 RTAssertPanic(); \
1345 return VERR_NOT_IMPLEMENTED; \
1346 } \
1347 typedef int ignore_semicolon
1348
1349
1350
1351/** @name Register Access.
1352 * @{
1353 */
1354
1355/**
1356 * Gets a reference (pointer) to the specified hidden segment register.
1357 *
1358 * @returns Hidden register reference.
1359 * @param pIemCpu The per CPU data.
1360 * @param iSegReg The segment register.
1361 */
1362static PCPUMSELREGHID iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
1363{
1364 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1365 switch (iSegReg)
1366 {
1367 case X86_SREG_ES: return &pCtx->esHid;
1368 case X86_SREG_CS: return &pCtx->csHid;
1369 case X86_SREG_SS: return &pCtx->ssHid;
1370 case X86_SREG_DS: return &pCtx->dsHid;
1371 case X86_SREG_FS: return &pCtx->fsHid;
1372 case X86_SREG_GS: return &pCtx->gsHid;
1373 }
1374 AssertFailedReturn(NULL);
1375}
1376
1377
1378/**
1379 * Gets a reference (pointer) to the specified segment register (the selector
1380 * value).
1381 *
1382 * @returns Pointer to the selector variable.
1383 * @param pIemCpu The per CPU data.
1384 * @param iSegReg The segment register.
1385 */
1386static uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
1387{
1388 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1389 switch (iSegReg)
1390 {
1391 case X86_SREG_ES: return &pCtx->es;
1392 case X86_SREG_CS: return &pCtx->cs;
1393 case X86_SREG_SS: return &pCtx->ss;
1394 case X86_SREG_DS: return &pCtx->ds;
1395 case X86_SREG_FS: return &pCtx->fs;
1396 case X86_SREG_GS: return &pCtx->gs;
1397 }
1398 AssertFailedReturn(NULL);
1399}
1400
1401
1402/**
1403 * Fetches the selector value of a segment register.
1404 *
1405 * @returns The selector value.
1406 * @param pIemCpu The per CPU data.
1407 * @param iSegReg The segment register.
1408 */
1409static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
1410{
1411 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1412 switch (iSegReg)
1413 {
1414 case X86_SREG_ES: return pCtx->es;
1415 case X86_SREG_CS: return pCtx->cs;
1416 case X86_SREG_SS: return pCtx->ss;
1417 case X86_SREG_DS: return pCtx->ds;
1418 case X86_SREG_FS: return pCtx->fs;
1419 case X86_SREG_GS: return pCtx->gs;
1420 }
1421 AssertFailedReturn(0xffff);
1422}
1423
1424
1425/**
1426 * Gets a reference (pointer) to the specified general register.
1427 *
1428 * @returns Register reference.
1429 * @param pIemCpu The per CPU data.
1430 * @param iReg The general register.
1431 */
1432static void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
1433{
1434 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1435 switch (iReg)
1436 {
1437 case X86_GREG_xAX: return &pCtx->rax;
1438 case X86_GREG_xCX: return &pCtx->rcx;
1439 case X86_GREG_xDX: return &pCtx->rdx;
1440 case X86_GREG_xBX: return &pCtx->rbx;
1441 case X86_GREG_xSP: return &pCtx->rsp;
1442 case X86_GREG_xBP: return &pCtx->rbp;
1443 case X86_GREG_xSI: return &pCtx->rsi;
1444 case X86_GREG_xDI: return &pCtx->rdi;
1445 case X86_GREG_x8: return &pCtx->r8;
1446 case X86_GREG_x9: return &pCtx->r9;
1447 case X86_GREG_x10: return &pCtx->r10;
1448 case X86_GREG_x11: return &pCtx->r11;
1449 case X86_GREG_x12: return &pCtx->r12;
1450 case X86_GREG_x13: return &pCtx->r13;
1451 case X86_GREG_x14: return &pCtx->r14;
1452 case X86_GREG_x15: return &pCtx->r15;
1453 }
1454 AssertFailedReturn(NULL);
1455}
1456
1457
1458/**
1459 * Gets a reference (pointer) to the specified 8-bit general register.
1460 *
1461 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
1462 *
1463 * @returns Register reference.
1464 * @param pIemCpu The per CPU data.
1465 * @param iReg The register.
1466 */
1467static uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
1468{
1469 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
1470 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
1471
1472 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
1473 if (iReg >= 4)
1474 pu8Reg++;
1475 return pu8Reg;
1476}
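/*
 * Editor's note, not part of the original file: without any REX prefix the
 * register indexes 4..7 address AH, CH, DH and BH (byte 1 of rAX..rBX), which
 * is what the iReg & 3 plus pu8Reg++ adjustment above implements.  With a REX
 * prefix they address SPL, BPL, SIL and DIL (byte 0 of rSP..rDI) instead.
 */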
1477
1478
1479/**
1480 * Fetches the value of an 8-bit general register.
1481 *
1482 * @returns The register value.
1483 * @param pIemCpu The per CPU data.
1484 * @param iReg The register.
1485 */
1486static uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
1487{
1488 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
1489 return *pbSrc;
1490}
1491
1492
1493/**
1494 * Fetches the value of a 16-bit general register.
1495 *
1496 * @returns The register value.
1497 * @param pIemCpu The per CPU data.
1498 * @param iReg The register.
1499 */
1500static uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
1501{
1502 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
1503}
1504
1505
1506/**
1507 * Fetches the value of a 32-bit general register.
1508 *
1509 * @returns The register value.
1510 * @param pIemCpu The per CPU data.
1511 * @param iReg The register.
1512 */
1513static uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
1514{
1515 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
1516}
1517
1518
1519/**
1520 * Fetches the value of a 64-bit general register.
1521 *
1522 * @returns The register value.
1523 * @param pIemCpu The per CPU data.
1524 * @param iReg The register.
1525 */
1526static uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
1527{
1528 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
1529}
1530
1531
1532/**
1533 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
1534 *
1535 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
1536 * segment limit.
1537 *
1538 * @param pIemCpu The per CPU data.
1539 * @param offNextInstr The offset of the next instruction.
1540 */
1541static VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
1542{
1543 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1544 switch (pIemCpu->enmEffOpSize)
1545 {
1546 case IEMMODE_16BIT:
1547 {
1548 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
1549 if ( uNewIp > pCtx->csHid.u32Limit
1550 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
1551 return iemRaiseGeneralProtectionFault0(pIemCpu);
1552 pCtx->rip = uNewIp;
1553 break;
1554 }
1555
1556 case IEMMODE_32BIT:
1557 {
1558 Assert(pCtx->rip <= UINT32_MAX);
1559 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
1560
1561 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
1562 if (uNewEip > pCtx->csHid.u32Limit)
1563 return iemRaiseGeneralProtectionFault0(pIemCpu);
1564 pCtx->rip = uNewEip;
1565 break;
1566 }
1567
1568 case IEMMODE_64BIT:
1569 {
1570 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
1571
1572 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
1573 if (!IEM_IS_CANONICAL(uNewRip))
1574 return iemRaiseGeneralProtectionFault0(pIemCpu);
1575 pCtx->rip = uNewRip;
1576 break;
1577 }
1578
1579 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1580 }
1581
1582 return VINF_SUCCESS;
1583}
1584
1585
1586/**
1587 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
1588 *
1589 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
1590 * segment limit.
1591 *
1592 * @returns Strict VBox status code.
1593 * @param pIemCpu The per CPU data.
1594 * @param offNextInstr The offset of the next instruction.
1595 */
1596static VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
1597{
1598 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1599 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
1600
1601 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
1602 if ( uNewIp > pCtx->csHid.u32Limit
1603 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
1604 return iemRaiseGeneralProtectionFault0(pIemCpu);
1605 /** @todo Test 16-bit jump in 64-bit mode. */
1606 pCtx->rip = uNewIp;
1607
1608 return VINF_SUCCESS;
1609}
1610
1611
1612/**
1613 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
1614 *
1615 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
1616 * segment limit.
1617 *
1618 * @returns Strict VBox status code.
1619 * @param pIemCpu The per CPU data.
1620 * @param offNextInstr The offset of the next instruction.
1621 */
1622static VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
1623{
1624 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1625 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
1626
1627 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
1628 {
1629 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
1630
1631 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
1632 if (uNewEip > pCtx->csHid.u32Limit)
1633 return iemRaiseGeneralProtectionFault0(pIemCpu);
1634 pCtx->rip = uNewEip;
1635 }
1636 else
1637 {
1638 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
1639
1640 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
1641 if (!IEM_IS_CANONICAL(uNewRip))
1642 return iemRaiseGeneralProtectionFault0(pIemCpu);
1643 pCtx->rip = uNewRip;
1644 }
1645 return VINF_SUCCESS;
1646}
1647
1648
1649/**
1650 * Performs a near jump to the specified address.
1651 *
1652 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
1653 * segment limit.
1654 *
1655 * @param pIemCpu The per CPU data.
1656 * @param uNewRip The new RIP value.
1657 */
1658static VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
1659{
1660 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1661 switch (pIemCpu->enmEffOpSize)
1662 {
1663 case IEMMODE_16BIT:
1664 {
1665 Assert(uNewRip <= UINT16_MAX);
1666 if ( uNewRip > pCtx->csHid.u32Limit
1667 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
1668 return iemRaiseGeneralProtectionFault0(pIemCpu);
1669 /** @todo Test 16-bit jump in 64-bit mode. */
1670 pCtx->rip = uNewRip;
1671 break;
1672 }
1673
1674 case IEMMODE_32BIT:
1675 {
1676 Assert(uNewRip <= UINT32_MAX);
1677 Assert(pCtx->rip <= UINT32_MAX);
1678 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
1679
1680 if (uNewRip > pCtx->csHid.u32Limit)
1681 return iemRaiseGeneralProtectionFault0(pIemCpu);
1682 pCtx->rip = uNewRip;
1683 break;
1684 }
1685
1686 case IEMMODE_64BIT:
1687 {
1688 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
1689
1690 if (!IEM_IS_CANONICAL(uNewRip))
1691 return iemRaiseGeneralProtectionFault0(pIemCpu);
1692 pCtx->rip = uNewRip;
1693 break;
1694 }
1695
1696 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1697 }
1698
1699 return VINF_SUCCESS;
1700}
1701
1702
1703/**
1704 * Get the address of the top of the stack.
1705 *
1706 * @param pCtx The CPU context which SP/ESP/RSP should be
1707 * read.
1708 */
1709DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCCPUMCTX pCtx)
1710{
1711 if (pCtx->ssHid.Attr.n.u1Long)
1712 return pCtx->rsp;
1713 if (pCtx->ssHid.Attr.n.u1DefBig)
1714 return pCtx->esp;
1715 return pCtx->sp;
1716}
1717
1718
1719/**
1720 * Updates the RIP/EIP/IP to point to the next instruction.
1721 *
1722 * @param pIemCpu The per CPU data.
1723 * @param cbInstr The number of bytes to add.
1724 */
1725static void iemRegAddToRip(PIEMCPU pIemCpu, uint8_t cbInstr)
1726{
1727 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1728 switch (pIemCpu->enmCpuMode)
1729 {
1730 case IEMMODE_16BIT:
1731 Assert(pCtx->rip <= UINT16_MAX);
1732 pCtx->eip += cbInstr;
1733 pCtx->eip &= UINT32_C(0xffff);
1734 break;
1735
1736 case IEMMODE_32BIT:
1737 pCtx->eip += cbInstr;
1738 Assert(pCtx->rip <= UINT32_MAX);
1739 break;
1740
1741 case IEMMODE_64BIT:
1742 pCtx->rip += cbInstr;
1743 break;
1744 default: AssertFailed();
1745 }
1746}
1747
1748
1749/**
1750 * Updates the RIP/EIP/IP to point to the next instruction.
1751 *
1752 * @param pIemCpu The per CPU data.
1753 */
1754static void iemRegUpdateRip(PIEMCPU pIemCpu)
1755{
1756 return iemRegAddToRip(pIemCpu, pIemCpu->offOpcode);
1757}
1758
1759
1760/**
1761 * Adds to the stack pointer.
1762 *
1763 * @param pCtx The CPU context which SP/ESP/RSP should be
1764 * updated.
1765 * @param cbToAdd The number of bytes to add.
1766 */
1767DECLINLINE(void) iemRegAddToRsp(PCPUMCTX pCtx, uint8_t cbToAdd)
1768{
1769 if (pCtx->ssHid.Attr.n.u1Long)
1770 pCtx->rsp += cbToAdd;
1771 else if (pCtx->ssHid.Attr.n.u1DefBig)
1772 pCtx->esp += cbToAdd;
1773 else
1774 pCtx->sp += cbToAdd;
1775}
1776
1777
1778/**
1779 * Subtracts from the stack pointer.
1780 *
1781 * @param pCtx The CPU context which SP/ESP/RSP should be
1782 * updated.
1783 * @param cbToSub The number of bytes to subtract.
1784 */
1785DECLINLINE(void) iemRegSubFromRsp(PCPUMCTX pCtx, uint8_t cbToSub)
1786{
1787 if (pCtx->ssHid.Attr.n.u1Long)
1788 pCtx->rsp -= cbToSub;
1789 else if (pCtx->ssHid.Attr.n.u1DefBig)
1790 pCtx->esp -= cbToSub;
1791 else
1792 pCtx->sp -= cbToSub;
1793}
1794
1795
1796/**
1797 * Adds to the temporary stack pointer.
1798 *
1799 * @param pTmpRsp The temporary SP/ESP/RSP to update.
1800 * @param cbToAdd The number of bytes to add.
1801 * @param pCtx Where to get the current stack mode.
1802 */
1803DECLINLINE(void) iemRegAddToRspEx(PRTUINT64U pTmpRsp, uint8_t cbToAdd, PCCPUMCTX pCtx)
1804{
1805 if (pCtx->ssHid.Attr.n.u1Long)
1806 pTmpRsp->u += cbToAdd;
1807 else if (pCtx->ssHid.Attr.n.u1DefBig)
1808 pTmpRsp->DWords.dw0 += cbToAdd;
1809 else
1810 pTmpRsp->Words.w0 += cbToAdd;
1811}
1812
1813
1814/**
1815 * Subtracts from the temporary stack pointer.
1816 *
1817 * @param pTmpRsp The temporary SP/ESP/RSP to update.
1818 * @param cbToSub The number of bytes to subtract.
1819 * @param pCtx Where to get the current stack mode.
1820 */
1821DECLINLINE(void) iemRegSubFromRspEx(PRTUINT64U pTmpRsp, uint8_t cbToSub, PCCPUMCTX pCtx)
1822{
1823 if (pCtx->ssHid.Attr.n.u1Long)
1824 pTmpRsp->u -= cbToSub;
1825 else if (pCtx->ssHid.Attr.n.u1DefBig)
1826 pTmpRsp->DWords.dw0 -= cbToSub;
1827 else
1828 pTmpRsp->Words.w0 -= cbToSub;
1829}
1830
1831
1832/**
1833 * Calculates the effective stack address for a push of the specified size as
1834 * well as the new RSP value (upper bits may be masked).
1835 *
1836 * @returns Effective stack address for the push.
1837 * @param pCtx Where to get the current stack mode.
1838 * @param cbItem The size of the stack item to push.
1839 * @param puNewRsp Where to return the new RSP value.
1840 */
1841DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
1842{
1843 RTUINT64U uTmpRsp;
1844 RTGCPTR GCPtrTop;
1845 uTmpRsp.u = pCtx->rsp;
1846
1847 if (pCtx->ssHid.Attr.n.u1Long)
1848 GCPtrTop = uTmpRsp.u -= cbItem;
1849 else if (pCtx->ssHid.Attr.n.u1DefBig)
1850 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
1851 else
1852 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
1853 *puNewRsp = uTmpRsp.u;
1854 return GCPtrTop;
1855}
1856
1857
1858/**
1859 * Gets the current stack pointer and calculates the value after a pop of the
1860 * specified size.
1861 *
1862 * @returns Current stack pointer.
1863 * @param pCtx Where to get the current stack mode.
1864 * @param cbItem The size of the stack item to pop.
1865 * @param puNewRsp Where to return the new RSP value.
1866 */
1867DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
1868{
1869 RTUINT64U uTmpRsp;
1870 RTGCPTR GCPtrTop;
1871 uTmpRsp.u = pCtx->rsp;
1872
1873 if (pCtx->ssHid.Attr.n.u1Long)
1874 {
1875 GCPtrTop = uTmpRsp.u;
1876 uTmpRsp.u += cbItem;
1877 }
1878 else if (pCtx->ssHid.Attr.n.u1DefBig)
1879 {
1880 GCPtrTop = uTmpRsp.DWords.dw0;
1881 uTmpRsp.DWords.dw0 += cbItem;
1882 }
1883 else
1884 {
1885 GCPtrTop = uTmpRsp.Words.w0;
1886 uTmpRsp.Words.w0 += cbItem;
1887 }
1888 *puNewRsp = uTmpRsp.u;
1889 return GCPtrTop;
1890}
1891
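/*
 * Note on the RSP helpers above (editorial, illustrative only): the stack
 * width follows SS.u1Long / SS.u1DefBig, so only the relevant part of RSP
 * wraps. For example, with a 16-bit stack and SP = 0x0000, pushing a word:
 *
 *     uint64_t uNewRsp;
 *     RTGCPTR  GCPtrTop = iemRegGetRspForPush(pCtx, 2, &uNewRsp);
 *     // GCPtrTop == 0xfffe; only the low 16 bits of uNewRsp changed, the
 *     // upper bits of the original RSP value are preserved by Words.w0.
 */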
1892
1893/**
1894 * Calculates the effective stack address for a push of the specified size as
1895 * well as the new temporary RSP value (upper bits may be masked).
1896 *
1897 * @returns Effective stack address for the push.
1898 * @param pTmpRsp The temporary stack pointer. This is updated.
1899 * @param cbItem The size of the stack item to push.
1900 * @param pCtx Where to get the current stack mode.
1901 */
1902DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
1903{
1904 RTGCPTR GCPtrTop;
1905
1906 if (pCtx->ssHid.Attr.n.u1Long)
1907 GCPtrTop = pTmpRsp->u -= cbItem;
1908 else if (pCtx->ssHid.Attr.n.u1DefBig)
1909 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
1910 else
1911 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
1912 return GCPtrTop;
1913}
1914
1915
1916/**
1917 * Gets the effective stack address for a pop of the specified size and
1918 * calculates and updates the temporary RSP.
1919 *
1920 * @returns Current stack pointer.
1921 * @param pTmpRsp The temporary stack pointer. This is updated.
1922 * @param pCtx Where to get the current stack mode.
1923 * @param cbItem The size of the stack item to pop.
1924 */
1925DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
1926{
1927 RTGCPTR GCPtrTop;
1928 if (pCtx->ssHid.Attr.n.u1Long)
1929 {
1930 GCPtrTop = pTmpRsp->u;
1931 pTmpRsp->u += cbItem;
1932 }
1933 else if (pCtx->ssHid.Attr.n.u1DefBig)
1934 {
1935 GCPtrTop = pTmpRsp->DWords.dw0;
1936 pTmpRsp->DWords.dw0 += cbItem;
1937 }
1938 else
1939 {
1940 GCPtrTop = pTmpRsp->Words.w0;
1941 pTmpRsp->Words.w0 += cbItem;
1942 }
1943 return GCPtrTop;
1944}
1945
1946
1947/**
1948 * Checks if an AMD CPUID feature bit is set.
1949 *
1950 * @returns true / false.
1951 *
1952 * @param pIemCpu The IEM per CPU data.
1953 * @param fEdx The EDX feature bit to test, or 0 when testing ECX.
1954 * @param fEcx The ECX feature bit to test, or 0 when testing EDX.
1955 * @remarks Used via IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX.
1956 */
1957static bool iemRegIsAmdCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
1958{
1959 uint32_t uEax, uEbx, uEcx, uEdx;
1960 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x80000001, &uEax, &uEbx, &uEcx, &uEdx);
1961 return (fEcx && (uEcx & fEcx))
1962 || (fEdx && (uEdx & fEdx));
1963}
1964
1965/** @} */
1966
1967
1968/** @name Memory access.
1969 *
1970 * @{
1971 */
1972
1973
1974/**
1975 * Checks if the given segment can be written to, raising the appropriate
1976 * exception if not.
1977 *
1978 * @returns VBox strict status code.
1979 *
1980 * @param pIemCpu The IEM per CPU data.
1981 * @param pHid Pointer to the hidden register.
1982 * @param iSegReg The register number.
1983 */
1984static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
1985{
1986 if (!pHid->Attr.n.u1Present)
1987 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
1988
1989 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
1990 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
1991 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
1992 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
1993
1994 /** @todo DPL/RPL/CPL? */
1995
1996 return VINF_SUCCESS;
1997}
1998
1999
2000/**
2001 * Checks if the given segment can be read from, raising the appropriate
2002 * exception if not.
2003 *
2004 * @returns VBox strict status code.
2005 *
2006 * @param pIemCpu The IEM per CPU data.
2007 * @param pHid Pointer to the hidden register.
2008 * @param iSegReg The register number.
2009 */
2010static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
2011{
2012 if (!pHid->Attr.n.u1Present)
2013 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
2014
2015 if ( (pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE
2016 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
2017 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
2018
2019 /** @todo DPL/RPL/CPL? */
2020
2021 return VINF_SUCCESS;
2022}
2023
2024
2025/**
2026 * Applies the segment limit, base and attributes.
2027 *
2028 * This may raise a \#GP or \#SS.
2029 *
2030 * @returns VBox strict status code.
2031 *
2032 * @param pIemCpu The IEM per CPU data.
2033 * @param fAccess The kind of access which is being performed.
2034 * @param iSegReg The index of the segment register to apply.
2035 * This is UINT8_MAX if none (for IDT, GDT, LDT,
2036 * TSS, ++).
2037 * @param pGCPtrMem Pointer to the guest memory address to apply
2038 * segmentation to. Input and output parameter.
2039 */
2040static VBOXSTRICTRC iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg,
2041 size_t cbMem, PRTGCPTR pGCPtrMem)
2042{
2043 if (iSegReg == UINT8_MAX)
2044 return VINF_SUCCESS;
2045
2046 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
2047 switch (pIemCpu->enmCpuMode)
2048 {
2049 case IEMMODE_16BIT:
2050 case IEMMODE_32BIT:
2051 {
2052 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
2053 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
2054
2055 Assert(pSel->Attr.n.u1Present);
2056 Assert(pSel->Attr.n.u1DescType);
2057 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
2058 {
2059 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
2060 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
2061 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
2062
2063 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2064 {
2065 /** @todo CPL check. */
2066 }
2067
2068 /*
2069 * There are two kinds of data selectors, normal and expand down.
2070 */
2071 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
2072 {
2073 if ( GCPtrFirst32 > pSel->u32Limit
2074 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
2075 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
2076
2077 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
2078 }
2079 else
2080 {
2081 /** @todo implement expand down segments. */
2082 AssertFailed(/** @todo implement this */);
2083 return VERR_NOT_IMPLEMENTED;
2084 }
2085 }
2086 else
2087 {
2088
2089 /*
2090 * A code selector can usually be used to read through it; writing is
2091 * only permitted in real and V8086 mode.
2092 */
2093 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
2094 || ( (fAccess & IEM_ACCESS_TYPE_READ)
2095 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
2096 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
2097 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
2098
2099 if ( GCPtrFirst32 > pSel->u32Limit
2100 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
2101 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
2102
2103 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2104 {
2105 /** @todo CPL check. */
2106 }
2107
2108 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
2109 }
2110 return VINF_SUCCESS;
2111 }
2112
2113 case IEMMODE_64BIT:
2114 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
2115 *pGCPtrMem += pSel->u64Base;
2116 return VINF_SUCCESS;
2117
2118 default:
2119 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
2120 }
2121}
2122
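/*
 * Illustrative example for iemMemApplySegment (editorial note): for a normal
 * (non-expand-down) data segment with u64Base = 0x10000 and u32Limit = 0xffff,
 * a 2-byte read at offset 0xffff fails the GCPtrLast32 check and faults via
 * iemRaiseSelectorBounds, while a 2-byte read at offset 0x1000 passes and
 * *pGCPtrMem becomes 0x11000 (the offset plus the 32-bit truncated base).
 */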
2123
2124/**
2125 * Translates a virtual address to a physical address and checks if we
2126 * can access the page as specified.
2127 *
2128 * @param pIemCpu The IEM per CPU data.
2129 * @param GCPtrMem The virtual address.
2130 * @param fAccess The intended access.
2131 * @param pGCPhysMem Where to return the physical address.
2132 */
2133static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
2134 PRTGCPHYS pGCPhysMem)
2135{
2136 /** @todo Need a different PGM interface here. We're currently using
2137 * generic / REM interfaces. This won't cut it for R0 & RC. */
2138 RTGCPHYS GCPhys;
2139 uint64_t fFlags;
2140 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
2141 if (RT_FAILURE(rc))
2142 {
2143 /** @todo Check unassigned memory in unpaged mode. */
2144 *pGCPhysMem = NIL_RTGCPHYS;
2145 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
2146 }
2147
2148 if ( (fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US)
2149 && ( ( (fAccess & IEM_ACCESS_TYPE_WRITE) /* Write to read only memory? */
2150 && !(fFlags & X86_PTE_RW)
2151 && ( pIemCpu->uCpl != 0
2152 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)) )
2153 || ( !(fFlags & X86_PTE_US) /* Kernel memory */
2154 && pIemCpu->uCpl == 3)
2155 || ( (fAccess & IEM_ACCESS_TYPE_EXEC) /* Executing non-executable memory? */
2156 && (fFlags & X86_PTE_PAE_NX)
2157 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
2158 )
2159 )
2160 {
2161 *pGCPhysMem = NIL_RTGCPHYS;
2162 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
2163 }
2164
2165 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
2166 *pGCPhysMem = GCPhys;
2167 return VINF_SUCCESS;
2168}
2169
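/*
 * Illustrative note (editorial): on success the page offset is merged back
 * into the physical address, e.g. a guest linear address of 0x00401234 that
 * maps to page 0x00654000 results in *pGCPhysMem = 0x00654234. A ring-3
 * write to a page without X86_PTE_RW or X86_PTE_US set instead takes the
 * iemRaisePageFault path with VERR_ACCESS_DENIED.
 */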
2170
2171
2172/**
2173 * Maps a physical page.
2174 *
2175 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
2176 * @param pIemCpu The IEM per CPU data.
2177 * @param GCPhysMem The physical address.
2178 * @param fAccess The intended access.
2179 * @param ppvMem Where to return the mapping address.
2180 */
2181static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem)
2182{
2183#ifdef IEM_VERIFICATION_MODE
2184 /* Force the alternative path so we can ignore writes. */
2185 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
2186 return VERR_PGM_PHYS_TLB_CATCH_ALL;
2187#endif
2188
2189 /*
2190     * If we can map the page without trouble, we can process the block
2191     * until the end of the current page.
2192 */
2193 /** @todo need some better API. */
2194 return PGMR3PhysTlbGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu),
2195 GCPhysMem,
2196 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
2197 ppvMem);
2198}
2199
2200
2201/**
2202 * Looks up a memory mapping entry.
2203 *
2204 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
2205 * @param pIemCpu The IEM per CPU data.
2206 * @param pvMem The memory address.
2207 * @param fAccess The kind of access to match.
2208 */
2209DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
2210{
2211 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
2212 if ( pIemCpu->aMemMappings[0].pv == pvMem
2213 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
2214 return 0;
2215 if ( pIemCpu->aMemMappings[1].pv == pvMem
2216 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
2217 return 1;
2218 if ( pIemCpu->aMemMappings[2].pv == pvMem
2219 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
2220 return 2;
2221 return VERR_NOT_FOUND;
2222}
2223
2224
2225/**
2226 * Finds a free memmap entry when using iNextMapping doesn't work.
2227 *
2228 * @returns Memory mapping index, 1024 on failure.
2229 * @param pIemCpu The IEM per CPU data.
2230 */
2231static unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
2232{
2233 /*
2234 * The easy case.
2235 */
2236 if (pIemCpu->cActiveMappings == 0)
2237 {
2238 pIemCpu->iNextMapping = 1;
2239 return 0;
2240 }
2241
2242 /* There should be enough mappings for all instructions. */
2243 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
2244
2245 AssertFailed(); /** @todo implement me. */
2246 return 1024;
2247
2248}
2249
2250
2251/**
2252 * Commits a bounce buffer that needs writing back and unmaps it.
2253 *
2254 * @returns Strict VBox status code.
2255 * @param pIemCpu The IEM per CPU data.
2256 * @param iMemMap The index of the buffer to commit.
2257 */
2258static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
2259{
2260 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
2261 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
2262
2263 /*
2264 * Do the writing.
2265 */
2266 int rc;
2267 if (!pIemCpu->aMemBbMappings[iMemMap].fUnassigned && IEM_VERIFICATION_ENABLED(pIemCpu))
2268 {
2269 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
2270 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
2271 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
2272 if (!pIemCpu->fByPassHandlers)
2273 {
2274 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
2275 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
2276 pbBuf,
2277 cbFirst);
2278 if (cbSecond && rc == VINF_SUCCESS)
2279 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
2280 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
2281 pbBuf + cbFirst,
2282 cbSecond);
2283 }
2284 else
2285 {
2286 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
2287 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
2288 pbBuf,
2289 cbFirst);
2290 if (cbSecond && rc == VINF_SUCCESS)
2291 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
2292 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
2293 pbBuf + cbFirst,
2294 cbSecond);
2295 }
2296 }
2297 else
2298 rc = VINF_SUCCESS;
2299
2300#ifdef IEM_VERIFICATION_MODE
2301 /*
2302 * Record the write(s).
2303 */
2304 if (!pIemCpu->fNoRem)
2305 {
2306 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
2307 if (pEvtRec)
2308 {
2309 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
2310 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
2311 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
2312 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
2313 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
2314 *pIemCpu->ppIemEvtRecNext = pEvtRec;
2315 }
2316 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
2317 {
2318 pEvtRec = iemVerifyAllocRecord(pIemCpu);
2319 if (pEvtRec)
2320 {
2321 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
2322 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
2323 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
2324 memcpy(pEvtRec->u.RamWrite.ab,
2325 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
2326 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
2327 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
2328 *pIemCpu->ppIemEvtRecNext = pEvtRec;
2329 }
2330 }
2331 }
2332#endif
2333
2334 /*
2335 * Free the mapping entry.
2336 */
2337 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
2338 Assert(pIemCpu->cActiveMappings != 0);
2339 pIemCpu->cActiveMappings--;
2340 return rc;
2341}
2342
2343
2344/**
2345 * iemMemMap worker that deals with a request crossing pages.
2346 */
2347static VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem,
2348 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
2349{
2350 /*
2351 * Do the address translations.
2352 */
2353 RTGCPHYS GCPhysFirst;
2354 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
2355 if (rcStrict != VINF_SUCCESS)
2356 return rcStrict;
2357
2358 RTGCPHYS GCPhysSecond;
2359 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
2360 if (rcStrict != VINF_SUCCESS)
2361 return rcStrict;
2362 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2363
2364 /*
2365     * Read in the current memory content if it's a read or execute access.
2366 */
2367 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
2368 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
2369 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
2370
2371 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC))
2372 {
2373 int rc;
2374 if (!pIemCpu->fByPassHandlers)
2375 {
2376 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbFirstPage);
2377 if (rc != VINF_SUCCESS)
2378 return rc;
2379 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage);
2380 if (rc != VINF_SUCCESS)
2381 return rc;
2382 }
2383 else
2384 {
2385 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbFirstPage);
2386 if (rc != VINF_SUCCESS)
2387 return rc;
2388 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
2389 if (rc != VINF_SUCCESS)
2390 return rc;
2391 }
2392
2393#ifdef IEM_VERIFICATION_MODE
2394 if (!pIemCpu->fNoRem)
2395 {
2396 /*
2397 * Record the reads.
2398 */
2399 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
2400 if (pEvtRec)
2401 {
2402 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
2403 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
2404 pEvtRec->u.RamRead.cb = cbFirstPage;
2405 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
2406 *pIemCpu->ppIemEvtRecNext = pEvtRec;
2407 }
2408 pEvtRec = iemVerifyAllocRecord(pIemCpu);
2409 if (pEvtRec)
2410 {
2411 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
2412 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
2413 pEvtRec->u.RamRead.cb = cbSecondPage;
2414 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
2415 *pIemCpu->ppIemEvtRecNext = pEvtRec;
2416 }
2417 }
2418#endif
2419 }
2420#ifdef VBOX_STRICT
2421 else
2422 memset(pbBuf, 0xcc, cbMem);
2423#endif
2424#ifdef VBOX_STRICT
2425 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
2426 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
2427#endif
2428
2429 /*
2430 * Commit the bounce buffer entry.
2431 */
2432 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
2433 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
2434 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
2435 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
2436 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
2437 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
2438 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
2439 pIemCpu->cActiveMappings++;
2440
2441 *ppvMem = pbBuf;
2442 return VINF_SUCCESS;
2443}
2444
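/*
 * Illustrative note (editorial): the split sizes follow directly from the
 * page offset. A 4-byte access at a linear address ending in 0xffe gives
 * cbFirstPage = PAGE_SIZE - 0xffe = 2 and cbSecondPage = 4 - 2 = 2, so the
 * bounce buffer holds two bytes from each page and the commit path writes
 * them back to GCPhysFirst and GCPhysSecond respectively.
 */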
2445
2446/**
2447 * iemMemMap worker that deals with iemMemPageMap failures.
2448 */
2449static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
2450 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
2451{
2452 /*
2453 * Filter out conditions we can handle and the ones which shouldn't happen.
2454 */
2455 if ( rcMap != VINF_PGM_PHYS_TLB_CATCH_WRITE
2456 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
2457 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
2458 {
2459 AssertReturn(RT_FAILURE_NP(rcMap), VERR_INTERNAL_ERROR_3);
2460 return rcMap;
2461 }
2462 pIemCpu->cPotentialExits++;
2463
2464 /*
2465     * Read in the current memory content if it's a read or execute access.
2466 */
2467 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
2468 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC))
2469 {
2470 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
2471 memset(pbBuf, 0xff, cbMem);
2472 else
2473 {
2474 int rc;
2475 if (!pIemCpu->fByPassHandlers)
2476 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem);
2477 else
2478 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
2479 if (rc != VINF_SUCCESS)
2480 return rc;
2481 }
2482
2483#ifdef IEM_VERIFICATION_MODE
2484 if (!pIemCpu->fNoRem)
2485 {
2486 /*
2487 * Record the read.
2488 */
2489 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
2490 if (pEvtRec)
2491 {
2492 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
2493 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
2494 pEvtRec->u.RamRead.cb = cbMem;
2495 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
2496 *pIemCpu->ppIemEvtRecNext = pEvtRec;
2497 }
2498 }
2499#endif
2500 }
2501#ifdef VBOX_STRICT
2502 else
2503 memset(pbBuf, 0xcc, cbMem);
2504#endif
2505#ifdef VBOX_STRICT
2506 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
2507 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
2508#endif
2509
2510 /*
2511 * Commit the bounce buffer entry.
2512 */
2513 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
2514 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
2515 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
2516 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
2517 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
2518 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
2519 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
2520 pIemCpu->cActiveMappings++;
2521
2522 *ppvMem = pbBuf;
2523 return VINF_SUCCESS;
2524}
2525
2526
2527
2528/**
2529 * Maps the specified guest memory for the given kind of access.
2530 *
2531 * This may be using bounce buffering of the memory if it's crossing a page
2532 * boundary or if there is an access handler installed for any of it. Because
2533 * of lock prefix guarantees, we're in for some extra clutter when this
2534 * happens.
2535 *
2536 * This may raise a \#GP, \#SS, \#PF or \#AC.
2537 *
2538 * @returns VBox strict status code.
2539 *
2540 * @param pIemCpu The IEM per CPU data.
2541 * @param ppvMem Where to return the pointer to the mapped
2542 * memory.
2543 * @param cbMem The number of bytes to map. This is usually 1,
2544 * 2, 4, 6, 8, 12, 16 or 32. When used by string
2545 * operations it can be up to a page.
2546 * @param iSegReg The index of the segment register to use for
2547 * this access. The base and limits are checked.
2548 * Use UINT8_MAX to indicate that no segmentation
2549 * is required (for IDT, GDT and LDT accesses).
2550 * @param GCPtrMem The address of the guest memory.
2551 * @param fAccess How the memory is being accessed. The
2552 * IEM_ACCESS_TYPE_XXX bit is used to figure out
2553 * how to map the memory, while the
2554 * IEM_ACCESS_WHAT_XXX bit is used when raising
2555 * exceptions.
2556 */
2557static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
2558{
2559 /*
2560 * Check the input and figure out which mapping entry to use.
2561 */
2562 Assert(cbMem <= 32);
2563 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
2564
2565 unsigned iMemMap = pIemCpu->iNextMapping;
2566 if (iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings))
2567 {
2568 iMemMap = iemMemMapFindFree(pIemCpu);
2569 AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3);
2570 }
2571
2572 /*
2573 * Map the memory, checking that we can actually access it. If something
2574 * slightly complicated happens, fall back on bounce buffering.
2575 */
2576 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
2577 if (rcStrict != VINF_SUCCESS)
2578 return rcStrict;
2579
2580 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
2581 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
2582
2583 RTGCPHYS GCPhysFirst;
2584 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
2585 if (rcStrict != VINF_SUCCESS)
2586 return rcStrict;
2587
2588 void *pvMem;
2589 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem);
2590 if (rcStrict != VINF_SUCCESS)
2591 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
2592
2593 /*
2594 * Fill in the mapping table entry.
2595 */
2596 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
2597 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
2598 pIemCpu->iNextMapping = iMemMap + 1;
2599 pIemCpu->cActiveMappings++;
2600
2601 *ppvMem = pvMem;
2602 return VINF_SUCCESS;
2603}
2604
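/*
 * Usage sketch (editorial, mirrors the fetch/store helpers below): callers
 * always pair iemMemMap with iemMemCommitAndUnmap so that bounce buffered
 * mappings get written back, e.g.:
 *
 *     uint16_t *pu16Dst;
 *     VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst),
 *                                 X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
 *     if (rc == VINF_SUCCESS)
 *     {
 *         *pu16Dst = u16Value;
 *         rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
 *     }
 */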
2605
2606/**
2607 * Commits the guest memory if bounce buffered and unmaps it.
2608 *
2609 * @returns Strict VBox status code.
2610 * @param pIemCpu The IEM per CPU data.
2611 * @param pvMem The mapping.
2612 * @param fAccess The kind of access.
2613 */
2614static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
2615{
2616 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
2617 AssertReturn(iMemMap >= 0, iMemMap);
2618
2619 /*
2620 * If it's bounce buffered, we need to write back the buffer.
2621 */
2622 if ( (pIemCpu->aMemMappings[iMemMap].fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_TYPE_WRITE))
2623 == (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_TYPE_WRITE))
2624 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
2625
2626 /* Free the entry. */
2627 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
2628 Assert(pIemCpu->cActiveMappings != 0);
2629 pIemCpu->cActiveMappings--;
2630 return VINF_SUCCESS;
2631}
2632
2633
2634/**
2635 * Fetches a data byte.
2636 *
2637 * @returns Strict VBox status code.
2638 * @param pIemCpu The IEM per CPU data.
2639 * @param pu8Dst Where to return the byte.
2640 * @param iSegReg The index of the segment register to use for
2641 * this access. The base and limits are checked.
2642 * @param GCPtrMem The address of the guest memory.
2643 */
2644static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
2645{
2646 /* The lazy approach for now... */
2647 uint8_t const *pu8Src;
2648 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
2649 if (rc == VINF_SUCCESS)
2650 {
2651 *pu8Dst = *pu8Src;
2652 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
2653 }
2654 return rc;
2655}
2656
2657
2658/**
2659 * Fetches a data word.
2660 *
2661 * @returns Strict VBox status code.
2662 * @param pIemCpu The IEM per CPU data.
2663 * @param pu16Dst Where to return the word.
2664 * @param iSegReg The index of the segment register to use for
2665 * this access. The base and limits are checked.
2666 * @param GCPtrMem The address of the guest memory.
2667 */
2668static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
2669{
2670 /* The lazy approach for now... */
2671 uint16_t const *pu16Src;
2672 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
2673 if (rc == VINF_SUCCESS)
2674 {
2675 *pu16Dst = *pu16Src;
2676 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
2677 }
2678 return rc;
2679}
2680
2681
2682/**
2683 * Fetches a data dword.
2684 *
2685 * @returns Strict VBox status code.
2686 * @param pIemCpu The IEM per CPU data.
2687 * @param pu32Dst Where to return the dword.
2688 * @param iSegReg The index of the segment register to use for
2689 * this access. The base and limits are checked.
2690 * @param GCPtrMem The address of the guest memory.
2691 */
2692static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
2693{
2694 /* The lazy approach for now... */
2695 uint32_t const *pu32Src;
2696 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
2697 if (rc == VINF_SUCCESS)
2698 {
2699 *pu32Dst = *pu32Src;
2700 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
2701 }
2702 return rc;
2703}
2704
2705
2706/**
2707 * Fetches a data dword and sign extends it to a qword.
2708 *
2709 * @returns Strict VBox status code.
2710 * @param pIemCpu The IEM per CPU data.
2711 * @param pu64Dst Where to return the sign extended value.
2712 * @param iSegReg The index of the segment register to use for
2713 * this access. The base and limits are checked.
2714 * @param GCPtrMem The address of the guest memory.
2715 */
2716static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
2717{
2718 /* The lazy approach for now... */
2719 int32_t const *pi32Src;
2720 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
2721 if (rc == VINF_SUCCESS)
2722 {
2723 *pu64Dst = *pi32Src;
2724 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
2725 }
2726#ifdef __GNUC__ /* warning: GCC may be a royal pain */
2727 else
2728 *pu64Dst = 0;
2729#endif
2730 return rc;
2731}
2732
2733
2734/**
2735 * Fetches a data qword.
2736 *
2737 * @returns Strict VBox status code.
2738 * @param pIemCpu The IEM per CPU data.
2739 * @param pu64Dst Where to return the qword.
2740 * @param iSegReg The index of the segment register to use for
2741 * this access. The base and limits are checked.
2742 * @param GCPtrMem The address of the guest memory.
2743 */
2744static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
2745{
2746 /* The lazy approach for now... */
2747 uint64_t const *pu64Src;
2748 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
2749 if (rc == VINF_SUCCESS)
2750 {
2751 *pu64Dst = *pu64Src;
2752 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
2753 }
2754 return rc;
2755}
2756
2757
2758/**
2759 * Fetches a descriptor register (lgdt, lidt).
2760 *
2761 * @returns Strict VBox status code.
2762 * @param pIemCpu The IEM per CPU data.
2763 * @param pcbLimit Where to return the limit.
2764 * @param pGCPtrBase Where to return the base.
2765 * @param iSegReg The index of the segment register to use for
2766 * this access. The base and limits are checked.
2767 * @param GCPtrMem The address of the guest memory.
2768 * @param enmOpSize The effective operand size.
2769 */
2770static VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase,
2771 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
2772{
2773 uint8_t const *pu8Src;
2774 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
2775 (void **)&pu8Src,
2776 enmOpSize == IEMMODE_64BIT
2777 ? 2 + 8
2778 : enmOpSize == IEMMODE_32BIT
2779 ? 2 + 4
2780 : 2 + 3,
2781 iSegReg,
2782 GCPtrMem,
2783 IEM_ACCESS_DATA_R);
2784 if (rcStrict == VINF_SUCCESS)
2785 {
2786 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
2787 switch (enmOpSize)
2788 {
2789 case IEMMODE_16BIT:
2790 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
2791 break;
2792 case IEMMODE_32BIT:
2793 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
2794 break;
2795 case IEMMODE_64BIT:
2796 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
2797 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
2798 break;
2799
2800 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2801 }
2802 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
2803 }
2804 return rcStrict;
2805}
2806
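/*
 * Illustrative note (editorial): the memory operand decoded above is laid
 * out as a 16-bit limit followed by the base, so for a 32-bit operand size
 * the bytes are:
 *
 *     offset 0..1   limit  (pu8Src[0], pu8Src[1])
 *     offset 2..5   base   (pu8Src[2] .. pu8Src[5])
 *
 * while the 16-bit form only uses three base bytes and the 64-bit form uses
 * eight.
 */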
2807
2808
2809/**
2810 * Stores a data byte.
2811 *
2812 * @returns Strict VBox status code.
2813 * @param pIemCpu The IEM per CPU data.
2814 * @param iSegReg The index of the segment register to use for
2815 * this access. The base and limits are checked.
2816 * @param GCPtrMem The address of the guest memory.
2817 * @param u8Value The value to store.
2818 */
2819static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
2820{
2821 /* The lazy approach for now... */
2822 uint8_t *pu8Dst;
2823 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
2824 if (rc == VINF_SUCCESS)
2825 {
2826 *pu8Dst = u8Value;
2827 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
2828 }
2829 return rc;
2830}
2831
2832
2833/**
2834 * Stores a data word.
2835 *
2836 * @returns Strict VBox status code.
2837 * @param pIemCpu The IEM per CPU data.
2838 * @param iSegReg The index of the segment register to use for
2839 * this access. The base and limits are checked.
2840 * @param GCPtrMem The address of the guest memory.
2841 * @param u16Value The value to store.
2842 */
2843static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
2844{
2845 /* The lazy approach for now... */
2846 uint16_t *pu16Dst;
2847 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
2848 if (rc == VINF_SUCCESS)
2849 {
2850 *pu16Dst = u16Value;
2851 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
2852 }
2853 return rc;
2854}
2855
2856
2857/**
2858 * Stores a data dword.
2859 *
2860 * @returns Strict VBox status code.
2861 * @param pIemCpu The IEM per CPU data.
2862 * @param iSegReg The index of the segment register to use for
2863 * this access. The base and limits are checked.
2864 * @param GCPtrMem The address of the guest memory.
2865 * @param u32Value The value to store.
2866 */
2867static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
2868{
2869 /* The lazy approach for now... */
2870 uint32_t *pu32Dst;
2871 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
2872 if (rc == VINF_SUCCESS)
2873 {
2874 *pu32Dst = u32Value;
2875 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
2876 }
2877 return rc;
2878}
2879
2880
2881/**
2882 * Stores a data qword.
2883 *
2884 * @returns Strict VBox status code.
2885 * @param pIemCpu The IEM per CPU data.
2886 * @param iSegReg The index of the segment register to use for
2887 * this access. The base and limits are checked.
2888 * @param GCPtrMem The address of the guest memory.
2889 * @param u64Value The value to store.
2890 */
2891static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
2892{
2893 /* The lazy approach for now... */
2894 uint64_t *pu64Dst;
2895 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
2896 if (rc == VINF_SUCCESS)
2897 {
2898 *pu64Dst = u64Value;
2899 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
2900 }
2901 return rc;
2902}
2903
2904
2905/**
2906 * Pushes a word onto the stack.
2907 *
2908 * @returns Strict VBox status code.
2909 * @param pIemCpu The IEM per CPU data.
2910 * @param u16Value The value to push.
2911 */
2912static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
2913{
2914    /* Decrement the stack pointer. */
2915 uint64_t uNewRsp;
2916 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2917 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 2, &uNewRsp);
2918
2919 /* Write the word the lazy way. */
2920 uint16_t *pu16Dst;
2921 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
2922 if (rc == VINF_SUCCESS)
2923 {
2924 *pu16Dst = u16Value;
2925 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
2926 }
2927
2928    /* Commit the new RSP value unless an access handler made trouble. */
2929 if (rc == VINF_SUCCESS)
2930 pCtx->rsp = uNewRsp;
2931
2932 return rc;
2933}
2934
2935
2936/**
2937 * Pushes a dword onto the stack.
2938 *
2939 * @returns Strict VBox status code.
2940 * @param pIemCpu The IEM per CPU data.
2941 * @param u32Value The value to push.
2942 */
2943static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
2944{
2945    /* Decrement the stack pointer. */
2946 uint64_t uNewRsp;
2947 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2948 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 4, &uNewRsp);
2949
2950    /* Write the dword the lazy way. */
2951 uint32_t *pu32Dst;
2952 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
2953 if (rc == VINF_SUCCESS)
2954 {
2955 *pu32Dst = u32Value;
2956 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
2957 }
2958
2959    /* Commit the new RSP value unless an access handler made trouble. */
2960 if (rc == VINF_SUCCESS)
2961 pCtx->rsp = uNewRsp;
2962
2963 return rc;
2964}
2965
2966
2967/**
2968 * Pushes a qword onto the stack.
2969 *
2970 * @returns Strict VBox status code.
2971 * @param pIemCpu The IEM per CPU data.
2972 * @param u64Value The value to push.
2973 */
2974static VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
2975{
2976    /* Decrement the stack pointer. */
2977 uint64_t uNewRsp;
2978 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2979 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 8, &uNewRsp);
2980
2981    /* Write the qword the lazy way. */
2982 uint64_t *pu64Dst;
2983 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
2984 if (rc == VINF_SUCCESS)
2985 {
2986 *pu64Dst = u64Value;
2987 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
2988 }
2989
2990    /* Commit the new RSP value unless an access handler made trouble. */
2991 if (rc == VINF_SUCCESS)
2992 pCtx->rsp = uNewRsp;
2993
2994 return rc;
2995}
2996
2997
2998/**
2999 * Pops a word from the stack.
3000 *
3001 * @returns Strict VBox status code.
3002 * @param pIemCpu The IEM per CPU data.
3003 * @param pu16Value Where to store the popped value.
3004 */
3005static VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
3006{
3007 /* Increment the stack pointer. */
3008 uint64_t uNewRsp;
3009 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3010 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 2, &uNewRsp);
3011
3012    /* Read the word the lazy way. */
3013 uint16_t const *pu16Src;
3014 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
3015 if (rc == VINF_SUCCESS)
3016 {
3017 *pu16Value = *pu16Src;
3018 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
3019
3020 /* Commit the new RSP value. */
3021 if (rc == VINF_SUCCESS)
3022 pCtx->rsp = uNewRsp;
3023 }
3024
3025 return rc;
3026}
3027
3028
3029/**
3030 * Pops a dword from the stack.
3031 *
3032 * @returns Strict VBox status code.
3033 * @param pIemCpu The IEM per CPU data.
3034 * @param pu32Value Where to store the popped value.
3035 */
3036static VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
3037{
3038 /* Increment the stack pointer. */
3039 uint64_t uNewRsp;
3040 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3041 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 4, &uNewRsp);
3042
3043    /* Read the dword the lazy way. */
3044 uint32_t const *pu32Src;
3045 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
3046 if (rc == VINF_SUCCESS)
3047 {
3048 *pu32Value = *pu32Src;
3049 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
3050
3051 /* Commit the new RSP value. */
3052 if (rc == VINF_SUCCESS)
3053 pCtx->rsp = uNewRsp;
3054 }
3055
3056 return rc;
3057}
3058
3059
3060/**
3061 * Pops a qword from the stack.
3062 *
3063 * @returns Strict VBox status code.
3064 * @param pIemCpu The IEM per CPU data.
3065 * @param pu64Value Where to store the popped value.
3066 */
3067static VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
3068{
3069 /* Increment the stack pointer. */
3070 uint64_t uNewRsp;
3071 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3072 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 8, &uNewRsp);
3073
3074    /* Read the qword the lazy way. */
3075 uint64_t const *pu64Src;
3076 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
3077 if (rc == VINF_SUCCESS)
3078 {
3079 *pu64Value = *pu64Src;
3080 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
3081
3082 /* Commit the new RSP value. */
3083 if (rc == VINF_SUCCESS)
3084 pCtx->rsp = uNewRsp;
3085 }
3086
3087 return rc;
3088}
3089
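/*
 * Usage sketch (editorial, illustrative only; the real users live in the
 * included IEMAllCImpl.cpp.h): a 16-bit near-call style sequence would do
 * something along these lines:
 *
 *     uint16_t const uOldIp   = (uint16_t)pCtx->rip;
 *     VBOXSTRICTRC   rcStrict = iemMemStackPushU16(pIemCpu, uOldIp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *
 * The push helper only commits RSP itself when the memory write succeeded.
 */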
3090
3091/**
3092 * Pushes a word onto the stack, using a temporary stack pointer.
3093 *
3094 * @returns Strict VBox status code.
3095 * @param pIemCpu The IEM per CPU data.
3096 * @param u16Value The value to push.
3097 * @param pTmpRsp Pointer to the temporary stack pointer.
3098 */
3099static VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
3100{
3101    /* Decrement the stack pointer. */
3102 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3103 RTUINT64U NewRsp = *pTmpRsp;
3104 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 2, pCtx);
3105
3106 /* Write the word the lazy way. */
3107 uint16_t *pu16Dst;
3108 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
3109 if (rc == VINF_SUCCESS)
3110 {
3111 *pu16Dst = u16Value;
3112 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
3113 }
3114
3115    /* Commit the new RSP value unless an access handler made trouble. */
3116 if (rc == VINF_SUCCESS)
3117 *pTmpRsp = NewRsp;
3118
3119 return rc;
3120}
3121
3122
3123/**
3124 * Pushes a dword onto the stack, using a temporary stack pointer.
3125 *
3126 * @returns Strict VBox status code.
3127 * @param pIemCpu The IEM per CPU data.
3128 * @param u32Value The value to push.
3129 * @param pTmpRsp Pointer to the temporary stack pointer.
3130 */
3131static VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
3132{
3133    /* Decrement the stack pointer. */
3134 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3135 RTUINT64U NewRsp = *pTmpRsp;
3136 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 4, pCtx);
3137
3138    /* Write the dword the lazy way. */
3139 uint32_t *pu32Dst;
3140 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
3141 if (rc == VINF_SUCCESS)
3142 {
3143 *pu32Dst = u32Value;
3144 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
3145 }
3146
3147    /* Commit the new RSP value unless an access handler made trouble. */
3148 if (rc == VINF_SUCCESS)
3149 *pTmpRsp = NewRsp;
3150
3151 return rc;
3152}
3153
3154
3155/**
3156 * Pushes a qword onto the stack, using a temporary stack pointer.
3157 *
3158 * @returns Strict VBox status code.
3159 * @param pIemCpu The IEM per CPU data.
3160 * @param u64Value The value to push.
3161 * @param pTmpRsp Pointer to the temporary stack pointer.
3162 */
3163static VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
3164{
3165    /* Decrement the stack pointer. */
3166 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3167 RTUINT64U NewRsp = *pTmpRsp;
3168 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 8, pCtx);
3169
3170    /* Write the qword the lazy way. */
3171 uint64_t *pu64Dst;
3172 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
3173 if (rc == VINF_SUCCESS)
3174 {
3175 *pu64Dst = u64Value;
3176 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
3177 }
3178
3179    /* Commit the new RSP value unless an access handler made trouble. */
3180 if (rc == VINF_SUCCESS)
3181 *pTmpRsp = NewRsp;
3182
3183 return rc;
3184}
3185
3186
3187/**
3188 * Pops a word from the stack, using a temporary stack pointer.
3189 *
3190 * @returns Strict VBox status code.
3191 * @param pIemCpu The IEM per CPU data.
3192 * @param pu16Value Where to store the popped value.
3193 * @param pTmpRsp Pointer to the temporary stack pointer.
3194 */
3195static VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
3196{
3197 /* Increment the stack pointer. */
3198 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3199 RTUINT64U NewRsp = *pTmpRsp;
3200 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 2, pCtx);
3201
3202    /* Read the word the lazy way. */
3203 uint16_t const *pu16Src;
3204 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
3205 if (rc == VINF_SUCCESS)
3206 {
3207 *pu16Value = *pu16Src;
3208 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
3209
3210 /* Commit the new RSP value. */
3211 if (rc == VINF_SUCCESS)
3212 *pTmpRsp = NewRsp;
3213 }
3214
3215 return rc;
3216}
3217
3218
3219/**
3220 * Pops a dword from the stack, using a temporary stack pointer.
3221 *
3222 * @returns Strict VBox status code.
3223 * @param pIemCpu The IEM per CPU data.
3224 * @param pu32Value Where to store the popped value.
3225 * @param pTmpRsp Pointer to the temporary stack pointer.
3226 */
3227static VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
3228{
3229 /* Increment the stack pointer. */
3230 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3231 RTUINT64U NewRsp = *pTmpRsp;
3232 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 4, pCtx);
3233
3234    /* Read the dword the lazy way. */
3235 uint32_t const *pu32Src;
3236 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
3237 if (rc == VINF_SUCCESS)
3238 {
3239 *pu32Value = *pu32Src;
3240 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
3241
3242 /* Commit the new RSP value. */
3243 if (rc == VINF_SUCCESS)
3244 *pTmpRsp = NewRsp;
3245 }
3246
3247 return rc;
3248}
3249
3250
3251/**
3252 * Pops a qword from the stack, using a temporary stack pointer.
3253 *
3254 * @returns Strict VBox status code.
3255 * @param pIemCpu The IEM per CPU data.
3256 * @param pu64Value Where to store the popped value.
3257 * @param pTmpRsp Pointer to the temporary stack pointer.
3258 */
3259static VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
3260{
3261 /* Increment the stack pointer. */
3262 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3263 RTUINT64U NewRsp = *pTmpRsp;
3264 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);
3265
3266    /* Read the qword the lazy way. */
3267 uint64_t const *pu64Src;
3268 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
3269 if (rcStrict == VINF_SUCCESS)
3270 {
3271 *pu64Value = *pu64Src;
3272 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
3273
3274 /* Commit the new RSP value. */
3275 if (rcStrict == VINF_SUCCESS)
3276 *pTmpRsp = NewRsp;
3277 }
3278
3279 return rcStrict;
3280}
3281
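/*
 * Design note (editorial): the *Ex variants above work on a caller provided
 * RTUINT64U instead of pCtx->rsp, so multi-value sequences (iret style pops
 * of IP, CS and FLAGS, for instance) can be carried out on a temporary stack
 * pointer and only committed once every access has succeeded, e.g.:
 *
 *     RTUINT64U TmpRsp;
 *     TmpRsp.u = pCtx->rsp;
 *     rcStrict = iemMemStackPopU16Ex(pIemCpu, &uNewIp, &TmpRsp);
 *     if (rcStrict == VINF_SUCCESS)
 *         rcStrict = iemMemStackPopU16Ex(pIemCpu, &uNewCs, &TmpRsp);
 *     if (rcStrict == VINF_SUCCESS)
 *         pCtx->rsp = TmpRsp.u;   (commit only after both pops succeeded)
 *
 * (uNewIp and uNewCs are hypothetical locals for the sake of the example.)
 */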
3282
3283/**
3284 * Begin a special stack push (used by interrupts, exceptions and such).
3285 *
3286 * This will raise \#SS or \#PF if appropriate.
3287 *
3288 * @returns Strict VBox status code.
3289 * @param pIemCpu The IEM per CPU data.
3290 * @param cbMem The number of bytes to push onto the stack.
3291 * @param ppvMem Where to return the pointer to the stack memory.
3292 * As with the other memory functions this could be
3293 * direct access or bounce buffered access, so
3294 * don't commit the register value until the commit call
3295 * succeeds.
3296 * @param puNewRsp Where to return the new RSP value. This must be
3297 * passed unchanged to
3298 * iemMemStackPushCommitSpecial().
3299 */
3300static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
3301{
3302 Assert(cbMem < UINT8_MAX);
3303 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3304 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, (uint8_t)cbMem, puNewRsp);
3305 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
3306}
3307
3308
3309/**
3310 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
3311 *
3312 * This will update the rSP.
3313 *
3314 * @returns Strict VBox status code.
3315 * @param pIemCpu The IEM per CPU data.
3316 * @param pvMem The pointer returned by
3317 * iemMemStackPushBeginSpecial().
3318 * @param uNewRsp The new RSP value returned by
3319 * iemMemStackPushBeginSpecial().
3320 */
3321static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
3322{
3323 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
3324 if (rcStrict == VINF_SUCCESS)
3325 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
3326 return rcStrict;
3327}
3328
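/*
 * Usage sketch (editorial, illustrative only): exception and interrupt
 * dispatching would use the begin/commit pair so the stack frame only
 * becomes architecturally visible when every byte could be written:
 *
 *     void     *pvStackFrame;
 *     uint64_t  uNewRsp;
 *     rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, &pvStackFrame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     ... fill in the 6 byte frame via pvStackFrame ...
 *     rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pvStackFrame, uNewRsp);
 */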
3329
3330/**
3331 * Begin a special stack pop (used by iret, retf and such).
3332 *
3333 * This will raise \#SS or \#PF if appropriate.
3334 *
3335 * @returns Strict VBox status code.
3336 * @param pIemCpu The IEM per CPU data.
3337 * @param cbMem The number of bytes to pop off the stack.
3338 * @param ppvMem Where to return the pointer to the stack memory.
3339 * @param puNewRsp Where to return the new RSP value. This must be
3340 * passed unchanged to
3341 * iemMemStackPopCommitSpecial().
3342 */
3343static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
3344{
3345 Assert(cbMem < UINT8_MAX);
3346 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3347 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, (uint8_t)cbMem, puNewRsp);
3348 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
3349}
3350
3351
3352/**
3353 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
3354 *
3355 * This will update the rSP.
3356 *
3357 * @returns Strict VBox status code.
3358 * @param pIemCpu The IEM per CPU data.
3359 * @param pvMem The pointer returned by
3360 * iemMemStackPopBeginSpecial().
3361 * @param uNewRsp The new RSP value returned by
3362 * iemMemStackPopBeginSpecial().
3363 */
3364static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
3365{
3366 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
3367 if (rcStrict == VINF_SUCCESS)
3368 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
3369 return rcStrict;
3370}
3371
3372
3373/**
3374 * Fetches a descriptor table entry.
3375 *
3376 * @returns Strict VBox status code.
3377 * @param pIemCpu The IEM per CPU.
3378 * @param pDesc Where to return the descriptor table entry.
3379 * @param uSel The selector which table entry to fetch.
3380 */
3381static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel)
3382{
3383 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3384
3385 /** @todo did the 286 require all 8 bytes to be accessible? */
3386 /*
3387 * Get the selector table base and check bounds.
3388 */
3389 RTGCPTR GCPtrBase;
3390 if (uSel & X86_SEL_LDT)
3391 {
3392 if ( !pCtx->ldtrHid.Attr.n.u1Present
3393 || (uSel | 0x7U) > pCtx->ldtrHid.u32Limit )
3394 {
3395 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
3396 uSel, pCtx->ldtrHid.u32Limit, pCtx->ldtr));
3397 /** @todo is this the right exception? */
3398 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
3399 }
3400
3401 Assert(pCtx->ldtrHid.Attr.n.u1Present);
3402 GCPtrBase = pCtx->ldtrHid.u64Base;
3403 }
3404 else
3405 {
3406 if ((uSel | 0x7U) > pCtx->gdtr.cbGdt)
3407 {
3408 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
3409 /** @todo is this the right exception? */
3410 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
3411 }
3412 GCPtrBase = pCtx->gdtr.pGdt;
3413 }
3414
3415 /*
3416 * Read the legacy descriptor and maybe the long mode extensions if
3417 * required.
3418 */
3419 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
3420 if (rcStrict == VINF_SUCCESS)
3421 {
3422 if ( !IEM_IS_LONG_MODE(pIemCpu)
3423 || pDesc->Legacy.Gen.u1DescType)
3424 pDesc->Long.au64[1] = 0;
3425 else if ((uint32_t)(uSel & X86_SEL_MASK) + 15 < (uSel & X86_SEL_LDT ? pCtx->ldtrHid.u32Limit : pCtx->gdtr.cbGdt))
3426            rcStrict = iemMemFetchDataU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8); /* upper half of the 16 byte system descriptor */
3427 else
3428 {
3429 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
3430 /** @todo is this the right exception? */
3431 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
3432 }
3433 }
3434 return rcStrict;
3435}
3436
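/*
 * Usage sketch (editorial, illustrative only): segment register loads
 * typically fetch the descriptor first and, once it has been validated and
 * loaded, mark it as accessed using the helper that follows:
 *
 *     IEMSELDESC   Desc;
 *     VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     ... check DPL/RPL/type and load the hidden register ...
 *     if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
 *         rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
 */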
3437
3438/**
3439 * Marks the selector descriptor as accessed (only non-system descriptors).
3440 *
3441 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
3442 * will therefore skip the limit checks.
3443 *
3444 * @returns Strict VBox status code.
3445 * @param pIemCpu The IEM per CPU.
3446 * @param uSel The selector.
3447 */
3448static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
3449{
3450 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3451
3452 /*
3453 * Get the selector table base and check bounds.
3454 */
3455 RTGCPTR GCPtr = uSel & X86_SEL_LDT
3456 ? pCtx->ldtrHid.u64Base
3457 : pCtx->gdtr.pGdt;
3458 GCPtr += uSel & X86_SEL_MASK;
3459 GCPtr += 2 + 2;
3460 uint32_t volatile *pu32; /** @todo Does the CPU do a 32-bit or 8-bit access here? */
3461 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_DATA_RW);
3462 if (rcStrict == VINF_SUCCESS)
3463 {
3464        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but is preceded by u8BaseHigh1. */
3465
3466 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_DATA_RW);
3467 }
3468
3469 return rcStrict;
3470}
3471
3472/** @} */
3473
3474
3475/*
3476 * Include the C/C++ implementation of the instructions.
3477 */
3478#include "IEMAllCImpl.cpp.h"
3479
3480
3481
3482/** @name "Microcode" macros.
3483 *
3484 * The idea is that we should be able to use the same code to interpret
3485 * instructions as well as recompiler instructions. Thus this obfuscation.
3486 *
3487 * @{
3488 */
3489#define IEM_MC_BEGIN(cArgs, cLocals) {
3490#define IEM_MC_END() }
3491#define IEM_MC_PAUSE() do {} while (0)
3492#define IEM_MC_CONTINUE() do {} while (0)
3493
3494/** Internal macro. */
3495#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
3496 do \
3497 { \
3498 VBOXSTRICTRC rcStrict2 = a_Expr; \
3499 if (rcStrict2 != VINF_SUCCESS) \
3500 return rcStrict2; \
3501 } while (0)
3502
3503#define IEM_MC_ADVANCE_RIP() iemRegUpdateRip(pIemCpu)
3504#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
3505#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
3506#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
3507#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
3508#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
3509#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
3510
3511#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
3512
3513#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
3514#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
3515#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
3516#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
3517#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
3518#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
3519 uint32_t a_Name; \
3520 uint32_t *a_pName = &a_Name
3521#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
3522 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
3523
3524#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
3525
3526#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
3527#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
3528#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
3529#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
3530#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
3531#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
3532#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
3533#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
3534#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
3535#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
3536#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
3537#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
3538#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
3539#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
3540#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
3541#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
3542#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
3543
3544#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
3545#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
3546#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
3547#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
3548#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
3549
3550#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
3551#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
3552/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on
3553 * commit. */
3554#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
3555#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
3556#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
3557
3558#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
3559#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
3560#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
3561 do { \
3562 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
3563 *pu32Reg += (a_u32Value); \
 3564 pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
3565 } while (0)
3566#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
3567
3568#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
3569#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
3570#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
3571 do { \
3572 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
3573 *pu32Reg -= (a_u32Value); \
 3574 pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
3575 } while (0)
3576#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
3577
3578#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg))
3579#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg))
3580#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg))
3581#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg))
3582
3583
3584#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
3585#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
3586
3587
3588
3589#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
3590 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
3591#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
3592 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
3593#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
3594 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
3595#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
3596 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
3597#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
3598 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
3599
3600#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
3601 do { \
3602 uint8_t u8Tmp; \
3603 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
3604 (a_u16Dst) = u8Tmp; \
3605 } while (0)
3606#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
3607 do { \
3608 uint8_t u8Tmp; \
3609 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
3610 (a_u32Dst) = u8Tmp; \
3611 } while (0)
3612#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
3613 do { \
3614 uint8_t u8Tmp; \
3615 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
3616 (a_u64Dst) = u8Tmp; \
3617 } while (0)
3618#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
3619 do { \
3620 uint16_t u16Tmp; \
3621 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
3622 (a_u32Dst) = u16Tmp; \
3623 } while (0)
3624#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
3625 do { \
3626 uint16_t u16Tmp; \
3627 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
3628 (a_u64Dst) = u16Tmp; \
3629 } while (0)
3630#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
3631 do { \
3632 uint32_t u32Tmp; \
3633 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
3634 (a_u64Dst) = u32Tmp; \
3635 } while (0)
3636
3637#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
3638 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
3639#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
3640 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
3641#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
3642 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
3643#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
3644 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
3645
3646#define IEM_MC_PUSH_U16(a_u16Value) \
3647 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
3648#define IEM_MC_PUSH_U32(a_u32Value) \
3649 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
3650#define IEM_MC_PUSH_U64(a_u64Value) \
3651 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
3652
3653#define IEM_MC_POP_U16(a_pu16Value) \
3654 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
3655#define IEM_MC_POP_U32(a_pu32Value) \
3656 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
3657#define IEM_MC_POP_U64(a_pu64Value) \
3658 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
3659
3660/** Maps guest memory for direct or bounce buffered access.
3661 * The purpose is to pass it to an operand implementation, thus the a_iArg.
3662 * @remarks May return.
3663 */
3664#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
3665 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
3666
3667/** Maps guest memory for direct or bounce buffered access.
3668 * The purpose is to pass it to an operand implementation, thus the a_iArg.
3669 * @remarks May return.
3670 */
3671#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
3672 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
3673
3674/** Commits the memory and unmaps the guest memory.
3675 * @remarks May return.
3676 */
3677#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
3678 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
3679
3680/** Calculate effective address from R/M. */
3681#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm) \
3682 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), &(a_GCPtrEff)))
3683
3684#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
3685#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
3686#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
3687
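/* Illustrative sketch only -- not a real entry in the opcode tables: a typical
 * read-modify-write memory form would combine the macros above roughly like
 * this. It assumes bRm holds the ModR/M byte, uses a fixed source register
 * (ECX) for brevity, and names an arithmetic worker with the usual
 * (pu32Dst, u32Src, pEFlags) signature iemAImpl_add_u32:
 *
 * @code
 *      IEM_MC_BEGIN(3, 2);
 *      IEM_MC_ARG(uint32_t *, pu32Dst, 0);
 *      IEM_MC_ARG(uint32_t, u32Src, 1);
 *      IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
 *      IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
 *
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
 *      IEM_MC_FETCH_GREG_U32(u32Src, X86_GREG_xCX);
 *      IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_FETCH_EFLAGS(EFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u32, pu32Dst, u32Src, pEFlags);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
 *      IEM_MC_COMMIT_EFLAGS(EFlags);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */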
3688/**
3689 * Defers the rest of the instruction emulation to a C implementation routine
3690 * and returns, only taking the standard parameters.
3691 *
3692 * @param a_pfnCImpl The pointer to the C routine.
3693 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
3694 */
3695#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
3696
3697/**
3698 * Defers the rest of instruction emulation to a C implementation routine and
3699 * returns, taking one argument in addition to the standard ones.
3700 *
3701 * @param a_pfnCImpl The pointer to the C routine.
3702 * @param a0 The argument.
3703 */
3704#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
3705
3706/**
3707 * Defers the rest of the instruction emulation to a C implementation routine
3708 * and returns, taking two arguments in addition to the standard ones.
3709 *
3710 * @param a_pfnCImpl The pointer to the C routine.
3711 * @param a0 The first extra argument.
3712 * @param a1 The second extra argument.
3713 */
3714#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
3715
3716/**
3717 * Defers the rest of the instruction emulation to a C implementation routine
 3718 * and returns, taking three arguments in addition to the standard ones.
3719 *
3720 * @param a_pfnCImpl The pointer to the C routine.
3721 * @param a0 The first extra argument.
3722 * @param a1 The second extra argument.
3723 * @param a2 The third extra argument.
3724 */
3725#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
3726
3727/**
3728 * Defers the rest of the instruction emulation to a C implementation routine
 3729 * and returns, taking five arguments in addition to the standard ones.
3730 *
3731 * @param a_pfnCImpl The pointer to the C routine.
3732 * @param a0 The first extra argument.
3733 * @param a1 The second extra argument.
3734 * @param a2 The third extra argument.
3735 * @param a3 The fourth extra argument.
3736 * @param a4 The fifth extra argument.
3737 */
3738#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
3739
3740/**
3741 * Defers the entire instruction emulation to a C implementation routine and
3742 * returns, only taking the standard parameters.
3743 *
 3744 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
3745 *
3746 * @param a_pfnCImpl The pointer to the C routine.
3747 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
3748 */
3749#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
3750
3751/**
3752 * Defers the entire instruction emulation to a C implementation routine and
3753 * returns, taking one argument in addition to the standard ones.
3754 *
 3755 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
3756 *
3757 * @param a_pfnCImpl The pointer to the C routine.
3758 * @param a0 The argument.
3759 */
3760#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
3761
3762/**
3763 * Defers the entire instruction emulation to a C implementation routine and
3764 * returns, taking two arguments in addition to the standard ones.
3765 *
 3766 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
3767 *
3768 * @param a_pfnCImpl The pointer to the C routine.
3769 * @param a0 The first extra argument.
3770 * @param a1 The second extra argument.
3771 */
3772#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
3773
3774/**
3775 * Defers the entire instruction emulation to a C implementation routine and
3776 * returns, taking three arguments in addition to the standard ones.
3777 *
 3778 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
3779 *
3780 * @param a_pfnCImpl The pointer to the C routine.
3781 * @param a0 The first extra argument.
3782 * @param a1 The second extra argument.
3783 * @param a2 The third extra argument.
3784 */
3785#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
3786
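/* Illustrative sketch only -- a hypothetical decoder (not taken from the real
 * opcode tables) whose semantics live entirely in a C worker. It assumes a
 * matching IEM_CIMPL_DEF_1 worker named iemCImpl_exampleWorker exists and that
 * the effective operand size is the only extra argument it needs:
 *
 * @code
 * FNIEMOP_DEF(iemOp_example)
 * {
 *     // ...decode prefixes / ModR/M as needed, then hand the rest over:
 *     return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_exampleWorker, pIemCpu->enmEffOpSize);
 * }
 * @endcode
 */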
3787#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
3788#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
3789#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
3790#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
3791#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
3792 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
3793 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
3794#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
3795 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
3796 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
3797#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
3798 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
3799 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
3800 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
3801#define IEM_MC_IF_EFL_BIT_NOT_SET_OR_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
3802 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
3803 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
3804 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
3805#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
3806#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
3807#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
3808#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
3809 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
3810 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
3811#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
3812 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
3813 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
3814#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
3815 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
3816 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
3817#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
3818 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
3819 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
3820#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
3821 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
3822 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
3823#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
3824 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
3825 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
3826#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
3827#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
3828#define IEM_MC_ELSE() } else {
3829#define IEM_MC_ENDIF() } do {} while (0)
3830
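/* Illustrative sketch only -- a hypothetical decoder (not taken from the real
 * opcode tables) showing how a complete microcode block composes: a CMC-style
 * instruction that toggles EFLAGS.CF using the conditional macros above.
 *
 * @code
 * FNIEMOP_DEF(iemOp_example_cmc)
 * {
 *     IEM_MC_BEGIN(0, 0);
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF)
 *         IEM_MC_CLEAR_EFL_BIT(X86_EFL_CF);
 *     IEM_MC_ELSE()
 *         IEM_MC_SET_EFL_BIT(X86_EFL_CF);
 *     IEM_MC_ENDIF();
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 *     return VINF_SUCCESS;
 * }
 * @endcode
 */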
3831/** @} */
3832
3833
3834/** @name Opcode Debug Helpers.
3835 * @{
3836 */
3837#ifdef DEBUG
3838# define IEMOP_MNEMONIC(a_szMnemonic) \
3839 Log2(("decode - %04x:%08RGv %s\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, a_szMnemonic))
3840# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
3841 Log2(("decode - %04x:%08RGv %s %s\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, a_szMnemonic, a_szOps))
3842#else
3843# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
3844# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
3845#endif
3846
3847/** @} */
3848
3849
3850/** @name Opcode Helpers.
3851 * @{
3852 */
3853
3854/** The instruction allows no lock prefixing (in this encoding), throw #UD if
3855 * lock prefixed. */
3856#define IEMOP_HLP_NO_LOCK_PREFIX() \
3857 do \
3858 { \
3859 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
3860 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
3861 } while (0)
3862
3863/** The instruction is not available in 64-bit mode, throw #UD if we're in
3864 * 64-bit mode. */
3865#define IEMOP_HLP_NO_64BIT() \
3866 do \
3867 { \
 3868 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
3869 return IEMOP_RAISE_INVALID_OPCODE(); \
3870 } while (0)
3871
3872/** The instruction defaults to 64-bit operand size when in 64-bit mode. */
3873#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
3874 do \
3875 { \
3876 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
3877 iemRecalEffOpSize64Default(pIemCpu); \
3878 } while (0)
3879
3880
3881
3882/**
3883 * Calculates the effective address of a ModR/M memory operand.
3884 *
3885 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
3886 *
3887 * @return Strict VBox status code.
3888 * @param pIemCpu The IEM per CPU data.
3889 * @param bRm The ModRM byte.
3890 * @param pGCPtrEff Where to return the effective address.
3891 */
3892static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, PRTGCPTR pGCPtrEff)
3893{
3894 LogFlow(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
3895 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3896#define SET_SS_DEF() \
3897 do \
3898 { \
3899 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
3900 pIemCpu->iEffSeg = X86_SREG_SS; \
3901 } while (0)
3902
3903/** @todo Check the effective address size crap! */
3904 switch (pIemCpu->enmEffAddrMode)
3905 {
3906 case IEMMODE_16BIT:
3907 {
3908 uint16_t u16EffAddr;
3909
3910 /* Handle the disp16 form with no registers first. */
3911 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
3912 IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16EffAddr);
3913 else
3914 {
 3915 /* Get the displacement. */
3916 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
3917 {
3918 case 0: u16EffAddr = 0; break;
3919 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(pIemCpu, &u16EffAddr); break;
3920 case 2: IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16EffAddr); break;
3921 default: AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
3922 }
3923
3924 /* Add the base and index registers to the disp. */
3925 switch (bRm & X86_MODRM_RM_MASK)
3926 {
3927 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
3928 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
3929 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
3930 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
3931 case 4: u16EffAddr += pCtx->si; break;
3932 case 5: u16EffAddr += pCtx->di; break;
3933 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
3934 case 7: u16EffAddr += pCtx->bx; break;
3935 }
3936 }
3937
3938 *pGCPtrEff = u16EffAddr;
3939 LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#06RGv\n", *pGCPtrEff));
3940 return VINF_SUCCESS;
3941 }
3942
3943 case IEMMODE_32BIT:
3944 {
3945 uint32_t u32EffAddr;
3946
3947 /* Handle the disp32 form with no registers first. */
3948 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
3949 IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32EffAddr);
3950 else
3951 {
3952 /* Get the register (or SIB) value. */
3953 switch ((bRm & X86_MODRM_RM_MASK))
3954 {
3955 case 0: u32EffAddr = pCtx->eax; break;
3956 case 1: u32EffAddr = pCtx->ecx; break;
3957 case 2: u32EffAddr = pCtx->edx; break;
3958 case 3: u32EffAddr = pCtx->ebx; break;
3959 case 4: /* SIB */
3960 {
3961 uint8_t bSib; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bSib);
3962
3963 /* Get the index and scale it. */
3964 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
3965 {
3966 case 0: u32EffAddr = pCtx->eax; break;
3967 case 1: u32EffAddr = pCtx->ecx; break;
3968 case 2: u32EffAddr = pCtx->edx; break;
3969 case 3: u32EffAddr = pCtx->ebx; break;
3970 case 4: u32EffAddr = 0; /*none */ break;
3971 case 5: u32EffAddr = pCtx->ebp; break;
3972 case 6: u32EffAddr = pCtx->esi; break;
3973 case 7: u32EffAddr = pCtx->edi; break;
3974 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3975 }
3976 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
3977
3978 /* add base */
3979 switch (bSib & X86_SIB_BASE_MASK)
3980 {
3981 case 0: u32EffAddr += pCtx->eax; break;
3982 case 1: u32EffAddr += pCtx->ecx; break;
3983 case 2: u32EffAddr += pCtx->edx; break;
3984 case 3: u32EffAddr += pCtx->ebx; break;
3985 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
3986 case 5:
3987 if ((bRm & X86_MODRM_MOD_MASK) != 0)
3988 {
3989 u32EffAddr += pCtx->ebp;
3990 SET_SS_DEF();
3991 }
3992 else
3993 {
3994 uint32_t u32Disp;
3995 IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Disp);
3996 u32EffAddr += u32Disp;
3997 }
3998 break;
3999 case 6: u32EffAddr += pCtx->esi; break;
4000 case 7: u32EffAddr += pCtx->edi; break;
4001 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4002 }
4003 break;
4004 }
4005 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
4006 case 6: u32EffAddr = pCtx->esi; break;
4007 case 7: u32EffAddr = pCtx->edi; break;
4008 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4009 }
4010
4011 /* Get and add the displacement. */
4012 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
4013 {
4014 case 0:
4015 break;
4016 case 1:
4017 {
4018 int8_t i8Disp;
4019 IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Disp);
4020 u32EffAddr += i8Disp;
4021 break;
4022 }
4023 case 2:
4024 {
4025 uint32_t u32Disp;
4026 IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Disp);
4027 u32EffAddr += u32Disp;
4028 break;
4029 }
4030 default:
4031 AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
4032 }
4033
4034 }
4035 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
4036 *pGCPtrEff = u32EffAddr;
4037 else
4038 {
4039 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
4040 *pGCPtrEff = u32EffAddr & UINT16_MAX;
4041 }
4042 LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
4043 return VINF_SUCCESS;
4044 }
4045
4046 case IEMMODE_64BIT:
4047 {
4048 uint64_t u64EffAddr;
4049
4050 /* Handle the rip+disp32 form with no registers first. */
4051 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
4052 {
4053 IEM_OPCODE_GET_NEXT_S32_SX_U64(pIemCpu, &u64EffAddr);
4054 u64EffAddr += pCtx->rip + pIemCpu->offOpcode;
4055 }
4056 else
4057 {
4058 /* Get the register (or SIB) value. */
4059 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
4060 {
4061 case 0: u64EffAddr = pCtx->rax; break;
4062 case 1: u64EffAddr = pCtx->rcx; break;
4063 case 2: u64EffAddr = pCtx->rdx; break;
4064 case 3: u64EffAddr = pCtx->rbx; break;
4065 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
4066 case 6: u64EffAddr = pCtx->rsi; break;
4067 case 7: u64EffAddr = pCtx->rdi; break;
4068 case 8: u64EffAddr = pCtx->r8; break;
4069 case 9: u64EffAddr = pCtx->r9; break;
4070 case 10: u64EffAddr = pCtx->r10; break;
4071 case 11: u64EffAddr = pCtx->r11; break;
4072 case 13: u64EffAddr = pCtx->r13; break;
4073 case 14: u64EffAddr = pCtx->r14; break;
4074 case 15: u64EffAddr = pCtx->r15; break;
4075 /* SIB */
4076 case 4:
4077 case 12:
4078 {
4079 uint8_t bSib; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bSib);
4080
4081 /* Get the index and scale it. */
 4082 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
4083 {
4084 case 0: u64EffAddr = pCtx->rax; break;
4085 case 1: u64EffAddr = pCtx->rcx; break;
4086 case 2: u64EffAddr = pCtx->rdx; break;
4087 case 3: u64EffAddr = pCtx->rbx; break;
4088 case 4: u64EffAddr = 0; /*none */ break;
4089 case 5: u64EffAddr = pCtx->rbp; break;
4090 case 6: u64EffAddr = pCtx->rsi; break;
4091 case 7: u64EffAddr = pCtx->rdi; break;
4092 case 8: u64EffAddr = pCtx->r8; break;
4093 case 9: u64EffAddr = pCtx->r9; break;
4094 case 10: u64EffAddr = pCtx->r10; break;
4095 case 11: u64EffAddr = pCtx->r11; break;
4096 case 12: u64EffAddr = pCtx->r12; break;
4097 case 13: u64EffAddr = pCtx->r13; break;
4098 case 14: u64EffAddr = pCtx->r14; break;
4099 case 15: u64EffAddr = pCtx->r15; break;
4100 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4101 }
4102 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
4103
4104 /* add base */
4105 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
4106 {
4107 case 0: u64EffAddr += pCtx->rax; break;
4108 case 1: u64EffAddr += pCtx->rcx; break;
4109 case 2: u64EffAddr += pCtx->rdx; break;
4110 case 3: u64EffAddr += pCtx->rbx; break;
4111 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
4112 case 6: u64EffAddr += pCtx->rsi; break;
4113 case 7: u64EffAddr += pCtx->rdi; break;
4114 case 8: u64EffAddr += pCtx->r8; break;
4115 case 9: u64EffAddr += pCtx->r9; break;
4116 case 10: u64EffAddr += pCtx->r10; break;
4117 case 11: u64EffAddr += pCtx->r11; break;
4118 case 14: u64EffAddr += pCtx->r14; break;
4119 case 15: u64EffAddr += pCtx->r15; break;
4120 /* complicated encodings */
4121 case 5:
4122 case 13:
4123 if ((bRm & X86_MODRM_MOD_MASK) != 0)
4124 {
4125 if (!pIemCpu->uRexB)
4126 {
4127 u64EffAddr += pCtx->rbp;
4128 SET_SS_DEF();
4129 }
4130 else
4131 u64EffAddr += pCtx->r13;
4132 }
4133 else
4134 {
4135 uint32_t u32Disp;
4136 IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Disp);
4137 u64EffAddr += (int32_t)u32Disp;
4138 }
4139 break;
4140 }
4141 break;
4142 }
4143 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4144 }
4145
4146 /* Get and add the displacement. */
4147 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
4148 {
4149 case 0:
4150 break;
4151 case 1:
4152 {
4153 int8_t i8Disp;
4154 IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Disp);
4155 u64EffAddr += i8Disp;
4156 break;
4157 }
4158 case 2:
4159 {
4160 uint32_t u32Disp;
4161 IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Disp);
4162 u64EffAddr += (int32_t)u32Disp;
4163 break;
4164 }
4165 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
4166 }
4167
4168 }
4169 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
4170 *pGCPtrEff = u64EffAddr;
4171 else
 4172 *pGCPtrEff = u64EffAddr & UINT32_MAX; /* 32-bit is the only other effective address size in 64-bit mode. */
4173 LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
4174 return VINF_SUCCESS;
4175 }
4176 }
4177
4178 AssertFailedReturn(VERR_INTERNAL_ERROR_3);
4179}
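/* Worked example for the 32-bit path above: bRm=0x44 gives mod=01, rm=100, so
 * a SIB byte and a disp8 follow. With SIB=0x58 (scale=01, index=011/EBX,
 * base=000/EAX) and disp8=0x10, the routine computes
 * GCPtrEff = EAX + (EBX << 1) + 0x10, and the default segment stays DS since
 * neither EBP nor ESP is involved (SET_SS_DEF is never invoked). */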
4180
4181/** @} */
4182
4183
4184
4185/*
4186 * Include the instructions
4187 */
4188#include "IEMAllInstructions.cpp.h"
4189
4190
4191
4192
4193#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
4194
4195/**
4196 * Sets up execution verification mode.
4197 */
4198static void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
4199{
4200 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
4201 pIemCpu->fNoRem = !LogIsEnabled(); /* logging triggers the no-rem/rem verification stuff */
4202
4203#if 0
4204 // Auto enable; DSL.
4205 if ( pIemCpu->fNoRem
4206 && pOrgCtx->cs == 0x10
4207 && ( pOrgCtx->rip == 0x00100fc7
4208 || pOrgCtx->rip == 0x00100ffc
4209 || pOrgCtx->rip == 0x00100ffe
4210 )
4211 )
4212 {
4213 RTLogFlags(NULL, "enabled");
4214 pIemCpu->fNoRem = false;
4215 }
4216#endif
4217
4218 /*
4219 * Switch state.
4220 */
4221 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
4222 {
4223 static CPUMCTX s_DebugCtx; /* Ugly! */
4224
4225 s_DebugCtx = *pOrgCtx;
4226 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
4227 }
4228
4229 /*
4230 * See if there is an interrupt pending in TRPM and inject it if we can.
4231 */
4232 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4233 if ( pOrgCtx->eflags.Bits.u1IF
4234 && TRPMHasTrap(pVCpu)
4235 //&& TRPMIsSoftwareInterrupt(pVCpu)
4236 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
4237 {
4238 Log(("Injecting trap %#x\n", TRPMGetTrapNo(pVCpu)));
4239 iemCImpl_int(pIemCpu, 0, TRPMGetTrapNo(pVCpu), false);
4240 if (IEM_VERIFICATION_ENABLED(pIemCpu))
4241 TRPMResetTrap(pVCpu);
4242 }
4243
4244 /*
4245 * Reset the counters.
4246 */
4247 pIemCpu->cIOReads = 0;
4248 pIemCpu->cIOWrites = 0;
4249 pIemCpu->fMulDivHack = false;
4250 pIemCpu->fShiftOfHack= false;
4251
4252 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
4253 {
4254 /*
4255 * Free all verification records.
4256 */
4257 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
4258 pIemCpu->pIemEvtRecHead = NULL;
4259 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
4260 do
4261 {
4262 while (pEvtRec)
4263 {
4264 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
4265 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
4266 pIemCpu->pFreeEvtRec = pEvtRec;
4267 pEvtRec = pNext;
4268 }
4269 pEvtRec = pIemCpu->pOtherEvtRecHead;
4270 pIemCpu->pOtherEvtRecHead = NULL;
4271 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
4272 } while (pEvtRec);
4273 }
4274}
4275
4276
4277/**
4278 * Allocate an event record.
 4279 * @returns Pointer to a record, or NULL if not in verification mode or out of memory.
4280 */
4281static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
4282{
4283 if (IEM_VERIFICATION_ENABLED(pIemCpu))
4284 return NULL;
4285
4286 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
4287 if (pEvtRec)
4288 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
4289 else
4290 {
4291 if (!pIemCpu->ppIemEvtRecNext)
4292 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
4293
4294 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
4295 if (!pEvtRec)
4296 return NULL;
4297 }
4298 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
4299 pEvtRec->pNext = NULL;
4300 return pEvtRec;
4301}
4302
4303
4304/**
4305 * IOMMMIORead notification.
4306 */
4307VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
4308{
4309 PVMCPU pVCpu = VMMGetCpu(pVM);
4310 if (!pVCpu)
4311 return;
4312 PIEMCPU pIemCpu = &pVCpu->iem.s;
4313 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4314 if (!pEvtRec)
4315 return;
4316 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
4317 pEvtRec->u.RamRead.GCPhys = GCPhys;
4318 pEvtRec->u.RamRead.cb = cbValue;
4319 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
4320 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
4321}
4322
4323
4324/**
4325 * IOMMMIOWrite notification.
4326 */
4327VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
4328{
4329 PVMCPU pVCpu = VMMGetCpu(pVM);
4330 if (!pVCpu)
4331 return;
4332 PIEMCPU pIemCpu = &pVCpu->iem.s;
4333 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4334 if (!pEvtRec)
4335 return;
4336 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
4337 pEvtRec->u.RamWrite.GCPhys = GCPhys;
4338 pEvtRec->u.RamWrite.cb = cbValue;
4339 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
4340 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
4341 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
4342 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
4343 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
4344 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
4345}
4346
4347
4348/**
4349 * IOMIOPortRead notification.
4350 */
4351VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
4352{
4353 PVMCPU pVCpu = VMMGetCpu(pVM);
4354 if (!pVCpu)
4355 return;
4356 PIEMCPU pIemCpu = &pVCpu->iem.s;
4357 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4358 if (!pEvtRec)
4359 return;
4360 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
4361 pEvtRec->u.IOPortRead.Port = Port;
4362 pEvtRec->u.IOPortRead.cbValue = cbValue;
4363 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
4364 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
4365}
4366
4367/**
4368 * IOMIOPortWrite notification.
4369 */
4370VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
4371{
4372 PVMCPU pVCpu = VMMGetCpu(pVM);
4373 if (!pVCpu)
4374 return;
4375 PIEMCPU pIemCpu = &pVCpu->iem.s;
4376 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4377 if (!pEvtRec)
4378 return;
4379 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
4380 pEvtRec->u.IOPortWrite.Port = Port;
4381 pEvtRec->u.IOPortWrite.cbValue = cbValue;
4382 pEvtRec->u.IOPortWrite.u32Value = u32Value;
4383 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
4384 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
4385}
4386
4387
4388VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
4389{
4390 AssertFailed();
4391}
4392
4393
4394VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
4395{
4396 AssertFailed();
4397}
4398
4399
4400/**
4401 * Fakes and records an I/O port read.
4402 *
4403 * @returns VINF_SUCCESS.
4404 * @param pIemCpu The IEM per CPU data.
4405 * @param Port The I/O port.
4406 * @param pu32Value Where to store the fake value.
4407 * @param cbValue The size of the access.
4408 */
4409static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
4410{
4411 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4412 if (pEvtRec)
4413 {
4414 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
4415 pEvtRec->u.IOPortRead.Port = Port;
4416 pEvtRec->u.IOPortRead.cbValue = cbValue;
4417 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4418 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4419 }
4420 pIemCpu->cIOReads++;
4421 *pu32Value = 0xffffffff;
4422 return VINF_SUCCESS;
4423}
4424
4425
4426/**
4427 * Fakes and records an I/O port write.
4428 *
4429 * @returns VINF_SUCCESS.
4430 * @param pIemCpu The IEM per CPU data.
4431 * @param Port The I/O port.
4432 * @param u32Value The value being written.
4433 * @param cbValue The size of the access.
4434 */
4435static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
4436{
4437 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4438 if (pEvtRec)
4439 {
4440 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
4441 pEvtRec->u.IOPortWrite.Port = Port;
4442 pEvtRec->u.IOPortWrite.cbValue = cbValue;
4443 pEvtRec->u.IOPortWrite.u32Value = u32Value;
4444 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4445 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4446 }
4447 pIemCpu->cIOWrites++;
4448 return VINF_SUCCESS;
4449}
4450
4451
4452/**
4453 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
4454 * dump to the assertion info.
4455 *
4456 * @param pEvtRec The record to dump.
4457 */
4458static void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
4459{
4460 switch (pEvtRec->enmEvent)
4461 {
4462 case IEMVERIFYEVENT_IOPORT_READ:
4463 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
 4464 pEvtRec->u.IOPortRead.Port,
 4465 pEvtRec->u.IOPortRead.cbValue);
4466 break;
4467 case IEMVERIFYEVENT_IOPORT_WRITE:
4468 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
4469 pEvtRec->u.IOPortWrite.Port,
4470 pEvtRec->u.IOPortWrite.cbValue,
4471 pEvtRec->u.IOPortWrite.u32Value);
4472 break;
4473 case IEMVERIFYEVENT_RAM_READ:
4474 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
4475 pEvtRec->u.RamRead.GCPhys,
4476 pEvtRec->u.RamRead.cb);
4477 break;
4478 case IEMVERIFYEVENT_RAM_WRITE:
4479 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*RHxs\n",
4480 pEvtRec->u.RamWrite.GCPhys,
4481 pEvtRec->u.RamWrite.cb,
4482 (int)pEvtRec->u.RamWrite.cb,
4483 pEvtRec->u.RamWrite.ab);
4484 break;
4485 default:
4486 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
4487 break;
4488 }
4489}
4490
4491
4492/**
4493 * Raises an assertion on the specified record, showing the given message with
4494 * a record dump attached.
4495 *
4496 * @param pIemCpu The IEM per CPU data.
4497 * @param pEvtRec1 The first record.
4498 * @param pEvtRec2 The second record.
4499 * @param pszMsg The message explaining why we're asserting.
4500 */
4501static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
4502{
4503 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
4504 iemVerifyAssertAddRecordDump(pEvtRec1);
4505 iemVerifyAssertAddRecordDump(pEvtRec2);
4506 iemOpStubMsg2(pIemCpu);
4507 RTAssertPanic();
4508}
4509
4510
4511/**
4512 * Raises an assertion on the specified record, showing the given message with
4513 * a record dump attached.
4514 *
4515 * @param pIemCpu The IEM per CPU data.
 4516 * @param pEvtRec The record.
4517 * @param pszMsg The message explaining why we're asserting.
4518 */
4519static void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
4520{
4521 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
4522 iemVerifyAssertAddRecordDump(pEvtRec);
4523 iemOpStubMsg2(pIemCpu);
4524 RTAssertPanic();
4525}
4526
4527
4528/**
4529 * Verifies a write record.
4530 *
4531 * @param pIemCpu The IEM per CPU data.
4532 * @param pEvtRec The write record.
4533 */
4534static void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec)
4535{
4536 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
4537 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
4538 if ( RT_FAILURE(rc)
4539 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
4540 {
4541 /* fend off ins */
4542 if ( !pIemCpu->cIOReads
4543 || pEvtRec->u.RamWrite.ab[0] != 0xcc
4544 || ( pEvtRec->u.RamWrite.cb != 1
4545 && pEvtRec->u.RamWrite.cb != 2
4546 && pEvtRec->u.RamWrite.cb != 4) )
4547 {
4548 /* fend off ROMs */
4549 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000c0000) > UINT32_C(0x8000)
4550 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000e0000) > UINT32_C(0x20000)
4551 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
4552 {
4553 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
4554 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
4555 RTAssertMsg2Add("REM: %.*Rhxs\n"
4556 "IEM: %.*Rhxs\n",
4557 pEvtRec->u.RamWrite.cb, abBuf,
4558 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
4559 iemVerifyAssertAddRecordDump(pEvtRec);
4560 iemOpStubMsg2(pIemCpu);
4561 RTAssertPanic();
4562 }
4563 }
4564 }
4565
4566}
4567
4568/**
 4569 * Performs the post-execution verification checks.
4570 */
4571static void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
4572{
4573 if (IEM_VERIFICATION_ENABLED(pIemCpu))
4574 return;
4575
4576 /*
4577 * Switch back the state.
4578 */
4579 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
4580 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
4581 Assert(pOrgCtx != pDebugCtx);
4582 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
4583
4584 /*
4585 * Execute the instruction in REM.
4586 */
4587 int rc = REMR3EmulateInstruction(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu));
4588 AssertRC(rc);
4589
4590 /*
4591 * Compare the register states.
4592 */
4593 unsigned cDiffs = 0;
4594 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
4595 {
 4596 Log(("REM and IEM end up with different registers!\n"));
4597
4598# define CHECK_FIELD(a_Field) \
4599 do \
4600 { \
4601 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
4602 { \
4603 switch (sizeof(pOrgCtx->a_Field)) \
4604 { \
4605 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
4606 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - rem=%04x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
4607 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - rem=%08x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
4608 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - rem=%016llx\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
4609 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
4610 } \
4611 cDiffs++; \
4612 } \
4613 } while (0)
4614
4615# define CHECK_BIT_FIELD(a_Field) \
4616 do \
4617 { \
4618 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
4619 { \
4620 RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); \
4621 cDiffs++; \
4622 } \
4623 } while (0)
4624
4625# define CHECK_SEL(a_Sel) \
4626 do \
4627 { \
4628 CHECK_FIELD(a_Sel); \
4629 if ( pOrgCtx->a_Sel##Hid.Attr.u != pDebugCtx->a_Sel##Hid.Attr.u \
4630 && (pOrgCtx->a_Sel##Hid.Attr.u | X86_SEL_TYPE_ACCESSED) != pDebugCtx->a_Sel##Hid.Attr.u) \
4631 { \
4632 RTAssertMsg2Weak(" %8sHid.Attr differs - iem=%02x - rem=%02x\n", #a_Sel, pDebugCtx->a_Sel##Hid.Attr.u, pOrgCtx->a_Sel##Hid.Attr.u); \
4633 cDiffs++; \
4634 } \
4635 CHECK_FIELD(a_Sel##Hid.u64Base); \
4636 CHECK_FIELD(a_Sel##Hid.u32Limit); \
4637 } while (0)
4638
4639 if (memcmp(&pOrgCtx->fpu, &pDebugCtx->fpu, sizeof(pDebugCtx->fpu)))
4640 {
4641 if (pIemCpu->cInstructions != 1)
4642 {
4643 RTAssertMsg2Weak(" the FPU state differs\n");
4644 cDiffs++;
4645 }
4646 else
4647 RTAssertMsg2Weak(" the FPU state differs - happens the first time...\n");
4648 }
4649 CHECK_FIELD(rip);
4650 uint32_t fFlagsMask = UINT32_MAX;
4651 if (pIemCpu->fMulDivHack)
4652 fFlagsMask &= ~(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
4653 if (pIemCpu->fShiftOfHack)
4654 fFlagsMask &= ~(X86_EFL_OF | X86_EFL_AF);
4655 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
4656 {
4657 RTAssertMsg2Weak(" rflags differs - iem=%08llx rem=%08llx\n", pDebugCtx->rflags.u, pOrgCtx->rflags.u);
4658 CHECK_BIT_FIELD(rflags.Bits.u1CF);
4659 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
4660 CHECK_BIT_FIELD(rflags.Bits.u1PF);
4661 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
4662 CHECK_BIT_FIELD(rflags.Bits.u1AF);
4663 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
4664 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
4665 CHECK_BIT_FIELD(rflags.Bits.u1SF);
4666 CHECK_BIT_FIELD(rflags.Bits.u1TF);
4667 CHECK_BIT_FIELD(rflags.Bits.u1IF);
4668 CHECK_BIT_FIELD(rflags.Bits.u1DF);
4669 CHECK_BIT_FIELD(rflags.Bits.u1OF);
4670 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
4671 CHECK_BIT_FIELD(rflags.Bits.u1NT);
4672 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
4673 CHECK_BIT_FIELD(rflags.Bits.u1RF);
4674 CHECK_BIT_FIELD(rflags.Bits.u1VM);
4675 CHECK_BIT_FIELD(rflags.Bits.u1AC);
4676 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
4677 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
4678 CHECK_BIT_FIELD(rflags.Bits.u1ID);
4679 }
4680
4681 if (pIemCpu->cIOReads != 1)
4682 CHECK_FIELD(rax);
4683 CHECK_FIELD(rcx);
4684 CHECK_FIELD(rdx);
4685 CHECK_FIELD(rbx);
4686 CHECK_FIELD(rsp);
4687 CHECK_FIELD(rbp);
4688 CHECK_FIELD(rsi);
4689 CHECK_FIELD(rdi);
4690 CHECK_FIELD(r8);
4691 CHECK_FIELD(r9);
4692 CHECK_FIELD(r10);
4693 CHECK_FIELD(r11);
4694 CHECK_FIELD(r12);
4695 CHECK_FIELD(r13);
4696 CHECK_SEL(cs);
4697 CHECK_SEL(ss);
4698 CHECK_SEL(ds);
4699 CHECK_SEL(es);
4700 CHECK_SEL(fs);
4701 CHECK_SEL(gs);
4702 CHECK_FIELD(cr0);
4703 CHECK_FIELD(cr2);
4704 CHECK_FIELD(cr3);
4705 CHECK_FIELD(cr4);
4706 CHECK_FIELD(dr[0]);
4707 CHECK_FIELD(dr[1]);
4708 CHECK_FIELD(dr[2]);
4709 CHECK_FIELD(dr[3]);
4710 CHECK_FIELD(dr[6]);
4711 CHECK_FIELD(dr[7]);
4712 CHECK_FIELD(gdtr.cbGdt);
4713 CHECK_FIELD(gdtr.pGdt);
4714 CHECK_FIELD(idtr.cbIdt);
4715 CHECK_FIELD(idtr.pIdt);
4716 CHECK_FIELD(ldtr);
4717 CHECK_FIELD(ldtrHid.u64Base);
4718 CHECK_FIELD(ldtrHid.u32Limit);
4719 CHECK_FIELD(ldtrHid.Attr.u);
4720 CHECK_FIELD(tr);
4721 CHECK_FIELD(trHid.u64Base);
4722 CHECK_FIELD(trHid.u32Limit);
4723 CHECK_FIELD(trHid.Attr.u);
4724 CHECK_FIELD(SysEnter.cs);
4725 CHECK_FIELD(SysEnter.eip);
4726 CHECK_FIELD(SysEnter.esp);
4727 CHECK_FIELD(msrEFER);
4728 CHECK_FIELD(msrSTAR);
4729 CHECK_FIELD(msrPAT);
4730 CHECK_FIELD(msrLSTAR);
4731 CHECK_FIELD(msrCSTAR);
4732 CHECK_FIELD(msrSFMASK);
4733 CHECK_FIELD(msrKERNELGSBASE);
4734
4735 if (cDiffs != 0)
4736 {
4737 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
4738 iemOpStubMsg2(pIemCpu);
4739 RTAssertPanic();
4740 }
4741# undef CHECK_FIELD
4742# undef CHECK_BIT_FIELD
4743 }
4744
4745 /*
4746 * If the register state compared fine, check the verification event
4747 * records.
4748 */
4749 if (cDiffs == 0)
4750 {
4751 /*
 4752 * Compare verification event records.
4753 * - I/O port accesses should be a 1:1 match.
4754 */
4755 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
4756 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
4757 while (pIemRec && pOtherRec)
4758 {
 4759 /* Since we might miss RAM writes and reads, ignore reads and verify
 4760 any extra IEM write records directly against guest memory. */
4761 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
4762 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
4763 && pIemRec->pNext)
4764 {
4765 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
4766 iemVerifyWriteRecord(pIemCpu, pIemRec);
4767 pIemRec = pIemRec->pNext;
4768 }
4769
4770 /* Do the compare. */
4771 if (pIemRec->enmEvent != pOtherRec->enmEvent)
4772 {
4773 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
4774 break;
4775 }
4776 bool fEquals;
4777 switch (pIemRec->enmEvent)
4778 {
4779 case IEMVERIFYEVENT_IOPORT_READ:
4780 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
4781 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
4782 break;
4783 case IEMVERIFYEVENT_IOPORT_WRITE:
4784 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
4785 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
4786 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
4787 break;
4788 case IEMVERIFYEVENT_RAM_READ:
4789 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
4790 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
4791 break;
4792 case IEMVERIFYEVENT_RAM_WRITE:
4793 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
4794 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
4795 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
4796 break;
4797 default:
4798 fEquals = false;
4799 break;
4800 }
4801 if (!fEquals)
4802 {
4803 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
4804 break;
4805 }
4806
4807 /* advance */
4808 pIemRec = pIemRec->pNext;
4809 pOtherRec = pOtherRec->pNext;
4810 }
4811
4812 /* Ignore extra writes and reads. */
4813 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
4814 {
4815 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
4816 iemVerifyWriteRecord(pIemCpu, pIemRec);
4817 pIemRec = pIemRec->pNext;
4818 }
4819 if (pIemRec != NULL)
4820 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
4821 else if (pOtherRec != NULL)
 4822 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
4823 }
4824 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
4825}
4826
4827#else /* !IEM_VERIFICATION_MODE || !IN_RING3 */
4828
4829/* stubs */
4830static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
4831{
4832 return VERR_INTERNAL_ERROR;
4833}
4834
4835static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
4836{
4837 return VERR_INTERNAL_ERROR;
4838}
4839
4840#endif /* !IEM_VERIFICATION_MODE || !IN_RING3 */
4841
4842
4843/**
4844 * Execute one instruction.
4845 *
4846 * @return Strict VBox status code.
4847 * @param pVCpu The current virtual CPU.
4848 */
4849VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
4850{
4851 PIEMCPU pIemCpu = &pVCpu->iem.s;
4852
4853#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
4854 iemExecVerificationModeSetup(pIemCpu);
4855#endif
4856#ifdef LOG_ENABLED
4857 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4858 if (LogIs2Enabled())
4859 {
4860 char szInstr[256];
4861 uint32_t cbInstr = 0;
4862 DBGFR3DisasInstrEx(pVCpu->pVMR3, pVCpu->idCpu, 0, 0,
4863 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4864 szInstr, sizeof(szInstr), &cbInstr);
4865
4866 Log2(("**** "
4867 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
4868 " eip=%08x esp=%08x ebp=%08x iopl=%d\n"
4869 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
4870 " %s\n"
4871 ,
4872 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
4873 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL,
4874 (RTSEL)pCtx->cs, (RTSEL)pCtx->ss, (RTSEL)pCtx->ds, (RTSEL)pCtx->es,
4875 (RTSEL)pCtx->fs, (RTSEL)pCtx->gs, pCtx->eflags.u,
4876 szInstr));
4877 }
4878#endif
4879
4880 /*
4881 * Do the decoding and emulation.
4882 */
4883 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
4884 if (rcStrict != VINF_SUCCESS)
4885 return rcStrict;
4886
4887 uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
4888 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
4889 if (rcStrict == VINF_SUCCESS)
4890 pIemCpu->cInstructions++;
4891//#ifdef DEBUG
4892// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
4893//#endif
4894
4895 /* Execute the next instruction as well if a cli, pop ss or
4896 mov ss, Gr has just completed successfully. */
4897 if ( rcStrict == VINF_SUCCESS
4898 && VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
4899 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
4900 {
4901 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
4902 if (rcStrict == VINF_SUCCESS)
4903 {
 4904 IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
4905 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
4906 if (rcStrict == VINF_SUCCESS)
4907 pIemCpu->cInstructions++;
4908 }
4909 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
4910 }
4911
4912 /*
4913 * Assert some sanity.
4914 */
4915#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
4916 iemExecVerificationModeCheck(pIemCpu);
4917#endif
4918 return rcStrict;
4919}
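/* Illustrative only -- a hypothetical ring-3 caller, not part of this file: an
 * exit handler falling back on the interpreter would do something along the
 * lines of
 *
 * @code
 *     VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *     if (rcStrict != VINF_SUCCESS)
 *         Log(("IEMExecOne -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
 * @endcode
 */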
4920