VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 39953

Last change on this file since 39953 was 39953, checked in by vboxsync, 13 years ago

gcc 4.2 on mac in debug mode maybe [insert fitting expletives].

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 239.0 KB
1/* $Id: IEMAll.cpp 39953 2012-02-02 10:39:16Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, it is thought
36 * to conflict with the speed goal, as the disassembler chews things a bit too
37 * much and leaves us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 */
43
44/*******************************************************************************
45* Header Files *
46*******************************************************************************/
47#define LOG_GROUP LOG_GROUP_IEM
48#include <VBox/vmm/iem.h>
49#include <VBox/vmm/pgm.h>
50#include <VBox/vmm/iom.h>
51#include <VBox/vmm/em.h>
52#include <VBox/vmm/tm.h>
53#include <VBox/vmm/dbgf.h>
54#ifdef IEM_VERIFICATION_MODE
55# include <VBox/vmm/rem.h>
56# include <VBox/vmm/mm.h>
57#endif
58#include "IEMInternal.h"
59#include <VBox/vmm/vm.h>
60#include <VBox/log.h>
61#include <VBox/err.h>
62#include <VBox/param.h>
63#include <iprt/assert.h>
64#include <iprt/string.h>
65#include <iprt/x86.h>
66
67
68/*******************************************************************************
69* Structures and Typedefs *
70*******************************************************************************/
71/** @typedef PFNIEMOP
72 * Pointer to an opcode decoder function.
73 */
74
75/** @def FNIEMOP_DEF
76 * Define an opcode decoder function.
77 *
78 * We're using macros for this so that adding and removing parameters as well as
79 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL.
80 *
81 * @param a_Name The function name.
82 */
83
84
85#if defined(__GNUC__) && defined(RT_ARCH_X86)
86typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
87# define FNIEMOP_DEF(a_Name) \
88 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name (PIEMCPU pIemCpu)
89# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
90 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
91# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
92 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
93
94#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
95typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
96# define FNIEMOP_DEF(a_Name) \
97 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
98# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
99 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
100# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
101 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
102
103#elif defined(__GNUC__)
104typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
105# define FNIEMOP_DEF(a_Name) \
106 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
107# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
108 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
109# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
110 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
111
112#else
113typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
114# define FNIEMOP_DEF(a_Name) \
115 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
116# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
117 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
118# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
119 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
120
121#endif
122
123
124/**
125 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
126 */
127typedef union IEMSELDESC
128{
129 /** The legacy view. */
130 X86DESC Legacy;
131 /** The long mode view. */
132 X86DESC64 Long;
133} IEMSELDESC;
134/** Pointer to a selector descriptor table entry. */
135typedef IEMSELDESC *PIEMSELDESC;
136
137
138/*******************************************************************************
139* Defined Constants And Macros *
140*******************************************************************************/
141/** @name IEM status codes.
142 *
143 * Not quite sure how this will play out in the end, just aliasing safe status
144 * codes for now.
145 *
146 * @{ */
147#define VINF_IEM_RAISED_XCPT VINF_EM_RESCHEDULE
148/** @} */
149
150/** Temporary hack to disable the double execution. Will be removed in favor
151 * of a dedicated execution mode in EM. */
152//#define IEM_VERIFICATION_MODE_NO_REM
153
154/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
155 * due to GCC lacking knowledge about the value range of a switch. */
156#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
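
/*
 * Usage sketch (added for illustration, not part of the original file): the
 * macro is meant to sit as the default case of a fully enumerated switch,
 * e.g. (cbValue is a hypothetical local):
 *
 *      switch (pIemCpu->enmEffOpSize)
 *      {
 *          case IEMMODE_16BIT: cbValue = 2; break;
 *          case IEMMODE_32BIT: cbValue = 4; break;
 *          case IEMMODE_64BIT: cbValue = 8; break;
 *          IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *      }
 *
 * This documents that the default is unreachable and gives GCC a return
 * path, silencing the 'may be used uninitialized' warnings.
 */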
157
158/**
159 * Call an opcode decoder function.
160 *
161 * We're using macros for this so that adding and removing parameters can be
162 * done as we please. See FNIEMOP_DEF.
163 */
164#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
165
166/**
167 * Call a common opcode decoder function taking one extra argument.
168 *
169 * We're using macros for this so that adding and removing parameters can be
170 * done as we please. See FNIEMOP_DEF_1.
171 */
172#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
173
174/**
175 * Call a common opcode decoder function taking two extra arguments.
176 *
177 * We're using macros for this so that adding and removing parameters can be
178 * done as we please. See FNIEMOP_DEF_2.
179 */
180#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
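
/*
 * Illustrative sketch (added, not part of the original file): a decoder
 * function is declared with FNIEMOP_DEF so the calling convention and nothrow
 * attributes stay in one place, and it is invoked through FNIEMOP_CALL.  The
 * handler name and opcode byte variable below are hypothetical.
 *
 *      FNIEMOP_DEF(iemOp_example)
 *      {
 *          // decode operands here, then defer to a C implementation worker
 *          return VINF_SUCCESS;
 *      }
 *
 *      // Dispatch through the one-byte opcode map declared further down:
 *      VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
 */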
181
182/**
183 * Check if we're currently executing in real or virtual 8086 mode.
184 *
185 * @returns @c true if it is, @c false if not.
186 * @param a_pIemCpu The IEM state of the current CPU.
187 */
188#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
189
190/**
191 * Check if we're currently executing in long mode.
192 *
193 * @returns @c true if it is, @c false if not.
194 * @param a_pIemCpu The IEM state of the current CPU.
195 */
196#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
197
198/**
199 * Check if we're currently executing in real mode.
200 *
201 * @returns @c true if it is, @c false if not.
202 * @param a_pIemCpu The IEM state of the current CPU.
203 */
204#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
205
206/**
207 * Tests if an AMD CPUID feature (extended) is marked present - ECX.
208 */
209#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
210
211/**
212 * Checks if an Intel CPUID feature is present.
213 */
214#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx) \
215 ( ((a_fEdx) & (X86_CPUID_FEATURE_EDX_TSC | 0)) \
216 || iemRegIsIntelCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0) )
217
218/**
219 * Check if the address is canonical.
220 */
221#define IEM_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000))
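
/*
 * Worked example (added for clarity, not part of the original file): a
 * canonical 64-bit address has bits 63:47 all equal to bit 47.  Adding 2^47
 * (0x800000000000) shifts the two canonical halves into one contiguous range
 * [0, 2^48), so a single unsigned compare suffices:
 *      0x00007fffffffffff + 0x800000000000 = 0x0000ffffffffffff  -> canonical
 *      0xffff800000000000 + 0x800000000000 = 0x0000000000000000  -> canonical (wraps)
 *      0x0000800000000000 + 0x800000000000 = 0x0001000000000000  -> not canonical
 */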
222
223
224/*******************************************************************************
225* Global Variables *
226*******************************************************************************/
227extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
228
229
230/** Function table for the ADD instruction. */
231static const IEMOPBINSIZES g_iemAImpl_add =
232{
233 iemAImpl_add_u8, iemAImpl_add_u8_locked,
234 iemAImpl_add_u16, iemAImpl_add_u16_locked,
235 iemAImpl_add_u32, iemAImpl_add_u32_locked,
236 iemAImpl_add_u64, iemAImpl_add_u64_locked
237};
238
239/** Function table for the ADC instruction. */
240static const IEMOPBINSIZES g_iemAImpl_adc =
241{
242 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
243 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
244 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
245 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
246};
247
248/** Function table for the SUB instruction. */
249static const IEMOPBINSIZES g_iemAImpl_sub =
250{
251 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
252 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
253 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
254 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
255};
256
257/** Function table for the SBB instruction. */
258static const IEMOPBINSIZES g_iemAImpl_sbb =
259{
260 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
261 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
262 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
263 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
264};
265
266/** Function table for the OR instruction. */
267static const IEMOPBINSIZES g_iemAImpl_or =
268{
269 iemAImpl_or_u8, iemAImpl_or_u8_locked,
270 iemAImpl_or_u16, iemAImpl_or_u16_locked,
271 iemAImpl_or_u32, iemAImpl_or_u32_locked,
272 iemAImpl_or_u64, iemAImpl_or_u64_locked
273};
274
275/** Function table for the XOR instruction. */
276static const IEMOPBINSIZES g_iemAImpl_xor =
277{
278 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
279 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
280 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
281 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
282};
283
284/** Function table for the AND instruction. */
285static const IEMOPBINSIZES g_iemAImpl_and =
286{
287 iemAImpl_and_u8, iemAImpl_and_u8_locked,
288 iemAImpl_and_u16, iemAImpl_and_u16_locked,
289 iemAImpl_and_u32, iemAImpl_and_u32_locked,
290 iemAImpl_and_u64, iemAImpl_and_u64_locked
291};
292
293/** Function table for the CMP instruction.
294 * @remarks Making operand order ASSUMPTIONS.
295 */
296static const IEMOPBINSIZES g_iemAImpl_cmp =
297{
298 iemAImpl_cmp_u8, NULL,
299 iemAImpl_cmp_u16, NULL,
300 iemAImpl_cmp_u32, NULL,
301 iemAImpl_cmp_u64, NULL
302};
303
304/** Function table for the TEST instruction.
305 * @remarks Making operand order ASSUMPTIONS.
306 */
307static const IEMOPBINSIZES g_iemAImpl_test =
308{
309 iemAImpl_test_u8, NULL,
310 iemAImpl_test_u16, NULL,
311 iemAImpl_test_u32, NULL,
312 iemAImpl_test_u64, NULL
313};
314
315/** Function table for the BT instruction. */
316static const IEMOPBINSIZES g_iemAImpl_bt =
317{
318 NULL, NULL,
319 iemAImpl_bt_u16, NULL,
320 iemAImpl_bt_u32, NULL,
321 iemAImpl_bt_u64, NULL
322};
323
324/** Function table for the BTC instruction. */
325static const IEMOPBINSIZES g_iemAImpl_btc =
326{
327 NULL, NULL,
328 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
329 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
330 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
331};
332
333/** Function table for the BTR instruction. */
334static const IEMOPBINSIZES g_iemAImpl_btr =
335{
336 NULL, NULL,
337 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
338 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
339 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
340};
341
342/** Function table for the BTS instruction. */
343static const IEMOPBINSIZES g_iemAImpl_bts =
344{
345 NULL, NULL,
346 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
347 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
348 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
349};
350
351/** Function table for the BSF instruction. */
352static const IEMOPBINSIZES g_iemAImpl_bsf =
353{
354 NULL, NULL,
355 iemAImpl_bsf_u16, NULL,
356 iemAImpl_bsf_u32, NULL,
357 iemAImpl_bsf_u64, NULL
358};
359
360/** Function table for the BSR instruction. */
361static const IEMOPBINSIZES g_iemAImpl_bsr =
362{
363 NULL, NULL,
364 iemAImpl_bsr_u16, NULL,
365 iemAImpl_bsr_u32, NULL,
366 iemAImpl_bsr_u64, NULL
367};
368
369/** Function table for the IMUL instruction. */
370static const IEMOPBINSIZES g_iemAImpl_imul_two =
371{
372 NULL, NULL,
373 iemAImpl_imul_two_u16, NULL,
374 iemAImpl_imul_two_u32, NULL,
375 iemAImpl_imul_two_u64, NULL
376};
377
378/** Group 1 /r lookup table. */
379static const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
380{
381 &g_iemAImpl_add,
382 &g_iemAImpl_or,
383 &g_iemAImpl_adc,
384 &g_iemAImpl_sbb,
385 &g_iemAImpl_and,
386 &g_iemAImpl_sub,
387 &g_iemAImpl_xor,
388 &g_iemAImpl_cmp
389};
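
/*
 * Dispatch sketch (added, not part of the original file): for the group 1
 * opcodes (0x80..0x83) the operation is selected by ModR/M bits 5:3, so a
 * decoder would pick the worker table roughly like this (the variable names
 * and the exact struct member names are assumptions for illustration):
 *
 *      uint8_t bRm;
 *      IEM_OPCODE_GET_NEXT_U8(&bRm);                            // ModR/M byte (macro defined below)
 *      PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7]; // /0=ADD ... /7=CMP
 *
 * The chosen table then supplies both the normal and the lock-prefixed worker
 * for each operand size; the NULL entries above mark combinations that are
 * invalid, e.g. LOCK CMP.
 */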
390
391/** Function table for the INC instruction. */
392static const IEMOPUNARYSIZES g_iemAImpl_inc =
393{
394 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
395 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
396 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
397 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
398};
399
400/** Function table for the DEC instruction. */
401static const IEMOPUNARYSIZES g_iemAImpl_dec =
402{
403 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
404 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
405 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
406 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
407};
408
409/** Function table for the NEG instruction. */
410static const IEMOPUNARYSIZES g_iemAImpl_neg =
411{
412 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
413 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
414 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
415 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
416};
417
418/** Function table for the NOT instruction. */
419static const IEMOPUNARYSIZES g_iemAImpl_not =
420{
421 iemAImpl_not_u8, iemAImpl_not_u8_locked,
422 iemAImpl_not_u16, iemAImpl_not_u16_locked,
423 iemAImpl_not_u32, iemAImpl_not_u32_locked,
424 iemAImpl_not_u64, iemAImpl_not_u64_locked
425};
426
427
428/** Function table for the ROL instruction. */
429static const IEMOPSHIFTSIZES g_iemAImpl_rol =
430{
431 iemAImpl_rol_u8,
432 iemAImpl_rol_u16,
433 iemAImpl_rol_u32,
434 iemAImpl_rol_u64
435};
436
437/** Function table for the ROR instruction. */
438static const IEMOPSHIFTSIZES g_iemAImpl_ror =
439{
440 iemAImpl_ror_u8,
441 iemAImpl_ror_u16,
442 iemAImpl_ror_u32,
443 iemAImpl_ror_u64
444};
445
446/** Function table for the RCL instruction. */
447static const IEMOPSHIFTSIZES g_iemAImpl_rcl =
448{
449 iemAImpl_rcl_u8,
450 iemAImpl_rcl_u16,
451 iemAImpl_rcl_u32,
452 iemAImpl_rcl_u64
453};
454
455/** Function table for the RCR instruction. */
456static const IEMOPSHIFTSIZES g_iemAImpl_rcr =
457{
458 iemAImpl_rcr_u8,
459 iemAImpl_rcr_u16,
460 iemAImpl_rcr_u32,
461 iemAImpl_rcr_u64
462};
463
464/** Function table for the SHL instruction. */
465static const IEMOPSHIFTSIZES g_iemAImpl_shl =
466{
467 iemAImpl_shl_u8,
468 iemAImpl_shl_u16,
469 iemAImpl_shl_u32,
470 iemAImpl_shl_u64
471};
472
473/** Function table for the SHR instruction. */
474static const IEMOPSHIFTSIZES g_iemAImpl_shr =
475{
476 iemAImpl_shr_u8,
477 iemAImpl_shr_u16,
478 iemAImpl_shr_u32,
479 iemAImpl_shr_u64
480};
481
482/** Function table for the SAR instruction. */
483static const IEMOPSHIFTSIZES g_iemAImpl_sar =
484{
485 iemAImpl_sar_u8,
486 iemAImpl_sar_u16,
487 iemAImpl_sar_u32,
488 iemAImpl_sar_u64
489};
490
491
492/** Function table for the MUL instruction. */
493static const IEMOPMULDIVSIZES g_iemAImpl_mul =
494{
495 iemAImpl_mul_u8,
496 iemAImpl_mul_u16,
497 iemAImpl_mul_u32,
498 iemAImpl_mul_u64
499};
500
501/** Function table for the IMUL instruction working implicitly on rAX. */
502static const IEMOPMULDIVSIZES g_iemAImpl_imul =
503{
504 iemAImpl_imul_u8,
505 iemAImpl_imul_u16,
506 iemAImpl_imul_u32,
507 iemAImpl_imul_u64
508};
509
510/** Function table for the DIV instruction. */
511static const IEMOPMULDIVSIZES g_iemAImpl_div =
512{
513 iemAImpl_div_u8,
514 iemAImpl_div_u16,
515 iemAImpl_div_u32,
516 iemAImpl_div_u64
517};
518
519/** Function table for the IDIV instruction. */
520static const IEMOPMULDIVSIZES g_iemAImpl_idiv =
521{
522 iemAImpl_idiv_u8,
523 iemAImpl_idiv_u16,
524 iemAImpl_idiv_u32,
525 iemAImpl_idiv_u64
526};
527
528/** Function table for the SHLD instruction */
529static const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
530{
531 iemAImpl_shld_u16,
532 iemAImpl_shld_u32,
533 iemAImpl_shld_u64,
534};
535
536/** Function table for the SHRD instruction */
537static const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
538{
539 iemAImpl_shrd_u16,
540 iemAImpl_shrd_u32,
541 iemAImpl_shrd_u64,
542};
543
544
545/*******************************************************************************
546* Internal Functions *
547*******************************************************************************/
548static VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
549/*static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
550static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
551static VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
552static VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
553static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
554static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
555static VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
556static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
557static VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
558static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
559static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
560static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
561static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
562static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
563static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
564static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
565static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
566static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel);
567static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
568static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
569static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
570static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
571
572#ifdef IEM_VERIFICATION_MODE
573static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
574#endif
575static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
576static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
577
578
579/**
580 * Initializes the decoder state.
581 *
582 * @param pIemCpu The per CPU IEM state.
583 */
584DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu)
585{
586 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
587
588 pIemCpu->uCpl = CPUMGetGuestCPL(IEMCPU_TO_VMCPU(pIemCpu), CPUMCTX2CORE(pCtx));
589 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
590 ? IEMMODE_64BIT
591 : pCtx->csHid.Attr.n.u1DefBig /** @todo check if this is correct... */
592 ? IEMMODE_32BIT
593 : IEMMODE_16BIT;
594 pIemCpu->enmCpuMode = enmMode;
595 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
596 pIemCpu->enmEffAddrMode = enmMode;
597 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
598 pIemCpu->enmEffOpSize = enmMode;
599 pIemCpu->fPrefixes = 0;
600 pIemCpu->uRexReg = 0;
601 pIemCpu->uRexB = 0;
602 pIemCpu->uRexIndex = 0;
603 pIemCpu->iEffSeg = X86_SREG_DS;
604 pIemCpu->offOpcode = 0;
605 pIemCpu->cbOpcode = 0;
606 pIemCpu->cActiveMappings = 0;
607 pIemCpu->iNextMapping = 0;
608}
609
610
611/**
612 * Prefetches opcodes when execution is first started.
613 *
614 * @returns Strict VBox status code.
615 * @param pIemCpu The IEM state.
616 */
617static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu)
618{
619#ifdef IEM_VERIFICATION_MODE
620 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
621#endif
622 iemInitDecoder(pIemCpu);
623
624 /*
625 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
626 *
627 * First translate CS:rIP to a physical address.
628 */
629 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
630 uint32_t cbToTryRead;
631 RTGCPTR GCPtrPC;
632 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
633 {
634 cbToTryRead = PAGE_SIZE;
635 GCPtrPC = pCtx->rip;
636 if (!IEM_IS_CANONICAL(GCPtrPC))
637 return iemRaiseGeneralProtectionFault0(pIemCpu);
638 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
639 }
640 else
641 {
642 uint32_t GCPtrPC32 = pCtx->eip;
643 Assert(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
644 if (GCPtrPC32 > pCtx->csHid.u32Limit)
645 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
646 cbToTryRead = pCtx->csHid.u32Limit - GCPtrPC32 + 1;
647 GCPtrPC = pCtx->csHid.u64Base + GCPtrPC32;
648 }
649
650 RTGCPHYS GCPhys;
651 uint64_t fFlags;
652 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
653 if (RT_FAILURE(rc))
654 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
655 if ((fFlags & X86_PTE_US) && pIemCpu->uCpl == 2)
656 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
657 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
658 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
659 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
660 /** @todo Check reserved bits and such stuff. PGM is better at doing
661 * that, so do it when implementing the guest virtual address
662 * TLB... */
663
664#ifdef IEM_VERIFICATION_MODE
665 /*
666 * Optimistic optimization: Use unconsumed opcode bytes from the previous
667 * instruction.
668 */
669 /** @todo optimize this differently by not using PGMPhysRead. */
670 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
671 pIemCpu->GCPhysOpcodes = GCPhys;
672 if ( offPrevOpcodes < cbOldOpcodes
673 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
674 {
675 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
676 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
677 pIemCpu->cbOpcode = cbNew;
678 return VINF_SUCCESS;
679 }
680#endif
681
682 /*
683 * Read the bytes at this address.
684 */
685 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
686 if (cbToTryRead > cbLeftOnPage)
687 cbToTryRead = cbLeftOnPage;
688 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
689 cbToTryRead = sizeof(pIemCpu->abOpcode);
690 /** @todo patch manager */
691 if (!pIemCpu->fByPassHandlers)
692 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, pIemCpu->abOpcode, cbToTryRead);
693 else
694 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pIemCpu->abOpcode, GCPhys, cbToTryRead);
695 if (rc != VINF_SUCCESS)
696 return rc;
697 pIemCpu->cbOpcode = cbToTryRead;
698
699 return VINF_SUCCESS;
700}
701
702
703/**
704 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
705 * exception if it fails.
706 *
707 * @returns Strict VBox status code.
708 * @param pIemCpu The IEM state.
709 * @param cbMin The minimum number of additional opcode bytes needed.
710 */
711static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
712{
713 /*
714 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
715 *
716 * First translate CS:rIP to a physical address.
717 */
718 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
719 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
720 uint32_t cbToTryRead;
721 RTGCPTR GCPtrNext;
722 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
723 {
724 cbToTryRead = PAGE_SIZE;
725 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
726 if (!IEM_IS_CANONICAL(GCPtrNext))
727 return iemRaiseGeneralProtectionFault0(pIemCpu);
728 cbToTryRead = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
729 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
730 }
731 else
732 {
733 uint32_t GCPtrNext32 = pCtx->eip;
734 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
735 GCPtrNext32 += pIemCpu->cbOpcode;
736 if (GCPtrNext32 > pCtx->csHid.u32Limit)
737 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
738 cbToTryRead = pCtx->csHid.u32Limit - GCPtrNext32 + 1;
739 if (cbToTryRead < cbMin - cbLeft)
740 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
741 GCPtrNext = pCtx->csHid.u64Base + GCPtrNext32;
742 }
743
744 RTGCPHYS GCPhys;
745 uint64_t fFlags;
746 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
747 if (RT_FAILURE(rc))
748 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
749 if ((fFlags & X86_PTE_US) && pIemCpu->uCpl == 2)
750 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
751 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
752 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
753 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
754 //Log(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
755 /** @todo Check reserved bits and such stuff. PGM is better at doing
756 * that, so do it when implementing the guest virtual address
757 * TLB... */
758
759 /*
760 * Read the bytes at this address.
761 */
762 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
763 if (cbToTryRead > cbLeftOnPage)
764 cbToTryRead = cbLeftOnPage;
765 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
766 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
767 Assert(cbToTryRead >= cbMin - cbLeft);
768 if (!pIemCpu->fByPassHandlers)
769 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);
770 else
771 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
772 if (rc != VINF_SUCCESS)
773 return rc;
774 pIemCpu->cbOpcode += cbToTryRead;
775 //Log(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
776
777 return VINF_SUCCESS;
778}
779
780
781/**
782 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
783 *
784 * @returns Strict VBox status code.
785 * @param pIemCpu The IEM state.
786 * @param pb Where to return the opcode byte.
787 */
788DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
789{
790 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
791 if (rcStrict == VINF_SUCCESS)
792 {
793 uint8_t offOpcode = pIemCpu->offOpcode;
794 *pb = pIemCpu->abOpcode[offOpcode];
795 pIemCpu->offOpcode = offOpcode + 1;
796 }
797 else
798 *pb = 0;
799 return rcStrict;
800}
801
802
803/**
804 * Fetches the next opcode byte.
805 *
806 * @returns Strict VBox status code.
807 * @param pIemCpu The IEM state.
808 * @param pu8 Where to return the opcode byte.
809 */
810DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
811{
812 uint8_t const offOpcode = pIemCpu->offOpcode;
813 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
814 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
815
816 *pu8 = pIemCpu->abOpcode[offOpcode];
817 pIemCpu->offOpcode = offOpcode + 1;
818 return VINF_SUCCESS;
819}
820
821
822/**
823 * Fetches the next opcode byte, returns automatically on failure.
824 *
825 * @param a_pu8 Where to return the opcode byte.
826 * @remark Implicitly references pIemCpu.
827 */
828#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
829 do \
830 { \
831 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
832 if (rcStrict2 != VINF_SUCCESS) \
833 return rcStrict2; \
834 } while (0)
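
/*
 * Usage sketch (added, not part of the original file): inside an FNIEMOP_DEF
 * handler the macro hides the error plumbing, so decoding an immediate reads
 * as a straight sequence of fetches.  The handler name below is hypothetical.
 *
 *      FNIEMOP_DEF(iemOp_example_Ib)
 *      {
 *          uint8_t u8Imm;
 *          IEM_OPCODE_GET_NEXT_U8(&u8Imm); // returns rcStrict2 from the handler on failure
 *          // ... act on u8Imm ...
 *          return VINF_SUCCESS;
 *      }
 */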
835
836
837/**
838 * Fetches the next signed byte from the opcode stream.
839 *
840 * @returns Strict VBox status code.
841 * @param pIemCpu The IEM state.
842 * @param pi8 Where to return the signed byte.
843 */
844DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
845{
846 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
847}
848
849
850/**
851 * Fetches the next signed byte from the opcode stream, returning automatically
852 * on failure.
853 *
854 * @param pi8 Where to return the signed byte.
855 * @remark Implicitly references pIemCpu.
856 */
857#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
858 do \
859 { \
860 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
861 if (rcStrict2 != VINF_SUCCESS) \
862 return rcStrict2; \
863 } while (0)
864
865
866/**
867 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
868 *
869 * @returns Strict VBox status code.
870 * @param pIemCpu The IEM state.
871 * @param pu16 Where to return the opcode word (sign extended byte).
872 */
873DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
874{
875 uint8_t u8;
876 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
877 if (rcStrict == VINF_SUCCESS)
878 *pu16 = (int8_t)u8;
879 return rcStrict;
880}
881
882
883/**
884 * Fetches the next signed byte from the opcode stream, sign extending it to
885 * an unsigned 16-bit value.
886 *
887 * @returns Strict VBox status code.
888 * @param pIemCpu The IEM state.
889 * @param pu16 Where to return the unsigned word.
890 */
891DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
892{
893 uint8_t const offOpcode = pIemCpu->offOpcode;
894 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
895 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
896
897 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
898 pIemCpu->offOpcode = offOpcode + 1;
899 return VINF_SUCCESS;
900}
901
902
903/**
904 * Fetches the next signed byte from the opcode stream, sign extending it to
905 * a word, and returns automatically on failure.
906 *
907 * @param pu16 Where to return the word.
908 * @remark Implicitly references pIemCpu.
909 */
910#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
911 do \
912 { \
913 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
914 if (rcStrict2 != VINF_SUCCESS) \
915 return rcStrict2; \
916 } while (0)
917
918
919/**
920 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
921 *
922 * @returns Strict VBox status code.
923 * @param pIemCpu The IEM state.
924 * @param pu16 Where to return the opcode word.
925 */
926DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
927{
928 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
929 if (rcStrict == VINF_SUCCESS)
930 {
931 uint8_t offOpcode = pIemCpu->offOpcode;
932 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
933 pIemCpu->offOpcode = offOpcode + 2;
934 }
935 else
936 *pu16 = 0;
937 return rcStrict;
938}
939
940
941/**
942 * Fetches the next opcode word.
943 *
944 * @returns Strict VBox status code.
945 * @param pIemCpu The IEM state.
946 * @param pu16 Where to return the opcode word.
947 */
948DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
949{
950 uint8_t const offOpcode = pIemCpu->offOpcode;
951 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
952 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
953
954 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
955 pIemCpu->offOpcode = offOpcode + 2;
956 return VINF_SUCCESS;
957}
958
959
960/**
961 * Fetches the next opcode word, returns automatically on failure.
962 *
963 * @param a_pu16 Where to return the opcode word.
964 * @remark Implicitly references pIemCpu.
965 */
966#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
967 do \
968 { \
969 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
970 if (rcStrict2 != VINF_SUCCESS) \
971 return rcStrict2; \
972 } while (0)
973
974
975/**
976 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
977 *
978 * @returns Strict VBox status code.
979 * @param pIemCpu The IEM state.
980 * @param pu32 Where to return the opcode double word.
981 */
982DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
983{
984 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
985 if (rcStrict == VINF_SUCCESS)
986 {
987 uint8_t offOpcode = pIemCpu->offOpcode;
988 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
989 pIemCpu->offOpcode = offOpcode + 2;
990 }
991 else
992 *pu32 = 0;
993 return rcStrict;
994}
995
996
997/**
998 * Fetches the next opcode word, zero extending it to a double word.
999 *
1000 * @returns Strict VBox status code.
1001 * @param pIemCpu The IEM state.
1002 * @param pu32 Where to return the opcode double word.
1003 */
1004DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1005{
1006 uint8_t const offOpcode = pIemCpu->offOpcode;
1007 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1008 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1009
1010 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1011 pIemCpu->offOpcode = offOpcode + 2;
1012 return VINF_SUCCESS;
1013}
1014
1015
1016/**
1017 * Fetches the next opcode word and zero extends it to a double word, returns
1018 * automatically on failure.
1019 *
1020 * @param a_pu32 Where to return the opcode double word.
1021 * @remark Implicitly references pIemCpu.
1022 */
1023#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1024 do \
1025 { \
1026 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1027 if (rcStrict2 != VINF_SUCCESS) \
1028 return rcStrict2; \
1029 } while (0)
1030
1031
1032/**
1033 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1034 *
1035 * @returns Strict VBox status code.
1036 * @param pIemCpu The IEM state.
1037 * @param pu64 Where to return the opcode quad word.
1038 */
1039DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1040{
1041 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1042 if (rcStrict == VINF_SUCCESS)
1043 {
1044 uint8_t offOpcode = pIemCpu->offOpcode;
1045 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1046 pIemCpu->offOpcode = offOpcode + 2;
1047 }
1048 else
1049 *pu64 = 0;
1050 return rcStrict;
1051}
1052
1053
1054/**
1055 * Fetches the next opcode word, zero extending it to a quad word.
1056 *
1057 * @returns Strict VBox status code.
1058 * @param pIemCpu The IEM state.
1059 * @param pu64 Where to return the opcode quad word.
1060 */
1061DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1062{
1063 uint8_t const offOpcode = pIemCpu->offOpcode;
1064 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1065 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1066
1067 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1068 pIemCpu->offOpcode = offOpcode + 2;
1069 return VINF_SUCCESS;
1070}
1071
1072
1073/**
1074 * Fetches the next opcode word and zero extends it to a quad word, returns
1075 * automatically on failure.
1076 *
1077 * @param a_pu64 Where to return the opcode quad word.
1078 * @remark Implicitly references pIemCpu.
1079 */
1080#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1081 do \
1082 { \
1083 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1084 if (rcStrict2 != VINF_SUCCESS) \
1085 return rcStrict2; \
1086 } while (0)
1087
1088
1089/**
1090 * Fetches the next signed word from the opcode stream.
1091 *
1092 * @returns Strict VBox status code.
1093 * @param pIemCpu The IEM state.
1094 * @param pi16 Where to return the signed word.
1095 */
1096DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1097{
1098 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1099}
1100
1101
1102/**
1103 * Fetches the next signed word from the opcode stream, returning automatically
1104 * on failure.
1105 *
1106 * @param pi16 Where to return the signed word.
1107 * @remark Implicitly references pIemCpu.
1108 */
1109#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1110 do \
1111 { \
1112 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1113 if (rcStrict2 != VINF_SUCCESS) \
1114 return rcStrict2; \
1115 } while (0)
1116
1117
1118/**
1119 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1120 *
1121 * @returns Strict VBox status code.
1122 * @param pIemCpu The IEM state.
1123 * @param pu32 Where to return the opcode dword.
1124 */
1125DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1126{
1127 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1128 if (rcStrict == VINF_SUCCESS)
1129 {
1130 uint8_t offOpcode = pIemCpu->offOpcode;
1131 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1132 pIemCpu->abOpcode[offOpcode + 1],
1133 pIemCpu->abOpcode[offOpcode + 2],
1134 pIemCpu->abOpcode[offOpcode + 3]);
1135 pIemCpu->offOpcode = offOpcode + 4;
1136 }
1137 else
1138 *pu32 = 0;
1139 return rcStrict;
1140}
1141
1142
1143/**
1144 * Fetches the next opcode dword.
1145 *
1146 * @returns Strict VBox status code.
1147 * @param pIemCpu The IEM state.
1148 * @param pu32 Where to return the opcode double word.
1149 */
1150DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1151{
1152 uint8_t const offOpcode = pIemCpu->offOpcode;
1153 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1154 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1155
1156 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1157 pIemCpu->abOpcode[offOpcode + 1],
1158 pIemCpu->abOpcode[offOpcode + 2],
1159 pIemCpu->abOpcode[offOpcode + 3]);
1160 pIemCpu->offOpcode = offOpcode + 4;
1161 return VINF_SUCCESS;
1162}
1163
1164
1165/**
1166 * Fetches the next opcode dword, returns automatically on failure.
1167 *
1168 * @param a_pu32 Where to return the opcode dword.
1169 * @remark Implicitly references pIemCpu.
1170 */
1171#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1172 do \
1173 { \
1174 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1175 if (rcStrict2 != VINF_SUCCESS) \
1176 return rcStrict2; \
1177 } while (0)
1178
1179
1180/**
1181 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1182 *
1183 * @returns Strict VBox status code.
1184 * @param pIemCpu The IEM state.
1185 * @param pu64 Where to return the opcode quad word (zero extended dword).
1186 */
1187DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1188{
1189 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1190 if (rcStrict == VINF_SUCCESS)
1191 {
1192 uint8_t offOpcode = pIemCpu->offOpcode;
1193 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1194 pIemCpu->abOpcode[offOpcode + 1],
1195 pIemCpu->abOpcode[offOpcode + 2],
1196 pIemCpu->abOpcode[offOpcode + 3]);
1197 pIemCpu->offOpcode = offOpcode + 4;
1198 }
1199 else
1200 *pu64 = 0;
1201 return rcStrict;
1202}
1203
1204
1205/**
1206 * Fetches the next opcode dword, zero extending it to a quad word.
1207 *
1208 * @returns Strict VBox status code.
1209 * @param pIemCpu The IEM state.
1210 * @param pu64 Where to return the opcode quad word.
1211 */
1212DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1213{
1214 uint8_t const offOpcode = pIemCpu->offOpcode;
1215 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1216 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1217
1218 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1219 pIemCpu->abOpcode[offOpcode + 1],
1220 pIemCpu->abOpcode[offOpcode + 2],
1221 pIemCpu->abOpcode[offOpcode + 3]);
1222 pIemCpu->offOpcode = offOpcode + 4;
1223 return VINF_SUCCESS;
1224}
1225
1226
1227/**
1228 * Fetches the next opcode dword and zero extends it to a quad word, returns
1229 * automatically on failure.
1230 *
1231 * @param a_pu64 Where to return the opcode quad word.
1232 * @remark Implicitly references pIemCpu.
1233 */
1234#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1235 do \
1236 { \
1237 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1238 if (rcStrict2 != VINF_SUCCESS) \
1239 return rcStrict2; \
1240 } while (0)
1241
1242
1243/**
1244 * Fetches the next signed double word from the opcode stream.
1245 *
1246 * @returns Strict VBox status code.
1247 * @param pIemCpu The IEM state.
1248 * @param pi32 Where to return the signed double word.
1249 */
1250DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1251{
1252 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1253}
1254
1255/**
1256 * Fetches the next signed double word from the opcode stream, returning
1257 * automatically on failure.
1258 *
1259 * @param pi32 Where to return the signed double word.
1260 * @remark Implicitly references pIemCpu.
1261 */
1262#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1263 do \
1264 { \
1265 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1266 if (rcStrict2 != VINF_SUCCESS) \
1267 return rcStrict2; \
1268 } while (0)
1269
1270
1271/**
1272 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1273 *
1274 * @returns Strict VBox status code.
1275 * @param pIemCpu The IEM state.
1276 * @param pu64 Where to return the opcode qword.
1277 */
1278DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1279{
1280 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1281 if (rcStrict == VINF_SUCCESS)
1282 {
1283 uint8_t offOpcode = pIemCpu->offOpcode;
1284 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1285 pIemCpu->abOpcode[offOpcode + 1],
1286 pIemCpu->abOpcode[offOpcode + 2],
1287 pIemCpu->abOpcode[offOpcode + 3]);
1288 pIemCpu->offOpcode = offOpcode + 4;
1289 }
1290 else
1291 *pu64 = 0;
1292 return rcStrict;
1293}
1294
1295
1296/**
1297 * Fetches the next opcode dword, sign extending it into a quad word.
1298 *
1299 * @returns Strict VBox status code.
1300 * @param pIemCpu The IEM state.
1301 * @param pu64 Where to return the opcode quad word.
1302 */
1303DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1304{
1305 uint8_t const offOpcode = pIemCpu->offOpcode;
1306 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1307 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1308
1309 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1310 pIemCpu->abOpcode[offOpcode + 1],
1311 pIemCpu->abOpcode[offOpcode + 2],
1312 pIemCpu->abOpcode[offOpcode + 3]);
1313 *pu64 = i32;
1314 pIemCpu->offOpcode = offOpcode + 4;
1315 return VINF_SUCCESS;
1316}
1317
1318
1319/**
1320 * Fetches the next opcode double word and sign extends it to a quad word,
1321 * returns automatically on failure.
1322 *
1323 * @param a_pu64 Where to return the opcode quad word.
1324 * @remark Implicitly references pIemCpu.
1325 */
1326#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1327 do \
1328 { \
1329 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1330 if (rcStrict2 != VINF_SUCCESS) \
1331 return rcStrict2; \
1332 } while (0)
1333
1334
1335/**
1336 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1337 *
1338 * @returns Strict VBox status code.
1339 * @param pIemCpu The IEM state.
1340 * @param pu64 Where to return the opcode qword.
1341 */
1342DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1343{
1344 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1345 if (rcStrict == VINF_SUCCESS)
1346 {
1347 uint8_t offOpcode = pIemCpu->offOpcode;
1348 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1349 pIemCpu->abOpcode[offOpcode + 1],
1350 pIemCpu->abOpcode[offOpcode + 2],
1351 pIemCpu->abOpcode[offOpcode + 3],
1352 pIemCpu->abOpcode[offOpcode + 4],
1353 pIemCpu->abOpcode[offOpcode + 5],
1354 pIemCpu->abOpcode[offOpcode + 6],
1355 pIemCpu->abOpcode[offOpcode + 7]);
1356 pIemCpu->offOpcode = offOpcode + 8;
1357 }
1358 else
1359 *pu64 = 0;
1360 return rcStrict;
1361}
1362
1363
1364/**
1365 * Fetches the next opcode qword.
1366 *
1367 * @returns Strict VBox status code.
1368 * @param pIemCpu The IEM state.
1369 * @param pu64 Where to return the opcode qword.
1370 */
1371DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1372{
1373 uint8_t const offOpcode = pIemCpu->offOpcode;
1374 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1375 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1376
1377 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1378 pIemCpu->abOpcode[offOpcode + 1],
1379 pIemCpu->abOpcode[offOpcode + 2],
1380 pIemCpu->abOpcode[offOpcode + 3],
1381 pIemCpu->abOpcode[offOpcode + 4],
1382 pIemCpu->abOpcode[offOpcode + 5],
1383 pIemCpu->abOpcode[offOpcode + 6],
1384 pIemCpu->abOpcode[offOpcode + 7]);
1385 pIemCpu->offOpcode = offOpcode + 8;
1386 return VINF_SUCCESS;
1387}
1388
1389
1390/**
1391 * Fetches the next opcode quad word, returns automatically on failure.
1392 *
1393 * @param a_pu64 Where to return the opcode quad word.
1394 * @remark Implicitly references pIemCpu.
1395 */
1396#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1397 do \
1398 { \
1399 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1400 if (rcStrict2 != VINF_SUCCESS) \
1401 return rcStrict2; \
1402 } while (0)
1403
1404
1405/** @name Misc Worker Functions.
1406 * @{
1407 */
1408
1409
1410/**
1411 * Validates a new SS segment.
1412 *
1413 * @returns VBox strict status code.
1414 * @param pIemCpu The IEM per CPU instance data.
1415 * @param pCtx The CPU context.
1416 * @param NewSS The new SS selector.
1417 * @param uCpl The CPL to load the stack for.
1418 * @param pDesc Where to return the descriptor.
1419 */
1420static VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1421{
1422 NOREF(pCtx);
1423
1424 /* Null selectors are not allowed (we're not called for dispatching
1425 interrupts with SS=0 in long mode). */
1426 if (!(NewSS & (X86_SEL_MASK | X86_SEL_LDT)))
1427 {
1428 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #GP(0)\n", NewSS));
1429 return iemRaiseGeneralProtectionFault0(pIemCpu);
1430 }
1431
1432 /*
1433 * Read the descriptor.
1434 */
1435 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS);
1436 if (rcStrict != VINF_SUCCESS)
1437 return rcStrict;
1438
1439 /*
1440 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1441 */
1442 if (!pDesc->Legacy.Gen.u1DescType)
1443 {
1444 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1445 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1446 }
1447
1448 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1449 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1450 {
1451 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1452 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1453 }
1454 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1455 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1456 {
1457 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1458 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1459 }
1460 /** @todo testcase: check if the TSS.ssX RPL is checked. */
1461 if ((NewSS & X86_SEL_RPL) != uCpl)
1462 {
1463 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #GP\n", NewSS, uCpl));
1464 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1465 }
1466 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1467 {
1468 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #GP\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1469 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1470 }
1471
1472 /* Is it there? */
1473 /** @todo testcase: Is this checked before the canonical / limit check below? */
1474 if (!pDesc->Legacy.Gen.u1Present)
1475 {
1476 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1477 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
1478 }
1479
1480 return VINF_SUCCESS;
1481}
1482
1483
1484/** @} */
1485
1486/** @name Raising Exceptions.
1487 *
1488 * @{
1489 */
1490
1491/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
1492 * @{ */
1493/** CPU exception. */
1494#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
1495/** External interrupt (from PIC, APIC, whatever). */
1496#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
1497/** Software interrupt (int, into or bound). */
1498#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
1499/** Takes an error code. */
1500#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
1501/** Takes a CR2. */
1502#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
1503/** Generated by the breakpoint instruction. */
1504#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
1505/** @} */
1506
1507/**
1508 * Loads the specified stack far pointer from the TSS.
1509 *
1510 * @returns VBox strict status code.
1511 * @param pIemCpu The IEM per CPU instance data.
1512 * @param pCtx The CPU context.
1513 * @param uCpl The CPL to load the stack for.
1514 * @param pSelSS Where to return the new stack segment.
1515 * @param puEsp Where to return the new stack pointer.
1516 */
1517static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
1518 PRTSEL pSelSS, uint32_t *puEsp)
1519{
1520 VBOXSTRICTRC rcStrict;
1521 Assert(uCpl < 4);
1522 *puEsp = 0; /* make gcc happy */
1523 *pSelSS = 0; /* make gcc happy */
1524
1525 switch (pCtx->trHid.Attr.n.u4Type)
1526 {
1527 /*
1528 * 16-bit TSS (X86TSS16).
1529 */
1530 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
1531 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1532 {
1533 uint32_t off = uCpl * 4 + 2;
1534 if (off + 4 > pCtx->trHid.u32Limit)
1535 {
1536 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->trHid.u32Limit));
1537 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
1538 }
1539
1540 uint32_t u32Tmp = 0; /* gcc maybe... */
1541 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->trHid.u64Base + off);
1542 if (rcStrict == VINF_SUCCESS)
1543 {
1544 *puEsp = RT_LOWORD(u32Tmp);
1545 *pSelSS = RT_HIWORD(u32Tmp);
1546 return VINF_SUCCESS;
1547 }
1548 break;
1549 }
1550
1551 /*
1552 * 32-bit TSS (X86TSS32).
1553 */
1554 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
1555 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1556 {
1557 uint32_t off = uCpl * 8 + 4;
1558 if (off + 7 > pCtx->trHid.u32Limit)
1559 {
1560 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->trHid.u32Limit));
1561 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
1562 }
1563
1564 uint64_t u64Tmp;
1565 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->trHid.u64Base + off);
1566 if (rcStrict == VINF_SUCCESS)
1567 {
1568 *puEsp = u64Tmp & UINT32_MAX;
1569 *pSelSS = (RTSEL)(u64Tmp >> 32);
1570 return VINF_SUCCESS;
1571 }
1572 break;
1573 }
1574
1575 default:
1576 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
1577 }
1578 return rcStrict;
1579}
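
/*
 * Layout note (added for clarity, not part of the original file): the ss:sp
 * pairs for CPL 0..2 sit at fixed offsets in the TSS, which is where the
 * "uCpl * 4 + 2" and "uCpl * 8 + 4" computations above come from:
 *      16-bit TSS:  sp0 at 0x02, ss0 at 0x04, sp1 at 0x06, ...   (4 bytes per CPL)
 *      32-bit TSS:  esp0 at 0x04, ss0 at 0x08, esp1 at 0x0c, ... (8 bytes per CPL)
 * The 32-bit case reads the esp/ss pair as one qword and splits it into
 * *puEsp and *pSelSS.
 */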
1580
1581
1582/**
1583 * Adjust the CPU state according to the exception being raised.
1584 *
1585 * @param pCtx The CPU context.
1586 * @param u8Vector The exception that has been raised.
1587 */
1588DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
1589{
1590 switch (u8Vector)
1591 {
1592 case X86_XCPT_DB:
1593 pCtx->dr[7] &= ~X86_DR7_GD;
1594 break;
1595 /** @todo Read the AMD and Intel exception reference... */
1596 }
1597}
1598
1599
1600/**
1601 * Implements exceptions and interrupts for real mode.
1602 *
1603 * @returns VBox strict status code.
1604 * @param pIemCpu The IEM per CPU instance data.
1605 * @param pCtx The CPU context.
1606 * @param cbInstr The number of bytes to offset rIP by in the return
1607 * address.
1608 * @param u8Vector The interrupt / exception vector number.
1609 * @param fFlags The flags.
1610 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1611 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1612 */
1613static VBOXSTRICTRC
1614iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
1615 PCPUMCTX pCtx,
1616 uint8_t cbInstr,
1617 uint8_t u8Vector,
1618 uint32_t fFlags,
1619 uint16_t uErr,
1620 uint64_t uCr2)
1621{
1622 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_INTERNAL_ERROR_3);
1623 NOREF(uErr); NOREF(uCr2);
1624
1625 /*
1626 * Read the IDT entry.
1627 */
1628 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
1629 {
1630 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
1631 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1632 }
1633 RTFAR16 Idte;
1634 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
1635 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
1636 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1637 return rcStrict;
1638
1639 /*
1640 * Push the stack frame.
1641 */
1642 uint16_t *pu16Frame;
1643 uint64_t uNewRsp;
1644 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
1645 if (rcStrict != VINF_SUCCESS)
1646 return rcStrict;
1647
1648 pu16Frame[2] = (uint16_t)pCtx->eflags.u;
1649 pu16Frame[1] = (uint16_t)pCtx->cs;
1650 pu16Frame[0] = pCtx->ip + cbInstr;
1651 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
1652 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1653 return rcStrict;
1654
1655 /*
1656 * Load the vector address into cs:ip and make exception specific state
1657 * adjustments.
1658 */
1659 pCtx->cs = Idte.sel;
1660 pCtx->csHid.u64Base = (uint32_t)Idte.sel << 4;
1661 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
1662 pCtx->rip = Idte.off;
1663 pCtx->eflags.Bits.u1IF = 0;
1664
1665 /** @todo do we actually do this in real mode? */
1666 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1667 iemRaiseXcptAdjustState(pCtx, u8Vector);
1668
1669 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
1670}
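
/*
 * Reference sketch (added for clarity, not part of the original file): in real
 * mode the IDT is simply a table of 4-byte offset:segment pairs at IDTR.base,
 * and the code above builds the classic 3-word interrupt frame before loading
 * cs:ip from the vector entry:
 *
 *      push FLAGS
 *      push CS
 *      push IP (the return address, i.e. ip + cbInstr)
 *
 * hence the 6-byte iemMemStackPushBeginSpecial() request.
 */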
1671
1672
1673/**
1674 * Implements exceptions and interrupts for protected mode.
1675 *
1676 * @returns VBox strict status code.
1677 * @param pIemCpu The IEM per CPU instance data.
1678 * @param pCtx The CPU context.
1679 * @param cbInstr The number of bytes to offset rIP by in the return
1680 * address.
1681 * @param u8Vector The interrupt / exception vector number.
1682 * @param fFlags The flags.
1683 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1684 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1685 */
1686static VBOXSTRICTRC
1687iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
1688 PCPUMCTX pCtx,
1689 uint8_t cbInstr,
1690 uint8_t u8Vector,
1691 uint32_t fFlags,
1692 uint16_t uErr,
1693 uint64_t uCr2)
1694{
1695 NOREF(cbInstr);
1696
1697 /*
1698 * Read the IDT entry.
1699 */
1700 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
1701 {
1702 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
1703 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1704 }
1705 X86DESC Idte;
1706 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
1707 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
1708 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1709 return rcStrict;
1710
1711 /*
1712 * Check the descriptor type, DPL and such.
1713 * ASSUMES this is done in the same order as described for call-gate calls.
1714 */
1715 if (Idte.Gate.u1DescType)
1716 {
1717 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
1718 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1719 }
1720 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
1721 switch (Idte.Gate.u4Type)
1722 {
1723 case X86_SEL_TYPE_SYS_UNDEFINED:
1724 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1725 case X86_SEL_TYPE_SYS_LDT:
1726 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1727 case X86_SEL_TYPE_SYS_286_CALL_GATE:
1728 case X86_SEL_TYPE_SYS_UNDEFINED2:
1729 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1730 case X86_SEL_TYPE_SYS_UNDEFINED3:
1731 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1732 case X86_SEL_TYPE_SYS_386_CALL_GATE:
1733 case X86_SEL_TYPE_SYS_UNDEFINED4:
1734 {
1735 /** @todo check what actually happens when the type is wrong...
1736 * esp. call gates. */
1737 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
1738 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1739 }
1740
1741 case X86_SEL_TYPE_SYS_286_INT_GATE:
1742 case X86_SEL_TYPE_SYS_386_INT_GATE:
1743 fEflToClear |= X86_EFL_IF;
1744 break;
1745
1746 case X86_SEL_TYPE_SYS_TASK_GATE:
1747 /** @todo task gates. */
1748 AssertFailedReturn(VERR_NOT_SUPPORTED);
1749
1750 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
1751 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
1752 break;
1753
1754 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1755 }
1756
1757 /* Check DPL against CPL if applicable. */
1758 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
1759 {
1760 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
1761 {
1762 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
1763 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1764 }
1765 }
1766
1767 /* Is it there? */
1768 if (!Idte.Gate.u1Present)
1769 {
1770 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
1771 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1772 }
1773
1774 /* A null CS is bad. */
1775 RTSEL NewCS = Idte.Gate.u16Sel;
1776 if (!(NewCS & (X86_SEL_MASK | X86_SEL_LDT)))
1777 {
1778 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
1779 return iemRaiseGeneralProtectionFault0(pIemCpu);
1780 }
1781
1782 /* Fetch the descriptor for the new CS. */
1783 IEMSELDESC DescCS;
1784 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS);
1785 if (rcStrict != VINF_SUCCESS)
1786 return rcStrict;
1787
1788 /* Must be a code segment. */
1789 if (!DescCS.Legacy.Gen.u1DescType)
1790 {
1791 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
1792 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
1793 }
1794 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1795 {
1796 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
1797 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
1798 }
1799
1800 /* Don't allow lowering the privilege level. */
1801 /** @todo Does the lowering of privileges apply to software interrupts
1802 * only? This has bearings on the more-privileged or
1803 * same-privilege stack behavior further down. A testcase would
1804 * be nice. */
1805 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
1806 {
1807 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
1808 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1809 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
1810 }
1811 /** @todo is the RPL of the interrupt/trap gate descriptor checked? */
1812
1813 /* Check the new EIP against the new CS limit. */
1814 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
1815 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
1816 ? Idte.Gate.u16OffsetLow
1817 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
1818 uint32_t cbLimitCS = X86DESC_LIMIT(DescCS.Legacy);
1819 if (DescCS.Legacy.Gen.u1Granularity)
1820 cbLimitCS = (cbLimitCS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1821 if (uNewEip > cbLimitCS)
1822 {
1823 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - new EIP (%#x) exceeds the limit (%#x) -> #GP\n",
1824 u8Vector, NewCS, uNewEip, cbLimitCS));
1825 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
1826 }
1827
1828 /* Make sure the selector is present. */
1829 if (!DescCS.Legacy.Gen.u1Present)
1830 {
1831 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
1832 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
1833 }
1834
1835 /*
1836 * If the privilege level changes, we need to get a new stack from the TSS.
1837 * This in turn means validating the new SS and ESP...
1838 */
1839 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
1840 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
1841 if (uNewCpl != pIemCpu->uCpl)
1842 {
1843 RTSEL NewSS;
1844 uint32_t uNewEsp;
1845 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
1846 if (rcStrict != VINF_SUCCESS)
1847 return rcStrict;
1848
1849 IEMSELDESC DescSS;
1850 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
1851 if (rcStrict != VINF_SUCCESS)
1852 return rcStrict;
1853
1854 /* Check that there is sufficient space for the stack frame. */
1855 uint32_t cbLimitSS = X86DESC_LIMIT(DescSS.Legacy);
1856 if (DescSS.Legacy.Gen.u1Granularity)
1857 cbLimitSS = (cbLimitSS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1858 AssertReturn(!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN), VERR_NOT_IMPLEMENTED);
1859
1860 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 24 : 20;
1861 if ( uNewEsp - 1 > cbLimitSS
1862 || uNewEsp < cbStackFrame)
1863 {
1864 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
1865 u8Vector, NewSS, uNewEsp, cbStackFrame));
1866 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
1867 }
1868
1869 /*
1870 * Start making changes.
1871 */
1872
1873 /* Create the stack frame. */
1874 RTPTRUNION uStackFrame;
1875 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
1876 uNewEsp - cbStackFrame + X86DESC_BASE(DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
1877 if (rcStrict != VINF_SUCCESS)
1878 return rcStrict;
1879 void * const pvStackFrame = uStackFrame.pv;
1880
1881 if (fFlags & IEM_XCPT_FLAGS_ERR)
1882 *uStackFrame.pu32++ = uErr;
1883 uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
1884 ? pCtx->eip + cbInstr : pCtx->eip;
1885 uStackFrame.pu32[1] = (pCtx->cs & ~X86_SEL_RPL) | pIemCpu->uCpl;
1886 uStackFrame.pu32[2] = pCtx->eflags.u;
1887 uStackFrame.pu32[3] = pCtx->esp;
1888 uStackFrame.pu32[4] = pCtx->ss;
1889 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
1890 if (rcStrict != VINF_SUCCESS)
1891 return rcStrict;
1892
1893 /* Mark the selectors 'accessed' (hope this is the correct time). */
1894 /** @todo testcase: exactly _when_ are the accessed bits set - before or
1895 * after pushing the stack frame? (Write protect the gdt + stack to
1896 * find out.) */
1897 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1898 {
1899 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
1900 if (rcStrict != VINF_SUCCESS)
1901 return rcStrict;
1902 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1903 }
1904
1905 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1906 {
1907 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
1908 if (rcStrict != VINF_SUCCESS)
1909 return rcStrict;
1910 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1911 }
1912
1913 /*
1914 * Start committing the register changes (joins with the DPL=CPL branch).
1915 */
1916 pCtx->ss = NewSS;
1917 pCtx->ssHid.u32Limit = cbLimitSS;
1918 pCtx->ssHid.u64Base = X86DESC_BASE(DescSS.Legacy);
1919 pCtx->ssHid.Attr.u = X86DESC_GET_HID_ATTR(DescSS.Legacy);
1920 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
1921 pIemCpu->uCpl = uNewCpl;
1922 }
1923 /*
1924 * Same privilege, no stack change and smaller stack frame.
1925 */
1926 else
1927 {
1928 uint64_t uNewRsp;
1929 RTPTRUNION uStackFrame;
1930 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 16 : 12;
1931 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
1932 if (rcStrict != VINF_SUCCESS)
1933 return rcStrict;
1934 void * const pvStackFrame = uStackFrame.pv;
1935
1936 if (fFlags & IEM_XCPT_FLAGS_ERR)
1937 *uStackFrame.pu32++ = uErr;
1938 uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
1939 ? pCtx->eip + cbInstr : pCtx->eip;
1940 uStackFrame.pu32[1] = (pCtx->cs & ~X86_SEL_RPL) | pIemCpu->uCpl;
1941 uStackFrame.pu32[2] = pCtx->eflags.u;
1942 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
1943 if (rcStrict != VINF_SUCCESS)
1944 return rcStrict;
1945
1946 /* Mark the CS selector as 'accessed'. */
1947 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1948 {
1949 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
1950 if (rcStrict != VINF_SUCCESS)
1951 return rcStrict;
1952 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1953 }
1954
1955 /*
1956 * Start committing the register changes (joins with the other branch).
1957 */
1958 pCtx->rsp = uNewRsp;
1959 }
1960
1961 /* ... register committing continues. */
1962 pCtx->cs = (NewCS & ~X86_SEL_RPL) | uNewCpl;
1963 pCtx->csHid.u32Limit = cbLimitCS;
1964 pCtx->csHid.u64Base = X86DESC_BASE(DescCS.Legacy);
1965 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(DescCS.Legacy);
1966
1967 pCtx->rip = uNewEip;
1968 pCtx->rflags.u &= ~fEflToClear;
1969
1970 if (fFlags & IEM_XCPT_FLAGS_CR2)
1971 pCtx->cr2 = uCr2;
1972
1973 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1974 iemRaiseXcptAdjustState(pCtx, u8Vector);
1975
1976 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
1977}
1978
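/*
 * Illustrative summary, not part of the original source, of the two stack
 * frames built by the protected-mode dispatcher above (cf. cbStackFrame):
 *
 *      Privilege change (new SS:ESP fetched from the TSS), low address to high:
 *          [error code,] EIP, CS, EFLAGS, old ESP, old SS   (24 or 20 bytes).
 *      Same privilege, low address to high:
 *          [error code,] EIP, CS, EFLAGS                    (16 or 12 bytes).
 *
 * The error code, when present, sits at the lowest address because it is
 * written first via *uStackFrame.pu32++ before the return frame.
 */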
1979
1980/**
1981 * Implements exceptions and interrupts for V8086 mode.
1982 *
1983 * @returns VBox strict status code.
1984 * @param pIemCpu The IEM per CPU instance data.
1985 * @param pCtx The CPU context.
1986 * @param cbInstr The number of bytes to offset rIP by in the return
1987 * address.
1988 * @param u8Vector The interrupt / exception vector number.
1989 * @param fFlags The flags.
1990 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1991 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1992 */
1993static VBOXSTRICTRC
1994iemRaiseXcptOrIntInV8086Mode(PIEMCPU pIemCpu,
1995 PCPUMCTX pCtx,
1996 uint8_t cbInstr,
1997 uint8_t u8Vector,
1998 uint32_t fFlags,
1999 uint16_t uErr,
2000 uint64_t uCr2)
2001{
2002 NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
2003 AssertMsgFailed(("V8086 exception / interrupt dispatching\n"));
2004 return VERR_NOT_IMPLEMENTED;
2005}
2006
2007
2008/**
2009 * Implements exceptions and interrupts for long mode.
2010 *
2011 * @returns VBox strict status code.
2012 * @param pIemCpu The IEM per CPU instance data.
2013 * @param pCtx The CPU context.
2014 * @param cbInstr The number of bytes to offset rIP by in the return
2015 * address.
2016 * @param u8Vector The interrupt / exception vector number.
2017 * @param fFlags The flags.
2018 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2019 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2020 */
2021static VBOXSTRICTRC
2022iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
2023 PCPUMCTX pCtx,
2024 uint8_t cbInstr,
2025 uint8_t u8Vector,
2026 uint32_t fFlags,
2027 uint16_t uErr,
2028 uint64_t uCr2)
2029{
2030 NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
2031 AssertMsgFailed(("long mode exception / interrupt dispatching\n"));
2032 return VERR_NOT_IMPLEMENTED;
2033}
2034
2035
2036/**
2037 * Implements exceptions and interrupts.
2038 *
2039 * All exceptions and interrupts go through this function!
2040 *
2041 * @returns VBox strict status code.
2042 * @param pIemCpu The IEM per CPU instance data.
2043 * @param cbInstr The number of bytes to offset rIP by in the return
2044 * address.
2045 * @param u8Vector The interrupt / exception vector number.
2046 * @param fFlags The flags.
2047 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2048 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2049 */
2050DECL_NO_INLINE(static, VBOXSTRICTRC)
2051iemRaiseXcptOrInt(PIEMCPU pIemCpu,
2052 uint8_t cbInstr,
2053 uint8_t u8Vector,
2054 uint32_t fFlags,
2055 uint16_t uErr,
2056 uint64_t uCr2)
2057{
2058 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2059
2060 /*
2061 * Do recursion accounting.
2062 */
2063 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
2064 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
2065 if (pIemCpu->cXcptRecursions == 0)
2066 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
2067 u8Vector, pCtx->cs, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
2068 else
2069 {
2070 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
2071 u8Vector, pCtx->cs, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
2072
2073 /** @todo double and triple faults. */
2074 AssertReturn(pIemCpu->cXcptRecursions < 3, VERR_IEM_ASPECT_NOT_IMPLEMENTED);
2075
2076 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
2077 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
2078 {
2079 ....
2080 } */
2081 }
2082 pIemCpu->cXcptRecursions++;
2083 pIemCpu->uCurXcpt = u8Vector;
2084 pIemCpu->fCurXcpt = fFlags;
2085
2086 /*
2087 * Extensive logging.
2088 */
2089#ifdef LOG_ENABLED
2090 if (LogIs3Enabled())
2091 {
2092 PVM pVM = IEMCPU_TO_VM(pIemCpu);
2093 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2094 char szRegs[4096];
2095 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2096 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2097 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2098 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2099 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2100 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2101 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2102 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2103 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2104 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2105 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2106 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2107 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2108 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2109 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2110 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2111 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2112 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2113 " efer=%016VR{efer}\n"
2114 " pat=%016VR{pat}\n"
2115 " sf_mask=%016VR{sf_mask}\n"
2116 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2117 " lstar=%016VR{lstar}\n"
2118 " star=%016VR{star} cstar=%016VR{cstar}\n"
2119 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2120 );
2121
2122 char szInstr[256];
2123 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
2124 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2125 szInstr, sizeof(szInstr), NULL);
2126 Log3(("%s%s\n", szRegs, szInstr));
2127 }
2128#endif /* LOG_ENABLED */
2129
2130 /*
2131 * Call the mode specific worker function.
2132 */
2133 VBOXSTRICTRC rcStrict;
2134 if (!(pCtx->cr0 & X86_CR0_PE))
2135 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2136 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2137 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2138 else if (!pCtx->eflags.Bits.u1VM)
2139 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2140 else
2141 rcStrict = iemRaiseXcptOrIntInV8086Mode(pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2142
2143 /*
2144 * Unwind.
2145 */
2146 pIemCpu->cXcptRecursions--;
2147 pIemCpu->uCurXcpt = uPrevXcpt;
2148 pIemCpu->fCurXcpt = fPrevXcpt;
2149 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv\n",
2150 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs, pCtx->rip, pCtx->ss, pCtx->esp));
2151 return rcStrict;
2152}
2153
2154
2155/** \#DE - 00. */
2156DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
2157{
2158 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2159}
2160
2161
2162/** \#DB - 01. */
2163DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
2164{
2165 /** @todo set/clear RF. */
2166 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2167}
2168
2169
2170/** \#UD - 06. */
2171DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
2172{
2173 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2174}
2175
2176
2177/** \#NM - 07. */
2178DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
2179{
2180 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2181}
2182
2183
2184#ifdef SOME_UNUSED_FUNCTION
2185/** \#TS(err) - 0a. */
2186DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
2187{
2188 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2189}
2190#endif
2191
2192
2193/** \#TS(tr) - 0a. */
2194DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
2195{
2196 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2197 pIemCpu->CTX_SUFF(pCtx)->tr, 0);
2198}
2199
2200
2201/** \#NP(err) - 0b. */
2202DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
2203{
2204 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2205}
2206
2207
2208/** \#NP(seg) - 0b. */
2209DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
2210{
2211 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2212 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
2213}
2214
2215
2216/** \#NP(sel) - 0b. */
2217DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
2218{
2219 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2220 uSel & ~X86_SEL_RPL, 0);
2221}
2222
2223
2224/** \#SS(seg) - 0c. */
2225DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
2226{
2227 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2228 uSel & ~X86_SEL_RPL, 0);
2229}
2230
2231
2232/** \#GP(n) - 0d. */
2233DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
2234{
2235 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2236}
2237
2238
2239/** \#GP(0) - 0d. */
2240DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
2241{
2242 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2243}
2244
2245
2246/** \#GP(sel) - 0d. */
2247DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
2248{
2249 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2250 Sel & ~X86_SEL_RPL, 0);
2251}
2252
2253
2254/** \#GP(0) - 0d. */
2255DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
2256{
2257 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2258}
2259
2260
2261/** \#GP(sel) - 0d. */
2262DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
2263{
2264 NOREF(iSegReg); NOREF(fAccess);
2265 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
2266 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2267}
2268
2269
2270/** \#GP(sel) - 0d. */
2271DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
2272{
2273 NOREF(Sel);
2274 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2275}
2276
2277
2278/** \#GP(sel) - 0d. */
2279DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
2280{
2281 NOREF(iSegReg); NOREF(fAccess);
2282 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2283}
2284
2285
2286/** \#PF(n) - 0e. */
2287DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
2288{
2289 uint16_t uErr;
2290 switch (rc)
2291 {
2292 case VERR_PAGE_NOT_PRESENT:
2293 case VERR_PAGE_TABLE_NOT_PRESENT:
2294 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
2295 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
2296 uErr = 0;
2297 break;
2298
2299 default:
2300 AssertMsgFailed(("%Rrc\n", rc));
2301 case VERR_ACCESS_DENIED:
2302 uErr = X86_TRAP_PF_P;
2303 break;
2304
2305 /** @todo reserved */
2306 }
2307
2308 if (pIemCpu->uCpl == 3)
2309 uErr |= X86_TRAP_PF_US;
2310
2311 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
2312 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
2313 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
2314 uErr |= X86_TRAP_PF_ID;
2315
2316 if (fAccess & IEM_ACCESS_TYPE_WRITE)
2317 uErr |= X86_TRAP_PF_RW;
2318
2319 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
2320 uErr, GCPtrWhere);
2321}
2322
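/*
 * Summary, not part of the original source, of the #PF error code bits
 * assembled above (standard x86 page fault error code layout):
 *
 *      X86_TRAP_PF_P  (bit 0) - protection violation rather than a not-present
 *                               page (set for VERR_ACCESS_DENIED).
 *      X86_TRAP_PF_RW (bit 1) - the access was a write.
 *      X86_TRAP_PF_US (bit 2) - the access was made from CPL 3.
 *      X86_TRAP_PF_ID (bit 4) - instruction fetch with PAE paging + EFER.NXE.
 *
 * E.g. a user-mode write to a present read-only page yields uErr = 0x07.
 */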
2323
2324/** \#MF(n) - 10. */
2325DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
2326{
2327 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2328}
2329
2330
2331/**
2332 * Macro for calling iemCImplRaiseDivideError().
2333 *
2334 * This enables us to add/remove arguments and force different levels of
2335 * inlining as we wish.
2336 *
2337 * @return Strict VBox status code.
2338 */
2339#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
2340IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
2341{
2342 NOREF(cbInstr);
2343 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2344}
2345
2346
2347/**
2348 * Macro for calling iemCImplRaiseInvalidLockPrefix().
2349 *
2350 * This enables us to add/remove arguments and force different levels of
2351 * inlining as we wish.
2352 *
2353 * @return Strict VBox status code.
2354 */
2355#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
2356IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
2357{
2358 NOREF(cbInstr);
2359 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2360}
2361
2362
2363/**
2364 * Macro for calling iemCImplRaiseInvalidOpcode().
2365 *
2366 * This enables us to add/remove arguments and force different levels of
2367 * inlining as we wish.
2368 *
2369 * @return Strict VBox status code.
2370 */
2371#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
2372IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
2373{
2374 NOREF(cbInstr);
2375 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2376}
2377
2378
2379/** @} */
2380
2381
2382/*
2383 *
2384 * Helper routines.
2385 * Helper routines.
2386 * Helper routines.
2387 *
2388 */
2389
2390/**
2391 * Recalculates the effective operand size.
2392 *
2393 * @param pIemCpu The IEM state.
2394 */
2395static void iemRecalEffOpSize(PIEMCPU pIemCpu)
2396{
2397 switch (pIemCpu->enmCpuMode)
2398 {
2399 case IEMMODE_16BIT:
2400 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
2401 break;
2402 case IEMMODE_32BIT:
2403 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
2404 break;
2405 case IEMMODE_64BIT:
2406 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
2407 {
2408 case 0:
2409 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
2410 break;
2411 case IEM_OP_PRF_SIZE_OP:
2412 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
2413 break;
2414 case IEM_OP_PRF_SIZE_REX_W:
2415 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
2416 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
2417 break;
2418 }
2419 break;
2420 default:
2421 AssertFailed();
2422 }
2423}
2424
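/*
 * Illustrative summary, not part of the original source, of the 64-bit mode
 * cases handled above (16-bit and 32-bit modes simply toggle on the 66h prefix):
 *
 *      no size prefixes            -> enmDefOpSize (normally 32-bit)
 *      66h only                    -> 16-bit
 *      REX.W, with or without 66h  -> 64-bit (REX.W takes precedence)
 */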
2425
2426/**
2427 * Sets the default operand size to 64-bit and recalculates the effective
2428 * operand size.
2429 *
2430 * @param pIemCpu The IEM state.
2431 */
2432static void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
2433{
2434 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2435 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
2436 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
2437 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
2438 else
2439 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
2440}
2441
2442
2443/*
2444 *
2445 * Common opcode decoders.
2446 * Common opcode decoders.
2447 * Common opcode decoders.
2448 *
2449 */
2450#include <iprt/mem.h>
2451
2452/**
2453 * Used to add extra details about a stub case.
2454 * @param pIemCpu The IEM per CPU state.
2455 */
2456static void iemOpStubMsg2(PIEMCPU pIemCpu)
2457{
2458 PVM pVM = IEMCPU_TO_VM(pIemCpu);
2459 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2460 char szRegs[4096];
2461 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2462 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2463 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2464 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2465 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2466 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2467 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2468 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2469 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2470 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2471 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2472 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2473 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2474 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2475 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2476 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2477 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2478 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2479 " efer=%016VR{efer}\n"
2480 " pat=%016VR{pat}\n"
2481 " sf_mask=%016VR{sf_mask}\n"
2482 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2483 " lstar=%016VR{lstar}\n"
2484 " star=%016VR{star} cstar=%016VR{cstar}\n"
2485 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2486 );
2487
2488 char szInstr[256];
2489 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
2490 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2491 szInstr, sizeof(szInstr), NULL);
2492
2493 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
2494}
2495
2496
2497/** Stubs an opcode. */
2498#define FNIEMOP_STUB(a_Name) \
2499 FNIEMOP_DEF(a_Name) \
2500 { \
2501 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
2502 iemOpStubMsg2(pIemCpu); \
2503 RTAssertPanic(); \
2504 return VERR_NOT_IMPLEMENTED; \
2505 } \
2506 typedef int ignore_semicolon
2507
2508/** Stubs an opcode. */
2509#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
2510 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
2511 { \
2512 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
2513 iemOpStubMsg2(pIemCpu); \
2514 RTAssertPanic(); \
2515 NOREF(a_Name0); \
2516 return VERR_NOT_IMPLEMENTED; \
2517 } \
2518 typedef int ignore_semicolon
2519
2520
2521
2522/** @name Register Access.
2523 * @{
2524 */
2525
2526/**
2527 * Gets a reference (pointer) to the specified hidden segment register.
2528 *
2529 * @returns Hidden register reference.
2530 * @param pIemCpu The per CPU data.
2531 * @param iSegReg The segment register.
2532 */
2533static PCPUMSELREGHID iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
2534{
2535 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2536 switch (iSegReg)
2537 {
2538 case X86_SREG_ES: return &pCtx->esHid;
2539 case X86_SREG_CS: return &pCtx->csHid;
2540 case X86_SREG_SS: return &pCtx->ssHid;
2541 case X86_SREG_DS: return &pCtx->dsHid;
2542 case X86_SREG_FS: return &pCtx->fsHid;
2543 case X86_SREG_GS: return &pCtx->gsHid;
2544 }
2545 AssertFailedReturn(NULL);
2546}
2547
2548
2549/**
2550 * Gets a reference (pointer) to the specified segment register (the selector
2551 * value).
2552 *
2553 * @returns Pointer to the selector variable.
2554 * @param pIemCpu The per CPU data.
2555 * @param iSegReg The segment register.
2556 */
2557static uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
2558{
2559 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2560 switch (iSegReg)
2561 {
2562 case X86_SREG_ES: return &pCtx->es;
2563 case X86_SREG_CS: return &pCtx->cs;
2564 case X86_SREG_SS: return &pCtx->ss;
2565 case X86_SREG_DS: return &pCtx->ds;
2566 case X86_SREG_FS: return &pCtx->fs;
2567 case X86_SREG_GS: return &pCtx->gs;
2568 }
2569 AssertFailedReturn(NULL);
2570}
2571
2572
2573/**
2574 * Fetches the selector value of a segment register.
2575 *
2576 * @returns The selector value.
2577 * @param pIemCpu The per CPU data.
2578 * @param iSegReg The segment register.
2579 */
2580static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
2581{
2582 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2583 switch (iSegReg)
2584 {
2585 case X86_SREG_ES: return pCtx->es;
2586 case X86_SREG_CS: return pCtx->cs;
2587 case X86_SREG_SS: return pCtx->ss;
2588 case X86_SREG_DS: return pCtx->ds;
2589 case X86_SREG_FS: return pCtx->fs;
2590 case X86_SREG_GS: return pCtx->gs;
2591 }
2592 AssertFailedReturn(0xffff);
2593}
2594
2595
2596/**
2597 * Gets a reference (pointer) to the specified general register.
2598 *
2599 * @returns Register reference.
2600 * @param pIemCpu The per CPU data.
2601 * @param iReg The general register.
2602 */
2603static void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
2604{
2605 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2606 switch (iReg)
2607 {
2608 case X86_GREG_xAX: return &pCtx->rax;
2609 case X86_GREG_xCX: return &pCtx->rcx;
2610 case X86_GREG_xDX: return &pCtx->rdx;
2611 case X86_GREG_xBX: return &pCtx->rbx;
2612 case X86_GREG_xSP: return &pCtx->rsp;
2613 case X86_GREG_xBP: return &pCtx->rbp;
2614 case X86_GREG_xSI: return &pCtx->rsi;
2615 case X86_GREG_xDI: return &pCtx->rdi;
2616 case X86_GREG_x8: return &pCtx->r8;
2617 case X86_GREG_x9: return &pCtx->r9;
2618 case X86_GREG_x10: return &pCtx->r10;
2619 case X86_GREG_x11: return &pCtx->r11;
2620 case X86_GREG_x12: return &pCtx->r12;
2621 case X86_GREG_x13: return &pCtx->r13;
2622 case X86_GREG_x14: return &pCtx->r14;
2623 case X86_GREG_x15: return &pCtx->r15;
2624 }
2625 AssertFailedReturn(NULL);
2626}
2627
2628
2629/**
2630 * Gets a reference (pointer) to the specified 8-bit general register.
2631 *
2632 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
2633 *
2634 * @returns Register reference.
2635 * @param pIemCpu The per CPU data.
2636 * @param iReg The register.
2637 */
2638static uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
2639{
2640 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
2641 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
2642
2643 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
2644 if (iReg >= 4)
2645 pu8Reg++;
2646 return pu8Reg;
2647}
2648
2649
2650/**
2651 * Fetches the value of an 8-bit general register.
2652 *
2653 * @returns The register value.
2654 * @param pIemCpu The per CPU data.
2655 * @param iReg The register.
2656 */
2657static uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
2658{
2659 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
2660 return *pbSrc;
2661}
2662
2663
2664/**
2665 * Fetches the value of a 16-bit general register.
2666 *
2667 * @returns The register value.
2668 * @param pIemCpu The per CPU data.
2669 * @param iReg The register.
2670 */
2671static uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
2672{
2673 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
2674}
2675
2676
2677/**
2678 * Fetches the value of a 32-bit general register.
2679 *
2680 * @returns The register value.
2681 * @param pIemCpu The per CPU data.
2682 * @param iReg The register.
2683 */
2684static uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
2685{
2686 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
2687}
2688
2689
2690/**
2691 * Fetches the value of a 64-bit general register.
2692 *
2693 * @returns The register value.
2694 * @param pIemCpu The per CPU data.
2695 * @param iReg The register.
2696 */
2697static uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
2698{
2699 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
2700}
2701
2702
2703/**
2704 * Checks whether the FPU state is in FXSAVE format.
2705 *
2706 * @returns true if it is, false if it's in FNSAVE format.
2707 * @param pIemCpu The IEM per CPU data.
2708 */
2709DECLINLINE(bool) iemFRegIsFxSaveFormat(PIEMCPU pIemCpu)
2710{
2711#ifdef RT_ARCH_AMD64
2712 NOREF(pIemCpu);
2713 return true;
2714#else
2715 NOREF(pIemCpu); /// @todo return pVCpu->pVMR3->cpum.s.CPUFeatures.edx.u1FXSR;
2716 return true;
2717#endif
2718}
2719
2720
2721/**
2722 * Gets the FPU status word.
2723 *
2724 * @returns FPU status word
2725 * @param pIemCpu The per CPU data.
2726 */
2727static uint16_t iemFRegFetchFsw(PIEMCPU pIemCpu)
2728{
2729 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2730 uint16_t u16Fsw;
2731 if (iemFRegIsFxSaveFormat(pIemCpu))
2732 u16Fsw = pCtx->fpu.FSW;
2733 else
2734 {
2735 PX86FPUSTATE pFpu = (PX86FPUSTATE)&pCtx->fpu;
2736 u16Fsw = pFpu->FSW;
2737 }
2738 return u16Fsw;
2739}
2740
2741/**
2742 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
2743 *
2744 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2745 * segment limit.
2746 *
2747 * @param pIemCpu The per CPU data.
2748 * @param offNextInstr The offset of the next instruction.
2749 */
2750static VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
2751{
2752 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2753 switch (pIemCpu->enmEffOpSize)
2754 {
2755 case IEMMODE_16BIT:
2756 {
2757 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
2758 if ( uNewIp > pCtx->csHid.u32Limit
2759 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
2760 return iemRaiseGeneralProtectionFault0(pIemCpu);
2761 pCtx->rip = uNewIp;
2762 break;
2763 }
2764
2765 case IEMMODE_32BIT:
2766 {
2767 Assert(pCtx->rip <= UINT32_MAX);
2768 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2769
2770 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
2771 if (uNewEip > pCtx->csHid.u32Limit)
2772 return iemRaiseGeneralProtectionFault0(pIemCpu);
2773 pCtx->rip = uNewEip;
2774 break;
2775 }
2776
2777 case IEMMODE_64BIT:
2778 {
2779 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2780
2781 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
2782 if (!IEM_IS_CANONICAL(uNewRip))
2783 return iemRaiseGeneralProtectionFault0(pIemCpu);
2784 pCtx->rip = uNewRip;
2785 break;
2786 }
2787
2788 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2789 }
2790
2791 return VINF_SUCCESS;
2792}
2793
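/*
 * Worked example, not part of the original source, assuming offOpcode equals
 * the full instruction length by the time the jump is executed:
 *
 *      16-bit code, IP=1000h, 2-byte "JMP short +10h" (offNextInstr = 0x10):
 *          uNewIp = 0x1000 + 0x10 + 2 = 0x1012, which is checked against the
 *          CS limit and then stored into rip.
 */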
2794
2795/**
2796 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
2797 *
2798 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2799 * segment limit.
2800 *
2801 * @returns Strict VBox status code.
2802 * @param pIemCpu The per CPU data.
2803 * @param offNextInstr The offset of the next instruction.
2804 */
2805static VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
2806{
2807 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2808 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
2809
2810 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
2811 if ( uNewIp > pCtx->csHid.u32Limit
2812 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
2813 return iemRaiseGeneralProtectionFault0(pIemCpu);
2814 /** @todo Test 16-bit jump in 64-bit mode. */
2815 pCtx->rip = uNewIp;
2816
2817 return VINF_SUCCESS;
2818}
2819
2820
2821/**
2822 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
2823 *
2824 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2825 * segment limit.
2826 *
2827 * @returns Strict VBox status code.
2828 * @param pIemCpu The per CPU data.
2829 * @param offNextInstr The offset of the next instruction.
2830 */
2831static VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
2832{
2833 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2834 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
2835
2836 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
2837 {
2838 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2839
2840 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
2841 if (uNewEip > pCtx->csHid.u32Limit)
2842 return iemRaiseGeneralProtectionFault0(pIemCpu);
2843 pCtx->rip = uNewEip;
2844 }
2845 else
2846 {
2847 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2848
2849 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
2850 if (!IEM_IS_CANONICAL(uNewRip))
2851 return iemRaiseGeneralProtectionFault0(pIemCpu);
2852 pCtx->rip = uNewRip;
2853 }
2854 return VINF_SUCCESS;
2855}
2856
2857
2858/**
2859 * Performs a near jump to the specified address.
2860 *
2861 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2862 * segment limit.
2863 *
2864 * @param pIemCpu The per CPU data.
2865 * @param uNewRip The new RIP value.
2866 */
2867static VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
2868{
2869 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2870 switch (pIemCpu->enmEffOpSize)
2871 {
2872 case IEMMODE_16BIT:
2873 {
2874 Assert(uNewRip <= UINT16_MAX);
2875 if ( uNewRip > pCtx->csHid.u32Limit
2876 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
2877 return iemRaiseGeneralProtectionFault0(pIemCpu);
2878 /** @todo Test 16-bit jump in 64-bit mode. */
2879 pCtx->rip = uNewRip;
2880 break;
2881 }
2882
2883 case IEMMODE_32BIT:
2884 {
2885 Assert(uNewRip <= UINT32_MAX);
2886 Assert(pCtx->rip <= UINT32_MAX);
2887 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2888
2889 if (uNewRip > pCtx->csHid.u32Limit)
2890 return iemRaiseGeneralProtectionFault0(pIemCpu);
2891 pCtx->rip = uNewRip;
2892 break;
2893 }
2894
2895 case IEMMODE_64BIT:
2896 {
2897 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2898
2899 if (!IEM_IS_CANONICAL(uNewRip))
2900 return iemRaiseGeneralProtectionFault0(pIemCpu);
2901 pCtx->rip = uNewRip;
2902 break;
2903 }
2904
2905 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2906 }
2907
2908 return VINF_SUCCESS;
2909}
2910
2911
2912/**
2913 * Get the address of the top of the stack.
2914 *
2915 * @param pCtx The CPU context from which SP/ESP/RSP should be
2916 * read.
2917 */
2918DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCCPUMCTX pCtx)
2919{
2920 if (pCtx->ssHid.Attr.n.u1Long)
2921 return pCtx->rsp;
2922 if (pCtx->ssHid.Attr.n.u1DefBig)
2923 return pCtx->esp;
2924 return pCtx->sp;
2925}
2926
2927
2928/**
2929 * Updates the RIP/EIP/IP to point to the next instruction.
2930 *
2931 * @param pIemCpu The per CPU data.
2932 * @param cbInstr The number of bytes to add.
2933 */
2934static void iemRegAddToRip(PIEMCPU pIemCpu, uint8_t cbInstr)
2935{
2936 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2937 switch (pIemCpu->enmCpuMode)
2938 {
2939 case IEMMODE_16BIT:
2940 Assert(pCtx->rip <= UINT16_MAX);
2941 pCtx->eip += cbInstr;
2942 pCtx->eip &= UINT32_C(0xffff);
2943 break;
2944
2945 case IEMMODE_32BIT:
2946 pCtx->eip += cbInstr;
2947 Assert(pCtx->rip <= UINT32_MAX);
2948 break;
2949
2950 case IEMMODE_64BIT:
2951 pCtx->rip += cbInstr;
2952 break;
2953 default: AssertFailed();
2954 }
2955}
2956
2957
2958/**
2959 * Updates the RIP/EIP/IP to point to the next instruction.
2960 *
2961 * @param pIemCpu The per CPU data.
2962 */
2963static void iemRegUpdateRip(PIEMCPU pIemCpu)
2964{
2965 iemRegAddToRip(pIemCpu, pIemCpu->offOpcode);
2966}
2967
2968
2969/**
2970 * Adds to the stack pointer.
2971 *
2972 * @param pCtx The CPU context which SP/ESP/RSP should be
2973 * updated.
2974 * @param cbToAdd The number of bytes to add.
2975 */
2976DECLINLINE(void) iemRegAddToRsp(PCPUMCTX pCtx, uint8_t cbToAdd)
2977{
2978 if (pCtx->ssHid.Attr.n.u1Long)
2979 pCtx->rsp += cbToAdd;
2980 else if (pCtx->ssHid.Attr.n.u1DefBig)
2981 pCtx->esp += cbToAdd;
2982 else
2983 pCtx->sp += cbToAdd;
2984}
2985
2986
2987/**
2988 * Subtracts from the stack pointer.
2989 *
2990 * @param pCtx The CPU context which SP/ESP/RSP should be
2991 * updated.
2992 * @param cbToSub The number of bytes to subtract.
2993 */
2994DECLINLINE(void) iemRegSubFromRsp(PCPUMCTX pCtx, uint8_t cbToSub)
2995{
2996 if (pCtx->ssHid.Attr.n.u1Long)
2997 pCtx->rsp -= cbToSub;
2998 else if (pCtx->ssHid.Attr.n.u1DefBig)
2999 pCtx->esp -= cbToSub;
3000 else
3001 pCtx->sp -= cbToSub;
3002}
3003
3004
3005/**
3006 * Adds to the temporary stack pointer.
3007 *
3008 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3009 * @param cbToAdd The number of bytes to add.
3010 * @param pCtx Where to get the current stack mode.
3011 */
3012DECLINLINE(void) iemRegAddToRspEx(PRTUINT64U pTmpRsp, uint8_t cbToAdd, PCCPUMCTX pCtx)
3013{
3014 if (pCtx->ssHid.Attr.n.u1Long)
3015 pTmpRsp->u += cbToAdd;
3016 else if (pCtx->ssHid.Attr.n.u1DefBig)
3017 pTmpRsp->DWords.dw0 += cbToAdd;
3018 else
3019 pTmpRsp->Words.w0 += cbToAdd;
3020}
3021
3022
3023/**
3024 * Subtracts from the temporary stack pointer.
3025 *
3026 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3027 * @param cbToSub The number of bytes to subtract.
3028 * @param pCtx Where to get the current stack mode.
3029 */
3030DECLINLINE(void) iemRegSubFromRspEx(PRTUINT64U pTmpRsp, uint8_t cbToSub, PCCPUMCTX pCtx)
3031{
3032 if (pCtx->ssHid.Attr.n.u1Long)
3033 pTmpRsp->u -= cbToSub;
3034 else if (pCtx->ssHid.Attr.n.u1DefBig)
3035 pTmpRsp->DWords.dw0 -= cbToSub;
3036 else
3037 pTmpRsp->Words.w0 -= cbToSub;
3038}
3039
3040
3041/**
3042 * Calculates the effective stack address for a push of the specified size as
3043 * well as the new RSP value (upper bits may be masked).
3044 *
3045 * @returns Effective stack address for the push.
3046 * @param pCtx Where to get the current stack mode.
3047 * @param cbItem The size of the stack item to push.
3048 * @param puNewRsp Where to return the new RSP value.
3049 */
3050DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3051{
3052 RTUINT64U uTmpRsp;
3053 RTGCPTR GCPtrTop;
3054 uTmpRsp.u = pCtx->rsp;
3055
3056 if (pCtx->ssHid.Attr.n.u1Long)
3057 GCPtrTop = uTmpRsp.u -= cbItem;
3058 else if (pCtx->ssHid.Attr.n.u1DefBig)
3059 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
3060 else
3061 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
3062 *puNewRsp = uTmpRsp.u;
3063 return GCPtrTop;
3064}
3065
3066
3067/**
3068 * Gets the current stack pointer and calculates the value after a pop of the
3069 * specified size.
3070 *
3071 * @returns Current stack pointer.
3072 * @param pCtx Where to get the current stack mode.
3073 * @param cbItem The size of the stack item to pop.
3074 * @param puNewRsp Where to return the new RSP value.
3075 */
3076DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3077{
3078 RTUINT64U uTmpRsp;
3079 RTGCPTR GCPtrTop;
3080 uTmpRsp.u = pCtx->rsp;
3081
3082 if (pCtx->ssHid.Attr.n.u1Long)
3083 {
3084 GCPtrTop = uTmpRsp.u;
3085 uTmpRsp.u += cbItem;
3086 }
3087 else if (pCtx->ssHid.Attr.n.u1DefBig)
3088 {
3089 GCPtrTop = uTmpRsp.DWords.dw0;
3090 uTmpRsp.DWords.dw0 += cbItem;
3091 }
3092 else
3093 {
3094 GCPtrTop = uTmpRsp.Words.w0;
3095 uTmpRsp.Words.w0 += cbItem;
3096 }
3097 *puNewRsp = uTmpRsp.u;
3098 return GCPtrTop;
3099}
3100
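/*
 * Worked example, not part of the original source, for iemRegGetRspForPush
 * above with a 16-bit stack (SS neither long nor 32-bit):
 *
 *      RSP = 0x00120000, cbItem = 2:
 *          only Words.w0 is decremented, so SP wraps to 0xFFFE; the function
 *          returns 0xFFFE and *puNewRsp becomes 0x0012FFFE - the upper bits
 *          of RSP are left untouched.
 */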
3101
3102/**
3103 * Calculates the effective stack address for a push of the specified size as
3104 * well as the new temporary RSP value (upper bits may be masked).
3105 *
3106 * @returns Effective stack address for the push.
3107 * @param pTmpRsp The temporary stack pointer. This is updated.
3108 * @param cbItem The size of the stack item to push.
3109 * @param pCtx Where to get the current stack mode.
3110 */
3111DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
3112{
3113 RTGCPTR GCPtrTop;
3114
3115 if (pCtx->ssHid.Attr.n.u1Long)
3116 GCPtrTop = pTmpRsp->u -= cbItem;
3117 else if (pCtx->ssHid.Attr.n.u1DefBig)
3118 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
3119 else
3120 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
3121 return GCPtrTop;
3122}
3123
3124
3125/**
3126 * Gets the effective stack address for a pop of the specified size and
3127 * calculates and updates the temporary RSP.
3128 *
3129 * @returns Current stack pointer.
3130 * @param pTmpRsp The temporary stack pointer. This is updated.
3131 * @param pCtx Where to get the current stack mode.
3132 * @param cbItem The size of the stack item to pop.
3133 */
3134DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
3135{
3136 RTGCPTR GCPtrTop;
3137 if (pCtx->ssHid.Attr.n.u1Long)
3138 {
3139 GCPtrTop = pTmpRsp->u;
3140 pTmpRsp->u += cbItem;
3141 }
3142 else if (pCtx->ssHid.Attr.n.u1DefBig)
3143 {
3144 GCPtrTop = pTmpRsp->DWords.dw0;
3145 pTmpRsp->DWords.dw0 += cbItem;
3146 }
3147 else
3148 {
3149 GCPtrTop = pTmpRsp->Words.w0;
3150 pTmpRsp->Words.w0 += cbItem;
3151 }
3152 return GCPtrTop;
3153}
3154
3155
3156/**
3157 * Checks if an Intel CPUID feature bit is set.
3158 *
3159 * @returns true / false.
3160 *
3161 * @param pIemCpu The IEM per CPU data.
3162 * @param fEdx The EDX bit to test, or 0 if ECX.
3163 * @param fEcx The ECX bit to test, or 0 if EDX.
3164 * @remarks Used via IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX,
3165 * IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX and others.
3166 */
3167static bool iemRegIsIntelCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
3168{
3169 uint32_t uEax, uEbx, uEcx, uEdx;
3170 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x00000001, &uEax, &uEbx, &uEcx, &uEdx);
3171 return (fEcx && (uEcx & fEcx))
3172 || (fEdx && (uEdx & fEdx));
3173}
3174
3175
3176/**
3177 * Checks if an AMD CPUID feature bit is set.
3178 *
3179 * @returns true / false.
3180 *
3181 * @param pIemCpu The IEM per CPU data.
3182 * @param fEdx The EDX bit to test, or 0 if ECX.
3183 * @param fEcx The ECX bit to test, or 0 if EDX.
3184 * @remarks Used via IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX,
3185 * IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX and others.
3186 */
3187static bool iemRegIsAmdCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
3188{
3189 uint32_t uEax, uEbx, uEcx, uEdx;
3190 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x80000001, &uEax, &uEbx, &uEcx, &uEdx);
3191 return (fEcx && (uEcx & fEcx))
3192 || (fEdx && (uEdx & fEdx));
3193}
3194
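/*
 * Illustrative usage sketch, not part of the original source.  The
 * IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX/_ECX macros mentioned in the remarks
 * are assumed to expand to calls like the one below; X86_CPUID_FEATURE_EDX_SSE2
 * is assumed to be the usual feature bit definition from x86.h:
 *
 *      if (!iemRegIsIntelCpuIdFeaturePresent(pIemCpu, X86_CPUID_FEATURE_EDX_SSE2, 0))
 *          return IEMOP_RAISE_INVALID_OPCODE();
 */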
3195/** @} */
3196
3197
3198/** @name Memory access.
3199 *
3200 * @{
3201 */
3202
3203
3204/**
3205 * Checks if the given segment can be written to, raising the appropriate
3206 * exception if not.
3207 *
3208 * @returns VBox strict status code.
3209 *
3210 * @param pIemCpu The IEM per CPU data.
3211 * @param pHid Pointer to the hidden register.
3212 * @param iSegReg The register number.
3213 */
3214static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
3215{
3216 if (!pHid->Attr.n.u1Present)
3217 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
3218
3219 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
3220 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
3221 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
3222 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
3223
3224 /** @todo DPL/RPL/CPL? */
3225
3226 return VINF_SUCCESS;
3227}
3228
3229
3230/**
3231 * Checks if the given segment can be read from, raising the appropriate
3232 * exception if not.
3233 *
3234 * @returns VBox strict status code.
3235 *
3236 * @param pIemCpu The IEM per CPU data.
3237 * @param pHid Pointer to the hidden register.
3238 * @param iSegReg The register number.
3239 */
3240static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
3241{
3242 if (!pHid->Attr.n.u1Present)
3243 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
3244
3245 if ( (pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE
3246 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
3247 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
3248
3249 /** @todo DPL/RPL/CPL? */
3250
3251 return VINF_SUCCESS;
3252}
3253
3254
3255/**
3256 * Applies the segment limit, base and attributes.
3257 *
3258 * This may raise a \#GP or \#SS.
3259 *
3260 * @returns VBox strict status code.
3261 *
3262 * @param pIemCpu The IEM per CPU data.
3263 * @param fAccess The kind of access which is being performed.
3264 * @param iSegReg The index of the segment register to apply.
3265 * This is UINT8_MAX if none (for IDT, GDT, LDT,
3266 * TSS, ++).
3267 * @param pGCPtrMem Pointer to the guest memory address to apply
3268 * segmentation to. Input and output parameter.
3269 */
3270static VBOXSTRICTRC iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg,
3271 size_t cbMem, PRTGCPTR pGCPtrMem)
3272{
3273 if (iSegReg == UINT8_MAX)
3274 return VINF_SUCCESS;
3275
3276 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
3277 switch (pIemCpu->enmCpuMode)
3278 {
3279 case IEMMODE_16BIT:
3280 case IEMMODE_32BIT:
3281 {
3282 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
3283 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
3284
3285 Assert(pSel->Attr.n.u1Present);
3286 Assert(pSel->Attr.n.u1DescType);
3287 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
3288 {
3289 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
3290 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
3291 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
3292
3293 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3294 {
3295 /** @todo CPL check. */
3296 }
3297
3298 /*
3299 * There are two kinds of data selectors, normal and expand down.
3300 */
3301 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
3302 {
3303 if ( GCPtrFirst32 > pSel->u32Limit
3304 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
3305 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
3306
3307 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
3308 }
3309 else
3310 {
3311 /** @todo implement expand down segments. */
3312 AssertFailed(/** @todo implement this */);
3313 return VERR_NOT_IMPLEMENTED;
3314 }
3315 }
3316 else
3317 {
3318
3319 /*
3320 * Code selectors can usually be used to read through; writing is
3321 * only permitted in real and V8086 mode.
3322 */
3323 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
3324 || ( (fAccess & IEM_ACCESS_TYPE_READ)
3325 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
3326 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
3327 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
3328
3329 if ( GCPtrFirst32 > pSel->u32Limit
3330 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
3331 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
3332
3333 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3334 {
3335 /** @todo CPL check. */
3336 }
3337
3338 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
3339 }
3340 return VINF_SUCCESS;
3341 }
3342
3343 case IEMMODE_64BIT:
3344 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
3345 *pGCPtrMem += pSel->u64Base;
3346 return VINF_SUCCESS;
3347
3348 default:
3349 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
3350 }
3351}
3352
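/*
 * Worked example, not part of the original source, for the 16/32-bit path
 * above with a normal (non-expand-down) data segment:
 *
 *      DS.base = 0x10000, DS.limit = 0xFFFF, 4-byte read at *pGCPtrMem = 0x100:
 *          0x100 and 0x103 are both within the limit, so the call succeeds
 *          and *pGCPtrMem becomes 0x10100.
 *
 * In 64-bit mode only FS and GS contribute their base; the other segments are
 * treated as flat.
 */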
3353
3354/**
3355 * Translates a virtual address to a physical address and checks if we
3356 * can access the page as specified.
3357 *
3358 * @param pIemCpu The IEM per CPU data.
3359 * @param GCPtrMem The virtual address.
3360 * @param fAccess The intended access.
3361 * @param pGCPhysMem Where to return the physical address.
3362 */
3363static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
3364 PRTGCPHYS pGCPhysMem)
3365{
3366 /** @todo Need a different PGM interface here. We're currently using
3367 * generic / REM interfaces. This won't cut it for R0 & RC. */
3368 RTGCPHYS GCPhys;
3369 uint64_t fFlags;
3370 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
3371 if (RT_FAILURE(rc))
3372 {
3373 /** @todo Check unassigned memory in unpaged mode. */
3374 *pGCPhysMem = NIL_RTGCPHYS;
3375 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
3376 }
3377
3378 /* If the page is writable, user accessible and does not have the no-exec
3379 bit set, all access is allowed. Otherwise we'll have to check more carefully... */
3380 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
3381 {
3382 /* Write to read only memory? */
3383 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
3384 && !(fFlags & X86_PTE_RW)
3385 && ( pIemCpu->uCpl != 0
3386 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
3387 {
3388 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page\n", GCPtrMem));
3389 *pGCPhysMem = NIL_RTGCPHYS;
3390 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
3391 }
3392
3393 /* Kernel memory accessed by userland? */
3394 if ( !(fFlags & X86_PTE_US)
3395 && pIemCpu->uCpl == 3
3396 && !(fAccess & IEM_ACCESS_WHAT_SYS))
3397 {
3398 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page\n", GCPtrMem));
3399 *pGCPhysMem = NIL_RTGCPHYS;
3400 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
3401 }
3402
3403 /* Executing non-executable memory? */
3404 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
3405 && (fFlags & X86_PTE_PAE_NX)
3406 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
3407 {
3408 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX\n", GCPtrMem));
3409 *pGCPhysMem = NIL_RTGCPHYS;
3410 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
3411 }
3412 }
3413
3414 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
3415 *pGCPhysMem = GCPhys;
3416 return VINF_SUCCESS;
3417}
3418
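/*
 * Illustration only -- a minimal sketch (not part of the build) of how the
 * translation helper above pairs with iemMemPageMap below; the local names
 * (GCPtrMem, iMemMap, cbMem) are hypothetical caller state and the real flow
 * lives in iemMemMap further down.
 *
 *     RTGCPHYS GCPhys;
 *     VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, IEM_ACCESS_DATA_R, &GCPhys);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         void *pvMem;
 *         rcStrict = iemMemPageMap(pIemCpu, GCPhys, IEM_ACCESS_DATA_R, &pvMem);
 *         if (rcStrict != VINF_SUCCESS)
 *             rcStrict = iemMemBounceBufferMapPhys(pIemCpu, iMemMap, &pvMem, cbMem,
 *                                                  GCPhys, IEM_ACCESS_DATA_R, rcStrict);
 *     }
 */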
3419
3420
3421/**
3422 * Maps a physical page.
3423 *
3424 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
3425 * @param pIemCpu The IEM per CPU data.
3426 * @param GCPhysMem The physical address.
3427 * @param fAccess The intended access.
3428 * @param ppvMem Where to return the mapping address.
3429 */
3430static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem)
3431{
3432#ifdef IEM_VERIFICATION_MODE
3433 /* Force the alternative path so we can ignore writes. */
3434 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
3435 return VERR_PGM_PHYS_TLB_CATCH_ALL;
3436#endif
3437
3438 /*
3439      * If we can map the page without trouble, do block processing
3440 * until the end of the current page.
3441 */
3442 /** @todo need some better API. */
3443 return PGMR3PhysTlbGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu),
3444 GCPhysMem,
3445 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
3446 ppvMem);
3447}
3448
3449
3450/**
3451 * Looks up a memory mapping entry.
3452 *
3453 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
3454 * @param pIemCpu The IEM per CPU data.
3455 * @param pvMem The memory address.
3456 * @param   fAccess             The access to look up.
3457 */
3458DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
3459{
3460 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
3461 if ( pIemCpu->aMemMappings[0].pv == pvMem
3462 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
3463 return 0;
3464 if ( pIemCpu->aMemMappings[1].pv == pvMem
3465 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
3466 return 1;
3467 if ( pIemCpu->aMemMappings[2].pv == pvMem
3468 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
3469 return 2;
3470 return VERR_NOT_FOUND;
3471}
3472
3473
3474/**
3475 * Finds a free memmap entry when using iNextMapping doesn't work.
3476 *
3477 * @returns Memory mapping index, 1024 on failure.
3478 * @param pIemCpu The IEM per CPU data.
3479 */
3480static unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
3481{
3482 /*
3483 * The easy case.
3484 */
3485 if (pIemCpu->cActiveMappings == 0)
3486 {
3487 pIemCpu->iNextMapping = 1;
3488 return 0;
3489 }
3490
3491 /* There should be enough mappings for all instructions. */
3492 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
3493
3494 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
3495 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
3496 return i;
3497
3498 AssertFailedReturn(1024);
3499}
3500
3501
3502/**
3503 * Commits a bounce buffer that needs writing back and unmaps it.
3504 *
3505 * @returns Strict VBox status code.
3506 * @param pIemCpu The IEM per CPU data.
3507 * @param iMemMap The index of the buffer to commit.
3508 */
3509static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
3510{
3511 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
3512 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
3513
3514 /*
3515 * Do the writing.
3516 */
3517 int rc;
3518 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
3519 && !IEM_VERIFICATION_ENABLED(pIemCpu))
3520 {
3521 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
3522 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
3523 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
3524 if (!pIemCpu->fByPassHandlers)
3525 {
3526 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
3527 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
3528 pbBuf,
3529 cbFirst);
3530 if (cbSecond && rc == VINF_SUCCESS)
3531 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
3532 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
3533 pbBuf + cbFirst,
3534 cbSecond);
3535 }
3536 else
3537 {
3538 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
3539 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
3540 pbBuf,
3541 cbFirst);
3542 if (cbSecond && rc == VINF_SUCCESS)
3543 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
3544 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
3545 pbBuf + cbFirst,
3546 cbSecond);
3547 }
3548 }
3549 else
3550 rc = VINF_SUCCESS;
3551
3552#ifdef IEM_VERIFICATION_MODE
3553 /*
3554 * Record the write(s).
3555 */
3556 if (!pIemCpu->fNoRem)
3557 {
3558 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
3559 if (pEvtRec)
3560 {
3561 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
3562 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
3563 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
3564 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
3565 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
3566 *pIemCpu->ppIemEvtRecNext = pEvtRec;
3567 }
3568 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
3569 {
3570 pEvtRec = iemVerifyAllocRecord(pIemCpu);
3571 if (pEvtRec)
3572 {
3573 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
3574 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
3575 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
3576 memcpy(pEvtRec->u.RamWrite.ab,
3577 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
3578 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
3579 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
3580 *pIemCpu->ppIemEvtRecNext = pEvtRec;
3581 }
3582 }
3583 }
3584#endif
3585
3586 /*
3587 * Free the mapping entry.
3588 */
3589 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
3590 Assert(pIemCpu->cActiveMappings != 0);
3591 pIemCpu->cActiveMappings--;
3592 return rc;
3593}
3594
3595
3596/**
3597 * iemMemMap worker that deals with a request crossing pages.
3598 */
3599static VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem,
3600 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
3601{
3602 /*
3603 * Do the address translations.
3604 */
3605 RTGCPHYS GCPhysFirst;
3606 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
3607 if (rcStrict != VINF_SUCCESS)
3608 return rcStrict;
3609
3610 RTGCPHYS GCPhysSecond;
3611 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
3612 if (rcStrict != VINF_SUCCESS)
3613 return rcStrict;
3614 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
3615
3616 /*
3617      * Read in the current memory content if it's a read or execute access.
3618 */
3619 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
3620 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
3621 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
3622
3623 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC))
3624 {
3625 int rc;
3626 if (!pIemCpu->fByPassHandlers)
3627 {
3628 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbFirstPage);
3629 if (rc != VINF_SUCCESS)
3630 return rc;
3631 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage);
3632 if (rc != VINF_SUCCESS)
3633 return rc;
3634 }
3635 else
3636 {
3637 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbFirstPage);
3638 if (rc != VINF_SUCCESS)
3639 return rc;
3640 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
3641 if (rc != VINF_SUCCESS)
3642 return rc;
3643 }
3644
3645#ifdef IEM_VERIFICATION_MODE
3646 if (!pIemCpu->fNoRem)
3647 {
3648 /*
3649 * Record the reads.
3650 */
3651 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
3652 if (pEvtRec)
3653 {
3654 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
3655 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
3656 pEvtRec->u.RamRead.cb = cbFirstPage;
3657 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
3658 *pIemCpu->ppIemEvtRecNext = pEvtRec;
3659 }
3660 pEvtRec = iemVerifyAllocRecord(pIemCpu);
3661 if (pEvtRec)
3662 {
3663 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
3664 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
3665 pEvtRec->u.RamRead.cb = cbSecondPage;
3666 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
3667 *pIemCpu->ppIemEvtRecNext = pEvtRec;
3668 }
3669 }
3670#endif
3671 }
3672#ifdef VBOX_STRICT
3673 else
3674 memset(pbBuf, 0xcc, cbMem);
3675#endif
3676#ifdef VBOX_STRICT
3677 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
3678 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
3679#endif
3680
3681 /*
3682 * Commit the bounce buffer entry.
3683 */
3684 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
3685 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
3686 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
3687 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
3688 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
3689 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
3690 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
3691 pIemCpu->cActiveMappings++;
3692
3693 *ppvMem = pbBuf;
3694 return VINF_SUCCESS;
3695}
3696
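/*
 * Worked example (illustration only): for a 4 byte access whose page offset is
 * 0xffe, the helper above splits it as
 *
 *     cbFirstPage  = PAGE_SIZE - 0xffe   = 2   (bytes from the first page)
 *     cbSecondPage = cbMem - cbFirstPage = 2   (bytes from the second page)
 *
 * and the caller still sees one contiguous cbMem byte bounce buffer.
 */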
3697
3698/**
3699 * iemMemMap worker that deals with iemMemPageMap failures.
3700 */
3701static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
3702 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
3703{
3704 /*
3705 * Filter out conditions we can handle and the ones which shouldn't happen.
3706 */
3707 if ( rcMap != VINF_PGM_PHYS_TLB_CATCH_WRITE
3708 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
3709 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
3710 {
3711 AssertReturn(RT_FAILURE_NP(rcMap), VERR_INTERNAL_ERROR_3);
3712 return rcMap;
3713 }
3714 pIemCpu->cPotentialExits++;
3715
3716 /*
3717      * Read in the current memory content if it's a read or execute access.
3718 */
3719 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
3720 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC))
3721 {
3722 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
3723 memset(pbBuf, 0xff, cbMem);
3724 else
3725 {
3726 int rc;
3727 if (!pIemCpu->fByPassHandlers)
3728 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem);
3729 else
3730 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
3731 if (rc != VINF_SUCCESS)
3732 return rc;
3733 }
3734
3735#ifdef IEM_VERIFICATION_MODE
3736 if (!pIemCpu->fNoRem)
3737 {
3738 /*
3739 * Record the read.
3740 */
3741 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
3742 if (pEvtRec)
3743 {
3744 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
3745 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
3746 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
3747 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
3748 *pIemCpu->ppIemEvtRecNext = pEvtRec;
3749 }
3750 }
3751#endif
3752 }
3753#ifdef VBOX_STRICT
3754 else
3755 memset(pbBuf, 0xcc, cbMem);
3756#endif
3757#ifdef VBOX_STRICT
3758 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
3759 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
3760#endif
3761
3762 /*
3763 * Commit the bounce buffer entry.
3764 */
3765 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
3766 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
3767 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
3768 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
3769 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
3770 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
3771 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
3772 pIemCpu->cActiveMappings++;
3773
3774 *ppvMem = pbBuf;
3775 return VINF_SUCCESS;
3776}
3777
3778
3779
3780/**
3781 * Maps the specified guest memory for the given kind of access.
3782 *
3783 * This may be using bounce buffering of the memory if it's crossing a page
3784 * boundary or if there is an access handler installed for any of it. Because
3785 * of lock prefix guarantees, we're in for some extra clutter when this
3786 * happens.
3787 *
3788 * This may raise a \#GP, \#SS, \#PF or \#AC.
3789 *
3790 * @returns VBox strict status code.
3791 *
3792 * @param pIemCpu The IEM per CPU data.
3793 * @param ppvMem Where to return the pointer to the mapped
3794 * memory.
3795 * @param cbMem The number of bytes to map. This is usually 1,
3796 * 2, 4, 6, 8, 12, 16 or 32. When used by string
3797 * operations it can be up to a page.
3798 * @param iSegReg The index of the segment register to use for
3799 * this access. The base and limits are checked.
3800 * Use UINT8_MAX to indicate that no segmentation
3801 * is required (for IDT, GDT and LDT accesses).
3802 * @param GCPtrMem The address of the guest memory.
3803 * @param   fAccess             How the memory is being accessed.  The
3804 * IEM_ACCESS_TYPE_XXX bit is used to figure out
3805 * how to map the memory, while the
3806 * IEM_ACCESS_WHAT_XXX bit is used when raising
3807 * exceptions.
3808 */
3809static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
3810{
3811 /*
3812 * Check the input and figure out which mapping entry to use.
3813 */
3814 Assert(cbMem <= 32);
3815    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
3816
3817 unsigned iMemMap = pIemCpu->iNextMapping;
3818 if (iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings))
3819 {
3820 iMemMap = iemMemMapFindFree(pIemCpu);
3821 AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3);
3822 }
3823
3824 /*
3825 * Map the memory, checking that we can actually access it. If something
3826 * slightly complicated happens, fall back on bounce buffering.
3827 */
3828 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
3829 if (rcStrict != VINF_SUCCESS)
3830 return rcStrict;
3831
3832 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
3833 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
3834
3835 RTGCPHYS GCPhysFirst;
3836 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
3837 if (rcStrict != VINF_SUCCESS)
3838 return rcStrict;
3839
3840 void *pvMem;
3841 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem);
3842 if (rcStrict != VINF_SUCCESS)
3843 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
3844
3845 /*
3846 * Fill in the mapping table entry.
3847 */
3848 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
3849 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
3850 pIemCpu->iNextMapping = iMemMap + 1;
3851 pIemCpu->cActiveMappings++;
3852
3853 *ppvMem = pvMem;
3854 return VINF_SUCCESS;
3855}
3856
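/*
 * Usage sketch (illustration only, local names hypothetical): the
 * read-modify-write pattern built on iemMemMap / iemMemCommitAndUnmap,
 * assuming the IEM_ACCESS_DATA_RW combination from IEMInternal.h.  The fetch
 * and store helpers below are the real single-direction instances of the
 * same pattern.
 *
 *     uint32_t *pu32Tmp;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu32Tmp, sizeof(*pu32Tmp),
 *                                       X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_RW);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         *pu32Tmp |= RT_BIT_32(0);
 *         rcStrict = iemMemCommitAndUnmap(pIemCpu, pu32Tmp, IEM_ACCESS_DATA_RW);
 *     }
 */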
3857
3858/**
3859 * Commits the guest memory if bounce buffered and unmaps it.
3860 *
3861 * @returns Strict VBox status code.
3862 * @param pIemCpu The IEM per CPU data.
3863 * @param pvMem The mapping.
3864 * @param fAccess The kind of access.
3865 */
3866static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
3867{
3868 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
3869 AssertReturn(iMemMap >= 0, iMemMap);
3870
3871 /*
3872 * If it's bounce buffered, we need to write back the buffer.
3873 */
3874 if ( (pIemCpu->aMemMappings[iMemMap].fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_TYPE_WRITE))
3875 == (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_TYPE_WRITE))
3876 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
3877
3878 /* Free the entry. */
3879 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
3880 Assert(pIemCpu->cActiveMappings != 0);
3881 pIemCpu->cActiveMappings--;
3882 return VINF_SUCCESS;
3883}
3884
3885
3886/**
3887 * Fetches a data byte.
3888 *
3889 * @returns Strict VBox status code.
3890 * @param pIemCpu The IEM per CPU data.
3891 * @param pu8Dst Where to return the byte.
3892 * @param iSegReg The index of the segment register to use for
3893 * this access. The base and limits are checked.
3894 * @param GCPtrMem The address of the guest memory.
3895 */
3896static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
3897{
3898 /* The lazy approach for now... */
3899 uint8_t const *pu8Src;
3900 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
3901 if (rc == VINF_SUCCESS)
3902 {
3903 *pu8Dst = *pu8Src;
3904 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
3905 }
3906 return rc;
3907}
3908
3909
3910/**
3911 * Fetches a data word.
3912 *
3913 * @returns Strict VBox status code.
3914 * @param pIemCpu The IEM per CPU data.
3915 * @param pu16Dst Where to return the word.
3916 * @param iSegReg The index of the segment register to use for
3917 * this access. The base and limits are checked.
3918 * @param GCPtrMem The address of the guest memory.
3919 */
3920static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
3921{
3922 /* The lazy approach for now... */
3923 uint16_t const *pu16Src;
3924 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
3925 if (rc == VINF_SUCCESS)
3926 {
3927 *pu16Dst = *pu16Src;
3928 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
3929 }
3930 return rc;
3931}
3932
3933
3934/**
3935 * Fetches a data dword.
3936 *
3937 * @returns Strict VBox status code.
3938 * @param pIemCpu The IEM per CPU data.
3939 * @param pu32Dst Where to return the dword.
3940 * @param iSegReg The index of the segment register to use for
3941 * this access. The base and limits are checked.
3942 * @param GCPtrMem The address of the guest memory.
3943 */
3944static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
3945{
3946 /* The lazy approach for now... */
3947 uint32_t const *pu32Src;
3948 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
3949 if (rc == VINF_SUCCESS)
3950 {
3951 *pu32Dst = *pu32Src;
3952 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
3953 }
3954 return rc;
3955}
3956
3957
3958#ifdef SOME_UNUSED_FUNCTION
3959/**
3960 * Fetches a data dword and sign extends it to a qword.
3961 *
3962 * @returns Strict VBox status code.
3963 * @param pIemCpu The IEM per CPU data.
3964 * @param pu64Dst Where to return the sign extended value.
3965 * @param iSegReg The index of the segment register to use for
3966 * this access. The base and limits are checked.
3967 * @param GCPtrMem The address of the guest memory.
3968 */
3969static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
3970{
3971 /* The lazy approach for now... */
3972 int32_t const *pi32Src;
3973 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
3974 if (rc == VINF_SUCCESS)
3975 {
3976 *pu64Dst = *pi32Src;
3977 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
3978 }
3979#ifdef __GNUC__ /* warning: GCC may be a royal pain */
3980 else
3981 *pu64Dst = 0;
3982#endif
3983 return rc;
3984}
3985#endif
3986
3987
3988/**
3989 * Fetches a data qword.
3990 *
3991 * @returns Strict VBox status code.
3992 * @param pIemCpu The IEM per CPU data.
3993 * @param pu64Dst Where to return the qword.
3994 * @param iSegReg The index of the segment register to use for
3995 * this access. The base and limits are checked.
3996 * @param GCPtrMem The address of the guest memory.
3997 */
3998static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
3999{
4000 /* The lazy approach for now... */
4001 uint64_t const *pu64Src;
4002 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
4003 if (rc == VINF_SUCCESS)
4004 {
4005 *pu64Dst = *pu64Src;
4006 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
4007 }
4008 return rc;
4009}
4010
4011
4012/**
4013 * Fetches a descriptor register (lgdt, lidt).
4014 *
4015 * @returns Strict VBox status code.
4016 * @param pIemCpu The IEM per CPU data.
4017 * @param pcbLimit Where to return the limit.
4018 * @param   pGCPtrBase          Where to return the base.
4019 * @param iSegReg The index of the segment register to use for
4020 * this access. The base and limits are checked.
4021 * @param GCPtrMem The address of the guest memory.
4022 * @param enmOpSize The effective operand size.
4023 */
4024static VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase,
4025 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
4026{
4027 uint8_t const *pu8Src;
4028 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
4029 (void **)&pu8Src,
4030 enmOpSize == IEMMODE_64BIT
4031 ? 2 + 8
4032 : enmOpSize == IEMMODE_32BIT
4033 ? 2 + 4
4034 : 2 + 3,
4035 iSegReg,
4036 GCPtrMem,
4037 IEM_ACCESS_DATA_R);
4038 if (rcStrict == VINF_SUCCESS)
4039 {
4040 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
4041 switch (enmOpSize)
4042 {
4043 case IEMMODE_16BIT:
4044 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
4045 break;
4046 case IEMMODE_32BIT:
4047 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
4048 break;
4049 case IEMMODE_64BIT:
4050 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
4051 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
4052 break;
4053
4054 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4055 }
4056 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
4057 }
4058 return rcStrict;
4059}
4060
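/*
 * Memory layout read by the helper above (for reference): a 2 byte limit
 * followed by a base whose width depends on the effective operand size.
 *
 *     16-bit: limit = bytes 0..1, base = bytes 2..4 (24 bits, top byte forced to zero)
 *     32-bit: limit = bytes 0..1, base = bytes 2..5
 *     64-bit: limit = bytes 0..1, base = bytes 2..9
 */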
4061
4062
4063/**
4064 * Stores a data byte.
4065 *
4066 * @returns Strict VBox status code.
4067 * @param pIemCpu The IEM per CPU data.
4068 * @param iSegReg The index of the segment register to use for
4069 * this access. The base and limits are checked.
4070 * @param GCPtrMem The address of the guest memory.
4071 * @param u8Value The value to store.
4072 */
4073static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
4074{
4075 /* The lazy approach for now... */
4076 uint8_t *pu8Dst;
4077 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
4078 if (rc == VINF_SUCCESS)
4079 {
4080 *pu8Dst = u8Value;
4081 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
4082 }
4083 return rc;
4084}
4085
4086
4087/**
4088 * Stores a data word.
4089 *
4090 * @returns Strict VBox status code.
4091 * @param pIemCpu The IEM per CPU data.
4092 * @param iSegReg The index of the segment register to use for
4093 * this access. The base and limits are checked.
4094 * @param GCPtrMem The address of the guest memory.
4095 * @param u16Value The value to store.
4096 */
4097static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
4098{
4099 /* The lazy approach for now... */
4100 uint16_t *pu16Dst;
4101 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
4102 if (rc == VINF_SUCCESS)
4103 {
4104 *pu16Dst = u16Value;
4105 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
4106 }
4107 return rc;
4108}
4109
4110
4111/**
4112 * Stores a data dword.
4113 *
4114 * @returns Strict VBox status code.
4115 * @param pIemCpu The IEM per CPU data.
4116 * @param iSegReg The index of the segment register to use for
4117 * this access. The base and limits are checked.
4118 * @param GCPtrMem The address of the guest memory.
4119 * @param u32Value The value to store.
4120 */
4121static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
4122{
4123 /* The lazy approach for now... */
4124 uint32_t *pu32Dst;
4125 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
4126 if (rc == VINF_SUCCESS)
4127 {
4128 *pu32Dst = u32Value;
4129 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
4130 }
4131 return rc;
4132}
4133
4134
4135/**
4136 * Stores a data qword.
4137 *
4138 * @returns Strict VBox status code.
4139 * @param pIemCpu The IEM per CPU data.
4140 * @param iSegReg The index of the segment register to use for
4141 * this access. The base and limits are checked.
4142 * @param GCPtrMem The address of the guest memory.
4143 * @param u64Value The value to store.
4144 */
4145static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
4146{
4147 /* The lazy approach for now... */
4148 uint64_t *pu64Dst;
4149 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
4150 if (rc == VINF_SUCCESS)
4151 {
4152 *pu64Dst = u64Value;
4153 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
4154 }
4155 return rc;
4156}
4157
4158
4159/**
4160 * Pushes a word onto the stack.
4161 *
4162 * @returns Strict VBox status code.
4163 * @param pIemCpu The IEM per CPU data.
4164 * @param u16Value The value to push.
4165 */
4166static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
4167{
4168 /* Increment the stack pointer. */
4169 uint64_t uNewRsp;
4170 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4171 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 2, &uNewRsp);
4172
4173 /* Write the word the lazy way. */
4174 uint16_t *pu16Dst;
4175 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4176 if (rc == VINF_SUCCESS)
4177 {
4178 *pu16Dst = u16Value;
4179 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
4180 }
4181
4182    /* Commit the new RSP value unless an access handler made trouble. */
4183 if (rc == VINF_SUCCESS)
4184 pCtx->rsp = uNewRsp;
4185
4186 return rc;
4187}
4188
4189
4190/**
4191 * Pushes a dword onto the stack.
4192 *
4193 * @returns Strict VBox status code.
4194 * @param pIemCpu The IEM per CPU data.
4195 * @param u32Value The value to push.
4196 */
4197static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
4198{
4199 /* Increment the stack pointer. */
4200 uint64_t uNewRsp;
4201 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4202 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 4, &uNewRsp);
4203
4204    /* Write the dword the lazy way. */
4205 uint32_t *pu32Dst;
4206 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4207 if (rc == VINF_SUCCESS)
4208 {
4209 *pu32Dst = u32Value;
4210 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
4211 }
4212
4213    /* Commit the new RSP value unless an access handler made trouble. */
4214 if (rc == VINF_SUCCESS)
4215 pCtx->rsp = uNewRsp;
4216
4217 return rc;
4218}
4219
4220
4221/**
4222 * Pushes a qword onto the stack.
4223 *
4224 * @returns Strict VBox status code.
4225 * @param pIemCpu The IEM per CPU data.
4226 * @param u64Value The value to push.
4227 */
4228static VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
4229{
4230 /* Increment the stack pointer. */
4231 uint64_t uNewRsp;
4232 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4233 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 8, &uNewRsp);
4234
4235    /* Write the qword the lazy way. */
4236 uint64_t *pu64Dst;
4237 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4238 if (rc == VINF_SUCCESS)
4239 {
4240 *pu64Dst = u64Value;
4241 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
4242 }
4243
4244    /* Commit the new RSP value unless an access handler made trouble. */
4245 if (rc == VINF_SUCCESS)
4246 pCtx->rsp = uNewRsp;
4247
4248 return rc;
4249}
4250
4251
4252/**
4253 * Pops a word from the stack.
4254 *
4255 * @returns Strict VBox status code.
4256 * @param pIemCpu The IEM per CPU data.
4257 * @param pu16Value Where to store the popped value.
4258 */
4259static VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
4260{
4261 /* Increment the stack pointer. */
4262 uint64_t uNewRsp;
4263 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4264 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 2, &uNewRsp);
4265
4266    /* Read the word the lazy way. */
4267 uint16_t const *pu16Src;
4268 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4269 if (rc == VINF_SUCCESS)
4270 {
4271 *pu16Value = *pu16Src;
4272 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
4273
4274 /* Commit the new RSP value. */
4275 if (rc == VINF_SUCCESS)
4276 pCtx->rsp = uNewRsp;
4277 }
4278
4279 return rc;
4280}
4281
4282
4283/**
4284 * Pops a dword from the stack.
4285 *
4286 * @returns Strict VBox status code.
4287 * @param pIemCpu The IEM per CPU data.
4288 * @param pu32Value Where to store the popped value.
4289 */
4290static VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
4291{
4292 /* Increment the stack pointer. */
4293 uint64_t uNewRsp;
4294 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4295 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 4, &uNewRsp);
4296
4297    /* Read the dword the lazy way. */
4298 uint32_t const *pu32Src;
4299 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4300 if (rc == VINF_SUCCESS)
4301 {
4302 *pu32Value = *pu32Src;
4303 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
4304
4305 /* Commit the new RSP value. */
4306 if (rc == VINF_SUCCESS)
4307 pCtx->rsp = uNewRsp;
4308 }
4309
4310 return rc;
4311}
4312
4313
4314/**
4315 * Pops a qword from the stack.
4316 *
4317 * @returns Strict VBox status code.
4318 * @param pIemCpu The IEM per CPU data.
4319 * @param pu64Value Where to store the popped value.
4320 */
4321static VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
4322{
4323 /* Increment the stack pointer. */
4324 uint64_t uNewRsp;
4325 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4326 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 8, &uNewRsp);
4327
4328    /* Read the qword the lazy way. */
4329 uint64_t const *pu64Src;
4330 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4331 if (rc == VINF_SUCCESS)
4332 {
4333 *pu64Value = *pu64Src;
4334 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
4335
4336 /* Commit the new RSP value. */
4337 if (rc == VINF_SUCCESS)
4338 pCtx->rsp = uNewRsp;
4339 }
4340
4341 return rc;
4342}
4343
4344
4345/**
4346 * Pushes a word onto the stack, using a temporary stack pointer.
4347 *
4348 * @returns Strict VBox status code.
4349 * @param pIemCpu The IEM per CPU data.
4350 * @param u16Value The value to push.
4351 * @param pTmpRsp Pointer to the temporary stack pointer.
4352 */
4353static VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
4354{
4355 /* Increment the stack pointer. */
4356 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4357 RTUINT64U NewRsp = *pTmpRsp;
4358 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 2, pCtx);
4359
4360 /* Write the word the lazy way. */
4361 uint16_t *pu16Dst;
4362 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4363 if (rc == VINF_SUCCESS)
4364 {
4365 *pu16Dst = u16Value;
4366 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
4367 }
4368
4369    /* Commit the new RSP value unless an access handler made trouble. */
4370 if (rc == VINF_SUCCESS)
4371 *pTmpRsp = NewRsp;
4372
4373 return rc;
4374}
4375
4376
4377/**
4378 * Pushes a dword onto the stack, using a temporary stack pointer.
4379 *
4380 * @returns Strict VBox status code.
4381 * @param pIemCpu The IEM per CPU data.
4382 * @param u32Value The value to push.
4383 * @param pTmpRsp Pointer to the temporary stack pointer.
4384 */
4385static VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
4386{
4387 /* Increment the stack pointer. */
4388 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4389 RTUINT64U NewRsp = *pTmpRsp;
4390 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 4, pCtx);
4391
4392    /* Write the dword the lazy way. */
4393 uint32_t *pu32Dst;
4394 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4395 if (rc == VINF_SUCCESS)
4396 {
4397 *pu32Dst = u32Value;
4398 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
4399 }
4400
4401    /* Commit the new RSP value unless an access handler made trouble. */
4402 if (rc == VINF_SUCCESS)
4403 *pTmpRsp = NewRsp;
4404
4405 return rc;
4406}
4407
4408
4409#ifdef SOME_UNUSED_FUNCTION
4410/**
4411 * Pushes a qword onto the stack, using a temporary stack pointer.
4412 *
4413 * @returns Strict VBox status code.
4414 * @param pIemCpu The IEM per CPU data.
4415 * @param u64Value The value to push.
4416 * @param pTmpRsp Pointer to the temporary stack pointer.
4417 */
4418static VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
4419{
4420 /* Increment the stack pointer. */
4421 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4422 RTUINT64U NewRsp = *pTmpRsp;
4423 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 8, pCtx);
4424
4425    /* Write the qword the lazy way. */
4426 uint64_t *pu64Dst;
4427 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4428 if (rc == VINF_SUCCESS)
4429 {
4430 *pu64Dst = u64Value;
4431 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
4432 }
4433
4434    /* Commit the new RSP value unless an access handler made trouble. */
4435 if (rc == VINF_SUCCESS)
4436 *pTmpRsp = NewRsp;
4437
4438 return rc;
4439}
4440#endif
4441
4442
4443/**
4444 * Pops a word from the stack, using a temporary stack pointer.
4445 *
4446 * @returns Strict VBox status code.
4447 * @param pIemCpu The IEM per CPU data.
4448 * @param pu16Value Where to store the popped value.
4449 * @param pTmpRsp Pointer to the temporary stack pointer.
4450 */
4451static VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
4452{
4453 /* Increment the stack pointer. */
4454 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4455 RTUINT64U NewRsp = *pTmpRsp;
4456 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 2, pCtx);
4457
4458    /* Read the word the lazy way. */
4459 uint16_t const *pu16Src;
4460 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4461 if (rc == VINF_SUCCESS)
4462 {
4463 *pu16Value = *pu16Src;
4464 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
4465
4466 /* Commit the new RSP value. */
4467 if (rc == VINF_SUCCESS)
4468 *pTmpRsp = NewRsp;
4469 }
4470
4471 return rc;
4472}
4473
4474
4475/**
4476 * Pops a dword from the stack, using a temporary stack pointer.
4477 *
4478 * @returns Strict VBox status code.
4479 * @param pIemCpu The IEM per CPU data.
4480 * @param pu32Value Where to store the popped value.
4481 * @param pTmpRsp Pointer to the temporary stack pointer.
4482 */
4483static VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
4484{
4485 /* Increment the stack pointer. */
4486 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4487 RTUINT64U NewRsp = *pTmpRsp;
4488 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 4, pCtx);
4489
4490    /* Read the dword the lazy way. */
4491 uint32_t const *pu32Src;
4492 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4493 if (rc == VINF_SUCCESS)
4494 {
4495 *pu32Value = *pu32Src;
4496 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
4497
4498 /* Commit the new RSP value. */
4499 if (rc == VINF_SUCCESS)
4500 *pTmpRsp = NewRsp;
4501 }
4502
4503 return rc;
4504}
4505
4506
4507/**
4508 * Pops a qword from the stack, using a temporary stack pointer.
4509 *
4510 * @returns Strict VBox status code.
4511 * @param pIemCpu The IEM per CPU data.
4512 * @param pu64Value Where to store the popped value.
4513 * @param pTmpRsp Pointer to the temporary stack pointer.
4514 */
4515static VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
4516{
4517 /* Increment the stack pointer. */
4518 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4519 RTUINT64U NewRsp = *pTmpRsp;
4520 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);
4521
4522    /* Read the qword the lazy way. */
4523 uint64_t const *pu64Src;
4524 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4525 if (rcStrict == VINF_SUCCESS)
4526 {
4527 *pu64Value = *pu64Src;
4528 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
4529
4530 /* Commit the new RSP value. */
4531 if (rcStrict == VINF_SUCCESS)
4532 *pTmpRsp = NewRsp;
4533 }
4534
4535 return rcStrict;
4536}
4537
4538
4539/**
4540 * Begin a special stack push (used by interrupts, exceptions and such).
4541 *
4542 * This will raise \#SS or \#PF if appropriate.
4543 *
4544 * @returns Strict VBox status code.
4545 * @param pIemCpu The IEM per CPU data.
4546 * @param cbMem The number of bytes to push onto the stack.
4547 * @param ppvMem Where to return the pointer to the stack memory.
4548 * As with the other memory functions this could be
4549 * direct access or bounce buffered access, so
4550 * don't commit register until the commit call
4551 *                      don't commit registers until the commit call
4552 * @param puNewRsp Where to return the new RSP value. This must be
4553 * passed unchanged to
4554 * iemMemStackPushCommitSpecial().
4555 */
4556static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
4557{
4558 Assert(cbMem < UINT8_MAX);
4559 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4560 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, (uint8_t)cbMem, puNewRsp);
4561 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4562}
4563
4564
4565/**
4566 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
4567 *
4568 * This will update the rSP.
4569 *
4570 * @returns Strict VBox status code.
4571 * @param pIemCpu The IEM per CPU data.
4572 * @param pvMem The pointer returned by
4573 * iemMemStackPushBeginSpecial().
4574 * @param uNewRsp The new RSP value returned by
4575 * iemMemStackPushBeginSpecial().
4576 */
4577static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
4578{
4579 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
4580 if (rcStrict == VINF_SUCCESS)
4581 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
4582 return rcStrict;
4583}
4584
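/*
 * Usage sketch (illustration only, frame contents and local names are
 * hypothetical): the two phase push used by interrupt/exception dispatch.
 * Guest registers are only committed once both the mapping and the write
 * back have succeeded.
 *
 *     uint64_t  uNewRsp;
 *     uint16_t *pu16Frame;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         pu16Frame[2] = uFlags;   // example frame layout only
 *         pu16Frame[1] = uCs;
 *         pu16Frame[0] = uIp;
 *         rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
 *     }
 */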
4585
4586/**
4587 * Begin a special stack pop (used by iret, retf and such).
4588 *
4589 * This will raise \#SS or \#PF if appropriate.
4590 *
4591 * @returns Strict VBox status code.
4592 * @param pIemCpu The IEM per CPU data.
4593 * @param   cbMem               The number of bytes to pop off the stack.
4594 * @param ppvMem Where to return the pointer to the stack memory.
4595 * @param puNewRsp Where to return the new RSP value. This must be
4596 * passed unchanged to
4597 * iemMemStackPopCommitSpecial() or applied
4598 * manually if iemMemStackPopDoneSpecial() is used.
4599 */
4600static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
4601{
4602 Assert(cbMem < UINT8_MAX);
4603 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4604 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, (uint8_t)cbMem, puNewRsp);
4605 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4606}
4607
4608
4609/**
4610 * Continue a special stack pop (used by iret).
4611 *
4612 * This will raise \#SS or \#PF if appropriate.
4613 *
4614 * @returns Strict VBox status code.
4615 * @param pIemCpu The IEM per CPU data.
4616 * @param   cbMem               The number of bytes to pop off the stack.
4617 * @param ppvMem Where to return the pointer to the stack memory.
4618 * @param puNewRsp Where to return the new RSP value. This must be
4619 * passed unchanged to
4620 * iemMemStackPopCommitSpecial() or applied
4621 * manually if iemMemStackPopDoneSpecial() is used.
4622 */
4623static VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
4624{
4625 Assert(cbMem < UINT8_MAX);
4626 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4627 RTUINT64U NewRsp;
4628 NewRsp.u = *puNewRsp;
4629 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);
4630 *puNewRsp = NewRsp.u;
4631 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4632}
4633
4634
4635/**
4636 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
4637 *
4638 * This will update the rSP.
4639 *
4640 * @returns Strict VBox status code.
4641 * @param pIemCpu The IEM per CPU data.
4642 * @param pvMem The pointer returned by
4643 * iemMemStackPopBeginSpecial().
4644 * @param uNewRsp The new RSP value returned by
4645 * iemMemStackPopBeginSpecial().
4646 */
4647static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
4648{
4649 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
4650 if (rcStrict == VINF_SUCCESS)
4651 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
4652 return rcStrict;
4653}
4654
4655
4656/**
4657 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
4658 * iemMemStackPopContinueSpecial).
4659 *
4660 * The caller will manually commit the rSP.
4661 *
4662 * @returns Strict VBox status code.
4663 * @param pIemCpu The IEM per CPU data.
4664 * @param pvMem The pointer returned by
4665 * iemMemStackPopBeginSpecial() or
4666 * iemMemStackPopContinueSpecial().
4667 */
4668static VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
4669{
4670 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
4671}
4672
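/*
 * Usage sketch (illustration only, local names hypothetical): an iret-style
 * caller popping a 6 byte frame, then committing RSP manually after the
 * "done" call as described above.
 *
 *     uint64_t        uNewRsp;
 *     uint16_t const *pu16Frame;
 *     VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, (void const **)&pu16Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     uint16_t uIp    = pu16Frame[0];
 *     uint16_t uCs    = pu16Frame[1];
 *     uint16_t uFlags = pu16Frame[2];
 *     rcStrict = iemMemStackPopDoneSpecial(pIemCpu, pu16Frame);
 *     if (rcStrict == VINF_SUCCESS)
 *         pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;   // manual commit of the new RSP
 */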
4673
4674/**
4675 * Fetches a system table dword.
4676 *
4677 * @returns Strict VBox status code.
4678 * @param pIemCpu The IEM per CPU data.
4679 * @param pu32Dst Where to return the dword.
4680 * @param iSegReg The index of the segment register to use for
4681 * this access. The base and limits are checked.
4682 * @param GCPtrMem The address of the guest memory.
4683 */
4684static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
4685{
4686 /* The lazy approach for now... */
4687 uint32_t const *pu32Src;
4688 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
4689 if (rc == VINF_SUCCESS)
4690 {
4691 *pu32Dst = *pu32Src;
4692 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
4693 }
4694 return rc;
4695}
4696
4697
4698/**
4699 * Fetches a system table qword.
4700 *
4701 * @returns Strict VBox status code.
4702 * @param pIemCpu The IEM per CPU data.
4703 * @param pu64Dst Where to return the qword.
4704 * @param iSegReg The index of the segment register to use for
4705 * this access. The base and limits are checked.
4706 * @param GCPtrMem The address of the guest memory.
4707 */
4708static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
4709{
4710 /* The lazy approach for now... */
4711 uint64_t const *pu64Src;
4712 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
4713 if (rc == VINF_SUCCESS)
4714 {
4715 *pu64Dst = *pu64Src;
4716 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
4717 }
4718 return rc;
4719}
4720
4721
4722/**
4723 * Fetches a descriptor table entry.
4724 *
4725 * @returns Strict VBox status code.
4726 * @param pIemCpu The IEM per CPU.
4727 * @param pDesc Where to return the descriptor table entry.
4728 * @param uSel The selector which table entry to fetch.
4729 */
4730static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel)
4731{
4732 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4733
4734 /** @todo did the 286 require all 8 bytes to be accessible? */
4735 /*
4736 * Get the selector table base and check bounds.
4737 */
4738 RTGCPTR GCPtrBase;
4739 if (uSel & X86_SEL_LDT)
4740 {
4741 if ( !pCtx->ldtrHid.Attr.n.u1Present
4742 || (uSel | 0x7U) > pCtx->ldtrHid.u32Limit )
4743 {
4744 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
4745 uSel, pCtx->ldtrHid.u32Limit, pCtx->ldtr));
4746 /** @todo is this the right exception? */
4747 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
4748 }
4749
4750 Assert(pCtx->ldtrHid.Attr.n.u1Present);
4751 GCPtrBase = pCtx->ldtrHid.u64Base;
4752 }
4753 else
4754 {
4755 if ((uSel | 0x7U) > pCtx->gdtr.cbGdt)
4756 {
4757 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
4758 /** @todo is this the right exception? */
4759 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
4760 }
4761 GCPtrBase = pCtx->gdtr.pGdt;
4762 }
4763
4764 /*
4765 * Read the legacy descriptor and maybe the long mode extensions if
4766 * required.
4767 */
4768 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
4769 if (rcStrict == VINF_SUCCESS)
4770 {
4771 if ( !IEM_IS_LONG_MODE(pIemCpu)
4772 || pDesc->Legacy.Gen.u1DescType)
4773 pDesc->Long.au64[1] = 0;
4774 else if ((uint32_t)(uSel & X86_SEL_MASK) + 15 < (uSel & X86_SEL_LDT ? pCtx->ldtrHid.u32Limit : pCtx->gdtr.cbGdt))
4775 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
4776 else
4777 {
4778 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
4779 /** @todo is this the right exception? */
4780 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
4781 }
4782 }
4783 return rcStrict;
4784}
4785
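/*
 * Worked example (for reference): selector 0x001b has RPL = 3, TI = 0 (GDT)
 * and index 3, so the helper above reads the 8 byte descriptor at
 * gdtr.pGdt + (0x001b & X86_SEL_MASK) = gdtr.pGdt + 0x18.
 */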
4786
4787/**
4788 * Marks the selector descriptor as accessed (only non-system descriptors).
4789 *
4790 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
4791 * will therefore skip the limit checks.
4792 *
4793 * @returns Strict VBox status code.
4794 * @param pIemCpu The IEM per CPU.
4795 * @param uSel The selector.
4796 */
4797static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
4798{
4799 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4800
4801 /*
4802 * Get the selector table base and calculate the entry address.
4803 */
4804 RTGCPTR GCPtr = uSel & X86_SEL_LDT
4805 ? pCtx->ldtrHid.u64Base
4806 : pCtx->gdtr.pGdt;
4807 GCPtr += uSel & X86_SEL_MASK;
4808
4809 /*
4810 * ASMAtomicBitSet will assert if the address is misaligned, so do some
4811 * ugly stuff to avoid this. This will make sure it's an atomic access
4812     * as well as more or less remove any question about 8-bit or 32-bit accesses.
4813 */
4814 VBOXSTRICTRC rcStrict;
4815 uint32_t volatile *pu32;
4816 if ((GCPtr & 3) == 0)
4817 {
4818 /* The normal case, map the 32-bit bits around the accessed bit (40). */
4819 GCPtr += 2 + 2;
4820 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
4821 if (rcStrict != VINF_SUCCESS)
4822 return rcStrict;
4823        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
4824 }
4825 else
4826 {
4827 /* The misaligned GDT/LDT case, map the whole thing. */
4828 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
4829 if (rcStrict != VINF_SUCCESS)
4830 return rcStrict;
4831 switch ((uintptr_t)pu32 & 3)
4832 {
4833 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
4834 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
4835 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
4836 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
4837 }
4838 }
4839
4840 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
4841}
4842
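/*
 * Note on the aligned fast path above (for reference): the accessed flag is
 * bit 40 of the 8 byte descriptor.  Since the mapping starts at offset 4,
 * that flag lands on bit 40 - 32 = 8 of the mapped dword, which is why
 * ASMAtomicBitSet(pu32, 8) is used rather than bit X86_SEL_TYPE_ACCESSED (1)
 * of the type byte itself.
 */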
4843/** @} */
4844
4845
4846/*
4847 * Include the C/C++ implementation of instruction.
4848 */
4849#include "IEMAllCImpl.cpp.h"
4850
4851
4852
4853/** @name "Microcode" macros.
4854 *
4855 * The idea is that we should be able to use the same code to interpret
4856 * instructions as well as recompiler instructions. Thus this obfuscation.
4857 *
4858 * @{
4859 */
4860#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
4861#define IEM_MC_END() }
4862#define IEM_MC_PAUSE() do {} while (0)
4863#define IEM_MC_CONTINUE() do {} while (0)
4864
4865/** Internal macro. */
4866#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
4867 do \
4868 { \
4869 VBOXSTRICTRC rcStrict2 = a_Expr; \
4870 if (rcStrict2 != VINF_SUCCESS) \
4871 return rcStrict2; \
4872 } while (0)
4873
4874#define IEM_MC_ADVANCE_RIP() iemRegUpdateRip(pIemCpu)
4875#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
4876#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
4877#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
4878#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
4879#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
4880#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
4881
4882#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
4883#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
4884 do { \
4885 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
4886 return iemRaiseDeviceNotAvailable(pIemCpu); \
4887 } while (0)
4888#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
4889 do { \
4890 if (iemFRegFetchFsw(pIemCpu) & X86_FSW_ES) \
4891 return iemRaiseMathFault(pIemCpu); \
4892 } while (0)
4893#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
4894 do { \
4895 if (pIemCpu->uCpl != 0) \
4896 return iemRaiseGeneralProtectionFault0(pIemCpu); \
4897 } while (0)
4898
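/*
 * Illustration only (a hypothetical opcode body, not taken from the real
 * decoder tables): the IEM_MC_* macros above and below are meant to be strung
 * together like this inside an opcode function, so the same text could later
 * feed a recompiler instead of being interpreted directly.
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint16_t, u16Tmp);
 *     IEM_MC_FETCH_GREG_U16(u16Tmp, X86_GREG_xCX);
 *     IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 *     return VINF_SUCCESS;
 */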
4899
4900#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
4901#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
4902#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
4903#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
4904#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
4905#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
4906 uint32_t a_Name; \
4907 uint32_t *a_pName = &a_Name
4908#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
4909 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
4910
4911#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
4912#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
4913
4914#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
4915#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
4916#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
4917#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
4918#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
4919#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
4920#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
4921#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
4922#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
4923#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
4924#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
4925#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
4926#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
4927#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
4928#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
4929#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
4930#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
4931#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
4932#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
4933#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
4934#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
4935#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
4936#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
4937#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
4938#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
4939#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = iemFRegFetchFsw(pIemCpu)
4940
4941#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
4942#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
4943#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
4944#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
4945#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
4946#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
4947#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
4948#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
4949#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
4950
4951#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
4952#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
4953/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on
4954 * commit. */
4955#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
4956#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
4957#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
4958
4959#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
4960#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
4961#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
4962 do { \
4963 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
4964 *pu32Reg += (a_u32Value); \
4965        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
4966 } while (0)
4967#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
4968
4969#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
4970#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
4971#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
4972 do { \
4973 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
4974 *pu32Reg -= (a_u32Value); \
4975        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
4976 } while (0)
4977#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
4978
4979#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
4980#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
4981#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
4982#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
4983#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
4984#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
4985#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
4986
4987#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
4988#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
4989#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
4990#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
4991
4992#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
4993#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
4994#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
4995
4996#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
4997#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
4998
4999#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
5000#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
5001#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
5002
5003#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
5004#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
5005#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
5006
5007#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
5008
5009#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
5010
5011#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
5012#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
5013#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
5014 do { \
5015 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
5016 *pu32Reg &= (a_u32Value); \
5017 pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
5018 } while (0)
5019#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
5020
5021#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
5022#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
5023#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
5024 do { \
5025 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
5026 *pu32Reg |= (a_u32Value); \
5027 pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
5028 } while (0)
5029#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
5030
5031
5032#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
5033#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
5034#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
5035
5036
5037
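/* Example (illustrative only, not part of the original source): the single-flag
 * instructions map directly onto the three helpers above. Assuming the
 * X86_EFL_CF constant from x86.h, the bodies of stc, clc and cmc reduce to:
 *
 *     IEM_MC_SET_EFL_BIT(X86_EFL_CF);     // stc
 *     IEM_MC_CLEAR_EFL_BIT(X86_EFL_CF);   // clc
 *     IEM_MC_FLIP_EFL_BIT(X86_EFL_CF);    // cmc
 */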
5038#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
5039 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
5040#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
5041 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
5042#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
5043 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
5044
5045#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
5046 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
5047#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
5048 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
5049
5050#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
5051 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
5052#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
5053 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
5054
5055#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5056 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
5057
5058#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5059 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
5060#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
5061 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
5062
5063#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
5064 do { \
5065 uint8_t u8Tmp; \
5066 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
5067 (a_u16Dst) = u8Tmp; \
5068 } while (0)
5069#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
5070 do { \
5071 uint8_t u8Tmp; \
5072 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
5073 (a_u32Dst) = u8Tmp; \
5074 } while (0)
5075#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5076 do { \
5077 uint8_t u8Tmp; \
5078 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
5079 (a_u64Dst) = u8Tmp; \
5080 } while (0)
5081#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
5082 do { \
5083 uint16_t u16Tmp; \
5084 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
5085 (a_u32Dst) = u16Tmp; \
5086 } while (0)
5087#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5088 do { \
5089 uint16_t u16Tmp; \
5090 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
5091 (a_u64Dst) = u16Tmp; \
5092 } while (0)
5093#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5094 do { \
5095 uint32_t u32Tmp; \
5096 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
5097 (a_u64Dst) = u32Tmp; \
5098 } while (0)
5099
5100#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
5101 do { \
5102 uint8_t u8Tmp; \
5103 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
5104 (a_u16Dst) = (int8_t)u8Tmp; \
5105 } while (0)
5106#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
5107 do { \
5108 uint8_t u8Tmp; \
5109 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
5110 (a_u32Dst) = (int8_t)u8Tmp; \
5111 } while (0)
5112#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5113 do { \
5114 uint8_t u8Tmp; \
5115 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
5116 (a_u64Dst) = (int8_t)u8Tmp; \
5117 } while (0)
5118#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
5119 do { \
5120 uint16_t u16Tmp; \
5121 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
5122 (a_u32Dst) = (int16_t)u16Tmp; \
5123 } while (0)
5124#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5125 do { \
5126 uint16_t u16Tmp; \
5127 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
5128 (a_u64Dst) = (int16_t)u16Tmp; \
5129 } while (0)
5130#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5131 do { \
5132 uint32_t u32Tmp; \
5133 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
5134 (a_u64Dst) = (int32_t)u32Tmp; \
5135 } while (0)
5136
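/* Example (illustrative only, not part of the original source): a movzx-style
 * load would typically pair the effective address helper with one of the
 * zero-extending fetchers above. Sketch only, REX handling omitted;
 * IEM_MC_BEGIN, IEM_MC_LOCAL, IEM_MC_ADVANCE_RIP and IEM_MC_END are assumed
 * from the microcode macros defined earlier in this file:
 *
 *     IEM_MC_BEGIN(0, 2);
 *     IEM_MC_LOCAL(uint32_t, u32Value);
 *     IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
 *     IEM_MC_FETCH_MEM_U8_ZX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffSrc);
 *     IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK), u32Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */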
5137#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
5138 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
5139#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
5140 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
5141#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
5142 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
5143#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
5144 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
5145
5146#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
5147 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
5148
5149#define IEM_MC_PUSH_U16(a_u16Value) \
5150 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
5151#define IEM_MC_PUSH_U32(a_u32Value) \
5152 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
5153#define IEM_MC_PUSH_U64(a_u64Value) \
5154 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
5155
5156#define IEM_MC_POP_U16(a_pu16Value) \
5157 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
5158#define IEM_MC_POP_U32(a_pu32Value) \
5159 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
5160#define IEM_MC_POP_U64(a_pu64Value) \
5161 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
5162
5163/** Maps guest memory for direct or bounce buffered access.
5164 * The purpose is to pass it to an operand implementation, thus the a_iArg.
5165 * @remarks May return.
5166 */
5167#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
5168 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
5169
5170/** Maps guest memory for direct or bounce buffered access.
5171 * The purpose is to pass it to an operand implementation, thus the a_iArg.
5172 * @remarks May return.
5173 */
5174#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
5175 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
5176
5177/** Commits the memory and unmaps the guest memory.
5178 * @remarks May return.
5179 */
5180#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
5181 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
5182
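/* Example (illustrative only, not part of the original source): the map /
 * commit pair brackets read-modify-write memory operands. A rough sketch for
 * a 32-bit memory destination, assuming the IEM_ACCESS_DATA_RW access flag
 * from IEMInternal.h:
 *
 *     IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
 *     // ... modify the value through pu32Dst, e.g. via IEM_MC_CALL_VOID_AIMPL_3 ...
 *     IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
 *
 * Both steps are wrapped in IEM_MC_RETURN_ON_FAILURE, hence the "May return"
 * remarks above.
 */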
5183/** Calculate efficient address from R/M. */
5184#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm) \
5185 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), &(a_GCPtrEff)))
5186
5187#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
5188#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
5189#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
5190#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
5191
5192/**
5193 * Defers the rest of the instruction emulation to a C implementation routine
5194 * and returns, only taking the standard parameters.
5195 *
5196 * @param a_pfnCImpl The pointer to the C routine.
5197 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
5198 */
5199#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
5200
5201/**
5202 * Defers the rest of instruction emulation to a C implementation routine and
5203 * returns, taking one argument in addition to the standard ones.
5204 *
5205 * @param a_pfnCImpl The pointer to the C routine.
5206 * @param a0 The argument.
5207 */
5208#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
5209
5210/**
5211 * Defers the rest of the instruction emulation to a C implementation routine
5212 * and returns, taking two arguments in addition to the standard ones.
5213 *
5214 * @param a_pfnCImpl The pointer to the C routine.
5215 * @param a0 The first extra argument.
5216 * @param a1 The second extra argument.
5217 */
5218#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
5219
5220/**
5221 * Defers the rest of the instruction emulation to a C implementation routine
5222 * and returns, taking three arguments in addition to the standard ones.
5223 *
5224 * @param a_pfnCImpl The pointer to the C routine.
5225 * @param a0 The first extra argument.
5226 * @param a1 The second extra argument.
5227 * @param a2 The third extra argument.
5228 */
5229#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
5230
5231/**
5232 * Defers the rest of the instruction emulation to a C implementation routine
5233 * and returns, taking five arguments in addition to the standard ones.
5234 *
5235 * @param a_pfnCImpl The pointer to the C routine.
5236 * @param a0 The first extra argument.
5237 * @param a1 The second extra argument.
5238 * @param a2 The third extra argument.
5239 * @param a3 The fourth extra argument.
5240 * @param a4 The fifth extra argument.
5241 */
5242#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
5243
5244/**
5245 * Defers the entire instruction emulation to a C implementation routine and
5246 * returns, only taking the standard parameters.
5247 *
5248 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
5249 *
5250 * @param a_pfnCImpl The pointer to the C routine.
5251 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
5252 */
5253#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
5254
5255/**
5256 * Defers the entire instruction emulation to a C implementation routine and
5257 * returns, taking one argument in addition to the standard ones.
5258 *
5259 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
5260 *
5261 * @param a_pfnCImpl The pointer to the C routine.
5262 * @param a0 The argument.
5263 */
5264#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
5265
5266/**
5267 * Defers the entire instruction emulation to a C implementation routine and
5268 * returns, taking two arguments in addition to the standard ones.
5269 *
5270 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
5271 *
5272 * @param a_pfnCImpl The pointer to the C routine.
5273 * @param a0 The first extra argument.
5274 * @param a1 The second extra argument.
5275 */
5276#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
5277
5278/**
5279 * Defers the entire instruction emulation to a C implementation routine and
5280 * returns, taking three arguments in addition to the standard ones.
5281 *
5282 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
5283 *
5284 * @param a_pfnCImpl The pointer to the C routine.
5285 * @param a0 The first extra argument.
5286 * @param a1 The second extra argument.
5287 * @param a2 The third extra argument.
5288 */
5289#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
5290
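/* Example (illustrative only, not part of the original source): a decoder
 * function that punts everything to a C worker. FNIEMOP_DEF and
 * IEM_OPCODE_GET_NEXT_U8 are macros from this file; iemCImpl_SomeWorker is a
 * made-up name standing in for any IEM_CIMPL_DEF_* style routine:
 *
 *     FNIEMOP_DEF(iemOp_example)
 *     {
 *         uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
 *         return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_SomeWorker, u8Imm);
 *     }
 */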
5291#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
5292#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
5293#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
5294#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
5295#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
5296 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
5297 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
5298#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
5299 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
5300 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
5301#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
5302 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
5303 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
5304 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
5305#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
5306 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
5307 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
5308 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
5309#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
5310#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
5311#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
5312#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
5313 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
5314 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5315#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
5316 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
5317 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5318#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
5319 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
5320 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5321#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
5322 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
5323 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5324#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
5325 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
5326 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5327#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
5328 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
5329 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5330#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
5331#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
5332#define IEM_MC_ELSE() } else {
5333#define IEM_MC_ENDIF() } do {} while (0)
5334
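/* Example (illustrative only, not part of the original source): the EFLAGS
 * test macros supply the opening brace themselves and pair with IEM_MC_ELSE /
 * IEM_MC_ENDIF. E.g. jl takes the jump when SF != OF, so its body is roughly
 * as follows (IEM_MC_REL_JMP_S8 and IEM_MC_ADVANCE_RIP assumed from the
 * microcode macros defined earlier in this file):
 *
 *     IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF)
 *         IEM_MC_REL_JMP_S8(i8Imm);
 *     IEM_MC_ELSE()
 *         IEM_MC_ADVANCE_RIP();
 *     IEM_MC_ENDIF();
 */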
5335/** @} */
5336
5337
5338/** @name Opcode Debug Helpers.
5339 * @{
5340 */
5341#ifdef DEBUG
5342# define IEMOP_MNEMONIC(a_szMnemonic) \
5343 Log2(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, \
5344 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
5345# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
5346 Log2(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, \
5347 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
5348#else
5349# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
5350# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
5351#endif
5352
5353/** @} */
5354
5355
5356/** @name Opcode Helpers.
5357 * @{
5358 */
5359
5360/** The instruction allows no lock prefixing (in this encoding), throw #UD if
5361 * lock prefixed. */
5362#define IEMOP_HLP_NO_LOCK_PREFIX() \
5363 do \
5364 { \
5365 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
5366 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
5367 } while (0)
5368
5369/** The instruction is not available in 64-bit mode, throw #UD if we're in
5370 * 64-bit mode. */
5371#define IEMOP_HLP_NO_64BIT() \
5372 do \
5373 { \
5374 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
5375 return IEMOP_RAISE_INVALID_OPCODE(); \
5376 } while (0)
5377
5378/** The instruction defaults to 64-bit operand size in 64-bit mode. */
5379#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
5380 do \
5381 { \
5382 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
5383 iemRecalEffOpSize64Default(pIemCpu); \
5384 } while (0)
5385
5386
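/* Example (illustrative only, not part of the original source): these helpers
 * usually appear right after the mnemonic logging at the top of an opcode
 * decoder function, e.g. for a near push:
 *
 *     IEMOP_MNEMONIC("push rBX");
 *     IEMOP_HLP_NO_LOCK_PREFIX();
 *     IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
 *
 * raising the invalid-lock-prefix #UD if needed and switching the default
 * operand size to 64-bit when executing in long mode.
 */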
5387
5388/**
5389 * Calculates the effective address of a ModR/M memory operand.
5390 *
5391 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
5392 *
5393 * @return Strict VBox status code.
5394 * @param pIemCpu The IEM per CPU data.
5395 * @param bRm The ModRM byte.
5396 * @param pGCPtrEff Where to return the effective address.
5397 */
5398static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, PRTGCPTR pGCPtrEff)
5399{
5400 LogFlow(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
5401 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5402#define SET_SS_DEF() \
5403 do \
5404 { \
5405 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
5406 pIemCpu->iEffSeg = X86_SREG_SS; \
5407 } while (0)
5408
5409/** @todo Check the effective address size crap! */
5410 switch (pIemCpu->enmEffAddrMode)
5411 {
5412 case IEMMODE_16BIT:
5413 {
5414 uint16_t u16EffAddr;
5415
5416 /* Handle the disp16 form with no registers first. */
5417 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
5418 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
5419 else
5420 {
5421 /* Get the displacement. */
5422 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
5423 {
5424 case 0: u16EffAddr = 0; break;
5425 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
5426 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
5427 default: AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
5428 }
5429
5430 /* Add the base and index registers to the disp. */
5431 switch (bRm & X86_MODRM_RM_MASK)
5432 {
5433 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
5434 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
5435 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
5436 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
5437 case 4: u16EffAddr += pCtx->si; break;
5438 case 5: u16EffAddr += pCtx->di; break;
5439 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
5440 case 7: u16EffAddr += pCtx->bx; break;
5441 }
5442 }
5443
5444 *pGCPtrEff = u16EffAddr;
5445 LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#06RGv\n", *pGCPtrEff));
5446 return VINF_SUCCESS;
5447 }
5448
5449 case IEMMODE_32BIT:
5450 {
5451 uint32_t u32EffAddr;
5452
5453 /* Handle the disp32 form with no registers first. */
5454 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
5455 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
5456 else
5457 {
5458 /* Get the register (or SIB) value. */
5459 switch ((bRm & X86_MODRM_RM_MASK))
5460 {
5461 case 0: u32EffAddr = pCtx->eax; break;
5462 case 1: u32EffAddr = pCtx->ecx; break;
5463 case 2: u32EffAddr = pCtx->edx; break;
5464 case 3: u32EffAddr = pCtx->ebx; break;
5465 case 4: /* SIB */
5466 {
5467 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
5468
5469 /* Get the index and scale it. */
5470 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
5471 {
5472 case 0: u32EffAddr = pCtx->eax; break;
5473 case 1: u32EffAddr = pCtx->ecx; break;
5474 case 2: u32EffAddr = pCtx->edx; break;
5475 case 3: u32EffAddr = pCtx->ebx; break;
5476 case 4: u32EffAddr = 0; /*none */ break;
5477 case 5: u32EffAddr = pCtx->ebp; break;
5478 case 6: u32EffAddr = pCtx->esi; break;
5479 case 7: u32EffAddr = pCtx->edi; break;
5480 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5481 }
5482 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
5483
5484 /* add base */
5485 switch (bSib & X86_SIB_BASE_MASK)
5486 {
5487 case 0: u32EffAddr += pCtx->eax; break;
5488 case 1: u32EffAddr += pCtx->ecx; break;
5489 case 2: u32EffAddr += pCtx->edx; break;
5490 case 3: u32EffAddr += pCtx->ebx; break;
5491 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
5492 case 5:
5493 if ((bRm & X86_MODRM_MOD_MASK) != 0)
5494 {
5495 u32EffAddr += pCtx->ebp;
5496 SET_SS_DEF();
5497 }
5498 else
5499 {
5500 uint32_t u32Disp;
5501 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
5502 u32EffAddr += u32Disp;
5503 }
5504 break;
5505 case 6: u32EffAddr += pCtx->esi; break;
5506 case 7: u32EffAddr += pCtx->edi; break;
5507 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5508 }
5509 break;
5510 }
5511 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
5512 case 6: u32EffAddr = pCtx->esi; break;
5513 case 7: u32EffAddr = pCtx->edi; break;
5514 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5515 }
5516
5517 /* Get and add the displacement. */
5518 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
5519 {
5520 case 0:
5521 break;
5522 case 1:
5523 {
5524 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
5525 u32EffAddr += i8Disp;
5526 break;
5527 }
5528 case 2:
5529 {
5530 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
5531 u32EffAddr += u32Disp;
5532 break;
5533 }
5534 default:
5535 AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
5536 }
5537
5538 }
5539 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
5540 *pGCPtrEff = u32EffAddr;
5541 else
5542 {
5543 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
5544 *pGCPtrEff = u32EffAddr & UINT16_MAX;
5545 }
5546 LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
5547 return VINF_SUCCESS;
5548 }
5549
5550 case IEMMODE_64BIT:
5551 {
5552 uint64_t u64EffAddr;
5553
5554 /* Handle the rip+disp32 form with no registers first. */
5555 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
5556 {
5557 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
5558 u64EffAddr += pCtx->rip + pIemCpu->offOpcode;
5559 }
5560 else
5561 {
5562 /* Get the register (or SIB) value. */
5563 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
5564 {
5565 case 0: u64EffAddr = pCtx->rax; break;
5566 case 1: u64EffAddr = pCtx->rcx; break;
5567 case 2: u64EffAddr = pCtx->rdx; break;
5568 case 3: u64EffAddr = pCtx->rbx; break;
5569 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
5570 case 6: u64EffAddr = pCtx->rsi; break;
5571 case 7: u64EffAddr = pCtx->rdi; break;
5572 case 8: u64EffAddr = pCtx->r8; break;
5573 case 9: u64EffAddr = pCtx->r9; break;
5574 case 10: u64EffAddr = pCtx->r10; break;
5575 case 11: u64EffAddr = pCtx->r11; break;
5576 case 13: u64EffAddr = pCtx->r13; break;
5577 case 14: u64EffAddr = pCtx->r14; break;
5578 case 15: u64EffAddr = pCtx->r15; break;
5579 /* SIB */
5580 case 4:
5581 case 12:
5582 {
5583 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
5584
5585 /* Get the index and scale it. */
5586 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
5587 {
5588 case 0: u64EffAddr = pCtx->rax; break;
5589 case 1: u64EffAddr = pCtx->rcx; break;
5590 case 2: u64EffAddr = pCtx->rdx; break;
5591 case 3: u64EffAddr = pCtx->rbx; break;
5592 case 4: u64EffAddr = 0; /*none */ break;
5593 case 5: u64EffAddr = pCtx->rbp; break;
5594 case 6: u64EffAddr = pCtx->rsi; break;
5595 case 7: u64EffAddr = pCtx->rdi; break;
5596 case 8: u64EffAddr = pCtx->r8; break;
5597 case 9: u64EffAddr = pCtx->r9; break;
5598 case 10: u64EffAddr = pCtx->r10; break;
5599 case 11: u64EffAddr = pCtx->r11; break;
5600 case 12: u64EffAddr = pCtx->r12; break;
5601 case 13: u64EffAddr = pCtx->r13; break;
5602 case 14: u64EffAddr = pCtx->r14; break;
5603 case 15: u64EffAddr = pCtx->r15; break;
5604 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5605 }
5606 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
5607
5608 /* add base */
5609 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
5610 {
5611 case 0: u64EffAddr += pCtx->rax; break;
5612 case 1: u64EffAddr += pCtx->rcx; break;
5613 case 2: u64EffAddr += pCtx->rdx; break;
5614 case 3: u64EffAddr += pCtx->rbx; break;
5615 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
5616 case 6: u64EffAddr += pCtx->rsi; break;
5617 case 7: u64EffAddr += pCtx->rdi; break;
5618 case 8: u64EffAddr += pCtx->r8; break;
5619 case 9: u64EffAddr += pCtx->r9; break;
5620 case 10: u64EffAddr += pCtx->r10; break;
5621 case 11: u64EffAddr += pCtx->r11; break;
5622 case 14: u64EffAddr += pCtx->r14; break;
5623 case 15: u64EffAddr += pCtx->r15; break;
5624 /* complicated encodings */
5625 case 5:
5626 case 13:
5627 if ((bRm & X86_MODRM_MOD_MASK) != 0)
5628 {
5629 if (!pIemCpu->uRexB)
5630 {
5631 u64EffAddr += pCtx->rbp;
5632 SET_SS_DEF();
5633 }
5634 else
5635 u64EffAddr += pCtx->r13;
5636 }
5637 else
5638 {
5639 uint32_t u32Disp;
5640 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
5641 u64EffAddr += (int32_t)u32Disp;
5642 }
5643 break;
5644 }
5645 break;
5646 }
5647 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5648 }
5649
5650 /* Get and add the displacement. */
5651 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
5652 {
5653 case 0:
5654 break;
5655 case 1:
5656 {
5657 int8_t i8Disp;
5658 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
5659 u64EffAddr += i8Disp;
5660 break;
5661 }
5662 case 2:
5663 {
5664 uint32_t u32Disp;
5665 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
5666 u64EffAddr += (int32_t)u32Disp;
5667 break;
5668 }
5669 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
5670 }
5671
5672 }
5673 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
5674 *pGCPtrEff = u64EffAddr;
5675 else
5676 *pGCPtrEff = u64EffAddr & UINT16_MAX;
5677 LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
5678 return VINF_SUCCESS;
5679 }
5680 }
5681
5682 AssertFailedReturn(VERR_INTERNAL_ERROR_3);
5683}
5684
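/* Worked example (illustrative only, not part of the original source): in
 * 16-bit addressing the ModR/M byte 0x46 has mod=01 rm=110, i.e. [bp+disp8].
 * With disp8=0x10 and BP=0x2000 the helper above returns an effective address
 * of 0x2010 and, via SET_SS_DEF(), defaults the segment to SS unless a
 * segment prefix overrides it. */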
5685/** @} */
5686
5687
5688
5689/*
5690 * Include the instructions
5691 */
5692#include "IEMAllInstructions.cpp.h"
5693
5694
5695
5696
5697#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
5698
5699/**
5700 * Sets up execution verification mode.
5701 */
5702static void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
5703{
5704 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
5705 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
5706
5707 /*
5708 * Enable verification and/or logging.
5709 */
5710 pIemCpu->fNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
5711 if ( pIemCpu->fNoRem
5712#if 0 /* auto enable on first paged protected mode interrupt */
5713 && pOrgCtx->eflags.Bits.u1IF
5714 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
5715 && TRPMHasTrap(pVCpu)
5716 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
5717#endif
5718#if 0
5719 && pOrgCtx->cs == 0x10
5720 && ( pOrgCtx->rip == 0x90119e3e
5721 || pOrgCtx->rip == 0x901d9810
5722 )
5723#endif
5724#if 0 /* Auto enable; DSL. */
5725 && pOrgCtx->cs == 0x10
5726 && ( pOrgCtx->rip == 0x00100fc7
5727 || pOrgCtx->rip == 0x00100ffc
5728 || pOrgCtx->rip == 0x00100ffe
5729 )
5730#endif
5731#if 1
5732 && pOrgCtx->rip == 0x9022bb3a
5733#endif
5734#if 0
5735 && 0
5736#endif
5737 )
5738 {
5739 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
5740 RTLogFlags(NULL, "enabled");
5741 pIemCpu->fNoRem = false;
5742 }
5743
5744 /*
5745 * Switch state.
5746 */
5747 if (IEM_VERIFICATION_ENABLED(pIemCpu))
5748 {
5749 static CPUMCTX s_DebugCtx; /* Ugly! */
5750
5751 s_DebugCtx = *pOrgCtx;
5752 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
5753 }
5754
5755 /*
5756 * See if there is an interrupt pending in TRPM and inject it if we can.
5757 */
5758 if ( pOrgCtx->eflags.Bits.u1IF
5759 && TRPMHasTrap(pVCpu)
5760 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
5761 {
5762 uint8_t u8TrapNo;
5763 TRPMEVENT enmType;
5764 RTGCUINT uErrCode;
5765 RTGCPTR uCr2;
5766 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2); AssertRC(rc2);
5767 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2);
5768 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
5769 TRPMResetTrap(pVCpu);
5770 }
5771
5772 /*
5773 * Reset the counters.
5774 */
5775 pIemCpu->cIOReads = 0;
5776 pIemCpu->cIOWrites = 0;
5777 pIemCpu->fUndefinedEFlags = 0;
5778
5779 if (IEM_VERIFICATION_ENABLED(pIemCpu))
5780 {
5781 /*
5782 * Free all verification records.
5783 */
5784 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
5785 pIemCpu->pIemEvtRecHead = NULL;
5786 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
5787 do
5788 {
5789 while (pEvtRec)
5790 {
5791 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
5792 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
5793 pIemCpu->pFreeEvtRec = pEvtRec;
5794 pEvtRec = pNext;
5795 }
5796 pEvtRec = pIemCpu->pOtherEvtRecHead;
5797 pIemCpu->pOtherEvtRecHead = NULL;
5798 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
5799 } while (pEvtRec);
5800 }
5801}
5802
5803
5804/**
5805 * Allocate an event record.
5806 * @returns Pointer to a record.
5807 */
5808static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
5809{
5810 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
5811 return NULL;
5812
5813 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
5814 if (pEvtRec)
5815 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
5816 else
5817 {
5818 if (!pIemCpu->ppIemEvtRecNext)
5819 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
5820
5821 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
5822 if (!pEvtRec)
5823 return NULL;
5824 }
5825 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
5826 pEvtRec->pNext = NULL;
5827 return pEvtRec;
5828}
5829
5830
5831/**
5832 * IOMMMIORead notification.
5833 */
5834VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
5835{
5836 PVMCPU pVCpu = VMMGetCpu(pVM);
5837 if (!pVCpu)
5838 return;
5839 PIEMCPU pIemCpu = &pVCpu->iem.s;
5840 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5841 if (!pEvtRec)
5842 return;
5843 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5844 pEvtRec->u.RamRead.GCPhys = GCPhys;
5845 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
5846 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
5847 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
5848}
5849
5850
5851/**
5852 * IOMMMIOWrite notification.
5853 */
5854VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
5855{
5856 PVMCPU pVCpu = VMMGetCpu(pVM);
5857 if (!pVCpu)
5858 return;
5859 PIEMCPU pIemCpu = &pVCpu->iem.s;
5860 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5861 if (!pEvtRec)
5862 return;
5863 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
5864 pEvtRec->u.RamWrite.GCPhys = GCPhys;
5865 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
5866 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
5867 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
5868 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
5869 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
5870 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
5871 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
5872}
5873
5874
5875/**
5876 * IOMIOPortRead notification.
5877 */
5878VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
5879{
5880 PVMCPU pVCpu = VMMGetCpu(pVM);
5881 if (!pVCpu)
5882 return;
5883 PIEMCPU pIemCpu = &pVCpu->iem.s;
5884 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5885 if (!pEvtRec)
5886 return;
5887 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
5888 pEvtRec->u.IOPortRead.Port = Port;
5889 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
5890 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
5891 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
5892}
5893
5894/**
5895 * IOMIOPortWrite notification.
5896 */
5897VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
5898{
5899 PVMCPU pVCpu = VMMGetCpu(pVM);
5900 if (!pVCpu)
5901 return;
5902 PIEMCPU pIemCpu = &pVCpu->iem.s;
5903 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5904 if (!pEvtRec)
5905 return;
5906 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
5907 pEvtRec->u.IOPortWrite.Port = Port;
5908 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
5909 pEvtRec->u.IOPortWrite.u32Value = u32Value;
5910 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
5911 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
5912}
5913
5914
5915VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
5916{
5917 AssertFailed();
5918}
5919
5920
5921VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
5922{
5923 AssertFailed();
5924}
5925
5926
5927/**
5928 * Fakes and records an I/O port read.
5929 *
5930 * @returns VINF_SUCCESS.
5931 * @param pIemCpu The IEM per CPU data.
5932 * @param Port The I/O port.
5933 * @param pu32Value Where to store the fake value.
5934 * @param cbValue The size of the access.
5935 */
5936static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
5937{
5938 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5939 if (pEvtRec)
5940 {
5941 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
5942 pEvtRec->u.IOPortRead.Port = Port;
5943 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
5944 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5945 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5946 }
5947 pIemCpu->cIOReads++;
5948 *pu32Value = 0xffffffff;
5949 return VINF_SUCCESS;
5950}
5951
5952
5953/**
5954 * Fakes and records an I/O port write.
5955 *
5956 * @returns VINF_SUCCESS.
5957 * @param pIemCpu The IEM per CPU data.
5958 * @param Port The I/O port.
5959 * @param u32Value The value being written.
5960 * @param cbValue The size of the access.
5961 */
5962static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
5963{
5964 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5965 if (pEvtRec)
5966 {
5967 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
5968 pEvtRec->u.IOPortWrite.Port = Port;
5969 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
5970 pEvtRec->u.IOPortWrite.u32Value = u32Value;
5971 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5972 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5973 }
5974 pIemCpu->cIOWrites++;
5975 return VINF_SUCCESS;
5976}
5977
5978
5979/**
5980 * Used to add extra details about a stub case.
5981 * @param pIemCpu The IEM per CPU state.
5982 */
5983static void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
5984{
5985 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5986 PVM pVM = IEMCPU_TO_VM(pIemCpu);
5987 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
5988 char szRegs[4096];
5989 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5990 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5991 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5992 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5993 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5994 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5995 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5996 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5997 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5998 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5999 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6000 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6001 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6002 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6003 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6004 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6005 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6006 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6007 " efer=%016VR{efer}\n"
6008 " pat=%016VR{pat}\n"
6009 " sf_mask=%016VR{sf_mask}\n"
6010 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6011 " lstar=%016VR{lstar}\n"
6012 " star=%016VR{star} cstar=%016VR{cstar}\n"
6013 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6014 );
6015
6016 char szInstr1[256];
6017 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip - pIemCpu->offOpcode,
6018 DBGF_DISAS_FLAGS_DEFAULT_MODE,
6019 szInstr1, sizeof(szInstr1), NULL);
6020 char szInstr2[256];
6021 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
6022 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6023 szInstr2, sizeof(szInstr2), NULL);
6024
6025 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
6026}
6027
6028
6029/**
6030 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
6031 * dump to the assertion info.
6032 *
6033 * @param pEvtRec The record to dump.
6034 */
6035static void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
6036{
6037 switch (pEvtRec->enmEvent)
6038 {
6039 case IEMVERIFYEVENT_IOPORT_READ:
6040 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
6041 pEvtRec->u.IOPortRead.Port,
6042 pEvtRec->u.IOPortRead.cbValue);
6043 break;
6044 case IEMVERIFYEVENT_IOPORT_WRITE:
6045 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
6046 pEvtRec->u.IOPortWrite.Port,
6047 pEvtRec->u.IOPortWrite.cbValue,
6048 pEvtRec->u.IOPortWrite.u32Value);
6049 break;
6050 case IEMVERIFYEVENT_RAM_READ:
6051 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
6052 pEvtRec->u.RamRead.GCPhys,
6053 pEvtRec->u.RamRead.cb);
6054 break;
6055 case IEMVERIFYEVENT_RAM_WRITE:
6056 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*RHxs\n",
6057 pEvtRec->u.RamWrite.GCPhys,
6058 pEvtRec->u.RamWrite.cb,
6059 (int)pEvtRec->u.RamWrite.cb,
6060 pEvtRec->u.RamWrite.ab);
6061 break;
6062 default:
6063 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
6064 break;
6065 }
6066}
6067
6068
6069/**
6070 * Raises an assertion on the specified record, showing the given message with
6071 * a record dump attached.
6072 *
6073 * @param pIemCpu The IEM per CPU data.
6074 * @param pEvtRec1 The first record.
6075 * @param pEvtRec2 The second record.
6076 * @param pszMsg The message explaining why we're asserting.
6077 */
6078static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
6079{
6080 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
6081 iemVerifyAssertAddRecordDump(pEvtRec1);
6082 iemVerifyAssertAddRecordDump(pEvtRec2);
6083 iemVerifyAssertMsg2(pIemCpu);
6084 RTAssertPanic();
6085}
6086
6087
6088/**
6089 * Raises an assertion on the specified record, showing the given message with
6090 * a record dump attached.
6091 *
6092 * @param pIemCpu The IEM per CPU data.
6093 * @param pEvtRec The record.
6094 * @param pszMsg The message explaining why we're asserting.
6095 */
6096static void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
6097{
6098 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
6099 iemVerifyAssertAddRecordDump(pEvtRec);
6100 iemVerifyAssertMsg2(pIemCpu);
6101 RTAssertPanic();
6102}
6103
6104
6105/**
6106 * Verifies a write record.
6107 *
6108 * @param pIemCpu The IEM per CPU data.
6109 * @param pEvtRec The write record.
6110 */
6111static void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec)
6112{
6113 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
6114 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
6115 if ( RT_FAILURE(rc)
6116 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
6117 {
6118 /* fend off ins */
6119 if ( !pIemCpu->cIOReads
6120 || pEvtRec->u.RamWrite.ab[0] != 0xcc
6121 || ( pEvtRec->u.RamWrite.cb != 1
6122 && pEvtRec->u.RamWrite.cb != 2
6123 && pEvtRec->u.RamWrite.cb != 4) )
6124 {
6125 /* fend off ROMs */
6126 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000c0000) > UINT32_C(0x8000)
6127 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000e0000) > UINT32_C(0x20000)
6128 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
6129 {
6130 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
6131 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
6132 RTAssertMsg2Add("REM: %.*Rhxs\n"
6133 "IEM: %.*Rhxs\n",
6134 pEvtRec->u.RamWrite.cb, abBuf,
6135 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
6136 iemVerifyAssertAddRecordDump(pEvtRec);
6137 iemVerifyAssertMsg2(pIemCpu);
6138 RTAssertPanic();
6139 }
6140 }
6141 }
6142
6143}
6144
6145/**
6146 * Performs the post-execution verification checks.
6147 */
6148static void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
6149{
6150 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
6151 return;
6152
6153 /*
6154 * Switch back the state.
6155 */
6156 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
6157 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
6158 Assert(pOrgCtx != pDebugCtx);
6159 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
6160
6161 /*
6162 * Execute the instruction in REM.
6163 */
6164 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6165 EMRemLock(pVM);
6166 int rc = REMR3EmulateInstruction(pVM, IEMCPU_TO_VMCPU(pIemCpu));
6167 AssertRC(rc);
6168 EMRemUnlock(pVM);
6169
6170 /*
6171 * Compare the register states.
6172 */
6173 unsigned cDiffs = 0;
6174 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
6175 {
6176 Log(("REM and IEM ends up with different registers!\n"));
6177
6178# define CHECK_FIELD(a_Field) \
6179 do \
6180 { \
6181 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
6182 { \
6183 switch (sizeof(pOrgCtx->a_Field)) \
6184 { \
6185 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
6186 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - rem=%04x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
6187 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - rem=%08x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
6188 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - rem=%016llx\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
6189 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
6190 } \
6191 cDiffs++; \
6192 } \
6193 } while (0)
6194
6195# define CHECK_BIT_FIELD(a_Field) \
6196 do \
6197 { \
6198 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
6199 { \
6200 RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); \
6201 cDiffs++; \
6202 } \
6203 } while (0)
6204
6205# define CHECK_SEL(a_Sel) \
6206 do \
6207 { \
6208 CHECK_FIELD(a_Sel); \
6209 if ( pOrgCtx->a_Sel##Hid.Attr.u != pDebugCtx->a_Sel##Hid.Attr.u \
6210 && (pOrgCtx->a_Sel##Hid.Attr.u | X86_SEL_TYPE_ACCESSED) != pDebugCtx->a_Sel##Hid.Attr.u) \
6211 { \
6212 RTAssertMsg2Weak(" %8sHid.Attr differs - iem=%02x - rem=%02x\n", #a_Sel, pDebugCtx->a_Sel##Hid.Attr.u, pOrgCtx->a_Sel##Hid.Attr.u); \
6213 cDiffs++; \
6214 } \
6215 CHECK_FIELD(a_Sel##Hid.u64Base); \
6216 CHECK_FIELD(a_Sel##Hid.u32Limit); \
6217 } while (0)
6218
6219 if (memcmp(&pOrgCtx->fpu, &pDebugCtx->fpu, sizeof(pDebugCtx->fpu)))
6220 {
6221 RTAssertMsg2Weak(" the FPU state differs\n");
6222 cDiffs++;
6223 CHECK_FIELD(fpu.FCW);
6224 CHECK_FIELD(fpu.FSW);
6225 CHECK_FIELD(fpu.FTW);
6226 CHECK_FIELD(fpu.FOP);
6227 CHECK_FIELD(fpu.FPUIP);
6228 CHECK_FIELD(fpu.CS);
6229 CHECK_FIELD(fpu.Rsrvd1);
6230 CHECK_FIELD(fpu.FPUDP);
6231 CHECK_FIELD(fpu.DS);
6232 CHECK_FIELD(fpu.Rsrvd2);
6233 CHECK_FIELD(fpu.MXCSR);
6234 CHECK_FIELD(fpu.MXCSR_MASK);
6235 CHECK_FIELD(fpu.aRegs[0].au64[0]); CHECK_FIELD(fpu.aRegs[0].au64[1]);
6236 CHECK_FIELD(fpu.aRegs[1].au64[0]); CHECK_FIELD(fpu.aRegs[1].au64[1]);
6237 CHECK_FIELD(fpu.aRegs[2].au64[0]); CHECK_FIELD(fpu.aRegs[2].au64[1]);
6238 CHECK_FIELD(fpu.aRegs[3].au64[0]); CHECK_FIELD(fpu.aRegs[3].au64[1]);
6239 CHECK_FIELD(fpu.aRegs[4].au64[0]); CHECK_FIELD(fpu.aRegs[4].au64[1]);
6240 CHECK_FIELD(fpu.aRegs[5].au64[0]); CHECK_FIELD(fpu.aRegs[5].au64[1]);
6241 CHECK_FIELD(fpu.aRegs[6].au64[0]); CHECK_FIELD(fpu.aRegs[6].au64[1]);
6242 CHECK_FIELD(fpu.aRegs[7].au64[0]); CHECK_FIELD(fpu.aRegs[7].au64[1]);
6243 CHECK_FIELD(fpu.aXMM[ 0].au64[0]); CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
6244 CHECK_FIELD(fpu.aXMM[ 1].au64[0]); CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
6245 CHECK_FIELD(fpu.aXMM[ 2].au64[0]); CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
6246 CHECK_FIELD(fpu.aXMM[ 3].au64[0]); CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
6247 CHECK_FIELD(fpu.aXMM[ 4].au64[0]); CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
6248 CHECK_FIELD(fpu.aXMM[ 5].au64[0]); CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
6249 CHECK_FIELD(fpu.aXMM[ 6].au64[0]); CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
6250 CHECK_FIELD(fpu.aXMM[ 7].au64[0]); CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
6251 CHECK_FIELD(fpu.aXMM[ 8].au64[0]); CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
6252 CHECK_FIELD(fpu.aXMM[ 9].au64[0]); CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
6253 CHECK_FIELD(fpu.aXMM[10].au64[0]); CHECK_FIELD(fpu.aXMM[10].au64[1]);
6254 CHECK_FIELD(fpu.aXMM[11].au64[0]); CHECK_FIELD(fpu.aXMM[11].au64[1]);
6255 CHECK_FIELD(fpu.aXMM[12].au64[0]); CHECK_FIELD(fpu.aXMM[12].au64[1]);
6256 CHECK_FIELD(fpu.aXMM[13].au64[0]); CHECK_FIELD(fpu.aXMM[13].au64[1]);
6257 CHECK_FIELD(fpu.aXMM[14].au64[0]); CHECK_FIELD(fpu.aXMM[14].au64[1]);
6258 CHECK_FIELD(fpu.aXMM[15].au64[0]); CHECK_FIELD(fpu.aXMM[15].au64[1]);
6259 for (unsigned i = 0; i < RT_ELEMENTS(pOrgCtx->fpu.au32RsrvdRest); i++)
6260 CHECK_FIELD(fpu.au32RsrvdRest[i]);
6261 }
6262 CHECK_FIELD(rip);
6263 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
6264 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
6265 {
6266 RTAssertMsg2Weak(" rflags differs - iem=%08llx rem=%08llx\n", pDebugCtx->rflags.u, pOrgCtx->rflags.u);
6267 CHECK_BIT_FIELD(rflags.Bits.u1CF);
6268 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
6269 CHECK_BIT_FIELD(rflags.Bits.u1PF);
6270 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
6271 CHECK_BIT_FIELD(rflags.Bits.u1AF);
6272 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
6273 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
6274 CHECK_BIT_FIELD(rflags.Bits.u1SF);
6275 CHECK_BIT_FIELD(rflags.Bits.u1TF);
6276 CHECK_BIT_FIELD(rflags.Bits.u1IF);
6277 CHECK_BIT_FIELD(rflags.Bits.u1DF);
6278 CHECK_BIT_FIELD(rflags.Bits.u1OF);
6279 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
6280 CHECK_BIT_FIELD(rflags.Bits.u1NT);
6281 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
6282 CHECK_BIT_FIELD(rflags.Bits.u1RF);
6283 CHECK_BIT_FIELD(rflags.Bits.u1VM);
6284 CHECK_BIT_FIELD(rflags.Bits.u1AC);
6285 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
6286 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
6287 CHECK_BIT_FIELD(rflags.Bits.u1ID);
6288 }
6289
6290 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
6291 CHECK_FIELD(rax);
6292 CHECK_FIELD(rcx);
6293 if (!pIemCpu->fIgnoreRaxRdx)
6294 CHECK_FIELD(rdx);
6295 CHECK_FIELD(rbx);
6296 CHECK_FIELD(rsp);
6297 CHECK_FIELD(rbp);
6298 CHECK_FIELD(rsi);
6299 CHECK_FIELD(rdi);
6300 CHECK_FIELD(r8);
6301 CHECK_FIELD(r9);
6302 CHECK_FIELD(r10);
6303 CHECK_FIELD(r11);
6304 CHECK_FIELD(r12);
6305 CHECK_FIELD(r13);
6306 CHECK_SEL(cs);
6307 CHECK_SEL(ss);
6308 CHECK_SEL(ds);
6309 CHECK_SEL(es);
6310 CHECK_SEL(fs);
6311 CHECK_SEL(gs);
6312 CHECK_FIELD(cr0);
6313 CHECK_FIELD(cr2);
6314 CHECK_FIELD(cr3);
6315 CHECK_FIELD(cr4);
6316 CHECK_FIELD(dr[0]);
6317 CHECK_FIELD(dr[1]);
6318 CHECK_FIELD(dr[2]);
6319 CHECK_FIELD(dr[3]);
6320 CHECK_FIELD(dr[6]);
6321 CHECK_FIELD(dr[7]);
6322 CHECK_FIELD(gdtr.cbGdt);
6323 CHECK_FIELD(gdtr.pGdt);
6324 CHECK_FIELD(idtr.cbIdt);
6325 CHECK_FIELD(idtr.pIdt);
6326 CHECK_FIELD(ldtr);
6327 CHECK_FIELD(ldtrHid.u64Base);
6328 CHECK_FIELD(ldtrHid.u32Limit);
6329 CHECK_FIELD(ldtrHid.Attr.u);
6330 CHECK_FIELD(tr);
6331 CHECK_FIELD(trHid.u64Base);
6332 CHECK_FIELD(trHid.u32Limit);
6333 CHECK_FIELD(trHid.Attr.u);
6334 CHECK_FIELD(SysEnter.cs);
6335 CHECK_FIELD(SysEnter.eip);
6336 CHECK_FIELD(SysEnter.esp);
6337 CHECK_FIELD(msrEFER);
6338 CHECK_FIELD(msrSTAR);
6339 CHECK_FIELD(msrPAT);
6340 CHECK_FIELD(msrLSTAR);
6341 CHECK_FIELD(msrCSTAR);
6342 CHECK_FIELD(msrSFMASK);
6343 CHECK_FIELD(msrKERNELGSBASE);
6344
6345 if (cDiffs != 0)
6346 {
6347 if (LogIs3Enabled())
6348 DBGFR3Info(pVM, "cpumguest", "verbose", NULL);
6349 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
6350 iemVerifyAssertMsg2(pIemCpu);
6351 RTAssertPanic();
6352 }
6353# undef CHECK_FIELD
6354# undef CHECK_BIT_FIELD
6355 }
6356
6357 /*
6358 * If the register state compared fine, check the verification event
6359 * records.
6360 */
6361 if (cDiffs == 0)
6362 {
6363 /*
6364 * Compare verification event records.
6365 * - I/O port accesses should be a 1:1 match.
6366 */
6367 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
6368 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
6369 while (pIemRec && pOtherRec)
6370 {
6371 /* Since we might miss RAM writes and reads, skip any extra IEM read
6372 records here and verify that extra write records match guest memory. */
6373 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
6374 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
6375 && pIemRec->pNext)
6376 {
6377 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
6378 iemVerifyWriteRecord(pIemCpu, pIemRec);
6379 pIemRec = pIemRec->pNext;
6380 }
6381
6382 /* Do the compare. */
6383 if (pIemRec->enmEvent != pOtherRec->enmEvent)
6384 {
6385 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
6386 break;
6387 }
6388 bool fEquals;
6389 switch (pIemRec->enmEvent)
6390 {
6391 case IEMVERIFYEVENT_IOPORT_READ:
6392 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
6393 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
6394 break;
6395 case IEMVERIFYEVENT_IOPORT_WRITE:
6396 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
6397 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
6398 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
6399 break;
6400 case IEMVERIFYEVENT_RAM_READ:
6401 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
6402 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
6403 break;
6404 case IEMVERIFYEVENT_RAM_WRITE:
6405 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
6406 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
6407 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
6408 break;
6409 default:
6410 fEquals = false;
6411 break;
6412 }
6413 if (!fEquals)
6414 {
6415 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
6416 break;
6417 }
6418
6419 /* advance */
6420 pIemRec = pIemRec->pNext;
6421 pOtherRec = pOtherRec->pNext;
6422 }
6423
6424 /* Ignore extra writes and reads. */
6425 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
6426 {
6427 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
6428 iemVerifyWriteRecord(pIemCpu, pIemRec);
6429 pIemRec = pIemRec->pNext;
6430 }
6431 if (pIemRec != NULL)
6432 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
6433 else if (pOtherRec != NULL)
6434 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
6435 }
6436 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
6437
6438#if 0
6439 /*
6440 * HACK ALERT! You don't normally want to verify a whole boot sequence.
6441 */
6442 if (pIemCpu->cInstructions == 1)
6443 RTLogFlags(NULL, "disabled");
6444#endif
6445}
6446
6447#else /* !IEM_VERIFICATION_MODE || !IN_RING3 */
6448
6449/* stubs */
6450static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
6451{
6452 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
6453 return VERR_INTERNAL_ERROR;
6454}
6455
6456static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
6457{
6458 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
6459 return VERR_INTERNAL_ERROR;
6460}
6461
6462#endif /* !IEM_VERIFICATION_MODE || !IN_RING3 */
6463
6464
6465/**
6466 * Execute one instruction.
6467 *
6468 * @return Strict VBox status code.
6469 * @param pVCpu The current virtual CPU.
6470 */
6471VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
6472{
6473 PIEMCPU pIemCpu = &pVCpu->iem.s;
6474
6475#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
6476 iemExecVerificationModeSetup(pIemCpu);
6477#endif
6478#ifdef LOG_ENABLED
6479 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6480 if (LogIs2Enabled())
6481 {
6482 char szInstr[256];
6483 uint32_t cbInstr = 0;
6484 DBGFR3DisasInstrEx(pVCpu->pVMR3, pVCpu->idCpu, 0, 0,
6485 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6486 szInstr, sizeof(szInstr), &cbInstr);
6487
6488 Log2(("**** "
6489 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
6490 " eip=%08x esp=%08x ebp=%08x iopl=%d\n"
6491 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
6492 " %s\n"
6493 ,
6494 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
6495 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL,
6496 (RTSEL)pCtx->cs, (RTSEL)pCtx->ss, (RTSEL)pCtx->ds, (RTSEL)pCtx->es,
6497 (RTSEL)pCtx->fs, (RTSEL)pCtx->gs, pCtx->eflags.u,
6498 szInstr));
6499
6500 if (LogIs3Enabled())
6501 DBGFR3Info(pVCpu->pVMR3, "cpumguest", "verbose", NULL);
6502 }
6503#endif
6504
6505 /*
6506 * Do the decoding and emulation.
6507 */
6508 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
6509 if (rcStrict != VINF_SUCCESS)
6510 return rcStrict;
6511
6512 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
6513 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
6514 if (rcStrict == VINF_SUCCESS)
6515 pIemCpu->cInstructions++;
6516//#ifdef DEBUG
6517// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
6518//#endif
6519
6520 /* Execute the next instruction as well if a cli, pop ss or
6521 mov ss, Gr has just completed successfully. */
6522 if ( rcStrict == VINF_SUCCESS
6523 && VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
6524 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
6525 {
6526 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
6527 if (rcStrict == VINF_SUCCESS)
6528 {
6529 IEM_OPCODE_GET_NEXT_U8(&b);
6530 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
6531 if (rcStrict == VINF_SUCCESS)
6532 pIemCpu->cInstructions++;
6533 }
6534 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
6535 }
6536
6537 /*
6538 * Assert some sanity.
6539 */
6540#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
6541 iemExecVerificationModeCheck(pIemCpu);
6542#endif
6543 return rcStrict;
6544}
6545
6546
6547/**
6548 * Injects a trap, fault, abort, software interrupt or external interrupt.
6549 *
6550 * The parameter list matches TRPMQueryTrapAll pretty closely.
6551 *
6552 * @returns Strict VBox status code.
6553 * @param pVCpu The current virtual CPU.
6554 * @param u8TrapNo The trap number.
6555 * @param enmType What type is it (trap/fault/abort), software
6556 * interrupt or hardware interrupt.
6557 * @param uErrCode The error code if applicable.
6558 * @param uCr2 The CR2 value if applicable.
6559 */
6560VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2)
6561{
6562 iemInitDecoder(&pVCpu->iem.s);
6563
6564 uint32_t fFlags;
6565 switch (enmType)
6566 {
6567 case TRPM_HARDWARE_INT:
6568 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
6569 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
6570 uErrCode = uCr2 = 0;
6571 break;
6572
6573 case TRPM_SOFTWARE_INT:
6574 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
6575 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
6576 uErrCode = uCr2 = 0;
6577 break;
6578
6579 case TRPM_TRAP:
6580 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
6581 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
6582 if (u8TrapNo == X86_XCPT_PF)
6583 fFlags |= IEM_XCPT_FLAGS_CR2;
6584 switch (u8TrapNo)
6585 {
6586 case X86_XCPT_DF:
6587 case X86_XCPT_TS:
6588 case X86_XCPT_NP:
6589 case X86_XCPT_SS:
6590 case X86_XCPT_PF:
6591 case X86_XCPT_AC:
6592 fFlags |= IEM_XCPT_FLAGS_ERR;
6593 break;
6594 }
6595 break;
6596
6597 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6598 }
6599
6600 return iemRaiseXcptOrInt(&pVCpu->iem.s, 0, u8TrapNo, fFlags, uErrCode, uCr2);
6601}
6602
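/* Example (illustrative only, not part of the original source): forwarding a
 * guest page fault recorded by TRPM would look roughly like
 *
 *     IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, uCr2);
 *
 * which ends up with IEM_XCPT_FLAGS_T_CPU_XCPT, IEM_XCPT_FLAGS_ERR and
 * IEM_XCPT_FLAGS_CR2 set before the exception is raised via iemRaiseXcptOrInt.
 */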