VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@40141

Last change on this file since 40141 was 40141, checked in by vboxsync, 13 years ago

fdiv implementation in progress (-> laptop).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 254.6 KB
1/* $Id: IEMAll.cpp 40141 2012-02-15 21:34:51Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, it is thought
36 * to conflict with the speed goal, as the disassembler chews things a bit too
37 * much while leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 */
43
44/*******************************************************************************
45* Header Files *
46*******************************************************************************/
47#define LOG_GROUP LOG_GROUP_IEM
48#include <VBox/vmm/iem.h>
49#include <VBox/vmm/pgm.h>
50#include <VBox/vmm/iom.h>
51#include <VBox/vmm/em.h>
52#include <VBox/vmm/tm.h>
53#include <VBox/vmm/dbgf.h>
54#ifdef IEM_VERIFICATION_MODE
55# include <VBox/vmm/rem.h>
56# include <VBox/vmm/mm.h>
57#endif
58#include "IEMInternal.h"
59#include <VBox/vmm/vm.h>
60#include <VBox/log.h>
61#include <VBox/err.h>
62#include <VBox/param.h>
63#include <iprt/assert.h>
64#include <iprt/string.h>
65#include <iprt/x86.h>
66
67
68/*******************************************************************************
69* Structures and Typedefs *
70*******************************************************************************/
71/** @typedef PFNIEMOP
72 * Pointer to an opcode decoder function.
73 */
74
75/** @def FNIEMOP_DEF
76 * Define an opcode decoder function.
77 *
78 * We're using macros for this so that adding and removing parameters as well as
79 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
80 *
81 * @param a_Name The function name.
82 */
83
84
85#if defined(__GNUC__) && defined(RT_ARCH_X86)
86typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
87# define FNIEMOP_DEF(a_Name) \
88 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name (PIEMCPU pIemCpu)
89# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
90 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
91# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
92 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
93
94#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
95typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
96# define FNIEMOP_DEF(a_Name) \
97 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
98# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
99 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
100# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
101 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
102
103#elif defined(__GNUC__)
104typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
105# define FNIEMOP_DEF(a_Name) \
106 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
107# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
108 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
109# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
110 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
111
112#else
113typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
114# define FNIEMOP_DEF(a_Name) \
115 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
116# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
117 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
118# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
119 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
120
121#endif
122
123
124/**
125 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
126 */
127typedef union IEMSELDESC
128{
129 /** The legacy view. */
130 X86DESC Legacy;
131 /** The long mode view. */
132 X86DESC64 Long;
133} IEMSELDESC;
134/** Pointer to a selector descriptor table entry. */
135typedef IEMSELDESC *PIEMSELDESC;
136
137
138/*******************************************************************************
139* Defined Constants And Macros *
140*******************************************************************************/
141/** @name IEM status codes.
142 *
143 * Not quite sure how this will play out in the end, just aliasing safe status
144 * codes for now.
145 *
146 * @{ */
147#define VINF_IEM_RAISED_XCPT VINF_EM_RESCHEDULE
148/** @} */
149
150/** Temporary hack to disable the double execution. Will be removed in favor
151 * of a dedicated execution mode in EM. */
152//#define IEM_VERIFICATION_MODE_NO_REM
153
154/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
155 * due to GCC lacking knowledge about the value range of a switch. */
156#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
157
158/**
159 * Call an opcode decoder function.
160 *
161 * We're using macros for this so that adding and removing parameters can be
162 * done as we please. See FNIEMOP_DEF.
163 */
164#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
165
166/**
167 * Call a common opcode decoder function taking one extra argument.
168 *
169 * We're using macros for this so that adding and removing parameters can be
170 * done as we please. See FNIEMOP_DEF_1.
171 */
172#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
173
174/**
175 * Call a common opcode decoder function taking two extra arguments.
176 *
177 * We're using macros for this so that adding and removing parameters can be
178 * done as we please. See FNIEMOP_DEF_2.
179 */
180#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
181
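/*
 * A minimal usage sketch of how these macros pair up; the function names and
 * the 0x90 immediate below are made up for illustration, not taken from this
 * file.  FNIEMOP_DEF_1 defines a worker taking one extra argument, and
 * FNIEMOP_CALL_1 invokes it, passing pIemCpu implicitly.
 */
FNIEMOP_DEF_1(iemOp_Sketch_Worker, uint8_t, bImm)
{
    NOREF(bImm);                /* a real worker would decode and execute here */
    return VINF_SUCCESS;
}

FNIEMOP_DEF(iemOp_Sketch)
{
    return FNIEMOP_CALL_1(iemOp_Sketch_Worker, 0x90);
}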
182/**
183 * Check if we're currently executing in real or virtual 8086 mode.
184 *
185 * @returns @c true if it is, @c false if not.
186 * @param a_pIemCpu The IEM state of the current CPU.
187 */
188#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
189
190/**
191 * Check if we're currently executing in long mode.
192 *
193 * @returns @c true if it is, @c false if not.
194 * @param a_pIemCpu The IEM state of the current CPU.
195 */
196#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
197
198/**
199 * Check if we're currently executing in real mode.
200 *
201 * @returns @c true if it is, @c false if not.
202 * @param a_pIemCpu The IEM state of the current CPU.
203 */
204#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
205
206/**
207 * Tests if an AMD CPUID feature (extended) is marked present - ECX.
208 */
209#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
210
211/**
212 * Tests if an AMD CPUID feature (extended) is marked present - EDX.
213 */
214#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(a_fEdx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0)
215
216/**
217 * Tests if at least one of the specified AMD CPUID features (extended) is
218 * marked present.
219 */
220#define IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(a_fEdx, a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), (a_fEcx))
221
222/**
223 * Checks if an Intel CPUID feature is present.
224 */
225#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx) \
226 ( ((a_fEdx) & (X86_CPUID_FEATURE_EDX_TSC | 0)) \
227 || iemRegIsIntelCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0) )
228
229/**
230 * Check if the address is canonical.
231 */
232#define IEM_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000))
233
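/*
 * Worked example of the IEM_IS_CANONICAL trick above: adding 0x800000000000
 * folds both canonical halves of the 64-bit address space into [0, 2^48 - 1]:
 *   0x00007FFFFFFFFFFF + 0x800000000000 = 0x0000FFFFFFFFFFFF  -> canonical
 *   0xFFFF800000000000 + 0x800000000000 = 0x0000000000000000  -> canonical (wraps)
 *   0x0000800000000000 + 0x800000000000 = 0x0001000000000000  -> not canonical
 */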
234
235/*******************************************************************************
236* Global Variables *
237*******************************************************************************/
238extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
239
240
241/** Function table for the ADD instruction. */
242static const IEMOPBINSIZES g_iemAImpl_add =
243{
244 iemAImpl_add_u8, iemAImpl_add_u8_locked,
245 iemAImpl_add_u16, iemAImpl_add_u16_locked,
246 iemAImpl_add_u32, iemAImpl_add_u32_locked,
247 iemAImpl_add_u64, iemAImpl_add_u64_locked
248};
249
250/** Function table for the ADC instruction. */
251static const IEMOPBINSIZES g_iemAImpl_adc =
252{
253 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
254 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
255 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
256 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
257};
258
259/** Function table for the SUB instruction. */
260static const IEMOPBINSIZES g_iemAImpl_sub =
261{
262 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
263 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
264 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
265 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
266};
267
268/** Function table for the SBB instruction. */
269static const IEMOPBINSIZES g_iemAImpl_sbb =
270{
271 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
272 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
273 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
274 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
275};
276
277/** Function table for the OR instruction. */
278static const IEMOPBINSIZES g_iemAImpl_or =
279{
280 iemAImpl_or_u8, iemAImpl_or_u8_locked,
281 iemAImpl_or_u16, iemAImpl_or_u16_locked,
282 iemAImpl_or_u32, iemAImpl_or_u32_locked,
283 iemAImpl_or_u64, iemAImpl_or_u64_locked
284};
285
286/** Function table for the XOR instruction. */
287static const IEMOPBINSIZES g_iemAImpl_xor =
288{
289 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
290 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
291 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
292 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
293};
294
295/** Function table for the AND instruction. */
296static const IEMOPBINSIZES g_iemAImpl_and =
297{
298 iemAImpl_and_u8, iemAImpl_and_u8_locked,
299 iemAImpl_and_u16, iemAImpl_and_u16_locked,
300 iemAImpl_and_u32, iemAImpl_and_u32_locked,
301 iemAImpl_and_u64, iemAImpl_and_u64_locked
302};
303
304/** Function table for the CMP instruction.
305 * @remarks Making operand order ASSUMPTIONS.
306 */
307static const IEMOPBINSIZES g_iemAImpl_cmp =
308{
309 iemAImpl_cmp_u8, NULL,
310 iemAImpl_cmp_u16, NULL,
311 iemAImpl_cmp_u32, NULL,
312 iemAImpl_cmp_u64, NULL
313};
314
315/** Function table for the TEST instruction.
316 * @remarks Making operand order ASSUMPTIONS.
317 */
318static const IEMOPBINSIZES g_iemAImpl_test =
319{
320 iemAImpl_test_u8, NULL,
321 iemAImpl_test_u16, NULL,
322 iemAImpl_test_u32, NULL,
323 iemAImpl_test_u64, NULL
324};
325
326/** Function table for the BT instruction. */
327static const IEMOPBINSIZES g_iemAImpl_bt =
328{
329 NULL, NULL,
330 iemAImpl_bt_u16, NULL,
331 iemAImpl_bt_u32, NULL,
332 iemAImpl_bt_u64, NULL
333};
334
335/** Function table for the BTC instruction. */
336static const IEMOPBINSIZES g_iemAImpl_btc =
337{
338 NULL, NULL,
339 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
340 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
341 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
342};
343
344/** Function table for the BTR instruction. */
345static const IEMOPBINSIZES g_iemAImpl_btr =
346{
347 NULL, NULL,
348 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
349 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
350 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
351};
352
353/** Function table for the BTS instruction. */
354static const IEMOPBINSIZES g_iemAImpl_bts =
355{
356 NULL, NULL,
357 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
358 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
359 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
360};
361
362/** Function table for the BSF instruction. */
363static const IEMOPBINSIZES g_iemAImpl_bsf =
364{
365 NULL, NULL,
366 iemAImpl_bsf_u16, NULL,
367 iemAImpl_bsf_u32, NULL,
368 iemAImpl_bsf_u64, NULL
369};
370
371/** Function table for the BSR instruction. */
372static const IEMOPBINSIZES g_iemAImpl_bsr =
373{
374 NULL, NULL,
375 iemAImpl_bsr_u16, NULL,
376 iemAImpl_bsr_u32, NULL,
377 iemAImpl_bsr_u64, NULL
378};
379
380/** Function table for the IMUL instruction. */
381static const IEMOPBINSIZES g_iemAImpl_imul_two =
382{
383 NULL, NULL,
384 iemAImpl_imul_two_u16, NULL,
385 iemAImpl_imul_two_u32, NULL,
386 iemAImpl_imul_two_u64, NULL
387};
388
389/** Group 1 /r lookup table. */
390static const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
391{
392 &g_iemAImpl_add,
393 &g_iemAImpl_or,
394 &g_iemAImpl_adc,
395 &g_iemAImpl_sbb,
396 &g_iemAImpl_and,
397 &g_iemAImpl_sub,
398 &g_iemAImpl_xor,
399 &g_iemAImpl_cmp
400};
401
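/*
 * A minimal dispatch sketch (variable and member names here are assumptions,
 * not copied from this file): a group 1 opcode (0x80..0x83) selects its
 * implementation via the reg field, bits 5:3, of the ModR/M byte, e.g.:
 *
 *     PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
 *     pImpl->pfnNormalU32(pu32Dst, u32Src, pEFlags);
 */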
402/** Function table for the INC instruction. */
403static const IEMOPUNARYSIZES g_iemAImpl_inc =
404{
405 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
406 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
407 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
408 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
409};
410
411/** Function table for the DEC instruction. */
412static const IEMOPUNARYSIZES g_iemAImpl_dec =
413{
414 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
415 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
416 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
417 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
418};
419
420/** Function table for the NEG instruction. */
421static const IEMOPUNARYSIZES g_iemAImpl_neg =
422{
423 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
424 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
425 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
426 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
427};
428
429/** Function table for the NOT instruction. */
430static const IEMOPUNARYSIZES g_iemAImpl_not =
431{
432 iemAImpl_not_u8, iemAImpl_not_u8_locked,
433 iemAImpl_not_u16, iemAImpl_not_u16_locked,
434 iemAImpl_not_u32, iemAImpl_not_u32_locked,
435 iemAImpl_not_u64, iemAImpl_not_u64_locked
436};
437
438
439/** Function table for the ROL instruction. */
440static const IEMOPSHIFTSIZES g_iemAImpl_rol =
441{
442 iemAImpl_rol_u8,
443 iemAImpl_rol_u16,
444 iemAImpl_rol_u32,
445 iemAImpl_rol_u64
446};
447
448/** Function table for the ROR instruction. */
449static const IEMOPSHIFTSIZES g_iemAImpl_ror =
450{
451 iemAImpl_ror_u8,
452 iemAImpl_ror_u16,
453 iemAImpl_ror_u32,
454 iemAImpl_ror_u64
455};
456
457/** Function table for the RCL instruction. */
458static const IEMOPSHIFTSIZES g_iemAImpl_rcl =
459{
460 iemAImpl_rcl_u8,
461 iemAImpl_rcl_u16,
462 iemAImpl_rcl_u32,
463 iemAImpl_rcl_u64
464};
465
466/** Function table for the RCR instruction. */
467static const IEMOPSHIFTSIZES g_iemAImpl_rcr =
468{
469 iemAImpl_rcr_u8,
470 iemAImpl_rcr_u16,
471 iemAImpl_rcr_u32,
472 iemAImpl_rcr_u64
473};
474
475/** Function table for the SHL instruction. */
476static const IEMOPSHIFTSIZES g_iemAImpl_shl =
477{
478 iemAImpl_shl_u8,
479 iemAImpl_shl_u16,
480 iemAImpl_shl_u32,
481 iemAImpl_shl_u64
482};
483
484/** Function table for the SHR instruction. */
485static const IEMOPSHIFTSIZES g_iemAImpl_shr =
486{
487 iemAImpl_shr_u8,
488 iemAImpl_shr_u16,
489 iemAImpl_shr_u32,
490 iemAImpl_shr_u64
491};
492
493/** Function table for the SAR instruction. */
494static const IEMOPSHIFTSIZES g_iemAImpl_sar =
495{
496 iemAImpl_sar_u8,
497 iemAImpl_sar_u16,
498 iemAImpl_sar_u32,
499 iemAImpl_sar_u64
500};
501
502
503/** Function table for the MUL instruction. */
504static const IEMOPMULDIVSIZES g_iemAImpl_mul =
505{
506 iemAImpl_mul_u8,
507 iemAImpl_mul_u16,
508 iemAImpl_mul_u32,
509 iemAImpl_mul_u64
510};
511
512/** Function table for the IMUL instruction working implicitly on rAX. */
513static const IEMOPMULDIVSIZES g_iemAImpl_imul =
514{
515 iemAImpl_imul_u8,
516 iemAImpl_imul_u16,
517 iemAImpl_imul_u32,
518 iemAImpl_imul_u64
519};
520
521/** Function table for the DIV instruction. */
522static const IEMOPMULDIVSIZES g_iemAImpl_div =
523{
524 iemAImpl_div_u8,
525 iemAImpl_div_u16,
526 iemAImpl_div_u32,
527 iemAImpl_div_u64
528};
529
530/** Function table for the IDIV instruction. */
531static const IEMOPMULDIVSIZES g_iemAImpl_idiv =
532{
533 iemAImpl_idiv_u8,
534 iemAImpl_idiv_u16,
535 iemAImpl_idiv_u32,
536 iemAImpl_idiv_u64
537};
538
539/** Function table for the SHLD instruction */
540static const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
541{
542 iemAImpl_shld_u16,
543 iemAImpl_shld_u32,
544 iemAImpl_shld_u64,
545};
546
547/** Function table for the SHRD instruction */
548static const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
549{
550 iemAImpl_shrd_u16,
551 iemAImpl_shrd_u32,
552 iemAImpl_shrd_u64,
553};
554
555
556/*******************************************************************************
557* Internal Functions *
558*******************************************************************************/
559static VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
560/*static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
561static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
562static VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
563static VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
564static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
565static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
566static VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
567static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
568static VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
569static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
570static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
571static VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
572static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
573static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
574static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
575static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
576static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
577static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
578static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel);
579static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
580static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
581static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
582static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
583
584#ifdef IEM_VERIFICATION_MODE
585static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
586#endif
587static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
588static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
589
590
591/**
592 * Initializes the decoder state.
593 *
594 * @param pIemCpu The per CPU IEM state.
595 */
596DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu)
597{
598 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
599
600 pIemCpu->uCpl = CPUMGetGuestCPL(IEMCPU_TO_VMCPU(pIemCpu), CPUMCTX2CORE(pCtx));
601 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
602 ? IEMMODE_64BIT
603 : pCtx->csHid.Attr.n.u1DefBig /** @todo check if this is correct... */
604 ? IEMMODE_32BIT
605 : IEMMODE_16BIT;
606 pIemCpu->enmCpuMode = enmMode;
607 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
608 pIemCpu->enmEffAddrMode = enmMode;
609 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
610 pIemCpu->enmEffOpSize = enmMode;
611 pIemCpu->fPrefixes = 0;
612 pIemCpu->uRexReg = 0;
613 pIemCpu->uRexB = 0;
614 pIemCpu->uRexIndex = 0;
615 pIemCpu->iEffSeg = X86_SREG_DS;
616 pIemCpu->offOpcode = 0;
617 pIemCpu->cbOpcode = 0;
618 pIemCpu->cActiveMappings = 0;
619 pIemCpu->iNextMapping = 0;
620}
621
622
623/**
624 * Performs the initial opcode prefetch when starting execution.
625 *
626 * @returns Strict VBox status code.
627 * @param pIemCpu The IEM state.
628 */
629static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu)
630{
631#ifdef IEM_VERIFICATION_MODE
632 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
633#endif
634 iemInitDecoder(pIemCpu);
635
636 /*
637 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
638 *
639 * First translate CS:rIP to a physical address.
640 */
641 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
642 uint32_t cbToTryRead;
643 RTGCPTR GCPtrPC;
644 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
645 {
646 cbToTryRead = PAGE_SIZE;
647 GCPtrPC = pCtx->rip;
648 if (!IEM_IS_CANONICAL(GCPtrPC))
649 return iemRaiseGeneralProtectionFault0(pIemCpu);
650 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
651 }
652 else
653 {
654 uint32_t GCPtrPC32 = pCtx->eip;
655 Assert(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
656 if (GCPtrPC32 > pCtx->csHid.u32Limit)
657 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
658 cbToTryRead = pCtx->csHid.u32Limit - GCPtrPC32 + 1;
659 GCPtrPC = pCtx->csHid.u64Base + GCPtrPC32;
660 }
661
662 RTGCPHYS GCPhys;
663 uint64_t fFlags;
664 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
665 if (RT_FAILURE(rc))
666 {
667 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
668 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
669 }
670 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
671 {
672 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
673 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
674 }
675 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
676 {
677 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
678 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
679 }
680 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
681 /** @todo Check reserved bits and such stuff. PGM is better at doing
682 * that, so do it when implementing the guest virtual address
683 * TLB... */
684
685#ifdef IEM_VERIFICATION_MODE
686 /*
687 * Optimistic optimization: Use unconsumed opcode bytes from the previous
688 * instruction.
689 */
690 /** @todo optimize this differently by not using PGMPhysRead. */
691 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
692 pIemCpu->GCPhysOpcodes = GCPhys;
693 if ( offPrevOpcodes < cbOldOpcodes
694 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
695 {
696 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
697 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
698 pIemCpu->cbOpcode = cbNew;
699 return VINF_SUCCESS;
700 }
701#endif
702
703 /*
704 * Read the bytes at this address.
705 */
706 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
707 if (cbToTryRead > cbLeftOnPage)
708 cbToTryRead = cbLeftOnPage;
709 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
710 cbToTryRead = sizeof(pIemCpu->abOpcode);
711 /** @todo patch manager */
712 if (!pIemCpu->fByPassHandlers)
713 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, pIemCpu->abOpcode, cbToTryRead);
714 else
715 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pIemCpu->abOpcode, GCPhys, cbToTryRead);
716 if (rc != VINF_SUCCESS)
717 {
718 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - read error - rc=%Rrc\n", GCPtrPC, rc));
719 return rc;
720 }
721 pIemCpu->cbOpcode = cbToTryRead;
722
723 return VINF_SUCCESS;
724}
725
726
727/**
728 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
729 * exception if it fails.
730 *
731 * @returns Strict VBox status code.
732 * @param pIemCpu The IEM state.
733 * @param cbMin The minimum number of opcode bytes required.
734 */
735static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
736{
737 /*
738 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
739 *
740 * First translate CS:rIP to a physical address.
741 */
742 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
743 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
744 uint32_t cbToTryRead;
745 RTGCPTR GCPtrNext;
746 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
747 {
748 cbToTryRead = PAGE_SIZE;
749 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
750 if (!IEM_IS_CANONICAL(GCPtrNext))
751 return iemRaiseGeneralProtectionFault0(pIemCpu);
752 cbToTryRead = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
753 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
754 }
755 else
756 {
757 uint32_t GCPtrNext32 = pCtx->eip;
758 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
759 GCPtrNext32 += pIemCpu->cbOpcode;
760 if (GCPtrNext32 > pCtx->csHid.u32Limit)
761 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
762 cbToTryRead = pCtx->csHid.u32Limit - GCPtrNext32 + 1;
763 if (cbToTryRead < cbMin - cbLeft)
764 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
765 GCPtrNext = pCtx->csHid.u64Base + GCPtrNext32;
766 }
767
768 RTGCPHYS GCPhys;
769 uint64_t fFlags;
770 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
771 if (RT_FAILURE(rc))
772 {
773 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
774 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
775 }
776 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
777 {
778 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
779 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
780 }
781 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
782 {
783 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
784 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
785 }
786 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
787 //Log(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
788 /** @todo Check reserved bits and such stuff. PGM is better at doing
789 * that, so do it when implementing the guest virtual address
790 * TLB... */
791
792 /*
793 * Read the bytes at this address.
794 */
795 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
796 if (cbToTryRead > cbLeftOnPage)
797 cbToTryRead = cbLeftOnPage;
798 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
799 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
800 Assert(cbToTryRead >= cbMin - cbLeft);
801 if (!pIemCpu->fByPassHandlers)
802 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);
803 else
804 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
805 if (rc != VINF_SUCCESS)
806 {
807 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc\n", GCPtrNext, rc));
808 return rc;
809 }
810 pIemCpu->cbOpcode += cbToTryRead;
811 //Log(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
812
813 return VINF_SUCCESS;
814}
815
816
817/**
818 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
819 *
820 * @returns Strict VBox status code.
821 * @param pIemCpu The IEM state.
822 * @param pb Where to return the opcode byte.
823 */
824DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
825{
826 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
827 if (rcStrict == VINF_SUCCESS)
828 {
829 uint8_t offOpcode = pIemCpu->offOpcode;
830 *pb = pIemCpu->abOpcode[offOpcode];
831 pIemCpu->offOpcode = offOpcode + 1;
832 }
833 else
834 *pb = 0;
835 return rcStrict;
836}
837
838
839/**
840 * Fetches the next opcode byte.
841 *
842 * @returns Strict VBox status code.
843 * @param pIemCpu The IEM state.
844 * @param pu8 Where to return the opcode byte.
845 */
846DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
847{
848 uint8_t const offOpcode = pIemCpu->offOpcode;
849 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
850 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
851
852 *pu8 = pIemCpu->abOpcode[offOpcode];
853 pIemCpu->offOpcode = offOpcode + 1;
854 return VINF_SUCCESS;
855}
856
857
858/**
859 * Fetches the next opcode byte, returns automatically on failure.
860 *
861 * @param a_pu8 Where to return the opcode byte.
862 * @remark Implicitly references pIemCpu.
863 */
864#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
865 do \
866 { \
867 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
868 if (rcStrict2 != VINF_SUCCESS) \
869 return rcStrict2; \
870 } while (0)
871
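/*
 * A minimal usage sketch; the decoder name iemOp_Sketch_FetchModRm is made up
 * for illustration.  The macro bails out of the enclosing function with the
 * strict status code if fetching the byte fails.
 */
FNIEMOP_DEF(iemOp_Sketch_FetchModRm)
{
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm);   /* returns rcStrict2 on fetch failure */
    NOREF(bRm);                     /* a real decoder would dissect mod/reg/rm here */
    return VINF_SUCCESS;
}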
872
873/**
874 * Fetches the next signed byte from the opcode stream.
875 *
876 * @returns Strict VBox status code.
877 * @param pIemCpu The IEM state.
878 * @param pi8 Where to return the signed byte.
879 */
880DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
881{
882 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
883}
884
885
886/**
887 * Fetches the next signed byte from the opcode stream, returning automatically
888 * on failure.
889 *
890 * @param a_pi8 Where to return the signed byte.
891 * @remark Implicitly references pIemCpu.
892 */
893#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
894 do \
895 { \
896 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
897 if (rcStrict2 != VINF_SUCCESS) \
898 return rcStrict2; \
899 } while (0)
900
901
902/**
903 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
904 *
905 * @returns Strict VBox status code.
906 * @param pIemCpu The IEM state.
907 * @param pu16 Where to return the sign-extended opcode byte (as a word).
908 */
909DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
910{
911 uint8_t u8;
912 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
913 if (rcStrict == VINF_SUCCESS)
914 *pu16 = (int8_t)u8;
915 return rcStrict;
916}
917
918
919/**
920 * Fetches the next signed byte from the opcode stream, extending it to
921 * unsigned 16-bit.
922 *
923 * @returns Strict VBox status code.
924 * @param pIemCpu The IEM state.
925 * @param pu16 Where to return the unsigned word.
926 */
927DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
928{
929 uint8_t const offOpcode = pIemCpu->offOpcode;
930 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
931 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
932
933 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
934 pIemCpu->offOpcode = offOpcode + 1;
935 return VINF_SUCCESS;
936}
937
938
939/**
940 * Fetches the next signed byte from the opcode stream, sign-extending it to
941 * a word and returning automatically on failure.
942 *
943 * @param a_pu16 Where to return the word.
944 * @remark Implicitly references pIemCpu.
945 */
946#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
947 do \
948 { \
949 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
950 if (rcStrict2 != VINF_SUCCESS) \
951 return rcStrict2; \
952 } while (0)
953
954
955/**
956 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
957 *
958 * @returns Strict VBox status code.
959 * @param pIemCpu The IEM state.
960 * @param pu16 Where to return the opcode word.
961 */
962DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
963{
964 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
965 if (rcStrict == VINF_SUCCESS)
966 {
967 uint8_t offOpcode = pIemCpu->offOpcode;
968 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
969 pIemCpu->offOpcode = offOpcode + 2;
970 }
971 else
972 *pu16 = 0;
973 return rcStrict;
974}
975
976
977/**
978 * Fetches the next opcode word.
979 *
980 * @returns Strict VBox status code.
981 * @param pIemCpu The IEM state.
982 * @param pu16 Where to return the opcode word.
983 */
984DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
985{
986 uint8_t const offOpcode = pIemCpu->offOpcode;
987 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
988 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
989
990 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
991 pIemCpu->offOpcode = offOpcode + 2;
992 return VINF_SUCCESS;
993}
994
995
996/**
997 * Fetches the next opcode word, returns automatically on failure.
998 *
999 * @param a_pu16 Where to return the opcode word.
1000 * @remark Implicitly references pIemCpu.
1001 */
1002#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1003 do \
1004 { \
1005 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1006 if (rcStrict2 != VINF_SUCCESS) \
1007 return rcStrict2; \
1008 } while (0)
1009
1010
1011/**
1012 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1013 *
1014 * @returns Strict VBox status code.
1015 * @param pIemCpu The IEM state.
1016 * @param pu32 Where to return the opcode double word.
1017 */
1018DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1019{
1020 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1021 if (rcStrict == VINF_SUCCESS)
1022 {
1023 uint8_t offOpcode = pIemCpu->offOpcode;
1024 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1025 pIemCpu->offOpcode = offOpcode + 2;
1026 }
1027 else
1028 *pu32 = 0;
1029 return rcStrict;
1030}
1031
1032
1033/**
1034 * Fetches the next opcode word, zero extending it to a double word.
1035 *
1036 * @returns Strict VBox status code.
1037 * @param pIemCpu The IEM state.
1038 * @param pu32 Where to return the opcode double word.
1039 */
1040DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1041{
1042 uint8_t const offOpcode = pIemCpu->offOpcode;
1043 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1044 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1045
1046 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1047 pIemCpu->offOpcode = offOpcode + 2;
1048 return VINF_SUCCESS;
1049}
1050
1051
1052/**
1053 * Fetches the next opcode word and zero extends it to a double word, returns
1054 * automatically on failure.
1055 *
1056 * @param a_pu32 Where to return the opcode double word.
1057 * @remark Implicitly references pIemCpu.
1058 */
1059#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1060 do \
1061 { \
1062 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1063 if (rcStrict2 != VINF_SUCCESS) \
1064 return rcStrict2; \
1065 } while (0)
1066
1067
1068/**
1069 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1070 *
1071 * @returns Strict VBox status code.
1072 * @param pIemCpu The IEM state.
1073 * @param pu64 Where to return the opcode quad word.
1074 */
1075DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1076{
1077 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1078 if (rcStrict == VINF_SUCCESS)
1079 {
1080 uint8_t offOpcode = pIemCpu->offOpcode;
1081 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1082 pIemCpu->offOpcode = offOpcode + 2;
1083 }
1084 else
1085 *pu64 = 0;
1086 return rcStrict;
1087}
1088
1089
1090/**
1091 * Fetches the next opcode word, zero extending it to a quad word.
1092 *
1093 * @returns Strict VBox status code.
1094 * @param pIemCpu The IEM state.
1095 * @param pu64 Where to return the opcode quad word.
1096 */
1097DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1098{
1099 uint8_t const offOpcode = pIemCpu->offOpcode;
1100 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1101 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1102
1103 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1104 pIemCpu->offOpcode = offOpcode + 2;
1105 return VINF_SUCCESS;
1106}
1107
1108
1109/**
1110 * Fetches the next opcode word and zero extends it to a quad word, returns
1111 * automatically on failure.
1112 *
1113 * @param a_pu64 Where to return the opcode quad word.
1114 * @remark Implicitly references pIemCpu.
1115 */
1116#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1117 do \
1118 { \
1119 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1120 if (rcStrict2 != VINF_SUCCESS) \
1121 return rcStrict2; \
1122 } while (0)
1123
1124
1125/**
1126 * Fetches the next signed word from the opcode stream.
1127 *
1128 * @returns Strict VBox status code.
1129 * @param pIemCpu The IEM state.
1130 * @param pi16 Where to return the signed word.
1131 */
1132DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1133{
1134 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1135}
1136
1137
1138/**
1139 * Fetches the next signed word from the opcode stream, returning automatically
1140 * on failure.
1141 *
1142 * @param a_pi16 Where to return the signed word.
1143 * @remark Implicitly references pIemCpu.
1144 */
1145#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1146 do \
1147 { \
1148 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1149 if (rcStrict2 != VINF_SUCCESS) \
1150 return rcStrict2; \
1151 } while (0)
1152
1153
1154/**
1155 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1156 *
1157 * @returns Strict VBox status code.
1158 * @param pIemCpu The IEM state.
1159 * @param pu32 Where to return the opcode dword.
1160 */
1161DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1162{
1163 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1164 if (rcStrict == VINF_SUCCESS)
1165 {
1166 uint8_t offOpcode = pIemCpu->offOpcode;
1167 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1168 pIemCpu->abOpcode[offOpcode + 1],
1169 pIemCpu->abOpcode[offOpcode + 2],
1170 pIemCpu->abOpcode[offOpcode + 3]);
1171 pIemCpu->offOpcode = offOpcode + 4;
1172 }
1173 else
1174 *pu32 = 0;
1175 return rcStrict;
1176}
1177
1178
1179/**
1180 * Fetches the next opcode dword.
1181 *
1182 * @returns Strict VBox status code.
1183 * @param pIemCpu The IEM state.
1184 * @param pu32 Where to return the opcode double word.
1185 */
1186DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1187{
1188 uint8_t const offOpcode = pIemCpu->offOpcode;
1189 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1190 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1191
1192 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1193 pIemCpu->abOpcode[offOpcode + 1],
1194 pIemCpu->abOpcode[offOpcode + 2],
1195 pIemCpu->abOpcode[offOpcode + 3]);
1196 pIemCpu->offOpcode = offOpcode + 4;
1197 return VINF_SUCCESS;
1198}
1199
1200
1201/**
1202 * Fetches the next opcode dword, returns automatically on failure.
1203 *
1204 * @param a_pu32 Where to return the opcode dword.
1205 * @remark Implicitly references pIemCpu.
1206 */
1207#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1208 do \
1209 { \
1210 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1211 if (rcStrict2 != VINF_SUCCESS) \
1212 return rcStrict2; \
1213 } while (0)
1214
1215
1216/**
1217 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1218 *
1219 * @returns Strict VBox status code.
1220 * @param pIemCpu The IEM state.
1221 * @param pu64 Where to return the opcode quad word.
1222 */
1223DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1224{
1225 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1226 if (rcStrict == VINF_SUCCESS)
1227 {
1228 uint8_t offOpcode = pIemCpu->offOpcode;
1229 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1230 pIemCpu->abOpcode[offOpcode + 1],
1231 pIemCpu->abOpcode[offOpcode + 2],
1232 pIemCpu->abOpcode[offOpcode + 3]);
1233 pIemCpu->offOpcode = offOpcode + 4;
1234 }
1235 else
1236 *pu64 = 0;
1237 return rcStrict;
1238}
1239
1240
1241/**
1242 * Fetches the next opcode dword, zero extending it to a quad word.
1243 *
1244 * @returns Strict VBox status code.
1245 * @param pIemCpu The IEM state.
1246 * @param pu64 Where to return the opcode quad word.
1247 */
1248DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1249{
1250 uint8_t const offOpcode = pIemCpu->offOpcode;
1251 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1252 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1253
1254 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1255 pIemCpu->abOpcode[offOpcode + 1],
1256 pIemCpu->abOpcode[offOpcode + 2],
1257 pIemCpu->abOpcode[offOpcode + 3]);
1258 pIemCpu->offOpcode = offOpcode + 4;
1259 return VINF_SUCCESS;
1260}
1261
1262
1263/**
1264 * Fetches the next opcode dword and zero extends it to a quad word, returns
1265 * automatically on failure.
1266 *
1267 * @param a_pu64 Where to return the opcode quad word.
1268 * @remark Implicitly references pIemCpu.
1269 */
1270#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1271 do \
1272 { \
1273 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1274 if (rcStrict2 != VINF_SUCCESS) \
1275 return rcStrict2; \
1276 } while (0)
1277
1278
1279/**
1280 * Fetches the next signed double word from the opcode stream.
1281 *
1282 * @returns Strict VBox status code.
1283 * @param pIemCpu The IEM state.
1284 * @param pi32 Where to return the signed double word.
1285 */
1286DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1287{
1288 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1289}
1290
1291/**
1292 * Fetches the next signed double word from the opcode stream, returning
1293 * automatically on failure.
1294 *
1295 * @param a_pi32 Where to return the signed double word.
1296 * @remark Implicitly references pIemCpu.
1297 */
1298#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1299 do \
1300 { \
1301 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1302 if (rcStrict2 != VINF_SUCCESS) \
1303 return rcStrict2; \
1304 } while (0)
1305
1306
1307/**
1308 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1309 *
1310 * @returns Strict VBox status code.
1311 * @param pIemCpu The IEM state.
1312 * @param pu64 Where to return the opcode qword.
1313 */
1314DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1315{
1316 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1317 if (rcStrict == VINF_SUCCESS)
1318 {
1319 uint8_t offOpcode = pIemCpu->offOpcode;
1320 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1321 pIemCpu->abOpcode[offOpcode + 1],
1322 pIemCpu->abOpcode[offOpcode + 2],
1323 pIemCpu->abOpcode[offOpcode + 3]);
1324 pIemCpu->offOpcode = offOpcode + 4;
1325 }
1326 else
1327 *pu64 = 0;
1328 return rcStrict;
1329}
1330
1331
1332/**
1333 * Fetches the next opcode dword, sign extending it into a quad word.
1334 *
1335 * @returns Strict VBox status code.
1336 * @param pIemCpu The IEM state.
1337 * @param pu64 Where to return the opcode quad word.
1338 */
1339DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1340{
1341 uint8_t const offOpcode = pIemCpu->offOpcode;
1342 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1343 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1344
1345 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1346 pIemCpu->abOpcode[offOpcode + 1],
1347 pIemCpu->abOpcode[offOpcode + 2],
1348 pIemCpu->abOpcode[offOpcode + 3]);
1349 *pu64 = i32;
1350 pIemCpu->offOpcode = offOpcode + 4;
1351 return VINF_SUCCESS;
1352}
1353
1354
1355/**
1356 * Fetches the next opcode double word and sign extends it to a quad word,
1357 * returns automatically on failure.
1358 *
1359 * @param a_pu64 Where to return the opcode quad word.
1360 * @remark Implicitly references pIemCpu.
1361 */
1362#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1363 do \
1364 { \
1365 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1366 if (rcStrict2 != VINF_SUCCESS) \
1367 return rcStrict2; \
1368 } while (0)
1369
1370
1371/**
1372 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1373 *
1374 * @returns Strict VBox status code.
1375 * @param pIemCpu The IEM state.
1376 * @param pu64 Where to return the opcode qword.
1377 */
1378DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1379{
1380 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1381 if (rcStrict == VINF_SUCCESS)
1382 {
1383 uint8_t offOpcode = pIemCpu->offOpcode;
1384 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1385 pIemCpu->abOpcode[offOpcode + 1],
1386 pIemCpu->abOpcode[offOpcode + 2],
1387 pIemCpu->abOpcode[offOpcode + 3],
1388 pIemCpu->abOpcode[offOpcode + 4],
1389 pIemCpu->abOpcode[offOpcode + 5],
1390 pIemCpu->abOpcode[offOpcode + 6],
1391 pIemCpu->abOpcode[offOpcode + 7]);
1392 pIemCpu->offOpcode = offOpcode + 8;
1393 }
1394 else
1395 *pu64 = 0;
1396 return rcStrict;
1397}
1398
1399
1400/**
1401 * Fetches the next opcode qword.
1402 *
1403 * @returns Strict VBox status code.
1404 * @param pIemCpu The IEM state.
1405 * @param pu64 Where to return the opcode qword.
1406 */
1407DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1408{
1409 uint8_t const offOpcode = pIemCpu->offOpcode;
1410 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1411 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1412
1413 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1414 pIemCpu->abOpcode[offOpcode + 1],
1415 pIemCpu->abOpcode[offOpcode + 2],
1416 pIemCpu->abOpcode[offOpcode + 3],
1417 pIemCpu->abOpcode[offOpcode + 4],
1418 pIemCpu->abOpcode[offOpcode + 5],
1419 pIemCpu->abOpcode[offOpcode + 6],
1420 pIemCpu->abOpcode[offOpcode + 7]);
1421 pIemCpu->offOpcode = offOpcode + 8;
1422 return VINF_SUCCESS;
1423}
1424
1425
1426/**
1427 * Fetches the next opcode quad word, returns automatically on failure.
1428 *
1429 * @param a_pu64 Where to return the opcode quad word.
1430 * @remark Implicitly references pIemCpu.
1431 */
1432#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1433 do \
1434 { \
1435 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1436 if (rcStrict2 != VINF_SUCCESS) \
1437 return rcStrict2; \
1438 } while (0)
1439
1440
1441/** @name Misc Worker Functions.
1442 * @{
1443 */
1444
1445
1446/**
1447 * Validates a new SS segment.
1448 *
1449 * @returns VBox strict status code.
1450 * @param pIemCpu The IEM per CPU instance data.
1451 * @param pCtx The CPU context.
1452 * @param NewSS The new SS selector.
1453 * @param uCpl The CPL to load the stack for.
1454 * @param pDesc Where to return the descriptor.
1455 */
1456static VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1457{
1458 NOREF(pCtx);
1459
1460 /* Null selectors are not allowed (we're not called for dispatching
1461 interrupts with SS=0 in long mode). */
1462 if (!(NewSS & (X86_SEL_MASK | X86_SEL_LDT)))
1463 {
1464 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #GP(0)\n", NewSS));
1465 return iemRaiseGeneralProtectionFault0(pIemCpu);
1466 }
1467
1468 /*
1469 * Read the descriptor.
1470 */
1471 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS);
1472 if (rcStrict != VINF_SUCCESS)
1473 return rcStrict;
1474
1475 /*
1476 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1477 */
1478 if (!pDesc->Legacy.Gen.u1DescType)
1479 {
1480 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1481 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1482 }
1483
1484 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1485 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1486 {
1487 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1488 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1489 }
1490 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1491 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1492 {
1493 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1494 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1495 }
1496 /** @todo testcase: check if the TSS.ssX RPL is checked. */
1497 if ((NewSS & X86_SEL_RPL) != uCpl)
1498 {
1499 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #GP\n", NewSS, uCpl));
1500 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1501 }
1502 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1503 {
1504 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #GP\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1505 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1506 }
1507
1508 /* Is it there? */
1509 /** @todo testcase: Is this checked before the canonical / limit check below? */
1510 if (!pDesc->Legacy.Gen.u1Present)
1511 {
1512 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1513 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
1514 }
1515
1516 return VINF_SUCCESS;
1517}
1518
1519
1520/** @} */
1521
1522/** @name Raising Exceptions.
1523 *
1524 * @{
1525 */
1526
1527/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
1528 * @{ */
1529/** CPU exception. */
1530#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
1531/** External interrupt (from PIC, APIC, whatever). */
1532#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
1533/** Software interrupt (int, into or bound). */
1534#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
1535/** Takes an error code. */
1536#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
1537/** Takes a CR2. */
1538#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
1539/** Generated by the breakpoint instruction. */
1540#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
1541/** @} */
1542
1543/**
1544 * Loads the specified stack far pointer from the TSS.
1545 *
1546 * @returns VBox strict status code.
1547 * @param pIemCpu The IEM per CPU instance data.
1548 * @param pCtx The CPU context.
1549 * @param uCpl The CPL to load the stack for.
1550 * @param pSelSS Where to return the new stack segment.
1551 * @param puEsp Where to return the new stack pointer.
1552 */
1553static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
1554 PRTSEL pSelSS, uint32_t *puEsp)
1555{
1556 VBOXSTRICTRC rcStrict;
1557 Assert(uCpl < 4);
1558 *puEsp = 0; /* make gcc happy */
1559 *pSelSS = 0; /* make gcc happy */
1560
1561 switch (pCtx->trHid.Attr.n.u4Type)
1562 {
1563 /*
1564 * 16-bit TSS (X86TSS16).
1565 */
1566 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
1567 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1568 {
1569 uint32_t off = uCpl * 4 + 2;
1570 if (off + 4 > pCtx->trHid.u32Limit)
1571 {
1572 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->trHid.u32Limit));
1573 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
1574 }
1575
1576 uint32_t u32Tmp = 0; /* gcc maybe... */
1577 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->trHid.u64Base + off);
1578 if (rcStrict == VINF_SUCCESS)
1579 {
1580 *puEsp = RT_LOWORD(u32Tmp);
1581 *pSelSS = RT_HIWORD(u32Tmp);
1582 return VINF_SUCCESS;
1583 }
1584 break;
1585 }
1586
1587 /*
1588 * 32-bit TSS (X86TSS32).
1589 */
1590 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
1591 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1592 {
1593 uint32_t off = uCpl * 8 + 4;
1594 if (off + 7 > pCtx->trHid.u32Limit)
1595 {
1596 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->trHid.u32Limit));
1597 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
1598 }
1599
1600 uint64_t u64Tmp;
1601 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->trHid.u64Base + off);
1602 if (rcStrict == VINF_SUCCESS)
1603 {
1604 *puEsp = u64Tmp & UINT32_MAX;
1605 *pSelSS = (RTSEL)(u64Tmp >> 32);
1606 return VINF_SUCCESS;
1607 }
1608 break;
1609 }
1610
1611 default:
1612 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
1613 }
1614 return rcStrict;
1615}
1616
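/*
 * For reference, a sketch of the ring-stack layout the function above indexes
 * (standard TSS definitions; offsets stated here, not copied from this file):
 *
 *   16-bit TSS (X86TSS16): SPn at offset 2 + n*4, SSn at offset 4 + n*4,
 *     so one 32-bit read at uCpl*4 + 2 yields SP in the low word and SS in
 *     the high word.
 *
 *   32-bit TSS (X86TSS32): ESPn at offset 4 + n*8, SSn at offset 8 + n*8,
 *     so one 64-bit read at uCpl*8 + 4 yields ESP in the low dword and SS in
 *     the low word of the high dword.
 */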
1617
1618/**
1619 * Adjust the CPU state according to the exception being raised.
1620 *
1621 * @param pCtx The CPU context.
1622 * @param u8Vector The exception that has been raised.
1623 */
1624DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
1625{
1626 switch (u8Vector)
1627 {
1628 case X86_XCPT_DB:
1629 pCtx->dr[7] &= ~X86_DR7_GD;
1630 break;
1631 /** @todo Read the AMD and Intel exception reference... */
1632 }
1633}
1634
1635
1636/**
1637 * Implements exceptions and interrupts for real mode.
1638 *
1639 * @returns VBox strict status code.
1640 * @param pIemCpu The IEM per CPU instance data.
1641 * @param pCtx The CPU context.
1642 * @param cbInstr The number of bytes to offset rIP by in the return
1643 * address.
1644 * @param u8Vector The interrupt / exception vector number.
1645 * @param fFlags The flags.
1646 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1647 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1648 */
1649static VBOXSTRICTRC
1650iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
1651 PCPUMCTX pCtx,
1652 uint8_t cbInstr,
1653 uint8_t u8Vector,
1654 uint32_t fFlags,
1655 uint16_t uErr,
1656 uint64_t uCr2)
1657{
1658 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_INTERNAL_ERROR_3);
1659 NOREF(uErr); NOREF(uCr2);
1660
1661 /*
1662 * Read the IDT entry.
1663 */
1664 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
1665 {
1666 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
1667 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1668 }
1669 RTFAR16 Idte;
1670 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
1671 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
1672 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1673 return rcStrict;
1674
1675 /*
1676 * Push the stack frame.
1677 */
1678 uint16_t *pu16Frame;
1679 uint64_t uNewRsp;
1680 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
1681 if (rcStrict != VINF_SUCCESS)
1682 return rcStrict;
1683
1684 pu16Frame[2] = (uint16_t)pCtx->eflags.u;
1685 pu16Frame[1] = (uint16_t)pCtx->cs;
1686 pu16Frame[0] = pCtx->ip + cbInstr;
1687 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
1688 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1689 return rcStrict;
1690
1691 /*
1692 * Load the vector address into cs:ip and make exception specific state
1693 * adjustments.
1694 */
1695 pCtx->cs = Idte.sel;
1696 pCtx->csHid.u64Base = (uint32_t)Idte.sel << 4;
1697 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
1698 pCtx->rip = Idte.off;
1699 pCtx->eflags.Bits.u1IF = 0;
1700
1701 /** @todo do we actually do this in real mode? */
1702 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1703 iemRaiseXcptAdjustState(pCtx, u8Vector);
1704
1705 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
1706}
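
/* Informational sketch (not part of the build): in real mode the IVT is an
 * array of 4-byte far pointers starting at IDTR.base, so vector N lives at
 * IDTR.base + 4*N with the offset word first and the segment word second,
 * and the frame pushed above goes FLAGS, CS, IP.  The helper below only
 * illustrates the entry layout; pbIvt is assumed to be a host copy of the
 * guest IVT. */
#if 0
static void iemSketchReadRealModeIdtEntry(uint8_t const *pbIvt, uint8_t bVector, uint16_t *pOffIp, uint16_t *pSelCs)
{
    *pOffIp = RT_MAKE_U16(pbIvt[4 * bVector + 0], pbIvt[4 * bVector + 1]); /* handler IP */
    *pSelCs = RT_MAKE_U16(pbIvt[4 * bVector + 2], pbIvt[4 * bVector + 3]); /* handler CS */
}
#endif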
1707
1708
1709/**
1710 * Implements exceptions and interrupts for protected mode.
1711 *
1712 * @returns VBox strict status code.
1713 * @param pIemCpu The IEM per CPU instance data.
1714 * @param pCtx The CPU context.
1715 * @param cbInstr The number of bytes to offset rIP by in the return
1716 * address.
1717 * @param u8Vector The interrupt / exception vector number.
1718 * @param fFlags The flags.
1719 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1720 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1721 */
1722static VBOXSTRICTRC
1723iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
1724 PCPUMCTX pCtx,
1725 uint8_t cbInstr,
1726 uint8_t u8Vector,
1727 uint32_t fFlags,
1728 uint16_t uErr,
1729 uint64_t uCr2)
1730{
1731 NOREF(cbInstr);
1732
1733 /*
1734 * Read the IDT entry.
1735 */
1736 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
1737 {
1738 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
1739 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1740 }
1741 X86DESC Idte;
1742 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
1743 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
1744 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1745 return rcStrict;
1746 Log4(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
1747 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
1748 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
1749
1750 /*
1751 * Check the descriptor type, DPL and such.
1752 * ASSUMES this is done in the same order as described for call-gate calls.
1753 */
1754 if (Idte.Gate.u1DescType)
1755 {
1756 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
1757 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1758 }
1759 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
1760 switch (Idte.Gate.u4Type)
1761 {
1762 case X86_SEL_TYPE_SYS_UNDEFINED:
1763 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1764 case X86_SEL_TYPE_SYS_LDT:
1765 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1766 case X86_SEL_TYPE_SYS_286_CALL_GATE:
1767 case X86_SEL_TYPE_SYS_UNDEFINED2:
1768 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1769 case X86_SEL_TYPE_SYS_UNDEFINED3:
1770 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1771 case X86_SEL_TYPE_SYS_386_CALL_GATE:
1772 case X86_SEL_TYPE_SYS_UNDEFINED4:
1773 {
1774 /** @todo check what actually happens when the type is wrong...
1775 * esp. call gates. */
1776 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
1777 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1778 }
1779
1780 case X86_SEL_TYPE_SYS_286_INT_GATE:
1781 case X86_SEL_TYPE_SYS_386_INT_GATE:
1782 fEflToClear |= X86_EFL_IF;
1783 break;
1784
1785 case X86_SEL_TYPE_SYS_TASK_GATE:
1786 /** @todo task gates. */
1787 AssertFailedReturn(VERR_NOT_SUPPORTED);
1788
1789 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
1790 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
1791 break;
1792
1793 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1794 }
1795
1796 /* Check DPL against CPL if applicable. */
1797 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
1798 {
1799 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
1800 {
1801 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
1802 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1803 }
1804 }
1805
1806 /* Is it there? */
1807 if (!Idte.Gate.u1Present)
1808 {
1809 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
1810 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1811 }
1812
1813 /* A null CS is bad. */
1814 RTSEL NewCS = Idte.Gate.u16Sel;
1815 if (!(NewCS & (X86_SEL_MASK | X86_SEL_LDT)))
1816 {
1817 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
1818 return iemRaiseGeneralProtectionFault0(pIemCpu);
1819 }
1820
1821 /* Fetch the descriptor for the new CS. */
1822 IEMSELDESC DescCS;
1823 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS);
1824 if (rcStrict != VINF_SUCCESS)
1825 {
1826 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
1827 return rcStrict;
1828 }
1829
1830 /* Must be a code segment. */
1831 if (!DescCS.Legacy.Gen.u1DescType)
1832 {
1833 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
1834 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
1835 }
1836 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1837 {
1838 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
1839 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
1840 }
1841
1842 /* Don't allow lowering the privilege level. */
1843 /** @todo Does the lowering of privileges apply to software interrupts
1844 * only? This has bearings on the more-privileged or
1845 * same-privilege stack behavior further down. A testcase would
1846 * be nice. */
1847 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
1848 {
1849 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
1850 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1851 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
1852 }
1853 /** @todo is the RPL of the interrupt/trap gate descriptor checked? */
1854
1855 /* Check the new EIP against the new CS limit. */
1856 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
1857 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
1858 ? Idte.Gate.u16OffsetLow
1859 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
1860 uint32_t cbLimitCS = X86DESC_LIMIT(DescCS.Legacy);
1861 if (DescCS.Legacy.Gen.u1Granularity)
1862 cbLimitCS = (cbLimitCS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1863 if (uNewEip > cbLimitCS)
1864 {
1865 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
1866 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1867 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
1868 }
1869
1870 /* Make sure the selector is present. */
1871 if (!DescCS.Legacy.Gen.u1Present)
1872 {
1873 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
1874 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
1875 }
1876
1877 /*
1878 * If the privilege level changes, we need to get a new stack from the TSS.
1879 * This in turns means validating the new SS and ESP...
1880 */
1881 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
1882 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
1883 if (uNewCpl != pIemCpu->uCpl)
1884 {
1885 RTSEL NewSS;
1886 uint32_t uNewEsp;
1887 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
1888 if (rcStrict != VINF_SUCCESS)
1889 return rcStrict;
1890
1891 IEMSELDESC DescSS;
1892 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
1893 if (rcStrict != VINF_SUCCESS)
1894 return rcStrict;
1895
1896 /* Check that there is sufficient space for the stack frame. */
1897 uint32_t cbLimitSS = X86DESC_LIMIT(DescSS.Legacy);
1898 if (DescSS.Legacy.Gen.u1Granularity)
1899 cbLimitSS = (cbLimitSS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1900 AssertReturn(!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
1901
1902 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 24 : 20;
1903 if ( uNewEsp - 1 > cbLimitSS
1904 || uNewEsp < cbStackFrame)
1905 {
1906 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
1907 u8Vector, NewSS, uNewEsp, cbStackFrame));
1908 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
1909 }
1910
1911 /*
1912 * Start making changes.
1913 */
1914
1915 /* Create the stack frame. */
1916 RTPTRUNION uStackFrame;
1917 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
1918 uNewEsp - cbStackFrame + X86DESC_BASE(DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
1919 if (rcStrict != VINF_SUCCESS)
1920 return rcStrict;
1921 void * const pvStackFrame = uStackFrame.pv;
1922
1923 if (fFlags & IEM_XCPT_FLAGS_ERR)
1924 *uStackFrame.pu32++ = uErr;
1925 uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
1926 ? pCtx->eip + cbInstr : pCtx->eip;
1927 uStackFrame.pu32[1] = (pCtx->cs & ~X86_SEL_RPL) | pIemCpu->uCpl;
1928 uStackFrame.pu32[2] = pCtx->eflags.u;
1929 uStackFrame.pu32[3] = pCtx->esp;
1930 uStackFrame.pu32[4] = pCtx->ss;
1931 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
1932 if (rcStrict != VINF_SUCCESS)
1933 return rcStrict;
1934
1935 /* Mark the selectors 'accessed' (hope this is the correct time). */
1936 /** @todo testcase: exactly _when_ are the accessed bits set - before or
1937 * after pushing the stack frame? (Write protect the gdt + stack to
1938 * find out.) */
1939 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1940 {
1941 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
1942 if (rcStrict != VINF_SUCCESS)
1943 return rcStrict;
1944 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1945 }
1946
1947 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1948 {
1949 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
1950 if (rcStrict != VINF_SUCCESS)
1951 return rcStrict;
1952 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1953 }
1954
1955 /*
1956 * Start committing the register changes (joins with the DPL=CPL branch).
1957 */
1958 pCtx->ss = NewSS;
1959 pCtx->ssHid.u32Limit = cbLimitSS;
1960 pCtx->ssHid.u64Base = X86DESC_BASE(DescSS.Legacy);
1961 pCtx->ssHid.Attr.u = X86DESC_GET_HID_ATTR(DescSS.Legacy);
1962 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
1963 pIemCpu->uCpl = uNewCpl;
1964 }
1965 /*
1966 * Same privilege, no stack change and smaller stack frame.
1967 */
1968 else
1969 {
1970 uint64_t uNewRsp;
1971 RTPTRUNION uStackFrame;
1972 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 16 : 12;
1973 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
1974 if (rcStrict != VINF_SUCCESS)
1975 return rcStrict;
1976 void * const pvStackFrame = uStackFrame.pv;
1977
1978 if (fFlags & IEM_XCPT_FLAGS_ERR)
1979 *uStackFrame.pu32++ = uErr;
1980 uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
1981 ? pCtx->eip + cbInstr : pCtx->eip;
1982 uStackFrame.pu32[1] = (pCtx->cs & ~X86_SEL_RPL) | pIemCpu->uCpl;
1983 uStackFrame.pu32[2] = pCtx->eflags.u;
1984 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
1985 if (rcStrict != VINF_SUCCESS)
1986 return rcStrict;
1987
1988 /* Mark the CS selector as 'accessed'. */
1989 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1990 {
1991 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
1992 if (rcStrict != VINF_SUCCESS)
1993 return rcStrict;
1994 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1995 }
1996
1997 /*
1998 * Start committing the register changes (joins with the other branch).
1999 */
2000 pCtx->rsp = uNewRsp;
2001 }
2002
2003 /* ... register committing continues. */
2004 pCtx->cs = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2005 pCtx->csHid.u32Limit = cbLimitCS;
2006 pCtx->csHid.u64Base = X86DESC_BASE(DescCS.Legacy);
2007 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(DescCS.Legacy);
2008
2009 pCtx->rip = uNewEip;
2010 pCtx->rflags.u &= ~fEflToClear;
2011
2012 if (fFlags & IEM_XCPT_FLAGS_CR2)
2013 pCtx->cr2 = uCr2;
2014
2015 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2016 iemRaiseXcptAdjustState(pCtx, u8Vector);
2017
2018 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2019}
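
/* Informational note: on a privilege change the 32-bit frame built above reads,
 * from the new top of stack upwards: [error code,] EIP, CS, EFLAGS, old ESP,
 * old SS - i.e. the same order the hardware pushes them when going through an
 * interrupt or trap gate. */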
2020
2021
2022/**
2023 * Implements exceptions and interrupts for V8086 mode.
2024 *
2025 * @returns VBox strict status code.
2026 * @param pIemCpu The IEM per CPU instance data.
2027 * @param pCtx The CPU context.
2028 * @param cbInstr The number of bytes to offset rIP by in the return
2029 * address.
2030 * @param u8Vector The interrupt / exception vector number.
2031 * @param fFlags The flags.
2032 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2033 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2034 */
2035static VBOXSTRICTRC
2036iemRaiseXcptOrIntInV8086Mode(PIEMCPU pIemCpu,
2037 PCPUMCTX pCtx,
2038 uint8_t cbInstr,
2039 uint8_t u8Vector,
2040 uint32_t fFlags,
2041 uint16_t uErr,
2042 uint64_t uCr2)
2043{
2044 NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
2045 AssertMsgFailed(("V8086 exception / interrupt dispatching\n"));
2046 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
2047}
2048
2049
2050/**
2051 * Implements exceptions and interrupts for long mode.
2052 *
2053 * @returns VBox strict status code.
2054 * @param pIemCpu The IEM per CPU instance data.
2055 * @param pCtx The CPU context.
2056 * @param cbInstr The number of bytes to offset rIP by in the return
2057 * address.
2058 * @param u8Vector The interrupt / exception vector number.
2059 * @param fFlags The flags.
2060 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2061 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2062 */
2063static VBOXSTRICTRC
2064iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
2065 PCPUMCTX pCtx,
2066 uint8_t cbInstr,
2067 uint8_t u8Vector,
2068 uint32_t fFlags,
2069 uint16_t uErr,
2070 uint64_t uCr2)
2071{
2072 NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
2073 AssertMsgFailed(("long mode exception / interrupt dispatching\n"));
2074 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
2075}
2076
2077
2078/**
2079 * Implements exceptions and interrupts.
2080 *
2081 * All exceptions and interrupts go through this function!
2082 *
2083 * @returns VBox strict status code.
2084 * @param pIemCpu The IEM per CPU instance data.
2085 * @param cbInstr The number of bytes to offset rIP by in the return
2086 * address.
2087 * @param u8Vector The interrupt / exception vector number.
2088 * @param fFlags The flags.
2089 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2090 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2091 */
2092DECL_NO_INLINE(static, VBOXSTRICTRC)
2093iemRaiseXcptOrInt(PIEMCPU pIemCpu,
2094 uint8_t cbInstr,
2095 uint8_t u8Vector,
2096 uint32_t fFlags,
2097 uint16_t uErr,
2098 uint64_t uCr2)
2099{
2100 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2101
2102 /*
2103 * Do recursion accounting.
2104 */
2105 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
2106 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
2107 if (pIemCpu->cXcptRecursions == 0)
2108 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
2109 u8Vector, pCtx->cs, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
2110 else
2111 {
2112 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
2113 u8Vector, pCtx->cs, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
2114
2115 /** @todo double and triple faults. */
2116 AssertReturn(pIemCpu->cXcptRecursions < 3, VERR_IEM_ASPECT_NOT_IMPLEMENTED);
2117
2118 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
2119 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
2120 {
2121 ....
2122 } */
2123 }
2124 pIemCpu->cXcptRecursions++;
2125 pIemCpu->uCurXcpt = u8Vector;
2126 pIemCpu->fCurXcpt = fFlags;
2127
2128 /*
2129 * Extensive logging.
2130 */
2131#ifdef LOG_ENABLED
2132 if (LogIs3Enabled())
2133 {
2134 PVM pVM = IEMCPU_TO_VM(pIemCpu);
2135 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2136 char szRegs[4096];
2137 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2138 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2139 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2140 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2141 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2142 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2143 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2144 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2145 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2146 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2147 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2148 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2149 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2150 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2151 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2152 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2153 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2154 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2155 " efer=%016VR{efer}\n"
2156 " pat=%016VR{pat}\n"
2157 " sf_mask=%016VR{sf_mask}\n"
2158 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2159 " lstar=%016VR{lstar}\n"
2160 " star=%016VR{star} cstar=%016VR{cstar}\n"
2161 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2162 );
2163
2164 char szInstr[256];
2165 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
2166 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2167 szInstr, sizeof(szInstr), NULL);
2168 Log3(("%s%s\n", szRegs, szInstr));
2169 }
2170#endif /* LOG_ENABLED */
2171
2172 /*
2173 * Call the mode specific worker function.
2174 */
2175 VBOXSTRICTRC rcStrict;
2176 if (!(pCtx->cr0 & X86_CR0_PE))
2177 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2178 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2179 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2180 else if (!pCtx->eflags.Bits.u1VM)
2181 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2182 else
2183 rcStrict = iemRaiseXcptOrIntInV8086Mode(pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2184
2185 /*
2186 * Unwind.
2187 */
2188 pIemCpu->cXcptRecursions--;
2189 pIemCpu->uCurXcpt = uPrevXcpt;
2190 pIemCpu->fCurXcpt = fPrevXcpt;
2191 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv\n",
2192 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs, pCtx->rip, pCtx->ss, pCtx->esp));
2193 return rcStrict;
2194}
2195
2196
2197/** \#DE - 00. */
2198DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
2199{
2200 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2201}
2202
2203
2204/** \#DB - 01. */
2205DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
2206{
2207 /** @todo set/clear RF. */
2208 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2209}
2210
2211
2212/** \#UD - 06. */
2213DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
2214{
2215 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2216}
2217
2218
2219/** \#NM - 07. */
2220DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
2221{
2222 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2223}
2224
2225
2226#ifdef SOME_UNUSED_FUNCTION
2227/** \#TS(err) - 0a. */
2228DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
2229{
2230 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2231}
2232#endif
2233
2234
2235/** \#TS(tr) - 0a. */
2236DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
2237{
2238 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2239 pIemCpu->CTX_SUFF(pCtx)->tr, 0);
2240}
2241
2242
2243/** \#NP(err) - 0b. */
2244DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
2245{
2246 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2247}
2248
2249
2250/** \#NP(seg) - 0b. */
2251DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
2252{
2253 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2254 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
2255}
2256
2257
2258/** \#NP(sel) - 0b. */
2259DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
2260{
2261 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2262 uSel & ~X86_SEL_RPL, 0);
2263}
2264
2265
2266/** \#SS(seg) - 0c. */
2267DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
2268{
2269 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2270 uSel & ~X86_SEL_RPL, 0);
2271}
2272
2273
2274/** \#GP(n) - 0d. */
2275DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
2276{
2277 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2278}
2279
2280
2281/** \#GP(0) - 0d. */
2282DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
2283{
2284 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2285}
2286
2287
2288/** \#GP(sel) - 0d. */
2289DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
2290{
2291 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2292 Sel & ~X86_SEL_RPL, 0);
2293}
2294
2295
2296/** \#GP(0) - 0d. */
2297DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
2298{
2299 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2300}
2301
2302
2303/** \#GP(sel) - 0d. */
2304DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
2305{
2306 NOREF(iSegReg); NOREF(fAccess);
2307 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
2308 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2309}
2310
2311
2312/** \#GP(sel) - 0d. */
2313DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
2314{
2315 NOREF(Sel);
2316 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2317}
2318
2319
2320/** \#GP(sel) - 0d. */
2321DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
2322{
2323 NOREF(iSegReg); NOREF(fAccess);
2324 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2325}
2326
2327
2328/** \#PF(n) - 0e. */
2329DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
2330{
2331 uint16_t uErr;
2332 switch (rc)
2333 {
2334 case VERR_PAGE_NOT_PRESENT:
2335 case VERR_PAGE_TABLE_NOT_PRESENT:
2336 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
2337 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
2338 uErr = 0;
2339 break;
2340
2341 default:
2342 AssertMsgFailed(("%Rrc\n", rc));
2343 case VERR_ACCESS_DENIED:
2344 uErr = X86_TRAP_PF_P;
2345 break;
2346
2347 /** @todo reserved */
2348 }
2349
2350 if (pIemCpu->uCpl == 3)
2351 uErr |= X86_TRAP_PF_US;
2352
2353 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
2354 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
2355 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
2356 uErr |= X86_TRAP_PF_ID;
2357
2358 /* Note! RW access callers reporting a WRITE protection fault, will clear
2359 the READ flag before calling. So, read-modify-write accesses (RW)
2360 can safely be reported as READ faults. */
2361 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
2362 uErr |= X86_TRAP_PF_RW;
2363
2364 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
2365 uErr, GCPtrWhere);
2366}
2367
2368
2369/** \#MF(0) - 10. */
2370DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
2371{
2372 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2373}
2374
2375
2376/** \#AC(0) - 11. */
2377DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
2378{
2379 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2380}
2381
2382
2383/**
2384 * Macro for calling iemCImplRaiseDivideError().
2385 *
2386 * This enables us to add/remove arguments and force different levels of
2387 * inlining as we wish.
2388 *
2389 * @return Strict VBox status code.
2390 */
2391#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
2392IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
2393{
2394 NOREF(cbInstr);
2395 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2396}
2397
2398
2399/**
2400 * Macro for calling iemCImplRaiseInvalidLockPrefix().
2401 *
2402 * This enables us to add/remove arguments and force different levels of
2403 * inlining as we wish.
2404 *
2405 * @return Strict VBox status code.
2406 */
2407#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
2408IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
2409{
2410 NOREF(cbInstr);
2411 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2412}
2413
2414
2415/**
2416 * Macro for calling iemCImplRaiseInvalidOpcode().
2417 *
2418 * This enables us to add/remove arguments and force different levels of
2419 * inlining as we wish.
2420 *
2421 * @return Strict VBox status code.
2422 */
2423#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
2424IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
2425{
2426 NOREF(cbInstr);
2427 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2428}
2429
2430
2431/** @} */
2432
2433
2434/*
2435 *
2436 * Helper routines.
2437 * Helper routines.
2438 * Helper routines.
2439 *
2440 */
2441
2442/**
2443 * Recalculates the effective operand size.
2444 *
2445 * @param pIemCpu The IEM state.
2446 */
2447static void iemRecalEffOpSize(PIEMCPU pIemCpu)
2448{
2449 switch (pIemCpu->enmCpuMode)
2450 {
2451 case IEMMODE_16BIT:
2452 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
2453 break;
2454 case IEMMODE_32BIT:
2455 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
2456 break;
2457 case IEMMODE_64BIT:
2458 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
2459 {
2460 case 0:
2461 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
2462 break;
2463 case IEM_OP_PRF_SIZE_OP:
2464 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
2465 break;
2466 case IEM_OP_PRF_SIZE_REX_W:
2467 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
2468 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
2469 break;
2470 }
2471 break;
2472 default:
2473 AssertFailed();
2474 }
2475}
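
/* Worked example: in 64-bit mode an operand size prefix (66h) alone gives a
 * 16-bit operand size, REX.W alone gives 64-bit, and when both are present
 * REX.W takes precedence - hence the combined case above also selects
 * IEMMODE_64BIT. */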
2476
2477
2478/**
2479 * Sets the default operand size to 64-bit and recalculates the effective
2480 * operand size.
2481 *
2482 * @param pIemCpu The IEM state.
2483 */
2484static void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
2485{
2486 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2487 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
2488 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
2489 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
2490 else
2491 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
2492}
2493
2494
2495/*
2496 *
2497 * Common opcode decoders.
2498 * Common opcode decoders.
2499 * Common opcode decoders.
2500 *
2501 */
2502#include <iprt/mem.h>
2503
2504/**
2505 * Used to add extra details about a stub case.
2506 * @param pIemCpu The IEM per CPU state.
2507 */
2508static void iemOpStubMsg2(PIEMCPU pIemCpu)
2509{
2510 PVM pVM = IEMCPU_TO_VM(pIemCpu);
2511 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2512 char szRegs[4096];
2513 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2514 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2515 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2516 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2517 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2518 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2519 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2520 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2521 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2522 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2523 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2524 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2525 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2526 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2527 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2528 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2529 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2530 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2531 " efer=%016VR{efer}\n"
2532 " pat=%016VR{pat}\n"
2533 " sf_mask=%016VR{sf_mask}\n"
2534 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2535 " lstar=%016VR{lstar}\n"
2536 " star=%016VR{star} cstar=%016VR{cstar}\n"
2537 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2538 );
2539
2540 char szInstr[256];
2541 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
2542 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2543 szInstr, sizeof(szInstr), NULL);
2544
2545 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
2546}
2547
2548
2549/** Stubs an opcode. */
2550#define FNIEMOP_STUB(a_Name) \
2551 FNIEMOP_DEF(a_Name) \
2552 { \
2553 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
2554 iemOpStubMsg2(pIemCpu); \
2555 RTAssertPanic(); \
2556 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
2557 } \
2558 typedef int ignore_semicolon
2559
2560/** Stubs an opcode. */
2561#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
2562 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
2563 { \
2564 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
2565 iemOpStubMsg2(pIemCpu); \
2566 RTAssertPanic(); \
2567 NOREF(a_Name0); \
2568 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
2569 } \
2570 typedef int ignore_semicolon
2571
2572
2573
2574/** @name Register Access.
2575 * @{
2576 */
2577
2578/**
2579 * Gets a reference (pointer) to the specified hidden segment register.
2580 *
2581 * @returns Hidden register reference.
2582 * @param pIemCpu The per CPU data.
2583 * @param iSegReg The segment register.
2584 */
2585static PCPUMSELREGHID iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
2586{
2587 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2588 switch (iSegReg)
2589 {
2590 case X86_SREG_ES: return &pCtx->esHid;
2591 case X86_SREG_CS: return &pCtx->csHid;
2592 case X86_SREG_SS: return &pCtx->ssHid;
2593 case X86_SREG_DS: return &pCtx->dsHid;
2594 case X86_SREG_FS: return &pCtx->fsHid;
2595 case X86_SREG_GS: return &pCtx->gsHid;
2596 }
2597 AssertFailedReturn(NULL);
2598}
2599
2600
2601/**
2602 * Gets a reference (pointer) to the specified segment register (the selector
2603 * value).
2604 *
2605 * @returns Pointer to the selector variable.
2606 * @param pIemCpu The per CPU data.
2607 * @param iSegReg The segment register.
2608 */
2609static uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
2610{
2611 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2612 switch (iSegReg)
2613 {
2614 case X86_SREG_ES: return &pCtx->es;
2615 case X86_SREG_CS: return &pCtx->cs;
2616 case X86_SREG_SS: return &pCtx->ss;
2617 case X86_SREG_DS: return &pCtx->ds;
2618 case X86_SREG_FS: return &pCtx->fs;
2619 case X86_SREG_GS: return &pCtx->gs;
2620 }
2621 AssertFailedReturn(NULL);
2622}
2623
2624
2625/**
2626 * Fetches the selector value of a segment register.
2627 *
2628 * @returns The selector value.
2629 * @param pIemCpu The per CPU data.
2630 * @param iSegReg The segment register.
2631 */
2632static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
2633{
2634 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2635 switch (iSegReg)
2636 {
2637 case X86_SREG_ES: return pCtx->es;
2638 case X86_SREG_CS: return pCtx->cs;
2639 case X86_SREG_SS: return pCtx->ss;
2640 case X86_SREG_DS: return pCtx->ds;
2641 case X86_SREG_FS: return pCtx->fs;
2642 case X86_SREG_GS: return pCtx->gs;
2643 }
2644 AssertFailedReturn(0xffff);
2645}
2646
2647
2648/**
2649 * Gets a reference (pointer) to the specified general register.
2650 *
2651 * @returns Register reference.
2652 * @param pIemCpu The per CPU data.
2653 * @param iReg The general register.
2654 */
2655static void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
2656{
2657 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2658 switch (iReg)
2659 {
2660 case X86_GREG_xAX: return &pCtx->rax;
2661 case X86_GREG_xCX: return &pCtx->rcx;
2662 case X86_GREG_xDX: return &pCtx->rdx;
2663 case X86_GREG_xBX: return &pCtx->rbx;
2664 case X86_GREG_xSP: return &pCtx->rsp;
2665 case X86_GREG_xBP: return &pCtx->rbp;
2666 case X86_GREG_xSI: return &pCtx->rsi;
2667 case X86_GREG_xDI: return &pCtx->rdi;
2668 case X86_GREG_x8: return &pCtx->r8;
2669 case X86_GREG_x9: return &pCtx->r9;
2670 case X86_GREG_x10: return &pCtx->r10;
2671 case X86_GREG_x11: return &pCtx->r11;
2672 case X86_GREG_x12: return &pCtx->r12;
2673 case X86_GREG_x13: return &pCtx->r13;
2674 case X86_GREG_x14: return &pCtx->r14;
2675 case X86_GREG_x15: return &pCtx->r15;
2676 }
2677 AssertFailedReturn(NULL);
2678}
2679
2680
2681/**
2682 * Gets a reference (pointer) to the specified 8-bit general register.
2683 *
2684 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
2685 *
2686 * @returns Register reference.
2687 * @param pIemCpu The per CPU data.
2688 * @param iReg The register.
2689 */
2690static uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
2691{
2692 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
2693 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
2694
2695 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
2696 if (iReg >= 4)
2697 pu8Reg++;
2698 return pu8Reg;
2699}
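
/* Informational note: without a REX prefix, byte register encodings 4-7 select
 * AH/CH/DH/BH, i.e. byte 1 of rAX/rCX/rDX/rBX, which is what the "iReg & 3"
 * plus the increment above computes.  E.g. iReg=7 (BH) resolves to
 * (uint8_t *)&pCtx->rbx + 1 on a little-endian host. */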
2700
2701
2702/**
2703 * Fetches the value of an 8-bit general register.
2704 *
2705 * @returns The register value.
2706 * @param pIemCpu The per CPU data.
2707 * @param iReg The register.
2708 */
2709static uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
2710{
2711 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
2712 return *pbSrc;
2713}
2714
2715
2716/**
2717 * Fetches the value of a 16-bit general register.
2718 *
2719 * @returns The register value.
2720 * @param pIemCpu The per CPU data.
2721 * @param iReg The register.
2722 */
2723static uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
2724{
2725 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
2726}
2727
2728
2729/**
2730 * Fetches the value of a 32-bit general register.
2731 *
2732 * @returns The register value.
2733 * @param pIemCpu The per CPU data.
2734 * @param iReg The register.
2735 */
2736static uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
2737{
2738 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
2739}
2740
2741
2742/**
2743 * Fetches the value of a 64-bit general register.
2744 *
2745 * @returns The register value.
2746 * @param pIemCpu The per CPU data.
2747 * @param iReg The register.
2748 */
2749static uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
2750{
2751 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
2752}
2753
2754
2755/**
2756 * Is the FPU state in FXSAVE format or not.
2757 *
2758 * @returns true if it is, false if it's in FNSAVE format.
2759 * @param pIemCpu The IEM per CPU data.
2760 */
2761DECLINLINE(bool) iemFRegIsFxSaveFormat(PIEMCPU pIemCpu)
2762{
2763#ifdef RT_ARCH_AMD64
2764 NOREF(pIemCpu);
2765 return true;
2766#else
2767 NOREF(pIemCpu); /// @todo return pVCpu->pVMR3->cpum.s.CPUFeatures.edx.u1FXSR;
2768 return true;
2769#endif
2770}
2771
2772
2773/**
2774 * Gets the FPU status word.
2775 *
2776 * @returns FPU status word
2777 * @param pIemCpu The per CPU data.
2778 */
2779static uint16_t iemFRegFetchFsw(PIEMCPU pIemCpu)
2780{
2781 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2782 uint16_t u16Fsw;
2783 if (iemFRegIsFxSaveFormat(pIemCpu))
2784 u16Fsw = pCtx->fpu.FSW;
2785 else
2786 {
2787 PX86FPUSTATE pFpu = (PX86FPUSTATE)&pCtx->fpu;
2788 u16Fsw = pFpu->FSW;
2789 }
2790 return u16Fsw;
2791}
2792
2793/**
2794 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
2795 *
2796 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2797 * segment limit.
2798 *
2799 * @param pIemCpu The per CPU data.
2800 * @param offNextInstr The offset of the next instruction.
2801 */
2802static VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
2803{
2804 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2805 switch (pIemCpu->enmEffOpSize)
2806 {
2807 case IEMMODE_16BIT:
2808 {
2809 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
2810 if ( uNewIp > pCtx->csHid.u32Limit
2811 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
2812 return iemRaiseGeneralProtectionFault0(pIemCpu);
2813 pCtx->rip = uNewIp;
2814 break;
2815 }
2816
2817 case IEMMODE_32BIT:
2818 {
2819 Assert(pCtx->rip <= UINT32_MAX);
2820 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2821
2822 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
2823 if (uNewEip > pCtx->csHid.u32Limit)
2824 return iemRaiseGeneralProtectionFault0(pIemCpu);
2825 pCtx->rip = uNewEip;
2826 break;
2827 }
2828
2829 case IEMMODE_64BIT:
2830 {
2831 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2832
2833 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
2834 if (!IEM_IS_CANONICAL(uNewRip))
2835 return iemRaiseGeneralProtectionFault0(pIemCpu);
2836 pCtx->rip = uNewRip;
2837 break;
2838 }
2839
2840 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2841 }
2842
2843 return VINF_SUCCESS;
2844}
2845
2846
2847/**
2848 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
2849 *
2850 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2851 * segment limit.
2852 *
2853 * @returns Strict VBox status code.
2854 * @param pIemCpu The per CPU data.
2855 * @param offNextInstr The offset of the next instruction.
2856 */
2857static VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
2858{
2859 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2860 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
2861
2862 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
2863 if ( uNewIp > pCtx->csHid.u32Limit
2864 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
2865 return iemRaiseGeneralProtectionFault0(pIemCpu);
2866 /** @todo Test 16-bit jump in 64-bit mode. */
2867 pCtx->rip = uNewIp;
2868
2869 return VINF_SUCCESS;
2870}
2871
2872
2873/**
2874 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
2875 *
2876 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2877 * segment limit.
2878 *
2879 * @returns Strict VBox status code.
2880 * @param pIemCpu The per CPU data.
2881 * @param offNextInstr The offset of the next instruction.
2882 */
2883static VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
2884{
2885 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2886 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
2887
2888 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
2889 {
2890 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2891
2892 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
2893 if (uNewEip > pCtx->csHid.u32Limit)
2894 return iemRaiseGeneralProtectionFault0(pIemCpu);
2895 pCtx->rip = uNewEip;
2896 }
2897 else
2898 {
2899 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2900
2901 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
2902 if (!IEM_IS_CANONICAL(uNewRip))
2903 return iemRaiseGeneralProtectionFault0(pIemCpu);
2904 pCtx->rip = uNewRip;
2905 }
2906 return VINF_SUCCESS;
2907}
2908
2909
2910/**
2911 * Performs a near jump to the specified address.
2912 *
2913 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2914 * segment limit.
2915 *
2916 * @param pIemCpu The per CPU data.
2917 * @param uNewRip The new RIP value.
2918 */
2919static VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
2920{
2921 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2922 switch (pIemCpu->enmEffOpSize)
2923 {
2924 case IEMMODE_16BIT:
2925 {
2926 Assert(uNewRip <= UINT16_MAX);
2927 if ( uNewRip > pCtx->csHid.u32Limit
2928 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
2929 return iemRaiseGeneralProtectionFault0(pIemCpu);
2930 /** @todo Test 16-bit jump in 64-bit mode. */
2931 pCtx->rip = uNewRip;
2932 break;
2933 }
2934
2935 case IEMMODE_32BIT:
2936 {
2937 Assert(uNewRip <= UINT32_MAX);
2938 Assert(pCtx->rip <= UINT32_MAX);
2939 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2940
2941 if (uNewRip > pCtx->csHid.u32Limit)
2942 return iemRaiseGeneralProtectionFault0(pIemCpu);
2943 pCtx->rip = uNewRip;
2944 break;
2945 }
2946
2947 case IEMMODE_64BIT:
2948 {
2949 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2950
2951 if (!IEM_IS_CANONICAL(uNewRip))
2952 return iemRaiseGeneralProtectionFault0(pIemCpu);
2953 pCtx->rip = uNewRip;
2954 break;
2955 }
2956
2957 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2958 }
2959
2960 return VINF_SUCCESS;
2961}
2962
2963
2964/**
2965 * Get the address of the top of the stack.
2966 *
2967 * @param pCtx The CPU context which SP/ESP/RSP should be
2968 * read.
2969 */
2970DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCCPUMCTX pCtx)
2971{
2972 if (pCtx->ssHid.Attr.n.u1Long)
2973 return pCtx->rsp;
2974 if (pCtx->ssHid.Attr.n.u1DefBig)
2975 return pCtx->esp;
2976 return pCtx->sp;
2977}
2978
2979
2980/**
2981 * Updates the RIP/EIP/IP to point to the next instruction.
2982 *
2983 * @param pIemCpu The per CPU data.
2984 * @param cbInstr The number of bytes to add.
2985 */
2986static void iemRegAddToRip(PIEMCPU pIemCpu, uint8_t cbInstr)
2987{
2988 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2989 switch (pIemCpu->enmCpuMode)
2990 {
2991 case IEMMODE_16BIT:
2992 Assert(pCtx->rip <= UINT16_MAX);
2993 pCtx->eip += cbInstr;
2994 pCtx->eip &= UINT32_C(0xffff);
2995 break;
2996
2997 case IEMMODE_32BIT:
2998 pCtx->eip += cbInstr;
2999 Assert(pCtx->rip <= UINT32_MAX);
3000 break;
3001
3002 case IEMMODE_64BIT:
3003 pCtx->rip += cbInstr;
3004 break;
3005 default: AssertFailed();
3006 }
3007}
3008
3009
3010/**
3011 * Updates the RIP/EIP/IP to point to the next instruction.
3012 *
3013 * @param pIemCpu The per CPU data.
3014 */
3015static void iemRegUpdateRip(PIEMCPU pIemCpu)
3016{
3017 return iemRegAddToRip(pIemCpu, pIemCpu->offOpcode);
3018}
3019
3020
3021/**
3022 * Adds to the stack pointer.
3023 *
3024 * @param pCtx The CPU context which SP/ESP/RSP should be
3025 * updated.
3026 * @param cbToAdd The number of bytes to add.
3027 */
3028DECLINLINE(void) iemRegAddToRsp(PCPUMCTX pCtx, uint8_t cbToAdd)
3029{
3030 if (pCtx->ssHid.Attr.n.u1Long)
3031 pCtx->rsp += cbToAdd;
3032 else if (pCtx->ssHid.Attr.n.u1DefBig)
3033 pCtx->esp += cbToAdd;
3034 else
3035 pCtx->sp += cbToAdd;
3036}
3037
3038
3039/**
3040 * Subtracts from the stack pointer.
3041 *
3042 * @param pCtx The CPU context which SP/ESP/RSP should be
3043 * updated.
3044 * @param cbToSub The number of bytes to subtract.
3045 */
3046DECLINLINE(void) iemRegSubFromRsp(PCPUMCTX pCtx, uint8_t cbToSub)
3047{
3048 if (pCtx->ssHid.Attr.n.u1Long)
3049 pCtx->rsp -= cbToSub;
3050 else if (pCtx->ssHid.Attr.n.u1DefBig)
3051 pCtx->esp -= cbToSub;
3052 else
3053 pCtx->sp -= cbToSub;
3054}
3055
3056
3057/**
3058 * Adds to the temporary stack pointer.
3059 *
3060 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3061 * @param cbToAdd The number of bytes to add.
3062 * @param pCtx Where to get the current stack mode.
3063 */
3064DECLINLINE(void) iemRegAddToRspEx(PRTUINT64U pTmpRsp, uint8_t cbToAdd, PCCPUMCTX pCtx)
3065{
3066 if (pCtx->ssHid.Attr.n.u1Long)
3067 pTmpRsp->u += cbToAdd;
3068 else if (pCtx->ssHid.Attr.n.u1DefBig)
3069 pTmpRsp->DWords.dw0 += cbToAdd;
3070 else
3071 pTmpRsp->Words.w0 += cbToAdd;
3072}
3073
3074
3075/**
3076 * Subtracts from the temporary stack pointer.
3077 *
3078 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3079 * @param cbToSub The number of bytes to subtract.
3080 * @param pCtx Where to get the current stack mode.
3081 */
3082DECLINLINE(void) iemRegSubFromRspEx(PRTUINT64U pTmpRsp, uint8_t cbToSub, PCCPUMCTX pCtx)
3083{
3084 if (pCtx->ssHid.Attr.n.u1Long)
3085 pTmpRsp->u -= cbToSub;
3086 else if (pCtx->ssHid.Attr.n.u1DefBig)
3087 pTmpRsp->DWords.dw0 -= cbToSub;
3088 else
3089 pTmpRsp->Words.w0 -= cbToSub;
3090}
3091
3092
3093/**
3094 * Calculates the effective stack address for a push of the specified size as
3095 * well as the new RSP value (upper bits may be masked).
3096 *
3097 * @returns Effective stack address for the push.
3098 * @param pCtx Where to get the current stack mode.
3099 * @param cbItem The size of the stack item to push.
3100 * @param puNewRsp Where to return the new RSP value.
3101 */
3102DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3103{
3104 RTUINT64U uTmpRsp;
3105 RTGCPTR GCPtrTop;
3106 uTmpRsp.u = pCtx->rsp;
3107
3108 if (pCtx->ssHid.Attr.n.u1Long)
3109 GCPtrTop = uTmpRsp.u -= cbItem;
3110 else if (pCtx->ssHid.Attr.n.u1DefBig)
3111 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
3112 else
3113 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
3114 *puNewRsp = uTmpRsp.u;
3115 return GCPtrTop;
3116}
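
/* Worked example: with a 16-bit stack segment (neither the L nor the D/B bit
 * set) and RSP=0x00010002, pushing a 4 byte item only wraps the low word, so
 * this returns GCPtrTop=0xfffe and sets *puNewRsp to 0x0001fffe - SP is
 * updated while the upper bits of RSP are left untouched. */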
3117
3118
3119/**
3120 * Gets the current stack pointer and calculates the value after a pop of the
3121 * specified size.
3122 *
3123 * @returns Current stack pointer.
3124 * @param pCtx Where to get the current stack mode.
3125 * @param cbItem The size of the stack item to pop.
3126 * @param puNewRsp Where to return the new RSP value.
3127 */
3128DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3129{
3130 RTUINT64U uTmpRsp;
3131 RTGCPTR GCPtrTop;
3132 uTmpRsp.u = pCtx->rsp;
3133
3134 if (pCtx->ssHid.Attr.n.u1Long)
3135 {
3136 GCPtrTop = uTmpRsp.u;
3137 uTmpRsp.u += cbItem;
3138 }
3139 else if (pCtx->ssHid.Attr.n.u1DefBig)
3140 {
3141 GCPtrTop = uTmpRsp.DWords.dw0;
3142 uTmpRsp.DWords.dw0 += cbItem;
3143 }
3144 else
3145 {
3146 GCPtrTop = uTmpRsp.Words.w0;
3147 uTmpRsp.Words.w0 += cbItem;
3148 }
3149 *puNewRsp = uTmpRsp.u;
3150 return GCPtrTop;
3151}
3152
3153
3154/**
3155 * Calculates the effective stack address for a push of the specified size as
3156 * well as the new temporary RSP value (upper bits may be masked).
3157 *
3158 * @returns Effective stack address for the push.
3159 * @param pTmpRsp The temporary stack pointer. This is updated.
3160 * @param cbItem The size of the stack item to push.
3161 * @param pCtx Where to get the current stack mode.
3162 */
3163DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
3164{
3165 RTGCPTR GCPtrTop;
3166
3167 if (pCtx->ssHid.Attr.n.u1Long)
3168 GCPtrTop = pTmpRsp->u -= cbItem;
3169 else if (pCtx->ssHid.Attr.n.u1DefBig)
3170 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
3171 else
3172 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
3173 return GCPtrTop;
3174}
3175
3176
3177/**
3178 * Gets the effective stack address for a pop of the specified size and
3179 * calculates and updates the temporary RSP.
3180 *
3181 * @returns Current stack pointer.
3182 * @param pTmpRsp The temporary stack pointer. This is updated.
3183 * @param pCtx Where to get the current stack mode.
3184 * @param cbItem The size of the stack item to pop.
3185 */
3186DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
3187{
3188 RTGCPTR GCPtrTop;
3189 if (pCtx->ssHid.Attr.n.u1Long)
3190 {
3191 GCPtrTop = pTmpRsp->u;
3192 pTmpRsp->u += cbItem;
3193 }
3194 else if (pCtx->ssHid.Attr.n.u1DefBig)
3195 {
3196 GCPtrTop = pTmpRsp->DWords.dw0;
3197 pTmpRsp->DWords.dw0 += cbItem;
3198 }
3199 else
3200 {
3201 GCPtrTop = pTmpRsp->Words.w0;
3202 pTmpRsp->Words.w0 += cbItem;
3203 }
3204 return GCPtrTop;
3205}
3206
3207
3208/**
3209 * Checks if an Intel CPUID feature bit is set.
3210 *
3211 * @returns true / false.
3212 *
3213 * @param pIemCpu The IEM per CPU data.
3214 * @param fEdx The EDX bit to test, or 0 if ECX.
3215 * @param fEcx The ECX bit to test, or 0 if EDX.
3216 * @remarks Used via IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX,
3217 * IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX and others.
3218 */
3219static bool iemRegIsIntelCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
3220{
3221 uint32_t uEax, uEbx, uEcx, uEdx;
3222 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x00000001, &uEax, &uEbx, &uEcx, &uEdx);
3223 return (fEcx && (uEcx & fEcx))
3224 || (fEdx && (uEdx & fEdx));
3225}
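
/* Usage sketch (informational, not built) - testing a standard-leaf feature
 * bit, assuming the X86_CPUID_FEATURE_EDX_SSE2 definition from x86.h: */
#if 0
    bool const fSse2 = iemRegIsIntelCpuIdFeaturePresent(pIemCpu, X86_CPUID_FEATURE_EDX_SSE2, 0 /*fEcx*/);
#endif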
3226
3227
3228/**
3229 * Checks if an AMD CPUID feature bit is set.
3230 *
3231 * @returns true / false.
3232 *
3233 * @param pIemCpu The IEM per CPU data.
3234 * @param fEdx The EDX bit to test, or 0 if ECX.
3235 * @param fEcx The ECX bit to test, or 0 if EDX.
3236 * @remarks Used via IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX,
3237 * IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX and others.
3238 */
3239static bool iemRegIsAmdCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
3240{
3241 uint32_t uEax, uEbx, uEcx, uEdx;
3242 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x80000001, &uEax, &uEbx, &uEcx, &uEdx);
3243 return (fEcx && (uEcx & fEcx))
3244 || (fEdx && (uEdx & fEdx));
3245}
3246
3247/** @} */
3248
3249
3250/** @name FPU access and helpers.
3251 *
3252 * @{
3253 */
3254
3255
3256/**
3257 * Hook for preparing to use the host FPU.
3258 *
3259 * This is necessary in ring-0 and raw-mode context.
3260 *
3261 * @param pIemCpu The IEM per CPU data.
3262 */
3263DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
3264{
3265#ifdef IN_RING3
3266 NOREF(pIemCpu);
3267#else
3268# error "Implement me"
3269#endif
3270}
3271
3272
3273/**
3274 * Stores a QNaN value into a FPU register.
3275 *
3276 * @param pReg Pointer to the register.
3277 */
3278DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
3279{
3280 pReg->au32[0] = UINT32_C(0x00000000);
3281 pReg->au32[1] = UINT32_C(0xc0000000);
3282 pReg->au16[4] = UINT16_C(0xffff);
3283}
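
/* Informational note: the three stores above assemble the x87 "real indefinite"
 * QNaN, i.e. the 80-bit pattern 0xffff c0000000 00000000 (sign set, exponent
 * all ones, integer bit and top fraction bit set, rest zero). */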
3284
3285
3286/**
3287 * Updates the FOP, FPU.CS and FPUIP registers.
3288 *
3289 * @param pIemCpu The IEM per CPU data.
3290 * @param pCtx The CPU context.
3291 */
3292DECLINLINE(void) iemFpuUpdateOpcodeAndIP(PIEMCPU pIemCpu, PCPUMCTX pCtx)
3293{
3294 pCtx->fpu.FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
3295 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
3296 /** @todo FPU.CS and FPUIP need to be kept separately. */
3297 pCtx->fpu.CS = pCtx->cs;
3298 pCtx->fpu.FPUIP = pCtx->rip;
3299}
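
/* Informational note: FOP is the 11-bit x87 opcode register - the low byte is
 * the ModR/M byte and bits 8-10 are the low three bits of the preceding
 * D8h-DFh escape byte, which is exactly how the two opcode bytes are combined
 * above. */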
3300
3301
3302/**
3303 * Updates the FPU.DS and FPUDP registers.
3304 *
3305 * @param pIemCpu The IEM per CPU data.
3306 * @param pCtx The CPU context.
3307 * @param iEffSeg The effective segment register.
3308 * @param GCPtrEff The effective address relative to @a iEffSeg.
3309 */
3310DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3311{
3312 RTSEL sel;
3313 switch (iEffSeg)
3314 {
3315 case X86_SREG_DS: sel = pCtx->ds; break;
3316 case X86_SREG_SS: sel = pCtx->ss; break;
3317 case X86_SREG_CS: sel = pCtx->cs; break;
3318 case X86_SREG_ES: sel = pCtx->es; break;
3319 case X86_SREG_FS: sel = pCtx->fs; break;
3320 case X86_SREG_GS: sel = pCtx->gs; break;
3321 default:
3322 AssertMsgFailed(("%d\n", iEffSeg));
3323 sel = pCtx->ds;
3324 }
3325 /** @todo FPU.DS and FPUDP need to be kept separately. */
3326 pCtx->fpu.DS = sel;
3327 pCtx->fpu.FPUDP = GCPtrEff;
3328}
3329
3330
3331/**
3332 * Rotates the stack registers in the push direction.
3333 *
3334 * @param pCtx The CPU context.
3335 * @remarks This is a complete waste of time, but fxsave stores the registers in
3336 * stack order.
3337 */
3338DECLINLINE(void) iemFpuRotateStackPush(PCPUMCTX pCtx)
3339{
3340 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[7].r80;
3341 pCtx->fpu.aRegs[7].r80 = pCtx->fpu.aRegs[6].r80;
3342 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[5].r80;
3343 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[4].r80;
3344 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[3].r80;
3345 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[2].r80;
3346 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[1].r80;
3347 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[0].r80;
3348 pCtx->fpu.aRegs[0].r80 = r80Tmp;
3349}
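
/* Informational note: the caller stores the pushed value in aRegs[7] before
 * calling this, so after the rotation it ends up in aRegs[0], keeping
 * aRegs[i] == ST(i) in the register order FXSAVE uses. */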
3350
3351
3352/**
3353 * Rotates the stack registers in the pop direction.
3354 *
3355 * @param pCtx The CPU context.
3356 * @remarks This is a complete waste of time, but fxsave stores the registers in
3357 * stack order.
3358 */
3359DECLINLINE(void) iemFpuRotateStackPop(PCPUMCTX pCtx)
3360{
3361 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[0].r80;
3362 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[1].r80;
3363 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[2].r80;
3364 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[3].r80;
3365 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[4].r80;
3366 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[5].r80;
3367 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[6].r80;
3368 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[7].r80;
3369 pCtx->fpu.aRegs[7].r80 = r80Tmp;
3370}
3371
3372
3373#if 0
3374/**
3375 *
3376 * @param pIemCpu The IEM per CPU data.
3377 * @param pResult The FPU operation result to push.
3378 * @param pCtx The CPU context.
3379 * @param iDstReg The destination register,
3380 * @param cStackAdj The stack adjustment on successful operation.
3381 * Note that this is an unsigned value.
3382 * @param fFlags Flags.
3383 */
3384static void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PCPUMCTX pCtx, uint16_t iDstReg,
3385 uint8_t cStackAdj, )
3386{
3387 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3388 iemFpuUpdateOpcodeAndIP(pIemCpu, pCtx);
3389
3390 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
3391 if (!(RT_BIT(iNewTop) & pCtx->fpu.FTW))
3392 {
3393 /* No stack error. */
3394 uint16_t fXcpts = (pResult->FSW & (X86_FSW_IE | X86_FSW_DE | X86_FSW_ZE | X86_FSW_OE | X86_FSW_UE | X86_FSW_PE))
3395 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_DM | X86_FCW_ZM | X86_FCW_OM | X86_FCW_UM | X86_FCW_PM));
3396 if (!fXcpts)
3397 {
3398 /* No unmasked exceptions, just store the result. */
3399 pCtx->fpu.FSW &= X86_FSW_TOP_MASK | X86_FSW_C0 | X86_FSW_C1 | X86_FSW_C2 | X86_FSW_C3;
3400 pCtx->fpu.FSW |= (iNewTop << X86_FSW_TOP_SHIFT) | (pResult->FSW & ~(X86_FSW_TOP_MASK | X86_FSW_B | X86_FSW_ES));
3401 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3402 pCtx->fpu.aRegs[7].r80 = pResult->r80Result;
3403 }
3404 else
3405 {
3406 AssertFailed();
3407 }
3408
3409 }
3410 else if (pCtx->fpu.FCW & X86_FCW_IM)
3411 {
3412 /* Masked stack overflow. */
3413 pCtx->fpu.FSW &= X86_FSW_TOP_MASK | X86_FSW_C0 | X86_FSW_C1 | X86_FSW_C2 | X86_FSW_C3;
3414 pCtx->fpu.FSW |= (iNewTop << X86_FSW_TOP_SHIFT) | X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
3415 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3416 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
3417 }
3418 else
3419 {
3420 /* Stack overflow exception. */
3421 pCtx->fpu.FSW &= X86_FSW_C0 | X86_FSW_C1 | X86_FSW_C2 | X86_FSW_C3;
3422 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
3423 return;
3424 }
3425
3426 iemFpuRotateStackPush(pCtx);
3427}
3428
3429
3430/**
3431 * Writes a FPU result to the FPU stack after inspecting the resulting
3432 * statuses.
3433 *
3434 * @param pIemCpu The IEM per CPU data.
3435 * @param pResult The FPU operation result to push.
3436 * @param iReg The stack relative FPU register number.
3437 */
3438static void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iReg)
3439{
3440 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3441 iemFpuUpdateOpcodeAndIP(pIemCpu, pCtx);
3442
3443    uint16_t iActualReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iReg) & X86_FSW_TOP_SMASK;
3444
3445 uint16_t fXcpts = (pResult->FSW & (X86_FSW_IE | X86_FSW_DE | X86_FSW_ZE | X86_FSW_OE | X86_FSW_UE | X86_FSW_PE))
3446 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_DM | X86_FCW_ZM | X86_FCW_OM | X86_FCW_UM | X86_FCW_PM));
3447 if (!fXcpts)
3448 {
3449 /* No unmasked exceptions, just store the result. */
3450 pCtx->fpu.FSW &= X86_FSW_C0 | X86_FSW_C1 | X86_FSW_C2 | X86_FSW_C3;
3451 pCtx->fpu.FSW |= (pResult->FSW & ~(X86_FSW_TOP_MASK | X86_FSW_B | X86_FSW_ES));
3452        pCtx->fpu.FTW |= RT_BIT(iActualReg);
3453        pCtx->fpu.aRegs[iReg].r80 = pResult->r80Result;
3454 }
3455 else
3456 {
3457 AssertFailed();
3458 }
3459}
3460#endif
3461
3462
3463/**
3464 * Pushes a FPU result onto the FPU stack after inspecting the resulting
3465 * statuses.
3466 *
3467 * @param pIemCpu The IEM per CPU data.
3468 * @param pResult The FPU operation result to push.
3469 */
3470static void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
3471{
3472 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3473 iemFpuUpdateOpcodeAndIP(pIemCpu, pCtx);
3474
3475 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
3476 if (!(RT_BIT(iNewTop) & pCtx->fpu.FTW))
3477 {
3478 /* No stack error. */
3479 uint16_t fXcpts = (pResult->FSW & (X86_FSW_IE | X86_FSW_DE | X86_FSW_ZE | X86_FSW_OE | X86_FSW_UE | X86_FSW_PE))
3480 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_DM | X86_FCW_ZM | X86_FCW_OM | X86_FCW_UM | X86_FCW_PM));
3481 if (!fXcpts)
3482 {
3483 /* No unmasked exceptions, just store the result. */
3484 pCtx->fpu.FSW &= X86_FSW_TOP_MASK | X86_FSW_C0 | X86_FSW_C1 | X86_FSW_C2 | X86_FSW_C3;
3485 pCtx->fpu.FSW |= (iNewTop << X86_FSW_TOP_SHIFT) | (pResult->FSW & ~(X86_FSW_TOP_MASK | X86_FSW_B | X86_FSW_ES));
3486 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3487 pCtx->fpu.aRegs[7].r80 = pResult->r80Result;
3488 }
3489 else
3490 {
3491 AssertFailed();
3492 }
3493
3494 }
3495 else if (pCtx->fpu.FCW & X86_FCW_IM)
3496 {
3497 /* Masked stack overflow. */
3498 pCtx->fpu.FSW &= X86_FSW_TOP_MASK | X86_FSW_C0 | X86_FSW_C1 | X86_FSW_C2 | X86_FSW_C3;
3499 pCtx->fpu.FSW |= (iNewTop << X86_FSW_TOP_SHIFT) | X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
3500 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3501 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
3502 }
3503 else
3504 {
3505 /* Stack overflow exception. */
3506 pCtx->fpu.FSW &= X86_FSW_C0 | X86_FSW_C1 | X86_FSW_C2 | X86_FSW_C3;
3507 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
3508 return;
3509 }
3510
3511 iemFpuRotateStackPush(pCtx);
3512}
3513
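/*
 * Worked example of the TOP arithmetic used above (illustrative sketch, not
 * built; the helper name is made up): adding 7 modulo 8 is a decrement, so a
 * push moves TOP from 0 to 7 and from 3 to 2.
 */
#if 0
static void iemFpuTopPushArithmeticExample(void)
{
    uint16_t fFsw    = (uint16_t)(3 << X86_FSW_TOP_SHIFT);          /* TOP = 3 */
    uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
    Assert(iNewTop == 2);                                            /* the push decremented TOP */
}
#endif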
3514
3515/**
3516 * Pushes a FPU result onto the FPU stack after inspecting the resulting
3517 * statuses, and sets FPU.DS and FPUDP.
3518 *
3519 * @param pIemCpu The IEM per CPU data.
3520 * @param pResult The FPU operation result to push.
3521 * @param iEffSeg The effective segment register.
3522 * @param GCPtrEff The effective address relative to @a iEffSeg.
3523 */
3524static void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3525{
3526 iemFpuUpdateDP(pIemCpu, pIemCpu->CTX_SUFF(pCtx), iEffSeg, GCPtrEff);
3527 iemFpuPushResult(pIemCpu, pResult);
3528}
3529
3530
3531static void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
3532{
3533}
3534
3535static void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3536{
3537 iemFpuUpdateDP(pIemCpu, pIemCpu->CTX_SUFF(pCtx), iEffSeg, GCPtrEff);
3538    //iemFpuStoreResult(pIemCpu, pResult, iStReg);
3539}
3540
3541
3542/** @} */
3543
3544
3545/** @name Memory access.
3546 *
3547 * @{
3548 */
3549
3550
3551/**
3552 * Checks if the given segment can be written to, raising the appropriate
3553 * exception if not.
3554 *
3555 * @returns VBox strict status code.
3556 *
3557 * @param pIemCpu The IEM per CPU data.
3558 * @param pHid Pointer to the hidden register.
3559 * @param iSegReg The register number.
3560 */
3561static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
3562{
3563 if (!pHid->Attr.n.u1Present)
3564 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
3565
3566 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
3567 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
3568 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
3569 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
3570
3571 /** @todo DPL/RPL/CPL? */
3572
3573 return VINF_SUCCESS;
3574}
3575
3576
3577/**
3578 * Checks if the given segment can be read from, raising the appropriate
3579 * exception if not.
3580 *
3581 * @returns VBox strict status code.
3582 *
3583 * @param pIemCpu The IEM per CPU data.
3584 * @param pHid Pointer to the hidden register.
3585 * @param iSegReg The register number.
3586 */
3587static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
3588{
3589 if (!pHid->Attr.n.u1Present)
3590 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
3591
3592 if ( (pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE
3593 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
3594 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
3595
3596 /** @todo DPL/RPL/CPL? */
3597
3598 return VINF_SUCCESS;
3599}
3600
3601
3602/**
3603 * Applies the segment limit, base and attributes.
3604 *
3605 * This may raise a \#GP or \#SS.
3606 *
3607 * @returns VBox strict status code.
3608 *
3609 * @param pIemCpu The IEM per CPU data.
3610 * @param fAccess The kind of access which is being performed.
3611 * @param iSegReg The index of the segment register to apply.
3612 * This is UINT8_MAX if none (for IDT, GDT, LDT,
3613 * TSS, ++).
 * @param   cbMem               The number of bytes being accessed.
3614 * @param   pGCPtrMem           Pointer to the guest memory address to apply
3615 * segmentation to. Input and output parameter.
3616 */
3617static VBOXSTRICTRC iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg,
3618 size_t cbMem, PRTGCPTR pGCPtrMem)
3619{
3620 if (iSegReg == UINT8_MAX)
3621 return VINF_SUCCESS;
3622
3623 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
3624 switch (pIemCpu->enmCpuMode)
3625 {
3626 case IEMMODE_16BIT:
3627 case IEMMODE_32BIT:
3628 {
3629 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
3630 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
3631
3632 Assert(pSel->Attr.n.u1Present);
3633 Assert(pSel->Attr.n.u1DescType);
3634 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
3635 {
3636 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
3637 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
3638 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
3639
3640 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3641 {
3642 /** @todo CPL check. */
3643 }
3644
3645 /*
3646 * There are two kinds of data selectors, normal and expand down.
3647 */
3648 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
3649 {
3650 if ( GCPtrFirst32 > pSel->u32Limit
3651 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
3652 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
3653
3654 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
3655 }
3656 else
3657 {
3658 /** @todo implement expand down segments. */
3659 AssertFailed(/** @todo implement this */);
3660 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
3661 }
3662 }
3663 else
3664 {
3665
3666 /*
3667 * Code selectors can normally only be used to read through; writing is
3668 * only permitted in real and V8086 mode.
3669 */
3670 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
3671 || ( (fAccess & IEM_ACCESS_TYPE_READ)
3672 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
3673 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
3674 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
3675
3676 if ( GCPtrFirst32 > pSel->u32Limit
3677 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
3678 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
3679
3680 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3681 {
3682 /** @todo CPL check. */
3683 }
3684
3685 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
3686 }
3687 return VINF_SUCCESS;
3688 }
3689
3690 case IEMMODE_64BIT:
3691 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
3692 *pGCPtrMem += pSel->u64Base;
3693 return VINF_SUCCESS;
3694
3695 default:
3696 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
3697 }
3698}
3699
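/*
 * Usage sketch for the segmentation helper above (illustrative, not built;
 * the function name is made up): applying DS segmentation to an effective
 * address before a 2 byte data read.
 */
#if 0
static VBOXSTRICTRC iemExampleApplyDsSegment(PIEMCPU pIemCpu, RTGCPTR GCPtrEff)
{
    RTGCPTR GCPtrMem = GCPtrEff;    /* in: effective address, out: linear address */
    VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, IEM_ACCESS_DATA_R, X86_SREG_DS, 2 /*cbMem*/, &GCPtrMem);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;            /* #GP or #SS has been raised */
    /* GCPtrMem now includes the DS base (outside 64-bit mode) and has passed the limit checks. */
    return VINF_SUCCESS;
}
#endif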
3700
3701/**
3702 * Translates a virtual address to a physical address and checks if we
3703 * can access the page as specified.
3704 *
3705 * @param pIemCpu The IEM per CPU data.
3706 * @param GCPtrMem The virtual address.
3707 * @param fAccess The intended access.
3708 * @param pGCPhysMem Where to return the physical address.
3709 */
3710static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
3711 PRTGCPHYS pGCPhysMem)
3712{
3713 /** @todo Need a different PGM interface here. We're currently using
3714 * generic / REM interfaces. This won't cut it for R0 & RC. */
3715 RTGCPHYS GCPhys;
3716 uint64_t fFlags;
3717 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
3718 if (RT_FAILURE(rc))
3719 {
3720 /** @todo Check unassigned memory in unpaged mode. */
3721 /** @todo Reserved bits in page tables. Requires new PGM interface. */
3722 *pGCPhysMem = NIL_RTGCPHYS;
3723 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
3724 }
3725
3726 /* If the page is writable and does not have the no-exec bit set, all
3727 access is allowed. Otherwise we'll have to check more carefully... */
3728 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
3729 {
3730 /* Write to read only memory? */
3731 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
3732 && !(fFlags & X86_PTE_RW)
3733 && ( pIemCpu->uCpl != 0
3734 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
3735 {
3736 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page\n", GCPtrMem));
3737 *pGCPhysMem = NIL_RTGCPHYS;
3738 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
3739 }
3740
3741 /* Kernel memory accessed by userland? */
3742 if ( !(fFlags & X86_PTE_US)
3743 && pIemCpu->uCpl == 3
3744 && !(fAccess & IEM_ACCESS_WHAT_SYS))
3745 {
3746 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page\n", GCPtrMem));
3747 *pGCPhysMem = NIL_RTGCPHYS;
3748 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
3749 }
3750
3751 /* Executing non-executable memory? */
3752 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
3753 && (fFlags & X86_PTE_PAE_NX)
3754 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
3755 {
3756 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX\n", GCPtrMem));
3757 *pGCPhysMem = NIL_RTGCPHYS;
3758 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
3759 VERR_ACCESS_DENIED);
3760 }
3761 }
3762
3763 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
3764 *pGCPhysMem = GCPhys;
3765 return VINF_SUCCESS;
3766}
3767
3768
3769
3770/**
3771 * Maps a physical page.
3772 *
3773 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
3774 * @param pIemCpu The IEM per CPU data.
3775 * @param GCPhysMem The physical address.
3776 * @param fAccess The intended access.
3777 * @param ppvMem Where to return the mapping address.
3778 */
3779static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem)
3780{
3781#ifdef IEM_VERIFICATION_MODE
3782 /* Force the alternative path so we can ignore writes. */
3783 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
3784 return VERR_PGM_PHYS_TLB_CATCH_ALL;
3785#endif
3786
3787 /*
3788 * If we can map the page without trouble, we do block processing
3789 * until the end of the current page.
3790 */
3791 /** @todo need some better API. */
3792 return PGMR3PhysTlbGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu),
3793 GCPhysMem,
3794 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
3795 ppvMem);
3796}
3797
3798
3799/**
3800 * Unmap a page previously mapped by iemMemPageMap.
3801 *
3802 * This is currently a dummy function.
3803 *
3804 * @param pIemCpu The IEM per CPU data.
3805 * @param GCPhysMem The physical address.
3806 * @param fAccess The intended access.
3807 * @param pvMem What iemMemPageMap returned.
3808 */
3809DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem)
3810{
3811 NOREF(pIemCpu);
3812 NOREF(GCPhysMem);
3813 NOREF(fAccess);
3814 NOREF(pvMem);
3815}
3816
3817
3818/**
3819 * Looks up a memory mapping entry.
3820 *
3821 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
3822 * @param pIemCpu The IEM per CPU data.
3823 * @param pvMem The memory address.
3824 * @param pvMem The memory address.
3824 * @param fAccess The access mode to match.
3825 */
3826DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
3827{
3828 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
3829 if ( pIemCpu->aMemMappings[0].pv == pvMem
3830 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
3831 return 0;
3832 if ( pIemCpu->aMemMappings[1].pv == pvMem
3833 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
3834 return 1;
3835 if ( pIemCpu->aMemMappings[2].pv == pvMem
3836 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
3837 return 2;
3838 return VERR_NOT_FOUND;
3839}
3840
3841
3842/**
3843 * Finds a free memory mapping entry when iNextMapping cannot be used.
3844 *
3845 * @returns Memory mapping index, 1024 on failure.
3846 * @param pIemCpu The IEM per CPU data.
3847 */
3848static unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
3849{
3850 /*
3851 * The easy case.
3852 */
3853 if (pIemCpu->cActiveMappings == 0)
3854 {
3855 pIemCpu->iNextMapping = 1;
3856 return 0;
3857 }
3858
3859 /* There should be enough mappings for all instructions. */
3860 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
3861
3862 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
3863 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
3864 return i;
3865
3866 AssertFailedReturn(1024);
3867}
3868
3869
3870/**
3871 * Commits a bounce buffer that needs writing back and unmaps it.
3872 *
3873 * @returns Strict VBox status code.
3874 * @param pIemCpu The IEM per CPU data.
3875 * @param iMemMap The index of the buffer to commit.
3876 */
3877static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
3878{
3879 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
3880 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
3881
3882 /*
3883 * Do the writing.
3884 */
3885 int rc;
3886 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
3887 && !IEM_VERIFICATION_ENABLED(pIemCpu))
3888 {
3889 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
3890 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
3891 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
3892 if (!pIemCpu->fByPassHandlers)
3893 {
3894 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
3895 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
3896 pbBuf,
3897 cbFirst);
3898 if (cbSecond && rc == VINF_SUCCESS)
3899 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
3900 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
3901 pbBuf + cbFirst,
3902 cbSecond);
3903 }
3904 else
3905 {
3906 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
3907 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
3908 pbBuf,
3909 cbFirst);
3910 if (cbSecond && rc == VINF_SUCCESS)
3911 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
3912 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
3913 pbBuf + cbFirst,
3914 cbSecond);
3915 }
3916 }
3917 else
3918 rc = VINF_SUCCESS;
3919
3920#ifdef IEM_VERIFICATION_MODE
3921 /*
3922 * Record the write(s).
3923 */
3924 if (!pIemCpu->fNoRem)
3925 {
3926 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
3927 if (pEvtRec)
3928 {
3929 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
3930 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
3931 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
3932 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
3933 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
3934 *pIemCpu->ppIemEvtRecNext = pEvtRec;
3935 }
3936 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
3937 {
3938 pEvtRec = iemVerifyAllocRecord(pIemCpu);
3939 if (pEvtRec)
3940 {
3941 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
3942 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
3943 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
3944 memcpy(pEvtRec->u.RamWrite.ab,
3945 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
3946 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
3947 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
3948 *pIemCpu->ppIemEvtRecNext = pEvtRec;
3949 }
3950 }
3951 }
3952#endif
3953
3954 /*
3955 * Free the mapping entry.
3956 */
3957 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
3958 Assert(pIemCpu->cActiveMappings != 0);
3959 pIemCpu->cActiveMappings--;
3960 return rc;
3961}
3962
3963
3964/**
3965 * iemMemMap worker that deals with a request crossing pages.
3966 */
3967static VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem,
3968 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
3969{
3970 /*
3971 * Do the address translations.
3972 */
3973 RTGCPHYS GCPhysFirst;
3974 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
3975 if (rcStrict != VINF_SUCCESS)
3976 return rcStrict;
3977
3978 RTGCPHYS GCPhysSecond;
3979 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
3980 if (rcStrict != VINF_SUCCESS)
3981 return rcStrict;
3982 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
3983
3984 /*
3985 * Read in the current memory content if it's a read or execute access.
3986 */
3987 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
3988 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
3989 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
3990
3991 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC))
3992 {
3993 int rc;
3994 if (!pIemCpu->fByPassHandlers)
3995 {
3996 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbFirstPage);
3997 if (rc != VINF_SUCCESS)
3998 return rc;
3999 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage);
4000 if (rc != VINF_SUCCESS)
4001 return rc;
4002 }
4003 else
4004 {
4005 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbFirstPage);
4006 if (rc != VINF_SUCCESS)
4007 return rc;
4008 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
4009 if (rc != VINF_SUCCESS)
4010 return rc;
4011 }
4012
4013#ifdef IEM_VERIFICATION_MODE
4014 if (!pIemCpu->fNoRem)
4015 {
4016 /*
4017 * Record the reads.
4018 */
4019 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4020 if (pEvtRec)
4021 {
4022 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
4023 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
4024 pEvtRec->u.RamRead.cb = cbFirstPage;
4025 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4026 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4027 }
4028 pEvtRec = iemVerifyAllocRecord(pIemCpu);
4029 if (pEvtRec)
4030 {
4031 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
4032 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
4033 pEvtRec->u.RamRead.cb = cbSecondPage;
4034 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4035 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4036 }
4037 }
4038#endif
4039 }
4040#ifdef VBOX_STRICT
4041 else
4042 memset(pbBuf, 0xcc, cbMem);
4043#endif
4044#ifdef VBOX_STRICT
4045 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
4046 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
4047#endif
4048
4049 /*
4050 * Commit the bounce buffer entry.
4051 */
4052 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
4053 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
4054 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
4055 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
4056 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
4057 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
4058 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
4059 pIemCpu->cActiveMappings++;
4060
4061 *ppvMem = pbBuf;
4062 return VINF_SUCCESS;
4063}
4064
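/*
 * Worked example of the page split above (illustrative sketch, not built;
 * the helper name is made up): a 4 byte access at an address ending in 0xffe
 * puts 2 bytes on the first page and 2 bytes on the second.
 */
#if 0
static void iemExampleCrossPageSplit(void)
{
    RTGCPTR  GCPtrFirst   = UINT32_C(0x00010ffe);
    size_t   cbMem        = 4;
    uint32_t cbFirstPage  = PAGE_SIZE - (uint32_t)(GCPtrFirst & PAGE_OFFSET_MASK);
    uint32_t cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
    Assert(cbFirstPage == 2 && cbSecondPage == 2);
}
#endif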
4065
4066/**
4067 * iemMemMap worker that deals with iemMemPageMap failures.
4068 */
4069static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
4070 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
4071{
4072 /*
4073 * Filter out conditions we can handle and the ones which shouldn't happen.
4074 */
4075 if ( rcMap != VINF_PGM_PHYS_TLB_CATCH_WRITE
4076 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
4077 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
4078 {
4079 AssertReturn(RT_FAILURE_NP(rcMap), VERR_INTERNAL_ERROR_3);
4080 return rcMap;
4081 }
4082 pIemCpu->cPotentialExits++;
4083
4084 /*
4085 * Read in the current memory content if it's a read or execute access.
4086 */
4087 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
4088 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC))
4089 {
4090 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
4091 memset(pbBuf, 0xff, cbMem);
4092 else
4093 {
4094 int rc;
4095 if (!pIemCpu->fByPassHandlers)
4096 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem);
4097 else
4098 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
4099 if (rc != VINF_SUCCESS)
4100 return rc;
4101 }
4102
4103#ifdef IEM_VERIFICATION_MODE
4104 if (!pIemCpu->fNoRem)
4105 {
4106 /*
4107 * Record the read.
4108 */
4109 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4110 if (pEvtRec)
4111 {
4112 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
4113 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
4114 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
4115 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4116 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4117 }
4118 }
4119#endif
4120 }
4121#ifdef VBOX_STRICT
4122 else
4123 memset(pbBuf, 0xcc, cbMem);
4124#endif
4125#ifdef VBOX_STRICT
4126 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
4127 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
4128#endif
4129
4130 /*
4131 * Commit the bounce buffer entry.
4132 */
4133 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
4134 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
4135 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
4136 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
4137 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
4138 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
4139 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
4140 pIemCpu->cActiveMappings++;
4141
4142 *ppvMem = pbBuf;
4143 return VINF_SUCCESS;
4144}
4145
4146
4147
4148/**
4149 * Maps the specified guest memory for the given kind of access.
4150 *
4151 * This may use bounce buffering of the memory if the access crosses a page
4152 * boundary or if there is an access handler installed for any of it. Because
4153 * of lock prefix guarantees, we're in for some extra clutter when this
4154 * happens.
4155 *
4156 * This may raise a \#GP, \#SS, \#PF or \#AC.
4157 *
4158 * @returns VBox strict status code.
4159 *
4160 * @param pIemCpu The IEM per CPU data.
4161 * @param ppvMem Where to return the pointer to the mapped
4162 * memory.
4163 * @param cbMem The number of bytes to map. This is usually 1,
4164 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
4165 * string operations it can be up to a page.
4166 * @param iSegReg The index of the segment register to use for
4167 * this access. The base and limits are checked.
4168 * Use UINT8_MAX to indicate that no segmentation
4169 * is required (for IDT, GDT and LDT accesses).
4170 * @param GCPtrMem The address of the guest memory.
4171 * @param fAccess How the memory is being accessed. The
4172 * IEM_ACCESS_TYPE_XXX bit is used to figure out
4173 * how to map the memory, while the
4174 * IEM_ACCESS_WHAT_XXX bit is used when raising
4175 * exceptions.
4176 */
4177static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
4178{
4179 /*
4180 * Check the input and figure out which mapping entry to use.
4181 */
4182 Assert(cbMem <= 32 || cbMem == 512);
4183    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
4184
4185 unsigned iMemMap = pIemCpu->iNextMapping;
4186 if (iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings))
4187 {
4188 iMemMap = iemMemMapFindFree(pIemCpu);
4189 AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3);
4190 }
4191
4192 /*
4193 * Map the memory, checking that we can actually access it. If something
4194 * slightly complicated happens, fall back on bounce buffering.
4195 */
4196 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
4197 if (rcStrict != VINF_SUCCESS)
4198 return rcStrict;
4199
4200 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
4201 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
4202
4203 RTGCPHYS GCPhysFirst;
4204 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
4205 if (rcStrict != VINF_SUCCESS)
4206 return rcStrict;
4207
4208 void *pvMem;
4209 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem);
4210 if (rcStrict != VINF_SUCCESS)
4211 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
4212
4213 /*
4214 * Fill in the mapping table entry.
4215 */
4216 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
4217 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
4218 pIemCpu->iNextMapping = iMemMap + 1;
4219 pIemCpu->cActiveMappings++;
4220
4221 *ppvMem = pvMem;
4222 return VINF_SUCCESS;
4223}
4224
4225
4226/**
4227 * Commits the guest memory if bounce buffered and unmaps it.
4228 *
4229 * @returns Strict VBox status code.
4230 * @param pIemCpu The IEM per CPU data.
4231 * @param pvMem The mapping.
4232 * @param fAccess The kind of access.
4233 */
4234static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
4235{
4236 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
4237 AssertReturn(iMemMap >= 0, iMemMap);
4238
4239 /*
4240 * If it's bounce buffered, we need to write back the buffer.
4241 */
4242 if ( (pIemCpu->aMemMappings[iMemMap].fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_TYPE_WRITE))
4243 == (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_TYPE_WRITE))
4244 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
4245
4246 /* Free the entry. */
4247 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
4248 Assert(pIemCpu->cActiveMappings != 0);
4249 pIemCpu->cActiveMappings--;
4250 return VINF_SUCCESS;
4251}
4252
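/*
 * Usage sketch for the map/commit pair above (illustrative, not built; the
 * function name is made up and IEM_ACCESS_DATA_RW is assumed to be the
 * combined read+write data access constant): a read-modify-write of a byte.
 */
#if 0
static VBOXSTRICTRC iemExampleOrDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t bMask)
{
    uint8_t *pu8;
    VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu8, sizeof(*pu8), iSegReg, GCPtrMem, IEM_ACCESS_DATA_RW);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu8 |= bMask;  /* modify the mapped (possibly bounce buffered) byte */
        rcStrict = iemMemCommitAndUnmap(pIemCpu, pu8, IEM_ACCESS_DATA_RW);
    }
    return rcStrict;
}
#endif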
4253
4254/**
4255 * Fetches a data byte.
4256 *
4257 * @returns Strict VBox status code.
4258 * @param pIemCpu The IEM per CPU data.
4259 * @param pu8Dst Where to return the byte.
4260 * @param iSegReg The index of the segment register to use for
4261 * this access. The base and limits are checked.
4262 * @param GCPtrMem The address of the guest memory.
4263 */
4264static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
4265{
4266 /* The lazy approach for now... */
4267 uint8_t const *pu8Src;
4268 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
4269 if (rc == VINF_SUCCESS)
4270 {
4271 *pu8Dst = *pu8Src;
4272 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
4273 }
4274 return rc;
4275}
4276
4277
4278/**
4279 * Fetches a data word.
4280 *
4281 * @returns Strict VBox status code.
4282 * @param pIemCpu The IEM per CPU data.
4283 * @param pu16Dst Where to return the word.
4284 * @param iSegReg The index of the segment register to use for
4285 * this access. The base and limits are checked.
4286 * @param GCPtrMem The address of the guest memory.
4287 */
4288static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
4289{
4290 /* The lazy approach for now... */
4291 uint16_t const *pu16Src;
4292 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
4293 if (rc == VINF_SUCCESS)
4294 {
4295 *pu16Dst = *pu16Src;
4296 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
4297 }
4298 return rc;
4299}
4300
4301
4302/**
4303 * Fetches a data dword.
4304 *
4305 * @returns Strict VBox status code.
4306 * @param pIemCpu The IEM per CPU data.
4307 * @param pu32Dst Where to return the dword.
4308 * @param iSegReg The index of the segment register to use for
4309 * this access. The base and limits are checked.
4310 * @param GCPtrMem The address of the guest memory.
4311 */
4312static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
4313{
4314 /* The lazy approach for now... */
4315 uint32_t const *pu32Src;
4316 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
4317 if (rc == VINF_SUCCESS)
4318 {
4319 *pu32Dst = *pu32Src;
4320 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
4321 }
4322 return rc;
4323}
4324
4325
4326#ifdef SOME_UNUSED_FUNCTION
4327/**
4328 * Fetches a data dword and sign extends it to a qword.
4329 *
4330 * @returns Strict VBox status code.
4331 * @param pIemCpu The IEM per CPU data.
4332 * @param pu64Dst Where to return the sign extended value.
4333 * @param iSegReg The index of the segment register to use for
4334 * this access. The base and limits are checked.
4335 * @param GCPtrMem The address of the guest memory.
4336 */
4337static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
4338{
4339 /* The lazy approach for now... */
4340 int32_t const *pi32Src;
4341 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
4342 if (rc == VINF_SUCCESS)
4343 {
4344 *pu64Dst = *pi32Src;
4345 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
4346 }
4347#ifdef __GNUC__ /* warning: GCC may be a royal pain */
4348 else
4349 *pu64Dst = 0;
4350#endif
4351 return rc;
4352}
4353#endif
4354
4355
4356/**
4357 * Fetches a data qword.
4358 *
4359 * @returns Strict VBox status code.
4360 * @param pIemCpu The IEM per CPU data.
4361 * @param pu64Dst Where to return the qword.
4362 * @param iSegReg The index of the segment register to use for
4363 * this access. The base and limits are checked.
4364 * @param GCPtrMem The address of the guest memory.
4365 */
4366static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
4367{
4368 /* The lazy approach for now... */
4369 uint64_t const *pu64Src;
4370 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
4371 if (rc == VINF_SUCCESS)
4372 {
4373 *pu64Dst = *pu64Src;
4374 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
4375 }
4376 return rc;
4377}
4378
4379
4380/**
4381 * Fetches a descriptor register (lgdt, lidt).
4382 *
4383 * @returns Strict VBox status code.
4384 * @param pIemCpu The IEM per CPU data.
4385 * @param pcbLimit Where to return the limit.
4386 * @param pGCPtrBase Where to return the base.
4387 * @param iSegReg The index of the segment register to use for
4388 * this access. The base and limits are checked.
4389 * @param GCPtrMem The address of the guest memory.
4390 * @param enmOpSize The effective operand size.
4391 */
4392static VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase,
4393 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
4394{
4395 uint8_t const *pu8Src;
4396 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
4397 (void **)&pu8Src,
4398 enmOpSize == IEMMODE_64BIT
4399 ? 2 + 8
4400 : enmOpSize == IEMMODE_32BIT
4401 ? 2 + 4
4402 : 2 + 3,
4403 iSegReg,
4404 GCPtrMem,
4405 IEM_ACCESS_DATA_R);
4406 if (rcStrict == VINF_SUCCESS)
4407 {
4408 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
4409 switch (enmOpSize)
4410 {
4411 case IEMMODE_16BIT:
4412 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
4413 break;
4414 case IEMMODE_32BIT:
4415 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
4416 break;
4417 case IEMMODE_64BIT:
4418 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
4419 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
4420 break;
4421
4422 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4423 }
4424 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
4425 }
4426 return rcStrict;
4427}
4428
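/*
 * Usage sketch for the descriptor register fetcher above (illustrative, not
 * built; the function name is made up): fetching a 32-bit lgdt/lidt operand,
 * i.e. a 16-bit limit followed by a 32-bit base.
 */
#if 0
static VBOXSTRICTRC iemExampleFetchGdtrOperand(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
{
    uint16_t cbLimit;
    RTGCPTR  GCPtrBase;
    VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEff, IEMMODE_32BIT);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* The caller would now load cbLimit and GCPtrBase into GDTR/IDTR. */
    return VINF_SUCCESS;
}
#endif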
4429
4430
4431/**
4432 * Stores a data byte.
4433 *
4434 * @returns Strict VBox status code.
4435 * @param pIemCpu The IEM per CPU data.
4436 * @param iSegReg The index of the segment register to use for
4437 * this access. The base and limits are checked.
4438 * @param GCPtrMem The address of the guest memory.
4439 * @param u8Value The value to store.
4440 */
4441static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
4442{
4443 /* The lazy approach for now... */
4444 uint8_t *pu8Dst;
4445 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
4446 if (rc == VINF_SUCCESS)
4447 {
4448 *pu8Dst = u8Value;
4449 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
4450 }
4451 return rc;
4452}
4453
4454
4455/**
4456 * Stores a data word.
4457 *
4458 * @returns Strict VBox status code.
4459 * @param pIemCpu The IEM per CPU data.
4460 * @param iSegReg The index of the segment register to use for
4461 * this access. The base and limits are checked.
4462 * @param GCPtrMem The address of the guest memory.
4463 * @param u16Value The value to store.
4464 */
4465static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
4466{
4467 /* The lazy approach for now... */
4468 uint16_t *pu16Dst;
4469 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
4470 if (rc == VINF_SUCCESS)
4471 {
4472 *pu16Dst = u16Value;
4473 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
4474 }
4475 return rc;
4476}
4477
4478
4479/**
4480 * Stores a data dword.
4481 *
4482 * @returns Strict VBox status code.
4483 * @param pIemCpu The IEM per CPU data.
4484 * @param iSegReg The index of the segment register to use for
4485 * this access. The base and limits are checked.
4486 * @param GCPtrMem The address of the guest memory.
4487 * @param u32Value The value to store.
4488 */
4489static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
4490{
4491 /* The lazy approach for now... */
4492 uint32_t *pu32Dst;
4493 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
4494 if (rc == VINF_SUCCESS)
4495 {
4496 *pu32Dst = u32Value;
4497 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
4498 }
4499 return rc;
4500}
4501
4502
4503/**
4504 * Stores a data qword.
4505 *
4506 * @returns Strict VBox status code.
4507 * @param pIemCpu The IEM per CPU data.
4508 * @param iSegReg The index of the segment register to use for
4509 * this access. The base and limits are checked.
4510 * @param GCPtrMem The address of the guest memory.
4511 * @param u64Value The value to store.
4512 */
4513static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
4514{
4515 /* The lazy approach for now... */
4516 uint64_t *pu64Dst;
4517 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
4518 if (rc == VINF_SUCCESS)
4519 {
4520 *pu64Dst = u64Value;
4521 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
4522 }
4523 return rc;
4524}
4525
4526
4527/**
4528 * Pushes a word onto the stack.
4529 *
4530 * @returns Strict VBox status code.
4531 * @param pIemCpu The IEM per CPU data.
4532 * @param u16Value The value to push.
4533 */
4534static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
4535{
4536    /* Decrement the stack pointer. */
4537 uint64_t uNewRsp;
4538 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4539 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 2, &uNewRsp);
4540
4541 /* Write the word the lazy way. */
4542 uint16_t *pu16Dst;
4543 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4544 if (rc == VINF_SUCCESS)
4545 {
4546 *pu16Dst = u16Value;
4547 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
4548 }
4549
4550    /* Commit the new RSP value unless an access handler made trouble. */
4551 if (rc == VINF_SUCCESS)
4552 pCtx->rsp = uNewRsp;
4553
4554 return rc;
4555}
4556
4557
4558/**
4559 * Pushes a dword onto the stack.
4560 *
4561 * @returns Strict VBox status code.
4562 * @param pIemCpu The IEM per CPU data.
4563 * @param u32Value The value to push.
4564 */
4565static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
4566{
4567    /* Decrement the stack pointer. */
4568 uint64_t uNewRsp;
4569 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4570 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 4, &uNewRsp);
4571
4572    /* Write the dword the lazy way. */
4573 uint32_t *pu32Dst;
4574 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4575 if (rc == VINF_SUCCESS)
4576 {
4577 *pu32Dst = u32Value;
4578 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
4579 }
4580
4581    /* Commit the new RSP value unless an access handler made trouble. */
4582 if (rc == VINF_SUCCESS)
4583 pCtx->rsp = uNewRsp;
4584
4585 return rc;
4586}
4587
4588
4589/**
4590 * Pushes a qword onto the stack.
4591 *
4592 * @returns Strict VBox status code.
4593 * @param pIemCpu The IEM per CPU data.
4594 * @param u64Value The value to push.
4595 */
4596static VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
4597{
4598    /* Decrement the stack pointer. */
4599 uint64_t uNewRsp;
4600 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4601 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 8, &uNewRsp);
4602
4603    /* Write the qword the lazy way. */
4604 uint64_t *pu64Dst;
4605 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4606 if (rc == VINF_SUCCESS)
4607 {
4608 *pu64Dst = u64Value;
4609 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
4610 }
4611
4612    /* Commit the new RSP value unless an access handler made trouble. */
4613 if (rc == VINF_SUCCESS)
4614 pCtx->rsp = uNewRsp;
4615
4616 return rc;
4617}
4618
4619
4620/**
4621 * Pops a word from the stack.
4622 *
4623 * @returns Strict VBox status code.
4624 * @param pIemCpu The IEM per CPU data.
4625 * @param pu16Value Where to store the popped value.
4626 */
4627static VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
4628{
4629 /* Increment the stack pointer. */
4630 uint64_t uNewRsp;
4631 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4632 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 2, &uNewRsp);
4633
4634    /* Fetch the word the lazy way. */
4635 uint16_t const *pu16Src;
4636 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4637 if (rc == VINF_SUCCESS)
4638 {
4639 *pu16Value = *pu16Src;
4640 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
4641
4642 /* Commit the new RSP value. */
4643 if (rc == VINF_SUCCESS)
4644 pCtx->rsp = uNewRsp;
4645 }
4646
4647 return rc;
4648}
4649
4650
4651/**
4652 * Pops a dword from the stack.
4653 *
4654 * @returns Strict VBox status code.
4655 * @param pIemCpu The IEM per CPU data.
4656 * @param pu32Value Where to store the popped value.
4657 */
4658static VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
4659{
4660 /* Increment the stack pointer. */
4661 uint64_t uNewRsp;
4662 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4663 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 4, &uNewRsp);
4664
4665    /* Fetch the dword the lazy way. */
4666 uint32_t const *pu32Src;
4667 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4668 if (rc == VINF_SUCCESS)
4669 {
4670 *pu32Value = *pu32Src;
4671 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
4672
4673 /* Commit the new RSP value. */
4674 if (rc == VINF_SUCCESS)
4675 pCtx->rsp = uNewRsp;
4676 }
4677
4678 return rc;
4679}
4680
4681
4682/**
4683 * Pops a qword from the stack.
4684 *
4685 * @returns Strict VBox status code.
4686 * @param pIemCpu The IEM per CPU data.
4687 * @param pu64Value Where to store the popped value.
4688 */
4689static VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
4690{
4691 /* Increment the stack pointer. */
4692 uint64_t uNewRsp;
4693 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4694 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 8, &uNewRsp);
4695
4696    /* Fetch the qword the lazy way. */
4697 uint64_t const *pu64Src;
4698 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4699 if (rc == VINF_SUCCESS)
4700 {
4701 *pu64Value = *pu64Src;
4702 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
4703
4704 /* Commit the new RSP value. */
4705 if (rc == VINF_SUCCESS)
4706 pCtx->rsp = uNewRsp;
4707 }
4708
4709 return rc;
4710}
4711
4712
4713/**
4714 * Pushes a word onto the stack, using a temporary stack pointer.
4715 *
4716 * @returns Strict VBox status code.
4717 * @param pIemCpu The IEM per CPU data.
4718 * @param u16Value The value to push.
4719 * @param pTmpRsp Pointer to the temporary stack pointer.
4720 */
4721static VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
4722{
4723    /* Decrement the stack pointer. */
4724 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4725 RTUINT64U NewRsp = *pTmpRsp;
4726 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 2, pCtx);
4727
4728 /* Write the word the lazy way. */
4729 uint16_t *pu16Dst;
4730 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4731 if (rc == VINF_SUCCESS)
4732 {
4733 *pu16Dst = u16Value;
4734 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
4735 }
4736
4737    /* Commit the new RSP value unless an access handler made trouble. */
4738 if (rc == VINF_SUCCESS)
4739 *pTmpRsp = NewRsp;
4740
4741 return rc;
4742}
4743
4744
4745/**
4746 * Pushes a dword onto the stack, using a temporary stack pointer.
4747 *
4748 * @returns Strict VBox status code.
4749 * @param pIemCpu The IEM per CPU data.
4750 * @param u32Value The value to push.
4751 * @param pTmpRsp Pointer to the temporary stack pointer.
4752 */
4753static VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
4754{
4755    /* Decrement the stack pointer. */
4756 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4757 RTUINT64U NewRsp = *pTmpRsp;
4758 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 4, pCtx);
4759
4760    /* Write the dword the lazy way. */
4761 uint32_t *pu32Dst;
4762 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4763 if (rc == VINF_SUCCESS)
4764 {
4765 *pu32Dst = u32Value;
4766 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
4767 }
4768
4769    /* Commit the new RSP value unless an access handler made trouble. */
4770 if (rc == VINF_SUCCESS)
4771 *pTmpRsp = NewRsp;
4772
4773 return rc;
4774}
4775
4776
4777#ifdef SOME_UNUSED_FUNCTION
4778/**
4779 * Pushes a qword onto the stack, using a temporary stack pointer.
4780 *
4781 * @returns Strict VBox status code.
4782 * @param pIemCpu The IEM per CPU data.
4783 * @param u64Value The value to push.
4784 * @param pTmpRsp Pointer to the temporary stack pointer.
4785 */
4786static VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
4787{
4788    /* Decrement the stack pointer. */
4789 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4790 RTUINT64U NewRsp = *pTmpRsp;
4791 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 8, pCtx);
4792
4793    /* Write the qword the lazy way. */
4794 uint64_t *pu64Dst;
4795 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4796 if (rc == VINF_SUCCESS)
4797 {
4798 *pu64Dst = u64Value;
4799 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
4800 }
4801
4802    /* Commit the new RSP value unless an access handler made trouble. */
4803 if (rc == VINF_SUCCESS)
4804 *pTmpRsp = NewRsp;
4805
4806 return rc;
4807}
4808#endif
4809
4810
4811/**
4812 * Pops a word from the stack, using a temporary stack pointer.
4813 *
4814 * @returns Strict VBox status code.
4815 * @param pIemCpu The IEM per CPU data.
4816 * @param pu16Value Where to store the popped value.
4817 * @param pTmpRsp Pointer to the temporary stack pointer.
4818 */
4819static VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
4820{
4821 /* Increment the stack pointer. */
4822 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4823 RTUINT64U NewRsp = *pTmpRsp;
4824 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 2, pCtx);
4825
4826    /* Fetch the word the lazy way. */
4827 uint16_t const *pu16Src;
4828 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4829 if (rc == VINF_SUCCESS)
4830 {
4831 *pu16Value = *pu16Src;
4832 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
4833
4834 /* Commit the new RSP value. */
4835 if (rc == VINF_SUCCESS)
4836 *pTmpRsp = NewRsp;
4837 }
4838
4839 return rc;
4840}
4841
4842
4843/**
4844 * Pops a dword from the stack, using a temporary stack pointer.
4845 *
4846 * @returns Strict VBox status code.
4847 * @param pIemCpu The IEM per CPU data.
4848 * @param pu32Value Where to store the popped value.
4849 * @param pTmpRsp Pointer to the temporary stack pointer.
4850 */
4851static VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
4852{
4853 /* Increment the stack pointer. */
4854 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4855 RTUINT64U NewRsp = *pTmpRsp;
4856 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 4, pCtx);
4857
4858    /* Fetch the dword the lazy way. */
4859 uint32_t const *pu32Src;
4860 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4861 if (rc == VINF_SUCCESS)
4862 {
4863 *pu32Value = *pu32Src;
4864 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
4865
4866 /* Commit the new RSP value. */
4867 if (rc == VINF_SUCCESS)
4868 *pTmpRsp = NewRsp;
4869 }
4870
4871 return rc;
4872}
4873
4874
4875/**
4876 * Pops a qword from the stack, using a temporary stack pointer.
4877 *
4878 * @returns Strict VBox status code.
4879 * @param pIemCpu The IEM per CPU data.
4880 * @param pu64Value Where to store the popped value.
4881 * @param pTmpRsp Pointer to the temporary stack pointer.
4882 */
4883static VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
4884{
4885 /* Increment the stack pointer. */
4886 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4887 RTUINT64U NewRsp = *pTmpRsp;
4888 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);
4889
4890    /* Fetch the qword the lazy way. */
4891 uint64_t const *pu64Src;
4892 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4893 if (rcStrict == VINF_SUCCESS)
4894 {
4895 *pu64Value = *pu64Src;
4896 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
4897
4898 /* Commit the new RSP value. */
4899 if (rcStrict == VINF_SUCCESS)
4900 *pTmpRsp = NewRsp;
4901 }
4902
4903 return rcStrict;
4904}
4905
4906
4907/**
4908 * Begin a special stack push (used by interrupts, exceptions and such).
4909 *
4910 * This will raise \#SS or \#PF if appropriate.
4911 *
4912 * @returns Strict VBox status code.
4913 * @param pIemCpu The IEM per CPU data.
4914 * @param cbMem The number of bytes to push onto the stack.
4915 * @param ppvMem Where to return the pointer to the stack memory.
4916 * As with the other memory functions this could be
4917 * direct access or bounce buffered access, so
4918 *                  don't commit any register state until the commit call
4919 * succeeds.
4920 * @param puNewRsp Where to return the new RSP value. This must be
4921 * passed unchanged to
4922 * iemMemStackPushCommitSpecial().
4923 */
4924static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
4925{
4926 Assert(cbMem < UINT8_MAX);
4927 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4928 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, (uint8_t)cbMem, puNewRsp);
4929 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4930}
4931
4932
4933/**
4934 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
4935 *
4936 * This will update the rSP.
4937 *
4938 * @returns Strict VBox status code.
4939 * @param pIemCpu The IEM per CPU data.
4940 * @param pvMem The pointer returned by
4941 * iemMemStackPushBeginSpecial().
4942 * @param uNewRsp The new RSP value returned by
4943 * iemMemStackPushBeginSpecial().
4944 */
4945static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
4946{
4947 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
4948 if (rcStrict == VINF_SUCCESS)
4949 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
4950 return rcStrict;
4951}
4952
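/*
 * Usage sketch for the special push protocol above (illustrative, not built;
 * the function name and frame layout are made up): an exception style push
 * of a 6 byte far pointer, filling the frame before committing RSP.
 */
#if 0
static VBOXSTRICTRC iemExamplePushFarPtr16(PIEMCPU pIemCpu, uint16_t uSel, uint32_t uOff)
{
    void     *pvStackFrame;
    uint64_t  uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, &pvStackFrame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    ((uint32_t *)pvStackFrame)[0] = uOff;   /* offset at the new stack top */
    ((uint16_t *)pvStackFrame)[2] = uSel;   /* selector just above it */
    return iemMemStackPushCommitSpecial(pIemCpu, pvStackFrame, uNewRsp); /* updates RSP on success */
}
#endif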
4953
4954/**
4955 * Begin a special stack pop (used by iret, retf and such).
4956 *
4957 * This will raise \#SS or \#PF if appropriate.
4958 *
4959 * @returns Strict VBox status code.
4960 * @param pIemCpu The IEM per CPU data.
4961 * @param cbMem The number of bytes to pop from the stack.
4962 * @param ppvMem Where to return the pointer to the stack memory.
4963 * @param puNewRsp Where to return the new RSP value. This must be
4964 * passed unchanged to
4965 * iemMemStackPopCommitSpecial() or applied
4966 * manually if iemMemStackPopDoneSpecial() is used.
4967 */
4968static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
4969{
4970 Assert(cbMem < UINT8_MAX);
4971 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4972 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, (uint8_t)cbMem, puNewRsp);
4973 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4974}
4975
4976
4977/**
4978 * Continue a special stack pop (used by iret).
4979 *
4980 * This will raise \#SS or \#PF if appropriate.
4981 *
4982 * @returns Strict VBox status code.
4983 * @param pIemCpu The IEM per CPU data.
4984 * @param cbMem The number of bytes to pop from the stack.
4985 * @param ppvMem Where to return the pointer to the stack memory.
4986 * @param puNewRsp Where to return the new RSP value. This must be
4987 * passed unchanged to
4988 * iemMemStackPopCommitSpecial() or applied
4989 * manually if iemMemStackPopDoneSpecial() is used.
4990 */
4991static VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
4992{
4993 Assert(cbMem < UINT8_MAX);
4994 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4995 RTUINT64U NewRsp;
4996 NewRsp.u = *puNewRsp;
4997    RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, (uint8_t)cbMem, pCtx);
4998 *puNewRsp = NewRsp.u;
4999 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5000}
5001
5002
5003/**
5004 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
5005 *
5006 * This will update the rSP.
5007 *
5008 * @returns Strict VBox status code.
5009 * @param pIemCpu The IEM per CPU data.
5010 * @param pvMem The pointer returned by
5011 * iemMemStackPopBeginSpecial().
5012 * @param uNewRsp The new RSP value returned by
5013 * iemMemStackPopBeginSpecial().
5014 */
5015static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
5016{
5017 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
5018 if (rcStrict == VINF_SUCCESS)
5019 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
5020 return rcStrict;
5021}
5022
5023
5024/**
5025 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
5026 * iemMemStackPopContinueSpecial).
5027 *
5028 * The caller will manually commit the rSP.
5029 *
5030 * @returns Strict VBox status code.
5031 * @param pIemCpu The IEM per CPU data.
5032 * @param pvMem The pointer returned by
5033 * iemMemStackPopBeginSpecial() or
5034 * iemMemStackPopContinueSpecial().
5035 */
5036static VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
5037{
5038 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
5039}
5040
5041
5042/**
5043 * Fetches a system table dword.
5044 *
5045 * @returns Strict VBox status code.
5046 * @param pIemCpu The IEM per CPU data.
5047 * @param pu32Dst Where to return the dword.
5048 * @param iSegReg The index of the segment register to use for
5049 * this access. The base and limits are checked.
5050 * @param GCPtrMem The address of the guest memory.
5051 */
5052static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5053{
5054 /* The lazy approach for now... */
5055 uint32_t const *pu32Src;
5056 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
5057 if (rc == VINF_SUCCESS)
5058 {
5059 *pu32Dst = *pu32Src;
5060 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
5061 }
5062 return rc;
5063}
5064
5065
5066/**
5067 * Fetches a system table qword.
5068 *
5069 * @returns Strict VBox status code.
5070 * @param pIemCpu The IEM per CPU data.
5071 * @param pu64Dst Where to return the qword.
5072 * @param iSegReg The index of the segment register to use for
5073 * this access. The base and limits are checked.
5074 * @param GCPtrMem The address of the guest memory.
5075 */
5076static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5077{
5078 /* The lazy approach for now... */
5079 uint64_t const *pu64Src;
5080 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
5081 if (rc == VINF_SUCCESS)
5082 {
5083 *pu64Dst = *pu64Src;
5084 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
5085 }
5086 return rc;
5087}
5088
5089
5090/**
5091 * Fetches a descriptor table entry.
5092 *
5093 * @returns Strict VBox status code.
5094 * @param pIemCpu The IEM per CPU.
5095 * @param pDesc Where to return the descriptor table entry.
5096 * @param uSel The selector which table entry to fetch.
5097 */
5098static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel)
5099{
5100 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5101
5102 /** @todo did the 286 require all 8 bytes to be accessible? */
5103 /*
5104 * Get the selector table base and check bounds.
5105 */
5106 RTGCPTR GCPtrBase;
5107 if (uSel & X86_SEL_LDT)
5108 {
5109 if ( !pCtx->ldtrHid.Attr.n.u1Present
5110 || (uSel | 0x7U) > pCtx->ldtrHid.u32Limit )
5111 {
5112 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
5113 uSel, pCtx->ldtrHid.u32Limit, pCtx->ldtr));
5114 /** @todo is this the right exception? */
5115 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
5116 }
5117
5118 Assert(pCtx->ldtrHid.Attr.n.u1Present);
5119 GCPtrBase = pCtx->ldtrHid.u64Base;
5120 }
5121 else
5122 {
5123 if ((uSel | 0x7U) > pCtx->gdtr.cbGdt)
5124 {
5125 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
5126 /** @todo is this the right exception? */
5127 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
5128 }
5129 GCPtrBase = pCtx->gdtr.pGdt;
5130 }
5131
5132 /*
5133 * Read the legacy descriptor and maybe the long mode extensions if
5134 * required.
5135 */
5136 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
5137 if (rcStrict == VINF_SUCCESS)
5138 {
5139 if ( !IEM_IS_LONG_MODE(pIemCpu)
5140 || pDesc->Legacy.Gen.u1DescType)
5141 pDesc->Long.au64[1] = 0;
5142 else if ((uint32_t)(uSel & X86_SEL_MASK) + 15 < (uSel & X86_SEL_LDT ? pCtx->ldtrHid.u32Limit : pCtx->gdtr.cbGdt))
5143 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8);
5144 else
5145 {
5146 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
5147 /** @todo is this the right exception? */
5148 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
5149 }
5150 }
5151 return rcStrict;
5152}
5153
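/*
 * Worked example (illustrative): for selector 0x001b (index 3, TI=0, RPL=3)
 * the bounds check above requires (0x1b | 7) = 0x1f to be within gdtr.cbGdt,
 * and the 8-byte descriptor is then read from
 * gdtr.pGdt + (0x1b & X86_SEL_MASK) = gdtr.pGdt + 0x18.
 */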
5154
5155/**
5156 * Marks the selector descriptor as accessed (only non-system descriptors).
5157 *
5158 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
5159 * will therefore skip the limit checks.
5160 *
5161 * @returns Strict VBox status code.
5162 * @param pIemCpu The IEM per CPU.
5163 * @param uSel The selector.
5164 */
5165static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
5166{
5167 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5168
5169 /*
5170 * Get the selector table base and calculate the entry address.
5171 */
5172 RTGCPTR GCPtr = uSel & X86_SEL_LDT
5173 ? pCtx->ldtrHid.u64Base
5174 : pCtx->gdtr.pGdt;
5175 GCPtr += uSel & X86_SEL_MASK;
5176
5177 /*
5178 * ASMAtomicBitSet will assert if the address is misaligned, so do some
5179 * ugly stuff to avoid that. This also makes the access atomic and more
5180 * or less removes any question about 8-bit vs 32-bit accesses.
5181 */
5182 VBOXSTRICTRC rcStrict;
5183 uint32_t volatile *pu32;
5184 if ((GCPtr & 3) == 0)
5185 {
5186 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
5187 GCPtr += 2 + 2;
5188 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
5189 if (rcStrict != VINF_SUCCESS)
5190 return rcStrict;
5191 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
5192 }
5193 else
5194 {
5195 /* The misaligned GDT/LDT case, map the whole thing. */
5196 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
5197 if (rcStrict != VINF_SUCCESS)
5198 return rcStrict;
5199 switch ((uintptr_t)pu32 & 3)
5200 {
5201 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
5202 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
5203 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
5204 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
5205 }
5206 }
5207
5208 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
5209}
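/*
 * Note (illustrative): the accessed flag is bit 40 of the 8-byte descriptor,
 * i.e. bit 0 of byte 5. In the aligned case above we map the dword at
 * offset 4, so the flag ends up at bit 8 of that dword; in the misaligned
 * case the switch re-bases the byte pointer to a dword boundary and adjusts
 * the bit index so the same descriptor bit is set atomically.
 */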
5210
5211/** @} */
5212
5213
5214/*
5215 * Include the C/C++ implementation of the instructions.
5216 */
5217#include "IEMAllCImpl.cpp.h"
5218
5219
5220
5221/** @name "Microcode" macros.
5222 *
5223 * The idea is that we should be able to use the same code both to interpret
5224 * instructions and, eventually, to feed a recompiler. Thus this obfuscation.
5225 *
5226 * @{
5227 */
5228#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
5229#define IEM_MC_END() }
5230#define IEM_MC_PAUSE() do {} while (0)
5231#define IEM_MC_CONTINUE() do {} while (0)
5232
5233/** Internal macro. */
5234#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
5235 do \
5236 { \
5237 VBOXSTRICTRC rcStrict2 = a_Expr; \
5238 if (rcStrict2 != VINF_SUCCESS) \
5239 return rcStrict2; \
5240 } while (0)
5241
5242#define IEM_MC_ADVANCE_RIP() iemRegUpdateRip(pIemCpu)
5243#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
5244#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
5245#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
5246#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
5247#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
5248#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
5249
5250#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
5251#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
5252 do { \
5253 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
5254 return iemRaiseDeviceNotAvailable(pIemCpu); \
5255 } while (0)
5256#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
5257 do { \
5258 if (iemFRegFetchFsw(pIemCpu) & X86_FSW_ES) \
5259 return iemRaiseMathFault(pIemCpu); \
5260 } while (0)
5261#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
5262 do { \
5263 if (pIemCpu->uCpl != 0) \
5264 return iemRaiseGeneralProtectionFault0(pIemCpu); \
5265 } while (0)
5266
5267
5268#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
5269#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
5270#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
5271#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
5272#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
5273#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
5274#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
5275 uint32_t a_Name; \
5276 uint32_t *a_pName = &a_Name
5277#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
5278 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
5279
5280#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
5281#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
5282
5283#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
5284#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
5285#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
5286#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
5287#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
5288#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
5289#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
5290#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
5291#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
5292#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
5293#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
5294#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
5295#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
5296#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
5297#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
5298#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
5299#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
5300#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
5301#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
5302#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
5303#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
5304#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
5305#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
5306#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
5307#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
5308#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = iemFRegFetchFsw(pIemCpu)
5309
5310#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
5311#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
5312#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
5313#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
5314#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
5315#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
5316#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
5317#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
5318#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
5319#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
5320
5321#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
5322#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
5323/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
5324 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
5325#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
5326#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
5327#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
5328#define IEM_MC_REF_FPUREG_R80(a_pr80Dst, a_iSt) (a_pr80Dst) = &(pIemCpu)->CTX_SUFF(pCtx)->fpu.aRegs[a_iSt].r80
5329
5330#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
5331#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
5332#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
5333 do { \
5334 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
5335 *pu32Reg += (a_u32Value); \
5336 pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
5337 } while (0)
5338#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
5339
5340#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
5341#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
5342#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
5343 do { \
5344 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
5345 *pu32Reg -= (a_u32Value); \
5346 pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
5347 } while (0)
5348#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
5349
5350#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
5351#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
5352#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
5353#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
5354#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
5355#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
5356#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
5357
5358#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
5359#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
5360#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
5361#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
5362
5363#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
5364#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
5365#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
5366
5367#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
5368#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
5369
5370#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
5371#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
5372#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
5373
5374#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
5375#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
5376#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
5377
5378#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
5379
5380#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
5381
5382#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
5383#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
5384#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
5385 do { \
5386 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
5387 *pu32Reg &= (a_u32Value); \
5388 pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
5389 } while (0)
5390#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
5391
5392#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
5393#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
5394#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
5395 do { \
5396 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
5397 *pu32Reg |= (a_u32Value); \
5398 pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
5399 } while (0)
5400#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
5401
5402
5403#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
5404#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
5405#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
5406
5407
5408
5409#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
5410 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
5411#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
5412 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
5413#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
5414 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
5415
5416#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
5417 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
5418#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
5419 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
5420
5421#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
5422 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
5423#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
5424 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
5425
5426#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5427 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
5428
5429#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5430 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
5431#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
5432 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
5433
5434#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
5435 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
5436#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
5437 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
5438#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
5439 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
5440
5441
5442#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
5443 do { \
5444 uint8_t u8Tmp; \
5445 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
5446 (a_u16Dst) = u8Tmp; \
5447 } while (0)
5448#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
5449 do { \
5450 uint8_t u8Tmp; \
5451 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
5452 (a_u32Dst) = u8Tmp; \
5453 } while (0)
5454#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5455 do { \
5456 uint8_t u8Tmp; \
5457 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
5458 (a_u64Dst) = u8Tmp; \
5459 } while (0)
5460#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
5461 do { \
5462 uint16_t u16Tmp; \
5463 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
5464 (a_u32Dst) = u16Tmp; \
5465 } while (0)
5466#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5467 do { \
5468 uint16_t u16Tmp; \
5469 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
5470 (a_u64Dst) = u16Tmp; \
5471 } while (0)
5472#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5473 do { \
5474 uint32_t u32Tmp; \
5475 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
5476 (a_u64Dst) = u32Tmp; \
5477 } while (0)
5478
5479#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
5480 do { \
5481 uint8_t u8Tmp; \
5482 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
5483 (a_u16Dst) = (int8_t)u8Tmp; \
5484 } while (0)
5485#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
5486 do { \
5487 uint8_t u8Tmp; \
5488 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
5489 (a_u32Dst) = (int8_t)u8Tmp; \
5490 } while (0)
5491#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5492 do { \
5493 uint8_t u8Tmp; \
5494 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
5495 (a_u64Dst) = (int8_t)u8Tmp; \
5496 } while (0)
5497#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
5498 do { \
5499 uint16_t u16Tmp; \
5500 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
5501 (a_u32Dst) = (int16_t)u16Tmp; \
5502 } while (0)
5503#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5504 do { \
5505 uint16_t u16Tmp; \
5506 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
5507 (a_u64Dst) = (int16_t)u16Tmp; \
5508 } while (0)
5509#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5510 do { \
5511 uint32_t u32Tmp; \
5512 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
5513 (a_u64Dst) = (int32_t)u32Tmp; \
5514 } while (0)
5515
5516#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
5517 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
5518#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
5519 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
5520#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
5521 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
5522#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
5523 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
5524
5525#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
5526 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
5527
5528#define IEM_MC_PUSH_U16(a_u16Value) \
5529 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
5530#define IEM_MC_PUSH_U32(a_u32Value) \
5531 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
5532#define IEM_MC_PUSH_U64(a_u64Value) \
5533 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
5534
5535#define IEM_MC_POP_U16(a_pu16Value) \
5536 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
5537#define IEM_MC_POP_U32(a_pu32Value) \
5538 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
5539#define IEM_MC_POP_U64(a_pu64Value) \
5540 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
5541
5542/** Maps guest memory for direct or bounce buffered access.
5543 * The purpose is to pass it to an operand implementation, thus the a_iArg.
5544 * @remarks May return.
5545 */
5546#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
5547 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
5548
5549/** Maps guest memory for direct or bounce buffered access.
5550 * The purpose is to pass it to an operand implementation, thus the a_iArg.
5551 * @remarks May return.
5552 */
5553#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
5554 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
5555
5556/** Commits the memory and unmaps the guest memory.
5557 * @remarks May return.
5558 */
5559#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
5560 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
5561
5562/** Calculate efficient address from R/M. */
5563#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm) \
5564 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), &(a_GCPtrEff)))
5565
5566#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
5567#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
5568#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
5569#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
5570#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
5571
5572/**
5573 * Defers the rest of the instruction emulation to a C implementation routine
5574 * and returns, only taking the standard parameters.
5575 *
5576 * @param a_pfnCImpl The pointer to the C routine.
5577 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
5578 */
5579#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
5580
5581/**
5582 * Defers the rest of instruction emulation to a C implementation routine and
5583 * returns, taking one argument in addition to the standard ones.
5584 *
5585 * @param a_pfnCImpl The pointer to the C routine.
5586 * @param a0 The argument.
5587 */
5588#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
5589
5590/**
5591 * Defers the rest of the instruction emulation to a C implementation routine
5592 * and returns, taking two arguments in addition to the standard ones.
5593 *
5594 * @param a_pfnCImpl The pointer to the C routine.
5595 * @param a0 The first extra argument.
5596 * @param a1 The second extra argument.
5597 */
5598#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
5599
5600/**
5601 * Defers the rest of the instruction emulation to a C implementation routine
5602 * and returns, taking three arguments in addition to the standard ones.
5603 *
5604 * @param a_pfnCImpl The pointer to the C routine.
5605 * @param a0 The first extra argument.
5606 * @param a1 The second extra argument.
5607 * @param a2 The third extra argument.
5608 */
5609#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
5610
5611/**
5612 * Defers the rest of the instruction emulation to a C implementation routine
5613 * and returns, taking five arguments in addition to the standard ones.
5614 *
5615 * @param a_pfnCImpl The pointer to the C routine.
5616 * @param a0 The first extra argument.
5617 * @param a1 The second extra argument.
5618 * @param a2 The third extra argument.
5619 * @param a3 The fourth extra argument.
5620 * @param a4 The fifth extra argument.
5621 */
5622#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
5623
5624/**
5625 * Defers the entire instruction emulation to a C implementation routine and
5626 * returns, only taking the standard parameters.
5627 *
5628 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
5629 *
5630 * @param a_pfnCImpl The pointer to the C routine.
5631 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
5632 */
5633#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
5634
5635/**
5636 * Defers the entire instruction emulation to a C implementation routine and
5637 * returns, taking one argument in addition to the standard ones.
5638 *
5639 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
5640 *
5641 * @param a_pfnCImpl The pointer to the C routine.
5642 * @param a0 The argument.
5643 */
5644#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
5645
5646/**
5647 * Defers the entire instruction emulation to a C implementation routine and
5648 * returns, taking two arguments in addition to the standard ones.
5649 *
5650 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
5651 *
5652 * @param a_pfnCImpl The pointer to the C routine.
5653 * @param a0 The first extra argument.
5654 * @param a1 The second extra argument.
5655 */
5656#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
5657
5658/**
5659 * Defers the entire instruction emulation to a C implementation routine and
5660 * returns, taking three arguments in addition to the standard ones.
5661 *
5662 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
5663 *
5664 * @param a_pfnCImpl The pointer to the C routine.
5665 * @param a0 The first extra argument.
5666 * @param a1 The second extra argument.
5667 * @param a2 The third extra argument.
5668 */
5669#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
5670
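/*
 * Usage note (illustrative): an opcode that is implemented entirely in C
 * simply does
 *     return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 * (C worker as found in IEMAllCImpl.cpp.h), whereas the IEM_MC_CALL_CIMPL_*
 * variants above already contain the return and are used from within an
 * IEM_MC_BEGIN/IEM_MC_END block.
 */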
5671/**
5672 * Calls an FPU assembly implementation taking two visible arguments.
5673 *
5674 * This is meant to be used from within an IEM_MC_BEGIN/IEM_MC_END block.
5675 *
5676 * @param a_pfnAImpl Pointer to the assembly FPU routine.
5677 * @param a0 The first extra argument.
5678 * @param a1 The second extra argument.
5679 */
5680#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
5681 do { \
5682 iemFpuPrepareUsage(pIemCpu); \
5683 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
5684 } while (0)
5685
5686/**
5687 * Calls an FPU assembly implementation taking three visible arguments.
5688 *
5689 * This is meant to be used from within an IEM_MC_BEGIN/IEM_MC_END block.
5690 *
5691 * @param a_pfnAImpl Pointer to the assembly FPU routine.
5692 * @param a0 The first extra argument.
5693 * @param a1 The second extra argument.
5694 * @param a2 The third extra argument.
5695 */
5696#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
5697 do { \
5698 iemFpuPrepareUsage(pIemCpu); \
5699 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \
5700 } while (0)
5701
5702/** Pushes FPU result onto the stack. */
5703#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
5704 iemFpuPushResult(pIemCpu, &a_FpuData)
5705/** Pushes FPU result onto the stack and sets the FPUDP. */
5706#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
5707 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
5708
5709/** Stores FPU result in a stack register. */
5710#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
5711 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
5712/** Stores FPU result in a stack register and sets the FPUDP. */
5713#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
5714 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
5715
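/*
 * Illustrative sketch (disabled): how the FPU call and result macros above
 * combine for a two operand ST(0),ST(1) operation. The IEMFPURESULT /
 * PIEMFPURESULT types and the iemAImpl_fadd_r80_by_r80 worker follow the
 * naming of the FPU code under construction and should be treated as
 * assumptions here.
 */
#if 0 /* example only */
    IEM_MC_BEGIN(3, 1);
    IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,    FpuRes, 0);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,         1);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,         2);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_REF_FPUREG_R80(pr80Value1, 0 /*ST(0)*/);
    IEM_MC_REF_FPUREG_R80(pr80Value2, 1 /*ST(1)*/);
    IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
    IEM_MC_STORE_FPU_RESULT(FpuRes, 0 /*ST(0)*/);

    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
#endif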
5716
5717#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
5718#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
5719#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
5720#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
5721#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
5722 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
5723 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
5724#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
5725 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
5726 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
5727#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
5728 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
5729 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
5730 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
5731#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
5732 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
5733 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
5734 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
5735#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
5736#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
5737#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
5738#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
5739 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
5740 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5741#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
5742 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
5743 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5744#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
5745 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
5746 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5747#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
5748 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
5749 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5750#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
5751 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
5752 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5753#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
5754 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
5755 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5756#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
5757#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
5758#define IEM_MC_ELSE() } else {
5759#define IEM_MC_ENDIF() } do {} while (0)
5760
5761/** @} */
5762
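/*
 * Illustrative sketch (disabled): the memory-destination path of a typical
 * "or r/m32, r32" style decoder body, showing how the microcode macros
 * above compose. It assumes bRm has already been fetched with
 * IEM_OPCODE_GET_NEXT_U8() and refers to a memory operand; REX handling is
 * omitted and iemAImpl_or_u32 follows the existing AImpl naming.
 */
#if 0 /* example only */
    IEM_MC_BEGIN(3, 2);
    IEM_MC_ARG(uint32_t *,      pu32Dst,            0);
    IEM_MC_ARG(uint32_t,        u32Src,             1);
    IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags,    2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
    IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
    IEM_MC_FETCH_GREG_U32(u32Src, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
    IEM_MC_FETCH_EFLAGS(EFlags);
    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_or_u32, pu32Dst, u32Src, pEFlags);

    IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
    IEM_MC_COMMIT_EFLAGS(EFlags);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
#endif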
5763
5764/** @name Opcode Debug Helpers.
5765 * @{
5766 */
5767#ifdef DEBUG
5768# define IEMOP_MNEMONIC(a_szMnemonic) \
5769 Log2(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, \
5770 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
5771# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
5772 Log2(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, \
5773 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
5774#else
5775# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
5776# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
5777#endif
5778
5779/** @} */
5780
5781
5782/** @name Opcode Helpers.
5783 * @{
5784 */
5785
5786/** The instruction allows no lock prefixing (in this encoding); raise #UD if
5787 * lock prefixed. */
5788#define IEMOP_HLP_NO_LOCK_PREFIX() \
5789 do \
5790 { \
5791 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
5792 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
5793 } while (0)
5794
5795/** The instruction is not available in 64-bit mode; raise #UD if we're in
5796 * 64-bit mode. */
5797#define IEMOP_HLP_NO_64BIT() \
5798 do \
5799 { \
5800 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
5801 return IEMOP_RAISE_INVALID_OPCODE(); \
5802 } while (0)
5803
5804/** The instruction defaults to 64-bit operand size if 64-bit mode. */
5805#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
5806 do \
5807 { \
5808 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
5809 iemRecalEffOpSize64Default(pIemCpu); \
5810 } while (0)
5811
5812
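/*
 * Illustrative sketch (disabled): how the opcode helpers above typically
 * open a decoder function; the opcode name and mnemonic are invented for
 * the example and the body is just a NOP-style microcode block.
 */
#if 0 /* example only */
FNIEMOP_DEF(iemOp_example)
{
    IEMOP_MNEMONIC("example");
    IEMOP_HLP_NO_LOCK_PREFIX();         /* #UD if a lock prefix is present */
    IEMOP_HLP_NO_64BIT();               /* #UD when executed in long mode */
    IEM_MC_BEGIN(0, 0);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
#endif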
5813
5814/**
5815 * Calculates the effective address of a ModR/M memory operand.
5816 *
5817 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
5818 *
5819 * @return Strict VBox status code.
5820 * @param pIemCpu The IEM per CPU data.
5821 * @param bRm The ModRM byte.
5822 * @param pGCPtrEff Where to return the effective address.
5823 */
5824static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, PRTGCPTR pGCPtrEff)
5825{
5826 LogFlow(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
5827 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5828#define SET_SS_DEF() \
5829 do \
5830 { \
5831 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
5832 pIemCpu->iEffSeg = X86_SREG_SS; \
5833 } while (0)
5834
5835/** @todo Check the effective address size crap! */
5836 switch (pIemCpu->enmEffAddrMode)
5837 {
5838 case IEMMODE_16BIT:
5839 {
5840 uint16_t u16EffAddr;
5841
5842 /* Handle the disp16 form with no registers first. */
5843 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
5844 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
5845 else
5846 {
5847 /* Get the displacement. */
5848 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
5849 {
5850 case 0: u16EffAddr = 0; break;
5851 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
5852 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
5853 default: AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
5854 }
5855
5856 /* Add the base and index registers to the disp. */
5857 switch (bRm & X86_MODRM_RM_MASK)
5858 {
5859 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
5860 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
5861 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
5862 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
5863 case 4: u16EffAddr += pCtx->si; break;
5864 case 5: u16EffAddr += pCtx->di; break;
5865 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
5866 case 7: u16EffAddr += pCtx->bx; break;
5867 }
5868 }
5869
5870 *pGCPtrEff = u16EffAddr;
5871 LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#06RGv\n", *pGCPtrEff));
5872 return VINF_SUCCESS;
5873 }
5874
5875 case IEMMODE_32BIT:
5876 {
5877 uint32_t u32EffAddr;
5878
5879 /* Handle the disp32 form with no registers first. */
5880 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
5881 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
5882 else
5883 {
5884 /* Get the register (or SIB) value. */
5885 switch ((bRm & X86_MODRM_RM_MASK))
5886 {
5887 case 0: u32EffAddr = pCtx->eax; break;
5888 case 1: u32EffAddr = pCtx->ecx; break;
5889 case 2: u32EffAddr = pCtx->edx; break;
5890 case 3: u32EffAddr = pCtx->ebx; break;
5891 case 4: /* SIB */
5892 {
5893 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
5894
5895 /* Get the index and scale it. */
5896 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
5897 {
5898 case 0: u32EffAddr = pCtx->eax; break;
5899 case 1: u32EffAddr = pCtx->ecx; break;
5900 case 2: u32EffAddr = pCtx->edx; break;
5901 case 3: u32EffAddr = pCtx->ebx; break;
5902 case 4: u32EffAddr = 0; /*none */ break;
5903 case 5: u32EffAddr = pCtx->ebp; break;
5904 case 6: u32EffAddr = pCtx->esi; break;
5905 case 7: u32EffAddr = pCtx->edi; break;
5906 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5907 }
5908 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
5909
5910 /* add base */
5911 switch (bSib & X86_SIB_BASE_MASK)
5912 {
5913 case 0: u32EffAddr += pCtx->eax; break;
5914 case 1: u32EffAddr += pCtx->ecx; break;
5915 case 2: u32EffAddr += pCtx->edx; break;
5916 case 3: u32EffAddr += pCtx->ebx; break;
5917 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
5918 case 5:
5919 if ((bRm & X86_MODRM_MOD_MASK) != 0)
5920 {
5921 u32EffAddr += pCtx->ebp;
5922 SET_SS_DEF();
5923 }
5924 else
5925 {
5926 uint32_t u32Disp;
5927 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
5928 u32EffAddr += u32Disp;
5929 }
5930 break;
5931 case 6: u32EffAddr += pCtx->esi; break;
5932 case 7: u32EffAddr += pCtx->edi; break;
5933 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5934 }
5935 break;
5936 }
5937 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
5938 case 6: u32EffAddr = pCtx->esi; break;
5939 case 7: u32EffAddr = pCtx->edi; break;
5940 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5941 }
5942
5943 /* Get and add the displacement. */
5944 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
5945 {
5946 case 0:
5947 break;
5948 case 1:
5949 {
5950 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
5951 u32EffAddr += i8Disp;
5952 break;
5953 }
5954 case 2:
5955 {
5956 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
5957 u32EffAddr += u32Disp;
5958 break;
5959 }
5960 default:
5961 AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
5962 }
5963
5964 }
5965 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
5966 *pGCPtrEff = u32EffAddr;
5967 else
5968 {
5969 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
5970 *pGCPtrEff = u32EffAddr & UINT16_MAX;
5971 }
5972 LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
5973 return VINF_SUCCESS;
5974 }
5975
5976 case IEMMODE_64BIT:
5977 {
5978 uint64_t u64EffAddr;
5979
5980 /* Handle the rip+disp32 form with no registers first. */
5981 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
5982 {
5983 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
5984 u64EffAddr += pCtx->rip + pIemCpu->offOpcode;
5985 }
5986 else
5987 {
5988 /* Get the register (or SIB) value. */
5989 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
5990 {
5991 case 0: u64EffAddr = pCtx->rax; break;
5992 case 1: u64EffAddr = pCtx->rcx; break;
5993 case 2: u64EffAddr = pCtx->rdx; break;
5994 case 3: u64EffAddr = pCtx->rbx; break;
5995 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
5996 case 6: u64EffAddr = pCtx->rsi; break;
5997 case 7: u64EffAddr = pCtx->rdi; break;
5998 case 8: u64EffAddr = pCtx->r8; break;
5999 case 9: u64EffAddr = pCtx->r9; break;
6000 case 10: u64EffAddr = pCtx->r10; break;
6001 case 11: u64EffAddr = pCtx->r11; break;
6002 case 13: u64EffAddr = pCtx->r13; break;
6003 case 14: u64EffAddr = pCtx->r14; break;
6004 case 15: u64EffAddr = pCtx->r15; break;
6005 /* SIB */
6006 case 4:
6007 case 12:
6008 {
6009 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
6010
6011 /* Get the index and scale it. */
6012 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
6013 {
6014 case 0: u64EffAddr = pCtx->rax; break;
6015 case 1: u64EffAddr = pCtx->rcx; break;
6016 case 2: u64EffAddr = pCtx->rdx; break;
6017 case 3: u64EffAddr = pCtx->rbx; break;
6018 case 4: u64EffAddr = 0; /*none */ break;
6019 case 5: u64EffAddr = pCtx->rbp; break;
6020 case 6: u64EffAddr = pCtx->rsi; break;
6021 case 7: u64EffAddr = pCtx->rdi; break;
6022 case 8: u64EffAddr = pCtx->r8; break;
6023 case 9: u64EffAddr = pCtx->r9; break;
6024 case 10: u64EffAddr = pCtx->r10; break;
6025 case 11: u64EffAddr = pCtx->r11; break;
6026 case 12: u64EffAddr = pCtx->r12; break;
6027 case 13: u64EffAddr = pCtx->r13; break;
6028 case 14: u64EffAddr = pCtx->r14; break;
6029 case 15: u64EffAddr = pCtx->r15; break;
6030 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6031 }
6032 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
6033
6034 /* add base */
6035 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
6036 {
6037 case 0: u64EffAddr += pCtx->rax; break;
6038 case 1: u64EffAddr += pCtx->rcx; break;
6039 case 2: u64EffAddr += pCtx->rdx; break;
6040 case 3: u64EffAddr += pCtx->rbx; break;
6041 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
6042 case 6: u64EffAddr += pCtx->rsi; break;
6043 case 7: u64EffAddr += pCtx->rdi; break;
6044 case 8: u64EffAddr += pCtx->r8; break;
6045 case 9: u64EffAddr += pCtx->r9; break;
6046 case 10: u64EffAddr += pCtx->r10; break;
6047 case 11: u64EffAddr += pCtx->r11; break;
 case 12: u64EffAddr += pCtx->r12; break;
6048 case 14: u64EffAddr += pCtx->r14; break;
6049 case 15: u64EffAddr += pCtx->r15; break;
6050 /* complicated encodings */
6051 case 5:
6052 case 13:
6053 if ((bRm & X86_MODRM_MOD_MASK) != 0)
6054 {
6055 if (!pIemCpu->uRexB)
6056 {
6057 u64EffAddr += pCtx->rbp;
6058 SET_SS_DEF();
6059 }
6060 else
6061 u64EffAddr += pCtx->r13;
6062 }
6063 else
6064 {
6065 uint32_t u32Disp;
6066 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
6067 u64EffAddr += (int32_t)u32Disp;
6068 }
6069 break;
6070 }
6071 break;
6072 }
6073 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6074 }
6075
6076 /* Get and add the displacement. */
6077 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
6078 {
6079 case 0:
6080 break;
6081 case 1:
6082 {
6083 int8_t i8Disp;
6084 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
6085 u64EffAddr += i8Disp;
6086 break;
6087 }
6088 case 2:
6089 {
6090 uint32_t u32Disp;
6091 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
6092 u64EffAddr += (int32_t)u32Disp;
6093 break;
6094 }
6095 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
6096 }
6097
6098 }
6099 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
6100 *pGCPtrEff = u64EffAddr;
6101 else
6102 *pGCPtrEff = u64EffAddr & UINT32_MAX; /* 16-bit addressing isn't available in 64-bit mode. */
6103 LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
6104 return VINF_SUCCESS;
6105 }
6106 }
6107
6108 AssertFailedReturn(VERR_INTERNAL_ERROR_3);
6109}
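/*
 * Worked example (illustrative): with a 16-bit effective address size and
 * bRm = 0x42 (mod=1, rm=2) the code above fetches one sign-extended disp8
 * and returns bp + si + disp8, with SS becoming the default segment via
 * SET_SS_DEF(). With a 32-bit address size and rm=4 an SIB byte is fetched
 * instead and base, scaled index and displacement are combined.
 */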
6110
6111/** @} */
6112
6113
6114
6115/*
6116 * Include the instructions
6117 */
6118#include "IEMAllInstructions.cpp.h"
6119
6120
6121
6122
6123#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
6124
6125/**
6126 * Sets up execution verification mode.
6127 */
6128static void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
6129{
6130 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
6131 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
6132
6133 /*
6134 * Enable verification and/or logging.
6135 */
6136 pIemCpu->fNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
6137 if ( pIemCpu->fNoRem
6138#if 0 /* auto enable on first paged protected mode interrupt */
6139 && pOrgCtx->eflags.Bits.u1IF
6140 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
6141 && TRPMHasTrap(pVCpu)
6142 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
6143#endif
6144#if 0
6145 && pOrgCtx->cs == 0x10
6146 && ( pOrgCtx->rip == 0x90119e3e
6147 || pOrgCtx->rip == 0x901d9810
6148 )
6149#endif
6150#if 1 /* Auto enable DSL - FPU stuff. */
6151 && pOrgCtx->cs == 0x10
6152 && ( pOrgCtx->rip == 0xc02ec07f
6153 || pOrgCtx->rip == 0xc02ec082
6154 || pOrgCtx->rip == 0xc02ec0c9
6155 )
6156#endif
6157#if 0
6158 && pOrgCtx->rip == 0x9022bb3a
6159#endif
6160#if 0
6161 && 0
6162#endif
6163 )
6164 {
6165 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
6166 RTLogFlags(NULL, "enabled");
6167 pIemCpu->fNoRem = false;
6168 }
6169
6170 /*
6171 * Switch state.
6172 */
6173 if (IEM_VERIFICATION_ENABLED(pIemCpu))
6174 {
6175 static CPUMCTX s_DebugCtx; /* Ugly! */
6176
6177 s_DebugCtx = *pOrgCtx;
6178 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
6179 }
6180
6181 /*
6182 * See if there is an interrupt pending in TRPM and inject it if we can.
6183 */
6184 if ( pOrgCtx->eflags.Bits.u1IF
6185 && TRPMHasTrap(pVCpu)
6186 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
6187 {
6188 uint8_t u8TrapNo;
6189 TRPMEVENT enmType;
6190 RTGCUINT uErrCode;
6191 RTGCPTR uCr2;
6192 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2); AssertRC(rc2);
6193 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2);
6194 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
6195 TRPMResetTrap(pVCpu);
6196 }
6197
6198 /*
6199 * Reset the counters.
6200 */
6201 pIemCpu->cIOReads = 0;
6202 pIemCpu->cIOWrites = 0;
6203 pIemCpu->fUndefinedEFlags = 0;
6204
6205 if (IEM_VERIFICATION_ENABLED(pIemCpu))
6206 {
6207 /*
6208 * Free all verification records.
6209 */
6210 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
6211 pIemCpu->pIemEvtRecHead = NULL;
6212 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
6213 do
6214 {
6215 while (pEvtRec)
6216 {
6217 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
6218 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
6219 pIemCpu->pFreeEvtRec = pEvtRec;
6220 pEvtRec = pNext;
6221 }
6222 pEvtRec = pIemCpu->pOtherEvtRecHead;
6223 pIemCpu->pOtherEvtRecHead = NULL;
6224 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
6225 } while (pEvtRec);
6226 }
6227}
6228
6229
6230/**
6231 * Allocate an event record.
6232 * @returns Pointer to a record, or NULL if verification is disabled or allocation fails.
6233 */
6234static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
6235{
6236 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
6237 return NULL;
6238
6239 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
6240 if (pEvtRec)
6241 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
6242 else
6243 {
6244 if (!pIemCpu->ppIemEvtRecNext)
6245 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
6246
6247 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
6248 if (!pEvtRec)
6249 return NULL;
6250 }
6251 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
6252 pEvtRec->pNext = NULL;
6253 return pEvtRec;
6254}
6255
6256
6257/**
6258 * IOMMMIORead notification.
6259 */
6260VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
6261{
6262 PVMCPU pVCpu = VMMGetCpu(pVM);
6263 if (!pVCpu)
6264 return;
6265 PIEMCPU pIemCpu = &pVCpu->iem.s;
6266 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6267 if (!pEvtRec)
6268 return;
6269 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6270 pEvtRec->u.RamRead.GCPhys = GCPhys;
6271 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
6272 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
6273 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
6274}
6275
6276
6277/**
6278 * IOMMMIOWrite notification.
6279 */
6280VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
6281{
6282 PVMCPU pVCpu = VMMGetCpu(pVM);
6283 if (!pVCpu)
6284 return;
6285 PIEMCPU pIemCpu = &pVCpu->iem.s;
6286 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6287 if (!pEvtRec)
6288 return;
6289 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6290 pEvtRec->u.RamWrite.GCPhys = GCPhys;
6291 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
6292 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
6293 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
6294 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
6295 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
6296 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
6297 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
6298}
6299
6300
6301/**
6302 * IOMIOPortRead notification.
6303 */
6304VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
6305{
6306 PVMCPU pVCpu = VMMGetCpu(pVM);
6307 if (!pVCpu)
6308 return;
6309 PIEMCPU pIemCpu = &pVCpu->iem.s;
6310 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6311 if (!pEvtRec)
6312 return;
6313 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
6314 pEvtRec->u.IOPortRead.Port = Port;
6315 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
6316 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
6317 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
6318}
6319
6320/**
6321 * IOMIOPortWrite notification.
6322 */
6323VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
6324{
6325 PVMCPU pVCpu = VMMGetCpu(pVM);
6326 if (!pVCpu)
6327 return;
6328 PIEMCPU pIemCpu = &pVCpu->iem.s;
6329 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6330 if (!pEvtRec)
6331 return;
6332 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
6333 pEvtRec->u.IOPortWrite.Port = Port;
6334 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
6335 pEvtRec->u.IOPortWrite.u32Value = u32Value;
6336 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
6337 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
6338}
6339
6340
6341VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
6342{
6343 AssertFailed();
6344}
6345
6346
6347VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
6348{
6349 AssertFailed();
6350}
6351
6352
6353/**
6354 * Fakes and records an I/O port read.
6355 *
6356 * @returns VINF_SUCCESS.
6357 * @param pIemCpu The IEM per CPU data.
6358 * @param Port The I/O port.
6359 * @param pu32Value Where to store the fake value.
6360 * @param cbValue The size of the access.
6361 */
6362static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
6363{
6364 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6365 if (pEvtRec)
6366 {
6367 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
6368 pEvtRec->u.IOPortRead.Port = Port;
6369 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
6370 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6371 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6372 }
6373 pIemCpu->cIOReads++;
6374 *pu32Value = 0xcccccccc;
6375 return VINF_SUCCESS;
6376}
6377
6378
6379/**
6380 * Fakes and records an I/O port write.
6381 *
6382 * @returns VINF_SUCCESS.
6383 * @param pIemCpu The IEM per CPU data.
6384 * @param Port The I/O port.
6385 * @param u32Value The value being written.
6386 * @param cbValue The size of the access.
6387 */
6388static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
6389{
6390 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6391 if (pEvtRec)
6392 {
6393 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
6394 pEvtRec->u.IOPortWrite.Port = Port;
6395 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
6396 pEvtRec->u.IOPortWrite.u32Value = u32Value;
6397 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6398 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6399 }
6400 pIemCpu->cIOWrites++;
6401 return VINF_SUCCESS;
6402}
6403
6404
6405/**
6406 * Used to add extra details about a stub case.
6407 * @param pIemCpu The IEM per CPU state.
6408 */
6409static void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
6410{
6411 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6412 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6413 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
6414 char szRegs[4096];
6415 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6416 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6417 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6418 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6419 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6420 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6421 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6422 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6423 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6424 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6425 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6426 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6427 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6428 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6429 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6430 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6431 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6432 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6433 " efer=%016VR{efer}\n"
6434 " pat=%016VR{pat}\n"
6435 " sf_mask=%016VR{sf_mask}\n"
6436 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6437 " lstar=%016VR{lstar}\n"
6438 " star=%016VR{star} cstar=%016VR{cstar}\n"
6439 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6440 );
6441
6442 char szInstr1[256];
6443 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip - pIemCpu->offOpcode,
6444 DBGF_DISAS_FLAGS_DEFAULT_MODE,
6445 szInstr1, sizeof(szInstr1), NULL);
6446 char szInstr2[256];
6447 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
6448 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6449 szInstr2, sizeof(szInstr2), NULL);
6450
6451 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
6452}
6453
6454
6455/**
6456 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
6457 * dump to the assertion info.
6458 *
6459 * @param pEvtRec The record to dump.
6460 */
6461static void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
6462{
6463 switch (pEvtRec->enmEvent)
6464 {
6465 case IEMVERIFYEVENT_IOPORT_READ:
6466 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
6467 pEvtRec->u.IOPortRead.Port,
6468 pEvtRec->u.IOPortRead.cbValue);
6469 break;
6470 case IEMVERIFYEVENT_IOPORT_WRITE:
6471 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
6472 pEvtRec->u.IOPortWrite.Port,
6473 pEvtRec->u.IOPortWrite.cbValue,
6474 pEvtRec->u.IOPortWrite.u32Value);
6475 break;
6476 case IEMVERIFYEVENT_RAM_READ:
6477 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
6478 pEvtRec->u.RamRead.GCPhys,
6479 pEvtRec->u.RamRead.cb);
6480 break;
6481 case IEMVERIFYEVENT_RAM_WRITE:
6482 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*RHxs\n",
6483 pEvtRec->u.RamWrite.GCPhys,
6484 pEvtRec->u.RamWrite.cb,
6485 (int)pEvtRec->u.RamWrite.cb,
6486 pEvtRec->u.RamWrite.ab);
6487 break;
6488 default:
6489 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
6490 break;
6491 }
6492}
6493
6494
6495/**
6496 * Raises an assertion on the two specified records, showing the given message
6497 * with dumps of both records attached.
6498 *
6499 * @param pIemCpu The IEM per CPU data.
6500 * @param pEvtRec1 The first record.
6501 * @param pEvtRec2 The second record.
6502 * @param pszMsg The message explaining why we're asserting.
6503 */
6504static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
6505{
6506 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
6507 iemVerifyAssertAddRecordDump(pEvtRec1);
6508 iemVerifyAssertAddRecordDump(pEvtRec2);
6509 iemVerifyAssertMsg2(pIemCpu);
6510 RTAssertPanic();
6511}
6512
6513
6514/**
6515 * Raises an assertion on the specified record, showing the given message with
6516 * a record dump attached.
6517 *
6518 * @param pIemCpu The IEM per CPU data.
6519 * @param pEvtRec The record to dump.
6520 * @param pszMsg The message explaining why we're asserting.
6521 */
6522static void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
6523{
6524 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
6525 iemVerifyAssertAddRecordDump(pEvtRec);
6526 iemVerifyAssertMsg2(pIemCpu);
6527 RTAssertPanic();
6528}
6529
6530
6531/**
6532 * Verifies a write record.
6533 *
6534 * @param pIemCpu The IEM per CPU data.
6535 * @param pEvtRec The write record.
6536 */
6537static void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec)
6538{
6539 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
6540 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
6541 if ( RT_FAILURE(rc)
6542 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
6543 {
6544 /* fend off ins */
6545 if ( !pIemCpu->cIOReads
6546 || pEvtRec->u.RamWrite.ab[0] != 0xcc
6547 || ( pEvtRec->u.RamWrite.cb != 1
6548 && pEvtRec->u.RamWrite.cb != 2
6549 && pEvtRec->u.RamWrite.cb != 4) )
6550 {
6551 /* fend off ROMs: the VGA BIOS at 0c0000h, the upper BIOS area at 0e0000h and the BIOS alias just below 4GB (the unsigned subtraction turns each test into a single range check) */
6552 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000c0000) > UINT32_C(0x8000)
6553 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000e0000) > UINT32_C(0x20000)
6554 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
6555 {
6556 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
6557 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
6558 RTAssertMsg2Add("REM: %.*Rhxs\n"
6559 "IEM: %.*Rhxs\n",
6560 pEvtRec->u.RamWrite.cb, abBuf,
6561 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
6562 iemVerifyAssertAddRecordDump(pEvtRec);
6563 iemVerifyAssertMsg2(pIemCpu);
6564 RTAssertPanic();
6565 }
6566 }
6567 }
6568
6569}
6570
6571/**
6572 * Performs the post-execution verification checks.
6573 */
6574static void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
6575{
6576 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
6577 return;
6578
6579 /*
6580 * Switch back the state.
6581 */
6582 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
6583 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
6584 Assert(pOrgCtx != pDebugCtx);
6585 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
6586
6587 /*
6588 * Execute the instruction in REM.
6589 */
6590 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6591 EMRemLock(pVM);
6592 int rc = REMR3EmulateInstruction(pVM, IEMCPU_TO_VMCPU(pIemCpu));
6593 AssertRC(rc);
6594 EMRemUnlock(pVM);
6595
6596 /*
6597 * Compare the register states.
6598 */
6599 unsigned cDiffs = 0;
6600 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
6601 {
6602 Log(("REM and IEM ends up with different registers!\n"));
6603
6604# define CHECK_FIELD(a_Field) \
6605 do \
6606 { \
6607 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
6608 { \
6609 switch (sizeof(pOrgCtx->a_Field)) \
6610 { \
6611 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
6612 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - rem=%04x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
6613 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - rem=%08x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
6614 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - rem=%016llx\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
6615 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
6616 } \
6617 cDiffs++; \
6618 } \
6619 } while (0)
6620
6621# define CHECK_BIT_FIELD(a_Field) \
6622 do \
6623 { \
6624 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
6625 { \
6626 RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); \
6627 cDiffs++; \
6628 } \
6629 } while (0)
6630
6631# define CHECK_SEL(a_Sel) \
6632 do \
6633 { \
6634 CHECK_FIELD(a_Sel); \
6635 if ( pOrgCtx->a_Sel##Hid.Attr.u != pDebugCtx->a_Sel##Hid.Attr.u \
6636 && (pOrgCtx->a_Sel##Hid.Attr.u | X86_SEL_TYPE_ACCESSED) != pDebugCtx->a_Sel##Hid.Attr.u) \
6637 { \
6638 RTAssertMsg2Weak(" %8sHid.Attr differs - iem=%02x - rem=%02x\n", #a_Sel, pDebugCtx->a_Sel##Hid.Attr.u, pOrgCtx->a_Sel##Hid.Attr.u); \
6639 cDiffs++; \
6640 } \
6641 CHECK_FIELD(a_Sel##Hid.u64Base); \
6642 CHECK_FIELD(a_Sel##Hid.u32Limit); \
6643 } while (0)
6644
6645 if (memcmp(&pOrgCtx->fpu, &pDebugCtx->fpu, sizeof(pDebugCtx->fpu)))
6646 {
6647 RTAssertMsg2Weak(" the FPU state differs\n");
6648 cDiffs++;
6649 CHECK_FIELD(fpu.FCW);
6650 CHECK_FIELD(fpu.FSW);
6651 CHECK_FIELD(fpu.FTW);
6652 CHECK_FIELD(fpu.FOP);
6653 CHECK_FIELD(fpu.FPUIP);
6654 CHECK_FIELD(fpu.CS);
6655 CHECK_FIELD(fpu.Rsrvd1);
6656 CHECK_FIELD(fpu.FPUDP);
6657 CHECK_FIELD(fpu.DS);
6658 CHECK_FIELD(fpu.Rsrvd2);
6659 CHECK_FIELD(fpu.MXCSR);
6660 CHECK_FIELD(fpu.MXCSR_MASK);
6661 CHECK_FIELD(fpu.aRegs[0].au64[0]); CHECK_FIELD(fpu.aRegs[0].au64[1]);
6662 CHECK_FIELD(fpu.aRegs[1].au64[0]); CHECK_FIELD(fpu.aRegs[1].au64[1]);
6663 CHECK_FIELD(fpu.aRegs[2].au64[0]); CHECK_FIELD(fpu.aRegs[2].au64[1]);
6664 CHECK_FIELD(fpu.aRegs[3].au64[0]); CHECK_FIELD(fpu.aRegs[3].au64[1]);
6665 CHECK_FIELD(fpu.aRegs[4].au64[0]); CHECK_FIELD(fpu.aRegs[4].au64[1]);
6666 CHECK_FIELD(fpu.aRegs[5].au64[0]); CHECK_FIELD(fpu.aRegs[5].au64[1]);
6667 CHECK_FIELD(fpu.aRegs[6].au64[0]); CHECK_FIELD(fpu.aRegs[6].au64[1]);
6668 CHECK_FIELD(fpu.aRegs[7].au64[0]); CHECK_FIELD(fpu.aRegs[7].au64[1]);
6669 CHECK_FIELD(fpu.aXMM[ 0].au64[0]); CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
6670 CHECK_FIELD(fpu.aXMM[ 1].au64[0]); CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
6671 CHECK_FIELD(fpu.aXMM[ 2].au64[0]); CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
6672 CHECK_FIELD(fpu.aXMM[ 3].au64[0]); CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
6673 CHECK_FIELD(fpu.aXMM[ 4].au64[0]); CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
6674 CHECK_FIELD(fpu.aXMM[ 5].au64[0]); CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
6675 CHECK_FIELD(fpu.aXMM[ 6].au64[0]); CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
6676 CHECK_FIELD(fpu.aXMM[ 7].au64[0]); CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
6677 CHECK_FIELD(fpu.aXMM[ 8].au64[0]); CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
6678 CHECK_FIELD(fpu.aXMM[ 9].au64[0]); CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
6679 CHECK_FIELD(fpu.aXMM[10].au64[0]); CHECK_FIELD(fpu.aXMM[10].au64[1]);
6680 CHECK_FIELD(fpu.aXMM[11].au64[0]); CHECK_FIELD(fpu.aXMM[11].au64[1]);
6681 CHECK_FIELD(fpu.aXMM[12].au64[0]); CHECK_FIELD(fpu.aXMM[12].au64[1]);
6682 CHECK_FIELD(fpu.aXMM[13].au64[0]); CHECK_FIELD(fpu.aXMM[13].au64[1]);
6683 CHECK_FIELD(fpu.aXMM[14].au64[0]); CHECK_FIELD(fpu.aXMM[14].au64[1]);
6684 CHECK_FIELD(fpu.aXMM[15].au64[0]); CHECK_FIELD(fpu.aXMM[15].au64[1]);
6685 for (unsigned i = 0; i < RT_ELEMENTS(pOrgCtx->fpu.au32RsrvdRest); i++)
6686 CHECK_FIELD(fpu.au32RsrvdRest[i]);
6687 }
6688 CHECK_FIELD(rip);
6689 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
6690 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
6691 {
6692 RTAssertMsg2Weak(" rflags differs - iem=%08llx rem=%08llx\n", pDebugCtx->rflags.u, pOrgCtx->rflags.u);
6693 CHECK_BIT_FIELD(rflags.Bits.u1CF);
6694 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
6695 CHECK_BIT_FIELD(rflags.Bits.u1PF);
6696 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
6697 CHECK_BIT_FIELD(rflags.Bits.u1AF);
6698 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
6699 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
6700 CHECK_BIT_FIELD(rflags.Bits.u1SF);
6701 CHECK_BIT_FIELD(rflags.Bits.u1TF);
6702 CHECK_BIT_FIELD(rflags.Bits.u1IF);
6703 CHECK_BIT_FIELD(rflags.Bits.u1DF);
6704 CHECK_BIT_FIELD(rflags.Bits.u1OF);
6705 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
6706 CHECK_BIT_FIELD(rflags.Bits.u1NT);
6707 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
6708 CHECK_BIT_FIELD(rflags.Bits.u1RF);
6709 CHECK_BIT_FIELD(rflags.Bits.u1VM);
6710 CHECK_BIT_FIELD(rflags.Bits.u1AC);
6711 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
6712 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
6713 CHECK_BIT_FIELD(rflags.Bits.u1ID);
6714 }
6715
6716 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
6717 CHECK_FIELD(rax);
6718 CHECK_FIELD(rcx);
6719 if (!pIemCpu->fIgnoreRaxRdx)
6720 CHECK_FIELD(rdx);
6721 CHECK_FIELD(rbx);
6722 CHECK_FIELD(rsp);
6723 CHECK_FIELD(rbp);
6724 CHECK_FIELD(rsi);
6725 CHECK_FIELD(rdi);
6726 CHECK_FIELD(r8);
6727 CHECK_FIELD(r9);
6728 CHECK_FIELD(r10);
6729 CHECK_FIELD(r11);
6730 CHECK_FIELD(r12);
6731 CHECK_FIELD(r13); CHECK_FIELD(r14); CHECK_FIELD(r15);
6732 CHECK_SEL(cs);
6733 CHECK_SEL(ss);
6734 CHECK_SEL(ds);
6735 CHECK_SEL(es);
6736 CHECK_SEL(fs);
6737 CHECK_SEL(gs);
6738 CHECK_FIELD(cr0);
6739 CHECK_FIELD(cr2);
6740 CHECK_FIELD(cr3);
6741 CHECK_FIELD(cr4);
6742 CHECK_FIELD(dr[0]);
6743 CHECK_FIELD(dr[1]);
6744 CHECK_FIELD(dr[2]);
6745 CHECK_FIELD(dr[3]);
6746 CHECK_FIELD(dr[6]);
6747 if ((pOrgCtx->dr[7] & ~X86_DR7_MB1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_MB1_MASK)) /* REM 'mov drX,greg' bug.*/
6748 CHECK_FIELD(dr[7]);
6749 CHECK_FIELD(gdtr.cbGdt);
6750 CHECK_FIELD(gdtr.pGdt);
6751 CHECK_FIELD(idtr.cbIdt);
6752 CHECK_FIELD(idtr.pIdt);
6753 CHECK_FIELD(ldtr);
6754 CHECK_FIELD(ldtrHid.u64Base);
6755 CHECK_FIELD(ldtrHid.u32Limit);
6756 CHECK_FIELD(ldtrHid.Attr.u);
6757 CHECK_FIELD(tr);
6758 CHECK_FIELD(trHid.u64Base);
6759 CHECK_FIELD(trHid.u32Limit);
6760 CHECK_FIELD(trHid.Attr.u);
6761 CHECK_FIELD(SysEnter.cs);
6762 CHECK_FIELD(SysEnter.eip);
6763 CHECK_FIELD(SysEnter.esp);
6764 CHECK_FIELD(msrEFER);
6765 CHECK_FIELD(msrSTAR);
6766 CHECK_FIELD(msrPAT);
6767 CHECK_FIELD(msrLSTAR);
6768 CHECK_FIELD(msrCSTAR);
6769 CHECK_FIELD(msrSFMASK);
6770 CHECK_FIELD(msrKERNELGSBASE);
6771
6772 if (cDiffs != 0)
6773 {
6774 if (LogIs3Enabled())
6775 DBGFR3Info(pVM, "cpumguest", "verbose", NULL);
6776 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
6777 iemVerifyAssertMsg2(pIemCpu);
6778 RTAssertPanic();
6779 }
6780# undef CHECK_FIELD
6781# undef CHECK_BIT_FIELD
6782 }
6783
6784 /*
6785 * If the register state compared fine, check the verification event
6786 * records.
6787 */
6788 if (cDiffs == 0)
6789 {
6790 /*
6791 * Compare verification event records.
6792 * - I/O port accesses should be a 1:1 match.
6793 */
6794 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
6795 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
6796 while (pIemRec && pOtherRec)
6797 {
6798 /* Since the other side (REM) may not record all RAM writes and reads, skip extra
6799 IEM RAM records here, but verify that any skipped write matches guest memory. */
6800 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
6801 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
6802 && pIemRec->pNext)
6803 {
6804 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
6805 iemVerifyWriteRecord(pIemCpu, pIemRec);
6806 pIemRec = pIemRec->pNext;
6807 }
6808
6809 /* Do the compare. */
6810 if (pIemRec->enmEvent != pOtherRec->enmEvent)
6811 {
6812 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
6813 break;
6814 }
6815 bool fEquals;
6816 switch (pIemRec->enmEvent)
6817 {
6818 case IEMVERIFYEVENT_IOPORT_READ:
6819 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
6820 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
6821 break;
6822 case IEMVERIFYEVENT_IOPORT_WRITE:
6823 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
6824 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
6825 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
6826 break;
6827 case IEMVERIFYEVENT_RAM_READ:
6828 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
6829 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
6830 break;
6831 case IEMVERIFYEVENT_RAM_WRITE:
6832 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
6833 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
6834 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
6835 break;
6836 default:
6837 fEquals = false;
6838 break;
6839 }
6840 if (!fEquals)
6841 {
6842 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
6843 break;
6844 }
6845
6846 /* advance */
6847 pIemRec = pIemRec->pNext;
6848 pOtherRec = pOtherRec->pNext;
6849 }
6850
6851 /* Ignore extra writes and reads. */
6852 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
6853 {
6854 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
6855 iemVerifyWriteRecord(pIemCpu, pIemRec);
6856 pIemRec = pIemRec->pNext;
6857 }
6858 if (pIemRec != NULL)
6859 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
6860 else if (pOtherRec != NULL)
6861 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra Other record!");
6862 }
6863 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
6864
6865#if 0
6866 /*
6867 * HACK ALERT! You don't normally want to verify a whole boot sequence.
6868 */
6869 if (pIemCpu->cInstructions == 1)
6870 RTLogFlags(NULL, "disabled");
6871#endif
6872}
6873
6874#else /* !IEM_VERIFICATION_MODE || !IN_RING3 */
6875
6876/* stubs */
6877static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
6878{
6879 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
6880 return VERR_INTERNAL_ERROR;
6881}
6882
6883static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
6884{
6885 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
6886 return VERR_INTERNAL_ERROR;
6887}
6888
6889#endif /* !IEM_VERIFICATION_MODE || !IN_RING3 */
6890
6891
6892/**
6893 * Execute one instruction.
6894 *
6895 * @return Strict VBox status code.
6896 * @param pVCpu The current virtual CPU.
6897 */
6898VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
6899{
6900 PIEMCPU pIemCpu = &pVCpu->iem.s;
6901
6902#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
6903 iemExecVerificationModeSetup(pIemCpu);
6904#endif
6905#ifdef LOG_ENABLED
6906 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6907 if (LogIs2Enabled())
6908 {
6909 char szInstr[256];
6910 uint32_t cbInstr = 0;
6911 DBGFR3DisasInstrEx(pVCpu->pVMR3, pVCpu->idCpu, 0, 0,
6912 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6913 szInstr, sizeof(szInstr), &cbInstr);
6914
6915 Log2(("**** "
6916 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
6917 " eip=%08x esp=%08x ebp=%08x iopl=%d\n"
6918 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
6919 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
6920 " %s\n"
6921 ,
6922 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
6923 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL,
6924 (RTSEL)pCtx->cs, (RTSEL)pCtx->ss, (RTSEL)pCtx->ds, (RTSEL)pCtx->es,
6925 (RTSEL)pCtx->fs, (RTSEL)pCtx->gs, pCtx->eflags.u,
6926 pCtx->fpu.FSW, pCtx->fpu.FCW, pCtx->fpu.FTW, pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK,
6927 szInstr));
6928
6929 if (LogIs3Enabled())
6930 DBGFR3Info(pVCpu->pVMR3, "cpumguest", "verbose", NULL);
6931 }
6932#endif
6933
6934 /*
6935 * Do the decoding and emulation.
6936 */
6937 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
6938 if (rcStrict != VINF_SUCCESS)
6939 {
6940#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
6941 iemExecVerificationModeCheck(pIemCpu);
6942#endif
6943 return rcStrict;
6944 }
6945
6946 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
6947 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
6948 if (rcStrict == VINF_SUCCESS)
6949 pIemCpu->cInstructions++;
6950//#ifdef DEBUG
6951// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
6952//#endif
6953
6954 /* Execute the next instruction as well if a cli, pop ss or
6955 mov ss, Gr has just completed successfully. */
6956 if ( rcStrict == VINF_SUCCESS
6957 && VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
6958 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
6959 {
6960 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
6961 if (rcStrict == VINF_SUCCESS)
6962 {
6963 b; IEM_OPCODE_GET_NEXT_U8(&b);
6964 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
6965 if (rcStrict == VINF_SUCCESS)
6966 pIemCpu->cInstructions++;
6967 }
6968 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
6969 }
6970
6971#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
6972 /*
6973 * Assert some sanity.
6974 */
6975 iemExecVerificationModeCheck(pIemCpu);
6976#endif
6977 return rcStrict;
6978}
6979
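/**
 * @remarks Usage illustration only (not part of the IEM API surface): a
 *          minimal sketch of how a ring-3 caller might drive IEMExecOne() for
 *          a handful of instructions.  The iteration limit and the error
 *          reaction shown here are assumptions made for the example.
 * @code
 *      VBOXSTRICTRC rcStrict = VINF_SUCCESS;
 *      for (unsigned cInstrs = 0; cInstrs < 16 && rcStrict == VINF_SUCCESS; cInstrs++)
 *          rcStrict = IEMExecOne(pVCpu);   // interpret one guest instruction
 *      if (rcStrict != VINF_SUCCESS)
 *          Log(("IEMExecOne -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
 * @endcode
 */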
6980
6981/**
6982 * Injects a trap, fault, abort, software interrupt or external interrupt.
6983 *
6984 * The parameter list matches TRPMQueryTrapAll pretty closely.
6985 *
6986 * @returns Strict VBox status code.
6987 * @param pVCpu The current virtual CPU.
6988 * @param u8TrapNo The trap number.
6989 * @param enmType What type is it (trap/fault/abort), software
6990 * interrupt or hardware interrupt.
6991 * @param uErrCode The error code if applicable.
6992 * @param uCr2 The CR2 value if applicable.
6993 */
6994VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2)
6995{
6996 iemInitDecoder(&pVCpu->iem.s);
6997
6998 uint32_t fFlags;
6999 switch (enmType)
7000 {
7001 case TRPM_HARDWARE_INT:
7002 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
7003 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
7004 uErrCode = uCr2 = 0;
7005 break;
7006
7007 case TRPM_SOFTWARE_INT:
7008 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
7009 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
7010 uErrCode = uCr2 = 0;
7011 break;
7012
7013 case TRPM_TRAP:
7014 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
7015 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
7016 if (u8TrapNo == X86_XCPT_PF)
7017 fFlags |= IEM_XCPT_FLAGS_CR2;
7018 switch (u8TrapNo)
7019 {
7020 case X86_XCPT_DF:
7021 case X86_XCPT_TS:
7022 case X86_XCPT_NP:
7023 case X86_XCPT_SS:
7024 case X86_XCPT_PF:
7025 case X86_XCPT_AC:
7026 fFlags |= IEM_XCPT_FLAGS_ERR;
7027 break;
7028 }
7029 break;
7030
7031 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7032 }
7033
7034 return iemRaiseXcptOrInt(&pVCpu->iem.s, 0, u8TrapNo, fFlags, uErrCode, uCr2);
7035}
7036
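/**
 * @remarks Usage illustration only: a minimal sketch of injecting a guest page
 *          fault through IEMInjectTrap().  The error code bits and the faulting
 *          address (GCPtrFault) below are placeholders for whatever the caller
 *          obtained from its trap source (e.g. TRPM).
 * @code
 *      RTGCPTR  const GCPtrFault = 0;                              // placeholder address
 *      uint16_t const uErrCode   = X86_TRAP_PF_P | X86_TRAP_PF_RW; // placeholder error code
 *      VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
 *                                            uErrCode, GCPtrFault);
 * @endcode
 */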