VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@40138

Last change on this file since 40138 was 40093, checked in by vboxsync, 13 years ago

IEM: fld m64i

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 253.2 KB
1/* $Id: IEMAll.cpp 40093 2012-02-13 13:05:21Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 */
43
44/*******************************************************************************
45* Header Files *
46*******************************************************************************/
47#define LOG_GROUP LOG_GROUP_IEM
48#include <VBox/vmm/iem.h>
49#include <VBox/vmm/pgm.h>
50#include <VBox/vmm/iom.h>
51#include <VBox/vmm/em.h>
52#include <VBox/vmm/tm.h>
53#include <VBox/vmm/dbgf.h>
54#ifdef IEM_VERIFICATION_MODE
55# include <VBox/vmm/rem.h>
56# include <VBox/vmm/mm.h>
57#endif
58#include "IEMInternal.h"
59#include <VBox/vmm/vm.h>
60#include <VBox/log.h>
61#include <VBox/err.h>
62#include <VBox/param.h>
63#include <iprt/assert.h>
64#include <iprt/string.h>
65#include <iprt/x86.h>
66
67
68/*******************************************************************************
69* Structures and Typedefs *
70*******************************************************************************/
71/** @typedef PFNIEMOP
72 * Pointer to an opcode decoder function.
73 */
74
75/** @def FNIEMOP_DEF
76 * Define an opcode decoder function.
77 *
78 * We're using macros for this so that adding and removing parameters as well as
79 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
80 *
81 * @param a_Name The function name.
82 */
83
84
85#if defined(__GNUC__) && defined(RT_ARCH_X86)
86typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
87# define FNIEMOP_DEF(a_Name) \
88 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name (PIEMCPU pIemCpu)
89# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
90 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
91# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
92 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
93
94#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
95typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
96# define FNIEMOP_DEF(a_Name) \
97 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
98# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
99 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
100# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
101 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
102
103#elif defined(__GNUC__)
104typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
105# define FNIEMOP_DEF(a_Name) \
106 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
107# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
108 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
109# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
110 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
111
112#else
113typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
114# define FNIEMOP_DEF(a_Name) \
115 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
116# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
117 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
118# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
119 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
120
121#endif
122
123
124/**
125 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
126 */
127typedef union IEMSELDESC
128{
129 /** The legacy view. */
130 X86DESC Legacy;
131 /** The long mode view. */
132 X86DESC64 Long;
133} IEMSELDESC;
134/** Pointer to a selector descriptor table entry. */
135typedef IEMSELDESC *PIEMSELDESC;
136
137
138/*******************************************************************************
139* Defined Constants And Macros *
140*******************************************************************************/
141/** @name IEM status codes.
142 *
143 * Not quite sure how this will play out in the end, just aliasing safe status
144 * codes for now.
145 *
146 * @{ */
147#define VINF_IEM_RAISED_XCPT VINF_EM_RESCHEDULE
148/** @} */
149
150/** Temporary hack to disable the double execution. Will be removed in favor
151 * of a dedicated execution mode in EM. */
152//#define IEM_VERIFICATION_MODE_NO_REM
153
154/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
155 * due to GCC lacking knowledge about the value range of a switch. */
156#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
157
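/** @remarks Illustrative sketch: the macro above is meant to close a switch over
 *           a value whose range is already known, e.g. the effective operand
 *           size.  The worker names in this example are hypothetical.
 * @code
 *      switch (pIemCpu->enmEffOpSize)
 *      {
 *          case IEMMODE_16BIT: return iemExampleWorkerU16(pIemCpu);
 *          case IEMMODE_32BIT: return iemExampleWorkerU32(pIemCpu);
 *          case IEMMODE_64BIT: return iemExampleWorkerU64(pIemCpu);
 *          IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *      }
 * @endcode
 */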
158/**
159 * Call an opcode decoder function.
160 *
161 * We're using macros for this so that adding and removing parameters can be
162 * done as we please. See FNIEMOP_DEF.
163 */
164#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
165
166/**
167 * Call a common opcode decoder function taking one extra argument.
168 *
169 * We're using macros for this so that adding and removing parameters can be
170 * done as we please. See FNIEMOP_DEF_1.
171 */
172#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
173
174/**
175 * Call a common opcode decoder function taking two extra arguments.
176 *
177 * We're using macros for this so that adding and removing parameters can be
178 * done as we please. See FNIEMOP_DEF_2.
179 */
180#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
181
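/** @remarks Illustrative sketch: FNIEMOP_DEF and the FNIEMOP_CALL macros are
 *           used as a pair, so the implicit pIemCpu parameter can be changed in
 *           one place.  The opcode and worker names below are hypothetical.
 * @code
 *  FNIEMOP_DEF_1(iemOpCommonExample, uint8_t, bRm);    // common worker, defined elsewhere
 *
 *  FNIEMOP_DEF(iemOp_example)
 *  {
 *      uint8_t bRm;
 *      IEM_OPCODE_GET_NEXT_U8(&bRm);                    // fetch the ModR/M byte
 *      return FNIEMOP_CALL_1(iemOpCommonExample, bRm);  // forward to the worker
 *  }
 * @endcode
 */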
182/**
183 * Check if we're currently executing in real or virtual 8086 mode.
184 *
185 * @returns @c true if it is, @c false if not.
186 * @param a_pIemCpu The IEM state of the current CPU.
187 */
188#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
189
190/**
191 * Check if we're currently executing in long mode.
192 *
193 * @returns @c true if it is, @c false if not.
194 * @param a_pIemCpu The IEM state of the current CPU.
195 */
196#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
197
198/**
199 * Check if we're currently executing in real mode.
200 *
201 * @returns @c true if it is, @c false if not.
202 * @param a_pIemCpu The IEM state of the current CPU.
203 */
204#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
205
206/**
207 * Tests if an AMD CPUID feature (extended) is marked present - ECX.
208 */
209#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
210
211/**
212 * Tests if an AMD CPUID feature (extended) is marked present - EDX.
213 */
214#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(a_fEdx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0)
215
216/**
217 * Tests if at least one of the specified AMD CPUID features (extended) is
218 * marked present.
219 */
220#define IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(a_fEdx, a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), (a_fEcx))
221
222/**
223 * Checks if an Intel CPUID feature is present.
224 */
225#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx) \
226 ( ((a_fEdx) & (X86_CPUID_FEATURE_EDX_TSC | 0)) \
227 || iemRegIsIntelCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0) )
228
229/**
230 * Check if the address is canonical.
231 */
232#define IEM_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000))
233
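/** @remarks Illustrative note: IEM_IS_CANONICAL relies on unsigned wrap-around.
 *           Adding 2^47 maps the two canonical ranges 0..0x00007FFFFFFFFFFF and
 *           0xFFFF800000000000..0xFFFFFFFFFFFFFFFF onto the single range
 *           0..2^48-1, so one compare suffices.  For example, the non-canonical
 *           address 0x0000800000000000 becomes 0x0001000000000000 and fails the
 *           check.
 */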
234
235/*******************************************************************************
236* Global Variables *
237*******************************************************************************/
238extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
239
240
241/** Function table for the ADD instruction. */
242static const IEMOPBINSIZES g_iemAImpl_add =
243{
244 iemAImpl_add_u8, iemAImpl_add_u8_locked,
245 iemAImpl_add_u16, iemAImpl_add_u16_locked,
246 iemAImpl_add_u32, iemAImpl_add_u32_locked,
247 iemAImpl_add_u64, iemAImpl_add_u64_locked
248};
249
250/** Function table for the ADC instruction. */
251static const IEMOPBINSIZES g_iemAImpl_adc =
252{
253 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
254 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
255 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
256 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
257};
258
259/** Function table for the SUB instruction. */
260static const IEMOPBINSIZES g_iemAImpl_sub =
261{
262 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
263 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
264 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
265 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
266};
267
268/** Function table for the SBB instruction. */
269static const IEMOPBINSIZES g_iemAImpl_sbb =
270{
271 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
272 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
273 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
274 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
275};
276
277/** Function table for the OR instruction. */
278static const IEMOPBINSIZES g_iemAImpl_or =
279{
280 iemAImpl_or_u8, iemAImpl_or_u8_locked,
281 iemAImpl_or_u16, iemAImpl_or_u16_locked,
282 iemAImpl_or_u32, iemAImpl_or_u32_locked,
283 iemAImpl_or_u64, iemAImpl_or_u64_locked
284};
285
286/** Function table for the XOR instruction. */
287static const IEMOPBINSIZES g_iemAImpl_xor =
288{
289 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
290 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
291 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
292 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
293};
294
295/** Function table for the AND instruction. */
296static const IEMOPBINSIZES g_iemAImpl_and =
297{
298 iemAImpl_and_u8, iemAImpl_and_u8_locked,
299 iemAImpl_and_u16, iemAImpl_and_u16_locked,
300 iemAImpl_and_u32, iemAImpl_and_u32_locked,
301 iemAImpl_and_u64, iemAImpl_and_u64_locked
302};
303
304/** Function table for the CMP instruction.
305 * @remarks Making operand order ASSUMPTIONS.
306 */
307static const IEMOPBINSIZES g_iemAImpl_cmp =
308{
309 iemAImpl_cmp_u8, NULL,
310 iemAImpl_cmp_u16, NULL,
311 iemAImpl_cmp_u32, NULL,
312 iemAImpl_cmp_u64, NULL
313};
314
315/** Function table for the TEST instruction.
316 * @remarks Making operand order ASSUMPTIONS.
317 */
318static const IEMOPBINSIZES g_iemAImpl_test =
319{
320 iemAImpl_test_u8, NULL,
321 iemAImpl_test_u16, NULL,
322 iemAImpl_test_u32, NULL,
323 iemAImpl_test_u64, NULL
324};
325
326/** Function table for the BT instruction. */
327static const IEMOPBINSIZES g_iemAImpl_bt =
328{
329 NULL, NULL,
330 iemAImpl_bt_u16, NULL,
331 iemAImpl_bt_u32, NULL,
332 iemAImpl_bt_u64, NULL
333};
334
335/** Function table for the BTC instruction. */
336static const IEMOPBINSIZES g_iemAImpl_btc =
337{
338 NULL, NULL,
339 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
340 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
341 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
342};
343
344/** Function table for the BTR instruction. */
345static const IEMOPBINSIZES g_iemAImpl_btr =
346{
347 NULL, NULL,
348 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
349 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
350 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
351};
352
353/** Function table for the BTS instruction. */
354static const IEMOPBINSIZES g_iemAImpl_bts =
355{
356 NULL, NULL,
357 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
358 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
359 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
360};
361
362/** Function table for the BSF instruction. */
363static const IEMOPBINSIZES g_iemAImpl_bsf =
364{
365 NULL, NULL,
366 iemAImpl_bsf_u16, NULL,
367 iemAImpl_bsf_u32, NULL,
368 iemAImpl_bsf_u64, NULL
369};
370
371/** Function table for the BSR instruction. */
372static const IEMOPBINSIZES g_iemAImpl_bsr =
373{
374 NULL, NULL,
375 iemAImpl_bsr_u16, NULL,
376 iemAImpl_bsr_u32, NULL,
377 iemAImpl_bsr_u64, NULL
378};
379
380/** Function table for the IMUL instruction. */
381static const IEMOPBINSIZES g_iemAImpl_imul_two =
382{
383 NULL, NULL,
384 iemAImpl_imul_two_u16, NULL,
385 iemAImpl_imul_two_u32, NULL,
386 iemAImpl_imul_two_u64, NULL
387};
388
389/** Group 1 /r lookup table. */
390static const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
391{
392 &g_iemAImpl_add,
393 &g_iemAImpl_or,
394 &g_iemAImpl_adc,
395 &g_iemAImpl_sbb,
396 &g_iemAImpl_and,
397 &g_iemAImpl_sub,
398 &g_iemAImpl_xor,
399 &g_iemAImpl_cmp
400};
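/** @remarks Illustrative sketch: a group 1 decoder (opcodes 0x80..0x83) would
 *           select the implementation table by the reg field of the ModR/M
 *           byte, roughly like this; the variable names are hypothetical.
 * @code
 *  uint8_t bRm;
 *  IEM_OPCODE_GET_NEXT_U8(&bRm);
 *  PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
 * @endcode
 */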
401
402/** Function table for the INC instruction. */
403static const IEMOPUNARYSIZES g_iemAImpl_inc =
404{
405 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
406 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
407 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
408 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
409};
410
411/** Function table for the DEC instruction. */
412static const IEMOPUNARYSIZES g_iemAImpl_dec =
413{
414 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
415 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
416 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
417 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
418};
419
420/** Function table for the NEG instruction. */
421static const IEMOPUNARYSIZES g_iemAImpl_neg =
422{
423 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
424 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
425 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
426 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
427};
428
429/** Function table for the NOT instruction. */
430static const IEMOPUNARYSIZES g_iemAImpl_not =
431{
432 iemAImpl_not_u8, iemAImpl_not_u8_locked,
433 iemAImpl_not_u16, iemAImpl_not_u16_locked,
434 iemAImpl_not_u32, iemAImpl_not_u32_locked,
435 iemAImpl_not_u64, iemAImpl_not_u64_locked
436};
437
438
439/** Function table for the ROL instruction. */
440static const IEMOPSHIFTSIZES g_iemAImpl_rol =
441{
442 iemAImpl_rol_u8,
443 iemAImpl_rol_u16,
444 iemAImpl_rol_u32,
445 iemAImpl_rol_u64
446};
447
448/** Function table for the ROR instruction. */
449static const IEMOPSHIFTSIZES g_iemAImpl_ror =
450{
451 iemAImpl_ror_u8,
452 iemAImpl_ror_u16,
453 iemAImpl_ror_u32,
454 iemAImpl_ror_u64
455};
456
457/** Function table for the RCL instruction. */
458static const IEMOPSHIFTSIZES g_iemAImpl_rcl =
459{
460 iemAImpl_rcl_u8,
461 iemAImpl_rcl_u16,
462 iemAImpl_rcl_u32,
463 iemAImpl_rcl_u64
464};
465
466/** Function table for the RCR instruction. */
467static const IEMOPSHIFTSIZES g_iemAImpl_rcr =
468{
469 iemAImpl_rcr_u8,
470 iemAImpl_rcr_u16,
471 iemAImpl_rcr_u32,
472 iemAImpl_rcr_u64
473};
474
475/** Function table for the SHL instruction. */
476static const IEMOPSHIFTSIZES g_iemAImpl_shl =
477{
478 iemAImpl_shl_u8,
479 iemAImpl_shl_u16,
480 iemAImpl_shl_u32,
481 iemAImpl_shl_u64
482};
483
484/** Function table for the SHR instruction. */
485static const IEMOPSHIFTSIZES g_iemAImpl_shr =
486{
487 iemAImpl_shr_u8,
488 iemAImpl_shr_u16,
489 iemAImpl_shr_u32,
490 iemAImpl_shr_u64
491};
492
493/** Function table for the SAR instruction. */
494static const IEMOPSHIFTSIZES g_iemAImpl_sar =
495{
496 iemAImpl_sar_u8,
497 iemAImpl_sar_u16,
498 iemAImpl_sar_u32,
499 iemAImpl_sar_u64
500};
501
502
503/** Function table for the MUL instruction. */
504static const IEMOPMULDIVSIZES g_iemAImpl_mul =
505{
506 iemAImpl_mul_u8,
507 iemAImpl_mul_u16,
508 iemAImpl_mul_u32,
509 iemAImpl_mul_u64
510};
511
512/** Function table for the IMUL instruction working implicitly on rAX. */
513static const IEMOPMULDIVSIZES g_iemAImpl_imul =
514{
515 iemAImpl_imul_u8,
516 iemAImpl_imul_u16,
517 iemAImpl_imul_u32,
518 iemAImpl_imul_u64
519};
520
521/** Function table for the DIV instruction. */
522static const IEMOPMULDIVSIZES g_iemAImpl_div =
523{
524 iemAImpl_div_u8,
525 iemAImpl_div_u16,
526 iemAImpl_div_u32,
527 iemAImpl_div_u64
528};
529
530/** Function table for the IDIV instruction. */
531static const IEMOPMULDIVSIZES g_iemAImpl_idiv =
532{
533 iemAImpl_idiv_u8,
534 iemAImpl_idiv_u16,
535 iemAImpl_idiv_u32,
536 iemAImpl_idiv_u64
537};
538
539/** Function table for the SHLD instruction */
540static const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
541{
542 iemAImpl_shld_u16,
543 iemAImpl_shld_u32,
544 iemAImpl_shld_u64,
545};
546
547/** Function table for the SHRD instruction */
548static const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
549{
550 iemAImpl_shrd_u16,
551 iemAImpl_shrd_u32,
552 iemAImpl_shrd_u64,
553};
554
555
556/*******************************************************************************
557* Internal Functions *
558*******************************************************************************/
559static VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
560/*static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
561static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
562static VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
563static VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
564static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
565static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
566static VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
567static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
568static VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
569static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
570static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
571static VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
572static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
573static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
574static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
575static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
576static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
577static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
578static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel);
579static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
580static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
581static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
582static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
583
584#ifdef IEM_VERIFICATION_MODE
585static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
586#endif
587static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
588static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
589
590
591/**
592 * Initializes the decoder state.
593 *
594 * @param pIemCpu The per CPU IEM state.
595 */
596DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu)
597{
598 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
599
600 pIemCpu->uCpl = CPUMGetGuestCPL(IEMCPU_TO_VMCPU(pIemCpu), CPUMCTX2CORE(pCtx));
601 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
602 ? IEMMODE_64BIT
603 : pCtx->csHid.Attr.n.u1DefBig /** @todo check if this is correct... */
604 ? IEMMODE_32BIT
605 : IEMMODE_16BIT;
606 pIemCpu->enmCpuMode = enmMode;
607 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
608 pIemCpu->enmEffAddrMode = enmMode;
609 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
610 pIemCpu->enmEffOpSize = enmMode;
611 pIemCpu->fPrefixes = 0;
612 pIemCpu->uRexReg = 0;
613 pIemCpu->uRexB = 0;
614 pIemCpu->uRexIndex = 0;
615 pIemCpu->iEffSeg = X86_SREG_DS;
616 pIemCpu->offOpcode = 0;
617 pIemCpu->cbOpcode = 0;
618 pIemCpu->cActiveMappings = 0;
619 pIemCpu->iNextMapping = 0;
620}
621
622
623/**
624 * Prefetches opcodes the first time, i.e. when starting execution.
625 *
626 * @returns Strict VBox status code.
627 * @param pIemCpu The IEM state.
628 */
629static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu)
630{
631#ifdef IEM_VERIFICATION_MODE
632 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
633#endif
634 iemInitDecoder(pIemCpu);
635
636 /*
637 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
638 *
639 * First translate CS:rIP to a physical address.
640 */
641 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
642 uint32_t cbToTryRead;
643 RTGCPTR GCPtrPC;
644 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
645 {
646 cbToTryRead = PAGE_SIZE;
647 GCPtrPC = pCtx->rip;
648 if (!IEM_IS_CANONICAL(GCPtrPC))
649 return iemRaiseGeneralProtectionFault0(pIemCpu);
650 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
651 }
652 else
653 {
654 uint32_t GCPtrPC32 = pCtx->eip;
655 Assert(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
656 if (GCPtrPC32 > pCtx->csHid.u32Limit)
657 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
658 cbToTryRead = pCtx->csHid.u32Limit - GCPtrPC32 + 1;
659 GCPtrPC = pCtx->csHid.u64Base + GCPtrPC32;
660 }
661
662 RTGCPHYS GCPhys;
663 uint64_t fFlags;
664 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
665 if (RT_FAILURE(rc))
666 {
667 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
668 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
669 }
670 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
671 {
672 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
673 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
674 }
675 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
676 {
677 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
678 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
679 }
680 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
681 /** @todo Check reserved bits and such stuff. PGM is better at doing
682 * that, so do it when implementing the guest virtual address
683 * TLB... */
684
685#ifdef IEM_VERIFICATION_MODE
686 /*
687 * Optimistic optimization: Use unconsumed opcode bytes from the previous
688 * instruction.
689 */
690 /** @todo optimize this differently by not using PGMPhysRead. */
691 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
692 pIemCpu->GCPhysOpcodes = GCPhys;
693 if ( offPrevOpcodes < cbOldOpcodes
694 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
695 {
696 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
697 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
698 pIemCpu->cbOpcode = cbNew;
699 return VINF_SUCCESS;
700 }
701#endif
702
703 /*
704 * Read the bytes at this address.
705 */
706 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
707 if (cbToTryRead > cbLeftOnPage)
708 cbToTryRead = cbLeftOnPage;
709 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
710 cbToTryRead = sizeof(pIemCpu->abOpcode);
711 /** @todo patch manager */
712 if (!pIemCpu->fByPassHandlers)
713 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, pIemCpu->abOpcode, cbToTryRead);
714 else
715 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pIemCpu->abOpcode, GCPhys, cbToTryRead);
716 if (rc != VINF_SUCCESS)
717 {
718 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - read error - rc=%Rrc\n", GCPtrPC, rc));
719 return rc;
720 }
721 pIemCpu->cbOpcode = cbToTryRead;
722
723 return VINF_SUCCESS;
724}
725
726
727/**
728 * Tries to fetch at least @a cbMin additional opcode bytes, raising the appropriate
729 * exception if it fails.
730 *
731 * @returns Strict VBox status code.
732 * @param pIemCpu The IEM state.
733 * @param cbMin The minimum number of opcode bytes to fetch.
734 */
735static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
736{
737 /*
738 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
739 *
740 * First translate CS:rIP to a physical address.
741 */
742 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
743 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
744 uint32_t cbToTryRead;
745 RTGCPTR GCPtrNext;
746 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
747 {
748 cbToTryRead = PAGE_SIZE;
749 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
750 if (!IEM_IS_CANONICAL(GCPtrNext))
751 return iemRaiseGeneralProtectionFault0(pIemCpu);
752 cbToTryRead = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
753 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
754 }
755 else
756 {
757 uint32_t GCPtrNext32 = pCtx->eip;
758 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
759 GCPtrNext32 += pIemCpu->cbOpcode;
760 if (GCPtrNext32 > pCtx->csHid.u32Limit)
761 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
762 cbToTryRead = pCtx->csHid.u32Limit - GCPtrNext32 + 1;
763 if (cbToTryRead < cbMin - cbLeft)
764 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
765 GCPtrNext = pCtx->csHid.u64Base + GCPtrNext32;
766 }
767
768 RTGCPHYS GCPhys;
769 uint64_t fFlags;
770 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
771 if (RT_FAILURE(rc))
772 {
773 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
774 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
775 }
776 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
777 {
778 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
779 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
780 }
781 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
782 {
783 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
784 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
785 }
786 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
787 //Log(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
788 /** @todo Check reserved bits and such stuff. PGM is better at doing
789 * that, so do it when implementing the guest virtual address
790 * TLB... */
791
792 /*
793 * Read the bytes at this address.
794 */
795 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
796 if (cbToTryRead > cbLeftOnPage)
797 cbToTryRead = cbLeftOnPage;
798 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
799 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
800 Assert(cbToTryRead >= cbMin - cbLeft);
801 if (!pIemCpu->fByPassHandlers)
802 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);
803 else
804 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
805 if (rc != VINF_SUCCESS)
806 {
807 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc\n", GCPtrNext, rc));
808 return rc;
809 }
810 pIemCpu->cbOpcode += cbToTryRead;
811 //Log(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
812
813 return VINF_SUCCESS;
814}
815
816
817/**
818 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
819 *
820 * @returns Strict VBox status code.
821 * @param pIemCpu The IEM state.
822 * @param pb Where to return the opcode byte.
823 */
824DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
825{
826 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
827 if (rcStrict == VINF_SUCCESS)
828 {
829 uint8_t offOpcode = pIemCpu->offOpcode;
830 *pb = pIemCpu->abOpcode[offOpcode];
831 pIemCpu->offOpcode = offOpcode + 1;
832 }
833 else
834 *pb = 0;
835 return rcStrict;
836}
837
838
839/**
840 * Fetches the next opcode byte.
841 *
842 * @returns Strict VBox status code.
843 * @param pIemCpu The IEM state.
844 * @param pu8 Where to return the opcode byte.
845 */
846DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
847{
848 uint8_t const offOpcode = pIemCpu->offOpcode;
849 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
850 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
851
852 *pu8 = pIemCpu->abOpcode[offOpcode];
853 pIemCpu->offOpcode = offOpcode + 1;
854 return VINF_SUCCESS;
855}
856
857
858/**
859 * Fetches the next opcode byte, returns automatically on failure.
860 *
861 * @param a_pu8 Where to return the opcode byte.
862 * @remark Implicitly references pIemCpu.
863 */
864#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
865 do \
866 { \
867 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
868 if (rcStrict2 != VINF_SUCCESS) \
869 return rcStrict2; \
870 } while (0)
871
872
873/**
874 * Fetches the next signed byte from the opcode stream.
875 *
876 * @returns Strict VBox status code.
877 * @param pIemCpu The IEM state.
878 * @param pi8 Where to return the signed byte.
879 */
880DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
881{
882 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
883}
884
885
886/**
887 * Fetches the next signed byte from the opcode stream, returning automatically
888 * on failure.
889 *
890 * @param a_pi8 Where to return the signed byte.
891 * @remark Implicitly references pIemCpu.
892 */
893#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
894 do \
895 { \
896 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
897 if (rcStrict2 != VINF_SUCCESS) \
898 return rcStrict2; \
899 } while (0)
900
901
902/**
903 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
904 *
905 * @returns Strict VBox status code.
906 * @param pIemCpu The IEM state.
907 * @param pu16 Where to return the opcode word (the sign-extended byte).
908 */
909DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
910{
911 uint8_t u8;
912 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
913 if (rcStrict == VINF_SUCCESS)
914 *pu16 = (int8_t)u8;
915 return rcStrict;
916}
917
918
919/**
920 * Fetches the next signed byte from the opcode stream, sign extending it to
921 * an unsigned 16-bit value.
922 *
923 * @returns Strict VBox status code.
924 * @param pIemCpu The IEM state.
925 * @param pu16 Where to return the unsigned word.
926 */
927DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
928{
929 uint8_t const offOpcode = pIemCpu->offOpcode;
930 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
931 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
932
933 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
934 pIemCpu->offOpcode = offOpcode + 1;
935 return VINF_SUCCESS;
936}
937
938
939/**
940 * Fetches the next signed byte from the opcode stream and sign-extends it to
941 * a word, returning automatically on failure.
942 *
943 * @param a_pu16 Where to return the word.
944 * @remark Implicitly references pIemCpu.
945 */
946#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
947 do \
948 { \
949 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
950 if (rcStrict2 != VINF_SUCCESS) \
951 return rcStrict2; \
952 } while (0)
953
954
955/**
956 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
957 *
958 * @returns Strict VBox status code.
959 * @param pIemCpu The IEM state.
960 * @param pu16 Where to return the opcode word.
961 */
962DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
963{
964 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
965 if (rcStrict == VINF_SUCCESS)
966 {
967 uint8_t offOpcode = pIemCpu->offOpcode;
968 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
969 pIemCpu->offOpcode = offOpcode + 2;
970 }
971 else
972 *pu16 = 0;
973 return rcStrict;
974}
975
976
977/**
978 * Fetches the next opcode word.
979 *
980 * @returns Strict VBox status code.
981 * @param pIemCpu The IEM state.
982 * @param pu16 Where to return the opcode word.
983 */
984DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
985{
986 uint8_t const offOpcode = pIemCpu->offOpcode;
987 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
988 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
989
990 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
991 pIemCpu->offOpcode = offOpcode + 2;
992 return VINF_SUCCESS;
993}
994
995
996/**
997 * Fetches the next opcode word, returns automatically on failure.
998 *
999 * @param a_pu16 Where to return the opcode word.
1000 * @remark Implicitly references pIemCpu.
1001 */
1002#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1003 do \
1004 { \
1005 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1006 if (rcStrict2 != VINF_SUCCESS) \
1007 return rcStrict2; \
1008 } while (0)
1009
1010
1011/**
1012 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1013 *
1014 * @returns Strict VBox status code.
1015 * @param pIemCpu The IEM state.
1016 * @param pu32 Where to return the opcode double word.
1017 */
1018DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1019{
1020 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1021 if (rcStrict == VINF_SUCCESS)
1022 {
1023 uint8_t offOpcode = pIemCpu->offOpcode;
1024 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1025 pIemCpu->offOpcode = offOpcode + 2;
1026 }
1027 else
1028 *pu32 = 0;
1029 return rcStrict;
1030}
1031
1032
1033/**
1034 * Fetches the next opcode word, zero extending it to a double word.
1035 *
1036 * @returns Strict VBox status code.
1037 * @param pIemCpu The IEM state.
1038 * @param pu32 Where to return the opcode double word.
1039 */
1040DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1041{
1042 uint8_t const offOpcode = pIemCpu->offOpcode;
1043 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1044 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1045
1046 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1047 pIemCpu->offOpcode = offOpcode + 2;
1048 return VINF_SUCCESS;
1049}
1050
1051
1052/**
1053 * Fetches the next opcode word and zero extends it to a double word, returns
1054 * automatically on failure.
1055 *
1056 * @param a_pu32 Where to return the opcode double word.
1057 * @remark Implicitly references pIemCpu.
1058 */
1059#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1060 do \
1061 { \
1062 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1063 if (rcStrict2 != VINF_SUCCESS) \
1064 return rcStrict2; \
1065 } while (0)
1066
1067
1068/**
1069 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1070 *
1071 * @returns Strict VBox status code.
1072 * @param pIemCpu The IEM state.
1073 * @param pu64 Where to return the opcode quad word.
1074 */
1075DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1076{
1077 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1078 if (rcStrict == VINF_SUCCESS)
1079 {
1080 uint8_t offOpcode = pIemCpu->offOpcode;
1081 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1082 pIemCpu->offOpcode = offOpcode + 2;
1083 }
1084 else
1085 *pu64 = 0;
1086 return rcStrict;
1087}
1088
1089
1090/**
1091 * Fetches the next opcode word, zero extending it to a quad word.
1092 *
1093 * @returns Strict VBox status code.
1094 * @param pIemCpu The IEM state.
1095 * @param pu64 Where to return the opcode quad word.
1096 */
1097DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1098{
1099 uint8_t const offOpcode = pIemCpu->offOpcode;
1100 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1101 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1102
1103 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1104 pIemCpu->offOpcode = offOpcode + 2;
1105 return VINF_SUCCESS;
1106}
1107
1108
1109/**
1110 * Fetches the next opcode word and zero extends it to a quad word, returns
1111 * automatically on failure.
1112 *
1113 * @param a_pu64 Where to return the opcode quad word.
1114 * @remark Implicitly references pIemCpu.
1115 */
1116#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1117 do \
1118 { \
1119 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1120 if (rcStrict2 != VINF_SUCCESS) \
1121 return rcStrict2; \
1122 } while (0)
1123
1124
1125/**
1126 * Fetches the next signed word from the opcode stream.
1127 *
1128 * @returns Strict VBox status code.
1129 * @param pIemCpu The IEM state.
1130 * @param pi16 Where to return the signed word.
1131 */
1132DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1133{
1134 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1135}
1136
1137
1138/**
1139 * Fetches the next signed word from the opcode stream, returning automatically
1140 * on failure.
1141 *
1142 * @param a_pi16 Where to return the signed word.
1143 * @remark Implicitly references pIemCpu.
1144 */
1145#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1146 do \
1147 { \
1148 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1149 if (rcStrict2 != VINF_SUCCESS) \
1150 return rcStrict2; \
1151 } while (0)
1152
1153
1154/**
1155 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1156 *
1157 * @returns Strict VBox status code.
1158 * @param pIemCpu The IEM state.
1159 * @param pu32 Where to return the opcode dword.
1160 */
1161DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1162{
1163 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1164 if (rcStrict == VINF_SUCCESS)
1165 {
1166 uint8_t offOpcode = pIemCpu->offOpcode;
1167 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1168 pIemCpu->abOpcode[offOpcode + 1],
1169 pIemCpu->abOpcode[offOpcode + 2],
1170 pIemCpu->abOpcode[offOpcode + 3]);
1171 pIemCpu->offOpcode = offOpcode + 4;
1172 }
1173 else
1174 *pu32 = 0;
1175 return rcStrict;
1176}
1177
1178
1179/**
1180 * Fetches the next opcode dword.
1181 *
1182 * @returns Strict VBox status code.
1183 * @param pIemCpu The IEM state.
1184 * @param pu32 Where to return the opcode double word.
1185 */
1186DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1187{
1188 uint8_t const offOpcode = pIemCpu->offOpcode;
1189 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1190 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1191
1192 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1193 pIemCpu->abOpcode[offOpcode + 1],
1194 pIemCpu->abOpcode[offOpcode + 2],
1195 pIemCpu->abOpcode[offOpcode + 3]);
1196 pIemCpu->offOpcode = offOpcode + 4;
1197 return VINF_SUCCESS;
1198}
1199
1200
1201/**
1202 * Fetches the next opcode dword, returns automatically on failure.
1203 *
1204 * @param a_pu32 Where to return the opcode dword.
1205 * @remark Implicitly references pIemCpu.
1206 */
1207#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1208 do \
1209 { \
1210 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1211 if (rcStrict2 != VINF_SUCCESS) \
1212 return rcStrict2; \
1213 } while (0)
1214
1215
1216/**
1217 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1218 *
1219 * @returns Strict VBox status code.
1220 * @param pIemCpu The IEM state.
1221 * @param pu64 Where to return the opcode quad word.
1222 */
1223DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1224{
1225 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1226 if (rcStrict == VINF_SUCCESS)
1227 {
1228 uint8_t offOpcode = pIemCpu->offOpcode;
1229 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1230 pIemCpu->abOpcode[offOpcode + 1],
1231 pIemCpu->abOpcode[offOpcode + 2],
1232 pIemCpu->abOpcode[offOpcode + 3]);
1233 pIemCpu->offOpcode = offOpcode + 4;
1234 }
1235 else
1236 *pu64 = 0;
1237 return rcStrict;
1238}
1239
1240
1241/**
1242 * Fetches the next opcode dword, zero extending it to a quad word.
1243 *
1244 * @returns Strict VBox status code.
1245 * @param pIemCpu The IEM state.
1246 * @param pu64 Where to return the opcode quad word.
1247 */
1248DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1249{
1250 uint8_t const offOpcode = pIemCpu->offOpcode;
1251 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1252 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1253
1254 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1255 pIemCpu->abOpcode[offOpcode + 1],
1256 pIemCpu->abOpcode[offOpcode + 2],
1257 pIemCpu->abOpcode[offOpcode + 3]);
1258 pIemCpu->offOpcode = offOpcode + 4;
1259 return VINF_SUCCESS;
1260}
1261
1262
1263/**
1264 * Fetches the next opcode dword and zero extends it to a quad word, returns
1265 * automatically on failure.
1266 *
1267 * @param a_pu64 Where to return the opcode quad word.
1268 * @remark Implicitly references pIemCpu.
1269 */
1270#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1271 do \
1272 { \
1273 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1274 if (rcStrict2 != VINF_SUCCESS) \
1275 return rcStrict2; \
1276 } while (0)
1277
1278
1279/**
1280 * Fetches the next signed double word from the opcode stream.
1281 *
1282 * @returns Strict VBox status code.
1283 * @param pIemCpu The IEM state.
1284 * @param pi32 Where to return the signed double word.
1285 */
1286DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1287{
1288 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1289}
1290
1291/**
1292 * Fetches the next signed double word from the opcode stream, returning
1293 * automatically on failure.
1294 *
1295 * @param a_pi32 Where to return the signed double word.
1296 * @remark Implicitly references pIemCpu.
1297 */
1298#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1299 do \
1300 { \
1301 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1302 if (rcStrict2 != VINF_SUCCESS) \
1303 return rcStrict2; \
1304 } while (0)
1305
1306
1307/**
1308 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1309 *
1310 * @returns Strict VBox status code.
1311 * @param pIemCpu The IEM state.
1312 * @param pu64 Where to return the opcode qword.
1313 */
1314DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1315{
1316 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1317 if (rcStrict == VINF_SUCCESS)
1318 {
1319 uint8_t offOpcode = pIemCpu->offOpcode;
1320 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1321 pIemCpu->abOpcode[offOpcode + 1],
1322 pIemCpu->abOpcode[offOpcode + 2],
1323 pIemCpu->abOpcode[offOpcode + 3]);
1324 pIemCpu->offOpcode = offOpcode + 4;
1325 }
1326 else
1327 *pu64 = 0;
1328 return rcStrict;
1329}
1330
1331
1332/**
1333 * Fetches the next opcode dword, sign extending it into a quad word.
1334 *
1335 * @returns Strict VBox status code.
1336 * @param pIemCpu The IEM state.
1337 * @param pu64 Where to return the opcode quad word.
1338 */
1339DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1340{
1341 uint8_t const offOpcode = pIemCpu->offOpcode;
1342 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1343 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1344
1345 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1346 pIemCpu->abOpcode[offOpcode + 1],
1347 pIemCpu->abOpcode[offOpcode + 2],
1348 pIemCpu->abOpcode[offOpcode + 3]);
1349 *pu64 = i32;
1350 pIemCpu->offOpcode = offOpcode + 4;
1351 return VINF_SUCCESS;
1352}
1353
1354
1355/**
1356 * Fetches the next opcode double word and sign extends it to a quad word,
1357 * returns automatically on failure.
1358 *
1359 * @param a_pu64 Where to return the opcode quad word.
1360 * @remark Implicitly references pIemCpu.
1361 */
1362#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1363 do \
1364 { \
1365 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1366 if (rcStrict2 != VINF_SUCCESS) \
1367 return rcStrict2; \
1368 } while (0)
1369
1370
1371/**
1372 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1373 *
1374 * @returns Strict VBox status code.
1375 * @param pIemCpu The IEM state.
1376 * @param pu64 Where to return the opcode qword.
1377 */
1378DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1379{
1380 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1381 if (rcStrict == VINF_SUCCESS)
1382 {
1383 uint8_t offOpcode = pIemCpu->offOpcode;
1384 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1385 pIemCpu->abOpcode[offOpcode + 1],
1386 pIemCpu->abOpcode[offOpcode + 2],
1387 pIemCpu->abOpcode[offOpcode + 3],
1388 pIemCpu->abOpcode[offOpcode + 4],
1389 pIemCpu->abOpcode[offOpcode + 5],
1390 pIemCpu->abOpcode[offOpcode + 6],
1391 pIemCpu->abOpcode[offOpcode + 7]);
1392 pIemCpu->offOpcode = offOpcode + 8;
1393 }
1394 else
1395 *pu64 = 0;
1396 return rcStrict;
1397}
1398
1399
1400/**
1401 * Fetches the next opcode qword.
1402 *
1403 * @returns Strict VBox status code.
1404 * @param pIemCpu The IEM state.
1405 * @param pu64 Where to return the opcode qword.
1406 */
1407DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1408{
1409 uint8_t const offOpcode = pIemCpu->offOpcode;
1410 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1411 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1412
1413 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1414 pIemCpu->abOpcode[offOpcode + 1],
1415 pIemCpu->abOpcode[offOpcode + 2],
1416 pIemCpu->abOpcode[offOpcode + 3],
1417 pIemCpu->abOpcode[offOpcode + 4],
1418 pIemCpu->abOpcode[offOpcode + 5],
1419 pIemCpu->abOpcode[offOpcode + 6],
1420 pIemCpu->abOpcode[offOpcode + 7]);
1421 pIemCpu->offOpcode = offOpcode + 8;
1422 return VINF_SUCCESS;
1423}
1424
1425
1426/**
1427 * Fetches the next opcode quad word, returns automatically on failure.
1428 *
1429 * @param a_pu64 Where to return the opcode quad word.
1430 * @remark Implicitly references pIemCpu.
1431 */
1432#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1433 do \
1434 { \
1435 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1436 if (rcStrict2 != VINF_SUCCESS) \
1437 return rcStrict2; \
1438 } while (0)
1439
1440
1441/** @name Misc Worker Functions.
1442 * @{
1443 */
1444
1445
1446/**
1447 * Validates a new SS segment.
1448 *
1449 * @returns VBox strict status code.
1450 * @param pIemCpu The IEM per CPU instance data.
1451 * @param pCtx The CPU context.
1452 * @param NewSS The new SS selector.
1453 * @param uCpl The CPL to load the stack for.
1454 * @param pDesc Where to return the descriptor.
1455 */
1456static VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1457{
1458 NOREF(pCtx);
1459
1460 /* Null selectors are not allowed (we're not called for dispatching
1461 interrupts with SS=0 in long mode). */
1462 if (!(NewSS & (X86_SEL_MASK | X86_SEL_LDT)))
1463 {
1464 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #GP(0)\n", NewSS));
1465 return iemRaiseGeneralProtectionFault0(pIemCpu);
1466 }
1467
1468 /*
1469 * Read the descriptor.
1470 */
1471 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS);
1472 if (rcStrict != VINF_SUCCESS)
1473 return rcStrict;
1474
1475 /*
1476 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1477 */
1478 if (!pDesc->Legacy.Gen.u1DescType)
1479 {
1480 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1481 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1482 }
1483
1484 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1485 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1486 {
1487 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1488 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1489 }
1490 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1491 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1492 {
1493 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1494 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1495 }
1496 /** @todo testcase: check if the TSS.ssX RPL is checked. */
1497 if ((NewSS & X86_SEL_RPL) != uCpl)
1498 {
1499 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #GP\n", NewSS, uCpl));
1500 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1501 }
1502 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1503 {
1504 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #GP\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1505 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1506 }
1507
1508 /* Is it there? */
1509 /** @todo testcase: Is this checked before the canonical / limit check below? */
1510 if (!pDesc->Legacy.Gen.u1Present)
1511 {
1512 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1513 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
1514 }
1515
1516 return VINF_SUCCESS;
1517}
1518
1519
1520/** @} */
1521
1522/** @name Raising Exceptions.
1523 *
1524 * @{
1525 */
1526
1527/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
1528 * @{ */
1529/** CPU exception. */
1530#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
1531/** External interrupt (from PIC, APIC, whatever). */
1532#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
1533/** Software interrupt (int, into or bound). */
1534#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
1535/** Takes an error code. */
1536#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
1537/** Takes a CR2. */
1538#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
1539/** Generated by the breakpoint instruction. */
1540#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
1541/** @} */
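/** @remarks Illustrative sketch: a page fault would typically combine the flags
 *           above when calling iemRaiseXcptOrInt; the argument order shown here
 *           is an assumption and is only meant to illustrate the flag semantics.
 * @code
 *  return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF,
 *                           IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
 *                           uErr, GCPtrWhere);
 * @endcode
 */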
1542
1543/**
1544 * Loads the specified stack far pointer from the TSS.
1545 *
1546 * @returns VBox strict status code.
1547 * @param pIemCpu The IEM per CPU instance data.
1548 * @param pCtx The CPU context.
1549 * @param uCpl The CPL to load the stack for.
1550 * @param pSelSS Where to return the new stack segment.
1551 * @param puEsp Where to return the new stack pointer.
1552 */
1553static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
1554 PRTSEL pSelSS, uint32_t *puEsp)
1555{
1556 VBOXSTRICTRC rcStrict;
1557 Assert(uCpl < 4);
1558 *puEsp = 0; /* make gcc happy */
1559 *pSelSS = 0; /* make gcc happy */
1560
1561 switch (pCtx->trHid.Attr.n.u4Type)
1562 {
1563 /*
1564 * 16-bit TSS (X86TSS16).
1565 */
1566 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
1567 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1568 {
1569 uint32_t off = uCpl * 4 + 2;
1570 if (off + 4 > pCtx->trHid.u32Limit)
1571 {
1572 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->trHid.u32Limit));
1573 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
1574 }
1575
1576 uint32_t u32Tmp = 0; /* gcc maybe... */
1577 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->trHid.u64Base + off);
1578 if (rcStrict == VINF_SUCCESS)
1579 {
1580 *puEsp = RT_LOWORD(u32Tmp);
1581 *pSelSS = RT_HIWORD(u32Tmp);
1582 return VINF_SUCCESS;
1583 }
1584 break;
1585 }
1586
1587 /*
1588 * 32-bit TSS (X86TSS32).
1589 */
1590 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
1591 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1592 {
1593 uint32_t off = uCpl * 8 + 4;
1594 if (off + 7 > pCtx->trHid.u32Limit)
1595 {
1596 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->trHid.u32Limit));
1597 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
1598 }
1599
1600 uint64_t u64Tmp;
1601 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->trHid.u64Base + off);
1602 if (rcStrict == VINF_SUCCESS)
1603 {
1604 *puEsp = u64Tmp & UINT32_MAX;
1605 *pSelSS = (RTSEL)(u64Tmp >> 32);
1606 return VINF_SUCCESS;
1607 }
1608 break;
1609 }
1610
1611 default:
1612 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
1613 }
1614 return rcStrict;
1615}
1616
1617
1618/**
1619 * Adjust the CPU state according to the exception being raised.
1620 *
1621 * @param pCtx The CPU context.
1622 * @param u8Vector The exception that has been raised.
1623 */
1624DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
1625{
1626 switch (u8Vector)
1627 {
1628 case X86_XCPT_DB:
1629 pCtx->dr[7] &= ~X86_DR7_GD;
1630 break;
1631 /** @todo Read the AMD and Intel exception reference... */
1632 }
1633}
1634
1635
1636/**
1637 * Implements exceptions and interrupts for real mode.
1638 *
1639 * @returns VBox strict status code.
1640 * @param pIemCpu The IEM per CPU instance data.
1641 * @param pCtx The CPU context.
1642 * @param cbInstr The number of bytes to offset rIP by in the return
1643 * address.
1644 * @param u8Vector The interrupt / exception vector number.
1645 * @param fFlags The flags.
1646 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1647 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1648 */
1649static VBOXSTRICTRC
1650iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
1651 PCPUMCTX pCtx,
1652 uint8_t cbInstr,
1653 uint8_t u8Vector,
1654 uint32_t fFlags,
1655 uint16_t uErr,
1656 uint64_t uCr2)
1657{
1658 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_INTERNAL_ERROR_3);
1659 NOREF(uErr); NOREF(uCr2);
1660
1661 /*
1662 * Read the IDT entry.
1663 */
1664 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
1665 {
1666 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
1667 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1668 }
1669 RTFAR16 Idte;
1670 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
1671 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
1672 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1673 return rcStrict;
1674
1675 /*
1676 * Push the stack frame.
1677 */
1678 uint16_t *pu16Frame;
1679 uint64_t uNewRsp;
1680 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
1681 if (rcStrict != VINF_SUCCESS)
1682 return rcStrict;
1683
1684 pu16Frame[2] = (uint16_t)pCtx->eflags.u;
1685 pu16Frame[1] = (uint16_t)pCtx->cs;
1686 pu16Frame[0] = pCtx->ip + cbInstr;
1687 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
1688 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1689 return rcStrict;
1690
1691 /*
1692 * Load the vector address into cs:ip and make exception specific state
1693 * adjustments.
1694 */
1695 pCtx->cs = Idte.sel;
1696 pCtx->csHid.u64Base = (uint32_t)Idte.sel << 4;
1697 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
1698 pCtx->rip = Idte.off;
1699 pCtx->eflags.Bits.u1IF = 0;
1700
1701 /** @todo do we actually do this in real mode? */
1702 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1703 iemRaiseXcptAdjustState(pCtx, u8Vector);
1704
1705 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
1706}
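
/*
 * Illustrative note (not from the original source): assuming the usual
 * real-mode IVT layout used above, vector 0x08 would have its 4-byte entry
 * read from IDTR.base + 0x20 (handler IP in the low word, handler CS in the
 * high word), and the 6-byte frame pushed above ends up as:
 *      new SP+0: return IP,  new SP+2: return CS,  new SP+4: FLAGS.
 */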
1707
1708
1709/**
1710 * Implements exceptions and interrupts for protected mode.
1711 *
1712 * @returns VBox strict status code.
1713 * @param pIemCpu The IEM per CPU instance data.
1714 * @param pCtx The CPU context.
1715 * @param cbInstr The number of bytes to offset rIP by in the return
1716 * address.
1717 * @param u8Vector The interrupt / exception vector number.
1718 * @param fFlags The flags.
1719 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1720 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1721 */
1722static VBOXSTRICTRC
1723iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
1724 PCPUMCTX pCtx,
1725 uint8_t cbInstr,
1726 uint8_t u8Vector,
1727 uint32_t fFlags,
1728 uint16_t uErr,
1729 uint64_t uCr2)
1730{
1731 NOREF(cbInstr);
1732
1733 /*
1734 * Read the IDT entry.
1735 */
1736 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
1737 {
1738 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
1739 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1740 }
1741 X86DESC Idte;
1742 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
1743 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
1744 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1745 return rcStrict;
1746 Log4(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
1747 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
1748 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
1749
1750 /*
1751 * Check the descriptor type, DPL and such.
1752 * ASSUMES this is done in the same order as described for call-gate calls.
1753 */
1754 if (Idte.Gate.u1DescType)
1755 {
1756 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
1757 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1758 }
1759 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
1760 switch (Idte.Gate.u4Type)
1761 {
1762 case X86_SEL_TYPE_SYS_UNDEFINED:
1763 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1764 case X86_SEL_TYPE_SYS_LDT:
1765 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1766 case X86_SEL_TYPE_SYS_286_CALL_GATE:
1767 case X86_SEL_TYPE_SYS_UNDEFINED2:
1768 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1769 case X86_SEL_TYPE_SYS_UNDEFINED3:
1770 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1771 case X86_SEL_TYPE_SYS_386_CALL_GATE:
1772 case X86_SEL_TYPE_SYS_UNDEFINED4:
1773 {
1774 /** @todo check what actually happens when the type is wrong...
1775 * esp. call gates. */
1776 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
1777 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1778 }
1779
1780 case X86_SEL_TYPE_SYS_286_INT_GATE:
1781 case X86_SEL_TYPE_SYS_386_INT_GATE:
1782 fEflToClear |= X86_EFL_IF;
1783 break;
1784
1785 case X86_SEL_TYPE_SYS_TASK_GATE:
1786 /** @todo task gates. */
1787 AssertFailedReturn(VERR_NOT_SUPPORTED);
1788
1789 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
1790 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
1791 break;
1792
1793 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1794 }
1795
1796 /* Check DPL against CPL if applicable. */
1797 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
1798 {
1799 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
1800 {
1801 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
1802 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1803 }
1804 }
1805
1806 /* Is it there? */
1807 if (!Idte.Gate.u1Present)
1808 {
1809 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
1810 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1811 }
1812
1813 /* A null CS is bad. */
1814 RTSEL NewCS = Idte.Gate.u16Sel;
1815 if (!(NewCS & (X86_SEL_MASK | X86_SEL_LDT)))
1816 {
1817 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
1818 return iemRaiseGeneralProtectionFault0(pIemCpu);
1819 }
1820
1821 /* Fetch the descriptor for the new CS. */
1822 IEMSELDESC DescCS;
1823 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS);
1824 if (rcStrict != VINF_SUCCESS)
1825 {
1826 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
1827 return rcStrict;
1828 }
1829
1830 /* Must be a code segment. */
1831 if (!DescCS.Legacy.Gen.u1DescType)
1832 {
1833 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
1834 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
1835 }
1836 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1837 {
1838 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
1839 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
1840 }
1841
1842 /* Don't allow lowering the privilege level. */
1843 /** @todo Does the lowering of privileges apply to software interrupts
1844 * only? This has bearings on the more-privileged or
1845 * same-privilege stack behavior further down. A testcase would
1846 * be nice. */
1847 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
1848 {
1849 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
1850 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1851 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
1852 }
1853 /** @todo is the RPL of the interrupt/trap gate descriptor checked? */
1854
1855 /* Check the new EIP against the new CS limit. */
1856 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
1857 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
1858 ? Idte.Gate.u16OffsetLow
1859 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
1860 uint32_t cbLimitCS = X86DESC_LIMIT(DescCS.Legacy);
1861 if (DescCS.Legacy.Gen.u1Granularity)
1862 cbLimitCS = (cbLimitCS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1863 if (uNewEip > cbLimitCS)
1864 {
1865 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
1866 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1867 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
1868 }
1869
1870 /* Make sure the selector is present. */
1871 if (!DescCS.Legacy.Gen.u1Present)
1872 {
1873 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
1874 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
1875 }
1876
1877 /*
1878 * If the privilege level changes, we need to get a new stack from the TSS.
1879 * This in turns means validating the new SS and ESP...
1880 */
1881 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
1882 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
1883 if (uNewCpl != pIemCpu->uCpl)
1884 {
1885 RTSEL NewSS;
1886 uint32_t uNewEsp;
1887 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
1888 if (rcStrict != VINF_SUCCESS)
1889 return rcStrict;
1890
1891 IEMSELDESC DescSS;
1892 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
1893 if (rcStrict != VINF_SUCCESS)
1894 return rcStrict;
1895
1896 /* Check that there is sufficient space for the stack frame. */
1897 uint32_t cbLimitSS = X86DESC_LIMIT(DescSS.Legacy);
1898 if (DescSS.Legacy.Gen.u1Granularity)
1899 cbLimitSS = (cbLimitSS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1900 AssertReturn(!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
1901
1902 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 24 : 20;
1903 if ( uNewEsp - 1 > cbLimitSS
1904 || uNewEsp < cbStackFrame)
1905 {
1906 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
1907 u8Vector, NewSS, uNewEsp, cbStackFrame));
1908 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
1909 }
1910
1911 /*
1912 * Start making changes.
1913 */
1914
1915 /* Create the stack frame. */
1916 RTPTRUNION uStackFrame;
1917 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
1918 uNewEsp - cbStackFrame + X86DESC_BASE(DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
1919 if (rcStrict != VINF_SUCCESS)
1920 return rcStrict;
1921 void * const pvStackFrame = uStackFrame.pv;
1922
1923 if (fFlags & IEM_XCPT_FLAGS_ERR)
1924 *uStackFrame.pu32++ = uErr;
1925 uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
1926 ? pCtx->eip + cbInstr : pCtx->eip;
1927 uStackFrame.pu32[1] = (pCtx->cs & ~X86_SEL_RPL) | pIemCpu->uCpl;
1928 uStackFrame.pu32[2] = pCtx->eflags.u;
1929 uStackFrame.pu32[3] = pCtx->esp;
1930 uStackFrame.pu32[4] = pCtx->ss;
1931 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
1932 if (rcStrict != VINF_SUCCESS)
1933 return rcStrict;
1934
1935 /* Mark the selectors 'accessed' (hope this is the correct time). */
1936 /** @todo testcase: exactly _when_ are the accessed bits set - before or
1937 * after pushing the stack frame? (Write protect the gdt + stack to
1938 * find out.) */
1939 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1940 {
1941 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
1942 if (rcStrict != VINF_SUCCESS)
1943 return rcStrict;
1944 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1945 }
1946
1947 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1948 {
1949 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
1950 if (rcStrict != VINF_SUCCESS)
1951 return rcStrict;
1952 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1953 }
1954
1955 /*
1956 * Start committing the register changes (joins with the DPL=CPL branch).
1957 */
1958 pCtx->ss = NewSS;
1959 pCtx->ssHid.u32Limit = cbLimitSS;
1960 pCtx->ssHid.u64Base = X86DESC_BASE(DescSS.Legacy);
1961 pCtx->ssHid.Attr.u = X86DESC_GET_HID_ATTR(DescSS.Legacy);
1962 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
1963 pIemCpu->uCpl = uNewCpl;
1964 }
1965 /*
1966 * Same privilege, no stack change and smaller stack frame.
1967 */
1968 else
1969 {
1970 uint64_t uNewRsp;
1971 RTPTRUNION uStackFrame;
1972 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 16 : 12;
1973 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
1974 if (rcStrict != VINF_SUCCESS)
1975 return rcStrict;
1976 void * const pvStackFrame = uStackFrame.pv;
1977
1978 if (fFlags & IEM_XCPT_FLAGS_ERR)
1979 *uStackFrame.pu32++ = uErr;
1980 uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
1981 ? pCtx->eip + cbInstr : pCtx->eip;
1982 uStackFrame.pu32[1] = (pCtx->cs & ~X86_SEL_RPL) | pIemCpu->uCpl;
1983 uStackFrame.pu32[2] = pCtx->eflags.u;
1984 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
1985 if (rcStrict != VINF_SUCCESS)
1986 return rcStrict;
1987
1988 /* Mark the CS selector as 'accessed'. */
1989 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1990 {
1991 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
1992 if (rcStrict != VINF_SUCCESS)
1993 return rcStrict;
1994 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1995 }
1996
1997 /*
1998 * Start committing the register changes (joins with the other branch).
1999 */
2000 pCtx->rsp = uNewRsp;
2001 }
2002
2003 /* ... register committing continues. */
2004 pCtx->cs = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2005 pCtx->csHid.u32Limit = cbLimitCS;
2006 pCtx->csHid.u64Base = X86DESC_BASE(DescCS.Legacy);
2007 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(DescCS.Legacy);
2008
2009 pCtx->rip = uNewEip;
2010 pCtx->rflags.u &= ~fEflToClear;
2011
2012 if (fFlags & IEM_XCPT_FLAGS_CR2)
2013 pCtx->cr2 = uCr2;
2014
2015 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2016 iemRaiseXcptAdjustState(pCtx, u8Vector);
2017
2018 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2019}
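
/*
 * Illustrative summary (not from the original source) of the frames built
 * above: with a privilege change the handler stack receives SS, ESP, EFLAGS,
 * CS and EIP (20 bytes, 24 with an error code); without a privilege change
 * only EFLAGS, CS and EIP are pushed (12 bytes, 16 with an error code). In
 * both cases the error code, when present, ends up at the lowest address,
 * i.e. on the top of the new stack.
 */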
2020
2021
2022/**
2023 * Implements exceptions and interrupts for V8086 mode.
2024 *
2025 * @returns VBox strict status code.
2026 * @param pIemCpu The IEM per CPU instance data.
2027 * @param pCtx The CPU context.
2028 * @param cbInstr The number of bytes to offset rIP by in the return
2029 * address.
2030 * @param u8Vector The interrupt / exception vector number.
2031 * @param fFlags The flags.
2032 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2033 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2034 */
2035static VBOXSTRICTRC
2036iemRaiseXcptOrIntInV8086Mode(PIEMCPU pIemCpu,
2037 PCPUMCTX pCtx,
2038 uint8_t cbInstr,
2039 uint8_t u8Vector,
2040 uint32_t fFlags,
2041 uint16_t uErr,
2042 uint64_t uCr2)
2043{
2044 NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
2045 AssertMsgFailed(("V8086 exception / interrupt dispatching\n"));
2046 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
2047}
2048
2049
2050/**
2051 * Implements exceptions and interrupts for long mode.
2052 *
2053 * @returns VBox strict status code.
2054 * @param pIemCpu The IEM per CPU instance data.
2055 * @param pCtx The CPU context.
2056 * @param cbInstr The number of bytes to offset rIP by in the return
2057 * address.
2058 * @param u8Vector The interrupt / exception vector number.
2059 * @param fFlags The flags.
2060 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2061 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2062 */
2063static VBOXSTRICTRC
2064iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
2065 PCPUMCTX pCtx,
2066 uint8_t cbInstr,
2067 uint8_t u8Vector,
2068 uint32_t fFlags,
2069 uint16_t uErr,
2070 uint64_t uCr2)
2071{
2072 NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
2073 AssertMsgFailed(("long mode exception / interrupt dispatching\n"));
2074 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
2075}
2076
2077
2078/**
2079 * Implements exceptions and interrupts.
2080 *
2081 * All exceptions and interrupts go through this function!
2082 *
2083 * @returns VBox strict status code.
2084 * @param pIemCpu The IEM per CPU instance data.
2085 * @param cbInstr The number of bytes to offset rIP by in the return
2086 * address.
2087 * @param u8Vector The interrupt / exception vector number.
2088 * @param fFlags The flags.
2089 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2090 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2091 */
2092DECL_NO_INLINE(static, VBOXSTRICTRC)
2093iemRaiseXcptOrInt(PIEMCPU pIemCpu,
2094 uint8_t cbInstr,
2095 uint8_t u8Vector,
2096 uint32_t fFlags,
2097 uint16_t uErr,
2098 uint64_t uCr2)
2099{
2100 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2101
2102 /*
2103 * Do recursion accounting.
2104 */
2105 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
2106 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
2107 if (pIemCpu->cXcptRecursions == 0)
2108 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
2109 u8Vector, pCtx->cs, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
2110 else
2111 {
2112 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
2113 u8Vector, pCtx->cs, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
2114
2115 /** @todo double and triple faults. */
2116 AssertReturn(pIemCpu->cXcptRecursions < 3, VERR_IEM_ASPECT_NOT_IMPLEMENTED);
2117
2118 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
2119 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
2120 {
2121 ....
2122 } */
2123 }
2124 pIemCpu->cXcptRecursions++;
2125 pIemCpu->uCurXcpt = u8Vector;
2126 pIemCpu->fCurXcpt = fFlags;
2127
2128 /*
2129 * Extensive logging.
2130 */
2131#ifdef LOG_ENABLED
2132 if (LogIs3Enabled())
2133 {
2134 PVM pVM = IEMCPU_TO_VM(pIemCpu);
2135 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2136 char szRegs[4096];
2137 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2138 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2139 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2140 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2141 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2142 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2143 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2144 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2145 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2146 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2147 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2148 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2149 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2150 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2151 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2152 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2153 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2154 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2155 " efer=%016VR{efer}\n"
2156 " pat=%016VR{pat}\n"
2157 " sf_mask=%016VR{sf_mask}\n"
2158 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2159 " lstar=%016VR{lstar}\n"
2160 " star=%016VR{star} cstar=%016VR{cstar}\n"
2161 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2162 );
2163
2164 char szInstr[256];
2165 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
2166 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2167 szInstr, sizeof(szInstr), NULL);
2168 Log3(("%s%s\n", szRegs, szInstr));
2169 }
2170#endif /* LOG_ENABLED */
2171
2172 /*
2173 * Call the mode specific worker function.
2174 */
2175 VBOXSTRICTRC rcStrict;
2176 if (!(pCtx->cr0 & X86_CR0_PE))
2177 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2178 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2179 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2180 else if (!pCtx->eflags.Bits.u1VM)
2181 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2182 else
2183 rcStrict = iemRaiseXcptOrIntInV8086Mode(pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2184
2185 /*
2186 * Unwind.
2187 */
2188 pIemCpu->cXcptRecursions--;
2189 pIemCpu->uCurXcpt = uPrevXcpt;
2190 pIemCpu->fCurXcpt = fPrevXcpt;
2191 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv\n",
2192 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs, pCtx->rip, pCtx->ss, pCtx->esp));
2193 return rcStrict;
2194}
2195
2196
2197/** \#DE - 00. */
2198DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
2199{
2200 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2201}
2202
2203
2204/** \#DB - 01. */
2205DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
2206{
2207 /** @todo set/clear RF. */
2208 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2209}
2210
2211
2212/** \#UD - 06. */
2213DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
2214{
2215 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2216}
2217
2218
2219/** \#NM - 07. */
2220DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
2221{
2222 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2223}
2224
2225
2226#ifdef SOME_UNUSED_FUNCTION
2227/** \#TS(err) - 0a. */
2228DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
2229{
2230 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2231}
2232#endif
2233
2234
2235/** \#TS(tr) - 0a. */
2236DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
2237{
2238 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2239 pIemCpu->CTX_SUFF(pCtx)->tr, 0);
2240}
2241
2242
2243/** \#NP(err) - 0b. */
2244DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
2245{
2246 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2247}
2248
2249
2250/** \#NP(seg) - 0b. */
2251DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
2252{
2253 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2254 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
2255}
2256
2257
2258/** \#NP(sel) - 0b. */
2259DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
2260{
2261 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2262 uSel & ~X86_SEL_RPL, 0);
2263}
2264
2265
2266/** \#SS(seg) - 0c. */
2267DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
2268{
2269 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2270 uSel & ~X86_SEL_RPL, 0);
2271}
2272
2273
2274/** \#GP(n) - 0d. */
2275DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
2276{
2277 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2278}
2279
2280
2281/** \#GP(0) - 0d. */
2282DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
2283{
2284 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2285}
2286
2287
2288/** \#GP(sel) - 0d. */
2289DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
2290{
2291 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2292 Sel & ~X86_SEL_RPL, 0);
2293}
2294
2295
2296/** \#GP(0) - 0d. */
2297DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
2298{
2299 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2300}
2301
2302
2303/** \#GP(sel) - 0d. */
2304DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
2305{
2306 NOREF(iSegReg); NOREF(fAccess);
2307 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
2308 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2309}
2310
2311
2312/** \#GP(sel) - 0d. */
2313DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
2314{
2315 NOREF(Sel);
2316 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2317}
2318
2319
2320/** \#GP(sel) - 0d. */
2321DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
2322{
2323 NOREF(iSegReg); NOREF(fAccess);
2324 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2325}
2326
2327
2328/** \#PF(n) - 0e. */
2329DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
2330{
2331 uint16_t uErr;
2332 switch (rc)
2333 {
2334 case VERR_PAGE_NOT_PRESENT:
2335 case VERR_PAGE_TABLE_NOT_PRESENT:
2336 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
2337 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
2338 uErr = 0;
2339 break;
2340
2341 default:
2342 AssertMsgFailed(("%Rrc\n", rc));
2343 case VERR_ACCESS_DENIED:
2344 uErr = X86_TRAP_PF_P;
2345 break;
2346
2347 /** @todo reserved */
2348 }
2349
2350 if (pIemCpu->uCpl == 3)
2351 uErr |= X86_TRAP_PF_US;
2352
2353 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
2354 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
2355 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
2356 uErr |= X86_TRAP_PF_ID;
2357
2358 /* Note! RW access callers reporting a WRITE protection fault will clear
2359 the READ flag before calling. So, read-modify-write accesses (RW)
2360 can safely be reported as READ faults. */
2361 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
2362 uErr |= X86_TRAP_PF_RW;
2363
2364 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
2365 uErr, GCPtrWhere);
2366}
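
/*
 * Worked example (illustrative): a CPL=3 write that failed with
 * VERR_ACCESS_DENIED on a present, read-only page and fAccess containing
 * only IEM_ACCESS_TYPE_WRITE yields, per the composition above,
 * uErr = X86_TRAP_PF_P | X86_TRAP_PF_US | X86_TRAP_PF_RW (0x7).
 */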
2367
2368
2369/** \#MF(0) - 10. */
2370DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
2371{
2372 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2373}
2374
2375
2376/** \#AC(0) - 11. */
2377DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
2378{
2379 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2380}
2381
2382
2383/**
2384 * Macro for calling iemCImplRaiseDivideError().
2385 *
2386 * This enables us to add/remove arguments and force different levels of
2387 * inlining as we wish.
2388 *
2389 * @return Strict VBox status code.
2390 */
2391#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
2392IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
2393{
2394 NOREF(cbInstr);
2395 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2396}
2397
2398
2399/**
2400 * Macro for calling iemCImplRaiseInvalidLockPrefix().
2401 *
2402 * This enables us to add/remove arguments and force different levels of
2403 * inlining as we wish.
2404 *
2405 * @return Strict VBox status code.
2406 */
2407#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
2408IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
2409{
2410 NOREF(cbInstr);
2411 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2412}
2413
2414
2415/**
2416 * Macro for calling iemCImplRaiseInvalidOpcode().
2417 *
2418 * This enables us to add/remove arguments and force different levels of
2419 * inlining as we wish.
2420 *
2421 * @return Strict VBox status code.
2422 */
2423#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
2424IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
2425{
2426 NOREF(cbInstr);
2427 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2428}
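
/*
 * Usage sketch (illustrative only; the decoder name below is hypothetical):
 * an opcode decoder that needs to raise \#UD simply returns the macro,
 * deferring the work to the C implementation above.
 */
#if 0
FNIEMOP_DEF(iemOp_ExampleInvalid) /* hypothetical decoder */
{
    return IEMOP_RAISE_INVALID_OPCODE();
}
#endif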
2429
2430
2431/** @} */
2432
2433
2434/*
2435 *
2436 * Helper routines.
2437 * Helper routines.
2438 * Helper routines.
2439 *
2440 */
2441
2442/**
2443 * Recalculates the effective operand size.
2444 *
2445 * @param pIemCpu The IEM state.
2446 */
2447static void iemRecalEffOpSize(PIEMCPU pIemCpu)
2448{
2449 switch (pIemCpu->enmCpuMode)
2450 {
2451 case IEMMODE_16BIT:
2452 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
2453 break;
2454 case IEMMODE_32BIT:
2455 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
2456 break;
2457 case IEMMODE_64BIT:
2458 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
2459 {
2460 case 0:
2461 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
2462 break;
2463 case IEM_OP_PRF_SIZE_OP:
2464 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
2465 break;
2466 case IEM_OP_PRF_SIZE_REX_W:
2467 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
2468 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
2469 break;
2470 }
2471 break;
2472 default:
2473 AssertFailed();
2474 }
2475}
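
/*
 * Illustrative note (derived from the switch above): in 64-bit mode a 66h
 * operand-size prefix alone selects 16-bit operands, but REX.W always wins,
 * so REX.W + 66h still gives 64-bit operands; with neither prefix the
 * default operand size (normally 32-bit) is used.
 */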
2476
2477
2478/**
2479 * Sets the default operand size to 64-bit and recalculates the effective
2480 * operand size.
2481 *
2482 * @param pIemCpu The IEM state.
2483 */
2484static void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
2485{
2486 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2487 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
2488 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
2489 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
2490 else
2491 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
2492}
2493
2494
2495/*
2496 *
2497 * Common opcode decoders.
2498 * Common opcode decoders.
2499 * Common opcode decoders.
2500 *
2501 */
2502#include <iprt/mem.h>
2503
2504/**
2505 * Used to add extra details about a stub case.
2506 * @param pIemCpu The IEM per CPU state.
2507 */
2508static void iemOpStubMsg2(PIEMCPU pIemCpu)
2509{
2510 PVM pVM = IEMCPU_TO_VM(pIemCpu);
2511 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2512 char szRegs[4096];
2513 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2514 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2515 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2516 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2517 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2518 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2519 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2520 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2521 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2522 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2523 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2524 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2525 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2526 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2527 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2528 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2529 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2530 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2531 " efer=%016VR{efer}\n"
2532 " pat=%016VR{pat}\n"
2533 " sf_mask=%016VR{sf_mask}\n"
2534 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2535 " lstar=%016VR{lstar}\n"
2536 " star=%016VR{star} cstar=%016VR{cstar}\n"
2537 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2538 );
2539
2540 char szInstr[256];
2541 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
2542 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2543 szInstr, sizeof(szInstr), NULL);
2544
2545 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
2546}
2547
2548
2549/** Stubs an opcode. */
2550#define FNIEMOP_STUB(a_Name) \
2551 FNIEMOP_DEF(a_Name) \
2552 { \
2553 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
2554 iemOpStubMsg2(pIemCpu); \
2555 RTAssertPanic(); \
2556 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
2557 } \
2558 typedef int ignore_semicolon
2559
2560/** Stubs an opcode. */
2561#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
2562 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
2563 { \
2564 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
2565 iemOpStubMsg2(pIemCpu); \
2566 RTAssertPanic(); \
2567 NOREF(a_Name0); \
2568 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
2569 } \
2570 typedef int ignore_semicolon
2571
2572
2573
2574/** @name Register Access.
2575 * @{
2576 */
2577
2578/**
2579 * Gets a reference (pointer) to the specified hidden segment register.
2580 *
2581 * @returns Hidden register reference.
2582 * @param pIemCpu The per CPU data.
2583 * @param iSegReg The segment register.
2584 */
2585static PCPUMSELREGHID iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
2586{
2587 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2588 switch (iSegReg)
2589 {
2590 case X86_SREG_ES: return &pCtx->esHid;
2591 case X86_SREG_CS: return &pCtx->csHid;
2592 case X86_SREG_SS: return &pCtx->ssHid;
2593 case X86_SREG_DS: return &pCtx->dsHid;
2594 case X86_SREG_FS: return &pCtx->fsHid;
2595 case X86_SREG_GS: return &pCtx->gsHid;
2596 }
2597 AssertFailedReturn(NULL);
2598}
2599
2600
2601/**
2602 * Gets a reference (pointer) to the specified segment register (the selector
2603 * value).
2604 *
2605 * @returns Pointer to the selector variable.
2606 * @param pIemCpu The per CPU data.
2607 * @param iSegReg The segment register.
2608 */
2609static uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
2610{
2611 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2612 switch (iSegReg)
2613 {
2614 case X86_SREG_ES: return &pCtx->es;
2615 case X86_SREG_CS: return &pCtx->cs;
2616 case X86_SREG_SS: return &pCtx->ss;
2617 case X86_SREG_DS: return &pCtx->ds;
2618 case X86_SREG_FS: return &pCtx->fs;
2619 case X86_SREG_GS: return &pCtx->gs;
2620 }
2621 AssertFailedReturn(NULL);
2622}
2623
2624
2625/**
2626 * Fetches the selector value of a segment register.
2627 *
2628 * @returns The selector value.
2629 * @param pIemCpu The per CPU data.
2630 * @param iSegReg The segment register.
2631 */
2632static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
2633{
2634 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2635 switch (iSegReg)
2636 {
2637 case X86_SREG_ES: return pCtx->es;
2638 case X86_SREG_CS: return pCtx->cs;
2639 case X86_SREG_SS: return pCtx->ss;
2640 case X86_SREG_DS: return pCtx->ds;
2641 case X86_SREG_FS: return pCtx->fs;
2642 case X86_SREG_GS: return pCtx->gs;
2643 }
2644 AssertFailedReturn(0xffff);
2645}
2646
2647
2648/**
2649 * Gets a reference (pointer) to the specified general register.
2650 *
2651 * @returns Register reference.
2652 * @param pIemCpu The per CPU data.
2653 * @param iReg The general register.
2654 */
2655static void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
2656{
2657 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2658 switch (iReg)
2659 {
2660 case X86_GREG_xAX: return &pCtx->rax;
2661 case X86_GREG_xCX: return &pCtx->rcx;
2662 case X86_GREG_xDX: return &pCtx->rdx;
2663 case X86_GREG_xBX: return &pCtx->rbx;
2664 case X86_GREG_xSP: return &pCtx->rsp;
2665 case X86_GREG_xBP: return &pCtx->rbp;
2666 case X86_GREG_xSI: return &pCtx->rsi;
2667 case X86_GREG_xDI: return &pCtx->rdi;
2668 case X86_GREG_x8: return &pCtx->r8;
2669 case X86_GREG_x9: return &pCtx->r9;
2670 case X86_GREG_x10: return &pCtx->r10;
2671 case X86_GREG_x11: return &pCtx->r11;
2672 case X86_GREG_x12: return &pCtx->r12;
2673 case X86_GREG_x13: return &pCtx->r13;
2674 case X86_GREG_x14: return &pCtx->r14;
2675 case X86_GREG_x15: return &pCtx->r15;
2676 }
2677 AssertFailedReturn(NULL);
2678}
2679
2680
2681/**
2682 * Gets a reference (pointer) to the specified 8-bit general register.
2683 *
2684 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
2685 *
2686 * @returns Register reference.
2687 * @param pIemCpu The per CPU data.
2688 * @param iReg The register.
2689 */
2690static uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
2691{
2692 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
2693 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
2694
2695 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
2696 if (iReg >= 4)
2697 pu8Reg++;
2698 return pu8Reg;
2699}
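
/*
 * Illustrative note (standard x86 encoding, matching the code above):
 * without a REX prefix, register indices 4-7 address AH, CH, DH and BH,
 * i.e. byte 1 of rAX/rCX/rDX/rBX, hence the 'iReg & 3' plus the pointer
 * bump; with any REX prefix they address SPL, BPL, SIL and DIL instead.
 */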
2700
2701
2702/**
2703 * Fetches the value of an 8-bit general register.
2704 *
2705 * @returns The register value.
2706 * @param pIemCpu The per CPU data.
2707 * @param iReg The register.
2708 */
2709static uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
2710{
2711 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
2712 return *pbSrc;
2713}
2714
2715
2716/**
2717 * Fetches the value of a 16-bit general register.
2718 *
2719 * @returns The register value.
2720 * @param pIemCpu The per CPU data.
2721 * @param iReg The register.
2722 */
2723static uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
2724{
2725 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
2726}
2727
2728
2729/**
2730 * Fetches the value of a 32-bit general register.
2731 *
2732 * @returns The register value.
2733 * @param pIemCpu The per CPU data.
2734 * @param iReg The register.
2735 */
2736static uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
2737{
2738 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
2739}
2740
2741
2742/**
2743 * Fetches the value of a 64-bit general register.
2744 *
2745 * @returns The register value.
2746 * @param pIemCpu The per CPU data.
2747 * @param iReg The register.
2748 */
2749static uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
2750{
2751 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
2752}
2753
2754
2755/**
2756 * Checks whether the FPU state is in FXSAVE format or not.
2757 *
2758 * @returns true if it is, false if it's in FNSAVE format.
2759 * @param pIemCpu The IEM per CPU data.
2760 */
2761DECLINLINE(bool) iemFRegIsFxSaveFormat(PIEMCPU pIemCpu)
2762{
2763#ifdef RT_ARCH_AMD64
2764 NOREF(pIemCpu);
2765 return true;
2766#else
2767 NOREF(pIemCpu); /// @todo return pVCpu->pVMR3->cpum.s.CPUFeatures.edx.u1FXSR;
2768 return true;
2769#endif
2770}
2771
2772
2773/**
2774 * Gets the FPU status word.
2775 *
2776 * @returns FPU status word
2777 * @param pIemCpu The per CPU data.
2778 */
2779static uint16_t iemFRegFetchFsw(PIEMCPU pIemCpu)
2780{
2781 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2782 uint16_t u16Fsw;
2783 if (iemFRegIsFxSaveFormat(pIemCpu))
2784 u16Fsw = pCtx->fpu.FSW;
2785 else
2786 {
2787 PX86FPUSTATE pFpu = (PX86FPUSTATE)&pCtx->fpu;
2788 u16Fsw = pFpu->FSW;
2789 }
2790 return u16Fsw;
2791}
2792
2793/**
2794 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
2795 *
2796 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2797 * segment limit.
2798 *
2799 * @param pIemCpu The per CPU data.
2800 * @param offNextInstr The offset of the next instruction.
2801 */
2802static VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
2803{
2804 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2805 switch (pIemCpu->enmEffOpSize)
2806 {
2807 case IEMMODE_16BIT:
2808 {
2809 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
2810 if ( uNewIp > pCtx->csHid.u32Limit
2811 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
2812 return iemRaiseGeneralProtectionFault0(pIemCpu);
2813 pCtx->rip = uNewIp;
2814 break;
2815 }
2816
2817 case IEMMODE_32BIT:
2818 {
2819 Assert(pCtx->rip <= UINT32_MAX);
2820 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2821
2822 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
2823 if (uNewEip > pCtx->csHid.u32Limit)
2824 return iemRaiseGeneralProtectionFault0(pIemCpu);
2825 pCtx->rip = uNewEip;
2826 break;
2827 }
2828
2829 case IEMMODE_64BIT:
2830 {
2831 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2832
2833 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
2834 if (!IEM_IS_CANONICAL(uNewRip))
2835 return iemRaiseGeneralProtectionFault0(pIemCpu);
2836 pCtx->rip = uNewRip;
2837 break;
2838 }
2839
2840 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2841 }
2842
2843 return VINF_SUCCESS;
2844}
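
/*
 * Worked example (illustrative): a 16-bit 'jmp short $' (EB FE) has
 * offOpcode = 2 and offNextInstr = -2, so uNewIp = ip + (-2) + 2 = ip,
 * i.e. the instruction jumps to itself as expected.
 */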
2845
2846
2847/**
2848 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
2849 *
2850 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2851 * segment limit.
2852 *
2853 * @returns Strict VBox status code.
2854 * @param pIemCpu The per CPU data.
2855 * @param offNextInstr The offset of the next instruction.
2856 */
2857static VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
2858{
2859 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2860 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
2861
2862 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
2863 if ( uNewIp > pCtx->csHid.u32Limit
2864 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
2865 return iemRaiseGeneralProtectionFault0(pIemCpu);
2866 /** @todo Test 16-bit jump in 64-bit mode. */
2867 pCtx->rip = uNewIp;
2868
2869 return VINF_SUCCESS;
2870}
2871
2872
2873/**
2874 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
2875 *
2876 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2877 * segment limit.
2878 *
2879 * @returns Strict VBox status code.
2880 * @param pIemCpu The per CPU data.
2881 * @param offNextInstr The offset of the next instruction.
2882 */
2883static VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
2884{
2885 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2886 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
2887
2888 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
2889 {
2890 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2891
2892 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
2893 if (uNewEip > pCtx->csHid.u32Limit)
2894 return iemRaiseGeneralProtectionFault0(pIemCpu);
2895 pCtx->rip = uNewEip;
2896 }
2897 else
2898 {
2899 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2900
2901 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
2902 if (!IEM_IS_CANONICAL(uNewRip))
2903 return iemRaiseGeneralProtectionFault0(pIemCpu);
2904 pCtx->rip = uNewRip;
2905 }
2906 return VINF_SUCCESS;
2907}
2908
2909
2910/**
2911 * Performs a near jump to the specified address.
2912 *
2913 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2914 * segment limit.
2915 *
2916 * @param pIemCpu The per CPU data.
2917 * @param uNewRip The new RIP value.
2918 */
2919static VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
2920{
2921 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2922 switch (pIemCpu->enmEffOpSize)
2923 {
2924 case IEMMODE_16BIT:
2925 {
2926 Assert(uNewRip <= UINT16_MAX);
2927 if ( uNewRip > pCtx->csHid.u32Limit
2928 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
2929 return iemRaiseGeneralProtectionFault0(pIemCpu);
2930 /** @todo Test 16-bit jump in 64-bit mode. */
2931 pCtx->rip = uNewRip;
2932 break;
2933 }
2934
2935 case IEMMODE_32BIT:
2936 {
2937 Assert(uNewRip <= UINT32_MAX);
2938 Assert(pCtx->rip <= UINT32_MAX);
2939 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2940
2941 if (uNewRip > pCtx->csHid.u32Limit)
2942 return iemRaiseGeneralProtectionFault0(pIemCpu);
2943 pCtx->rip = uNewRip;
2944 break;
2945 }
2946
2947 case IEMMODE_64BIT:
2948 {
2949 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2950
2951 if (!IEM_IS_CANONICAL(uNewRip))
2952 return iemRaiseGeneralProtectionFault0(pIemCpu);
2953 pCtx->rip = uNewRip;
2954 break;
2955 }
2956
2957 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2958 }
2959
2960 return VINF_SUCCESS;
2961}
2962
2963
2964/**
2965 * Get the address of the top of the stack.
2966 *
2967 * @param pCtx The CPU context which SP/ESP/RSP should be
2968 * read.
2969 */
2970DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCCPUMCTX pCtx)
2971{
2972 if (pCtx->ssHid.Attr.n.u1Long)
2973 return pCtx->rsp;
2974 if (pCtx->ssHid.Attr.n.u1DefBig)
2975 return pCtx->esp;
2976 return pCtx->sp;
2977}
2978
2979
2980/**
2981 * Updates the RIP/EIP/IP to point to the next instruction.
2982 *
2983 * @param pIemCpu The per CPU data.
2984 * @param cbInstr The number of bytes to add.
2985 */
2986static void iemRegAddToRip(PIEMCPU pIemCpu, uint8_t cbInstr)
2987{
2988 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2989 switch (pIemCpu->enmCpuMode)
2990 {
2991 case IEMMODE_16BIT:
2992 Assert(pCtx->rip <= UINT16_MAX);
2993 pCtx->eip += cbInstr;
2994 pCtx->eip &= UINT32_C(0xffff);
2995 break;
2996
2997 case IEMMODE_32BIT:
2998 pCtx->eip += cbInstr;
2999 Assert(pCtx->rip <= UINT32_MAX);
3000 break;
3001
3002 case IEMMODE_64BIT:
3003 pCtx->rip += cbInstr;
3004 break;
3005 default: AssertFailed();
3006 }
3007}
3008
3009
3010/**
3011 * Updates the RIP/EIP/IP to point to the next instruction.
3012 *
3013 * @param pIemCpu The per CPU data.
3014 */
3015static void iemRegUpdateRip(PIEMCPU pIemCpu)
3016{
3017 return iemRegAddToRip(pIemCpu, pIemCpu->offOpcode);
3018}
3019
3020
3021/**
3022 * Adds to the stack pointer.
3023 *
3024 * @param pCtx The CPU context which SP/ESP/RSP should be
3025 * updated.
3026 * @param cbToAdd The number of bytes to add.
3027 */
3028DECLINLINE(void) iemRegAddToRsp(PCPUMCTX pCtx, uint8_t cbToAdd)
3029{
3030 if (pCtx->ssHid.Attr.n.u1Long)
3031 pCtx->rsp += cbToAdd;
3032 else if (pCtx->ssHid.Attr.n.u1DefBig)
3033 pCtx->esp += cbToAdd;
3034 else
3035 pCtx->sp += cbToAdd;
3036}
3037
3038
3039/**
3040 * Subtracts from the stack pointer.
3041 *
3042 * @param pCtx The CPU context which SP/ESP/RSP should be
3043 * updated.
3044 * @param cbToSub The number of bytes to subtract.
3045 */
3046DECLINLINE(void) iemRegSubFromRsp(PCPUMCTX pCtx, uint8_t cbToSub)
3047{
3048 if (pCtx->ssHid.Attr.n.u1Long)
3049 pCtx->rsp -= cbToSub;
3050 else if (pCtx->ssHid.Attr.n.u1DefBig)
3051 pCtx->esp -= cbToSub;
3052 else
3053 pCtx->sp -= cbToSub;
3054}
3055
3056
3057/**
3058 * Adds to the temporary stack pointer.
3059 *
3060 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3061 * @param cbToAdd The number of bytes to add.
3062 * @param pCtx Where to get the current stack mode.
3063 */
3064DECLINLINE(void) iemRegAddToRspEx(PRTUINT64U pTmpRsp, uint8_t cbToAdd, PCCPUMCTX pCtx)
3065{
3066 if (pCtx->ssHid.Attr.n.u1Long)
3067 pTmpRsp->u += cbToAdd;
3068 else if (pCtx->ssHid.Attr.n.u1DefBig)
3069 pTmpRsp->DWords.dw0 += cbToAdd;
3070 else
3071 pTmpRsp->Words.w0 += cbToAdd;
3072}
3073
3074
3075/**
3076 * Subtracts from the temporary stack pointer.
3077 *
3078 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3079 * @param cbToSub The number of bytes to subtract.
3080 * @param pCtx Where to get the current stack mode.
3081 */
3082DECLINLINE(void) iemRegSubFromRspEx(PRTUINT64U pTmpRsp, uint8_t cbToSub, PCCPUMCTX pCtx)
3083{
3084 if (pCtx->ssHid.Attr.n.u1Long)
3085 pTmpRsp->u -= cbToSub;
3086 else if (pCtx->ssHid.Attr.n.u1DefBig)
3087 pTmpRsp->DWords.dw0 -= cbToSub;
3088 else
3089 pTmpRsp->Words.w0 -= cbToSub;
3090}
3091
3092
3093/**
3094 * Calculates the effective stack address for a push of the specified size as
3095 * well as the new RSP value (upper bits may be masked).
3096 *
3097 * @returns Effective stack address for the push.
3098 * @param pCtx Where to get the current stack mode.
3099 * @param cbItem The size of the stack item to push.
3100 * @param puNewRsp Where to return the new RSP value.
3101 */
3102DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3103{
3104 RTUINT64U uTmpRsp;
3105 RTGCPTR GCPtrTop;
3106 uTmpRsp.u = pCtx->rsp;
3107
3108 if (pCtx->ssHid.Attr.n.u1Long)
3109 GCPtrTop = uTmpRsp.u -= cbItem;
3110 else if (pCtx->ssHid.Attr.n.u1DefBig)
3111 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
3112 else
3113 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
3114 *puNewRsp = uTmpRsp.u;
3115 return GCPtrTop;
3116}
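
/*
 * Illustrative note (derived from the code above): for a 16-bit stack
 * (SS neither long nor default-big), pushing a 2-byte item only decrements
 * SP; the returned address is the zero-extended new SP while the upper bits
 * of RSP are carried through unchanged into *puNewRsp.
 */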
3117
3118
3119/**
3120 * Gets the current stack pointer and calculates the value after a pop of the
3121 * specified size.
3122 *
3123 * @returns Current stack pointer.
3124 * @param pCtx Where to get the current stack mode.
3125 * @param cbItem The size of the stack item to pop.
3126 * @param puNewRsp Where to return the new RSP value.
3127 */
3128DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3129{
3130 RTUINT64U uTmpRsp;
3131 RTGCPTR GCPtrTop;
3132 uTmpRsp.u = pCtx->rsp;
3133
3134 if (pCtx->ssHid.Attr.n.u1Long)
3135 {
3136 GCPtrTop = uTmpRsp.u;
3137 uTmpRsp.u += cbItem;
3138 }
3139 else if (pCtx->ssHid.Attr.n.u1DefBig)
3140 {
3141 GCPtrTop = uTmpRsp.DWords.dw0;
3142 uTmpRsp.DWords.dw0 += cbItem;
3143 }
3144 else
3145 {
3146 GCPtrTop = uTmpRsp.Words.w0;
3147 uTmpRsp.Words.w0 += cbItem;
3148 }
3149 *puNewRsp = uTmpRsp.u;
3150 return GCPtrTop;
3151}
3152
3153
3154/**
3155 * Calculates the effective stack address for a push of the specified size as
3156 * well as the new temporary RSP value (upper bits may be masked).
3157 *
3158 * @returns Effective stack address for the push.
3159 * @param pTmpRsp The temporary stack pointer. This is updated.
3160 * @param cbItem The size of the stack item to push.
3161 * @param pCtx Where to get the current stack mode.
3162 */
3163DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
3164{
3165 RTGCPTR GCPtrTop;
3166
3167 if (pCtx->ssHid.Attr.n.u1Long)
3168 GCPtrTop = pTmpRsp->u -= cbItem;
3169 else if (pCtx->ssHid.Attr.n.u1DefBig)
3170 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
3171 else
3172 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
3173 return GCPtrTop;
3174}
3175
3176
3177/**
3178 * Gets the effective stack address for a pop of the specified size and
3179 * calculates and updates the temporary RSP.
3180 *
3181 * @returns Current stack pointer.
3182 * @param pTmpRsp The temporary stack pointer. This is updated.
3183 * @param pCtx Where to get the current stack mode.
3184 * @param cbItem The size of the stack item to pop.
3185 */
3186DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
3187{
3188 RTGCPTR GCPtrTop;
3189 if (pCtx->ssHid.Attr.n.u1Long)
3190 {
3191 GCPtrTop = pTmpRsp->u;
3192 pTmpRsp->u += cbItem;
3193 }
3194 else if (pCtx->ssHid.Attr.n.u1DefBig)
3195 {
3196 GCPtrTop = pTmpRsp->DWords.dw0;
3197 pTmpRsp->DWords.dw0 += cbItem;
3198 }
3199 else
3200 {
3201 GCPtrTop = pTmpRsp->Words.w0;
3202 pTmpRsp->Words.w0 += cbItem;
3203 }
3204 return GCPtrTop;
3205}
3206
3207
3208/**
3209 * Checks if an Intel CPUID feature bit is set.
3210 *
3211 * @returns true / false.
3212 *
3213 * @param pIemCpu The IEM per CPU data.
3214 * @param fEdx The EDX bit to test, or 0 if ECX.
3215 * @param fEcx The ECX bit to test, or 0 if EDX.
3216 * @remarks Used via IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX,
3217 * IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX and others.
3218 */
3219static bool iemRegIsIntelCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
3220{
3221 uint32_t uEax, uEbx, uEcx, uEdx;
3222 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x00000001, &uEax, &uEbx, &uEcx, &uEdx);
3223 return (fEcx && (uEcx & fEcx))
3224 || (fEdx && (uEdx & fEdx));
3225}
3226
3227
3228/**
3229 * Checks if an AMD CPUID feature bit is set.
3230 *
3231 * @returns true / false.
3232 *
3233 * @param pIemCpu The IEM per CPU data.
3234 * @param fEdx The EDX bit to test, or 0 if ECX.
3235 * @param fEcx The ECX bit to test, or 0 if EDX.
3236 * @remarks Used via IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX,
3237 * IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX and others.
3238 */
3239static bool iemRegIsAmdCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
3240{
3241 uint32_t uEax, uEbx, uEcx, uEdx;
3242 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x80000001, &uEax, &uEbx, &uEcx, &uEdx);
3243 return (fEcx && (uEcx & fEcx))
3244 || (fEdx && (uEdx & fEdx));
3245}
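
/*
 * Usage sketch (illustrative only): decoders are expected to test features
 * through the wrapper macros named in the remarks above. Inside a decoder
 * one might write something like the following; the exact macro shape and
 * the CPUID bit define are assumptions here.
 */
#if 0
    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2)) /* assumed define */
        return IEMOP_RAISE_INVALID_OPCODE();
#endif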
3246
3247/** @} */
3248
3249
3250/** @name FPU access and helpers.
3251 *
3252 * @{
3253 */
3254
3255
3256/**
3257 * Hook for preparing to use the host FPU.
3258 *
3259 * This is necessary in ring-0 and raw-mode context.
3260 *
3261 * @param pIemCpu The IEM per CPU data.
3262 */
3263DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
3264{
3265#ifdef IN_RING3
3266 NOREF(pIemCpu);
3267#else
3268# error "Implement me"
3269#endif
3270}
3271
3272
3273/**
3274 * Stores a QNaN value into a FPU register.
3275 *
3276 * @param pReg Pointer to the register.
3277 */
3278DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
3279{
3280 pReg->au32[0] = UINT32_C(0x00000000);
3281 pReg->au32[1] = UINT32_C(0xc0000000);
3282 pReg->au16[4] = UINT16_C(0xffff);
3283}
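
/*
 * Illustrative note: the pattern stored above is the 80-bit "real
 * indefinite" QNaN (sign = 1, exponent = 0x7FFF, mantissa =
 * 0xC000000000000000), i.e. the value FFFF:C0000000:00000000.
 */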
3284
3285
3286/**
3287 * Updates the FOP, FPU.CS and FPUIP registers.
3288 *
3289 * @param pIemCpu The IEM per CPU data.
3290 * @param pCtx The CPU context.
3291 */
3292DECLINLINE(void) iemFpuUpdateOpcodeAndIP(PIEMCPU pIemCpu, PCPUMCTX pCtx)
3293{
3294 pCtx->fpu.FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
3295 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
3296 /** @todo FPU.CS and FPUIP need to be kept separately. */
3297 pCtx->fpu.CS = pCtx->cs;
3298 pCtx->fpu.FPUIP = pCtx->rip;
3299}
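
/*
 * Worked example (illustrative): for FADD ST(0),ST(1), encoded D8 C1, the
 * expression above gives FOP = 0xC1 | ((0xD8 & 7) << 8) = 0x0C1, i.e. the
 * 11-bit FPU opcode formed from the low three bits of the escape byte and
 * the ModRM byte.
 */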
3300
3301
3302/**
3303 * Updates the FPU.DS and FPUDP registers.
3304 *
3305 * @param pIemCpu The IEM per CPU data.
3306 * @param pCtx The CPU context.
3307 * @param iEffSeg The effective segment register.
3308 * @param GCPtrEff The effective address relative to @a iEffSeg.
3309 */
3310DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3311{
3312 RTSEL sel;
3313 switch (iEffSeg)
3314 {
3315 case X86_SREG_DS: sel = pCtx->ds; break;
3316 case X86_SREG_SS: sel = pCtx->ss; break;
3317 case X86_SREG_CS: sel = pCtx->cs; break;
3318 case X86_SREG_ES: sel = pCtx->es; break;
3319 case X86_SREG_FS: sel = pCtx->fs; break;
3320 case X86_SREG_GS: sel = pCtx->gs; break;
3321 default:
3322 AssertMsgFailed(("%d\n", iEffSeg));
3323 sel = pCtx->ds;
3324 }
3325 /** @todo FPU.DS and FPUDP need to be kept separately. */
3326 pCtx->fpu.DS = sel;
3327 pCtx->fpu.FPUDP = GCPtrEff;
3328}
3329
3330
3331/**
3332 * Rotates the stack registers in the push direction.
3333 *
3334 * @param pCtx The CPU context.
3335 * @remarks This is a complete waste of time, but fxsave stores the registers in
3336 * stack order.
3337 */
3338DECLINLINE(void) iemFpuRotateStackPush(PCPUMCTX pCtx)
3339{
3340 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[7].r80;
3341 pCtx->fpu.aRegs[7].r80 = pCtx->fpu.aRegs[6].r80;
3342 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[5].r80;
3343 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[4].r80;
3344 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[3].r80;
3345 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[2].r80;
3346 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[1].r80;
3347 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[0].r80;
3348 pCtx->fpu.aRegs[0].r80 = r80Tmp;
3349}
3350
3351
3352/**
3353 * Rotates the stack registers in the pop direction.
3354 *
3355 * @param pCtx The CPU context.
3356 * @remarks This is a complete waste of time, but fxsave stores the registers in
3357 * stack order.
3358 */
3359DECLINLINE(void) iemFpuRotateStackPop(PCPUMCTX pCtx)
3360{
3361 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[0].r80;
3362 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[1].r80;
3363 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[2].r80;
3364 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[3].r80;
3365 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[4].r80;
3366 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[5].r80;
3367 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[6].r80;
3368 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[7].r80;
3369 pCtx->fpu.aRegs[7].r80 = r80Tmp;
3370}
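
/*
 * Illustrative note on the two rotations above: aRegs[] is kept in stack
 * order (aRegs[0] is always ST(0)), so after FSW.TOP has been moved for a
 * push or pop the array is rotated one slot to keep that invariant, as the
 * remarks note for fxsave compatibility.
 */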
3371
3372
3373#if 0
3374/**
3375 *
3376 * @param pIemCpu The IEM per CPU data.
3377 * @param pResult The FPU operation result to push.
3378 * @param pCtx The CPU context.
3379 * @param iDstReg The destination register,
3380 * @param cStackAdj The stack adjustment on successful operation.
3381 * Note that this is an unsigned value.
3382 * @param fFlags Flags.
3383 */
3384static void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PCPUMCTX pCtx, uint16_t iDstReg,
3385 uint8_t cStackAdj, )
3386{
3387 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3388 iemFpuUpdateOpcodeAndIP(pIemCpu, pCtx);
3389
3390 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
3391 if (!(RT_BIT(iNewTop) & pCtx->fpu.FTW))
3392 {
3393 /* No stack error. */
3394 uint16_t fXcpts = (pResult->FSW & (X86_FSW_IE | X86_FSW_DE | X86_FSW_ZE | X86_FSW_OE | X86_FSW_UE | X86_FSW_PE))
3395 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_DM | X86_FCW_ZM | X86_FCW_OM | X86_FCW_UM | X86_FCW_PM));
3396 if (!fXcpts)
3397 {
3398 /* No unmasked exceptions, just store the result. */
3399 pCtx->fpu.FSW &= X86_FSW_TOP_MASK | X86_FSW_C0 | X86_FSW_C1 | X86_FSW_C2 | X86_FSW_C3;
3400 pCtx->fpu.FSW |= (iNewTop << X86_FSW_TOP_SHIFT) | (pResult->FSW & ~(X86_FSW_TOP_MASK | X86_FSW_B | X86_FSW_ES));
3401 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3402 pCtx->fpu.aRegs[7].r80 = pResult->r80Result;
3403 }
3404 else
3405 {
3406 AssertFailed();
3407 }
3408
3409 }
3410 else if (pCtx->fpu.FCW & X86_FCW_IM)
3411 {
3412 /* Masked stack overflow. */
3413 pCtx->fpu.FSW &= X86_FSW_TOP_MASK | X86_FSW_C0 | X86_FSW_C1 | X86_FSW_C2 | X86_FSW_C3;
3414 pCtx->fpu.FSW |= (iNewTop << X86_FSW_TOP_SHIFT) | X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
3415 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3416 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
3417 }
3418 else
3419 {
3420 /* Stack overflow exception. */
3421 pCtx->fpu.FSW &= X86_FSW_C0 | X86_FSW_C1 | X86_FSW_C2 | X86_FSW_C3;
3422 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
3423 return;
3424 }
3425
3426 iemFpuRotateStackPush(pCtx);
3427}
3428
3429
3430/**
3431 * Writes a FPU result to the FPU stack after inspecting the resulting
3432 * statuses.
3433 *
3434 * @param pIemCpu The IEM per CPU data.
3435 * @param pResult The FPU operation result to push.
3436 * @param iReg The stack relative FPU register number.
3437 */
3438static void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iReg)
3439{
3440 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3441 iemFpuUpdateOpcodeAndIP(pIemCpu, pCtx);
3442
3443 uint16_t iAbsReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iReg) & X86_FSW_TOP_SMASK;
3444
3445 uint16_t fXcpts = (pResult->FSW & (X86_FSW_IE | X86_FSW_DE | X86_FSW_ZE | X86_FSW_OE | X86_FSW_UE | X86_FSW_PE))
3446 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_DM | X86_FCW_ZM | X86_FCW_OM | X86_FCW_UM | X86_FCW_PM));
3447 if (!fXcpts)
3448 {
3449 /* No unmasked exceptions, just store the result. */
3450 pCtx->fpu.FSW &= X86_FSW_C0 | X86_FSW_C1 | X86_FSW_C2 | X86_FSW_C3;
3451 pCtx->fpu.FSW |= (pResult->FSW & ~(X86_FSW_TOP_MASK | X86_FSW_B | X86_FSW_ES));
3452 pCtx->fpu.FTW |= RT_BIT(iAbsReg);
3453 pCtx->fpu.aRegs[iReg].r80 = pResult->r80Result;
3454 }
3455 else
3456 {
3457 AssertFailed();
3458 }
3459}
3460#endif
3461
3462
3463/**
3464 * Pushes a FPU result onto the FPU stack after inspecting the resulting
3465 * statuses.
3466 *
3467 * @param pIemCpu The IEM per CPU data.
3468 * @param pResult The FPU operation result to push.
3469 */
3470static void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
3471{
3472 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3473 iemFpuUpdateOpcodeAndIP(pIemCpu, pCtx);
3474
3475 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
3476 if (!(RT_BIT(iNewTop) & pCtx->fpu.FTW))
3477 {
3478 /* No stack error. */
3479 uint16_t fXcpts = (pResult->FSW & (X86_FSW_IE | X86_FSW_DE | X86_FSW_ZE | X86_FSW_OE | X86_FSW_UE | X86_FSW_PE))
3480 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_DM | X86_FCW_ZM | X86_FCW_OM | X86_FCW_UM | X86_FCW_PM));
3481 if (!fXcpts)
3482 {
3483 /* No unmasked exceptions, just store the result. */
3484 pCtx->fpu.FSW &= X86_FSW_TOP_MASK | X86_FSW_C0 | X86_FSW_C1 | X86_FSW_C2 | X86_FSW_C3;
3485 pCtx->fpu.FSW |= (iNewTop << X86_FSW_TOP_SHIFT) | (pResult->FSW & ~(X86_FSW_TOP_MASK | X86_FSW_B | X86_FSW_ES));
3486 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3487 pCtx->fpu.aRegs[7].r80 = pResult->r80Result;
3488 }
3489 else
3490 {
3491 AssertFailed();
3492 }
3493
3494 }
3495 else if (pCtx->fpu.FCW & X86_FCW_IM)
3496 {
3497 /* Masked stack overflow. */
3498 pCtx->fpu.FSW &= X86_FSW_TOP_MASK | X86_FSW_C0 | X86_FSW_C1 | X86_FSW_C2 | X86_FSW_C3;
3499 pCtx->fpu.FSW |= (iNewTop << X86_FSW_TOP_SHIFT) | X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
3500 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3501 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
3502 }
3503 else
3504 {
3505 /* Stack overflow exception. */
3506 pCtx->fpu.FSW &= X86_FSW_C0 | X86_FSW_C1 | X86_FSW_C2 | X86_FSW_C3;
3507 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
3508 return;
3509 }
3510
3511 iemFpuRotateStackPush(pCtx);
3512}
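
/* A minimal usage sketch (illustration only, not built): an instruction
   worker fills in an IEMFPURESULT and lets iemFpuPushResult do the TOP
   arithmetic, FTW tagging and unmasked exception handling. The constant
   name below is hypothetical. */
#if 0
static void iemFpuExamplePushOne(PIEMCPU pIemCpu)
{
    IEMFPURESULT Result;
    Result.FSW       = 0;                /* the operation flagged no exceptions */
    Result.r80Result = g_r80One;         /* hypothetical 80-bit constant +1.0 */
    iemFpuPushResult(pIemCpu, &Result);  /* updates FSW.TOP, FTW and the new ST(0) */
}
#endif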
3513
3514
3515/**
3516 * Pushes a FPU result onto the FPU stack after inspecting the resulting
3517 * statuses, and sets FPU.DS and FPUDP.
3518 *
3519 * @param pIemCpu The IEM per CPU data.
3520 * @param pResult The FPU operation result to push.
3521 * @param iEffSeg The effective segment register.
3522 * @param GCPtrEff The effective address relative to @a iEffSeg.
3523 */
3524static void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3525{
3526 iemFpuUpdateDP(pIemCpu, pIemCpu->CTX_SUFF(pCtx), iEffSeg, GCPtrEff);
3527 iemFpuPushResult(pIemCpu, pResult);
3528}
3529
3530/** @} */
3531
3532
3533/** @name Memory access.
3534 *
3535 * @{
3536 */
3537
3538
3539/**
3540 * Checks if the given segment can be written to, raising the appropriate
3541 * exception if not.
3542 *
3543 * @returns VBox strict status code.
3544 *
3545 * @param pIemCpu The IEM per CPU data.
3546 * @param pHid Pointer to the hidden register.
3547 * @param iSegReg The register number.
3548 */
3549static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
3550{
3551 if (!pHid->Attr.n.u1Present)
3552 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
3553
3554 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
3555 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
3556 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
3557 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
3558
3559 /** @todo DPL/RPL/CPL? */
3560
3561 return VINF_SUCCESS;
3562}
3563
3564
3565/**
3566 * Checks if the given segment can be read from, raising the appropriate
3567 * exception if not.
3568 *
3569 * @returns VBox strict status code.
3570 *
3571 * @param pIemCpu The IEM per CPU data.
3572 * @param pHid Pointer to the hidden register.
3573 * @param iSegReg The register number.
3574 */
3575static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
3576{
3577 if (!pHid->Attr.n.u1Present)
3578 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
3579
3580 if ( (pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE
3581 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
3582 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
3583
3584 /** @todo DPL/RPL/CPL? */
3585
3586 return VINF_SUCCESS;
3587}
3588
3589
3590/**
3591 * Applies the segment limit, base and attributes.
3592 *
3593 * This may raise a \#GP or \#SS.
3594 *
3595 * @returns VBox strict status code.
3596 *
3597 * @param pIemCpu The IEM per CPU data.
3598 * @param fAccess The kind of access which is being performed.
3599 * @param iSegReg The index of the segment register to apply.
3600 * This is UINT8_MAX if none (for IDT, GDT, LDT,
3601 * TSS, ++).
 * @param cbMem The access size.
3602 * @param pGCPtrMem Pointer to the guest memory address to apply
3603 * segmentation to. Input and output parameter.
3604 */
3605static VBOXSTRICTRC iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg,
3606 size_t cbMem, PRTGCPTR pGCPtrMem)
3607{
3608 if (iSegReg == UINT8_MAX)
3609 return VINF_SUCCESS;
3610
3611 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
3612 switch (pIemCpu->enmCpuMode)
3613 {
3614 case IEMMODE_16BIT:
3615 case IEMMODE_32BIT:
3616 {
3617 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
3618 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
3619
3620 Assert(pSel->Attr.n.u1Present);
3621 Assert(pSel->Attr.n.u1DescType);
3622 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
3623 {
3624 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
3625 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
3626 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
3627
3628 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3629 {
3630 /** @todo CPL check. */
3631 }
3632
3633 /*
3634 * There are two kinds of data selectors, normal and expand down.
3635 */
3636 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
3637 {
3638 if ( GCPtrFirst32 > pSel->u32Limit
3639 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
3640 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
3641
3642 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
3643 }
3644 else
3645 {
3646 /** @todo implement expand down segments. */
3647 AssertFailed(/** @todo implement this */);
3648 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
3649 }
3650 }
3651 else
3652 {
3653
3654 /*
3655 * Code selectors can usually be used to read thru; writing is
3656 * only permitted in real and V8086 mode.
3657 */
3658 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
3659 || ( (fAccess & IEM_ACCESS_TYPE_READ)
3660 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
3661 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
3662 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
3663
3664 if ( GCPtrFirst32 > pSel->u32Limit
3665 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
3666 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
3667
3668 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3669 {
3670 /** @todo CPL check. */
3671 }
3672
3673 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
3674 }
3675 return VINF_SUCCESS;
3676 }
3677
3678 case IEMMODE_64BIT:
3679 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
3680 *pGCPtrMem += pSel->u64Base;
3681 return VINF_SUCCESS;
3682
3683 default:
3684 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
3685 }
3686}
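
/* For reference, a sketch of the code that could replace the AssertFailed()
   in the expand-down branch above (an assumption about how it could look,
   not the actual implementation): for expand-down data segments the limit
   is a lower bound and the upper bound depends on the D/B bit. */
#if 0
    uint32_t const uUpperBound = pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT16_MAX;
    if (   GCPtrFirst32 <= pSel->u32Limit
        || GCPtrLast32  >  uUpperBound
        || GCPtrLast32  <  GCPtrFirst32 /* wrap-around */)
        return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
    *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
#endif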
3687
3688
3689/**
3690 * Translates a virtual address to a physical address and checks if we
3691 * can access the page as specified.
3692 *
3693 * @param pIemCpu The IEM per CPU data.
3694 * @param GCPtrMem The virtual address.
3695 * @param fAccess The intended access.
3696 * @param pGCPhysMem Where to return the physical address.
3697 */
3698static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
3699 PRTGCPHYS pGCPhysMem)
3700{
3701 /** @todo Need a different PGM interface here. We're currently using
3702 * generic / REM interfaces. This won't cut it for R0 & RC. */
3703 RTGCPHYS GCPhys;
3704 uint64_t fFlags;
3705 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
3706 if (RT_FAILURE(rc))
3707 {
3708 /** @todo Check unassigned memory in unpaged mode. */
3709 /** @todo Reserved bits in page tables. Requires new PGM interface. */
3710 *pGCPhysMem = NIL_RTGCPHYS;
3711 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
3712 }
3713
3714 /* If the page is writable and does not have the no-exec bit set, all
3715 access is allowed. Otherwise we'll have to check more carefully... */
3716 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
3717 {
3718 /* Write to read only memory? */
3719 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
3720 && !(fFlags & X86_PTE_RW)
3721 && ( pIemCpu->uCpl != 0
3722 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
3723 {
3724 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page\n", GCPtrMem));
3725 *pGCPhysMem = NIL_RTGCPHYS;
3726 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
3727 }
3728
3729 /* Kernel memory accessed by userland? */
3730 if ( !(fFlags & X86_PTE_US)
3731 && pIemCpu->uCpl == 3
3732 && !(fAccess & IEM_ACCESS_WHAT_SYS))
3733 {
3734 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page\n", GCPtrMem));
3735 *pGCPhysMem = NIL_RTGCPHYS;
3736 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
3737 }
3738
3739 /* Executing non-executable memory? */
3740 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
3741 && (fFlags & X86_PTE_PAE_NX)
3742 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
3743 {
3744 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX\n", GCPtrMem));
3745 *pGCPhysMem = NIL_RTGCPHYS;
3746 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
3747 VERR_ACCESS_DENIED);
3748 }
3749 }
3750
3751 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
3752 *pGCPhysMem = GCPhys;
3753 return VINF_SUCCESS;
3754}
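
/* Worked example (for reference): a plain user page has X86_PTE_RW and
   X86_PTE_US set and X86_PTE_PAE_NX clear, so the mask test above yields
   (RW | US) == (RW | US) and the access is allowed without further checks.
   A read-only or supervisor page fails that fast path and is then vetted
   by the detailed write / CPL / NX checks. */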
3755
3756
3757
3758/**
3759 * Maps a physical page.
3760 *
3761 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
3762 * @param pIemCpu The IEM per CPU data.
3763 * @param GCPhysMem The physical address.
3764 * @param fAccess The intended access.
3765 * @param ppvMem Where to return the mapping address.
3766 */
3767static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem)
3768{
3769#ifdef IEM_VERIFICATION_MODE
3770 /* Force the alternative path so we can ignore writes. */
3771 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
3772 return VERR_PGM_PHYS_TLB_CATCH_ALL;
3773#endif
3774
3775 /*
3776 * If we can map the page without trouble, do a block processing
3777 * until the end of the current page.
3778 */
3779 /** @todo need some better API. */
3780 return PGMR3PhysTlbGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu),
3781 GCPhysMem,
3782 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
3783 ppvMem);
3784}
3785
3786
3787/**
3788 * Unmap a page previously mapped by iemMemPageMap.
3789 *
3790 * This is currently a dummy function.
3791 *
3792 * @param pIemCpu The IEM per CPU data.
3793 * @param GCPhysMem The physical address.
3794 * @param fAccess The intended access.
3795 * @param pvMem What iemMemPageMap returned.
3796 */
3797DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem)
3798{
3799 NOREF(pIemCpu);
3800 NOREF(GCPhysMem);
3801 NOREF(fAccess);
3802 NOREF(pvMem);
3803}
3804
3805
3806/**
3807 * Looks up a memory mapping entry.
3808 *
3809 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
3810 * @param pIemCpu The IEM per CPU data.
3811 * @param pvMem The memory address.
3812 * @param fAccess The access to match on.
3813 */
3814DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
3815{
3816 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
3817 if ( pIemCpu->aMemMappings[0].pv == pvMem
3818 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
3819 return 0;
3820 if ( pIemCpu->aMemMappings[1].pv == pvMem
3821 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
3822 return 1;
3823 if ( pIemCpu->aMemMappings[2].pv == pvMem
3824 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
3825 return 2;
3826 return VERR_NOT_FOUND;
3827}
3828
3829
3830/**
3831 * Finds a free memmap entry when using iNextMapping doesn't work.
3832 *
3833 * @returns Memory mapping index, 1024 on failure.
3834 * @param pIemCpu The IEM per CPU data.
3835 */
3836static unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
3837{
3838 /*
3839 * The easy case.
3840 */
3841 if (pIemCpu->cActiveMappings == 0)
3842 {
3843 pIemCpu->iNextMapping = 1;
3844 return 0;
3845 }
3846
3847 /* There should be enough mappings for all instructions. */
3848 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
3849
3850 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
3851 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
3852 return i;
3853
3854 AssertFailedReturn(1024);
3855}
3856
3857
3858/**
3859 * Commits a bounce buffer that needs writing back and unmaps it.
3860 *
3861 * @returns Strict VBox status code.
3862 * @param pIemCpu The IEM per CPU data.
3863 * @param iMemMap The index of the buffer to commit.
3864 */
3865static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
3866{
3867 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
3868 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
3869
3870 /*
3871 * Do the writing.
3872 */
3873 int rc;
3874 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
3875 && !IEM_VERIFICATION_ENABLED(pIemCpu))
3876 {
3877 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
3878 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
3879 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
3880 if (!pIemCpu->fByPassHandlers)
3881 {
3882 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
3883 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
3884 pbBuf,
3885 cbFirst);
3886 if (cbSecond && rc == VINF_SUCCESS)
3887 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
3888 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
3889 pbBuf + cbFirst,
3890 cbSecond);
3891 }
3892 else
3893 {
3894 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
3895 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
3896 pbBuf,
3897 cbFirst);
3898 if (cbSecond && rc == VINF_SUCCESS)
3899 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
3900 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
3901 pbBuf + cbFirst,
3902 cbSecond);
3903 }
3904 }
3905 else
3906 rc = VINF_SUCCESS;
3907
3908#ifdef IEM_VERIFICATION_MODE
3909 /*
3910 * Record the write(s).
3911 */
3912 if (!pIemCpu->fNoRem)
3913 {
3914 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
3915 if (pEvtRec)
3916 {
3917 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
3918 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
3919 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
3920 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
3921 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
3922 *pIemCpu->ppIemEvtRecNext = pEvtRec;
3923 }
3924 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
3925 {
3926 pEvtRec = iemVerifyAllocRecord(pIemCpu);
3927 if (pEvtRec)
3928 {
3929 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
3930 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
3931 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
3932 memcpy(pEvtRec->u.RamWrite.ab,
3933 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
3934 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
3935 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
3936 *pIemCpu->ppIemEvtRecNext = pEvtRec;
3937 }
3938 }
3939 }
3940#endif
3941
3942 /*
3943 * Free the mapping entry.
3944 */
3945 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
3946 Assert(pIemCpu->cActiveMappings != 0);
3947 pIemCpu->cActiveMappings--;
3948 return rc;
3949}
3950
3951
3952/**
3953 * iemMemMap worker that deals with a request crossing pages.
3954 */
3955static VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem,
3956 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
3957{
3958 /*
3959 * Do the address translations.
3960 */
3961 RTGCPHYS GCPhysFirst;
3962 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
3963 if (rcStrict != VINF_SUCCESS)
3964 return rcStrict;
3965
3966 RTGCPHYS GCPhysSecond;
3967 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
3968 if (rcStrict != VINF_SUCCESS)
3969 return rcStrict;
3970 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
3971
3972 /*
3973 * Read in the current memory content if it's a read or execute access.
3974 */
3975 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
3976 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
3977 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
3978
3979 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC))
3980 {
3981 int rc;
3982 if (!pIemCpu->fByPassHandlers)
3983 {
3984 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbFirstPage);
3985 if (rc != VINF_SUCCESS)
3986 return rc;
3987 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage);
3988 if (rc != VINF_SUCCESS)
3989 return rc;
3990 }
3991 else
3992 {
3993 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbFirstPage);
3994 if (rc != VINF_SUCCESS)
3995 return rc;
3996 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
3997 if (rc != VINF_SUCCESS)
3998 return rc;
3999 }
4000
4001#ifdef IEM_VERIFICATION_MODE
4002 if (!pIemCpu->fNoRem)
4003 {
4004 /*
4005 * Record the reads.
4006 */
4007 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4008 if (pEvtRec)
4009 {
4010 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
4011 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
4012 pEvtRec->u.RamRead.cb = cbFirstPage;
4013 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4014 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4015 }
4016 pEvtRec = iemVerifyAllocRecord(pIemCpu);
4017 if (pEvtRec)
4018 {
4019 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
4020 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
4021 pEvtRec->u.RamRead.cb = cbSecondPage;
4022 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4023 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4024 }
4025 }
4026#endif
4027 }
4028#ifdef VBOX_STRICT
4029 else
4030 memset(pbBuf, 0xcc, cbMem);
4031#endif
4032#ifdef VBOX_STRICT
4033 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
4034 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
4035#endif
4036
4037 /*
4038 * Commit the bounce buffer entry.
4039 */
4040 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
4041 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
4042 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
4043 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
4044 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
4045 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
4046 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
4047 pIemCpu->cActiveMappings++;
4048
4049 *ppvMem = pbBuf;
4050 return VINF_SUCCESS;
4051}
4052
4053
4054/**
4055 * iemMemMap worker that deals with iemMemPageMap failures.
4056 */
4057static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
4058 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
4059{
4060 /*
4061 * Filter out conditions we can handle and the ones which shouldn't happen.
4062 */
4063 if ( rcMap != VINF_PGM_PHYS_TLB_CATCH_WRITE
4064 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
4065 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
4066 {
4067 AssertReturn(RT_FAILURE_NP(rcMap), VERR_INTERNAL_ERROR_3);
4068 return rcMap;
4069 }
4070 pIemCpu->cPotentialExits++;
4071
4072 /*
4073 * Read in the current memory content if it's a read or execute access.
4074 */
4075 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
4076 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC))
4077 {
4078 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
4079 memset(pbBuf, 0xff, cbMem);
4080 else
4081 {
4082 int rc;
4083 if (!pIemCpu->fByPassHandlers)
4084 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem);
4085 else
4086 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
4087 if (rc != VINF_SUCCESS)
4088 return rc;
4089 }
4090
4091#ifdef IEM_VERIFICATION_MODE
4092 if (!pIemCpu->fNoRem)
4093 {
4094 /*
4095 * Record the read.
4096 */
4097 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4098 if (pEvtRec)
4099 {
4100 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
4101 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
4102 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
4103 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4104 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4105 }
4106 }
4107#endif
4108 }
4109#ifdef VBOX_STRICT
4110 else
4111 memset(pbBuf, 0xcc, cbMem);
4112#endif
4113#ifdef VBOX_STRICT
4114 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
4115 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
4116#endif
4117
4118 /*
4119 * Commit the bounce buffer entry.
4120 */
4121 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
4122 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
4123 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
4124 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
4125 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
4126 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
4127 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
4128 pIemCpu->cActiveMappings++;
4129
4130 *ppvMem = pbBuf;
4131 return VINF_SUCCESS;
4132}
4133
4134
4135
4136/**
4137 * Maps the specified guest memory for the given kind of access.
4138 *
4139 * This may be using bounce buffering of the memory if it's crossing a page
4140 * boundary or if there is an access handler installed for any of it. Because
4141 * of lock prefix guarantees, we're in for some extra clutter when this
4142 * happens.
4143 *
4144 * This may raise a \#GP, \#SS, \#PF or \#AC.
4145 *
4146 * @returns VBox strict status code.
4147 *
4148 * @param pIemCpu The IEM per CPU data.
4149 * @param ppvMem Where to return the pointer to the mapped
4150 * memory.
4151 * @param cbMem The number of bytes to map. This is usually 1,
4152 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
4153 * string operations it can be up to a page.
4154 * @param iSegReg The index of the segment register to use for
4155 * this access. The base and limits are checked.
4156 * Use UINT8_MAX to indicate that no segmentation
4157 * is required (for IDT, GDT and LDT accesses).
4158 * @param GCPtrMem The address of the guest memory.
4159 * @param fAccess How the memory is being accessed. The
4160 * IEM_ACCESS_TYPE_XXX bit is used to figure out
4161 * how to map the memory, while the
4162 * IEM_ACCESS_WHAT_XXX bit is used when raising
4163 * exceptions.
4164 */
4165static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
4166{
4167 /*
4168 * Check the input and figure out which mapping entry to use.
4169 */
4170 Assert(cbMem <= 32 || cbMem == 512);
4171 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
4172
4173 unsigned iMemMap = pIemCpu->iNextMapping;
4174 if (iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings))
4175 {
4176 iMemMap = iemMemMapFindFree(pIemCpu);
4177 AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3);
4178 }
4179
4180 /*
4181 * Map the memory, checking that we can actually access it. If something
4182 * slightly complicated happens, fall back on bounce buffering.
4183 */
4184 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
4185 if (rcStrict != VINF_SUCCESS)
4186 return rcStrict;
4187
4188 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
4189 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
4190
4191 RTGCPHYS GCPhysFirst;
4192 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
4193 if (rcStrict != VINF_SUCCESS)
4194 return rcStrict;
4195
4196 void *pvMem;
4197 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem);
4198 if (rcStrict != VINF_SUCCESS)
4199 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
4200
4201 /*
4202 * Fill in the mapping table entry.
4203 */
4204 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
4205 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
4206 pIemCpu->iNextMapping = iMemMap + 1;
4207 pIemCpu->cActiveMappings++;
4208
4209 *ppvMem = pvMem;
4210 return VINF_SUCCESS;
4211}
4212
4213
4214/**
4215 * Commits the guest memory if bounce buffered and unmaps it.
4216 *
4217 * @returns Strict VBox status code.
4218 * @param pIemCpu The IEM per CPU data.
4219 * @param pvMem The mapping.
4220 * @param fAccess The kind of access.
4221 */
4222static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
4223{
4224 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
4225 AssertReturn(iMemMap >= 0, iMemMap);
4226
4227 /*
4228 * If it's bounce buffered, we need to write back the buffer.
4229 */
4230 if ( (pIemCpu->aMemMappings[iMemMap].fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_TYPE_WRITE))
4231 == (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_TYPE_WRITE))
4232 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
4233
4234 /* Free the entry. */
4235 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
4236 Assert(pIemCpu->cActiveMappings != 0);
4237 pIemCpu->cActiveMappings--;
4238 return VINF_SUCCESS;
4239}
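
/* A minimal read-modify-write sketch (illustration only, not built): map
   the guest memory, modify it through the returned pointer, then commit.
   Page crossing and access handlers are dealt with inside iemMemMap and
   iemMemCommitAndUnmap; IEM_ACCESS_DATA_RW is assumed to combine the read
   and write access bits. */
#if 0
static VBOXSTRICTRC iemExampleIncByte(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    uint8_t *pu8;
    VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu8, sizeof(*pu8), iSegReg, GCPtrMem, IEM_ACCESS_DATA_RW);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu8 += 1; /* the actual modification */
        rcStrict = iemMemCommitAndUnmap(pIemCpu, pu8, IEM_ACCESS_DATA_RW);
    }
    return rcStrict;
}
#endif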
4240
4241
4242/**
4243 * Fetches a data byte.
4244 *
4245 * @returns Strict VBox status code.
4246 * @param pIemCpu The IEM per CPU data.
4247 * @param pu8Dst Where to return the byte.
4248 * @param iSegReg The index of the segment register to use for
4249 * this access. The base and limits are checked.
4250 * @param GCPtrMem The address of the guest memory.
4251 */
4252static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
4253{
4254 /* The lazy approach for now... */
4255 uint8_t const *pu8Src;
4256 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
4257 if (rc == VINF_SUCCESS)
4258 {
4259 *pu8Dst = *pu8Src;
4260 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
4261 }
4262 return rc;
4263}
4264
4265
4266/**
4267 * Fetches a data word.
4268 *
4269 * @returns Strict VBox status code.
4270 * @param pIemCpu The IEM per CPU data.
4271 * @param pu16Dst Where to return the word.
4272 * @param iSegReg The index of the segment register to use for
4273 * this access. The base and limits are checked.
4274 * @param GCPtrMem The address of the guest memory.
4275 */
4276static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
4277{
4278 /* The lazy approach for now... */
4279 uint16_t const *pu16Src;
4280 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
4281 if (rc == VINF_SUCCESS)
4282 {
4283 *pu16Dst = *pu16Src;
4284 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
4285 }
4286 return rc;
4287}
4288
4289
4290/**
4291 * Fetches a data dword.
4292 *
4293 * @returns Strict VBox status code.
4294 * @param pIemCpu The IEM per CPU data.
4295 * @param pu32Dst Where to return the dword.
4296 * @param iSegReg The index of the segment register to use for
4297 * this access. The base and limits are checked.
4298 * @param GCPtrMem The address of the guest memory.
4299 */
4300static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
4301{
4302 /* The lazy approach for now... */
4303 uint32_t const *pu32Src;
4304 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
4305 if (rc == VINF_SUCCESS)
4306 {
4307 *pu32Dst = *pu32Src;
4308 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
4309 }
4310 return rc;
4311}
4312
4313
4314#ifdef SOME_UNUSED_FUNCTION
4315/**
4316 * Fetches a data dword and sign extends it to a qword.
4317 *
4318 * @returns Strict VBox status code.
4319 * @param pIemCpu The IEM per CPU data.
4320 * @param pu64Dst Where to return the sign extended value.
4321 * @param iSegReg The index of the segment register to use for
4322 * this access. The base and limits are checked.
4323 * @param GCPtrMem The address of the guest memory.
4324 */
4325static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
4326{
4327 /* The lazy approach for now... */
4328 int32_t const *pi32Src;
4329 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
4330 if (rc == VINF_SUCCESS)
4331 {
4332 *pu64Dst = *pi32Src;
4333 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
4334 }
4335#ifdef __GNUC__ /* warning: GCC may be a royal pain */
4336 else
4337 *pu64Dst = 0;
4338#endif
4339 return rc;
4340}
4341#endif
4342
4343
4344/**
4345 * Fetches a data qword.
4346 *
4347 * @returns Strict VBox status code.
4348 * @param pIemCpu The IEM per CPU data.
4349 * @param pu64Dst Where to return the qword.
4350 * @param iSegReg The index of the segment register to use for
4351 * this access. The base and limits are checked.
4352 * @param GCPtrMem The address of the guest memory.
4353 */
4354static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
4355{
4356 /* The lazy approach for now... */
4357 uint64_t const *pu64Src;
4358 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
4359 if (rc == VINF_SUCCESS)
4360 {
4361 *pu64Dst = *pu64Src;
4362 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
4363 }
4364 return rc;
4365}
4366
4367
4368/**
4369 * Fetches a descriptor register (lgdt, lidt).
4370 *
4371 * @returns Strict VBox status code.
4372 * @param pIemCpu The IEM per CPU data.
4373 * @param pcbLimit Where to return the limit.
4374 * @param pGCPtrBase Where to return the base.
4375 * @param iSegReg The index of the segment register to use for
4376 * this access. The base and limits are checked.
4377 * @param GCPtrMem The address of the guest memory.
4378 * @param enmOpSize The effective operand size.
4379 */
4380static VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase,
4381 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
4382{
4383 uint8_t const *pu8Src;
4384 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
4385 (void **)&pu8Src,
4386 enmOpSize == IEMMODE_64BIT
4387 ? 2 + 8
4388 : enmOpSize == IEMMODE_32BIT
4389 ? 2 + 4
4390 : 2 + 3,
4391 iSegReg,
4392 GCPtrMem,
4393 IEM_ACCESS_DATA_R);
4394 if (rcStrict == VINF_SUCCESS)
4395 {
4396 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
4397 switch (enmOpSize)
4398 {
4399 case IEMMODE_16BIT:
4400 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
4401 break;
4402 case IEMMODE_32BIT:
4403 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
4404 break;
4405 case IEMMODE_64BIT:
4406 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
4407 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
4408 break;
4409
4410 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4411 }
4412 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
4413 }
4414 return rcStrict;
4415}
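
/* Layout read by the function above (for reference): the pseudo descriptor
   is a 16-bit limit followed by the base, so lgdt/lidt fetch 2+3 bytes with
   a 16-bit operand size (24-bit base, top byte zeroed), 2+4 bytes with a
   32-bit operand size and 2+8 bytes in 64-bit mode. */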
4416
4417
4418
4419/**
4420 * Stores a data byte.
4421 *
4422 * @returns Strict VBox status code.
4423 * @param pIemCpu The IEM per CPU data.
4424 * @param iSegReg The index of the segment register to use for
4425 * this access. The base and limits are checked.
4426 * @param GCPtrMem The address of the guest memory.
4427 * @param u8Value The value to store.
4428 */
4429static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
4430{
4431 /* The lazy approach for now... */
4432 uint8_t *pu8Dst;
4433 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
4434 if (rc == VINF_SUCCESS)
4435 {
4436 *pu8Dst = u8Value;
4437 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
4438 }
4439 return rc;
4440}
4441
4442
4443/**
4444 * Stores a data word.
4445 *
4446 * @returns Strict VBox status code.
4447 * @param pIemCpu The IEM per CPU data.
4448 * @param iSegReg The index of the segment register to use for
4449 * this access. The base and limits are checked.
4450 * @param GCPtrMem The address of the guest memory.
4451 * @param u16Value The value to store.
4452 */
4453static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
4454{
4455 /* The lazy approach for now... */
4456 uint16_t *pu16Dst;
4457 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
4458 if (rc == VINF_SUCCESS)
4459 {
4460 *pu16Dst = u16Value;
4461 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
4462 }
4463 return rc;
4464}
4465
4466
4467/**
4468 * Stores a data dword.
4469 *
4470 * @returns Strict VBox status code.
4471 * @param pIemCpu The IEM per CPU data.
4472 * @param iSegReg The index of the segment register to use for
4473 * this access. The base and limits are checked.
4474 * @param GCPtrMem The address of the guest memory.
4475 * @param u32Value The value to store.
4476 */
4477static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
4478{
4479 /* The lazy approach for now... */
4480 uint32_t *pu32Dst;
4481 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
4482 if (rc == VINF_SUCCESS)
4483 {
4484 *pu32Dst = u32Value;
4485 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
4486 }
4487 return rc;
4488}
4489
4490
4491/**
4492 * Stores a data qword.
4493 *
4494 * @returns Strict VBox status code.
4495 * @param pIemCpu The IEM per CPU data.
4496 * @param iSegReg The index of the segment register to use for
4497 * this access. The base and limits are checked.
4498 * @param GCPtrMem The address of the guest memory.
4499 * @param u64Value The value to store.
4500 */
4501static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
4502{
4503 /* The lazy approach for now... */
4504 uint64_t *pu64Dst;
4505 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
4506 if (rc == VINF_SUCCESS)
4507 {
4508 *pu64Dst = u64Value;
4509 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
4510 }
4511 return rc;
4512}
4513
4514
4515/**
4516 * Pushes a word onto the stack.
4517 *
4518 * @returns Strict VBox status code.
4519 * @param pIemCpu The IEM per CPU data.
4520 * @param u16Value The value to push.
4521 */
4522static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
4523{
4524 /* Decrement the stack pointer. */
4525 uint64_t uNewRsp;
4526 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4527 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 2, &uNewRsp);
4528
4529 /* Write the word the lazy way. */
4530 uint16_t *pu16Dst;
4531 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4532 if (rc == VINF_SUCCESS)
4533 {
4534 *pu16Dst = u16Value;
4535 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
4536 }
4537
4538 /* Commit the new RSP value unless an access handler made trouble. */
4539 if (rc == VINF_SUCCESS)
4540 pCtx->rsp = uNewRsp;
4541
4542 return rc;
4543}
4544
4545
4546/**
4547 * Pushes a dword onto the stack.
4548 *
4549 * @returns Strict VBox status code.
4550 * @param pIemCpu The IEM per CPU data.
4551 * @param u32Value The value to push.
4552 */
4553static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
4554{
4555 /* Decrement the stack pointer. */
4556 uint64_t uNewRsp;
4557 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4558 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 4, &uNewRsp);
4559
4560 /* Write the word the lazy way. */
4561 uint32_t *pu32Dst;
4562 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4563 if (rc == VINF_SUCCESS)
4564 {
4565 *pu32Dst = u32Value;
4566 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
4567 }
4568
4569 /* Commit the new RSP value unless an access handler made trouble. */
4570 if (rc == VINF_SUCCESS)
4571 pCtx->rsp = uNewRsp;
4572
4573 return rc;
4574}
4575
4576
4577/**
4578 * Pushes a qword onto the stack.
4579 *
4580 * @returns Strict VBox status code.
4581 * @param pIemCpu The IEM per CPU data.
4582 * @param u64Value The value to push.
4583 */
4584static VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
4585{
4586 /* Decrement the stack pointer. */
4587 uint64_t uNewRsp;
4588 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4589 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 8, &uNewRsp);
4590
4591 /* Write the word the lazy way. */
4592 uint64_t *pu64Dst;
4593 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4594 if (rc == VINF_SUCCESS)
4595 {
4596 *pu64Dst = u64Value;
4597 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
4598 }
4599
4600 /* Commit the new RSP value unless an access handler made trouble. */
4601 if (rc == VINF_SUCCESS)
4602 pCtx->rsp = uNewRsp;
4603
4604 return rc;
4605}
4606
4607
4608/**
4609 * Pops a word from the stack.
4610 *
4611 * @returns Strict VBox status code.
4612 * @param pIemCpu The IEM per CPU data.
4613 * @param pu16Value Where to store the popped value.
4614 */
4615static VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
4616{
4617 /* Increment the stack pointer. */
4618 uint64_t uNewRsp;
4619 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4620 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 2, &uNewRsp);
4621
4622 /* Read the word the lazy way. */
4623 uint16_t const *pu16Src;
4624 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4625 if (rc == VINF_SUCCESS)
4626 {
4627 *pu16Value = *pu16Src;
4628 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
4629
4630 /* Commit the new RSP value. */
4631 if (rc == VINF_SUCCESS)
4632 pCtx->rsp = uNewRsp;
4633 }
4634
4635 return rc;
4636}
4637
4638
4639/**
4640 * Pops a dword from the stack.
4641 *
4642 * @returns Strict VBox status code.
4643 * @param pIemCpu The IEM per CPU data.
4644 * @param pu32Value Where to store the popped value.
4645 */
4646static VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
4647{
4648 /* Increment the stack pointer. */
4649 uint64_t uNewRsp;
4650 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4651 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 4, &uNewRsp);
4652
4653 /* Read the word the lazy way. */
4654 uint32_t const *pu32Src;
4655 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4656 if (rc == VINF_SUCCESS)
4657 {
4658 *pu32Value = *pu32Src;
4659 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
4660
4661 /* Commit the new RSP value. */
4662 if (rc == VINF_SUCCESS)
4663 pCtx->rsp = uNewRsp;
4664 }
4665
4666 return rc;
4667}
4668
4669
4670/**
4671 * Pops a qword from the stack.
4672 *
4673 * @returns Strict VBox status code.
4674 * @param pIemCpu The IEM per CPU data.
4675 * @param pu64Value Where to store the popped value.
4676 */
4677static VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
4678{
4679 /* Increment the stack pointer. */
4680 uint64_t uNewRsp;
4681 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4682 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 8, &uNewRsp);
4683
4684 /* Read the word the lazy way. */
4685 uint64_t const *pu64Src;
4686 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4687 if (rc == VINF_SUCCESS)
4688 {
4689 *pu64Value = *pu64Src;
4690 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
4691
4692 /* Commit the new RSP value. */
4693 if (rc == VINF_SUCCESS)
4694 pCtx->rsp = uNewRsp;
4695 }
4696
4697 return rc;
4698}
4699
4700
4701/**
4702 * Pushes a word onto the stack, using a temporary stack pointer.
4703 *
4704 * @returns Strict VBox status code.
4705 * @param pIemCpu The IEM per CPU data.
4706 * @param u16Value The value to push.
4707 * @param pTmpRsp Pointer to the temporary stack pointer.
4708 */
4709static VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
4710{
4711 /* Decrement the stack pointer. */
4712 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4713 RTUINT64U NewRsp = *pTmpRsp;
4714 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 2, pCtx);
4715
4716 /* Write the word the lazy way. */
4717 uint16_t *pu16Dst;
4718 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4719 if (rc == VINF_SUCCESS)
4720 {
4721 *pu16Dst = u16Value;
4722 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
4723 }
4724
4725 /* Commit the new RSP value unless an access handler made trouble. */
4726 if (rc == VINF_SUCCESS)
4727 *pTmpRsp = NewRsp;
4728
4729 return rc;
4730}
4731
4732
4733/**
4734 * Pushes a dword onto the stack, using a temporary stack pointer.
4735 *
4736 * @returns Strict VBox status code.
4737 * @param pIemCpu The IEM per CPU data.
4738 * @param u32Value The value to push.
4739 * @param pTmpRsp Pointer to the temporary stack pointer.
4740 */
4741static VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
4742{
4743 /* Decrement the stack pointer. */
4744 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4745 RTUINT64U NewRsp = *pTmpRsp;
4746 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 4, pCtx);
4747
4748 /* Write the word the lazy way. */
4749 uint32_t *pu32Dst;
4750 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4751 if (rc == VINF_SUCCESS)
4752 {
4753 *pu32Dst = u32Value;
4754 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
4755 }
4756
4757 /* Commit the new RSP value unless an access handler made trouble. */
4758 if (rc == VINF_SUCCESS)
4759 *pTmpRsp = NewRsp;
4760
4761 return rc;
4762}
4763
4764
4765#ifdef SOME_UNUSED_FUNCTION
4766/**
4767 * Pushes a qword onto the stack, using a temporary stack pointer.
4768 *
4769 * @returns Strict VBox status code.
4770 * @param pIemCpu The IEM per CPU data.
4771 * @param u64Value The value to push.
4772 * @param pTmpRsp Pointer to the temporary stack pointer.
4773 */
4774static VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
4775{
4776 /* Decrement the stack pointer. */
4777 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4778 RTUINT64U NewRsp = *pTmpRsp;
4779 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 8, pCtx);
4780
4781 /* Write the word the lazy way. */
4782 uint64_t *pu64Dst;
4783 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4784 if (rc == VINF_SUCCESS)
4785 {
4786 *pu64Dst = u64Value;
4787 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
4788 }
4789
4790 /* Commit the new RSP value unless an access handler made trouble. */
4791 if (rc == VINF_SUCCESS)
4792 *pTmpRsp = NewRsp;
4793
4794 return rc;
4795}
4796#endif
4797
4798
4799/**
4800 * Pops a word from the stack, using a temporary stack pointer.
4801 *
4802 * @returns Strict VBox status code.
4803 * @param pIemCpu The IEM per CPU data.
4804 * @param pu16Value Where to store the popped value.
4805 * @param pTmpRsp Pointer to the temporary stack pointer.
4806 */
4807static VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
4808{
4809 /* Increment the stack pointer. */
4810 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4811 RTUINT64U NewRsp = *pTmpRsp;
4812 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 2, pCtx);
4813
4814 /* Read the word the lazy way. */
4815 uint16_t const *pu16Src;
4816 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4817 if (rc == VINF_SUCCESS)
4818 {
4819 *pu16Value = *pu16Src;
4820 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
4821
4822 /* Commit the new RSP value. */
4823 if (rc == VINF_SUCCESS)
4824 *pTmpRsp = NewRsp;
4825 }
4826
4827 return rc;
4828}
4829
4830
4831/**
4832 * Pops a dword from the stack, using a temporary stack pointer.
4833 *
4834 * @returns Strict VBox status code.
4835 * @param pIemCpu The IEM per CPU data.
4836 * @param pu32Value Where to store the popped value.
4837 * @param pTmpRsp Pointer to the temporary stack pointer.
4838 */
4839static VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
4840{
4841 /* Increment the stack pointer. */
4842 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4843 RTUINT64U NewRsp = *pTmpRsp;
4844 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 4, pCtx);
4845
4846 /* Read the word the lazy way. */
4847 uint32_t const *pu32Src;
4848 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4849 if (rc == VINF_SUCCESS)
4850 {
4851 *pu32Value = *pu32Src;
4852 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
4853
4854 /* Commit the new RSP value. */
4855 if (rc == VINF_SUCCESS)
4856 *pTmpRsp = NewRsp;
4857 }
4858
4859 return rc;
4860}
4861
4862
4863/**
4864 * Pops a qword from the stack, using a temporary stack pointer.
4865 *
4866 * @returns Strict VBox status code.
4867 * @param pIemCpu The IEM per CPU data.
4868 * @param pu64Value Where to store the popped value.
4869 * @param pTmpRsp Pointer to the temporary stack pointer.
4870 */
4871static VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
4872{
4873 /* Increment the stack pointer. */
4874 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4875 RTUINT64U NewRsp = *pTmpRsp;
4876 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);
4877
4878 /* Read the word the lazy way. */
4879 uint64_t const *pu64Src;
4880 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4881 if (rcStrict == VINF_SUCCESS)
4882 {
4883 *pu64Value = *pu64Src;
4884 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
4885
4886 /* Commit the new RSP value. */
4887 if (rcStrict == VINF_SUCCESS)
4888 *pTmpRsp = NewRsp;
4889 }
4890
4891 return rcStrict;
4892}
4893
4894
4895/**
4896 * Begin a special stack push (used by interrupts, exceptions and such).
4897 *
4898 * This will raise \#SS or \#PF if appropriate.
4899 *
4900 * @returns Strict VBox status code.
4901 * @param pIemCpu The IEM per CPU data.
4902 * @param cbMem The number of bytes to push onto the stack.
4903 * @param ppvMem Where to return the pointer to the stack memory.
4904 * As with the other memory functions this could be
4905 * direct access or bounce buffered access, so
4906 * don't commit any register until the commit call
4907 * succeeds.
4908 * @param puNewRsp Where to return the new RSP value. This must be
4909 * passed unchanged to
4910 * iemMemStackPushCommitSpecial().
4911 */
4912static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
4913{
4914 Assert(cbMem < UINT8_MAX);
4915 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4916 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, (uint8_t)cbMem, puNewRsp);
4917 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4918}
4919
4920
4921/**
4922 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
4923 *
4924 * This will update the rSP.
4925 *
4926 * @returns Strict VBox status code.
4927 * @param pIemCpu The IEM per CPU data.
4928 * @param pvMem The pointer returned by
4929 * iemMemStackPushBeginSpecial().
4930 * @param uNewRsp The new RSP value returned by
4931 * iemMemStackPushBeginSpecial().
4932 */
4933static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
4934{
4935 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
4936 if (rcStrict == VINF_SUCCESS)
4937 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
4938 return rcStrict;
4939}
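
/* A minimal pairing sketch (illustration only, not built): exception and
   interrupt dispatching maps the whole frame first and only commits RSP
   after all the stores succeeded, so a #PF halfway through leaves RSP
   untouched. The real-mode style frame below is just an example. */
#if 0
static VBOXSTRICTRC iemExamplePushRealModeFrame(PIEMCPU pIemCpu, uint16_t uFlags, uint16_t uSelCS, uint16_t uIp)
{
    uint64_t   uNewRsp;
    uint16_t  *pau16Frame;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6 /* cbMem */, (void **)&pau16Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    pau16Frame[2] = uFlags;   /* highest address */
    pau16Frame[1] = uSelCS;
    pau16Frame[0] = uIp;      /* new top of stack */
    return iemMemStackPushCommitSpecial(pIemCpu, pau16Frame, uNewRsp);
}
#endif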
4940
4941
4942/**
4943 * Begin a special stack pop (used by iret, retf and such).
4944 *
4945 * This will raise \#SS or \#PF if appropriate.
4946 *
4947 * @returns Strict VBox status code.
4948 * @param pIemCpu The IEM per CPU data.
4949 * @param cbMem The number of bytes to pop off the stack.
4950 * @param ppvMem Where to return the pointer to the stack memory.
4951 * @param puNewRsp Where to return the new RSP value. This must be
4952 * passed unchanged to
4953 * iemMemStackPopCommitSpecial() or applied
4954 * manually if iemMemStackPopDoneSpecial() is used.
4955 */
4956static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
4957{
4958 Assert(cbMem < UINT8_MAX);
4959 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4960 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, (uint8_t)cbMem, puNewRsp);
4961 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4962}
4963
4964
4965/**
4966 * Continue a special stack pop (used by iret).
4967 *
4968 * This will raise \#SS or \#PF if appropriate.
4969 *
4970 * @returns Strict VBox status code.
4971 * @param pIemCpu The IEM per CPU data.
4972 * @param cbMem The number of bytes to pop off the stack.
4973 * @param ppvMem Where to return the pointer to the stack memory.
4974 * @param puNewRsp Where to return the new RSP value. This must be
4975 * passed unchanged to
4976 * iemMemStackPopCommitSpecial() or applied
4977 * manually if iemMemStackPopDoneSpecial() is used.
4978 */
4979static VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
4980{
4981 Assert(cbMem < UINT8_MAX);
4982 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4983 RTUINT64U NewRsp;
4984 NewRsp.u = *puNewRsp;
4985 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);
4986 *puNewRsp = NewRsp.u;
4987 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4988}
4989
4990
4991/**
4992 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
4993 *
4994 * This will update the rSP.
4995 *
4996 * @returns Strict VBox status code.
4997 * @param pIemCpu The IEM per CPU data.
4998 * @param pvMem The pointer returned by
4999 * iemMemStackPopBeginSpecial().
5000 * @param uNewRsp The new RSP value returned by
5001 * iemMemStackPopBeginSpecial().
5002 */
5003static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
5004{
5005 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
5006 if (rcStrict == VINF_SUCCESS)
5007 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
5008 return rcStrict;
5009}
5010
5011
5012/**
5013 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
5014 * iemMemStackPopContinueSpecial).
5015 *
5016 * The caller will manually commit the rSP.
5017 *
5018 * @returns Strict VBox status code.
5019 * @param pIemCpu The IEM per CPU data.
5020 * @param pvMem The pointer returned by
5021 * iemMemStackPopBeginSpecial() or
5022 * iemMemStackPopContinueSpecial().
5023 */
5024static VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
5025{
5026 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
5027}
5028
5029
5030/**
5031 * Fetches a system table dword.
5032 *
5033 * @returns Strict VBox status code.
5034 * @param pIemCpu The IEM per CPU data.
5035 * @param pu32Dst Where to return the dword.
5036 * @param iSegReg The index of the segment register to use for
5037 * this access. The base and limits are checked.
5038 * @param GCPtrMem The address of the guest memory.
5039 */
5040static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5041{
5042 /* The lazy approach for now... */
5043 uint32_t const *pu32Src;
5044 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
5045 if (rc == VINF_SUCCESS)
5046 {
5047 *pu32Dst = *pu32Src;
5048 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
5049 }
5050 return rc;
5051}
5052
5053
5054/**
5055 * Fetches a system table qword.
5056 *
5057 * @returns Strict VBox status code.
5058 * @param pIemCpu The IEM per CPU data.
5059 * @param pu64Dst Where to return the qword.
5060 * @param iSegReg The index of the segment register to use for
5061 * this access. The base and limits are checked.
5062 * @param GCPtrMem The address of the guest memory.
5063 */
5064static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5065{
5066 /* The lazy approach for now... */
5067 uint64_t const *pu64Src;
5068 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
5069 if (rc == VINF_SUCCESS)
5070 {
5071 *pu64Dst = *pu64Src;
5072 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
5073 }
5074 return rc;
5075}
5076
5077
5078/**
5079 * Fetches a descriptor table entry.
5080 *
5081 * @returns Strict VBox status code.
5082 * @param pIemCpu The IEM per CPU.
5083 * @param pDesc Where to return the descriptor table entry.
5084 * @param uSel The selector which table entry to fetch.
5085 */
5086static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel)
5087{
5088 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5089
5090 /** @todo did the 286 require all 8 bytes to be accessible? */
5091 /*
5092 * Get the selector table base and check bounds.
5093 */
5094 RTGCPTR GCPtrBase;
5095 if (uSel & X86_SEL_LDT)
5096 {
5097 if ( !pCtx->ldtrHid.Attr.n.u1Present
5098 || (uSel | 0x7U) > pCtx->ldtrHid.u32Limit )
5099 {
5100 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
5101 uSel, pCtx->ldtrHid.u32Limit, pCtx->ldtr));
5102 /** @todo is this the right exception? */
5103 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
5104 }
5105
5106 Assert(pCtx->ldtrHid.Attr.n.u1Present);
5107 GCPtrBase = pCtx->ldtrHid.u64Base;
5108 }
5109 else
5110 {
5111 if ((uSel | 0x7U) > pCtx->gdtr.cbGdt)
5112 {
5113 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
5114 /** @todo is this the right exception? */
5115 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
5116 }
5117 GCPtrBase = pCtx->gdtr.pGdt;
5118 }
5119
5120 /*
5121 * Read the legacy descriptor and maybe the long mode extensions if
5122 * required.
5123 */
5124 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
5125 if (rcStrict == VINF_SUCCESS)
5126 {
5127 if ( !IEM_IS_LONG_MODE(pIemCpu)
5128 || pDesc->Legacy.Gen.u1DescType)
5129 pDesc->Long.au64[1] = 0;
5130 else if ((uint32_t)(uSel & X86_SEL_MASK) + 15 < (uSel & X86_SEL_LDT ? pCtx->ldtrHid.u32Limit : pCtx->gdtr.cbGdt))
5131 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8);
5132 else
5133 {
5134 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
5135 /** @todo is this the right exception? */
5136 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
5137 }
5138 }
5139 return rcStrict;
5140}
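/*
 * Worked example (illustrative): for uSel = 0x001b (index 3, TI=0, RPL=3) the
 * entry is read from gdtr.pGdt + (0x001b & X86_SEL_MASK) = pGdt + 0x18, i.e.
 * the 8 bytes of descriptor slot 3; with TI=1 the LDT base and limit are used
 * instead. In long mode, system (non-code/data) descriptors are 16 bytes,
 * hence the second fetch of the upper half above.
 */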
5141
5142
5143/**
5144 * Marks the selector descriptor as accessed (only non-system descriptors).
5145 *
5146 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
5147 * will therefore skip the limit checks.
5148 *
5149 * @returns Strict VBox status code.
5150 * @param pIemCpu The IEM per CPU.
5151 * @param uSel The selector.
5152 */
5153static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
5154{
5155 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5156
5157 /*
5158 * Get the selector table base and calculate the entry address.
5159 */
5160 RTGCPTR GCPtr = uSel & X86_SEL_LDT
5161 ? pCtx->ldtrHid.u64Base
5162 : pCtx->gdtr.pGdt;
5163 GCPtr += uSel & X86_SEL_MASK;
5164
5165 /*
5166 * ASMAtomicBitSet will assert if the address is misaligned, so do some
5167 * ugly stuff to avoid this. This will make sure it's an atomic access
5168 * as well as more or less remove any question about 8-bit or 32-bit accesses.
5169 */
5170 VBOXSTRICTRC rcStrict;
5171 uint32_t volatile *pu32;
5172 if ((GCPtr & 3) == 0)
5173 {
5174 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
5175 GCPtr += 2 + 2;
5176 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
5177 if (rcStrict != VINF_SUCCESS)
5178 return rcStrict;
5179 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
5180 }
5181 else
5182 {
5183 /* The misaligned GDT/LDT case, map the whole thing. */
5184 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
5185 if (rcStrict != VINF_SUCCESS)
5186 return rcStrict;
5187 switch ((uintptr_t)pu32 & 3)
5188 {
5189 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
5190 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
5191 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
5192 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
5193 }
5194 }
5195
5196 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
5197}
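/*
 * Note on the bit arithmetic above (illustrative): the accessed flag is bit 0
 * of the type field in byte 5 of the descriptor, i.e. bit 40 of the 8-byte
 * entry. In the dword aligned case only bytes 4 thru 7 are mapped, so the bit
 * index becomes 40 - 32 = 8; in the misaligned case the whole entry is mapped
 * and the base pointer / bit index are adjusted together so that
 * ASMAtomicBitSet always sees a dword aligned address.
 */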
5198
5199/** @} */
5200
5201
5202/*
5203 * Include the C/C++ implementation of the instructions.
5204 */
5205#include "IEMAllCImpl.cpp.h"
5206
5207
5208
5209/** @name "Microcode" macros.
5210 *
5211 * The idea is that we should be able to use the same code to interpret
5212 * instructions as well as to recompile them. Thus this obfuscation.
5213 *
5214 * @{
5215 */
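/*
 * Illustrative sketch only (not an actual decoder): a hypothetical 16-bit
 * register-to-register ADD body written with the macros below might look
 * roughly like this, assuming bRm has already been fetched and ignoring REX
 * (the real decoder bodies live in IEMAllInstructions.cpp.h and
 * iemAImpl_add_u16 is one of the assembly helpers):
 *
 * @code
 *      IEM_MC_BEGIN(3, 0);
 *      IEM_MC_ARG(uint16_t *, pu16Dst,  0);
 *      IEM_MC_ARG(uint16_t,   u16Src,   1);
 *      IEM_MC_ARG(uint32_t *, pEFlags,  2);
 *      IEM_MC_FETCH_GREG_U16(u16Src, bRm & X86_MODRM_RM_MASK);
 *      IEM_MC_REF_GREG_U16(pu16Dst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
 *      IEM_MC_REF_EFLAGS(pEFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */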
5216#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
5217#define IEM_MC_END() }
5218#define IEM_MC_PAUSE() do {} while (0)
5219#define IEM_MC_CONTINUE() do {} while (0)
5220
5221/** Internal macro. */
5222#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
5223 do \
5224 { \
5225 VBOXSTRICTRC rcStrict2 = a_Expr; \
5226 if (rcStrict2 != VINF_SUCCESS) \
5227 return rcStrict2; \
5228 } while (0)
5229
5230#define IEM_MC_ADVANCE_RIP() iemRegUpdateRip(pIemCpu)
5231#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
5232#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
5233#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
5234#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
5235#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
5236#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
5237
5238#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
5239#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
5240 do { \
5241 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
5242 return iemRaiseDeviceNotAvailable(pIemCpu); \
5243 } while (0)
5244#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
5245 do { \
5246 if (iemFRegFetchFsw(pIemCpu) & X86_FSW_ES) \
5247 return iemRaiseMathFault(pIemCpu); \
5248 } while (0)
5249#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
5250 do { \
5251 if (pIemCpu->uCpl != 0) \
5252 return iemRaiseGeneralProtectionFault0(pIemCpu); \
5253 } while (0)
5254
5255
5256#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
5257#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
5258#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
5259#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
5260#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
5261#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
5262#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
5263 uint32_t a_Name; \
5264 uint32_t *a_pName = &a_Name
5265#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
5266 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
5267
5268#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
5269#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
5270
5271#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
5272#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
5273#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
5274#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
5275#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
5276#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
5277#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
5278#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
5279#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
5280#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
5281#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
5282#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
5283#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
5284#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
5285#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
5286#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
5287#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
5288#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
5289#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
5290#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
5291#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
5292#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
5293#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
5294#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
5295#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
5296#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = iemFRegFetchFsw(pIemCpu)
5297
5298#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
5299#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
5300#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
5301#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
5302#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
5303#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
5304#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
5305#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
5306#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
5307#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
5308
5309#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
5310#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
5311/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
5312 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
5313#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
5314#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
5315#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
5316
5317#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
5318#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
5319#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
5320 do { \
5321 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
5322 *pu32Reg += (a_u32Value); \
5323 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
5324 } while (0)
5325#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
5326
5327#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
5328#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
5329#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
5330 do { \
5331 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
5332 *pu32Reg -= (a_u32Value); \
5333 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
5334 } while (0)
5335#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
5336
5337#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
5338#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
5339#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
5340#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
5341#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
5342#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
5343#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
5344
5345#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
5346#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
5347#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
5348#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
5349
5350#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
5351#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
5352#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
5353
5354#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
5355#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
5356
5357#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
5358#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
5359#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
5360
5361#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
5362#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
5363#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
5364
5365#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
5366
5367#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
5368
5369#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
5370#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
5371#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
5372 do { \
5373 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
5374 *pu32Reg &= (a_u32Value); \
5375 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
5376 } while (0)
5377#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
5378
5379#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
5380#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
5381#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
5382 do { \
5383 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
5384 *pu32Reg |= (a_u32Value); \
5385 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
5386 } while (0)
5387#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
5388
5389
5390#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
5391#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
5392#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
5393
5394
5395
5396#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
5397 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
5398#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
5399 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
5400#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
5401 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
5402
5403#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
5404 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
5405#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
5406 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
5407
5408#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
5409 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
5410#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
5411 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
5412
5413#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5414 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
5415
5416#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5417 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
5418#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
5419 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
5420
5421#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
5422 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
5423#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
5424 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
5425#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
5426 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
5427
5428
5429#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
5430 do { \
5431 uint8_t u8Tmp; \
5432 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
5433 (a_u16Dst) = u8Tmp; \
5434 } while (0)
5435#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
5436 do { \
5437 uint8_t u8Tmp; \
5438 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
5439 (a_u32Dst) = u8Tmp; \
5440 } while (0)
5441#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5442 do { \
5443 uint8_t u8Tmp; \
5444 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
5445 (a_u64Dst) = u8Tmp; \
5446 } while (0)
5447#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
5448 do { \
5449 uint16_t u16Tmp; \
5450 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
5451 (a_u32Dst) = u16Tmp; \
5452 } while (0)
5453#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5454 do { \
5455 uint16_t u16Tmp; \
5456 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
5457 (a_u64Dst) = u16Tmp; \
5458 } while (0)
5459#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5460 do { \
5461 uint32_t u32Tmp; \
5462 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
5463 (a_u64Dst) = u32Tmp; \
5464 } while (0)
5465
5466#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
5467 do { \
5468 uint8_t u8Tmp; \
5469 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
5470 (a_u16Dst) = (int8_t)u8Tmp; \
5471 } while (0)
5472#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
5473 do { \
5474 uint8_t u8Tmp; \
5475 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
5476 (a_u32Dst) = (int8_t)u8Tmp; \
5477 } while (0)
5478#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5479 do { \
5480 uint8_t u8Tmp; \
5481 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
5482 (a_u64Dst) = (int8_t)u8Tmp; \
5483 } while (0)
5484#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
5485 do { \
5486 uint16_t u16Tmp; \
5487 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
5488 (a_u32Dst) = (int16_t)u16Tmp; \
5489 } while (0)
5490#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5491 do { \
5492 uint16_t u16Tmp; \
5493 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
5494 (a_u64Dst) = (int16_t)u16Tmp; \
5495 } while (0)
5496#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5497 do { \
5498 uint32_t u32Tmp; \
5499 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
5500 (a_u64Dst) = (int32_t)u32Tmp; \
5501 } while (0)
5502
5503#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
5504 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
5505#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
5506 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
5507#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
5508 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
5509#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
5510 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
5511
5512#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
5513 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
5514
5515#define IEM_MC_PUSH_U16(a_u16Value) \
5516 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
5517#define IEM_MC_PUSH_U32(a_u32Value) \
5518 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
5519#define IEM_MC_PUSH_U64(a_u64Value) \
5520 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
5521
5522#define IEM_MC_POP_U16(a_pu16Value) \
5523 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
5524#define IEM_MC_POP_U32(a_pu32Value) \
5525 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
5526#define IEM_MC_POP_U64(a_pu64Value) \
5527 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
5528
5529/** Maps guest memory for direct or bounce buffered access.
5530 * The purpose is to pass it to an operand implementation, thus the a_iArg.
5531 * @remarks May return.
5532 */
5533#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
5534 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
5535
5536/** Maps guest memory for direct or bounce buffered access.
5537 * The purpose is to pass it to an operand implementation, thus the a_iArg.
5538 * @remarks May return.
5539 */
5540#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
5541 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
5542
5543/** Commits the memory and unmaps the guest memory.
5544 * @remarks May return.
5545 */
5546#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
5547 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
5548
5549/** Calculate efficient address from R/M. */
5550#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm) \
5551 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), &(a_GCPtrEff)))
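/*
 * Sketch of the map / commit pattern for a read-modify-write memory operand
 * (illustrative only; the access flag name IEM_ACCESS_DATA_RW is assumed and
 * the eflags handling is elided):
 *
 * @code
 *      IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
 *      IEM_MC_ARG(uint16_t *, pu16Dst, 0);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm);
 *      IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEff, 0);
 *      // ... operate on *pu16Dst ...
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 * @endcode
 */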
5552
5553#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
5554#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
5555#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
5556#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
5557#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
5558
5559/**
5560 * Defers the rest of the instruction emulation to a C implementation routine
5561 * and returns, only taking the standard parameters.
5562 *
5563 * @param a_pfnCImpl The pointer to the C routine.
5564 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
5565 */
5566#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
5567
5568/**
5569 * Defers the rest of instruction emulation to a C implementation routine and
5570 * returns, taking one argument in addition to the standard ones.
5571 *
5572 * @param a_pfnCImpl The pointer to the C routine.
5573 * @param a0 The argument.
5574 */
5575#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
5576
5577/**
5578 * Defers the rest of the instruction emulation to a C implementation routine
5579 * and returns, taking two arguments in addition to the standard ones.
5580 *
5581 * @param a_pfnCImpl The pointer to the C routine.
5582 * @param a0 The first extra argument.
5583 * @param a1 The second extra argument.
5584 */
5585#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
5586
5587/**
5588 * Defers the rest of the instruction emulation to a C implementation routine
5589 * and returns, taking three arguments in addition to the standard ones.
5590 *
5591 * @param a_pfnCImpl The pointer to the C routine.
5592 * @param a0 The first extra argument.
5593 * @param a1 The second extra argument.
5594 * @param a2 The third extra argument.
5595 */
5596#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
5597
5598/**
5599 * Defers the rest of the instruction emulation to a C implementation routine
5600 * and returns, taking five arguments in addition to the standard ones.
5601 *
5602 * @param a_pfnCImpl The pointer to the C routine.
5603 * @param a0 The first extra argument.
5604 * @param a1 The second extra argument.
5605 * @param a2 The third extra argument.
5606 * @param a3 The fourth extra argument.
5607 * @param a4 The fifth extra argument.
5608 */
5609#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
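/*
 * For illustration: the C implementation routines invoked by the CIMPL macros
 * all take (pIemCpu, cbInstr) as their leading parameters, with
 * pIemCpu->offOpcode passed as the instruction length. A one-argument routine
 * would be defined along these lines (hypothetical name; see the
 * IEM_CIMPL_DEF_* macros in IEMInternal.h for the real definition helpers):
 *
 * @code
 *      IEM_CIMPL_DEF_1(iemCImpl_something, uint8_t, iEffSeg)
 *      {
 *          // ... emulate the instruction, advancing RIP by cbInstr ...
 *          return VINF_SUCCESS;
 *      }
 * @endcode
 */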
5610
5611/**
5612 * Defers the entire instruction emulation to a C implementation routine and
5613 * returns, only taking the standard parameters.
5614 *
5615 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
5616 *
5617 * @param a_pfnCImpl The pointer to the C routine.
5618 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
5619 */
5620#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
5621
5622/**
5623 * Defers the entire instruction emulation to a C implementation routine and
5624 * returns, taking one argument in addition to the standard ones.
5625 *
5626 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
5627 *
5628 * @param a_pfnCImpl The pointer to the C routine.
5629 * @param a0 The argument.
5630 */
5631#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
5632
5633/**
5634 * Defers the entire instruction emulation to a C implementation routine and
5635 * returns, taking two arguments in addition to the standard ones.
5636 *
5637 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
5638 *
5639 * @param a_pfnCImpl The pointer to the C routine.
5640 * @param a0 The first extra argument.
5641 * @param a1 The second extra argument.
5642 */
5643#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
5644
5645/**
5646 * Defers the entire instruction emulation to a C implementation routine and
5647 * returns, taking three arguments in addition to the standard ones.
5648 *
5649 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
5650 *
5651 * @param a_pfnCImpl The pointer to the C routine.
5652 * @param a0 The first extra argument.
5653 * @param a1 The second extra argument.
5654 * @param a2 The third extra argument.
5655 */
5656#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
5657
5658/**
5659 * Calls a FPU assembly implementation taking two visible arguments.
5660 *
5661 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
5662 *
5663 * @param a_pfnAImpl Pointer to the assembly FPU routine.
5664 * @param a0 The first extra argument.
5665 * @param a1 The second extra argument.
5666 */
5667#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
5668 do { \
5669 iemFpuPrepareUsage(pIemCpu); \
5670 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
5671 } while (0)
5672/** Pushes FPU result onto the stack. */
5673#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
5674 iemFpuPushResult(pIemCpu, &a_FpuData)
5675/** Pushes FPU result onto the stack and sets the FPUDP. */
5676#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
5677 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
5678
5679
5680#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
5681#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
5682#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
5683#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
5684#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
5685 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
5686 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
5687#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
5688 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
5689 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
5690#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
5691 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
5692 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
5693 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
5694#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
5695 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
5696 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
5697 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
5698#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
5699#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
5700#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
5701#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
5702 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
5703 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5704#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
5705 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
5706 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5707#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
5708 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
5709 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5710#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
5711 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
5712 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5713#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
5714 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
5715 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5716#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
5717 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
5718 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5719#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
5720#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
5721#define IEM_MC_ELSE() } else {
5722#define IEM_MC_ENDIF() } do {} while (0)
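/*
 * The IF / ELSE / ENDIF macros open and close the braces themselves; a
 * conditional jump body pairs them up roughly like this (illustrative sketch,
 * i8Imm assumed to have been fetched by the decoder):
 *
 * @code
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
 *          IEM_MC_REL_JMP_S8(i8Imm);
 *      } IEM_MC_ELSE() {
 *          IEM_MC_ADVANCE_RIP();
 *      } IEM_MC_ENDIF();
 *      IEM_MC_END();
 * @endcode
 */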
5723
5724/** @} */
5725
5726
5727/** @name Opcode Debug Helpers.
5728 * @{
5729 */
5730#ifdef DEBUG
5731# define IEMOP_MNEMONIC(a_szMnemonic) \
5732 Log2(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, \
5733 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
5734# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
5735 Log2(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, \
5736 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
5737#else
5738# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
5739# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
5740#endif
5741
5742/** @} */
5743
5744
5745/** @name Opcode Helpers.
5746 * @{
5747 */
5748
5749/** The instruction allows no lock prefixing (in this encoding), throw #UD if
5750 * lock prefixed. */
5751#define IEMOP_HLP_NO_LOCK_PREFIX() \
5752 do \
5753 { \
5754 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
5755 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
5756 } while (0)
5757
5758/** The instruction is not available in 64-bit mode, throw #UD if we're in
5759 * 64-bit mode. */
5760#define IEMOP_HLP_NO_64BIT() \
5761 do \
5762 { \
5763 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
5764 return IEMOP_RAISE_INVALID_OPCODE(); \
5765 } while (0)
5766
5767/** The instruction defaults to 64-bit operand size if 64-bit mode. */
5768#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
5769 do \
5770 { \
5771 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
5772 iemRecalEffOpSize64Default(pIemCpu); \
5773 } while (0)
5774
5775
5776
5777/**
5778 * Calculates the effective address of a ModR/M memory operand.
5779 *
5780 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
5781 *
5782 * @return Strict VBox status code.
5783 * @param pIemCpu The IEM per CPU data.
5784 * @param bRm The ModRM byte.
5785 * @param pGCPtrEff Where to return the effective address.
5786 */
5787static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, PRTGCPTR pGCPtrEff)
5788{
5789 LogFlow(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
5790 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5791#define SET_SS_DEF() \
5792 do \
5793 { \
5794 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
5795 pIemCpu->iEffSeg = X86_SREG_SS; \
5796 } while (0)
5797
5798/** @todo Check the effective address size crap! */
5799 switch (pIemCpu->enmEffAddrMode)
5800 {
5801 case IEMMODE_16BIT:
5802 {
5803 uint16_t u16EffAddr;
5804
5805 /* Handle the disp16 form with no registers first. */
5806 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
5807 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
5808 else
5809 {
5810 /* Get the displacement. */
5811 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
5812 {
5813 case 0: u16EffAddr = 0; break;
5814 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
5815 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
5816 default: AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
5817 }
5818
5819 /* Add the base and index registers to the disp. */
5820 switch (bRm & X86_MODRM_RM_MASK)
5821 {
5822 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
5823 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
5824 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
5825 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
5826 case 4: u16EffAddr += pCtx->si; break;
5827 case 5: u16EffAddr += pCtx->di; break;
5828 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
5829 case 7: u16EffAddr += pCtx->bx; break;
5830 }
5831 }
5832
5833 *pGCPtrEff = u16EffAddr;
5834 LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#06RGv\n", *pGCPtrEff));
5835 return VINF_SUCCESS;
5836 }
5837
5838 case IEMMODE_32BIT:
5839 {
5840 uint32_t u32EffAddr;
5841
5842 /* Handle the disp32 form with no registers first. */
5843 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
5844 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
5845 else
5846 {
5847 /* Get the register (or SIB) value. */
5848 switch ((bRm & X86_MODRM_RM_MASK))
5849 {
5850 case 0: u32EffAddr = pCtx->eax; break;
5851 case 1: u32EffAddr = pCtx->ecx; break;
5852 case 2: u32EffAddr = pCtx->edx; break;
5853 case 3: u32EffAddr = pCtx->ebx; break;
5854 case 4: /* SIB */
5855 {
5856 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
5857
5858 /* Get the index and scale it. */
5859 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
5860 {
5861 case 0: u32EffAddr = pCtx->eax; break;
5862 case 1: u32EffAddr = pCtx->ecx; break;
5863 case 2: u32EffAddr = pCtx->edx; break;
5864 case 3: u32EffAddr = pCtx->ebx; break;
5865 case 4: u32EffAddr = 0; /*none */ break;
5866 case 5: u32EffAddr = pCtx->ebp; break;
5867 case 6: u32EffAddr = pCtx->esi; break;
5868 case 7: u32EffAddr = pCtx->edi; break;
5869 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5870 }
5871 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
5872
5873 /* add base */
5874 switch (bSib & X86_SIB_BASE_MASK)
5875 {
5876 case 0: u32EffAddr += pCtx->eax; break;
5877 case 1: u32EffAddr += pCtx->ecx; break;
5878 case 2: u32EffAddr += pCtx->edx; break;
5879 case 3: u32EffAddr += pCtx->ebx; break;
5880 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
5881 case 5:
5882 if ((bRm & X86_MODRM_MOD_MASK) != 0)
5883 {
5884 u32EffAddr += pCtx->ebp;
5885 SET_SS_DEF();
5886 }
5887 else
5888 {
5889 uint32_t u32Disp;
5890 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
5891 u32EffAddr += u32Disp;
5892 }
5893 break;
5894 case 6: u32EffAddr += pCtx->esi; break;
5895 case 7: u32EffAddr += pCtx->edi; break;
5896 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5897 }
5898 break;
5899 }
5900 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
5901 case 6: u32EffAddr = pCtx->esi; break;
5902 case 7: u32EffAddr = pCtx->edi; break;
5903 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5904 }
5905
5906 /* Get and add the displacement. */
5907 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
5908 {
5909 case 0:
5910 break;
5911 case 1:
5912 {
5913 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
5914 u32EffAddr += i8Disp;
5915 break;
5916 }
5917 case 2:
5918 {
5919 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
5920 u32EffAddr += u32Disp;
5921 break;
5922 }
5923 default:
5924 AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
5925 }
5926
5927 }
5928 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
5929 *pGCPtrEff = u32EffAddr;
5930 else
5931 {
5932 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
5933 *pGCPtrEff = u32EffAddr & UINT16_MAX;
5934 }
5935 LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
5936 return VINF_SUCCESS;
5937 }
5938
5939 case IEMMODE_64BIT:
5940 {
5941 uint64_t u64EffAddr;
5942
5943 /* Handle the rip+disp32 form with no registers first. */
5944 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
5945 {
5946 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
5947 u64EffAddr += pCtx->rip + pIemCpu->offOpcode;
5948 }
5949 else
5950 {
5951 /* Get the register (or SIB) value. */
5952 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
5953 {
5954 case 0: u64EffAddr = pCtx->rax; break;
5955 case 1: u64EffAddr = pCtx->rcx; break;
5956 case 2: u64EffAddr = pCtx->rdx; break;
5957 case 3: u64EffAddr = pCtx->rbx; break;
5958 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
5959 case 6: u64EffAddr = pCtx->rsi; break;
5960 case 7: u64EffAddr = pCtx->rdi; break;
5961 case 8: u64EffAddr = pCtx->r8; break;
5962 case 9: u64EffAddr = pCtx->r9; break;
5963 case 10: u64EffAddr = pCtx->r10; break;
5964 case 11: u64EffAddr = pCtx->r11; break;
5965 case 13: u64EffAddr = pCtx->r13; break;
5966 case 14: u64EffAddr = pCtx->r14; break;
5967 case 15: u64EffAddr = pCtx->r15; break;
5968 /* SIB */
5969 case 4:
5970 case 12:
5971 {
5972 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
5973
5974 /* Get the index and scale it. */
5975 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
5976 {
5977 case 0: u64EffAddr = pCtx->rax; break;
5978 case 1: u64EffAddr = pCtx->rcx; break;
5979 case 2: u64EffAddr = pCtx->rdx; break;
5980 case 3: u64EffAddr = pCtx->rbx; break;
5981 case 4: u64EffAddr = 0; /*none */ break;
5982 case 5: u64EffAddr = pCtx->rbp; break;
5983 case 6: u64EffAddr = pCtx->rsi; break;
5984 case 7: u64EffAddr = pCtx->rdi; break;
5985 case 8: u64EffAddr = pCtx->r8; break;
5986 case 9: u64EffAddr = pCtx->r9; break;
5987 case 10: u64EffAddr = pCtx->r10; break;
5988 case 11: u64EffAddr = pCtx->r11; break;
5989 case 12: u64EffAddr = pCtx->r12; break;
5990 case 13: u64EffAddr = pCtx->r13; break;
5991 case 14: u64EffAddr = pCtx->r14; break;
5992 case 15: u64EffAddr = pCtx->r15; break;
5993 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5994 }
5995 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
5996
5997 /* add base */
5998 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
5999 {
6000 case 0: u64EffAddr += pCtx->rax; break;
6001 case 1: u64EffAddr += pCtx->rcx; break;
6002 case 2: u64EffAddr += pCtx->rdx; break;
6003 case 3: u64EffAddr += pCtx->rbx; break;
6004 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
6005 case 6: u64EffAddr += pCtx->rsi; break;
6006 case 7: u64EffAddr += pCtx->rdi; break;
6007 case 8: u64EffAddr += pCtx->r8; break;
6008 case 9: u64EffAddr += pCtx->r9; break;
6009 case 10: u64EffAddr += pCtx->r10; break;
6010 case 11: u64EffAddr += pCtx->r11; break;
 case 12: u64EffAddr += pCtx->r12; break;
6011 case 14: u64EffAddr += pCtx->r14; break;
6012 case 15: u64EffAddr += pCtx->r15; break;
6013 /* complicated encodings */
6014 case 5:
6015 case 13:
6016 if ((bRm & X86_MODRM_MOD_MASK) != 0)
6017 {
6018 if (!pIemCpu->uRexB)
6019 {
6020 u64EffAddr += pCtx->rbp;
6021 SET_SS_DEF();
6022 }
6023 else
6024 u64EffAddr += pCtx->r13;
6025 }
6026 else
6027 {
6028 uint32_t u32Disp;
6029 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
6030 u64EffAddr += (int32_t)u32Disp;
6031 }
6032 break;
6033 }
6034 break;
6035 }
6036 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6037 }
6038
6039 /* Get and add the displacement. */
6040 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
6041 {
6042 case 0:
6043 break;
6044 case 1:
6045 {
6046 int8_t i8Disp;
6047 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
6048 u64EffAddr += i8Disp;
6049 break;
6050 }
6051 case 2:
6052 {
6053 uint32_t u32Disp;
6054 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
6055 u64EffAddr += (int32_t)u32Disp;
6056 break;
6057 }
6058 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
6059 }
6060
6061 }
6062 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
6063 *pGCPtrEff = u64EffAddr;
6064 else
6065 *pGCPtrEff = u64EffAddr & UINT32_MAX;
6066 LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
6067 return VINF_SUCCESS;
6068 }
6069 }
6070
6071 AssertFailedReturn(VERR_INTERNAL_ERROR_3);
6072}
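/*
 * Worked example (illustrative): in 32-bit addressing mode, bRm = 0x44
 * (mod=01, rm=100 -> SIB byte plus disp8 follow) with a SIB byte of 0x88
 * (scale=4, index=001/ecx, base=000/eax) and a disp8 of 0x10 yields
 * GCPtrEff = eax + ecx * 4 + 0x10, with DS as the default segment since
 * neither EBP nor ESP is involved.
 */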
6073
6074/** @} */
6075
6076
6077
6078/*
6079 * Include the instructions
6080 */
6081#include "IEMAllInstructions.cpp.h"
6082
6083
6084
6085
6086#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
6087
6088/**
6089 * Sets up execution verification mode.
6090 */
6091static void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
6092{
6093 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
6094 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
6095
6096 /*
6097 * Enable verification and/or logging.
6098 */
6099 pIemCpu->fNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
6100 if ( pIemCpu->fNoRem
6101#if 0 /* auto enable on first paged protected mode interrupt */
6102 && pOrgCtx->eflags.Bits.u1IF
6103 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
6104 && TRPMHasTrap(pVCpu)
6105 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
6106#endif
6107#if 0
6108 && pOrgCtx->cs == 0x10
6109 && ( pOrgCtx->rip == 0x90119e3e
6110 || pOrgCtx->rip == 0x901d9810
6111 )
6112#endif
6113#if 1 /* Auto enable DSL - FPU stuff. */
6114 && pOrgCtx->cs == 0x10
6115 && ( pOrgCtx->rip == 0xc02ec07f
6116 || pOrgCtx->rip == 0xc02ec082
6117 || pOrgCtx->rip == 0xc02ec0c9
6118 )
6119#endif
6120#if 0
6121 && pOrgCtx->rip == 0x9022bb3a
6122#endif
6123#if 0
6124 && 0
6125#endif
6126 )
6127 {
6128 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
6129 RTLogFlags(NULL, "enabled");
6130 pIemCpu->fNoRem = false;
6131 }
6132
6133 /*
6134 * Switch state.
6135 */
6136 if (IEM_VERIFICATION_ENABLED(pIemCpu))
6137 {
6138 static CPUMCTX s_DebugCtx; /* Ugly! */
6139
6140 s_DebugCtx = *pOrgCtx;
6141 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
6142 }
6143
6144 /*
6145 * See if there is an interrupt pending in TRPM and inject it if we can.
6146 */
6147 if ( pOrgCtx->eflags.Bits.u1IF
6148 && TRPMHasTrap(pVCpu)
6149 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
6150 {
6151 uint8_t u8TrapNo;
6152 TRPMEVENT enmType;
6153 RTGCUINT uErrCode;
6154 RTGCPTR uCr2;
6155 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2); AssertRC(rc2);
6156 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2);
6157 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
6158 TRPMResetTrap(pVCpu);
6159 }
6160
6161 /*
6162 * Reset the counters.
6163 */
6164 pIemCpu->cIOReads = 0;
6165 pIemCpu->cIOWrites = 0;
6166 pIemCpu->fUndefinedEFlags = 0;
6167
6168 if (IEM_VERIFICATION_ENABLED(pIemCpu))
6169 {
6170 /*
6171 * Free all verification records.
6172 */
6173 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
6174 pIemCpu->pIemEvtRecHead = NULL;
6175 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
6176 do
6177 {
6178 while (pEvtRec)
6179 {
6180 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
6181 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
6182 pIemCpu->pFreeEvtRec = pEvtRec;
6183 pEvtRec = pNext;
6184 }
6185 pEvtRec = pIemCpu->pOtherEvtRecHead;
6186 pIemCpu->pOtherEvtRecHead = NULL;
6187 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
6188 } while (pEvtRec);
6189 }
6190}
6191
6192
6193/**
6194 * Allocates an event record.
6195 * @returns Pointer to a record.
6196 */
6197static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
6198{
6199 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
6200 return NULL;
6201
6202 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
6203 if (pEvtRec)
6204 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
6205 else
6206 {
6207 if (!pIemCpu->ppIemEvtRecNext)
6208 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
6209
6210 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
6211 if (!pEvtRec)
6212 return NULL;
6213 }
6214 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
6215 pEvtRec->pNext = NULL;
6216 return pEvtRec;
6217}
6218
6219
6220/**
6221 * IOMMMIORead notification.
6222 */
6223VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
6224{
6225 PVMCPU pVCpu = VMMGetCpu(pVM);
6226 if (!pVCpu)
6227 return;
6228 PIEMCPU pIemCpu = &pVCpu->iem.s;
6229 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6230 if (!pEvtRec)
6231 return;
6232 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6233 pEvtRec->u.RamRead.GCPhys = GCPhys;
6234 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
6235 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
6236 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
6237}
6238
6239
6240/**
6241 * IOMMMIOWrite notification.
6242 */
6243VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
6244{
6245 PVMCPU pVCpu = VMMGetCpu(pVM);
6246 if (!pVCpu)
6247 return;
6248 PIEMCPU pIemCpu = &pVCpu->iem.s;
6249 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6250 if (!pEvtRec)
6251 return;
6252 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6253 pEvtRec->u.RamWrite.GCPhys = GCPhys;
6254 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
6255 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
6256 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
6257 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
6258 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
6259 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
6260 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
6261}
6262
6263
6264/**
6265 * IOMIOPortRead notification.
6266 */
6267VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
6268{
6269 PVMCPU pVCpu = VMMGetCpu(pVM);
6270 if (!pVCpu)
6271 return;
6272 PIEMCPU pIemCpu = &pVCpu->iem.s;
6273 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6274 if (!pEvtRec)
6275 return;
6276 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
6277 pEvtRec->u.IOPortRead.Port = Port;
6278 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
6279 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
6280 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
6281}
6282
6283/**
6284 * IOMIOPortWrite notification.
6285 */
6286VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
6287{
6288 PVMCPU pVCpu = VMMGetCpu(pVM);
6289 if (!pVCpu)
6290 return;
6291 PIEMCPU pIemCpu = &pVCpu->iem.s;
6292 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6293 if (!pEvtRec)
6294 return;
6295 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
6296 pEvtRec->u.IOPortWrite.Port = Port;
6297 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
6298 pEvtRec->u.IOPortWrite.u32Value = u32Value;
6299 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
6300 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
6301}
6302
6303
6304VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
6305{
6306 AssertFailed();
6307}
6308
6309
6310VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
6311{
6312 AssertFailed();
6313}
6314
6315
6316/**
6317 * Fakes and records an I/O port read.
6318 *
6319 * @returns VINF_SUCCESS.
6320 * @param pIemCpu The IEM per CPU data.
6321 * @param Port The I/O port.
6322 * @param pu32Value Where to store the fake value.
6323 * @param cbValue The size of the access.
6324 */
6325static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
6326{
6327 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6328 if (pEvtRec)
6329 {
6330 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
6331 pEvtRec->u.IOPortRead.Port = Port;
6332 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
6333 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6334 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6335 }
6336 pIemCpu->cIOReads++;
6337 *pu32Value = 0xcccccccc;
6338 return VINF_SUCCESS;
6339}
6340
6341
6342/**
6343 * Fakes and records an I/O port write.
6344 *
6345 * @returns VINF_SUCCESS.
6346 * @param pIemCpu The IEM per CPU data.
6347 * @param Port The I/O port.
6348 * @param u32Value The value being written.
6349 * @param cbValue The size of the access.
6350 */
6351static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
6352{
6353 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6354 if (pEvtRec)
6355 {
6356 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
6357 pEvtRec->u.IOPortWrite.Port = Port;
6358 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
6359 pEvtRec->u.IOPortWrite.u32Value = u32Value;
6360 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6361 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6362 }
6363 pIemCpu->cIOWrites++;
6364 return VINF_SUCCESS;
6365}
6366
6367
6368/**
6369 * Used to add extra details about a stub case.
6370 * @param pIemCpu The IEM per CPU state.
6371 */
6372static void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
6373{
6374 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6375 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6376 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
6377 char szRegs[4096];
6378 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6379 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6380 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6381 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6382 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6383 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6384 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6385 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6386 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6387 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6388 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6389 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6390 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6391 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6392 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6393 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6394 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6395 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6396 " efer=%016VR{efer}\n"
6397 " pat=%016VR{pat}\n"
6398 " sf_mask=%016VR{sf_mask}\n"
6399 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6400 " lstar=%016VR{lstar}\n"
6401 " star=%016VR{star} cstar=%016VR{cstar}\n"
6402 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6403 );
6404
6405 char szInstr1[256];
6406 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip - pIemCpu->offOpcode,
6407 DBGF_DISAS_FLAGS_DEFAULT_MODE,
6408 szInstr1, sizeof(szInstr1), NULL);
6409 char szInstr2[256];
6410 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
6411 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6412 szInstr2, sizeof(szInstr2), NULL);
6413
6414 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
6415}
6416
6417
6418/**
6419 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
6420 * dump to the assertion info.
6421 *
6422 * @param pEvtRec The record to dump.
6423 */
6424static void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
6425{
6426 switch (pEvtRec->enmEvent)
6427 {
6428 case IEMVERIFYEVENT_IOPORT_READ:
6429 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
6430 pEvtRec->u.IOPortRead.Port,
6431 pEvtRec->u.IOPortRead.cbValue);
6432 break;
6433 case IEMVERIFYEVENT_IOPORT_WRITE:
6434 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
6435 pEvtRec->u.IOPortWrite.Port,
6436 pEvtRec->u.IOPortWrite.cbValue,
6437 pEvtRec->u.IOPortWrite.u32Value);
6438 break;
6439 case IEMVERIFYEVENT_RAM_READ:
6440 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
6441 pEvtRec->u.RamRead.GCPhys,
6442 pEvtRec->u.RamRead.cb);
6443 break;
6444 case IEMVERIFYEVENT_RAM_WRITE:
6445 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*RHxs\n",
6446 pEvtRec->u.RamWrite.GCPhys,
6447 pEvtRec->u.RamWrite.cb,
6448 (int)pEvtRec->u.RamWrite.cb,
6449 pEvtRec->u.RamWrite.ab);
6450 break;
6451 default:
6452 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
6453 break;
6454 }
6455}
6456
6457
6458/**
6459 * Raises an assertion on the specified records, showing the given message with
6460 * record dumps attached.
6461 *
6462 * @param pIemCpu The IEM per CPU data.
6463 * @param pEvtRec1 The first record.
6464 * @param pEvtRec2 The second record.
6465 * @param pszMsg The message explaining why we're asserting.
6466 */
6467static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
6468{
6469 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
6470 iemVerifyAssertAddRecordDump(pEvtRec1);
6471 iemVerifyAssertAddRecordDump(pEvtRec2);
6472 iemVerifyAssertMsg2(pIemCpu);
6473 RTAssertPanic();
6474}
6475
6476
6477/**
6478 * Raises an assertion on the specified record, showing the given message with
6479 * a record dump attached.
6480 *
6481 * @param pIemCpu The IEM per CPU data.
6482 * @param pEvtRec The record.
6483 * @param pszMsg The message explaining why we're asserting.
6484 */
6485static void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
6486{
6487 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
6488 iemVerifyAssertAddRecordDump(pEvtRec);
6489 iemVerifyAssertMsg2(pIemCpu);
6490 RTAssertPanic();
6491}
6492
6493
6494/**
6495 * Verifies a write record.
6496 *
6497 * @param pIemCpu The IEM per CPU data.
6498 * @param pEvtRec The write record.
6499 */
6500static void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec)
6501{
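    /* Read back what actually ended up in guest memory and compare it with the
       bytes IEM recorded as written. */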
6502 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
6503 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
6504 if ( RT_FAILURE(rc)
6505 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
6506 {
6507 /* fend off ins */
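        /* (A 1/2/4 byte write starting with 0xcc after at least one port read is
            most likely INS storing the filler value from the fake/ignored port
            read, which REM will not reproduce.) */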
6508 if ( !pIemCpu->cIOReads
6509 || pEvtRec->u.RamWrite.ab[0] != 0xcc
6510 || ( pEvtRec->u.RamWrite.cb != 1
6511 && pEvtRec->u.RamWrite.cb != 2
6512 && pEvtRec->u.RamWrite.cb != 4) )
6513 {
6514 /* fend off ROMs */
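            /* (Only complain about addresses outside the usual ROM ranges: the 32KB
                VGA BIOS at 0C0000h, the 128KB area at 0E0000h and the 256KB BIOS
                mapping just below 4GB.) */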
6515 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000c0000) > UINT32_C(0x8000)
6516 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000e0000) > UINT32_C(0x20000)
6517 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
6518 {
6519 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
6520 RTAssertMsg2Weak("Memory at %RGp differs\n", pEvtRec->u.RamWrite.GCPhys);
6521 RTAssertMsg2Add("REM: %.*Rhxs\n"
6522 "IEM: %.*Rhxs\n",
6523 pEvtRec->u.RamWrite.cb, abBuf,
6524 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
6525 iemVerifyAssertAddRecordDump(pEvtRec);
6526 iemVerifyAssertMsg2(pIemCpu);
6527 RTAssertPanic();
6528 }
6529 }
6530 }
6531
6532}
6533
6534/**
6535 * Performs the post-execution verification checks.
6536 */
6537static void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
6538{
6539 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
6540 return;
6541
6542 /*
6543 * Switch back the state.
6544 */
6545 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
6546 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
6547 Assert(pOrgCtx != pDebugCtx);
6548 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
6549
6550 /*
6551 * Execute the instruction in REM.
6552 */
6553 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6554 EMRemLock(pVM);
6555 int rc = REMR3EmulateInstruction(pVM, IEMCPU_TO_VMCPU(pIemCpu));
6556 AssertRC(rc);
6557 EMRemUnlock(pVM);
6558
6559 /*
6560 * Compare the register states.
6561 */
6562 unsigned cDiffs = 0;
6563 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
6564 {
6565 Log(("REM and IEM end up with different registers!\n"));
6566
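/* Reports and counts a guest context field that differs between the IEM result
   (pDebugCtx) and the REM result (pOrgCtx); used by the comparisons below. */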
6567# define CHECK_FIELD(a_Field) \
6568 do \
6569 { \
6570 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
6571 { \
6572 switch (sizeof(pOrgCtx->a_Field)) \
6573 { \
6574 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
6575 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - rem=%04x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
6576 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - rem=%08x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
6577 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - rem=%016llx\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
6578 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
6579 } \
6580 cDiffs++; \
6581 } \
6582 } while (0)
6583
6584# define CHECK_BIT_FIELD(a_Field) \
6585 do \
6586 { \
6587 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
6588 { \
6589 RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); \
6590 cDiffs++; \
6591 } \
6592 } while (0)
6593
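/* Compares a selector register and its hidden parts; the only tolerated
   difference is IEM having set the accessed bit when REM has not. */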
6594# define CHECK_SEL(a_Sel) \
6595 do \
6596 { \
6597 CHECK_FIELD(a_Sel); \
6598 if ( pOrgCtx->a_Sel##Hid.Attr.u != pDebugCtx->a_Sel##Hid.Attr.u \
6599 && (pOrgCtx->a_Sel##Hid.Attr.u | X86_SEL_TYPE_ACCESSED) != pDebugCtx->a_Sel##Hid.Attr.u) \
6600 { \
6601 RTAssertMsg2Weak(" %8sHid.Attr differs - iem=%02x - rem=%02x\n", #a_Sel, pDebugCtx->a_Sel##Hid.Attr.u, pOrgCtx->a_Sel##Hid.Attr.u); \
6602 cDiffs++; \
6603 } \
6604 CHECK_FIELD(a_Sel##Hid.u64Base); \
6605 CHECK_FIELD(a_Sel##Hid.u32Limit); \
6606 } while (0)
6607
6608 if (memcmp(&pOrgCtx->fpu, &pDebugCtx->fpu, sizeof(pDebugCtx->fpu)))
6609 {
6610 RTAssertMsg2Weak(" the FPU state differs\n");
6611 cDiffs++;
6612 CHECK_FIELD(fpu.FCW);
6613 CHECK_FIELD(fpu.FSW);
6614 CHECK_FIELD(fpu.FTW);
6615 CHECK_FIELD(fpu.FOP);
6616 CHECK_FIELD(fpu.FPUIP);
6617 CHECK_FIELD(fpu.CS);
6618 CHECK_FIELD(fpu.Rsrvd1);
6619 CHECK_FIELD(fpu.FPUDP);
6620 CHECK_FIELD(fpu.DS);
6621 CHECK_FIELD(fpu.Rsrvd2);
6622 CHECK_FIELD(fpu.MXCSR);
6623 CHECK_FIELD(fpu.MXCSR_MASK);
6624 CHECK_FIELD(fpu.aRegs[0].au64[0]); CHECK_FIELD(fpu.aRegs[0].au64[1]);
6625 CHECK_FIELD(fpu.aRegs[1].au64[0]); CHECK_FIELD(fpu.aRegs[1].au64[1]);
6626 CHECK_FIELD(fpu.aRegs[2].au64[0]); CHECK_FIELD(fpu.aRegs[2].au64[1]);
6627 CHECK_FIELD(fpu.aRegs[3].au64[0]); CHECK_FIELD(fpu.aRegs[3].au64[1]);
6628 CHECK_FIELD(fpu.aRegs[4].au64[0]); CHECK_FIELD(fpu.aRegs[4].au64[1]);
6629 CHECK_FIELD(fpu.aRegs[5].au64[0]); CHECK_FIELD(fpu.aRegs[5].au64[1]);
6630 CHECK_FIELD(fpu.aRegs[6].au64[0]); CHECK_FIELD(fpu.aRegs[6].au64[1]);
6631 CHECK_FIELD(fpu.aRegs[7].au64[0]); CHECK_FIELD(fpu.aRegs[7].au64[1]);
6632 CHECK_FIELD(fpu.aXMM[ 0].au64[0]); CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
6633 CHECK_FIELD(fpu.aXMM[ 1].au64[0]); CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
6634 CHECK_FIELD(fpu.aXMM[ 2].au64[0]); CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
6635 CHECK_FIELD(fpu.aXMM[ 3].au64[0]); CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
6636 CHECK_FIELD(fpu.aXMM[ 4].au64[0]); CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
6637 CHECK_FIELD(fpu.aXMM[ 5].au64[0]); CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
6638 CHECK_FIELD(fpu.aXMM[ 6].au64[0]); CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
6639 CHECK_FIELD(fpu.aXMM[ 7].au64[0]); CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
6640 CHECK_FIELD(fpu.aXMM[ 8].au64[0]); CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
6641 CHECK_FIELD(fpu.aXMM[ 9].au64[0]); CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
6642 CHECK_FIELD(fpu.aXMM[10].au64[0]); CHECK_FIELD(fpu.aXMM[10].au64[1]);
6643 CHECK_FIELD(fpu.aXMM[11].au64[0]); CHECK_FIELD(fpu.aXMM[11].au64[1]);
6644 CHECK_FIELD(fpu.aXMM[12].au64[0]); CHECK_FIELD(fpu.aXMM[12].au64[1]);
6645 CHECK_FIELD(fpu.aXMM[13].au64[0]); CHECK_FIELD(fpu.aXMM[13].au64[1]);
6646 CHECK_FIELD(fpu.aXMM[14].au64[0]); CHECK_FIELD(fpu.aXMM[14].au64[1]);
6647 CHECK_FIELD(fpu.aXMM[15].au64[0]); CHECK_FIELD(fpu.aXMM[15].au64[1]);
6648 for (unsigned i = 0; i < RT_ELEMENTS(pOrgCtx->fpu.au32RsrvdRest); i++)
6649 CHECK_FIELD(fpu.au32RsrvdRest[i]);
6650 }
6651 CHECK_FIELD(rip);
6652 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
6653 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
6654 {
6655 RTAssertMsg2Weak(" rflags differs - iem=%08llx rem=%08llx\n", pDebugCtx->rflags.u, pOrgCtx->rflags.u);
6656 CHECK_BIT_FIELD(rflags.Bits.u1CF);
6657 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
6658 CHECK_BIT_FIELD(rflags.Bits.u1PF);
6659 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
6660 CHECK_BIT_FIELD(rflags.Bits.u1AF);
6661 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
6662 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
6663 CHECK_BIT_FIELD(rflags.Bits.u1SF);
6664 CHECK_BIT_FIELD(rflags.Bits.u1TF);
6665 CHECK_BIT_FIELD(rflags.Bits.u1IF);
6666 CHECK_BIT_FIELD(rflags.Bits.u1DF);
6667 CHECK_BIT_FIELD(rflags.Bits.u1OF);
6668 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
6669 CHECK_BIT_FIELD(rflags.Bits.u1NT);
6670 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
6671 CHECK_BIT_FIELD(rflags.Bits.u1RF);
6672 CHECK_BIT_FIELD(rflags.Bits.u1VM);
6673 CHECK_BIT_FIELD(rflags.Bits.u1AC);
6674 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
6675 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
6676 CHECK_BIT_FIELD(rflags.Bits.u1ID);
6677 }
6678
6679 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
6680 CHECK_FIELD(rax);
6681 CHECK_FIELD(rcx);
6682 if (!pIemCpu->fIgnoreRaxRdx)
6683 CHECK_FIELD(rdx);
6684 CHECK_FIELD(rbx);
6685 CHECK_FIELD(rsp);
6686 CHECK_FIELD(rbp);
6687 CHECK_FIELD(rsi);
6688 CHECK_FIELD(rdi);
6689 CHECK_FIELD(r8);
6690 CHECK_FIELD(r9);
6691 CHECK_FIELD(r10);
6692 CHECK_FIELD(r11);
6693 CHECK_FIELD(r12);
6694 CHECK_FIELD(r13);
        CHECK_FIELD(r14);
        CHECK_FIELD(r15);
6695 CHECK_SEL(cs);
6696 CHECK_SEL(ss);
6697 CHECK_SEL(ds);
6698 CHECK_SEL(es);
6699 CHECK_SEL(fs);
6700 CHECK_SEL(gs);
6701 CHECK_FIELD(cr0);
6702 CHECK_FIELD(cr2);
6703 CHECK_FIELD(cr3);
6704 CHECK_FIELD(cr4);
6705 CHECK_FIELD(dr[0]);
6706 CHECK_FIELD(dr[1]);
6707 CHECK_FIELD(dr[2]);
6708 CHECK_FIELD(dr[3]);
6709 CHECK_FIELD(dr[6]);
6710 if ((pOrgCtx->dr[7] & ~X86_DR7_MB1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_MB1_MASK)) /* REM 'mov drX,greg' bug.*/
6711 CHECK_FIELD(dr[7]);
6712 CHECK_FIELD(gdtr.cbGdt);
6713 CHECK_FIELD(gdtr.pGdt);
6714 CHECK_FIELD(idtr.cbIdt);
6715 CHECK_FIELD(idtr.pIdt);
6716 CHECK_FIELD(ldtr);
6717 CHECK_FIELD(ldtrHid.u64Base);
6718 CHECK_FIELD(ldtrHid.u32Limit);
6719 CHECK_FIELD(ldtrHid.Attr.u);
6720 CHECK_FIELD(tr);
6721 CHECK_FIELD(trHid.u64Base);
6722 CHECK_FIELD(trHid.u32Limit);
6723 CHECK_FIELD(trHid.Attr.u);
6724 CHECK_FIELD(SysEnter.cs);
6725 CHECK_FIELD(SysEnter.eip);
6726 CHECK_FIELD(SysEnter.esp);
6727 CHECK_FIELD(msrEFER);
6728 CHECK_FIELD(msrSTAR);
6729 CHECK_FIELD(msrPAT);
6730 CHECK_FIELD(msrLSTAR);
6731 CHECK_FIELD(msrCSTAR);
6732 CHECK_FIELD(msrSFMASK);
6733 CHECK_FIELD(msrKERNELGSBASE);
6734
6735 if (cDiffs != 0)
6736 {
6737 if (LogIs3Enabled())
6738 DBGFR3Info(pVM, "cpumguest", "verbose", NULL);
6739 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
6740 iemVerifyAssertMsg2(pIemCpu);
6741 RTAssertPanic();
6742 }
6743# undef CHECK_FIELD
6744# undef CHECK_BIT_FIELD
6745 }
6746
6747 /*
6748 * If the register state compared fine, check the verification event
6749 * records.
6750 */
6751 if (cDiffs == 0)
6752 {
6753 /*
6754 * Compare verification event records.
6755 * - I/O port accesses should be a 1:1 match.
6756 */
6757 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
6758 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
6759 while (pIemRec && pOtherRec)
6760 {
6761 /* Since we might miss RAM writes and reads, skip extra IEM RAM records:
6762 ignore the reads, but verify that the writes actually hit guest memory. */
6763 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
6764 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
6765 && pIemRec->pNext)
6766 {
6767 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
6768 iemVerifyWriteRecord(pIemCpu, pIemRec);
6769 pIemRec = pIemRec->pNext;
6770 }
6771
6772 /* Do the compare. */
6773 if (pIemRec->enmEvent != pOtherRec->enmEvent)
6774 {
6775 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
6776 break;
6777 }
6778 bool fEquals;
6779 switch (pIemRec->enmEvent)
6780 {
6781 case IEMVERIFYEVENT_IOPORT_READ:
6782 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
6783 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
6784 break;
6785 case IEMVERIFYEVENT_IOPORT_WRITE:
6786 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
6787 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
6788 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
6789 break;
6790 case IEMVERIFYEVENT_RAM_READ:
6791 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
6792 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
6793 break;
6794 case IEMVERIFYEVENT_RAM_WRITE:
6795 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
6796 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
6797 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
6798 break;
6799 default:
6800 fEquals = false;
6801 break;
6802 }
6803 if (!fEquals)
6804 {
6805 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
6806 break;
6807 }
6808
6809 /* advance */
6810 pIemRec = pIemRec->pNext;
6811 pOtherRec = pOtherRec->pNext;
6812 }
6813
6814 /* Skip remaining IEM RAM records: ignore reads, verify any writes. */
6815 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
6816 {
6817 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
6818 iemVerifyWriteRecord(pIemCpu, pIemRec);
6819 pIemRec = pIemRec->pNext;
6820 }
6821 if (pIemRec != NULL)
6822 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
6823 else if (pOtherRec != NULL)
6824 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
6825 }
6826 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
6827
6828#if 0
6829 /*
6830 * HACK ALERT! You don't normally want to verify a whole boot sequence.
6831 */
6832 if (pIemCpu->cInstructions == 1)
6833 RTLogFlags(NULL, "disabled");
6834#endif
6835}
6836
6837#else /* !IEM_VERIFICATION_MODE || !IN_RING3 */
6838
6839/* Stubs used when verification mode is unavailable (not ring-3 or not compiled in). */
6840static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
6841{
6842 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
6843 return VERR_INTERNAL_ERROR;
6844}
6845
6846static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
6847{
6848 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
6849 return VERR_INTERNAL_ERROR;
6850}
6851
6852#endif /* !IEM_VERIFICATION_MODE || !IN_RING3 */
6853
6854
6855/**
6856 * Execute one instruction.
6857 *
6858 * @returns Strict VBox status code.
6859 * @param pVCpu The current virtual CPU.
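 *
 * @remarks Minimal, purely illustrative call sketch (the real EM loop does
 *          considerably more around this, e.g. status code handling):
 * @code
 *      VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 * @endcode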
6860 */
6861VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
6862{
6863 PIEMCPU pIemCpu = &pVCpu->iem.s;
6864
6865#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
6866 iemExecVerificationModeSetup(pIemCpu);
6867#endif
6868#ifdef LOG_ENABLED
6869 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6870 if (LogIs2Enabled())
6871 {
6872 char szInstr[256];
6873 uint32_t cbInstr = 0;
6874 DBGFR3DisasInstrEx(pVCpu->pVMR3, pVCpu->idCpu, 0, 0,
6875 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6876 szInstr, sizeof(szInstr), &cbInstr);
6877
6878 Log2(("**** "
6879 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
6880 " eip=%08x esp=%08x ebp=%08x iopl=%d\n"
6881 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
6882 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
6883 " %s\n"
6884 ,
6885 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
6886 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL,
6887 (RTSEL)pCtx->cs, (RTSEL)pCtx->ss, (RTSEL)pCtx->ds, (RTSEL)pCtx->es,
6888 (RTSEL)pCtx->fs, (RTSEL)pCtx->gs, pCtx->eflags.u,
6889 pCtx->fpu.FSW, pCtx->fpu.FCW, pCtx->fpu.FTW, pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK,
6890 szInstr));
6891
6892 if (LogIs3Enabled())
6893 DBGFR3Info(pVCpu->pVMR3, "cpumguest", "verbose", NULL);
6894 }
6895#endif
6896
6897 /*
6898 * Do the decoding and emulation.
6899 */
6900 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
6901 if (rcStrict != VINF_SUCCESS)
6902 {
6903#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
6904 iemExecVerificationModeCheck(pIemCpu);
6905#endif
6906 return rcStrict;
6907 }
6908
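    /* Fetch the first opcode byte and dispatch it through the one byte opcode map. */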
6909 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
6910 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
6911 if (rcStrict == VINF_SUCCESS)
6912 pIemCpu->cInstructions++;
6913//#ifdef DEBUG
6914// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
6915//#endif
6916
6917 /* Execute the next instruction as well if a cli, pop ss or
6918 mov ss, Gr has just completed successfully. */
6919 if ( rcStrict == VINF_SUCCESS
6920 && VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
6921 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
6922 {
6923 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
6924 if (rcStrict == VINF_SUCCESS)
6925 {
6926 IEM_OPCODE_GET_NEXT_U8(&b);
6927 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
6928 if (rcStrict == VINF_SUCCESS)
6929 pIemCpu->cInstructions++;
6930 }
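        /* Disarm the inhibition by recording an address the guest RIP cannot match. */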
6931 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
6932 }
6933
6934#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
6935 /*
6936 * Assert some sanity.
6937 */
6938 iemExecVerificationModeCheck(pIemCpu);
6939#endif
6940 return rcStrict;
6941}
6942
6943
6944/**
6945 * Injects a trap, fault, abort, software interrupt or external interrupt.
6946 *
6947 * The parameter list matches TRPMQueryTrapAll pretty closely.
6948 *
6949 * @returns Strict VBox status code.
6950 * @param pVCpu The current virtual CPU.
6951 * @param u8TrapNo The trap number.
6952 * @param enmType What type is it (trap/fault/abort), software
6953 * interrupt or hardware interrupt.
6954 * @param uErrCode The error code if applicable.
6955 * @param uCr2 The CR2 value if applicable.
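 *
 * @remarks Illustrative example only: injecting an external (hardware)
 *          interrupt on a hypothetical vector 0x41, which needs neither an
 *          error code nor a CR2 value:
 * @code
 *      VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, 0x41, TRPM_HARDWARE_INT, 0, 0);
 * @endcode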
6956 */
6957VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2)
6958{
6959 iemInitDecoder(&pVCpu->iem.s);
6960
6961 uint32_t fFlags;
6962 switch (enmType)
6963 {
6964 case TRPM_HARDWARE_INT:
6965 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
6966 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
6967 uErrCode = uCr2 = 0;
6968 break;
6969
6970 case TRPM_SOFTWARE_INT:
6971 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
6972 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
6973 uErrCode = uCr2 = 0;
6974 break;
6975
6976 case TRPM_TRAP:
6977 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
6978 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
6979 if (u8TrapNo == X86_XCPT_PF)
6980 fFlags |= IEM_XCPT_FLAGS_CR2;
6981 switch (u8TrapNo)
6982 {
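            /* These exception vectors push an error code onto the stack. */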
6983 case X86_XCPT_DF:
6984 case X86_XCPT_TS:
6985 case X86_XCPT_NP:
6986 case X86_XCPT_SS:
6987 case X86_XCPT_PF:
6988 case X86_XCPT_AC:
6989 fFlags |= IEM_XCPT_FLAGS_ERR;
6990 break;
6991 }
6992 break;
6993
6994 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6995 }
6996
6997 return iemRaiseXcptOrInt(&pVCpu->iem.s, 0, u8TrapNo, fFlags, uErrCode, uCr2);
6998}
6999