VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@36798

Last change on this file since 36798 was 36798, checked in by vboxsync, 14 years ago

build fix

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 232.1 KB
1/* $Id: IEMAll.cpp 36798 2011-04-21 15:58:16Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 */
43
44/*******************************************************************************
45* Header Files *
46*******************************************************************************/
47//#define RT_STRICT
48#define LOG_GROUP LOG_GROUP_EM /** @todo add log group */
49#include <VBox/vmm/iem.h>
50#include <VBox/vmm/pgm.h>
51#include <VBox/vmm/iom.h>
52#include <VBox/vmm/em.h>
53#include <VBox/vmm/dbgf.h>
54#ifdef IEM_VERIFICATION_MODE
55# include <VBox/vmm/rem.h>
56# include <VBox/vmm/mm.h>
57#endif
58#include "IEMInternal.h"
59#include <VBox/vmm/vm.h>
60#include <VBox/log.h>
61#include <VBox/err.h>
62#include <VBox/param.h>
63#include <VBox/x86.h>
64#include <iprt/assert.h>
65#include <iprt/string.h>
66
67
68/*******************************************************************************
69* Structures and Typedefs *
70*******************************************************************************/
71/** @typedef PFNIEMOP
72 * Pointer to an opcode decoder function.
73 */
74
75/** @def FNIEMOP_DEF
76 * Define an opcode decoder function.
77 *
78 * We're using macros for this so that adding and removing parameters as well as
79 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
80 *
81 * @param a_Name The function name.
82 */
83
84
85#if defined(__GNUC__) && defined(RT_ARCH_X86)
86typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
87# define FNIEMOP_DEF(a_Name) \
88 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name (PIEMCPU pIemCpu)
89# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
90 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
91# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
92 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
93
94#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
95typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
96# define FNIEMOP_DEF(a_Name) \
97 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
98# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
99 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
100# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
101 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
102
103#else
104typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
105# define FNIEMOP_DEF(a_Name) \
106 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
107# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
108 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
109# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
110 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
111
112#endif
113
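/*
 * Illustrative sketch only: a hypothetical one-byte opcode handler defined and
 * invoked through the FNIEMOP_* macros above. The handler name and body are
 * made up for this example and are not part of the real decoder tables.
 *
 *      FNIEMOP_DEF(iemOp_example_nop)      // expands to a static function using the
 *      {                                   // calling convention selected above
 *          iemRegUpdateRip(pIemCpu);       // advance RIP past the fetched opcode
 *          return VINF_SUCCESS;
 *      }
 *
 *      // ... and on the dispatcher side:
 *      VBOXSTRICTRC rcStrict = FNIEMOP_CALL(iemOp_example_nop);   // passes pIemCpu implicitly
 */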
114
115/**
116 * Function table for a binary operator providing implementation based on
117 * operand size.
118 */
119typedef struct IEMOPBINSIZES
120{
121 PFNIEMAIMPLBINU8 pfnNormalU8, pfnLockedU8;
122 PFNIEMAIMPLBINU16 pfnNormalU16, pfnLockedU16;
123 PFNIEMAIMPLBINU32 pfnNormalU32, pfnLockedU32;
124 PFNIEMAIMPLBINU64 pfnNormalU64, pfnLockedU64;
125} IEMOPBINSIZES;
126/** Pointer to a binary operator function table. */
127typedef IEMOPBINSIZES const *PCIEMOPBINSIZES;
128
129
130/**
131 * Function table for a unary operator providing implementation based on
132 * operand size.
133 */
134typedef struct IEMOPUNARYSIZES
135{
136 PFNIEMAIMPLUNARYU8 pfnNormalU8, pfnLockedU8;
137 PFNIEMAIMPLUNARYU16 pfnNormalU16, pfnLockedU16;
138 PFNIEMAIMPLUNARYU32 pfnNormalU32, pfnLockedU32;
139 PFNIEMAIMPLUNARYU64 pfnNormalU64, pfnLockedU64;
140} IEMOPUNARYSIZES;
141/** Pointer to a unary operator function table. */
142typedef IEMOPUNARYSIZES const *PCIEMOPUNARYSIZES;
143
144
145/**
146 * Function table for a shift operator providing implementation based on
147 * operand size.
148 */
149typedef struct IEMOPSHIFTSIZES
150{
151 PFNIEMAIMPLSHIFTU8 pfnNormalU8;
152 PFNIEMAIMPLSHIFTU16 pfnNormalU16;
153 PFNIEMAIMPLSHIFTU32 pfnNormalU32;
154 PFNIEMAIMPLSHIFTU64 pfnNormalU64;
155} IEMOPSHIFTSIZES;
156/** Pointer to a shift operator function table. */
157typedef IEMOPSHIFTSIZES const *PCIEMOPSHIFTSIZES;
158
159
160/**
161 * Function table for a multiplication or division operation.
162 */
163typedef struct IEMOPMULDIVSIZES
164{
165 PFNIEMAIMPLMULDIVU8 pfnU8;
166 PFNIEMAIMPLMULDIVU16 pfnU16;
167 PFNIEMAIMPLMULDIVU32 pfnU32;
168 PFNIEMAIMPLMULDIVU64 pfnU64;
169} IEMOPMULDIVSIZES;
170/** Pointer to a multiplication or division operation function table. */
171typedef IEMOPMULDIVSIZES const *PCIEMOPMULDIVSIZES;
172
173
174/**
175 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
176 */
177typedef union IEMSELDESC
178{
179 /** The legacy view. */
180 X86DESC Legacy;
181 /** The long mode view. */
182 X86DESC64 Long;
183} IEMSELDESC;
184/** Pointer to a selector descriptor table entry. */
185typedef IEMSELDESC *PIEMSELDESC;
186
187
188/*******************************************************************************
189* Defined Constants And Macros *
190*******************************************************************************/
191/** Temporary hack to disable the double execution. Will be removed in favor
192 * of a dedicated execution mode in EM. */
193//#define IEM_VERIFICATION_MODE_NO_REM
194
195/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
196 * due to GCC lacking knowledge about the value range of a switch. */
197#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_INTERNAL_ERROR_4)
198
199/**
200 * Call an opcode decoder function.
201 *
202 * We're using macros for this so that adding and removing parameters can be
203 * done as we please. See FNIEMOP_DEF.
204 */
205#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
206
207/**
208 * Call a common opcode decoder function taking one extra argument.
209 *
210 * We're using macros for this so that adding and removing parameters can be
211 * done as we please. See FNIEMOP_DEF_1.
212 */
213#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
214
215/**
216 * Call a common opcode decoder function taking two extra arguments.
217 *
218 * We're using macros for this so that adding and removing parameters can be
219 * done as we please. See FNIEMOP_DEF_2.
220 */
221#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
222
223/**
224 * Check if we're currently executing in real or virtual 8086 mode.
225 *
226 * @returns @c true if it is, @c false if not.
227 * @param a_pIemCpu The IEM state of the current CPU.
228 */
229#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
230
231/**
232 * Check if we're currently executing in long mode.
233 *
234 * @returns @c true if it is, @c false if not.
235 * @param a_pIemCpu The IEM state of the current CPU.
236 */
237#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
238
239/**
240 * Check if we're currently executing in real mode.
241 *
242 * @returns @c true if it is, @c false if not.
243 * @param a_pIemCpu The IEM state of the current CPU.
244 */
245#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
246
247/**
248 * Tests if an AMD CPUID feature (extended) is marked present - ECX.
249 */
250#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
251
252/**
253 * Check if the address is canonical.
254 */
255#define IEM_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000))
256
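/*
 * Worked example of the IEM_IS_CANONICAL check above: adding 2^47 shifts the
 * two canonical halves into one contiguous range below 2^48, so a single
 * unsigned compare suffices.
 *      0x00007fffffffffff + 0x800000000000 = 0x0000ffffffffffff  < 2^48  -> canonical
 *      0x0000800000000000 + 0x800000000000 = 0x0001000000000000  = 2^48  -> not canonical
 *      0xffff800000000000 + 0x800000000000 = 0 (wraps)           < 2^48  -> canonical
 */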
257
258/*******************************************************************************
259* Global Variables *
260*******************************************************************************/
261extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
262
263
264/** Function table for the ADD instruction. */
265static const IEMOPBINSIZES g_iemAImpl_add =
266{
267 iemAImpl_add_u8, iemAImpl_add_u8_locked,
268 iemAImpl_add_u16, iemAImpl_add_u16_locked,
269 iemAImpl_add_u32, iemAImpl_add_u32_locked,
270 iemAImpl_add_u64, iemAImpl_add_u64_locked
271};
272
273/** Function table for the ADC instruction. */
274static const IEMOPBINSIZES g_iemAImpl_adc =
275{
276 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
277 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
278 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
279 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
280};
281
282/** Function table for the SUB instruction. */
283static const IEMOPBINSIZES g_iemAImpl_sub =
284{
285 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
286 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
287 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
288 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
289};
290
291/** Function table for the SBB instruction. */
292static const IEMOPBINSIZES g_iemAImpl_sbb =
293{
294 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
295 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
296 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
297 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
298};
299
300/** Function table for the OR instruction. */
301static const IEMOPBINSIZES g_iemAImpl_or =
302{
303 iemAImpl_or_u8, iemAImpl_or_u8_locked,
304 iemAImpl_or_u16, iemAImpl_or_u16_locked,
305 iemAImpl_or_u32, iemAImpl_or_u32_locked,
306 iemAImpl_or_u64, iemAImpl_or_u64_locked
307};
308
309/** Function table for the XOR instruction. */
310static const IEMOPBINSIZES g_iemAImpl_xor =
311{
312 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
313 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
314 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
315 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
316};
317
318/** Function table for the AND instruction. */
319static const IEMOPBINSIZES g_iemAImpl_and =
320{
321 iemAImpl_and_u8, iemAImpl_and_u8_locked,
322 iemAImpl_and_u16, iemAImpl_and_u16_locked,
323 iemAImpl_and_u32, iemAImpl_and_u32_locked,
324 iemAImpl_and_u64, iemAImpl_and_u64_locked
325};
326
327/** Function table for the CMP instruction.
328 * @remarks Making operand order ASSUMPTIONS.
329 */
330static const IEMOPBINSIZES g_iemAImpl_cmp =
331{
332 iemAImpl_cmp_u8, NULL,
333 iemAImpl_cmp_u16, NULL,
334 iemAImpl_cmp_u32, NULL,
335 iemAImpl_cmp_u64, NULL
336};
337
338/** Function table for the TEST instruction.
339 * @remarks Making operand order ASSUMPTIONS.
340 */
341static const IEMOPBINSIZES g_iemAImpl_test =
342{
343 iemAImpl_test_u8, NULL,
344 iemAImpl_test_u16, NULL,
345 iemAImpl_test_u32, NULL,
346 iemAImpl_test_u64, NULL
347};
348
349/** Group 1 /r lookup table. */
350static const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
351{
352 &g_iemAImpl_add,
353 &g_iemAImpl_or,
354 &g_iemAImpl_adc,
355 &g_iemAImpl_sbb,
356 &g_iemAImpl_and,
357 &g_iemAImpl_sub,
358 &g_iemAImpl_xor,
359 &g_iemAImpl_cmp
360};
361
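/*
 * Illustrative use of the group 1 table (hypothetical decoder fragment for one
 * of the 0x80..0x83 encodings; only the table itself is defined here):
 *
 *      uint8_t bRm;
 *      IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);                    // ModR/M byte
 *      PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];    // /r selects ADD..CMP
 *      // The effective operand size then picks pfnNormalU16/U32/U64, and a LOCK
 *      // prefix switches to the pfnLocked* variant (NULL for CMP, which cannot
 *      // be locked).
 */
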
362/** Function table for the INC instruction. */
363static const IEMOPUNARYSIZES g_iemAImpl_inc =
364{
365 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
366 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
367 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
368 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
369};
370
371/** Function table for the DEC instruction. */
372static const IEMOPUNARYSIZES g_iemAImpl_dec =
373{
374 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
375 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
376 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
377 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
378};
379
380/** Function table for the NEG instruction. */
381static const IEMOPUNARYSIZES g_iemAImpl_neg =
382{
383 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
384 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
385 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
386 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
387};
388
389/** Function table for the NOT instruction. */
390static const IEMOPUNARYSIZES g_iemAImpl_not =
391{
392 iemAImpl_not_u8, iemAImpl_not_u8_locked,
393 iemAImpl_not_u16, iemAImpl_not_u16_locked,
394 iemAImpl_not_u32, iemAImpl_not_u32_locked,
395 iemAImpl_not_u64, iemAImpl_not_u64_locked
396};
397
398
399/** Function table for the ROL instruction. */
400static const IEMOPSHIFTSIZES g_iemAImpl_rol =
401{
402 iemAImpl_rol_u8,
403 iemAImpl_rol_u16,
404 iemAImpl_rol_u32,
405 iemAImpl_rol_u64
406};
407
408/** Function table for the ROR instruction. */
409static const IEMOPSHIFTSIZES g_iemAImpl_ror =
410{
411 iemAImpl_ror_u8,
412 iemAImpl_ror_u16,
413 iemAImpl_ror_u32,
414 iemAImpl_ror_u64
415};
416
417/** Function table for the RCL instruction. */
418static const IEMOPSHIFTSIZES g_iemAImpl_rcl =
419{
420 iemAImpl_rcl_u8,
421 iemAImpl_rcl_u16,
422 iemAImpl_rcl_u32,
423 iemAImpl_rcl_u64
424};
425
426/** Function table for the RCR instruction. */
427static const IEMOPSHIFTSIZES g_iemAImpl_rcr =
428{
429 iemAImpl_rcr_u8,
430 iemAImpl_rcr_u16,
431 iemAImpl_rcr_u32,
432 iemAImpl_rcr_u64
433};
434
435/** Function table for the SHL instruction. */
436static const IEMOPSHIFTSIZES g_iemAImpl_shl =
437{
438 iemAImpl_shl_u8,
439 iemAImpl_shl_u16,
440 iemAImpl_shl_u32,
441 iemAImpl_shl_u64
442};
443
444/** Function table for the SHR instruction. */
445static const IEMOPSHIFTSIZES g_iemAImpl_shr =
446{
447 iemAImpl_shr_u8,
448 iemAImpl_shr_u16,
449 iemAImpl_shr_u32,
450 iemAImpl_shr_u64
451};
452
453/** Function table for the SAR instruction. */
454static const IEMOPSHIFTSIZES g_iemAImpl_sar =
455{
456 iemAImpl_sar_u8,
457 iemAImpl_sar_u16,
458 iemAImpl_sar_u32,
459 iemAImpl_sar_u64
460};
461
462
463/** Function table for the MUL instruction. */
464static const IEMOPMULDIVSIZES g_iemAImpl_mul =
465{
466 iemAImpl_mul_u8,
467 iemAImpl_mul_u16,
468 iemAImpl_mul_u32,
469 iemAImpl_mul_u64
470};
471
472/** Function table for the IMUL instruction working implicitly on rAX. */
473static const IEMOPMULDIVSIZES g_iemAImpl_imul =
474{
475 iemAImpl_imul_u8,
476 iemAImpl_imul_u16,
477 iemAImpl_imul_u32,
478 iemAImpl_imul_u64
479};
480
481/** Function table for the DIV instruction. */
482static const IEMOPMULDIVSIZES g_iemAImpl_div =
483{
484 iemAImpl_div_u8,
485 iemAImpl_div_u16,
486 iemAImpl_div_u32,
487 iemAImpl_div_u64
488};
489
490/** Function table for the IDIV instruction. */
491static const IEMOPMULDIVSIZES g_iemAImpl_idiv =
492{
493 iemAImpl_idiv_u8,
494 iemAImpl_idiv_u16,
495 iemAImpl_idiv_u32,
496 iemAImpl_idiv_u64
497};
498
499
500/*******************************************************************************
501* Internal Functions *
502*******************************************************************************/
503static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
504static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
505static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
506static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
507static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
508#if defined(IEM_VERIFICATION_MODE) && !defined(IEM_VERIFICATION_MODE_NO_REM)
509static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
510static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
511static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
512#endif
513
514
515/**
516 * Initializes the decoder state.
517 *
518 * @param pIemCpu The per CPU IEM state.
519 */
520DECLINLINE(void) iemInitDecode(PIEMCPU pIemCpu)
521{
522 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
523
524 pIemCpu->uCpl = CPUMGetGuestCPL(IEMCPU_TO_VMCPU(pIemCpu), CPUMCTX2CORE(pCtx));
525 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
526 ? IEMMODE_64BIT
527 : pCtx->csHid.Attr.n.u1DefBig /** @todo check if this is correct... */
528 ? IEMMODE_32BIT
529 : IEMMODE_16BIT;
530 pIemCpu->enmCpuMode = enmMode;
531 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
532 pIemCpu->enmEffAddrMode = enmMode;
533 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
534 pIemCpu->enmEffOpSize = enmMode;
535 pIemCpu->fPrefixes = 0;
536 pIemCpu->uRexReg = 0;
537 pIemCpu->uRexB = 0;
538 pIemCpu->uRexIndex = 0;
539 pIemCpu->iEffSeg = X86_SREG_DS;
540 pIemCpu->offOpcode = 0;
541 pIemCpu->cbOpcode = 0;
542 pIemCpu->cActiveMappings = 0;
543 pIemCpu->iNextMapping = 0;
544}
545
546
547/**
548 * Prefetches opcodes the first time execution is started.
549 *
550 * @returns Strict VBox status code.
551 * @param pIemCpu The IEM state.
552 */
553static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu)
554{
555 iemInitDecode(pIemCpu);
556
557 /*
558 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
559 *
560 * First translate CS:rIP to a physical address.
561 */
562 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
563 uint32_t cbToTryRead;
564 RTGCPTR GCPtrPC;
565 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
566 {
567 cbToTryRead = PAGE_SIZE;
568 GCPtrPC = pCtx->rip;
569 if (!IEM_IS_CANONICAL(GCPtrPC))
570 return iemRaiseGeneralProtectionFault0(pIemCpu);
571 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
572 }
573 else
574 {
575 uint32_t GCPtrPC32 = pCtx->eip;
576 Assert(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
577 if (GCPtrPC32 > pCtx->csHid.u32Limit)
578 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
579 cbToTryRead = pCtx->csHid.u32Limit - GCPtrPC32 + 1;
580 GCPtrPC = pCtx->csHid.u64Base + GCPtrPC32;
581 }
582
583 RTGCPHYS GCPhys;
584 uint64_t fFlags;
585 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
586 if (RT_FAILURE(rc))
587 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
588 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3) /* user-mode fetch from a supervisor page */
589 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
590 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
591 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
592 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
593 /** @todo Check reserved bits and such stuff. PGM is better at doing
594 * that, so do it when implementing the guest virtual address
595 * TLB... */
596
597 /*
598 * Read the bytes at this address.
599 */
600 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
601 if (cbToTryRead > cbLeftOnPage)
602 cbToTryRead = cbLeftOnPage;
603 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
604 cbToTryRead = sizeof(pIemCpu->abOpcode);
605 if (!pIemCpu->fByPassHandlers)
606 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, pIemCpu->abOpcode, cbToTryRead);
607 else
608 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pIemCpu->abOpcode, GCPhys, cbToTryRead);
609 if (rc != VINF_SUCCESS)
610 return rc;
611 pIemCpu->cbOpcode = cbToTryRead;
612
613 return VINF_SUCCESS;
614}
615
616
617/**
618 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
619 * exception if it fails.
620 *
621 * @returns Strict VBox status code.
622 * @param pIemCpu The IEM state.
623 * @param cbMin The minimum number of additional opcode bytes to fetch.
624 */
625static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
626{
627 /*
628 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
629 *
630 * First translate CS:rIP to a physical address.
631 */
632 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
633 uint32_t cbToTryRead;
634 RTGCPTR GCPtrNext;
635 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
636 {
637 cbToTryRead = PAGE_SIZE;
638 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
639 if (!IEM_IS_CANONICAL(GCPtrNext))
640 return iemRaiseGeneralProtectionFault0(pIemCpu);
641 cbToTryRead = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
642 Assert(cbToTryRead >= cbMin); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
643 }
644 else
645 {
646 uint32_t GCPtrNext32 = pCtx->eip;
647 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
648 GCPtrNext32 += pIemCpu->cbOpcode;
649 if (GCPtrNext32 > pCtx->csHid.u32Limit)
650 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
651 cbToTryRead = pCtx->csHid.u32Limit - GCPtrNext32 + 1;
652 if (cbToTryRead < cbMin)
653 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
654 GCPtrNext = pCtx->csHid.u64Base + GCPtrNext32;
655 }
656
657 RTGCPHYS GCPhys;
658 uint64_t fFlags;
659 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
660 if (RT_FAILURE(rc))
661 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
663 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3) /* user-mode fetch from a supervisor page */
663 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
664 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
665 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
666 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
667 /** @todo Check reserved bits and such stuff. PGM is better at doing
668 * that, so do it when implementing the guest virtual address
669 * TLB... */
670
671 /*
672 * Read the bytes at this address.
673 */
674 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
675 if (cbToTryRead > cbLeftOnPage)
676 cbToTryRead = cbLeftOnPage;
677 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
678 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
679 if (!pIemCpu->fByPassHandlers)
680 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);
681 else
682 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
683 if (rc != VINF_SUCCESS)
684 return rc;
685 pIemCpu->cbOpcode += cbToTryRead;
686
687 return VINF_SUCCESS;
688}
689
690
691/**
692 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
693 *
694 * @returns Strict VBox status code.
695 * @param pIemCpu The IEM state.
696 * @param pb Where to return the opcode byte.
697 */
698static VBOXSTRICTRC iemOpcodeGetNextByteSlow(PIEMCPU pIemCpu, uint8_t *pb)
699{
700 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
701 if (rcStrict == VINF_SUCCESS)
702 {
703 uint8_t offOpcode = pIemCpu->offOpcode;
704 *pb = pIemCpu->abOpcode[offOpcode];
705 pIemCpu->offOpcode = offOpcode + 1;
706 }
707 else
708 *pb = 0;
709 return rcStrict;
710}
711
712
713/**
714 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
715 *
716 * @returns Strict VBox status code.
717 * @param pIemCpu The IEM state.
718 * @param pu16 Where to return the sign-extended opcode word.
719 */
720static VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
721{
722 uint8_t u8;
723 VBOXSTRICTRC rcStrict = iemOpcodeGetNextByteSlow(pIemCpu, &u8);
724 if (rcStrict == VINF_SUCCESS)
725 *pu16 = (int8_t)u8;
726 return rcStrict;
727}
728
729
730/**
731 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
732 *
733 * @returns Strict VBox status code.
734 * @param pIemCpu The IEM state.
735 * @param pu16 Where to return the opcode word.
736 */
737static VBOXSTRICTRC iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
738{
739 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
740 if (rcStrict == VINF_SUCCESS)
741 {
742 uint8_t offOpcode = pIemCpu->offOpcode;
743 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
744 pIemCpu->offOpcode = offOpcode + 2;
745 }
746 else
747 *pu16 = 0;
748 return rcStrict;
749}
750
751
752/**
753 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
754 *
755 * @returns Strict VBox status code.
756 * @param pIemCpu The IEM state.
757 * @param pu32 Where to return the opcode dword.
758 */
759static VBOXSTRICTRC iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
760{
761 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
762 if (rcStrict == VINF_SUCCESS)
763 {
764 uint8_t offOpcode = pIemCpu->offOpcode;
765 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
766 pIemCpu->abOpcode[offOpcode + 1],
767 pIemCpu->abOpcode[offOpcode + 2],
768 pIemCpu->abOpcode[offOpcode + 3]);
769 pIemCpu->offOpcode = offOpcode + 4;
770 }
771 else
772 *pu32 = 0;
773 return rcStrict;
774}
775
776
777/**
778 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
779 *
780 * @returns Strict VBox status code.
781 * @param pIemCpu The IEM state.
782 * @param pu64 Where to return the opcode qword.
783 */
784static VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
785{
786 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
787 if (rcStrict == VINF_SUCCESS)
788 {
789 uint8_t offOpcode = pIemCpu->offOpcode;
790 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
791 pIemCpu->abOpcode[offOpcode + 1],
792 pIemCpu->abOpcode[offOpcode + 2],
793 pIemCpu->abOpcode[offOpcode + 3]);
794 pIemCpu->offOpcode = offOpcode + 4;
795 }
796 else
797 *pu64 = 0;
798 return rcStrict;
799}
800
801
802/**
803 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
804 *
805 * @returns Strict VBox status code.
806 * @param pIemCpu The IEM state.
807 * @param pu64 Where to return the opcode qword.
808 */
809static VBOXSTRICTRC iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
810{
811 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
812 if (rcStrict == VINF_SUCCESS)
813 {
814 uint8_t offOpcode = pIemCpu->offOpcode;
815 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
816 pIemCpu->abOpcode[offOpcode + 1],
817 pIemCpu->abOpcode[offOpcode + 2],
818 pIemCpu->abOpcode[offOpcode + 3],
819 pIemCpu->abOpcode[offOpcode + 4],
820 pIemCpu->abOpcode[offOpcode + 5],
821 pIemCpu->abOpcode[offOpcode + 6],
822 pIemCpu->abOpcode[offOpcode + 7]);
823 pIemCpu->offOpcode = offOpcode + 8;
824 }
825 else
826 *pu64 = 0;
827 return rcStrict;
828}
829
830
831/**
832 * Fetches the next opcode byte.
833 *
834 * @returns Strict VBox status code.
835 * @param pIemCpu The IEM state.
836 * @param pu8 Where to return the opcode byte.
837 */
838DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
839{
840 uint8_t const offOpcode = pIemCpu->offOpcode;
841 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
842 return iemOpcodeGetNextByteSlow(pIemCpu, pu8);
843
844 *pu8 = pIemCpu->abOpcode[offOpcode];
845 pIemCpu->offOpcode = offOpcode + 1;
846 return VINF_SUCCESS;
847}
848
849/**
850 * Fetches the next opcode byte, returns automatically on failure.
851 *
852 * @param pIemCpu The IEM state.
853 * @param a_pu8 Where to return the opcode byte.
854 */
855#define IEM_OPCODE_GET_NEXT_BYTE(a_pIemCpu, a_pu8) \
856 do \
857 { \
858 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8((a_pIemCpu), (a_pu8)); \
859 if (rcStrict2 != VINF_SUCCESS) \
860 return rcStrict2; \
861 } while (0)
862
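/*
 * Minimal usage sketch (hypothetical handler, not one of the real decoders):
 * the macro hides the early return, so a failed fetch propagates its strict
 * status code to the caller without explicit error handling at the use site.
 *
 *      FNIEMOP_DEF(iemOp_example_imm8)
 *      {
 *          uint8_t u8Imm;
 *          IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &u8Imm);  // returns rcStrict2 on failure
 *          // ... decode/execute using u8Imm ...
 *          return VINF_SUCCESS;
 *      }
 */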
863
864/**
865 * Fetches the next signed byte from the opcode stream.
866 *
867 * @returns Strict VBox status code.
868 * @param pIemCpu The IEM state.
869 * @param pi8 Where to return the signed byte.
870 */
871DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
872{
873 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
874}
875
876/**
877 * Fetches the next signed byte from the opcode stream, returning automatically
878 * on failure.
879 *
880 * @param pIemCpu The IEM state.
881 * @param pi8 Where to return the signed byte.
882 */
883#define IEM_OPCODE_GET_NEXT_S8(a_pIemCpu, a_pi8) \
884 do \
885 { \
886 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8((a_pIemCpu), (a_pi8)); \
887 if (rcStrict2 != VINF_SUCCESS) \
888 return rcStrict2; \
889 } while (0)
890
891
892/**
893 * Fetches the next signed byte from the opcode stream, extending it to
894 * unsigned 16-bit.
895 *
896 * @returns Strict VBox status code.
897 * @param pIemCpu The IEM state.
898 * @param pu16 Where to return the unsigned word.
899 */
900DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
901{
902 uint8_t const offOpcode = pIemCpu->offOpcode;
903 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
904 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
905
906 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
907 pIemCpu->offOpcode = offOpcode + 1;
908 return VINF_SUCCESS;
909}
910
911
912/**
913 * Fetches the next signed byte from the opcode stream, sign-extends it to a
914 * word, and returns automatically on failure.
915 *
916 * @param pIemCpu The IEM state.
917 * @param pu16 Where to return the word.
918 */
919#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pIemCpu, a_pu16) \
920 do \
921 { \
922 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16((a_pIemCpu), (a_pu16)); \
923 if (rcStrict2 != VINF_SUCCESS) \
924 return rcStrict2; \
925 } while (0)
926
927
928/**
929 * Fetches the next opcode word.
930 *
931 * @returns Strict VBox status code.
932 * @param pIemCpu The IEM state.
933 * @param pu16 Where to return the opcode word.
934 */
935DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
936{
937 uint8_t const offOpcode = pIemCpu->offOpcode;
938 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
939 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
940
941 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
942 pIemCpu->offOpcode = offOpcode + 2;
943 return VINF_SUCCESS;
944}
945
946/**
947 * Fetches the next opcode word, returns automatically on failure.
948 *
949 * @param pIemCpu The IEM state.
950 * @param a_pu16 Where to return the opcode word.
951 */
952#define IEM_OPCODE_GET_NEXT_U16(a_pIemCpu, a_pu16) \
953 do \
954 { \
955 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16((a_pIemCpu), (a_pu16)); \
956 if (rcStrict2 != VINF_SUCCESS) \
957 return rcStrict2; \
958 } while (0)
959
960
961/**
962 * Fetches the next opcode dword.
963 *
964 * @returns Strict VBox status code.
965 * @param pIemCpu The IEM state.
966 * @param pu32 Where to return the opcode double word.
967 */
968DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
969{
970 uint8_t const offOpcode = pIemCpu->offOpcode;
971 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
972 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
973
974 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
975 pIemCpu->abOpcode[offOpcode + 1],
976 pIemCpu->abOpcode[offOpcode + 2],
977 pIemCpu->abOpcode[offOpcode + 3]);
978 pIemCpu->offOpcode = offOpcode + 4;
979 return VINF_SUCCESS;
980}
981
982/**
983 * Fetches the next opcode dword, returns automatically on failure.
984 *
985 * @param pIemCpu The IEM state.
986 * @param a_pu32 Where to return the opcode dword.
987 */
988#define IEM_OPCODE_GET_NEXT_U32(a_pIemCpu, a_pu32) \
989 do \
990 { \
991 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32((a_pIemCpu), (a_pu32)); \
992 if (rcStrict2 != VINF_SUCCESS) \
993 return rcStrict2; \
994 } while (0)
995
996
997/**
998 * Fetches the next opcode dword, sign extending it into a quad word.
999 *
1000 * @returns Strict VBox status code.
1001 * @param pIemCpu The IEM state.
1002 * @param pu64 Where to return the opcode quad word.
1003 */
1004DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1005{
1006 uint8_t const offOpcode = pIemCpu->offOpcode;
1007 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1008 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1009
1010 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1011 pIemCpu->abOpcode[offOpcode + 1],
1012 pIemCpu->abOpcode[offOpcode + 2],
1013 pIemCpu->abOpcode[offOpcode + 3]);
1014 *pu64 = i32;
1015 pIemCpu->offOpcode = offOpcode + 4;
1016 return VINF_SUCCESS;
1017}
1018
1019/**
1020 * Fetches the next opcode double word and sign extends it to a quad word,
1021 * returns automatically on failure.
1022 *
1023 * @param pIemCpu The IEM state.
1024 * @param a_pu64 Where to return the opcode quad word.
1025 */
1026#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pIemCpu, a_pu64) \
1027 do \
1028 { \
1029 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64((a_pIemCpu), (a_pu64)); \
1030 if (rcStrict2 != VINF_SUCCESS) \
1031 return rcStrict2; \
1032 } while (0)
1033
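/*
 * Worked example of the sign extension performed above: an opcode dword of
 * 0xfffffffe is read as the int32_t value -2 and stored as the quad word
 * 0xfffffffffffffffe, which is what instructions taking a signed 32-bit
 * immediate in 64-bit operand size expect.
 */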
1034
1035/**
1036 * Fetches the next opcode qword.
1037 *
1038 * @returns Strict VBox status code.
1039 * @param pIemCpu The IEM state.
1040 * @param pu64 Where to return the opcode qword.
1041 */
1042DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1043{
1044 uint8_t const offOpcode = pIemCpu->offOpcode;
1045 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1046 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1047
1048 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1049 pIemCpu->abOpcode[offOpcode + 1],
1050 pIemCpu->abOpcode[offOpcode + 2],
1051 pIemCpu->abOpcode[offOpcode + 3],
1052 pIemCpu->abOpcode[offOpcode + 4],
1053 pIemCpu->abOpcode[offOpcode + 5],
1054 pIemCpu->abOpcode[offOpcode + 6],
1055 pIemCpu->abOpcode[offOpcode + 7]);
1056 pIemCpu->offOpcode = offOpcode + 8;
1057 return VINF_SUCCESS;
1058}
1059
1060/**
1061 * Fetches the next opcode qword, returns automatically on failure.
1062 *
1063 * @param pIemCpu The IEM state.
1064 * @param a_pu64 Where to return the opcode qword.
1065 */
1066#define IEM_OPCODE_GET_NEXT_U64(a_pIemCpu, a_pu64) \
1067 do \
1068 { \
1069 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64((a_pIemCpu), (a_pu64)); \
1070 if (rcStrict2 != VINF_SUCCESS) \
1071 return rcStrict2; \
1072 } while (0)
1073
1074
1075/** @name Raising Exceptions.
1076 *
1077 * @{
1078 */
1079
1080static VBOXSTRICTRC iemRaiseDivideError(PIEMCPU pIemCpu)
1081{
1082 AssertFailed(/** @todo implement this */);
1083 return VERR_NOT_IMPLEMENTED;
1084}
1085
1086
1087static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
1088{
1089 AssertFailed(/** @todo implement this */);
1090 return VERR_NOT_IMPLEMENTED;
1091}
1092
1093
1094static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
1095{
1096 AssertFailed(/** @todo implement this */);
1097 return VERR_NOT_IMPLEMENTED;
1098}
1099
1100
1101static VBOXSTRICTRC iemRaiseNotCanonical(PIEMCPU pIemCpu)
1102{
1103 AssertFailed(/** @todo implement this */);
1104 return VERR_NOT_IMPLEMENTED;
1105}
1106
1107
1108static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
1109{
1110 AssertFailed(/** @todo implement this */);
1111 return VERR_NOT_IMPLEMENTED;
1112}
1113
1114
1115static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
1116{
1117 AssertFailed(/** @todo implement this */);
1118 return VERR_NOT_IMPLEMENTED;
1119}
1120
1121
1122static VBOXSTRICTRC iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
1123{
1124 AssertFailed(/** @todo implement this */);
1125 return VERR_NOT_IMPLEMENTED;
1126}
1127
1128
1129static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
1130{
1131 AssertFailed(/** @todo implement this */);
1132 return VERR_NOT_IMPLEMENTED;
1133}
1134
1135
1136static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
1137{
1138 AssertFailed(/** @todo implement this */);
1139 return VERR_NOT_IMPLEMENTED;
1140}
1141
1142
1143/**
1144 * Macro for calling iemCImplRaiseInvalidLockPrefix().
1145 *
1146 * This enables us to add/remove arguments and force different levels of
1147 * inlining as we wish.
1148 *
1149 * @return Strict VBox status code.
1150 */
1151#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
1152IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
1153{
1154 AssertFailed();
1155 return VERR_NOT_IMPLEMENTED;
1156}
1157
1158
1159/**
1160 * Macro for calling iemCImplRaiseInvalidOpcode().
1161 *
1162 * This enables us to add/remove arguments and force different levels of
1163 * inlining as we wish.
1164 *
1165 * @return Strict VBox status code.
1166 */
1167#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
1168IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
1169{
1170 AssertFailed();
1171 return VERR_NOT_IMPLEMENTED;
1172}
1173
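/*
 * Usage sketch (hypothetical handler for an undefined encoding; not one of the
 * real decoder functions): the macro defers the actual exception raising to
 * the C implementation stub above.
 *
 *      FNIEMOP_DEF(iemOp_example_invalid)
 *      {
 *          return IEMOP_RAISE_INVALID_OPCODE();
 *      }
 */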
1174
1175/** @} */
1176
1177
1178/*
1179 *
1180 * Helper routines.
1181 * Helper routines.
1182 * Helper routines.
1183 *
1184 */
1185
1186/**
1187 * Recalculates the effective operand size.
1188 *
1189 * @param pIemCpu The IEM state.
1190 */
1191static void iemRecalEffOpSize(PIEMCPU pIemCpu)
1192{
1193 switch (pIemCpu->enmCpuMode)
1194 {
1195 case IEMMODE_16BIT:
1196 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
1197 break;
1198 case IEMMODE_32BIT:
1199 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
1200 break;
1201 case IEMMODE_64BIT:
1202 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
1203 {
1204 case 0:
1205 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
1206 break;
1207 case IEM_OP_PRF_SIZE_OP:
1208 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
1209 break;
1210 case IEM_OP_PRF_SIZE_REX_W:
1211 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
1212 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
1213 break;
1214 }
1215 break;
1216 default:
1217 AssertFailed();
1218 }
1219}
1220
1221
1222/**
1223 * Sets the default operand size to 64-bit and recalculates the effective
1224 * operand size.
1225 *
1226 * @param pIemCpu The IEM state.
1227 */
1228static void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
1229{
1230 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
1231 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
1232 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
1233 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
1234 else
1235 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
1236}
1237
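/*
 * Example of the 64-bit mode precedence implemented by the two helpers above:
 *      no prefix                     -> the default operand size (32-bit for most opcodes)
 *      66h only                      -> 16-bit
 *      REX.W, with or without 66h    -> 64-bit, i.e. REX.W wins over 66h
 * iemRecalEffOpSize64Default() is meant for opcodes whose default operand size
 * is 64-bit (near branches, PUSH/POP and the like), where 66h is the only way
 * to get a 16-bit operand.
 */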
1238
1239/*
1240 *
1241 * Common opcode decoders.
1242 * Common opcode decoders.
1243 * Common opcode decoders.
1244 *
1245 */
1246
1247/** Stubs an opcode. */
1248#define FNIEMOP_STUB(a_Name) \
1249 FNIEMOP_DEF(a_Name) \
1250 { \
1251 IEMOP_MNEMONIC(#a_Name); \
1252 AssertMsgFailed(("After %d instructions\n", pIemCpu->cInstructions)); \
1253 return VERR_NOT_IMPLEMENTED; \
1254 } \
1255 typedef int ignore_semicolon
1256
1257
1258
1259/** @name Register Access.
1260 * @{
1261 */
1262
1263/**
1264 * Gets a reference (pointer) to the specified hidden segment register.
1265 *
1266 * @returns Hidden register reference.
1267 * @param pIemCpu The per CPU data.
1268 * @param iSegReg The segment register.
1269 */
1270static PCPUMSELREGHID iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
1271{
1272 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1273 switch (iSegReg)
1274 {
1275 case X86_SREG_ES: return &pCtx->esHid;
1276 case X86_SREG_CS: return &pCtx->csHid;
1277 case X86_SREG_SS: return &pCtx->ssHid;
1278 case X86_SREG_DS: return &pCtx->dsHid;
1279 case X86_SREG_FS: return &pCtx->fsHid;
1280 case X86_SREG_GS: return &pCtx->gsHid;
1281 }
1282 AssertFailedReturn(NULL);
1283}
1284
1285
1286/**
1287 * Gets a reference (pointer) to the specified segment register (the selector
1288 * value).
1289 *
1290 * @returns Pointer to the selector variable.
1291 * @param pIemCpu The per CPU data.
1292 * @param iSegReg The segment register.
1293 */
1294static uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
1295{
1296 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1297 switch (iSegReg)
1298 {
1299 case X86_SREG_ES: return &pCtx->es;
1300 case X86_SREG_CS: return &pCtx->cs;
1301 case X86_SREG_SS: return &pCtx->ss;
1302 case X86_SREG_DS: return &pCtx->ds;
1303 case X86_SREG_FS: return &pCtx->fs;
1304 case X86_SREG_GS: return &pCtx->gs;
1305 }
1306 AssertFailedReturn(NULL);
1307}
1308
1309
1310/**
1311 * Fetches the selector value of a segment register.
1312 *
1313 * @returns The selector value.
1314 * @param pIemCpu The per CPU data.
1315 * @param iSegReg The segment register.
1316 */
1317static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
1318{
1319 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1320 switch (iSegReg)
1321 {
1322 case X86_SREG_ES: return pCtx->es;
1323 case X86_SREG_CS: return pCtx->cs;
1324 case X86_SREG_SS: return pCtx->ss;
1325 case X86_SREG_DS: return pCtx->ds;
1326 case X86_SREG_FS: return pCtx->fs;
1327 case X86_SREG_GS: return pCtx->gs;
1328 }
1329 AssertFailedReturn(0xffff);
1330}
1331
1332
1333/**
1334 * Gets a reference (pointer) to the specified general register.
1335 *
1336 * @returns Register reference.
1337 * @param pIemCpu The per CPU data.
1338 * @param iReg The general register.
1339 */
1340static void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
1341{
1342 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1343 switch (iReg)
1344 {
1345 case X86_GREG_xAX: return &pCtx->rax;
1346 case X86_GREG_xCX: return &pCtx->rcx;
1347 case X86_GREG_xDX: return &pCtx->rdx;
1348 case X86_GREG_xBX: return &pCtx->rbx;
1349 case X86_GREG_xSP: return &pCtx->rsp;
1350 case X86_GREG_xBP: return &pCtx->rbp;
1351 case X86_GREG_xSI: return &pCtx->rsi;
1352 case X86_GREG_xDI: return &pCtx->rdi;
1353 case X86_GREG_x8: return &pCtx->r8;
1354 case X86_GREG_x9: return &pCtx->r9;
1355 case X86_GREG_x10: return &pCtx->r10;
1356 case X86_GREG_x11: return &pCtx->r11;
1357 case X86_GREG_x12: return &pCtx->r12;
1358 case X86_GREG_x13: return &pCtx->r13;
1359 case X86_GREG_x14: return &pCtx->r14;
1360 case X86_GREG_x15: return &pCtx->r15;
1361 }
1362 AssertFailedReturn(NULL);
1363}
1364
1365
1366/**
1367 * Gets a reference (pointer) to the specified 8-bit general register.
1368 *
1369 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
1370 *
1371 * @returns Register reference.
1372 * @param pIemCpu The per CPU data.
1373 * @param iReg The register.
1374 */
1375static uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
1376{
1377 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
1378 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
1379
1380 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
1381 if (iReg >= 4)
1382 pu8Reg++;
1383 return pu8Reg;
1384}
1385
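/*
 * Example of the 8-bit register mapping implemented above:
 *      no REX prefix:   0..3 -> AL,CL,DL,BL;  4..7 -> AH,CH,DH,BH (high byte of regs 0..3)
 *      any REX prefix:  4..7 -> SPL,BPL,SIL,DIL;  8..15 -> R8B..R15B
 * The AH/CH/DH/BH case is why this helper cannot simply defer to iemGRegRef();
 * the "+1" relies on the little-endian layout of the guest context registers.
 */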
1386
1387/**
1388 * Fetches the value of an 8-bit general register.
1389 *
1390 * @returns The register value.
1391 * @param pIemCpu The per CPU data.
1392 * @param iReg The register.
1393 */
1394static uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
1395{
1396 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
1397 return *pbSrc;
1398}
1399
1400
1401/**
1402 * Fetches the value of a 16-bit general register.
1403 *
1404 * @returns The register value.
1405 * @param pIemCpu The per CPU data.
1406 * @param iReg The register.
1407 */
1408static uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
1409{
1410 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
1411}
1412
1413
1414/**
1415 * Fetches the value of a 32-bit general register.
1416 *
1417 * @returns The register value.
1418 * @param pIemCpu The per CPU data.
1419 * @param iReg The register.
1420 */
1421static uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
1422{
1423 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
1424}
1425
1426
1427/**
1428 * Fetches the value of a 64-bit general register.
1429 *
1430 * @returns The register value.
1431 * @param pIemCpu The per CPU data.
1432 * @param iReg The register.
1433 */
1434static uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
1435{
1436 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
1437}
1438
1439
1440/**
1441 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
1442 *
1443 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
1444 * segment limit.
1445 *
1446 * @param pIemCpu The per CPU data.
1447 * @param offNextInstr The offset of the next instruction.
1448 */
1449static VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
1450{
1451 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1452 switch (pIemCpu->enmEffOpSize)
1453 {
1454 case IEMMODE_16BIT:
1455 {
1456 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
1457 if ( uNewIp > pCtx->csHid.u32Limit
1458 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
1459 return iemRaiseGeneralProtectionFault0(pIemCpu);
1460 pCtx->rip = uNewIp;
1461 break;
1462 }
1463
1464 case IEMMODE_32BIT:
1465 {
1466 Assert(pCtx->rip <= UINT32_MAX);
1467 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
1468
1469 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
1470 if (uNewEip > pCtx->csHid.u32Limit)
1471 return iemRaiseGeneralProtectionFault0(pIemCpu);
1472 pCtx->rip = uNewEip;
1473 break;
1474 }
1475
1476 case IEMMODE_64BIT:
1477 {
1478 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
1479
1480 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
1481 if (!IEM_IS_CANONICAL(uNewRip))
1482 return iemRaiseGeneralProtectionFault0(pIemCpu);
1483 pCtx->rip = uNewRip;
1484 break;
1485 }
1486
1487 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1488 }
1489
1490 return VINF_SUCCESS;
1491}
1492
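/*
 * Worked example: in 16-bit code at IP = 0x0100, the two-byte instruction
 * "EB FE" (JMP short -2) reaches this function with pIemCpu->offOpcode = 2 and
 * offNextInstr = -2, so uNewIp = 0x0100 + (-2) + 2 = 0x0100; the instruction
 * jumps to itself, as expected for an offset that is relative to the end of
 * the instruction.
 */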
1493
1494/**
1495 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
1496 *
1497 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
1498 * segment limit.
1499 *
1500 * @returns Strict VBox status code.
1501 * @param pIemCpu The per CPU data.
1502 * @param offNextInstr The offset of the next instruction.
1503 */
1504static VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
1505{
1506 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1507 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
1508
1509 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
1510 if ( uNewIp > pCtx->csHid.u32Limit
1511 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
1512 return iemRaiseGeneralProtectionFault0(pIemCpu);
1513 /** @todo Test 16-bit jump in 64-bit mode. */
1514 pCtx->rip = uNewIp;
1515
1516 return VINF_SUCCESS;
1517}
1518
1519
1520/**
1521 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
1522 *
1523 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
1524 * segment limit.
1525 *
1526 * @returns Strict VBox status code.
1527 * @param pIemCpu The per CPU data.
1528 * @param offNextInstr The offset of the next instruction.
1529 */
1530static VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
1531{
1532 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1533 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
1534
1535 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
1536 {
1537 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
1538
1539 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
1540 if (uNewEip > pCtx->csHid.u32Limit)
1541 return iemRaiseGeneralProtectionFault0(pIemCpu);
1542 pCtx->rip = uNewEip;
1543 }
1544 else
1545 {
1546 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
1547
1548 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
1549 if (!IEM_IS_CANONICAL(uNewRip))
1550 return iemRaiseGeneralProtectionFault0(pIemCpu);
1551 pCtx->rip = uNewRip;
1552 }
1553 return VINF_SUCCESS;
1554}
1555
1556
1557/**
1558 * Performs a near jump to the specified address.
1559 *
1560 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
1561 * segment limit.
1562 *
1563 * @param pIemCpu The per CPU data.
1564 * @param uNewRip The new RIP value.
1565 */
1566static VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
1567{
1568 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1569 switch (pIemCpu->enmEffOpSize)
1570 {
1571 case IEMMODE_16BIT:
1572 {
1573 Assert(uNewRip <= UINT16_MAX);
1574 if ( uNewRip > pCtx->csHid.u32Limit
1575 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
1576 return iemRaiseGeneralProtectionFault0(pIemCpu);
1577 /** @todo Test 16-bit jump in 64-bit mode. */
1578 pCtx->rip = uNewRip;
1579 break;
1580 }
1581
1582 case IEMMODE_32BIT:
1583 {
1584 Assert(uNewRip <= UINT32_MAX);
1585 Assert(pCtx->rip <= UINT32_MAX);
1586 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
1587
1588 if (uNewRip > pCtx->csHid.u32Limit)
1589 return iemRaiseGeneralProtectionFault0(pIemCpu);
1590 pCtx->rip = uNewRip;
1591 break;
1592 }
1593
1594 case IEMMODE_64BIT:
1595 {
1596 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
1597
1598 if (!IEM_IS_CANONICAL(uNewRip))
1599 return iemRaiseGeneralProtectionFault0(pIemCpu);
1600 pCtx->rip = uNewRip;
1601 break;
1602 }
1603
1604 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1605 }
1606
1607 return VINF_SUCCESS;
1608}
1609
1610
1611/**
1612 * Get the address of the top of the stack.
1613 *
1614 * @param pCtx The CPU context which SP/ESP/RSP should be
1615 * read.
1616 */
1617DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCCPUMCTX pCtx)
1618{
1619 if (pCtx->ssHid.Attr.n.u1Long)
1620 return pCtx->rsp;
1621 if (pCtx->ssHid.Attr.n.u1DefBig)
1622 return pCtx->esp;
1623 return pCtx->sp;
1624}
1625
1626
1627/**
1628 * Updates the RIP/EIP/IP to point to the next instruction.
1629 *
1630 * @param pIemCpu The per CPU data.
1631 * @param cbInstr The number of bytes to add.
1632 */
1633static void iemRegAddToRip(PIEMCPU pIemCpu, uint8_t cbInstr)
1634{
1635 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1636 switch (pIemCpu->enmCpuMode)
1637 {
1638 case IEMMODE_16BIT:
1639 Assert(pCtx->rip <= UINT16_MAX);
1640 pCtx->eip += cbInstr;
1641 pCtx->eip &= UINT32_C(0xffff);
1642 break;
1643
1644 case IEMMODE_32BIT:
1645 pCtx->eip += cbInstr;
1646 Assert(pCtx->rip <= UINT32_MAX);
1647 break;
1648
1649 case IEMMODE_64BIT:
1650 pCtx->rip += cbInstr;
1651 break;
1652 default: AssertFailed();
1653 }
1654}
1655
1656
1657/**
1658 * Updates the RIP/EIP/IP to point to the next instruction.
1659 *
1660 * @param pIemCpu The per CPU data.
1661 */
1662static void iemRegUpdateRip(PIEMCPU pIemCpu)
1663{
1664 return iemRegAddToRip(pIemCpu, pIemCpu->offOpcode);
1665}
1666
1667
1668/**
1669 * Adds to the stack pointer.
1670 *
1671 * @param pCtx The CPU context which SP/ESP/RSP should be
1672 * updated.
1673 * @param cbToAdd The number of bytes to add.
1674 */
1675DECLINLINE(void) iemRegAddToRsp(PCPUMCTX pCtx, uint8_t cbToAdd)
1676{
1677 if (pCtx->ssHid.Attr.n.u1Long)
1678 pCtx->rsp += cbToAdd;
1679 else if (pCtx->ssHid.Attr.n.u1DefBig)
1680 pCtx->esp += cbToAdd;
1681 else
1682 pCtx->sp += cbToAdd;
1683}
1684
1685
1686/**
1687 * Subtracts from the stack pointer.
1688 *
1689 * @param pCtx The CPU context which SP/ESP/RSP should be
1690 * updated.
1691 * @param cbToSub The number of bytes to subtract.
1692 */
1693DECLINLINE(void) iemRegSubFromRsp(PCPUMCTX pCtx, uint8_t cbToSub)
1694{
1695 if (pCtx->ssHid.Attr.n.u1Long)
1696 pCtx->rsp -= cbToSub;
1697 else if (pCtx->ssHid.Attr.n.u1DefBig)
1698 pCtx->esp -= cbToSub;
1699 else
1700 pCtx->sp -= cbToSub;
1701}
1702
1703
1704/**
1705 * Adds to the temporary stack pointer.
1706 *
1707 * @param pTmpRsp The temporary SP/ESP/RSP to update.
1708 * @param cbToAdd The number of bytes to add.
1709 * @param pCtx Where to get the current stack mode.
1710 */
1711DECLINLINE(void) iemRegAddToRspEx(PRTUINT64U pTmpRsp, uint8_t cbToAdd, PCCPUMCTX pCtx)
1712{
1713 if (pCtx->ssHid.Attr.n.u1Long)
1714 pTmpRsp->u += cbToAdd;
1715 else if (pCtx->ssHid.Attr.n.u1DefBig)
1716 pTmpRsp->DWords.dw0 += cbToAdd;
1717 else
1718 pTmpRsp->Words.w0 += cbToAdd;
1719}
1720
1721
1722/**
1723 * Subtracts from the temporary stack pointer.
1724 *
1725 * @param pTmpRsp The temporary SP/ESP/RSP to update.
1726 * @param cbToSub The number of bytes to subtract.
1727 * @param pCtx Where to get the current stack mode.
1728 */
1729DECLINLINE(void) iemRegSubFromRspEx(PRTUINT64U pTmpRsp, uint8_t cbToSub, PCCPUMCTX pCtx)
1730{
1731 if (pCtx->ssHid.Attr.n.u1Long)
1732 pTmpRsp->u -= cbToSub;
1733 else if (pCtx->ssHid.Attr.n.u1DefBig)
1734 pTmpRsp->DWords.dw0 -= cbToSub;
1735 else
1736 pTmpRsp->Words.w0 -= cbToSub;
1737}
1738
1739
1740/**
1741 * Calculates the effective stack address for a push of the specified size as
1742 * well as the new RSP value (upper bits may be masked).
1743 *
1744 * @returns Effective stack address for the push.
1745 * @param pCtx Where to get the current stack mode.
1746 * @param cbItem The size of the stack item to push.
1747 * @param puNewRsp Where to return the new RSP value.
1748 */
1749DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
1750{
1751 RTUINT64U uTmpRsp;
1752 RTGCPTR GCPtrTop;
1753 uTmpRsp.u = pCtx->rsp;
1754
1755 if (pCtx->ssHid.Attr.n.u1Long)
1756 GCPtrTop = uTmpRsp.u -= cbItem;
1757 else if (pCtx->ssHid.Attr.n.u1DefBig)
1758 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
1759 else
1760 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
1761 *puNewRsp = uTmpRsp.u;
1762 return GCPtrTop;
1763}
1764
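/*
 * Worked example for an 8-byte push with RSP = 0x0000000100000008:
 *      64-bit stack (u1Long)    -> GCPtrTop = *puNewRsp = 0x0000000100000000
 *      32-bit stack (u1DefBig)  -> only ESP is decremented: GCPtrTop = 0x00000000,
 *                                  while *puNewRsp keeps the upper dword intact
 *      16-bit stack             -> only SP is decremented: GCPtrTop = 0x0000
 * The 16/32-bit masking is why the new value is handed back via *puNewRsp
 * instead of being recomputed by the caller.
 */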
1765
1766/**
1767 * Gets the current stack pointer and calculates the value after a pop of the
1768 * specified size.
1769 *
1770 * @returns Current stack pointer.
1771 * @param pCtx Where to get the current stack mode.
1772 * @param cbItem The size of the stack item to pop.
1773 * @param puNewRsp Where to return the new RSP value.
1774 */
1775DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
1776{
1777 RTUINT64U uTmpRsp;
1778 RTGCPTR GCPtrTop;
1779 uTmpRsp.u = pCtx->rsp;
1780
1781 if (pCtx->ssHid.Attr.n.u1Long)
1782 {
1783 GCPtrTop = uTmpRsp.u;
1784 uTmpRsp.u += cbItem;
1785 }
1786 else if (pCtx->ssHid.Attr.n.u1DefBig)
1787 {
1788 GCPtrTop = uTmpRsp.DWords.dw0;
1789 uTmpRsp.DWords.dw0 += cbItem;
1790 }
1791 else
1792 {
1793 GCPtrTop = uTmpRsp.Words.w0;
1794 uTmpRsp.Words.w0 += cbItem;
1795 }
1796 *puNewRsp = uTmpRsp.u;
1797 return GCPtrTop;
1798}
1799
1800
1801/**
1802 * Calculates the effective stack address for a push of the specified size as
1803 * well as the new temporary RSP value (upper bits may be masked).
1804 *
1805 * @returns Effective stack address for the push.
1806 * @param pTmpRsp The temporary stack pointer. This is updated.
1807 * @param cbItem The size of the stack item to push.
1808 * @param pCtx Where to get the current stack mode.
1809 */
1810DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
1811{
1812 RTGCPTR GCPtrTop;
1813
1814 if (pCtx->ssHid.Attr.n.u1Long)
1815 GCPtrTop = pTmpRsp->u -= cbItem;
1816 else if (pCtx->ssHid.Attr.n.u1DefBig)
1817 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
1818 else
1819 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
1820 return GCPtrTop;
1821}
1822
1823
1824/**
1825 * Gets the effective stack address for a pop of the specified size and
1826 * calculates and updates the temporary RSP.
1827 *
1828 * @returns Current stack pointer.
1829 * @param pTmpRsp The temporary stack pointer. This is updated.
1830 * @param pCtx Where to get the current stack mode.
1831 * @param cbItem The size of the stack item to pop.
1832 */
1833DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
1834{
1835 RTGCPTR GCPtrTop;
1836 if (pCtx->ssHid.Attr.n.u1Long)
1837 {
1838 GCPtrTop = pTmpRsp->u;
1839 pTmpRsp->u += cbItem;
1840 }
1841 else if (pCtx->ssHid.Attr.n.u1DefBig)
1842 {
1843 GCPtrTop = pTmpRsp->DWords.dw0;
1844 pTmpRsp->DWords.dw0 += cbItem;
1845 }
1846 else
1847 {
1848 GCPtrTop = pTmpRsp->Words.w0;
1849 pTmpRsp->Words.w0 += cbItem;
1850 }
1851 return GCPtrTop;
1852}
1853
1854
1855/**
1856 * Checks if an AMD CPUID feature bit is set.
1857 *
1858 * @returns true / false.
1859 *
1860 * @param pIemCpu The IEM per CPU data.
1861 * @param fEdx The EDX bit(s) to test, or 0 if only ECX is to be tested.
1862 * @param fEcx The ECX bit(s) to test, or 0 if only EDX is to be tested.
1863 * @remarks Used via IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX.
1864 */
1865static bool iemRegIsAmdCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
1866{
1867 uint32_t uEax, uEbx, uEcx, uEdx;
1868 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x80000001, &uEax, &uEbx, &uEcx, &uEdx);
1869 return (fEcx && (uEcx & fEcx))
1870 || (fEdx && (uEdx & fEdx));
1871}
1872
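/*
 * The IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX wrapper mentioned in the remarks
 * lives in IEMInternal.h; conceptually it boils down to something along these
 * lines (sketch, not the verbatim definition):
 *
 *     #define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) \
 *         iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
 */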
1873/** @} */
1874
1875
1876/** @name Memory access.
1877 *
1878 * @{
1879 */
1880
1881
1882/**
1883 * Checks if the given segment can be written to, raising the appropriate
1884 * exception if not.
1885 *
1886 * @returns VBox strict status code.
1887 *
1888 * @param pIemCpu The IEM per CPU data.
1889 * @param pHid Pointer to the hidden register.
1890 * @param iSegReg The register number.
1891 */
1892static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
1893{
1894 if (!pHid->Attr.n.u1Present)
1895 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
1896
1897 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
1898 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
1899 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
1900 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
1901
1902 /** @todo DPL/RPL/CPL? */
1903
1904 return VINF_SUCCESS;
1905}
1906
1907
1908/**
1909 * Checks if the given segment can be read from, raising the appropriate
1910 * exception if not.
1911 *
1912 * @returns VBox strict status code.
1913 *
1914 * @param pIemCpu The IEM per CPU data.
1915 * @param pHid Pointer to the hidden register.
1916 * @param iSegReg The register number.
1917 */
1918static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
1919{
1920 if (!pHid->Attr.n.u1Present)
1921 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
1922
1923 if ( (pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE
1924 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
1925 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
1926
1927 /** @todo DPL/RPL/CPL? */
1928
1929 return VINF_SUCCESS;
1930}
1931
1932
1933/**
1934 * Applies the segment limit, base and attributes.
1935 *
1936 * This may raise a \#GP or \#SS.
1937 *
1938 * @returns VBox strict status code.
1939 *
1940 * @param pIemCpu The IEM per CPU data.
1941 * @param fAccess The kind of access which is being performed.
1942 * @param iSegReg The index of the segment register to apply.
1943 * This is UINT8_MAX if none (for IDT, GDT, LDT,
1944 * TSS, ++).
1945 * @param pGCPtrMem Pointer to the guest memory address to apply
1946 * segmentation to. Input and output parameter.
1947 */
1948static VBOXSTRICTRC iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg,
1949 size_t cbMem, PRTGCPTR pGCPtrMem)
1950{
1951 if (iSegReg == UINT8_MAX)
1952 return VINF_SUCCESS;
1953
1954 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
1955 switch (pIemCpu->enmCpuMode)
1956 {
1957 case IEMMODE_16BIT:
1958 case IEMMODE_32BIT:
1959 {
1960 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
1961 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
1962
1963 Assert(pSel->Attr.n.u1Present);
1964 Assert(pSel->Attr.n.u1DescType);
1965 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
1966 {
1967 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
1968 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
1969 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
1970
1971 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1972 {
1973 /** @todo CPL check. */
1974 }
1975
1976 /*
1977 * There are two kinds of data selectors, normal and expand down.
1978 */
1979 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
1980 {
1981 if ( GCPtrFirst32 > pSel->u32Limit
1982 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
1983 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
1984
1985 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
1986 }
1987 else
1988 {
1989 /** @todo implement expand down segments. */
1990 AssertFailed(/** @todo implement this */);
1991 return VERR_NOT_IMPLEMENTED;
1992 }
1993 }
1994 else
1995 {
1996
1997 /*
1998 * Code selectors can usually be used to read through; writing is
1999 * only permitted in real and V8086 mode.
2000 */
2001 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
2002 || ( (fAccess & IEM_ACCESS_TYPE_READ)
2003 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
2004 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
2005 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
2006
2007 if ( GCPtrFirst32 > pSel->u32Limit
2008 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
2009 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
2010
2011 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2012 {
2013 /** @todo CPL check. */
2014 }
2015
2016 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
2017 }
2018 return VINF_SUCCESS;
2019 }
2020
2021 case IEMMODE_64BIT:
2022 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
2023 *pGCPtrMem += pSel->u64Base;
2024 return VINF_SUCCESS;
2025
2026 default:
2027 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
2028 }
2029}
2030
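/*
 * Worked example for the normal (non-expand-down) data segment path above,
 * with illustrative numbers: limit = 0x0000ffff, base = 0x00010000, cbMem = 4.
 *   - Offset 0x0000fffc: GCPtrFirst32 = 0xfffc, GCPtrLast32 = 0xffff, both
 *     within the limit -> the flat address 0x0001fffc is stored in *pGCPtrMem.
 *   - Offset 0x0000fffe: GCPtrLast32 = 0x10001 exceeds the limit
 *     -> iemRaiseSelectorBounds (\#GP or \#SS, see the function docs above).
 */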
2031
2032/**
2033 * Translates a virtual address to a physical address and checks if we
2034 * can access the page as specified.
2035 *
2036 * @param pIemCpu The IEM per CPU data.
2037 * @param GCPtrMem The virtual address.
2038 * @param fAccess The intended access.
2039 * @param pGCPhysMem Where to return the physical address.
2040 */
2041static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
2042 PRTGCPHYS pGCPhysMem)
2043{
2044 /** @todo Need a different PGM interface here. We're currently using
2045 * generic / REM interfaces. This won't cut it for R0 & RC. */
2046 RTGCPHYS GCPhys;
2047 uint64_t fFlags;
2048 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
2049 if (RT_FAILURE(rc))
2050 {
2051 /** @todo Check unassigned memory in unpaged mode. */
2052 *pGCPhysMem = NIL_RTGCPHYS;
2053 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
2054 }
2055
2056 if ( (fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US)
2057 && ( ( (fAccess & IEM_ACCESS_TYPE_WRITE) /* Write to read only memory? */
2058 && !(fFlags & X86_PTE_RW)
2059 && ( pIemCpu->uCpl != 0
2060 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)) )
2061 || ( !(fFlags & X86_PTE_US) /* Kernel memory */
2062 && pIemCpu->uCpl == 3)
2063 || ( (fAccess & IEM_ACCESS_TYPE_EXEC) /* Executing non-executable memory? */
2064 && (fFlags & X86_PTE_PAE_NX)
2065 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
2066 )
2067 )
2068 {
2069 *pGCPhysMem = NIL_RTGCPHYS;
2070 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
2071 }
2072
2073 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
2074 *pGCPhysMem = GCPhys;
2075 return VINF_SUCCESS;
2076}
2077
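/*
 * In short, the check above raises \#PF (VERR_ACCESS_DENIED) when:
 *   - it's a write to a non-writable page and either CPL != 0 or CR0.WP is set;
 *   - it's any CPL 3 access to a supervisor (non-user) page;
 *   - it's an instruction fetch from a no-execute page while EFER.NXE is set.
 * The combined flag test at the top is only a fast path for the common
 * writable + user + non-NX case.
 */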
2078
2079
2080/**
2081 * Maps a physical page.
2082 *
2083 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
2084 * @param pIemCpu The IEM per CPU data.
2085 * @param GCPhysMem The physical address.
2086 * @param fAccess The intended access.
2087 * @param ppvMem Where to return the mapping address.
2088 */
2089static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem)
2090{
2091#if defined(IEM_VERIFICATION_MODE) && !defined(IEM_VERIFICATION_MODE_NO_REM)
2092 /* Force the alternative path so we can ignore writes. */
2093 if (fAccess & IEM_ACCESS_TYPE_WRITE)
2094 return VERR_PGM_PHYS_TLB_CATCH_ALL;
2095#endif
2096
2097 /*
2098 * If we can map the page without trouble, do block processing
2099 * until the end of the current page.
2100 */
2101 /** @todo need some better API. */
2102 return PGMR3PhysTlbGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu),
2103 GCPhysMem,
2104 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
2105 ppvMem);
2106}
2107
2108
2109/**
2110 * Looks up a memory mapping entry.
2111 *
2112 * @returns The mapping index (non-negative) or VERR_NOT_FOUND (negative).
2113 * @param pIemCpu The IEM per CPU data.
2114 * @param pvMem The memory address.
2115 * @param fAccess The kind of access.
2116 */
2117DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
2118{
2119 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
2120 if ( pIemCpu->aMemMappings[0].pv == pvMem
2121 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
2122 return 0;
2123 if ( pIemCpu->aMemMappings[1].pv == pvMem
2124 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
2125 return 1;
2126 if ( pIemCpu->aMemMappings[2].pv == pvMem
2127 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
2128 return 2;
2129 return VERR_NOT_FOUND;
2130}
2131
2132
2133/**
2134 * Finds a free memmap entry when using iNextMapping doesn't work.
2135 *
2136 * @returns Memory mapping index, 1024 on failure.
2137 * @param pIemCpu The IEM per CPU data.
2138 */
2139static unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
2140{
2141 /*
2142 * The easy case.
2143 */
2144 if (pIemCpu->cActiveMappings == 0)
2145 {
2146 pIemCpu->iNextMapping = 1;
2147 return 0;
2148 }
2149
2150 /* There should be enough mappings for all instructions. */
2151 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
2152
2153 AssertFailed(); /** @todo implement me. */
2154 return 1024;
2155
2156}
2157
2158
2159/**
2160 * Commits a bounce buffer that needs writing back and unmaps it.
2161 *
2162 * @returns Strict VBox status code.
2163 * @param pIemCpu The IEM per CPU data.
2164 * @param iMemMap The index of the buffer to commit.
2165 */
2166static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
2167{
2168 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
2169 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
2170
2171 /*
2172 * Do the writing.
2173 */
2174 int rc;
2175#if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM) /* No memory changes in verification mode. */
2176 if (!pIemCpu->aMemBbMappings[iMemMap].fUnassigned)
2177 {
2178 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
2179 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
2180 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
2181 if (!pIemCpu->fByPassHandlers)
2182 {
2183 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
2184 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
2185 pbBuf,
2186 cbFirst);
2187 if (cbSecond && rc == VINF_SUCCESS)
2188 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
2189 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
2190 pbBuf + cbFirst,
2191 cbSecond);
2192 }
2193 else
2194 {
2195 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
2196 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
2197 pbBuf,
2198 cbFirst);
2199 if (cbSecond && rc == VINF_SUCCESS)
2200 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
2201 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
2202 pbBuf + cbFirst,
2203 cbSecond);
2204 }
2205 }
2206 else
2207#endif
2208 rc = VINF_SUCCESS;
2209
2210#if defined(IEM_VERIFICATION_MODE) && !defined(IEM_VERIFICATION_MODE_NO_REM)
2211 /*
2212 * Record the write(s).
2213 */
2214 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
2215 if (pEvtRec)
2216 {
2217 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
2218 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
2219 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
2220 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
2221 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
2222 *pIemCpu->ppIemEvtRecNext = pEvtRec;
2223 }
2224 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
2225 {
2226 pEvtRec = iemVerifyAllocRecord(pIemCpu);
2227 if (pEvtRec)
2228 {
2229 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
2230 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
2231 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
2232 memcpy(pEvtRec->u.RamWrite.ab,
2233 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
2234 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
2235 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
2236 *pIemCpu->ppIemEvtRecNext = pEvtRec;
2237 }
2238 }
2239#endif
2240
2241 /*
2242 * Free the mapping entry.
2243 */
2244 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
2245 Assert(pIemCpu->cActiveMappings != 0);
2246 pIemCpu->cActiveMappings--;
2247 return rc;
2248}
2249
2250
2251/**
2252 * iemMemMap worker that deals with a request crossing pages.
2253 */
2254static VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem,
2255 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
2256{
2257 /*
2258 * Do the address translations.
2259 */
2260 RTGCPHYS GCPhysFirst;
2261 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
2262 if (rcStrict != VINF_SUCCESS)
2263 return rcStrict;
2264
2265 RTGCPHYS GCPhysSecond;
2266 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
2267 if (rcStrict != VINF_SUCCESS)
2268 return rcStrict;
2269 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2270
2271 /*
2272 * Read in the current memory content if it's a read or execute access.
2273 */
2274 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
2275 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
2276 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
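    /* For example, an 8 byte access whose page offset is 0xffa yields
       cbFirstPage = 0x1000 - 0xffa = 6 and cbSecondPage = 8 - 6 = 2. */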
2277
2278 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC))
2279 {
2280 int rc;
2281 if (!pIemCpu->fByPassHandlers)
2282 {
2283 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbFirstPage);
2284 if (rc != VINF_SUCCESS)
2285 return rc;
2286 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage);
2287 if (rc != VINF_SUCCESS)
2288 return rc;
2289 }
2290 else
2291 {
2292 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbFirstPage);
2293 if (rc != VINF_SUCCESS)
2294 return rc;
2295 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
2296 if (rc != VINF_SUCCESS)
2297 return rc;
2298 }
2299
2300#if defined(IEM_VERIFICATION_MODE) && !defined(IEM_VERIFICATION_MODE_NO_REM)
2301 /*
2302 * Record the reads.
2303 */
2304 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
2305 if (pEvtRec)
2306 {
2307 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
2308 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
2309 pEvtRec->u.RamRead.cb = cbFirstPage;
2310 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
2311 *pIemCpu->ppIemEvtRecNext = pEvtRec;
2312 }
2313 pEvtRec = iemVerifyAllocRecord(pIemCpu);
2314 if (pEvtRec)
2315 {
2316 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
2317 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
2318 pEvtRec->u.RamRead.cb = cbSecondPage;
2319 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
2320 *pIemCpu->ppIemEvtRecNext = pEvtRec;
2321 }
2322#endif
2323 }
2324#ifdef VBOX_STRICT
2325 else
2326 memset(pbBuf, 0xcc, cbMem);
2327#endif
2328#ifdef VBOX_STRICT
2329 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
2330 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
2331#endif
2332
2333 /*
2334 * Commit the bounce buffer entry.
2335 */
2336 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
2337 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
2338 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
2339 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
2340 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
2341 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
2342 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
2343 pIemCpu->cActiveMappings++;
2344
2345 *ppvMem = pbBuf;
2346 return VINF_SUCCESS;
2347}
2348
2349
2350/**
2351 * iemMemMap worker that deals with iemMemPageMap failures.
2352 */
2353static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
2354 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
2355{
2356 /*
2357 * Filter out conditions we can handle and the ones which shouldn't happen.
2358 */
2359 if ( rcMap != VINF_PGM_PHYS_TLB_CATCH_WRITE
2360 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
2361 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
2362 {
2363 AssertReturn(RT_FAILURE_NP(rcMap), VERR_INTERNAL_ERROR_3);
2364 return rcMap;
2365 }
2366 pIemCpu->cPotentialExits++;
2367
2368 /*
2369 * Read in the current memory content if it's a read or execute access.
2370 */
2371 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
2372 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC))
2373 {
2374 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
2375 memset(pbBuf, 0xff, cbMem);
2376 else
2377 {
2378 int rc;
2379 if (!pIemCpu->fByPassHandlers)
2380 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem);
2381 else
2382 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
2383 if (rc != VINF_SUCCESS)
2384 return rc;
2385 }
2386
2387#if defined(IEM_VERIFICATION_MODE) && !defined(IEM_VERIFICATION_MODE_NO_REM)
2388 /*
2389 * Record the read.
2390 */
2391 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
2392 if (pEvtRec)
2393 {
2394 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
2395 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
2396 pEvtRec->u.RamRead.cb = cbMem;
2397 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
2398 *pIemCpu->ppIemEvtRecNext = pEvtRec;
2399 }
2400#endif
2401 }
2402#ifdef VBOX_STRICT
2403 else
2404 memset(pbBuf, 0xcc, cbMem);
2405#endif
2406#ifdef VBOX_STRICT
2407 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
2408 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
2409#endif
2410
2411 /*
2412 * Commit the bounce buffer entry.
2413 */
2414 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
2415 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
2416 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
2417 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
2418 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
2419 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
2420 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
2421 pIemCpu->cActiveMappings++;
2422
2423 *ppvMem = pbBuf;
2424 return VINF_SUCCESS;
2425}
2426
2427
2428
2429/**
2430 * Maps the specified guest memory for the given kind of access.
2431 *
2432 * This may be using bounce buffering of the memory if it's crossing a page
2433 * boundary or if there is an access handler installed for any of it. Because
2434 * of lock prefix guarantees, we're in for some extra clutter when this
2435 * happens.
2436 *
2437 * This may raise a \#GP, \#SS, \#PF or \#AC.
2438 *
2439 * @returns VBox strict status code.
2440 *
2441 * @param pIemCpu The IEM per CPU data.
2442 * @param ppvMem Where to return the pointer to the mapped
2443 * memory.
2444 * @param cbMem The number of bytes to map. This is usually 1,
2445 * 2, 4, 6, 8, 12, 16 or 32. When used by string
2446 * operations it can be up to a page.
2447 * @param iSegReg The index of the segment register to use for
2448 * this access. The base and limits are checked.
2449 * Use UINT8_MAX to indicate that no segmentation
2450 * is required (for IDT, GDT and LDT accesses).
2451 * @param GCPtrMem The address of the guest memory.
2452 * @param fAccess How the memory is being accessed. The
2453 * IEM_ACCESS_TYPE_XXX bit is used to figure out
2454 * how to map the memory, while the
2455 * IEM_ACCESS_WHAT_XXX bit is used when raising
2456 * exceptions.
2457 */
2458static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
2459{
2460 /*
2461 * Check the input and figure out which mapping entry to use.
2462 */
2463 Assert(cbMem <= 32);
2464 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
2465
2466 unsigned iMemMap = pIemCpu->iNextMapping;
2467 if (iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings))
2468 {
2469 iMemMap = iemMemMapFindFree(pIemCpu);
2470 AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3);
2471 }
2472
2473 /*
2474 * Map the memory, checking that we can actually access it. If something
2475 * slightly complicated happens, fall back on bounce buffering.
2476 */
2477 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
2478 if (rcStrict != VINF_SUCCESS)
2479 return rcStrict;
2480
2481 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
2482 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
2483
2484 RTGCPHYS GCPhysFirst;
2485 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
2486 if (rcStrict != VINF_SUCCESS)
2487 return rcStrict;
2488
2489 void *pvMem;
2490 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem);
2491 if (rcStrict != VINF_SUCCESS)
2492 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
2493
2494 /*
2495 * Fill in the mapping table entry.
2496 */
2497 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
2498 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
2499 pIemCpu->iNextMapping = iMemMap + 1;
2500 pIemCpu->cActiveMappings++;
2501
2502 *ppvMem = pvMem;
2503 return VINF_SUCCESS;
2504}
2505
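/*
 * Typical read-modify-write usage of iemMemMap / iemMemCommitAndUnmap
 * (sketch only; the variable names are illustrative and error handling is
 * shortened):
 *
 *     uint32_t *pu32Dst;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                                       iSegReg, GCPtrMem, IEM_ACCESS_DATA_RW);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         *pu32Dst |= fBitsToSet;
 *         rcStrict = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_RW);
 *     }
 *     return rcStrict;
 */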
2506
2507/**
2508 * Commits the guest memory if bounce buffered and unmaps it.
2509 *
2510 * @returns Strict VBox status code.
2511 * @param pIemCpu The IEM per CPU data.
2512 * @param pvMem The mapping.
2513 * @param fAccess The kind of access.
2514 */
2515static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
2516{
2517 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
2518 AssertReturn(iMemMap >= 0, iMemMap);
2519
2520 /*
2521 * If it's bounce buffered, we need to write back the buffer.
2522 */
2523 if ( (pIemCpu->aMemMappings[iMemMap].fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_TYPE_WRITE))
2524 == (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_TYPE_WRITE))
2525 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
2526
2527 /* Free the entry. */
2528 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
2529 Assert(pIemCpu->cActiveMappings != 0);
2530 pIemCpu->cActiveMappings--;
2531 return VINF_SUCCESS;
2532}
2533
2534
2535/**
2536 * Fetches a data byte.
2537 *
2538 * @returns Strict VBox status code.
2539 * @param pIemCpu The IEM per CPU data.
2540 * @param pu8Dst Where to return the byte.
2541 * @param iSegReg The index of the segment register to use for
2542 * this access. The base and limits are checked.
2543 * @param GCPtrMem The address of the guest memory.
2544 */
2545static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
2546{
2547 /* The lazy approach for now... */
2548 uint8_t const *pu8Src;
2549 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
2550 if (rc == VINF_SUCCESS)
2551 {
2552 *pu8Dst = *pu8Src;
2553 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
2554 }
2555 return rc;
2556}
2557
2558
2559/**
2560 * Fetches a data word.
2561 *
2562 * @returns Strict VBox status code.
2563 * @param pIemCpu The IEM per CPU data.
2564 * @param pu16Dst Where to return the word.
2565 * @param iSegReg The index of the segment register to use for
2566 * this access. The base and limits are checked.
2567 * @param GCPtrMem The address of the guest memory.
2568 */
2569static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
2570{
2571 /* The lazy approach for now... */
2572 uint16_t const *pu16Src;
2573 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
2574 if (rc == VINF_SUCCESS)
2575 {
2576 *pu16Dst = *pu16Src;
2577 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
2578 }
2579 return rc;
2580}
2581
2582
2583/**
2584 * Fetches a data dword.
2585 *
2586 * @returns Strict VBox status code.
2587 * @param pIemCpu The IEM per CPU data.
2588 * @param pu32Dst Where to return the dword.
2589 * @param iSegReg The index of the segment register to use for
2590 * this access. The base and limits are checked.
2591 * @param GCPtrMem The address of the guest memory.
2592 */
2593static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
2594{
2595 /* The lazy approach for now... */
2596 uint32_t const *pu32Src;
2597 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
2598 if (rc == VINF_SUCCESS)
2599 {
2600 *pu32Dst = *pu32Src;
2601 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
2602 }
2603 return rc;
2604}
2605
2606
2607/**
2608 * Fetches a data dword and sign extends it to a qword.
2609 *
2610 * @returns Strict VBox status code.
2611 * @param pIemCpu The IEM per CPU data.
2612 * @param pu64Dst Where to return the sign extended value.
2613 * @param iSegReg The index of the segment register to use for
2614 * this access. The base and limits are checked.
2615 * @param GCPtrMem The address of the guest memory.
2616 */
2617static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
2618{
2619 /* The lazy approach for now... */
2620 int32_t const *pi32Src;
2621 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
2622 if (rc == VINF_SUCCESS)
2623 {
2624 *pu64Dst = *pi32Src;
2625 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
2626 }
2627 return rc;
2628}
2629
2630
2631/**
2632 * Fetches a data qword.
2633 *
2634 * @returns Strict VBox status code.
2635 * @param pIemCpu The IEM per CPU data.
2636 * @param pu64Dst Where to return the qword.
2637 * @param iSegReg The index of the segment register to use for
2638 * this access. The base and limits are checked.
2639 * @param GCPtrMem The address of the guest memory.
2640 */
2641static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
2642{
2643 /* The lazy approach for now... */
2644 uint64_t const *pu64Src;
2645 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
2646 if (rc == VINF_SUCCESS)
2647 {
2648 *pu64Dst = *pu64Src;
2649 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
2650 }
2651 return rc;
2652}
2653
2654
2655/**
2656 * Fetches a descriptor register (lgdt, lidt).
2657 *
2658 * @returns Strict VBox status code.
2659 * @param pIemCpu The IEM per CPU data.
2660 * @param pcbLimit Where to return the limit.
2661 * @param pGCPtrBase Where to return the base.
2662 * @param iSegReg The index of the segment register to use for
2663 * this access. The base and limits are checked.
2664 * @param GCPtrMem The address of the guest memory.
2665 * @param enmOpSize The effective operand size.
2666 */
2667static VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase,
2668 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
2669{
2670 uint8_t const *pu8Src;
2671 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
2672 (void **)&pu8Src,
2673 enmOpSize == IEMMODE_64BIT
2674 ? 2 + 8
2675 : enmOpSize == IEMMODE_32BIT
2676 ? 2 + 4
2677 : 2 + 3,
2678 iSegReg,
2679 GCPtrMem,
2680 IEM_ACCESS_DATA_R);
2681 if (rcStrict == VINF_SUCCESS)
2682 {
2683 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
2684 switch (enmOpSize)
2685 {
2686 case IEMMODE_16BIT:
2687 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
2688 break;
2689 case IEMMODE_32BIT:
2690 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
2691 break;
2692 case IEMMODE_64BIT:
2693 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
2694 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
2695 break;
2696
2697 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2698 }
2699 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
2700 }
2701 return rcStrict;
2702}
2703
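/*
 * Sketch of how an lgdt/lidt style implementation might consume the helper
 * above (illustrative only; iEffSeg, GCPtrEffSrc and enmEffOpSize are assumed
 * to come from the decoder):
 *
 *     uint16_t cbLimit;
 *     RTGCPTR  GCPtrBase;
 *     VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase,
 *                                                 iEffSeg, GCPtrEffSrc, enmEffOpSize);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     ...then load the descriptor table register via CPUM as appropriate...
 */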
2704
2705
2706/**
2707 * Stores a data byte.
2708 *
2709 * @returns Strict VBox status code.
2710 * @param pIemCpu The IEM per CPU data.
2711 * @param iSegReg The index of the segment register to use for
2712 * this access. The base and limits are checked.
2713 * @param GCPtrMem The address of the guest memory.
2714 * @param u8Value The value to store.
2715 */
2716static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
2717{
2718 /* The lazy approach for now... */
2719 uint8_t *pu8Dst;
2720 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
2721 if (rc == VINF_SUCCESS)
2722 {
2723 *pu8Dst = u8Value;
2724 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
2725 }
2726 return rc;
2727}
2728
2729
2730/**
2731 * Stores a data word.
2732 *
2733 * @returns Strict VBox status code.
2734 * @param pIemCpu The IEM per CPU data.
2735 * @param iSegReg The index of the segment register to use for
2736 * this access. The base and limits are checked.
2737 * @param GCPtrMem The address of the guest memory.
2738 * @param u16Value The value to store.
2739 */
2740static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
2741{
2742 /* The lazy approach for now... */
2743 uint16_t *pu16Dst;
2744 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
2745 if (rc == VINF_SUCCESS)
2746 {
2747 *pu16Dst = u16Value;
2748 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
2749 }
2750 return rc;
2751}
2752
2753
2754/**
2755 * Stores a data dword.
2756 *
2757 * @returns Strict VBox status code.
2758 * @param pIemCpu The IEM per CPU data.
2759 * @param iSegReg The index of the segment register to use for
2760 * this access. The base and limits are checked.
2761 * @param GCPtrMem The address of the guest memory.
2762 * @param u32Value The value to store.
2763 */
2764static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
2765{
2766 /* The lazy approach for now... */
2767 uint32_t *pu32Dst;
2768 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
2769 if (rc == VINF_SUCCESS)
2770 {
2771 *pu32Dst = u32Value;
2772 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
2773 }
2774 return rc;
2775}
2776
2777
2778/**
2779 * Stores a data qword.
2780 *
2781 * @returns Strict VBox status code.
2782 * @param pIemCpu The IEM per CPU data.
2783 * @param iSegReg The index of the segment register to use for
2784 * this access. The base and limits are checked.
2785 * @param GCPtrMem The address of the guest memory.
2786 * @param u64Value The value to store.
2787 */
2788static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
2789{
2790 /* The lazy approach for now... */
2791 uint64_t *pu64Dst;
2792 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
2793 if (rc == VINF_SUCCESS)
2794 {
2795 *pu64Dst = u64Value;
2796 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
2797 }
2798 return rc;
2799}
2800
2801
2802/**
2803 * Pushes a word onto the stack.
2804 *
2805 * @returns Strict VBox status code.
2806 * @param pIemCpu The IEM per CPU data.
2807 * @param u16Value The value to push.
2808 */
2809static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
2810{
2811 /* Decrement the stack pointer. */
2812 uint64_t uNewRsp;
2813 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2814 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 2, &uNewRsp);
2815
2816 /* Write the word the lazy way. */
2817 uint16_t *pu16Dst;
2818 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
2819 if (rc == VINF_SUCCESS)
2820 {
2821 *pu16Dst = u16Value;
2822 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
2823 }
2824
2825 /* Commit the new RSP value unless an access handler made trouble. */
2826 if (rc == VINF_SUCCESS)
2827 pCtx->rsp = uNewRsp;
2828
2829 return rc;
2830}
2831
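/*
 * Sketch of a caller pushing a decoded immediate word (illustrative; assumes
 * an instruction implementation where pIemCpu, u16Imm and cbInstr are in
 * scope). Any \#SS or \#PF is raised inside the push helper, so the caller
 * only propagates the status and advances RIP on success:
 *
 *     VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, u16Imm);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     iemRegAddToRip(pIemCpu, cbInstr);
 */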
2832
2833/**
2834 * Pushes a dword onto the stack.
2835 *
2836 * @returns Strict VBox status code.
2837 * @param pIemCpu The IEM per CPU data.
2838 * @param u32Value The value to push.
2839 */
2840static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
2841{
2842 /* Decrement the stack pointer. */
2843 uint64_t uNewRsp;
2844 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2845 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 4, &uNewRsp);
2846
2847 /* Write the word the lazy way. */
2848 uint32_t *pu32Dst;
2849 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
2850 if (rc == VINF_SUCCESS)
2851 {
2852 *pu32Dst = u32Value;
2853 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
2854 }
2855
2856 /* Commit the new RSP value unless an access handler made trouble. */
2857 if (rc == VINF_SUCCESS)
2858 pCtx->rsp = uNewRsp;
2859
2860 return rc;
2861}
2862
2863
2864/**
2865 * Pushes a qword onto the stack.
2866 *
2867 * @returns Strict VBox status code.
2868 * @param pIemCpu The IEM per CPU data.
2869 * @param u64Value The value to push.
2870 */
2871static VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
2872{
2873 /* Decrement the stack pointer. */
2874 uint64_t uNewRsp;
2875 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2876 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 8, &uNewRsp);
2877
2878 /* Write the word the lazy way. */
2879 uint64_t *pu64Dst;
2880 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
2881 if (rc == VINF_SUCCESS)
2882 {
2883 *pu64Dst = u64Value;
2884 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
2885 }
2886
2887 /* Commit the new RSP value unless an access handler made trouble. */
2888 if (rc == VINF_SUCCESS)
2889 pCtx->rsp = uNewRsp;
2890
2891 return rc;
2892}
2893
2894
2895/**
2896 * Pops a word from the stack.
2897 *
2898 * @returns Strict VBox status code.
2899 * @param pIemCpu The IEM per CPU data.
2900 * @param pu16Value Where to store the popped value.
2901 */
2902static VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
2903{
2904 /* Increment the stack pointer. */
2905 uint64_t uNewRsp;
2906 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2907 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 2, &uNewRsp);
2908
2909 /* Fetch the word the lazy way. */
2910 uint16_t const *pu16Src;
2911 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
2912 if (rc == VINF_SUCCESS)
2913 {
2914 *pu16Value = *pu16Src;
2915 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
2916
2917 /* Commit the new RSP value. */
2918 if (rc == VINF_SUCCESS)
2919 pCtx->rsp = uNewRsp;
2920 }
2921
2922 return rc;
2923}
2924
2925
2926/**
2927 * Pops a dword from the stack.
2928 *
2929 * @returns Strict VBox status code.
2930 * @param pIemCpu The IEM per CPU data.
2931 * @param pu32Value Where to store the popped value.
2932 */
2933static VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
2934{
2935 /* Increment the stack pointer. */
2936 uint64_t uNewRsp;
2937 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2938 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 4, &uNewRsp);
2939
2940 /* Fetch the dword the lazy way. */
2941 uint32_t const *pu32Src;
2942 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
2943 if (rc == VINF_SUCCESS)
2944 {
2945 *pu32Value = *pu32Src;
2946 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
2947
2948 /* Commit the new RSP value. */
2949 if (rc == VINF_SUCCESS)
2950 pCtx->rsp = uNewRsp;
2951 }
2952
2953 return rc;
2954}
2955
2956
2957/**
2958 * Pops a qword from the stack.
2959 *
2960 * @returns Strict VBox status code.
2961 * @param pIemCpu The IEM per CPU data.
2962 * @param pu64Value Where to store the popped value.
2963 */
2964static VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
2965{
2966 /* Increment the stack pointer. */
2967 uint64_t uNewRsp;
2968 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2969 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 8, &uNewRsp);
2970
2971 /* Fetch the qword the lazy way. */
2972 uint64_t const *pu64Src;
2973 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
2974 if (rc == VINF_SUCCESS)
2975 {
2976 *pu64Value = *pu64Src;
2977 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
2978
2979 /* Commit the new RSP value. */
2980 if (rc == VINF_SUCCESS)
2981 pCtx->rsp = uNewRsp;
2982 }
2983
2984 return rc;
2985}
2986
2987
2988/**
2989 * Pushes a word onto the stack, using a temporary stack pointer.
2990 *
2991 * @returns Strict VBox status code.
2992 * @param pIemCpu The IEM per CPU data.
2993 * @param u16Value The value to push.
2994 * @param pTmpRsp Pointer to the temporary stack pointer.
2995 */
2996static VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
2997{
2998 /* Decrement the stack pointer. */
2999 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3000 RTUINT64U NewRsp = *pTmpRsp;
3001 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 2, pCtx);
3002
3003 /* Write the word the lazy way. */
3004 uint16_t *pu16Dst;
3005 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
3006 if (rc == VINF_SUCCESS)
3007 {
3008 *pu16Dst = u16Value;
3009 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
3010 }
3011
3012 /* Commit the new RSP value unless an access handler made trouble. */
3013 if (rc == VINF_SUCCESS)
3014 *pTmpRsp = NewRsp;
3015
3016 return rc;
3017}
3018
3019
3020/**
3021 * Pushes a dword onto the stack, using a temporary stack pointer.
3022 *
3023 * @returns Strict VBox status code.
3024 * @param pIemCpu The IEM per CPU data.
3025 * @param u32Value The value to push.
3026 * @param pTmpRsp Pointer to the temporary stack pointer.
3027 */
3028static VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
3029{
3030 /* Decrement the stack pointer. */
3031 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3032 RTUINT64U NewRsp = *pTmpRsp;
3033 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 4, pCtx);
3034
3035 /* Write the word the lazy way. */
3036 uint32_t *pu32Dst;
3037 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
3038 if (rc == VINF_SUCCESS)
3039 {
3040 *pu32Dst = u32Value;
3041 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
3042 }
3043
3044 /* Commit the new RSP value unless an access handler made trouble. */
3045 if (rc == VINF_SUCCESS)
3046 *pTmpRsp = NewRsp;
3047
3048 return rc;
3049}
3050
3051
3052/**
3053 * Pushes a qword onto the stack, using a temporary stack pointer.
3054 *
3055 * @returns Strict VBox status code.
3056 * @param pIemCpu The IEM per CPU data.
3057 * @param u64Value The value to push.
3058 * @param pTmpRsp Pointer to the temporary stack pointer.
3059 */
3060static VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
3061{
3062 /* Decrement the stack pointer. */
3063 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3064 RTUINT64U NewRsp = *pTmpRsp;
3065 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 8, pCtx);
3066
3067 /* Write the word the lazy way. */
3068 uint64_t *pu64Dst;
3069 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
3070 if (rc == VINF_SUCCESS)
3071 {
3072 *pu64Dst = u64Value;
3073 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
3074 }
3075
3076 /* Commit the new RSP value unless an access handler made trouble. */
3077 if (rc == VINF_SUCCESS)
3078 *pTmpRsp = NewRsp;
3079
3080 return rc;
3081}
3082
3083
3084/**
3085 * Pops a word from the stack, using a temporary stack pointer.
3086 *
3087 * @returns Strict VBox status code.
3088 * @param pIemCpu The IEM per CPU data.
3089 * @param pu16Value Where to store the popped value.
3090 * @param pTmpRsp Pointer to the temporary stack pointer.
3091 */
3092static VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
3093{
3094 /* Increment the stack pointer. */
3095 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3096 RTUINT64U NewRsp = *pTmpRsp;
3097 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 2, pCtx);
3098
3099 /* Fetch the word the lazy way. */
3100 uint16_t const *pu16Src;
3101 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
3102 if (rc == VINF_SUCCESS)
3103 {
3104 *pu16Value = *pu16Src;
3105 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
3106
3107 /* Commit the new RSP value. */
3108 if (rc == VINF_SUCCESS)
3109 *pTmpRsp = NewRsp;
3110 }
3111
3112 return rc;
3113}
3114
3115
3116/**
3117 * Pops a dword from the stack, using a temporary stack pointer.
3118 *
3119 * @returns Strict VBox status code.
3120 * @param pIemCpu The IEM per CPU data.
3121 * @param pu32Value Where to store the popped value.
3122 * @param pTmpRsp Pointer to the temporary stack pointer.
3123 */
3124static VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
3125{
3126 /* Increment the stack pointer. */
3127 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3128 RTUINT64U NewRsp = *pTmpRsp;
3129 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 4, pCtx);
3130
3131 /* Fetch the dword the lazy way. */
3132 uint32_t const *pu32Src;
3133 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
3134 if (rc == VINF_SUCCESS)
3135 {
3136 *pu32Value = *pu32Src;
3137 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
3138
3139 /* Commit the new RSP value. */
3140 if (rc == VINF_SUCCESS)
3141 *pTmpRsp = NewRsp;
3142 }
3143
3144 return rc;
3145}
3146
3147
3148/**
3149 * Pops a qword from the stack, using a temporary stack pointer.
3150 *
3151 * @returns Strict VBox status code.
3152 * @param pIemCpu The IEM per CPU data.
3153 * @param pu64Value Where to store the popped value.
3154 * @param pTmpRsp Pointer to the temporary stack pointer.
3155 */
3156static VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
3157{
3158 /* Increment the stack pointer. */
3159 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3160 RTUINT64U NewRsp = *pTmpRsp;
3161 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);
3162
3163 /* Fetch the qword the lazy way. */
3164 uint64_t const *pu64Src;
3165 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
3166 if (rcStrict == VINF_SUCCESS)
3167 {
3168 *pu64Value = *pu64Src;
3169 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
3170
3171 /* Commit the new RSP value. */
3172 if (rcStrict == VINF_SUCCESS)
3173 *pTmpRsp = NewRsp;
3174 }
3175
3176 return rcStrict;
3177}
3178
3179
3180/**
3181 * Begin a special stack push (used by interrupts, exceptions and such).
3182 *
3183 * This will raise \#SS or \#PF if appropriate.
3184 *
3185 * @returns Strict VBox status code.
3186 * @param pIemCpu The IEM per CPU data.
3187 * @param cbMem The number of bytes to push onto the stack.
3188 * @param ppvMem Where to return the pointer to the stack memory.
3189 * As with the other memory functions this could be
3190 * direct access or bounce buffered access, so
3191 * don't commit register until the commit call
3192 * succeeds.
3193 * @param puNewRsp Where to return the new RSP value. This must be
3194 * passed unchanged to
3195 * iemMemStackPushCommitSpecial().
3196 */
3197static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
3198{
3199 Assert(cbMem < UINT8_MAX);
3200 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3201 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, (uint8_t)cbMem, puNewRsp);
3202 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
3203}
3204
3205
3206/**
3207 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
3208 *
3209 * This will update the rSP.
3210 *
3211 * @returns Strict VBox status code.
3212 * @param pIemCpu The IEM per CPU data.
3213 * @param pvMem The pointer returned by
3214 * iemMemStackPushBeginSpecial().
3215 * @param uNewRsp The new RSP value returned by
3216 * iemMemStackPushBeginSpecial().
3217 */
3218static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
3219{
3220 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
3221 if (rcStrict == VINF_SUCCESS)
3222 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
3223 return rcStrict;
3224}
3225
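/*
 * Typical usage when building, say, a real-mode interrupt frame (sketch;
 * the exact frame layout and field picks are illustrative, not the real
 * interrupt dispatch code):
 *
 *     PCPUMCTX  pCtx = pIemCpu->CTX_SUFF(pCtx);
 *     uint64_t  uNewRsp;
 *     uint16_t *pu16Frame;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     pu16Frame[2] = (uint16_t)pCtx->eflags.u;
 *     pu16Frame[1] = pCtx->cs;
 *     pu16Frame[0] = (uint16_t)pCtx->rip;
 *     rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
 */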
3226
3227/**
3228 * Begin a special stack pop (used by iret, retf and such).
3229 *
3230 * This will raise \#SS or \#PF if appropriate.
3231 *
3232 * @returns Strict VBox status code.
3233 * @param pIemCpu The IEM per CPU data.
3234 * @param cbMem The number of bytes to pop off the stack.
3235 * @param ppvMem Where to return the pointer to the stack memory.
3236 * @param puNewRsp Where to return the new RSP value. This must be
3237 * passed unchanged to
3238 * iemMemStackPopCommitSpecial().
3239 */
3240static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
3241{
3242 Assert(cbMem < UINT8_MAX);
3243 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3244 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, (uint8_t)cbMem, puNewRsp);
3245 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
3246}
3247
3248
3249/**
3250 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
3251 *
3252 * This will update the rSP.
3253 *
3254 * @returns Strict VBox status code.
3255 * @param pIemCpu The IEM per CPU data.
3256 * @param pvMem The pointer returned by
3257 * iemMemStackPopBeginSpecial().
3258 * @param uNewRsp The new RSP value returned by
3259 * iemMemStackPopBeginSpecial().
3260 */
3261static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
3262{
3263 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
3264 if (rcStrict == VINF_SUCCESS)
3265 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
3266 return rcStrict;
3267}
3268
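/*
 * And the pop counterpart, e.g. for a 16-bit iret style unwind (sketch;
 * illustrative only):
 *
 *     uint64_t        uNewRsp;
 *     uint16_t const *pu16Frame;
 *     VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, (void const **)&pu16Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     uint16_t const uNewIp    = pu16Frame[0];
 *     uint16_t const uNewCs    = pu16Frame[1];
 *     uint16_t const uNewFlags = pu16Frame[2];
 *     rcStrict = iemMemStackPopCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
 */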
3269
3270/**
3271 * Fetches a descriptor table entry.
3272 *
3273 * @returns Strict VBox status code.
3274 * @param pIemCpu The IEM per CPU.
3275 * @param pDesc Where to return the descriptor table entry.
3276 * @param uSel The selector which table entry to fetch.
3277 */
3278static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel)
3279{
3280 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3281
3282 /** @todo did the 286 require all 8 bytes to be accessible? */
3283 /*
3284 * Get the selector table base and check bounds.
3285 */
3286 RTGCPTR GCPtrBase;
3287 if (uSel & X86_SEL_LDT)
3288 {
3289 if ( !pCtx->ldtrHid.Attr.n.u1Present
3290 || (uSel | 0x7U) > pCtx->ldtrHid.u32Limit )
3291 {
3292 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
3293 uSel, pCtx->ldtrHid.u32Limit, pCtx->ldtr));
3294 /** @todo is this the right exception? */
3295 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
3296 }
3297
3298 Assert(pCtx->ldtrHid.Attr.n.u1Present);
3299 GCPtrBase = pCtx->ldtrHid.u64Base;
3300 }
3301 else
3302 {
3303 if ((uSel | 0x7U) > pCtx->gdtr.cbGdt)
3304 {
3305 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
3306 /** @todo is this the right exception? */
3307 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
3308 }
3309 GCPtrBase = pCtx->gdtr.pGdt;
3310 }
3311
3312 /*
3313 * Read the legacy descriptor and maybe the long mode extensions if
3314 * required.
3315 */
3316 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
3317 if (rcStrict == VINF_SUCCESS)
3318 {
3319 if ( !IEM_IS_LONG_MODE(pIemCpu)
3320 || pDesc->Legacy.Gen.u1DescType)
3321 pDesc->Long.au64[1] = 0;
3322 else if ((uint32_t)(uSel & X86_SEL_MASK) + 15 < (uSel & X86_SEL_LDT ? pCtx->ldtrHid.u32Limit : pCtx->gdtr.cbGdt))
3323 rcStrict = iemMemFetchDataU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8);
3324 else
3325 {
3326 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
3327 /** @todo is this the right exception? */
3328 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
3329 }
3330 }
3331 return rcStrict;
3332}
3333
3334
3335/**
3336 * Marks the selector descriptor as accessed (only non-system descriptors).
3337 *
3338 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
3339 * will therefore skip the limit checks.
3340 *
3341 * @returns Strict VBox status code.
3342 * @param pIemCpu The IEM per CPU.
3343 * @param uSel The selector.
3344 */
3345static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
3346{
3347 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3348
3349 /*
3350 * Get the selector table base and check bounds.
3351 */
3352 RTGCPTR GCPtr = uSel & X86_SEL_LDT
3353 ? pCtx->ldtrHid.u64Base
3354 : pCtx->gdtr.pGdt;
3355 GCPtr += uSel & X86_SEL_MASK;
3356 GCPtr += 2 + 2;
3357 uint32_t volatile *pu32; /** @todo Does the CPU do a 32-bit or 8-bit access here? */
3358 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_DATA_RW);
3359 if (rcStrict == VINF_SUCCESS)
3360 {
3361 ASMAtomicBitSet(pu32, 0); /* X86_SEL_TYPE_ACCESSED is 1 */
3362
3363 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_DATA_RW);
3364 }
3365
3366 return rcStrict;
3367}
3368
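/*
 * Sketch of how a far transfer implementation might combine the two helpers
 * above (illustrative; the real code does considerably more validation):
 *
 *     IEMSELDESC Desc;
 *     VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     ...check descriptor type, DPL/RPL and presence here...
 *     if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
 *     {
 *         rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
 *         if (rcStrict != VINF_SUCCESS)
 *             return rcStrict;
 *     }
 */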
3369/** @} */
3370
3371
3372/** @name Misc Helpers
3373 * @{
3374 */
3375
3376/**
3377 * Checks if we are allowed to access the given I/O port, raising the
3378 * appropriate exceptions if we aren't (or if the I/O bitmap is not
3379 * accessible).
3380 *
3381 * @returns Strict VBox status code.
3382 *
3383 * @param pIemCpu The IEM per CPU data.
3384 * @param pCtx The register context.
3385 * @param u16Port The port number.
3386 * @param cbOperand The operand size.
3387 */
3388DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
3389{
3390 if ( (pCtx->cr0 & X86_CR0_PE)
3391 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
3392 || pCtx->eflags.Bits.u1VM) )
3393 {
3394 /** @todo I/O port permission bitmap check */
3395 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
3396 }
3397 return VINF_SUCCESS;
3398}
3399
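/*
 * Sketch of how an IN/OUT style implementation would use the check above
 * before touching the port via IOM (illustrative; u16Port and cbReg come
 * from the decoded instruction):
 *
 *     VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     ...then perform the access with IOMIOPortRead / IOMIOPortWrite...
 */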
3400/** @} */
3401
3402
3403/** @name C Implementations
3404 * @{
3405 */
3406
3407/**
3408 * Implements a 16-bit popa.
3409 */
3410IEM_CIMPL_DEF_0(iemCImpl_popa_16)
3411{
3412 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3413 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
3414 RTGCPTR GCPtrLast = GCPtrStart + 15;
3415 VBOXSTRICTRC rcStrict;
3416
3417 /*
3418 * The docs are a bit hard to comprehend here, but it looks like we wrap
3419 * around in real mode as long as none of the individual "popa" crosses the
3420 * end of the stack segment. In protected mode we check the whole access
3421 * in one go. For efficiency, only do the word-by-word thing if we're in
3422 * danger of wrapping around.
3423 */
3424 /** @todo do popa boundary / wrap-around checks. */
3425 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
3426 && (pCtx->csHid.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
3427 {
3428 /* word-by-word */
3429 RTUINT64U TmpRsp;
3430 TmpRsp.u = pCtx->rsp;
3431 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->di, &TmpRsp);
3432 if (rcStrict == VINF_SUCCESS)
3433 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->si, &TmpRsp);
3434 if (rcStrict == VINF_SUCCESS)
3435 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bp, &TmpRsp);
3436 if (rcStrict == VINF_SUCCESS)
3437 {
3438 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
3439 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bx, &TmpRsp);
3440 }
3441 if (rcStrict == VINF_SUCCESS)
3442 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->dx, &TmpRsp);
3443 if (rcStrict == VINF_SUCCESS)
3444 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->cx, &TmpRsp);
3445 if (rcStrict == VINF_SUCCESS)
3446 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->ax, &TmpRsp);
3447 if (rcStrict == VINF_SUCCESS)
3448 {
3449 pCtx->rsp = TmpRsp.u;
3450 iemRegAddToRip(pIemCpu, cbInstr);
3451 }
3452 }
3453 else
3454 {
3455 uint16_t const *pa16Mem = NULL;
3456 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
3457 if (rcStrict == VINF_SUCCESS)
3458 {
3459 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
3460 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
3461 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
3462 /* skip sp */
3463 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
3464 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
3465 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
3466 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
3467 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
3468 if (rcStrict == VINF_SUCCESS)
3469 {
3470 iemRegAddToRsp(pCtx, 16);
3471 iemRegAddToRip(pIemCpu, cbInstr);
3472 }
3473 }
3474 }
3475 return rcStrict;
3476}
3477
3478
3479/**
3480 * Implements a 32-bit popa.
3481 */
3482IEM_CIMPL_DEF_0(iemCImpl_popa_32)
3483{
3484 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3485 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
3486 RTGCPTR GCPtrLast = GCPtrStart + 31;
3487 VBOXSTRICTRC rcStrict;
3488
3489 /*
3490 * The docs are a bit hard to comprehend here, but it looks like we wrap
3491 * around in real mode as long as none of the individual "popa" crosses the
3492 * end of the stack segment. In protected mode we check the whole access
3493 * in one go. For efficiency, only do the word-by-word thing if we're in
3494 * danger of wrapping around.
3495 */
3496 /** @todo do popa boundary / wrap-around checks. */
3497 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
3498 && (pCtx->csHid.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
3499 {
3500 /* word-by-word */
3501 RTUINT64U TmpRsp;
3502 TmpRsp.u = pCtx->rsp;
3503 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edi, &TmpRsp);
3504 if (rcStrict == VINF_SUCCESS)
3505 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->esi, &TmpRsp);
3506 if (rcStrict == VINF_SUCCESS)
3507 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebp, &TmpRsp);
3508 if (rcStrict == VINF_SUCCESS)
3509 {
3510 iemRegAddToRspEx(&TmpRsp, 4, pCtx); /* esp */
3511 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebx, &TmpRsp);
3512 }
3513 if (rcStrict == VINF_SUCCESS)
3514 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edx, &TmpRsp);
3515 if (rcStrict == VINF_SUCCESS)
3516 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ecx, &TmpRsp);
3517 if (rcStrict == VINF_SUCCESS)
3518 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->eax, &TmpRsp);
3519 if (rcStrict == VINF_SUCCESS)
3520 {
3521#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
3522 pCtx->rdi &= UINT32_MAX;
3523 pCtx->rsi &= UINT32_MAX;
3524 pCtx->rbp &= UINT32_MAX;
3525 pCtx->rbx &= UINT32_MAX;
3526 pCtx->rdx &= UINT32_MAX;
3527 pCtx->rcx &= UINT32_MAX;
3528 pCtx->rax &= UINT32_MAX;
3529#endif
3530 pCtx->rsp = TmpRsp.u;
3531 iemRegAddToRip(pIemCpu, cbInstr);
3532 }
3533 }
3534 else
3535 {
3536 uint32_t const *pa32Mem;
3537 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
3538 if (rcStrict == VINF_SUCCESS)
3539 {
3540 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
3541 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
3542 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
3543 /* skip esp */
3544 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
3545 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
3546 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
3547 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
3548 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
3549 if (rcStrict == VINF_SUCCESS)
3550 {
3551 iemRegAddToRsp(pCtx, 32);
3552 iemRegAddToRip(pIemCpu, cbInstr);
3553 }
3554 }
3555 }
3556 return rcStrict;
3557}
3558
3559
3560/**
3561 * Implements a 16-bit pusha.
3562 */
3563IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
3564{
3565 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3566 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
3567 RTGCPTR GCPtrBottom = GCPtrTop - 15;
3568 VBOXSTRICTRC rcStrict;
3569
3570 /*
3571 * The docs are a bit hard to comprehend here, but it looks like we wrap
3572 * around in real mode as long as none of the individual "pusha" crosses the
3573 * end of the stack segment. In protected mode we check the whole access
3574 * in one go. For efficiency, only do the word-by-word thing if we're in
3575 * danger of wrapping around.
3576 */
3577 /** @todo do pusha boundary / wrap-around checks. */
3578 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
3579 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
3580 {
3581 /* word-by-word */
3582 RTUINT64U TmpRsp;
3583 TmpRsp.u = pCtx->rsp;
3584 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->ax, &TmpRsp);
3585 if (rcStrict == VINF_SUCCESS)
3586 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->cx, &TmpRsp);
3587 if (rcStrict == VINF_SUCCESS)
3588 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->dx, &TmpRsp);
3589 if (rcStrict == VINF_SUCCESS)
3590 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bx, &TmpRsp);
3591 if (rcStrict == VINF_SUCCESS)
3592 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->sp, &TmpRsp);
3593 if (rcStrict == VINF_SUCCESS)
3594 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bp, &TmpRsp);
3595 if (rcStrict == VINF_SUCCESS)
3596 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->si, &TmpRsp);
3597 if (rcStrict == VINF_SUCCESS)
3598 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->di, &TmpRsp);
3599 if (rcStrict == VINF_SUCCESS)
3600 {
3601 pCtx->rsp = TmpRsp.u;
3602 iemRegAddToRip(pIemCpu, cbInstr);
3603 }
3604 }
3605 else
3606 {
3607 GCPtrBottom--;
3608 uint16_t *pa16Mem = NULL;
3609 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
3610 if (rcStrict == VINF_SUCCESS)
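            /* Write the whole 16-byte PUSHA image in one go; the layout mirrors
               the POPA reader above (AX in the highest word, DI in the lowest),
               and the SP slot gets the pre-PUSHA stack pointer value. */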
3611 {
3612 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
3613 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
3614 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
3615 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
3616 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
3617 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
3618 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
3619 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
3620 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
3621 if (rcStrict == VINF_SUCCESS)
3622 {
3623 iemRegSubFromRsp(pCtx, 16);
3624 iemRegAddToRip(pIemCpu, cbInstr);
3625 }
3626 }
3627 }
3628 return rcStrict;
3629}
3630
3631
3632/**
3633 * Implements a 32-bit pusha.
3634 */
3635IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
3636{
3637 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3638 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
3639 RTGCPTR GCPtrBottom = GCPtrTop - 31;
3640 VBOXSTRICTRC rcStrict;
3641
3642 /*
3643 * The docs are a bit hard to comprehend here, but it looks like we wrap
3644 * around in real mode as long as none of the individual "pusha" crosses the
3645 * end of the stack segment. In protected mode we check the whole access
3646 * in one go. For efficiency, only do the word-by-word thing if we're in
3647 * danger of wrapping around.
3648 */
3649 /** @todo do pusha boundary / wrap-around checks. */
3650 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
3651 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
3652 {
3653 /* word-by-word */
3654 RTUINT64U TmpRsp;
3655 TmpRsp.u = pCtx->rsp;
3656 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->eax, &TmpRsp);
3657 if (rcStrict == VINF_SUCCESS)
3658 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ecx, &TmpRsp);
3659 if (rcStrict == VINF_SUCCESS)
3660 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edx, &TmpRsp);
3661 if (rcStrict == VINF_SUCCESS)
3662 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebx, &TmpRsp);
3663 if (rcStrict == VINF_SUCCESS)
3664 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esp, &TmpRsp);
3665 if (rcStrict == VINF_SUCCESS)
3666 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebp, &TmpRsp);
3667 if (rcStrict == VINF_SUCCESS)
3668 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esi, &TmpRsp);
3669 if (rcStrict == VINF_SUCCESS)
3670 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edi, &TmpRsp);
3671 if (rcStrict == VINF_SUCCESS)
3672 {
3673 pCtx->rsp = TmpRsp.u;
3674 iemRegAddToRip(pIemCpu, cbInstr);
3675 }
3676 }
3677 else
3678 {
3679 GCPtrBottom--;
3680 uint32_t *pa32Mem;
3681 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
3682 if (rcStrict == VINF_SUCCESS)
3683 {
3684 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
3685 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
3686 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
3687 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
3688 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
3689 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
3690 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
3691 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
3692 rcStrict = iemMemCommitAndUnmap(pIemCpu, pa32Mem, IEM_ACCESS_STACK_W);
3693 if (rcStrict == VINF_SUCCESS)
3694 {
3695 iemRegSubFromRsp(pCtx, 32);
3696 iemRegAddToRip(pIemCpu, cbInstr);
3697 }
3698 }
3699 }
3700 return rcStrict;
3701}
3702
3703
3704/**
3705 * Implements pushf.
3706 *
3707 *
3708 * @param enmEffOpSize The effective operand size.
3709 */
3710IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
3711{
3712 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3713
3714 /*
3715 * If we're in V8086 mode some care is required (which is why we're
3716 * doing this in a C implementation).
3717 */
3718 uint32_t fEfl = pCtx->eflags.u;
3719 if ( (fEfl & X86_EFL_VM)
3720 && X86_EFL_GET_IOPL(fEfl) != 3 )
3721 {
3722 Assert(pCtx->cr0 & X86_CR0_PE);
3723 if ( enmEffOpSize != IEMMODE_16BIT
3724 || !(pCtx->cr4 & X86_CR4_VME))
3725 return iemRaiseGeneralProtectionFault0(pIemCpu);
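        /* CR4.VME with IOPL < 3: push a 16-bit image in which IF is replaced by
           VIF (bit 19 copied down into bit 9) and the real IF bit is cleared. */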
3726 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
3727 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
3728 return iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
3729 }
3730
3731 /*
3732 * Ok, clear RF and VM and push the flags.
3733 */
3734 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
3735
3736 VBOXSTRICTRC rcStrict;
3737 switch (enmEffOpSize)
3738 {
3739 case IEMMODE_16BIT:
3740 rcStrict = iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
3741 break;
3742 case IEMMODE_32BIT:
3743 rcStrict = iemMemStackPushU32(pIemCpu, fEfl);
3744 break;
3745 case IEMMODE_64BIT:
3746 rcStrict = iemMemStackPushU64(pIemCpu, fEfl);
3747 break;
3748 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3749 }
3750 if (rcStrict != VINF_SUCCESS)
3751 return rcStrict;
3752
3753 iemRegAddToRip(pIemCpu, cbInstr);
3754 return VINF_SUCCESS;
3755}
3756
3757
3758/**
3759 * Implements popf.
3760 *
3761 * @param enmEffOpSize The effective operand size.
3762 */
3763IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
3764{
3765 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3766 uint32_t const fEflOld = pCtx->eflags.u;
3767 VBOXSTRICTRC rcStrict;
3768 uint32_t fEflNew;
3769
3770 /*
3771 * V8086 is special as usual.
3772 */
3773 if (fEflOld & X86_EFL_VM)
3774 {
3775 /*
3776 * Almost anything goes if IOPL is 3.
3777 */
3778 if (X86_EFL_GET_IOPL(fEflOld) == 3)
3779 {
3780 switch (enmEffOpSize)
3781 {
3782 case IEMMODE_16BIT:
3783 {
3784 uint16_t u16Value;
3785 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
3786 if (rcStrict != VINF_SUCCESS)
3787 return rcStrict;
3788 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
3789 break;
3790 }
3791 case IEMMODE_32BIT:
3792 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
3793 if (rcStrict != VINF_SUCCESS)
3794 return rcStrict;
3795 break;
3796 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3797 }
3798
3799 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
3800 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
3801 }
3802 /*
3803 * Interrupt flag virtualization with CR4.VME=1.
3804 */
3805 else if ( enmEffOpSize == IEMMODE_16BIT
3806 && (pCtx->cr4 & X86_CR4_VME) )
3807 {
3808 uint16_t u16Value;
3809 RTUINT64U TmpRsp;
3810 TmpRsp.u = pCtx->rsp;
3811 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
3812 if (rcStrict != VINF_SUCCESS)
3813 return rcStrict;
3814
3815 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
3816 * or before? */
3817 if ( ( (u16Value & X86_EFL_IF)
3818 && (fEflOld & X86_EFL_VIP))
3819 || (u16Value & X86_EFL_TF) )
3820 return iemRaiseGeneralProtectionFault0(pIemCpu);
3821
3822 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
3823 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
3824 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
3825 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
3826
3827 pCtx->rsp = TmpRsp.u;
3828 }
3829 else
3830 return iemRaiseGeneralProtectionFault0(pIemCpu);
3831
3832 }
3833 /*
3834 * Not in V8086 mode.
3835 */
3836 else
3837 {
3838 /* Pop the flags. */
3839 switch (enmEffOpSize)
3840 {
3841 case IEMMODE_16BIT:
3842 {
3843 uint16_t u16Value;
3844 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
3845 if (rcStrict != VINF_SUCCESS)
3846 return rcStrict;
3847 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
3848 break;
3849 }
3850 case IEMMODE_32BIT:
3851 case IEMMODE_64BIT:
3852 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
3853 if (rcStrict != VINF_SUCCESS)
3854 return rcStrict;
3855 break;
3856 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3857 }
3858
3859 /* Merge them with the current flags. */
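        /* Privilege rules: CPL 0 (or an unchanged IOPL/IF pair) may update
           everything POPF can touch; CPL <= IOPL may update all but IOPL;
           otherwise both IOPL and IF are preserved from the old value. */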
3860 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
3861 || pIemCpu->uCpl == 0)
3862 {
3863 fEflNew &= X86_EFL_POPF_BITS;
3864 fEflNew |= ~X86_EFL_POPF_BITS & fEflOld;
3865 }
3866 else if (pIemCpu->uCpl <= X86_EFL_GET_IOPL(fEflOld))
3867 {
3868 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
3869 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
3870 }
3871 else
3872 {
3873 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
3874 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
3875 }
3876 }
3877
3878 /*
3879 * Commit the flags.
3880 */
3881 Assert(fEflNew & RT_BIT_32(1));
3882 pCtx->eflags.u = fEflNew;
3883 iemRegAddToRip(pIemCpu, cbInstr);
3884
3885 return VINF_SUCCESS;
3886}
3887
3888
3889/**
3890 * Implements a 16-bit relative call.
3891 *
3892 *
3893 * @param offDisp The displacement offset.
3894 */
3895IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
3896{
3897 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3898 uint16_t OldPC = pCtx->ip + cbInstr;
3899 uint16_t NewPC = OldPC + offDisp;
3900 if (NewPC > pCtx->csHid.u32Limit)
3901 return iemRaiseGeneralProtectionFault0(pIemCpu);
3902
3903 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, OldPC);
3904 if (rcStrict != VINF_SUCCESS)
3905 return rcStrict;
3906
3907 pCtx->rip = NewPC;
3908 return VINF_SUCCESS;
3909}
3910
3911
3912/**
3913 * Implements a 32-bit relative call.
3914 *
3915 *
3916 * @param offDisp The displacement offset.
3917 */
3918IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
3919{
3920 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3921 uint32_t OldPC = pCtx->eip + cbInstr;
3922 uint32_t NewPC = OldPC + offDisp;
3923 if (NewPC > pCtx->csHid.u32Limit)
3924 return iemRaiseGeneralProtectionFault0(pIemCpu);
3925
3926 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, OldPC);
3927 if (rcStrict != VINF_SUCCESS)
3928 return rcStrict;
3929
3930 pCtx->rip = NewPC;
3931 return VINF_SUCCESS;
3932}
3933
3934
3935/**
3936 * Implements a 64-bit relative call.
3937 *
3938 *
3939 * @param offDisp The displacement offset.
3940 */
3941IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
3942{
3943 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3944 uint64_t OldPC = pCtx->rip + cbInstr;
3945 uint64_t NewPC = OldPC + offDisp;
3946 if (!IEM_IS_CANONICAL(NewPC))
3947 return iemRaiseNotCanonical(pIemCpu);
3948
3949 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, OldPC);
3950 if (rcStrict != VINF_SUCCESS)
3951 return rcStrict;
3952
3953 pCtx->rip = NewPC;
3954 return VINF_SUCCESS;
3955}
3956
3957
3958/**
3959 * Implements far jumps.
3960 *
3961 * @param uSel The selector.
3962 * @param offSeg The segment offset.
3963 */
3964IEM_CIMPL_DEF_2(iemCImpl_FarJmp, uint16_t, uSel, uint32_t, offSeg)
3965{
3966 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3967
3968 /*
3969 * Real mode and V8086 mode are easy. The only snag seems to be that
3970 * CS.limit doesn't change and the limit check is done against the current
3971 * limit.
3972 */
3973 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
3974 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3975 {
3976 if (offSeg > pCtx->csHid.u32Limit)
3977 return iemRaiseGeneralProtectionFault0(pIemCpu);
3978
3979 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
3980 pCtx->rip = offSeg & UINT16_MAX;
3981 else
3982 pCtx->rip = offSeg;
3983 pCtx->cs = uSel;
3984 pCtx->csHid.u64Base = (uint32_t)uSel << 4;
3985 /** @todo REM reset the accessed bit (see on jmp far16 after disabling
3986 * PE. Check with VT-x and AMD-V. */
3987#ifdef IEM_VERIFICATION_MODE
3988 pCtx->csHid.Attr.u &= ~X86_SEL_TYPE_ACCESSED;
3989#endif
3990 return VINF_SUCCESS;
3991 }
3992
3993 /*
3994 * Protected mode. Need to parse the specified descriptor...
3995 */
3996 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
3997 {
3998 Log(("jmpf %04x:%08x -> invalid selector, #GP(0)\n", uSel, offSeg));
3999 return iemRaiseGeneralProtectionFault0(pIemCpu);
4000 }
4001
4002 /* Fetch the descriptor. */
4003 IEMSELDESC Desc;
4004 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
4005 if (rcStrict != VINF_SUCCESS)
4006 return rcStrict;
4007
4008 /* Is it there? */
4009 if (!Desc.Legacy.Gen.u1Present)
4010 {
4011 Log(("jmpf %04x:%08x -> segment not present\n", uSel, offSeg));
4012 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
4013 }
4014
4015 /*
4016 * Deal with it according to its type.
4017 */
4018 if (Desc.Legacy.Gen.u1DescType)
4019 {
4020 /* Only code segments. */
4021 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4022 {
4023 Log(("jmpf %04x:%08x -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
4024 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
4025 }
4026
4027 /* L vs D. */
4028 if ( Desc.Legacy.Gen.u1Long
4029 && Desc.Legacy.Gen.u1DefBig
4030 && IEM_IS_LONG_MODE(pIemCpu))
4031 {
4032 Log(("jmpf %04x:%08x -> both L and D are set.\n", uSel, offSeg));
4033 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
4034 }
4035
4036 /* DPL/RPL/CPL check, where conforming segments make a difference. */
4037 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4038 {
4039 if (Desc.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
4040 {
4041 Log(("jmpf %04x:%08x -> DPL violation (conforming); DPL=%d CPL=%u\n",
4042 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
4043 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
4044 }
4045 }
4046 else
4047 {
4048 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
4049 {
4050 Log(("jmpf %04x:%08x -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
4051 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
4052 }
4053 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
4054 {
4055 Log(("jmpf %04x:%08x -> RPL > CPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
4056 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
4057 }
4058 }
4059
4060 /* Limit check. (Should alternatively check for non-canonical addresses
4061 here, but that is ruled out by offSeg being 32-bit, right?) */
4062 uint64_t u64Base;
4063 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
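        /* If the granularity bit is set the limit is given in 4K pages; scale
           it to bytes, filling in the low 12 bits. */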
4064 if (Desc.Legacy.Gen.u1Granularity)
4065 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
4066 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4067 u64Base = 0;
4068 else
4069 {
4070 if (offSeg > cbLimit)
4071 {
4072 Log(("jmpf %04x:%08x -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
4073 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
4074 }
4075 u64Base = X86DESC_BASE(Desc.Legacy);
4076 }
4077
4078 /*
4079 * Ok, everything checked out fine. Now set the accessed bit before
4080 * committing the result into CS, CSHID and RIP.
4081 */
4082 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4083 {
4084 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
4085 if (rcStrict != VINF_SUCCESS)
4086 return rcStrict;
4087 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4088 }
4089
4090 /* commit */
4091 pCtx->rip = offSeg;
4092 pCtx->cs = uSel & (X86_SEL_MASK | X86_SEL_LDT);
4093 pCtx->cs |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
4094 pCtx->csHid.Attr.u = (Desc.Legacy.u >> (16+16+8)) & UINT32_C(0xf0ff);
4095 pCtx->csHid.u32Limit = cbLimit;
4096 pCtx->csHid.u64Base = u64Base;
4097 /** @todo check if the hidden bits are loaded correctly for 64-bit
4098 * mode. */
4099 return VINF_SUCCESS;
4100 }
4101
4102 /*
4103 * System selector.
4104 */
4105 if (IEM_IS_LONG_MODE(pIemCpu))
4106 switch (Desc.Legacy.Gen.u4Type)
4107 {
4108 case AMD64_SEL_TYPE_SYS_LDT:
4109 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
4110 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
4111 case AMD64_SEL_TYPE_SYS_CALL_GATE:
4112 case AMD64_SEL_TYPE_SYS_INT_GATE:
4113 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
4114 /* Call various functions to do the work. */
4115 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
4116 default:
4117 Log(("jmpf %04x:%08x -> wrong sys selector (64-bit): %d\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
4118 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
4119
4120 }
4121 switch (Desc.Legacy.Gen.u4Type)
4122 {
4123 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4124 case X86_SEL_TYPE_SYS_LDT:
4125 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4126 case X86_SEL_TYPE_SYS_TASK_GATE:
4127 case X86_SEL_TYPE_SYS_286_INT_GATE:
4128 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4129 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4130 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4131 case X86_SEL_TYPE_SYS_386_INT_GATE:
4132 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4133 /* Call various functions to do the work. */
4134 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
4135
4136 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4137 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4138 /* Call various functions to do the work. */
4139 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
4140
4141 default:
4142 Log(("jmpf %04x:%08x -> wrong sys selector (32-bit): %d\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
4143 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
4144 }
4145}
4146
4147
4148/**
4149 * Implements far calls.
4150 *
4151 * @param uSel The selector.
4152 * @param offSeg The segment offset.
4153 * @param enmOpSize The operand size (in case we need it).
4154 */
4155IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmOpSize)
4156{
4157 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4158 VBOXSTRICTRC rcStrict;
4159 uint64_t uNewRsp;
4160 void *pvRet;
4161
4162 /*
4163 * Real mode and V8086 mode are easy. The only snag seems to be that
4164 * CS.limit doesn't change and the limit check is done against the current
4165 * limit.
4166 */
4167 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
4168 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4169 {
4170 Assert(enmOpSize == IEMMODE_16BIT || enmOpSize == IEMMODE_32BIT);
4171
4172 /* Check stack first - may #SS(0). */
4173 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmOpSize == IEMMODE_32BIT ? 8 : 4,
4174 &pvRet, &uNewRsp);
4175 if (rcStrict != VINF_SUCCESS)
4176 return rcStrict;
4177
4178 /* Check the target address range. */
4179 if (offSeg > UINT32_MAX)
4180 return iemRaiseGeneralProtectionFault0(pIemCpu);
4181
4182 /* Everything is fine, push the return address. */
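        /* Frame layout: IP/EIP at the bottom of the pushed frame with CS in the
           word just above it (offset 2 for 16-bit, offset 4 for 32-bit ops). */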
4183 if (enmOpSize == IEMMODE_16BIT)
4184 {
4185 ((uint16_t *)pvRet)[0] = pCtx->ip + cbInstr;
4186 ((uint16_t *)pvRet)[1] = pCtx->cs;
4187 }
4188 else
4189 {
4190 ((uint32_t *)pvRet)[0] = pCtx->eip + cbInstr;
4191 ((uint16_t *)pvRet)[2] = pCtx->cs;
4192 }
4193 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pvRet, uNewRsp);
4194 if (rcStrict != VINF_SUCCESS)
4195 return rcStrict;
4196
4197 /* Branch. */
4198 pCtx->rip = offSeg;
4199 pCtx->cs = uSel;
4200 pCtx->csHid.u64Base = (uint32_t)uSel << 4;
4201 /** @todo Does REM reset the accessed bit here too? (See on jmp far16
4202 * after disabling PE.) Check with VT-x and AMD-V. */
4203#ifdef IEM_VERIFICATION_MODE
4204 pCtx->csHid.Attr.u &= ~X86_SEL_TYPE_ACCESSED;
4205#endif
4206 return VINF_SUCCESS;
4207 }
4208
4209 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
4210}
4211
4212
4213/**
4214 * Implements retf.
4215 *
4216 * @param enmEffOpSize The effective operand size.
4217 * @param cbPop The number of bytes of arguments to pop from the stack.
4219 */
4220IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
4221{
4222 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4223 VBOXSTRICTRC rcStrict;
4224 uint64_t uNewRsp;
4225
4226 /*
4227 * Real mode and V8086 mode are easy.
4228 */
4229 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
4230 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4231 {
4232 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
4233 uint16_t const *pu16Frame;
4234 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 8 : 4,
4235 (void const **)&pu16Frame, &uNewRsp);
4236 if (rcStrict != VINF_SUCCESS)
4237 return rcStrict;
4238 uint32_t uNewEip;
4239 uint16_t uNewCs;
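        /* The return frame has IP/EIP at the lowest address and CS in the word
           above it (offset 2 for 16-bit, offset 4 for 32-bit operands). */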
4240 if (enmEffOpSize == IEMMODE_32BIT)
4241 {
4242 uNewCs = pu16Frame[2];
4243 uNewEip = RT_MAKE_U32(pu16Frame[0], pu16Frame[1]);
4244 }
4245 else
4246 {
4247 uNewCs = pu16Frame[1];
4248 uNewEip = pu16Frame[0];
4249 }
4250 /** @todo check how this is supposed to work if sp=0xfffe. */
4251
4252 /* Check the limit of the new EIP. */
4253 /** @todo Intel pseudo code only does the limit check for 16-bit
4254 * operands, AMD does not make any distinction. What is right? */
4255 if (uNewEip > pCtx->csHid.u32Limit)
4256 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
4257
4258 /* commit the operation. */
4259 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
4260 if (rcStrict != VINF_SUCCESS)
4261 return rcStrict;
4262 pCtx->rip = uNewEip;
4263 pCtx->cs = uNewCs;
4264 pCtx->csHid.u64Base = (uint32_t)uNewCs << 4;
4265 /** @todo do we load attribs and limit as well? */
4266 if (cbPop)
4267 iemRegAddToRsp(pCtx, cbPop);
4268 return VINF_SUCCESS;
4269 }
4270
4271 AssertFailed();
4272 return VERR_NOT_IMPLEMENTED;
4273}
4274
4275
4276/**
4277 * Implements retn.
4278 *
4279 * We're doing this in C because of the \#GP that might be raised if the popped
4280 * program counter is out of bounds.
4281 *
4282 * @param enmEffOpSize The effective operand size.
4283 * @param cbPop The number of bytes of arguments to pop from the stack.
4285 */
4286IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
4287{
4288 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4289
4290 /* Fetch the RSP from the stack. */
4291 VBOXSTRICTRC rcStrict;
4292 RTUINT64U NewRip;
4293 RTUINT64U NewRsp;
4294 NewRsp.u = pCtx->rsp;
4295 switch (enmEffOpSize)
4296 {
4297 case IEMMODE_16BIT:
4298 NewRip.u = 0;
4299 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRip.Words.w0, &NewRsp);
4300 break;
4301 case IEMMODE_32BIT:
4302 NewRip.u = 0;
4303 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRip.DWords.dw0, &NewRsp);
4304 break;
4305 case IEMMODE_64BIT:
4306 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRip.u, &NewRsp);
4307 break;
4308 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4309 }
4310 if (rcStrict != VINF_SUCCESS)
4311 return rcStrict;
4312
4313 /* Check the new RSP before loading it. */
4314 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
4315 * of it. The canonical test is performed here and for call. */
4316 if (enmEffOpSize != IEMMODE_64BIT)
4317 {
4318 if (NewRip.DWords.dw0 > pCtx->csHid.u32Limit)
4319 {
4320 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->csHid.u32Limit));
4321 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
4322 }
4323 }
4324 else
4325 {
4326 if (!IEM_IS_CANONICAL(NewRip.u))
4327 {
4328 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
4329 return iemRaiseNotCanonical(pIemCpu);
4330 }
4331 }
4332
4333 /* Commit it. */
4334 pCtx->rip = NewRip.u;
4335 pCtx->rsp = NewRsp.u;
4336 if (cbPop)
4337 iemRegAddToRsp(pCtx, cbPop);
4338
4339 return VINF_SUCCESS;
4340}
4341
4342
4343/**
4344 * Implements int3 and int XX.
4345 *
4346 * @param u8Int The interrupt vector number.
4347 * @param fIsBpInstr Is it the breakpoint instruction.
4348 */
4349IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr)
4350{
4351 /** @todo we should call TRPM to do this job. */
4352 VBOXSTRICTRC rcStrict;
4353 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4354
4355 /*
4356 * Real mode is easy.
4357 */
4358 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
4359 && IEM_IS_REAL_MODE(pIemCpu))
4360 {
4361 /* read the IDT entry. */
4362 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Int + 3)
4363 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Int << X86_TRAP_ERR_SEL_SHIFT));
4364 RTFAR16 Idte;
4365 rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Int);
4366 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4367 return rcStrict;
4368
4369 /* push the stack frame. */
4370 uint16_t *pu16Frame;
4371 uint64_t uNewRsp;
4372 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
4373 if (rcStrict != VINF_SUCCESS)
4374 return rcStrict;
4375
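        /* Real-mode interrupt frame, lowest address first: return IP, CS, FLAGS. */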
4376 pu16Frame[2] = (uint16_t)pCtx->eflags.u;
4377 pu16Frame[1] = (uint16_t)pCtx->cs;
4378 pu16Frame[0] = pCtx->ip + cbInstr;
4379 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
4380 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4381 return rcStrict;
4382
4383 /* load the vector address into cs:ip. */
4384 pCtx->cs = Idte.sel;
4385 pCtx->csHid.u64Base = (uint32_t)Idte.sel << 4;
4386 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
4387 pCtx->rip = Idte.off;
4388 pCtx->eflags.Bits.u1IF = 0;
4389 return VINF_SUCCESS;
4390 }
4391
4392 AssertFailed();
4393 return VERR_NOT_IMPLEMENTED;
4394}
4395
4396
4397/**
4398 * Implements iret.
4399 *
4400 * @param enmEffOpSize The effective operand size.
4401 */
4402IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
4403{
4404 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4405 VBOXSTRICTRC rcStrict;
4406 uint64_t uNewRsp;
4407
4408 /*
4409 * Real mode is easy, V8086 mode is relatively similar.
4410 */
4411 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
4412 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4413 {
4414 /* iret throws an exception if VME isn't enabled. */
4415 if ( pCtx->eflags.Bits.u1VM
4416 && !(pCtx->cr4 & X86_CR4_VME))
4417 return iemRaiseGeneralProtectionFault0(pIemCpu);
4418
4419 /* Do the stack bits, but don't commit RSP before everything checks
4420 out right. */
4421 union
4422 {
4423 uint32_t const *pu32;
4424 uint16_t const *pu16;
4425 void const *pv;
4426 } uFrame;
4427 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
4428 uint16_t uNewCs;
4429 uint32_t uNewEip;
4430 uint32_t uNewFlags;
4431 if (enmEffOpSize == IEMMODE_32BIT)
4432 {
4433 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
4434 if (rcStrict != VINF_SUCCESS)
4435 return rcStrict;
4436 uNewEip = uFrame.pu32[0];
4437 uNewCs = (uint16_t)uFrame.pu32[1];
4438 uNewFlags = uFrame.pu32[2];
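            /* Only the flags masked in below can be changed by a real/V86 mode
               iret; VM, VIF, VIP and the fixed bit 1 are taken from the current
               EFLAGS instead. */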
4439 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
4440 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
4441 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
4442 | X86_EFL_ID;
4443 uNewFlags |= pCtx->eflags.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
4444 }
4445 else
4446 {
4447 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
4448 if (rcStrict != VINF_SUCCESS)
4449 return rcStrict;
4450 uNewEip = uFrame.pu16[0];
4451 uNewCs = uFrame.pu16[1];
4452 uNewFlags = uFrame.pu16[2];
4453 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
4454 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
4455 uNewFlags |= pCtx->eflags.u & (UINT32_C(0xffff0000) | X86_EFL_1);
4456 /** @todo The intel pseudo code does not indicate what happens to
4457 * reserved flags. We just ignore them. */
4458 }
4459 /** @todo Check how this is supposed to work if sp=0xfffe. */
4460
4461 /* Check the limit of the new EIP. */
4462 /** @todo Only the AMD pseudo code check the limit here, what's
4463 * right? */
4464 if (uNewEip > pCtx->csHid.u32Limit)
4465 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
4466
4467 /* V8086 checks and flag adjustments */
4468 if (pCtx->eflags.Bits.u1VM)
4469 {
4470 if (pCtx->eflags.Bits.u2IOPL == 3)
4471 {
4472 /* Preserve IOPL and clear RF. */
4473 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
4474 uNewFlags |= pCtx->eflags.u & (X86_EFL_IOPL);
4475 }
4476 else if ( enmEffOpSize == IEMMODE_16BIT
4477 && ( !(uNewFlags & X86_EFL_IF)
4478 || !pCtx->eflags.Bits.u1VIP )
4479 && !(uNewFlags & X86_EFL_TF) )
4480 {
4481 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
4482 uNewFlags &= ~X86_EFL_VIF;
4483 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
4484 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
4485 uNewFlags |= pCtx->eflags.u & (X86_EFL_IF | X86_EFL_IOPL);
4486 }
4487 else
4488 return iemRaiseGeneralProtectionFault0(pIemCpu);
4489 }
4490
4491 /* commit the operation. */
4492 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
4493 if (rcStrict != VINF_SUCCESS)
4494 return rcStrict;
4495 pCtx->rip = uNewEip;
4496 pCtx->cs = uNewCs;
4497 pCtx->csHid.u64Base = (uint32_t)uNewCs << 4;
4498 /** @todo do we load attribs and limit as well? */
4499 Assert(uNewFlags & X86_EFL_1);
4500 pCtx->eflags.u = uNewFlags;
4501
4502 return VINF_SUCCESS;
4503 }
4504
4505
4506 AssertFailed();
4507 return VERR_NOT_IMPLEMENTED;
4508}
4509
4510
4511/**
4512 * Implements 'mov SReg, r/m'.
4513 *
4514 * @param iSegReg The segment register number (valid).
4515 * @param uSel The new selector value.
4516 */
4517IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
4518{
4519 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4520 uint16_t *pSel = iemSRegRef(pIemCpu, iSegReg);
4521 PCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iSegReg);
4522
4523 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
4524
4525 /*
4526 * Real mode and V8086 mode are easy.
4527 */
4528 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
4529 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4530 {
4531 *pSel = uSel;
4532 pHid->u64Base = (uint32_t)uSel << 4;
4533 /** @todo Does the CPU actually load limits and attributes in the
4534 * real/V8086 mode segment load case? It doesn't for CS in far
4535 * jumps... Affects unreal mode. */
4536 pHid->u32Limit = 0xffff;
4537 pHid->Attr.u = 0;
4538 pHid->Attr.n.u1Present = 1;
4539 pHid->Attr.n.u1DescType = 1;
4540 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
4541 ? X86_SEL_TYPE_RW
4542 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
4543
4544 iemRegAddToRip(pIemCpu, cbInstr);
4545 if (iSegReg == X86_SREG_SS)
4546 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
4547 return VINF_SUCCESS;
4548 }
4549
4550 /*
4551 * Protected mode.
4552 *
4553 * Check if it's a null segment selector value first, that's OK for DS, ES,
4554 * FS and GS. If not null, then we have to load and parse the descriptor.
4555 */
4556 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
4557 {
4558 if (iSegReg == X86_SREG_SS)
4559 {
4560 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
4561 || pIemCpu->uCpl != 0
4562 || uSel != 0) /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? */
4563 {
4564 Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
4565 return iemRaiseGeneralProtectionFault0(pIemCpu);
4566 }
4567
4568 /* In 64-bit kernel mode, the stack can be 0 because of the way
4569 interrupts are dispatched when in kernel ctx. Just load the
4570 selector value into the register and leave the hidden bits
4571 as is. */
4572 *pSel = uSel;
4573 iemRegAddToRip(pIemCpu, cbInstr);
4574 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
4575 return VINF_SUCCESS;
4576 }
4577
4578 *pSel = uSel; /* Not RPL, remember :-) */
4579 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
4580 && iSegReg != X86_SREG_FS
4581 && iSegReg != X86_SREG_GS)
4582 {
4583 /** @todo figure out what this actually does, it works. Needs
4584 * testcase! */
4585 pHid->Attr.u = 0;
4586 pHid->Attr.n.u1Present = 1;
4587 pHid->Attr.n.u1Long = 1;
4588 pHid->Attr.n.u4Type = X86_SEL_TYPE_RW;
4589 pHid->Attr.n.u2Dpl = 3;
4590 pHid->u32Limit = 0;
4591 pHid->u64Base = 0;
4592 }
4593 else
4594 {
4595 pHid->Attr.u = 0;
4596 pHid->u32Limit = 0;
4597 pHid->u64Base = 0;
4598 }
4599 iemRegAddToRip(pIemCpu, cbInstr);
4600 return VINF_SUCCESS;
4601 }
4602
4603 /* Fetch the descriptor. */
4604 IEMSELDESC Desc;
4605 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
4606 if (rcStrict != VINF_SUCCESS)
4607 return rcStrict;
4608
4609 /* Check GPs first. */
4610 if (!Desc.Legacy.Gen.u1DescType)
4611 {
4612 Log(("load sreg %d, %#x - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
4613 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
4614 }
4615 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
4616 {
4617 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4618 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
4619 {
4620 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
4621 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
4622 }
4629 if ((uSel & X86_SEL_RPL) != pIemCpu->uCpl)
4630 {
4631 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pIemCpu->uCpl));
4632 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
4633 }
4634 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
4635 {
4636 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
4637 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
4638 }
4639 }
4640 else
4641 {
4642 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4643 {
4644 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
4645 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
4646 }
4647 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4648 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4649 {
4650#if 0 /* this is what intel says. */
4651 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
4652 && pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
4653 {
4654 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
4655 iSegReg, uSel, (uSel & X86_SEL_RPL), pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
4656 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
4657 }
4658#else /* this is what makes more sense. */
4659 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4660 {
4661 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
4662 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
4663 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
4664 }
4665 if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
4666 {
4667 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
4668 iSegReg, uSel, pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
4669 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
4670 }
4671#endif
4672 }
4673 }
4674
4675 /* Is it there? */
4676 if (!Desc.Legacy.Gen.u1Present)
4677 {
4678 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
4679 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
4680 }
4681
4682 /* The base and limit. */
4683 uint64_t u64Base;
4684 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
4685 if (Desc.Legacy.Gen.u1Granularity)
4686 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
4687
4688 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
4689 && iSegReg < X86_SREG_FS)
4690 u64Base = 0;
4691 else
4692 u64Base = X86DESC_BASE(Desc.Legacy);
4693
4694 /*
4695 * Ok, everything checked out fine. Now set the accessed bit before
4696 * committing the result into the registers.
4697 */
4698 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4699 {
4700 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
4701 if (rcStrict != VINF_SUCCESS)
4702 return rcStrict;
4703 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4704 }
4705
4706 /* commit */
4707 *pSel = uSel;
4708 pHid->Attr.u = (Desc.Legacy.u >> (16+16+8)) & UINT32_C(0xf0ff); /** @todo do we have a define for 0xf0ff? */
4709 pHid->u32Limit = cbLimit;
4710 pHid->u64Base = u64Base;
4711
4712 /** @todo check if the hidden bits are loaded correctly for 64-bit
4713 * mode. */
4714
4715 iemRegAddToRip(pIemCpu, cbInstr);
4716 if (iSegReg == X86_SREG_SS)
4717 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
4718 return VINF_SUCCESS;
4719}
4720
4721
4722/**
4723 * Implements lgs, lfs, les, lds & lss.
4724 */
4725IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
4726 uint16_t, uSel,
4727 uint64_t, offSeg,
4728 uint8_t, iSegReg,
4729 uint8_t, iGReg,
4730 IEMMODE, enmEffOpSize)
4731{
4732 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4733 VBOXSTRICTRC rcStrict;
4734
4735 /*
4736 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
4737 */
4738 /** @todo verify and test that mov, pop and lXs works the segment
4739 * register loading in the exact same way. */
4740 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4741 if (rcStrict == VINF_SUCCESS)
4742 {
4743 switch (enmEffOpSize)
4744 {
4745 case IEMMODE_16BIT:
4746 *(uint16_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
4747 break;
4748 case IEMMODE_32BIT:
4749 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
4750 break;
4751 case IEMMODE_64BIT:
4752 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
4753 break;
4754 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4755 }
4756 }
4757
4758 return rcStrict;
4759}
4760
4761
4762/**
4763 * Implements 'pop SReg'.
4764 *
4765 * @param iSegReg The segment register number (valid).
4766 * @param enmEffOpSize The efficient operand size (valid).
4767 */
4768IEM_CIMPL_DEF_2(iemOpCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
4769{
4770 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4771 VBOXSTRICTRC rcStrict;
4772
4773 /*
4774 * Read the selector off the stack and join paths with mov ss, reg.
4775 */
4776 RTUINT64U TmpRsp;
4777 TmpRsp.u = pCtx->rsp;
4778 switch (enmEffOpSize)
4779 {
4780 case IEMMODE_16BIT:
4781 {
4782 uint16_t uSel;
4783 rcStrict = iemMemStackPopU16Ex(pIemCpu, &uSel, &TmpRsp);
4784 if (rcStrict == VINF_SUCCESS)
4785 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4786 break;
4787 }
4788
4789 case IEMMODE_32BIT:
4790 {
4791 uint32_t u32Value;
4792 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
4793 if (rcStrict == VINF_SUCCESS)
4794 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
4795 break;
4796 }
4797
4798 case IEMMODE_64BIT:
4799 {
4800 uint64_t u64Value;
4801 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
4802 if (rcStrict == VINF_SUCCESS)
4803 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
4804 break;
4805 }
4806 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4807 }
4808
4809 /*
4810 * Commit the stack on success.
4811 */
4812 if (rcStrict == VINF_SUCCESS)
4813 pCtx->rsp = TmpRsp.u;
4814 return rcStrict;
4815}
4816
4817
4818/**
4819 * Implements lgdt.
4820 *
4821 * @param iEffSeg The segment of the new gdtr contents.
4822 * @param GCPtrEffSrc The address of the new gdtr contents.
4823 * @param enmEffOpSize The effective operand size.
4824 */
4825IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
4826{
4827 if (pIemCpu->uCpl != 0)
4828 return iemRaiseGeneralProtectionFault0(pIemCpu);
4829 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
4830
4831 /*
4832 * Fetch the limit and base address.
4833 */
4834 uint16_t cbLimit;
4835 RTGCPTR GCPtrBase;
4836 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
4837 if (rcStrict == VINF_SUCCESS)
4838 {
4839#if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM)
4840 rcStrict = CPUMSetGuestGDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
4841#else
4842 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4843 pCtx->gdtr.cbGdt = cbLimit;
4844 pCtx->gdtr.pGdt = GCPtrBase;
4845#endif
4846 if (rcStrict == VINF_SUCCESS)
4847 iemRegAddToRip(pIemCpu, cbInstr);
4848 }
4849 return rcStrict;
4850}
4851
4852
4853/**
4854 * Implements lidt.
4855 *
4856 * @param iEffSeg The segment of the new idtr contents.
4857 * @param GCPtrEffSrc The address of the new idtr contents.
4858 * @param enmEffOpSize The effective operand size.
4859 */
4860IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
4861{
4862 if (pIemCpu->uCpl != 0)
4863 return iemRaiseGeneralProtectionFault0(pIemCpu);
4864 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
4865
4866 /*
4867 * Fetch the limit and base address.
4868 */
4869 uint16_t cbLimit;
4870 RTGCPTR GCPtrBase;
4871 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
4872 if (rcStrict == VINF_SUCCESS)
4873 {
4874#if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM)
4875 rcStrict = CPUMSetGuestIDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
4876#else
4877 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4878 pCtx->idtr.cbIdt = cbLimit;
4879 pCtx->idtr.pIdt = GCPtrBase;
4880#endif
4881 if (rcStrict == VINF_SUCCESS)
4882 iemRegAddToRip(pIemCpu, cbInstr);
4883 }
4884 return rcStrict;
4885}
4886
4887
4888/**
4889 * Implements mov GReg,CRx.
4890 *
4891 * @param iGReg The general register to store the CRx value in.
4892 * @param iCrReg The CRx register to read (valid).
4893 */
4894IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
4895{
4896 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4897 if (pIemCpu->uCpl != 0)
4898 return iemRaiseGeneralProtectionFault0(pIemCpu);
4899 Assert(!pCtx->eflags.Bits.u1VM);
4900
4901 /* read it */
4902 uint64_t crX;
4903 switch (iCrReg)
4904 {
4905 case 0: crX = pCtx->cr0; break;
4906 case 2: crX = pCtx->cr2; break;
4907 case 3: crX = pCtx->cr3; break;
4908 case 4: crX = pCtx->cr4; break;
4909 case 8:
4910#if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM)
4911 AssertFailedReturn(VERR_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
4912#else
4913 crX = 0xff;
4914#endif
4915 break;
4916 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
4917 }
4918
4919 /* store it */
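    /* Outside 64-bit mode only the low 32 bits of the destination register are
       architecturally visible, hence the truncation in the second case. */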
4920 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4921 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = crX;
4922 else
4923 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)crX;
4924
4925 iemRegAddToRip(pIemCpu, cbInstr);
4926 return VINF_SUCCESS;
4927}
4928
4929
4930/**
4931 * Implements mov CRx,GReg.
4932 *
4933 * @param iCrReg The CRx register to write (valid).
4934 * @param iGReg The general register to load the CRx value from.
4935 */
4936IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
4937{
4938 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4939 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4940 VBOXSTRICTRC rcStrict;
4941 int rc;
4942
4943 if (pIemCpu->uCpl != 0)
4944 return iemRaiseGeneralProtectionFault0(pIemCpu);
4945 Assert(!pCtx->eflags.Bits.u1VM);
4946
4947 /*
4948 * Read the new value from the source register.
4949 */
4950 uint64_t NewCrX;
4951 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4952 NewCrX = iemGRegFetchU64(pIemCpu, iGReg);
4953 else
4954 NewCrX = iemGRegFetchU32(pIemCpu, iGReg);
4955
4956 /*
4957 * Try store it.
4958 * Unfortunately, CPUM only does a tiny bit of the work.
4959 */
4960 switch (iCrReg)
4961 {
4962 case 0:
4963 {
4964 /*
4965 * Perform checks.
4966 */
4967 uint64_t const OldCrX = pCtx->cr0;
4968 NewCrX |= X86_CR0_ET; /* hardcoded */
4969
4970 /* Check for reserved bits. */
4971 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
4972 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
4973 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
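            /* Anything outside this mask is a reserved CR0 bit and must fault,
               unlike CR3 below where reserved low bits are silently dropped. */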
4974 if (NewCrX & ~(uint64_t)fValid)
4975 {
4976 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", NewCrX, NewCrX & ~(uint64_t)fValid));
4977 return iemRaiseGeneralProtectionFault0(pIemCpu);
4978 }
4979
4980 /* Check for invalid combinations. */
4981 if ( (NewCrX & X86_CR0_PG)
4982 && !(NewCrX & X86_CR0_PE) )
4983 {
4984 Log(("Trying to set CR0.PG without CR0.PE\n"));
4985 return iemRaiseGeneralProtectionFault0(pIemCpu);
4986 }
4987
4988 if ( !(NewCrX & X86_CR0_CD)
4989 && (NewCrX & X86_CR0_NW) )
4990 {
4991 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
4992 return iemRaiseGeneralProtectionFault0(pIemCpu);
4993 }
4994
4995 /* Long mode consistency checks. */
4996 if ( (NewCrX & X86_CR0_PG)
4997 && !(OldCrX & X86_CR0_PG)
4998 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
4999 {
5000 if (!(pCtx->cr4 & X86_CR4_PAE))
5001 {
5002 Log(("Trying to enable long mode paging without CR4.PAE set\n"));
5003 return iemRaiseGeneralProtectionFault0(pIemCpu);
5004 }
5005 if (pCtx->csHid.Attr.n.u1Long)
5006 {
5007 Log(("Trying to enable long mode paging with a long CS descriptor loaded.\n"));
5008 return iemRaiseGeneralProtectionFault0(pIemCpu);
5009 }
5010 }
5011
5012 /** @todo check reserved PDPTR bits as AMD states. */
5013
5014 /*
5015 * Change CR0.
5016 */
5017#if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM)
5018 rc = CPUMSetGuestCR0(pVCpu, NewCrX);
5019 AssertRCSuccessReturn(rc, RT_FAILURE_NP(rc) ? rc : VERR_INTERNAL_ERROR_3);
5020#else
5021 pCtx->cr0 = NewCrX;
5022#endif
5023 Assert(pCtx->cr0 == NewCrX);
5024
5025 /*
5026 * Change EFER.LMA if entering or leaving long mode.
5027 */
5028 if ( (NewCrX & X86_CR0_PG) != (OldCrX & X86_CR0_PG)
5029 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
5030 {
5031 uint64_t NewEFER = pCtx->msrEFER;
5032 if (NewCrX & X86_CR0_PG)
5033 NewEFER |= MSR_K6_EFER_LME;
5034 else
5035 NewEFER &= ~MSR_K6_EFER_LME;
5036
5037#if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM)
5038 CPUMSetGuestEFER(pVCpu, NewEFER);
5039#else
5040 pCtx->msrEFER = NewEFER;
5041#endif
5042 Assert(pCtx->msrEFER == NewEFER);
5043 }
5044
5045#if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM)
5046 /*
5047 * Inform PGM.
5048 */
5049 if ( (NewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
5050 != (OldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
5051 {
5052 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
5053 AssertRCReturn(rc, rc);
5054 /* ignore informational status codes */
5055 }
5056 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
5057 /** @todo Status code management. */
5058#else
5059 rcStrict = VINF_SUCCESS;
5060#endif
5061 break;
5062 }
5063
5064 /*
5065 * CR2 can be changed without any restrictions.
5066 */
5067 case 2:
5068 pCtx->cr2 = NewCrX;
5069 rcStrict = VINF_SUCCESS;
5070 break;
5071
5072 /*
5073 * CR3 is relatively simple, although AMD and Intel have different
5074 * accounts of how setting reserved bits is handled. We take Intel's
5075 * word for the lower bits and AMD's for the high bits (63:52).
5076 */
5077 /** @todo Testcase: Setting reserved bits in CR3, especially before
5078 * enabling paging. */
5079 case 3:
5080 {
5081 /* check / mask the value. */
5082 if (NewCrX & UINT64_C(0xfff0000000000000))
5083 {
5084 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", NewCrX));
5085 return iemRaiseGeneralProtectionFault0(pIemCpu);
5086 }
5087
5088 uint64_t fValid;
5089 if ( (pCtx->cr4 & X86_CR4_PAE)
5090 && (pCtx->msrEFER & MSR_K6_EFER_LME))
5091 fValid = UINT64_C(0x000ffffffffff014);
5092 else if (pCtx->cr4 & X86_CR4_PAE)
5093 fValid = UINT64_C(0xfffffff4);
5094 else
5095 fValid = UINT64_C(0xfffff014);
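            /* Reserved bits below 52 are cleared silently (the AMD behaviour)
               rather than faulting; only bits 63:52 raise #GP, per the check
               above. */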
5096 if (NewCrX & ~fValid)
5097 {
5098 Log(("Automatically clearing reserved bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
5099 NewCrX, NewCrX & ~fValid));
5100 NewCrX &= fValid;
5101 }
5102
5103 /** @todo If we're in PAE mode we should check the PDPTRs for
5104 * invalid bits. */
5105
5106 /* Make the change. */
5107#if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM)
5108 rc = CPUMSetGuestCR3(pVCpu, NewCrX);
5109 AssertRCSuccessReturn(rc, rc);
5110#else
5111 pCtx->cr3 = NewCrX;
5112#endif
5113
5114#if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM)
5115 /* Inform PGM. */
5116 if (pCtx->cr0 & X86_CR0_PG)
5117 {
5118 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
5119 AssertRCReturn(rc, rc);
5120 /* ignore informational status codes */
5121 /** @todo status code management */
5122 }
5123#endif
5124 rcStrict = VINF_SUCCESS;
5125 break;
5126 }
5127
5128 /*
5129 * CR4 is a bit more tedious as there are bits which cannot be cleared
5130 * under some circumstances and such.
5131 */
5132 case 4:
5133 {
5134 uint64_t const OldCrX = pCtx->cr4;
5135
5136 /* reserved bits */
5137 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
5138 | X86_CR4_TSD | X86_CR4_DE
5139 | X86_CR4_PSE | X86_CR4_PAE
5140 | X86_CR4_MCE | X86_CR4_PGE
5141 | X86_CR4_PCE | X86_CR4_OSFSXR
5142 | X86_CR4_OSXMMEEXCPT;
5143 //if (xxx)
5144 // fValid |= X86_CR4_VMXE;
5145 //if (xxx)
5146 // fValid |= X86_CR4_OSXSAVE;
5147 if (NewCrX & ~(uint64_t)fValid)
5148 {
5149 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", NewCrX, NewCrX & ~(uint64_t)fValid));
5150 return iemRaiseGeneralProtectionFault0(pIemCpu);
5151 }
5152
5153 /* long mode checks. */
5154 if ( (OldCrX & X86_CR4_PAE)
5155 && !(NewCrX & X86_CR4_PAE)
5156 && (pCtx->msrEFER & MSR_K6_EFER_LMA) )
5157 {
5158 Log(("Trying to clear CR4.PAE while long mode is active\n"));
5159 return iemRaiseGeneralProtectionFault0(pIemCpu);
5160 }
5161
5162
5163 /*
5164 * Change it.
5165 */
5166#if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM)
5167 rc = CPUMSetGuestCR4(pVCpu, NewCrX);
5168 AssertRCSuccessReturn(rc, rc);
5169#else
5170 pCtx->cr4 = NewCrX;
5171#endif
5172 Assert(pCtx->cr4 == NewCrX);
5173
5174 /*
5175 * Notify SELM and PGM.
5176 */
5177#if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM)
5178 /* SELM - VME may change things wrt the TSS shadowing. */
5179 if ((NewCrX ^ OldCrX) & X86_CR4_VME)
5180 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
5181
5182 /* PGM - flushing and mode. */
5183 if ( (NewCrX & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE))
5184 != (OldCrX & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE)) )
5185 {
5186 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
5187 AssertRCReturn(rc, rc);
5188 /* ignore informational status codes */
5189 }
5190 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
5191 /** @todo Status code management. */
5192#else
5193 rcStrict = VINF_SUCCESS;
5194#endif
5195 break;
5196 }
5197
5198 /*
5199 * CR8 maps to the APIC TPR.
5200 */
5201 case 8:
5202#if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM)
5203 AssertFailedReturn(VERR_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
5204#else
5205 rcStrict = VINF_SUCCESS;
5206#endif
5207 break;
5208
5209 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5210 }
5211
5212 /*
5213 * Advance the RIP on success.
5214 */
5215 /** @todo Status code management. */
5216 if (rcStrict == VINF_SUCCESS)
5217 iemRegAddToRip(pIemCpu, cbInstr);
5218 return rcStrict;
5219}
5220
5221
5222/**
5223 * Implements 'IN eAX, port'.
5224 *
5225 * @param u16Port The source port.
5226 * @param cbReg The register size.
5227 */
5228IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
5229{
5230 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5231
5232 /*
5233 * CPL check
5234 */
5235 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
5236 if (rcStrict != VINF_SUCCESS)
5237 return rcStrict;
5238
5239 /*
5240 * Perform the I/O.
5241 */
5242 uint32_t u32Value;
5243#if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM)
5244 rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), u16Port, &u32Value, cbReg);
5245#else
5246 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, cbReg);
5247#endif
5248 if (IOM_SUCCESS(rcStrict))
5249 {
5250 switch (cbReg)
5251 {
5252 case 1: pCtx->al = (uint8_t)u32Value; break;
5253 case 2: pCtx->ax = (uint16_t)u32Value; break;
5254 case 4: pCtx->rax = u32Value; break;
5255 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
5256 }
5257 iemRegAddToRip(pIemCpu, cbInstr);
5258 pIemCpu->cPotentialExits++;
5259 }
5260 /** @todo massage rcStrict. */
5261 return rcStrict;
5262}
5263
5264
5265/**
5266 * Implements 'IN eAX, DX'.
5267 *
5268 * @param cbReg The register size.
5269 */
5270IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
5271{
5272 return IEM_CIMPL_CALL_2(iemCImpl_in, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
5273}
5274
5275
5276/**
5277 * Implements 'OUT port, eAX'.
5278 *
5279 * @param u16Port The destination port.
5280 * @param cbReg The register size.
5281 */
5282IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
5283{
5284 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5285
5286 /*
5287 * CPL check
5288 */
5289 if ( (pCtx->cr0 & X86_CR0_PE)
5290 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
5291 || pCtx->eflags.Bits.u1VM) )
5292 {
5293 /** @todo I/O port permission bitmap check */
5294 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
5295 }
5296
5297 /*
5298 * Perform the I/O.
5299 */
5300 uint32_t u32Value;
5301 switch (cbReg)
5302 {
5303 case 1: u32Value = pCtx->al; break;
5304 case 2: u32Value = pCtx->ax; break;
5305 case 4: u32Value = pCtx->eax; break;
5306 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
5307 }
5308# if !defined(IEM_VERIFICATION_MODE) || defined(IEM_VERIFICATION_MODE_NO_REM)
5309 VBOXSTRICTRC rc = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), u16Port, u32Value, cbReg);
5310# else
5311 VBOXSTRICTRC rc = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, cbReg);
5312# endif
5313 if (IOM_SUCCESS(rc))
5314 {
5315 iemRegAddToRip(pIemCpu, cbInstr);
5316 pIemCpu->cPotentialExits++;
5317 /** @todo massage rc. */
5318 }
5319 return rc;
5320}
5321
5322
5323/**
5324 * Implements 'OUT DX, eAX'.
5325 *
5326 * @param cbReg The register size.
5327 */
5328IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
5329{
5330 return IEM_CIMPL_CALL_2(iemCImpl_out, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
5331}
5332
5333
5334/**
5335 * Implements 'CLI'.
5336 */
5337IEM_CIMPL_DEF_0(iemCImpl_cli)
5338{
5339 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5340
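    /* Protected mode: CPL <= IOPL may clear IF; CPL 3 with CR4.PVI set clears
       VIF instead; anything else faults.  V8086: IOPL 3 clears IF, IOPL < 3
       requires CR4.VME to clear VIF.  Real mode always just clears IF. */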
5341 if (pCtx->cr0 & X86_CR0_PE)
5342 {
5343 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
5344 if (!pCtx->eflags.Bits.u1VM)
5345 {
5346 if (pIemCpu->uCpl <= uIopl)
5347 pCtx->eflags.Bits.u1IF = 0;
5348 else if ( pIemCpu->uCpl == 3
5349 && (pCtx->cr4 & X86_CR4_PVI) )
5350 pCtx->eflags.Bits.u1VIF = 0;
5351 else
5352 return iemRaiseGeneralProtectionFault0(pIemCpu);
5353 }
5354 /* V8086 */
5355 else if (uIopl == 3)
5356 pCtx->eflags.Bits.u1IF = 0;
5357 else if ( uIopl < 3
5358 && (pCtx->cr4 & X86_CR4_VME) )
5359 pCtx->eflags.Bits.u1VIF = 0;
5360 else
5361 return iemRaiseGeneralProtectionFault0(pIemCpu);
5362 }
5363 /* real mode */
5364 else
5365 pCtx->eflags.Bits.u1IF = 0;
5366 iemRegAddToRip(pIemCpu, cbInstr);
5367 return VINF_SUCCESS;
5368}
5369
5370
5371/**
5372 * Implements 'STI'.
5373 */
5374IEM_CIMPL_DEF_0(iemCImpl_sti)
5375{
5376 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5377
5378 if (pCtx->cr0 & X86_CR0_PE)
5379 {
5380 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
5381 if (!pCtx->eflags.Bits.u1VM)
5382 {
5383 if (pIemCpu->uCpl <= uIopl)
5384 pCtx->eflags.Bits.u1IF = 1;
5385 else if ( pIemCpu->uCpl == 3
5386 && (pCtx->cr4 & X86_CR4_PVI)
5387 && !pCtx->eflags.Bits.u1VIP )
5388 pCtx->eflags.Bits.u1VIF = 1;
5389 else
5390 return iemRaiseGeneralProtectionFault0(pIemCpu);
5391 }
5392 /* V8086 */
5393 else if (uIopl == 3)
5394 pCtx->eflags.Bits.u1IF = 1;
5395 else if ( uIopl < 3
5396 && (pCtx->cr4 & X86_CR4_VME)
5397 && !pCtx->eflags.Bits.u1VIP )
5398 pCtx->eflags.Bits.u1VIF = 1;
5399 else
5400 return iemRaiseGeneralProtectionFault0(pIemCpu);
5401 }
5402 /* real mode */
5403 else
5404 pCtx->eflags.Bits.u1IF = 1;
5405
5406 iemRegAddToRip(pIemCpu, cbInstr);
5407 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
5408 return VINF_SUCCESS;
5409}
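
/*
 * Editor's summary (not part of the original source) of the interrupt flag
 * rules implemented by CLI/STI above: in protected mode with CPL <= IOPL the
 * instructions toggle EFLAGS.IF directly; with CPL == 3 and CR4.PVI set they
 * toggle the virtual flag EFLAGS.VIF instead (STI additionally requires VIP
 * to be clear); any other combination raises #GP(0).  In V8086 mode IOPL == 3
 * toggles IF, IOPL < 3 with CR4.VME toggles VIF under the same VIP rule, and
 * everything else raises #GP(0).  Real mode always toggles IF.  Note that STI
 * also arms the one-instruction interrupt inhibition via
 * EMSetInhibitInterruptsPC.
 */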
5410
5411
5412/*
5413 * Instantiate the various string operation combinations.
5414 */
5415#define OP_SIZE 8
5416#define ADDR_SIZE 16
5417#include "IEMAllCImplStrInstr.cpp.h"
5418#define OP_SIZE 8
5419#define ADDR_SIZE 32
5420#include "IEMAllCImplStrInstr.cpp.h"
5421#define OP_SIZE 8
5422#define ADDR_SIZE 64
5423#include "IEMAllCImplStrInstr.cpp.h"
5424
5425#define OP_SIZE 16
5426#define ADDR_SIZE 16
5427#include "IEMAllCImplStrInstr.cpp.h"
5428#define OP_SIZE 16
5429#define ADDR_SIZE 32
5430#include "IEMAllCImplStrInstr.cpp.h"
5431#define OP_SIZE 16
5432#define ADDR_SIZE 64
5433#include "IEMAllCImplStrInstr.cpp.h"
5434
5435#define OP_SIZE 32
5436#define ADDR_SIZE 16
5437#include "IEMAllCImplStrInstr.cpp.h"
5438#define OP_SIZE 32
5439#define ADDR_SIZE 32
5440#include "IEMAllCImplStrInstr.cpp.h"
5441#define OP_SIZE 32
5442#define ADDR_SIZE 64
5443#include "IEMAllCImplStrInstr.cpp.h"
5444
5445#define OP_SIZE 64
5446#define ADDR_SIZE 32
5447#include "IEMAllCImplStrInstr.cpp.h"
5448#define OP_SIZE 64
5449#define ADDR_SIZE 64
5450#include "IEMAllCImplStrInstr.cpp.h"
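
/*
 * Editor's note (an assumption, not stated in this file): each inclusion
 * above is expected to expand one OP_SIZE / ADDR_SIZE combination of the
 * string instruction workers, with the template header presumably
 * #undef'ing the two macros before it ends so they can be redefined for the
 * next combination.  The 64-bit operand size is only instantiated for 32-bit
 * and 64-bit address sizes, since 16-bit addressing is not available in long
 * mode, which is the only mode offering a 64-bit operand size.
 */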
5451
5452
5453/** @} */
5454
5455
5456/** @name "Microcode" macros.
5457 *
5458 * The idea is that we should be able to use the same code to interpret
5459 * instructions as well as recompiler instructions. Thus this obfuscation.
5460 *
5461 * @{
5462 */
5463#define IEM_MC_BEGIN(cArgs, cLocals) {
5464#define IEM_MC_END() }
5465#define IEM_MC_PAUSE() do {} while (0)
5466#define IEM_MC_CONTINUE() do {} while (0)
5467
5468/** Internal macro. */
5469#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
5470 do \
5471 { \
5472 VBOXSTRICTRC rcStrict2 = a_Expr; \
5473 if (rcStrict2 != VINF_SUCCESS) \
5474 return rcStrict2; \
5475 } while (0)
5476
5477#define IEM_MC_ADVANCE_RIP() iemRegUpdateRip(pIemCpu)
5478#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
5479#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
5480#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
5481#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
5482#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
5483#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
5484
5485#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
5486
5487#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
5488#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
5489#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
5490#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
5491#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
5492#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
5493 uint32_t a_Name; \
5494 uint32_t *a_pName = &a_Name
5495#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
5496 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
5497
5498#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
5499
5500#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
5501#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
5502#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
5503#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
5504#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
5505#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
5506#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
5507#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
5508#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
5509#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
5510#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
5511#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
5512#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
5513#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
5514
5515#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
5516#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
5517#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
5518#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
5519
5520#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
5521#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
5522/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on
5523 * commit. */
5524#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
5525#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
5526#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
5527
5528#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
5529#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
5530#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
5531 do { \
5532 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
5533 *pu32Reg += (a_u32Value); \
5534        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
5535 } while (0)
5536#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
5537
5538#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
5539#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
5540#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
5541 do { \
5542 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
5543 *pu32Reg -= (a_u32Value); \
5544        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
5545 } while (0)
5546#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
5547
5548#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
5549#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
5550
5551
5552
5553#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
5554 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
5555#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
5556 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
5557#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
5558 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
5559#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5560 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
5561#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5562 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
5563
5564#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
5565 do { \
5566 uint8_t u8Tmp; \
5567 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
5568 (a_u16Dst) = u8Tmp; \
5569 } while (0)
5570#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
5571 do { \
5572 uint8_t u8Tmp; \
5573 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
5574 (a_u32Dst) = u8Tmp; \
5575 } while (0)
5576#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5577 do { \
5578 uint8_t u8Tmp; \
5579 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
5580 (a_u64Dst) = u8Tmp; \
5581 } while (0)
5582#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
5583 do { \
5584 uint16_t u16Tmp; \
5585 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
5586 (a_u32Dst) = u16Tmp; \
5587 } while (0)
5588#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5589 do { \
5590 uint16_t u16Tmp; \
5591 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
5592 (a_u64Dst) = u16Tmp; \
5593 } while (0)
5594#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5595 do { \
5596 uint32_t u32Tmp; \
5597 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
5598 (a_u64Dst) = u32Tmp; \
5599 } while (0)
5600
5601#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
5602 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
5603#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
5604 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
5605#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
5606 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
5607#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
5608 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
5609
5610#define IEM_MC_PUSH_U16(a_u16Value) \
5611 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
5612#define IEM_MC_PUSH_U32(a_u32Value) \
5613 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
5614#define IEM_MC_PUSH_U64(a_u64Value) \
5615 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
5616
5617#define IEM_MC_POP_U16(a_pu16Value) \
5618 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
5619#define IEM_MC_POP_U32(a_pu32Value) \
5620 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
5621#define IEM_MC_POP_U64(a_pu64Value) \
5622 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
5623
5624/** Maps guest memory for direct or bounce buffered access.
5625 * The purpose is to pass it to an operand implementation, thus the a_iArg.
5626 * @remarks May return.
5627 */
5628#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
5629 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
5630
5631/** Maps guest memory for direct or bounce buffered access.
5632 * The purpose is to pass it to an operand implementation, thus the a_iArg.
5633 * @remarks May return.
5634 */
5635#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
5636 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
5637
5638/** Commits the memory and unmaps the guest memory.
5639 * @remarks May return.
5640 */
5641#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
5642 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
5643
5644/** Calculate efficient address from R/M. */
5645#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm) \
5646 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), &(a_GCPtrEff)))
5647
5648#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
5649#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
5650#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
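
/*
 * Editor's illustration (not part of the original source): a rough sketch of
 * how a read-modify-write "r/m16, reg" style decoder is expected to compose
 * the microcode macros above.  The opcode name, the iemAImpl_xxx_u16 worker
 * and the IEM_ACCESS_DATA_RW constant are assumptions for the example's sake;
 * REX handling is omitted for brevity.
 */
#if 0 /* illustrative sketch only */
FNIEMOP_DEF(iemOp_example_Ev_Gv_16)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);

    IEM_MC_BEGIN(3, 2);
    IEM_MC_ARG(uint16_t *,          pu16Dst,          0);
    IEM_MC_ARG(uint16_t,            u16Src,           1);
    IEM_MC_ARG_LOCAL_EFLAGS(        pEFlags, EFlags,  2);
    IEM_MC_LOCAL(RTGCPTR,           GCPtrEffDst);

    /* Map the destination operand, fetch the source register and the flags. */
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
    IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
    IEM_MC_FETCH_GREG_U16(u16Src, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
    IEM_MC_FETCH_EFLAGS(EFlags);

    /* Do the work, then commit memory and flags and advance RIP. */
    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xxx_u16, pu16Dst, u16Src, pEFlags);
    IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
    IEM_MC_COMMIT_EFLAGS(EFlags);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
#endif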
5651
5652/**
5653 * Defers the rest of the instruction emulation to a C implementation routine
5654 * and returns, only taking the standard parameters.
5655 *
5656 * @param a_pfnCImpl The pointer to the C routine.
5657 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
5658 */
5659#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
5660
5661/**
5662 * Defers the rest of instruction emulation to a C implementation routine and
5663 * returns, taking one argument in addition to the standard ones.
5664 *
5665 * @param a_pfnCImpl The pointer to the C routine.
5666 * @param a0 The argument.
5667 */
5668#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
5669
5670/**
5671 * Defers the rest of the instruction emulation to a C implementation routine
5672 * and returns, taking two arguments in addition to the standard ones.
5673 *
5674 * @param a_pfnCImpl The pointer to the C routine.
5675 * @param a0 The first extra argument.
5676 * @param a1 The second extra argument.
5677 */
5678#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
5679
5680/**
5681 * Defers the rest of the instruction emulation to a C implementation routine
5682 * and returns, taking three arguments in addition to the standard ones.
5683 *
5684 * @param a_pfnCImpl The pointer to the C routine.
5685 * @param a0 The first extra argument.
5686 * @param a1 The second extra argument.
5687 * @param a2 The third extra argument.
5688 */
5689#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
5690
5691/**
5692 * Defers the rest of the instruction emulation to a C implementation routine
5693 * and returns, taking five arguments in addition to the standard ones.
5694 *
5695 * @param a_pfnCImpl The pointer to the C routine.
5696 * @param a0 The first extra argument.
5697 * @param a1 The second extra argument.
5698 * @param a2 The third extra argument.
5699 * @param a3 The fourth extra argument.
5700 * @param a4 The fifth extra argument.
5701 */
5702#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
5703
5704/**
5705 * Defers the entire instruction emulation to a C implementation routine and
5706 * returns, only taking the standard parameters.
5707 *
5708 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
5709 *
5710 * @param a_pfnCImpl The pointer to the C routine.
5711 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
5712 */
5713#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
5714
5715/**
5716 * Defers the entire instruction emulation to a C implementation routine and
5717 * returns, taking one argument in addition to the standard ones.
5718 *
5719 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
5720 *
5721 * @param a_pfnCImpl The pointer to the C routine.
5722 * @param a0 The argument.
5723 */
5724#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
5725
5726/**
5727 * Defers the entire instruction emulation to a C implementation routine and
5728 * returns, taking two arguments in addition to the standard ones.
5729 *
5730 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
5731 *
5732 * @param a_pfnCImpl The pointer to the C routine.
5733 * @param a0 The first extra argument.
5734 * @param a1 The second extra argument.
5735 */
5736#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
5737
5738#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
5739#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
5740#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
5741 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
5742 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
5743#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
5744 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
5745 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
5746 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
5747#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
5748#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
5749#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
5750#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
5751 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
5752 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5753#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
5754 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
5755 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5756#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
5757 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
5758 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5759#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
5760 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
5761 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5762#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
5763 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
5764 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5765#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
5766 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
5767 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5768#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
5769#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
5770#define IEM_MC_ELSE() } else {
5771#define IEM_MC_ENDIF() } do {} while (0)
5772
5773/** @} */
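
/*
 * Editor's illustration (not part of the original source): instructions that
 * are implemented entirely in C, such as the CLI/STI implementations above,
 * would typically be wired up by their decoder with a one-liner along these
 * lines (the decoder itself lives in IEMAllInstructions.cpp.h):
 *
 *     FNIEMOP_DEF(iemOp_cli)
 *     {
 *         IEMOP_MNEMONIC("cli");
 *         return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cli);
 *     }
 */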
5774
5775
5776/** @name Opcode Debug Helpers.
5777 * @{
5778 */
5779#ifdef DEBUG
5780# define IEMOP_MNEMONIC(a_szMnemonic) \
5781 Log2(("decode - %04x:%08RGv %s\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, a_szMnemonic))
5782# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
5783 Log2(("decode - %04x:%08RGv %s %s\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, a_szMnemonic, a_szOps))
5784#else
5785# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
5786# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
5787#endif
5788
5789/** @} */
5790
5791
5792/** @name Opcode Helpers.
5793 * @{
5794 */
5795
5796/** The instruction allows no lock prefixing (in this encoding), throw #UD if
5797 * lock prefixed. */
5798#define IEMOP_HLP_NO_LOCK_PREFIX() \
5799 do \
5800 { \
5801 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
5802 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
5803 } while (0)
5804
5805/** The instruction is not available in 64-bit mode, throw #UD if we're in
5806 * 64-bit mode. */
5807#define IEMOP_HLP_NO_64BIT() \
5808 do \
5809 { \
5810        if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
5811 return IEMOP_RAISE_INVALID_OPCODE(); \
5812 } while (0)
5813
5814/** The instruction defaults to 64-bit operand size if 64-bit mode. */
5815#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
5816 do \
5817 { \
5818 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
5819 iemRecalEffOpSize64Default(pIemCpu); \
5820 } while (0)
5821
5822
5823
5824/**
5825 * Calculates the effective address of a ModR/M memory operand.
5826 *
5827 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
5828 *
5829 * @return Strict VBox status code.
5830 * @param pIemCpu The IEM per CPU data.
5831 * @param bRm The ModRM byte.
5832 * @param pGCPtrEff Where to return the effective address.
5833 */
5834static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, PRTGCPTR pGCPtrEff)
5835{
5836 LogFlow(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
5837 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5838#define SET_SS_DEF() \
5839 do \
5840 { \
5841 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
5842 pIemCpu->iEffSeg = X86_SREG_SS; \
5843 } while (0)
5844
5845/** @todo Check the effective address size crap! */
5846 switch (pIemCpu->enmEffAddrMode)
5847 {
5848 case IEMMODE_16BIT:
5849 {
5850 uint16_t u16EffAddr;
5851
5852 /* Handle the disp16 form with no registers first. */
5853 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
5854 IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16EffAddr);
5855 else
5856 {
5857                /* Get the displacement. */
5858 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
5859 {
5860 case 0: u16EffAddr = 0; break;
5861 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(pIemCpu, &u16EffAddr); break;
5862 case 2: IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16EffAddr); break;
5863 default: AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
5864 }
5865
5866 /* Add the base and index registers to the disp. */
5867 switch (bRm & X86_MODRM_RM_MASK)
5868 {
5869 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
5870 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
5871 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
5872 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
5873 case 4: u16EffAddr += pCtx->si; break;
5874 case 5: u16EffAddr += pCtx->di; break;
5875 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
5876 case 7: u16EffAddr += pCtx->bx; break;
5877 }
5878 }
5879
5880 *pGCPtrEff = u16EffAddr;
5881 LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#06RGv\n", *pGCPtrEff));
5882 return VINF_SUCCESS;
5883 }
5884
5885 case IEMMODE_32BIT:
5886 {
5887 uint32_t u32EffAddr;
5888
5889 /* Handle the disp32 form with no registers first. */
5890 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
5891 IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32EffAddr);
5892 else
5893 {
5894 /* Get the register (or SIB) value. */
5895 switch ((bRm & X86_MODRM_RM_MASK))
5896 {
5897 case 0: u32EffAddr = pCtx->eax; break;
5898 case 1: u32EffAddr = pCtx->ecx; break;
5899 case 2: u32EffAddr = pCtx->edx; break;
5900 case 3: u32EffAddr = pCtx->ebx; break;
5901 case 4: /* SIB */
5902 {
5903 uint8_t bSib; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bSib);
5904
5905 /* Get the index and scale it. */
5906                    switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
5907 {
5908 case 0: u32EffAddr = pCtx->eax; break;
5909 case 1: u32EffAddr = pCtx->ecx; break;
5910 case 2: u32EffAddr = pCtx->edx; break;
5911 case 3: u32EffAddr = pCtx->ebx; break;
5912 case 4: u32EffAddr = 0; /*none */ break;
5913 case 5: u32EffAddr = pCtx->ebp; break;
5914 case 6: u32EffAddr = pCtx->esi; break;
5915 case 7: u32EffAddr = pCtx->edi; break;
5916 }
5917 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
5918
5919 /* add base */
5920 switch (bSib & X86_SIB_BASE_MASK)
5921 {
5922 case 0: u32EffAddr += pCtx->eax; break;
5923 case 1: u32EffAddr += pCtx->ecx; break;
5924 case 2: u32EffAddr += pCtx->edx; break;
5925 case 3: u32EffAddr += pCtx->ebx; break;
5926 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
5927 case 5:
5928 if ((bRm & X86_MODRM_MOD_MASK) != 0)
5929 {
5930 u32EffAddr += pCtx->ebp;
5931 SET_SS_DEF();
5932 }
5933 else
5934 {
5935 uint32_t u32Disp;
5936 IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Disp);
5937 u32EffAddr += u32Disp;
5938 }
5939 break;
5940 case 6: u32EffAddr += pCtx->esi; break;
5941 case 7: u32EffAddr += pCtx->edi; break;
5942 }
5943 break;
5944 }
5945 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
5946 case 6: u32EffAddr = pCtx->esi; break;
5947 case 7: u32EffAddr = pCtx->edi; break;
5948 }
5949
5950 /* Get and add the displacement. */
5951 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
5952 {
5953 case 0:
5954 break;
5955 case 1:
5956 {
5957 int8_t i8Disp;
5958 IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Disp);
5959 u32EffAddr += i8Disp;
5960 break;
5961 }
5962 case 2:
5963 {
5964 uint32_t u32Disp;
5965 IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Disp);
5966 u32EffAddr += u32Disp;
5967 break;
5968 }
5969 default:
5970 AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
5971 }
5972
5973 }
5974 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
5975 *pGCPtrEff = u32EffAddr;
5976 else
5977 {
5978 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
5979 *pGCPtrEff = u32EffAddr & UINT16_MAX;
5980 }
5981 LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
5982 return VINF_SUCCESS;
5983 }
5984
5985 case IEMMODE_64BIT:
5986 {
5987 uint64_t u64EffAddr;
5988
5989 /* Handle the rip+disp32 form with no registers first. */
5990 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
5991 {
5992 IEM_OPCODE_GET_NEXT_S32_SX_U64(pIemCpu, &u64EffAddr);
5993 u64EffAddr += pCtx->rip + pIemCpu->offOpcode;
5994 }
5995 else
5996 {
5997 /* Get the register (or SIB) value. */
5998 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
5999 {
6000 case 0: u64EffAddr = pCtx->rax; break;
6001 case 1: u64EffAddr = pCtx->rcx; break;
6002 case 2: u64EffAddr = pCtx->rdx; break;
6003 case 3: u64EffAddr = pCtx->rbx; break;
6004 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
6005 case 6: u64EffAddr = pCtx->rsi; break;
6006 case 7: u64EffAddr = pCtx->rdi; break;
6007 case 8: u64EffAddr = pCtx->r8; break;
6008 case 9: u64EffAddr = pCtx->r9; break;
6009 case 10: u64EffAddr = pCtx->r10; break;
6010 case 11: u64EffAddr = pCtx->r11; break;
6011 case 13: u64EffAddr = pCtx->r13; break;
6012 case 14: u64EffAddr = pCtx->r14; break;
6013 case 15: u64EffAddr = pCtx->r15; break;
6014 /* SIB */
6015 case 4:
6016 case 12:
6017 {
6018 uint8_t bSib; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bSib);
6019
6020 /* Get the index and scale it. */
6021                    switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
6022 {
6023 case 0: u64EffAddr = pCtx->rax; break;
6024 case 1: u64EffAddr = pCtx->rcx; break;
6025 case 2: u64EffAddr = pCtx->rdx; break;
6026 case 3: u64EffAddr = pCtx->rbx; break;
6027 case 4: u64EffAddr = 0; /*none */ break;
6028 case 5: u64EffAddr = pCtx->rbp; break;
6029 case 6: u64EffAddr = pCtx->rsi; break;
6030 case 7: u64EffAddr = pCtx->rdi; break;
6031 case 8: u64EffAddr = pCtx->r8; break;
6032 case 9: u64EffAddr = pCtx->r9; break;
6033 case 10: u64EffAddr = pCtx->r10; break;
6034 case 11: u64EffAddr = pCtx->r11; break;
6035 case 12: u64EffAddr = pCtx->r12; break;
6036 case 13: u64EffAddr = pCtx->r13; break;
6037 case 14: u64EffAddr = pCtx->r14; break;
6038 case 15: u64EffAddr = pCtx->r15; break;
6039 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6040 }
6041 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
6042
6043 /* add base */
6044 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
6045 {
6046 case 0: u64EffAddr += pCtx->rax; break;
6047 case 1: u64EffAddr += pCtx->rcx; break;
6048 case 2: u64EffAddr += pCtx->rdx; break;
6049 case 3: u64EffAddr += pCtx->rbx; break;
6050 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
6051 case 6: u64EffAddr += pCtx->rsi; break;
6052 case 7: u64EffAddr += pCtx->rdi; break;
6053 case 8: u64EffAddr += pCtx->r8; break;
6054 case 9: u64EffAddr += pCtx->r9; break;
6055 case 10: u64EffAddr += pCtx->r10; break;
6056                        case 11: u64EffAddr += pCtx->r11; break;
                        case 12: u64EffAddr += pCtx->r12; break;
6057 case 14: u64EffAddr += pCtx->r14; break;
6058 case 15: u64EffAddr += pCtx->r15; break;
6059 /* complicated encodings */
6060 case 5:
6061 case 13:
6062 if ((bRm & X86_MODRM_MOD_MASK) != 0)
6063 {
6064 if (!pIemCpu->uRexB)
6065 {
6066 u64EffAddr += pCtx->rbp;
6067 SET_SS_DEF();
6068 }
6069 else
6070 u64EffAddr += pCtx->r13;
6071 }
6072 else
6073 {
6074 uint32_t u32Disp;
6075 IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Disp);
6076 u64EffAddr += (int32_t)u32Disp;
6077 }
6078 break;
6079 }
6080 break;
6081 }
6082 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6083 }
6084
6085 /* Get and add the displacement. */
6086 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
6087 {
6088 case 0:
6089 break;
6090 case 1:
6091 {
6092 int8_t i8Disp;
6093 IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Disp);
6094 u64EffAddr += i8Disp;
6095 break;
6096 }
6097 case 2:
6098 {
6099 uint32_t u32Disp;
6100 IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Disp);
6101 u64EffAddr += (int32_t)u32Disp;
6102 break;
6103 }
6104 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
6105 }
6106
6107 }
6108 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
6109 *pGCPtrEff = u64EffAddr;
6110 else
6111            *pGCPtrEff = u64EffAddr & UINT32_MAX;
6112 LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
6113 return VINF_SUCCESS;
6114 }
6115 }
6116
6117 AssertFailedReturn(VERR_INTERNAL_ERROR_3);
6118}
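
/*
 * Worked example (editor's note, not part of the original source): with
 * 32-bit addressing, bRm = 0x44 decodes as mod=01, reg=000, rm=100, i.e. a
 * SIB byte followed by a disp8.  Given a SIB byte of 0x98 (scale field 2,
 * index=EBX, base=EAX) and a disp8 of 0x10, the code above yields
 * *pGCPtrEff = eax + (ebx << 2) + 0x10, and since the base is neither EBP
 * nor ESP the segment default remains DS (SET_SS_DEF is not invoked).
 */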
6119
6120/** @} */
6121
6122
6123
6124/*
6125 * Include the instructions
6126 */
6127#include "IEMAllInstructions.cpp.h"
6128
6129
6130
6131
6132#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
6133
6134/**
6135 * Sets up execution verification mode.
6136 */
6137static void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
6138{
6139 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
6140
6141# ifndef IEM_VERIFICATION_MODE_NO_REM
6142 /*
6143 * Switch state.
6144 */
6145 static CPUMCTX s_DebugCtx; /* Ugly! */
6146
6147 s_DebugCtx = *pOrgCtx;
6148 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
6149# endif
6150
6151 /*
6152 * See if there is an interrupt pending in TRPM and inject it if we can.
6153 */
6154 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
6155 if ( pOrgCtx->eflags.Bits.u1IF
6156 && TRPMHasTrap(pVCpu)
6157 //&& TRPMIsSoftwareInterrupt(pVCpu)
6158 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
6159 {
6160 Log(("Injecting trap %#x\n", TRPMGetTrapNo(pVCpu)));
6161 iemCImpl_int(pIemCpu, 0, TRPMGetTrapNo(pVCpu), false);
6162 }
6163
6164 /*
6165 * Reset the counters.
6166 */
6167 pIemCpu->cIOReads = 0;
6168 pIemCpu->cIOWrites = 0;
6169 pIemCpu->fMulDivHack = false;
6170 pIemCpu->fShlHack = false;
6171
6172 /*
6173 * Free all verification records.
6174 */
6175 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
6176 pIemCpu->pIemEvtRecHead = NULL;
6177 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
6178 do
6179 {
6180 while (pEvtRec)
6181 {
6182 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
6183 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
6184 pIemCpu->pFreeEvtRec = pEvtRec;
6185 pEvtRec = pNext;
6186 }
6187 pEvtRec = pIemCpu->pOtherEvtRecHead;
6188 pIemCpu->pOtherEvtRecHead = NULL;
6189 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
6190 } while (pEvtRec);
6191}
6192
6193
6194# ifndef IEM_VERIFICATION_MODE_NO_REM
6195/**
6196 * Allocate an event record.
6197 * @returns Pointer to a record.
6198 */
6199static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
6200{
6201 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
6202 if (pEvtRec)
6203 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
6204 else
6205 {
6206 if (!pIemCpu->ppIemEvtRecNext)
6207 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
6208
6209 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
6210 if (!pEvtRec)
6211 return NULL;
6212 }
6213 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
6214 pEvtRec->pNext = NULL;
6215 return pEvtRec;
6216}
6217# endif
6218
6219
6220/**
6221 * IOMMMIORead notification.
6222 */
6223VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
6224{
6225# ifndef IEM_VERIFICATION_MODE_NO_REM
6226 PVMCPU pVCpu = VMMGetCpu(pVM);
6227 if (!pVCpu)
6228 return;
6229 PIEMCPU pIemCpu = &pVCpu->iem.s;
6230 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6231 if (!pEvtRec)
6232 return;
6233 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6234 pEvtRec->u.RamRead.GCPhys = GCPhys;
6235 pEvtRec->u.RamRead.cb = cbValue;
6236 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
6237 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
6238# endif
6239}
6240
6241
6242/**
6243 * IOMMMIOWrite notification.
6244 */
6245VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
6246{
6247# ifndef IEM_VERIFICATION_MODE_NO_REM
6248 PVMCPU pVCpu = VMMGetCpu(pVM);
6249 if (!pVCpu)
6250 return;
6251 PIEMCPU pIemCpu = &pVCpu->iem.s;
6252 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6253 if (!pEvtRec)
6254 return;
6255 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6256 pEvtRec->u.RamWrite.GCPhys = GCPhys;
6257 pEvtRec->u.RamWrite.cb = cbValue;
6258 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
6259 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
6260 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
6261 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
6262 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
6263 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
6264# endif
6265}
6266
6267
6268/**
6269 * IOMIOPortRead notification.
6270 */
6271VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
6272{
6273# ifndef IEM_VERIFICATION_MODE_NO_REM
6274 PVMCPU pVCpu = VMMGetCpu(pVM);
6275 if (!pVCpu)
6276 return;
6277 PIEMCPU pIemCpu = &pVCpu->iem.s;
6278 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6279 if (!pEvtRec)
6280 return;
6281 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
6282 pEvtRec->u.IOPortRead.Port = Port;
6283 pEvtRec->u.IOPortRead.cbValue = cbValue;
6284 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
6285 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
6286# endif
6287}
6288
6289/**
6290 * IOMIOPortWrite notification.
6291 */
6292VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
6293{
6294# ifndef IEM_VERIFICATION_MODE_NO_REM
6295 PVMCPU pVCpu = VMMGetCpu(pVM);
6296 if (!pVCpu)
6297 return;
6298 PIEMCPU pIemCpu = &pVCpu->iem.s;
6299 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6300 if (!pEvtRec)
6301 return;
6302 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
6303 pEvtRec->u.IOPortWrite.Port = Port;
6304 pEvtRec->u.IOPortWrite.cbValue = cbValue;
6305 pEvtRec->u.IOPortWrite.u32Value = u32Value;
6306 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
6307 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
6308# endif
6309}
6310
6311
6312VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
6313{
6314 AssertFailed();
6315}
6316
6317
6318VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
6319{
6320 AssertFailed();
6321}
6322
6323# ifndef IEM_VERIFICATION_MODE_NO_REM
6324
6325/**
6326 * Fakes and records an I/O port read.
6327 *
6328 * @returns VINF_SUCCESS.
6329 * @param pIemCpu The IEM per CPU data.
6330 * @param Port The I/O port.
6331 * @param pu32Value Where to store the fake value.
6332 * @param cbValue The size of the access.
6333 */
6334static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
6335{
6336 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6337 if (pEvtRec)
6338 {
6339 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
6340 pEvtRec->u.IOPortRead.Port = Port;
6341 pEvtRec->u.IOPortRead.cbValue = cbValue;
6342 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6343 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6344 }
6345 pIemCpu->cIOReads++;
6346 *pu32Value = 0xffffffff;
6347 return VINF_SUCCESS;
6348}
6349
6350
6351/**
6352 * Fakes and records an I/O port write.
6353 *
6354 * @returns VINF_SUCCESS.
6355 * @param pIemCpu The IEM per CPU data.
6356 * @param Port The I/O port.
6357 * @param u32Value The value being written.
6358 * @param cbValue The size of the access.
6359 */
6360static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
6361{
6362 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6363 if (pEvtRec)
6364 {
6365 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
6366 pEvtRec->u.IOPortWrite.Port = Port;
6367 pEvtRec->u.IOPortWrite.cbValue = cbValue;
6368 pEvtRec->u.IOPortWrite.u32Value = u32Value;
6369 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6370 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6371 }
6372 pIemCpu->cIOWrites++;
6373 return VINF_SUCCESS;
6374}
6375
6376
6377/**
6378 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
6379 * dump to the assertion info.
6380 *
6381 * @param pEvtRec The record to dump.
6382 */
6383static void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
6384{
6385 switch (pEvtRec->enmEvent)
6386 {
6387 case IEMVERIFYEVENT_IOPORT_READ:
6388 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
6389                            pEvtRec->u.IOPortRead.Port,
6390                            pEvtRec->u.IOPortRead.cbValue);
6391 break;
6392 case IEMVERIFYEVENT_IOPORT_WRITE:
6393 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
6394 pEvtRec->u.IOPortWrite.Port,
6395 pEvtRec->u.IOPortWrite.cbValue,
6396 pEvtRec->u.IOPortWrite.u32Value);
6397 break;
6398 case IEMVERIFYEVENT_RAM_READ:
6399 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
6400 pEvtRec->u.RamRead.GCPhys,
6401 pEvtRec->u.RamRead.cb);
6402 break;
6403 case IEMVERIFYEVENT_RAM_WRITE:
6404 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*RHxs\n",
6405 pEvtRec->u.RamWrite.GCPhys,
6406 pEvtRec->u.RamWrite.cb,
6407 (int)pEvtRec->u.RamWrite.cb,
6408 pEvtRec->u.RamWrite.ab);
6409 break;
6410 default:
6411 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
6412 break;
6413 }
6414}
6415
6416
6417/**
6418 * Raises an assertion on the specified records, showing the given message with
6419 * a record dump attached.
6420 *
6421 * @param pEvtRec1 The first record.
6422 * @param pEvtRec2 The second record.
6423 * @param pszMsg The message explaining why we're asserting.
6424 */
6425static void iemVerifyAssertRecords(PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
6426{
6427 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
6428 iemVerifyAssertAddRecordDump(pEvtRec1);
6429 iemVerifyAssertAddRecordDump(pEvtRec2);
6430 RTAssertPanic();
6431}
6432
6433
6434/**
6435 * Raises an assertion on the specified record, showing the given message with
6436 * a record dump attached.
6437 *
6438 * @param pEvtRec The record.
6439 * @param pszMsg The message explaining why we're asserting.
6440 */
6441static void iemVerifyAssertRecord(PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
6442{
6443 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
6444 iemVerifyAssertAddRecordDump(pEvtRec);
6445 RTAssertPanic();
6446}
6447
6448
6449/**
6450 * Verifies a write record.
6451 *
6452 * @param pIemCpu The IEM per CPU data.
6453 * @param pEvtRec The write record.
6454 */
6455static void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec)
6456{
6457 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
6458 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
6459 if ( RT_FAILURE(rc)
6460 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
6461 {
6462 /* fend off ins */
6463 if ( !pIemCpu->cIOReads
6464 || pEvtRec->u.RamWrite.ab[0] != 0xcc
6465 || ( pEvtRec->u.RamWrite.cb != 1
6466 && pEvtRec->u.RamWrite.cb != 2
6467 && pEvtRec->u.RamWrite.cb != 4) )
6468 {
6469 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
6470 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
6471 RTAssertMsg2Add("REM: %.*Rhxs\n"
6472 "IEM: %.*Rhxs\n",
6473 pEvtRec->u.RamWrite.cb, abBuf,
6474 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
6475 iemVerifyAssertAddRecordDump(pEvtRec);
6476 RTAssertPanic();
6477 }
6478 }
6479
6480}
6481
6482# endif /* !IEM_VERIFICATION_MODE_NO_REM */
6483
6484/**
6485 * Performs the post-execution verification checks.
6486 */
6487static void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
6488{
6489# if defined(IEM_VERIFICATION_MODE) && !defined(IEM_VERIFICATION_MODE_NO_REM)
6490 /*
6491 * Switch back the state.
6492 */
6493 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
6494 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
6495 Assert(pOrgCtx != pDebugCtx);
6496 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
6497
6498 /*
6499 * Execute the instruction in REM.
6500 */
6501 int rc = REMR3EmulateInstruction(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu));
6502 AssertRC(rc);
6503
6504 /*
6505 * Compare the register states.
6506 */
6507 unsigned cDiffs = 0;
6508 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
6509 {
6510        Log(("REM and IEM end up with different registers!\n"));
6511
6512# define CHECK_FIELD(a_Field) \
6513 do \
6514 { \
6515 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
6516 { \
6517 switch (sizeof(pOrgCtx->a_Field)) \
6518 { \
6519 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
6520 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - rem=%04x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
6521 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - rem=%08x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
6522 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - rem=%016llx\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
6523 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
6524 } \
6525 cDiffs++; \
6526 } \
6527 } while (0)
6528
6529# define CHECK_BIT_FIELD(a_Field) \
6530 do \
6531 { \
6532 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
6533 { \
6534 RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); \
6535 cDiffs++; \
6536 } \
6537 } while (0)
6538
6539 if (memcmp(&pOrgCtx->fpu, &pDebugCtx->fpu, sizeof(pDebugCtx->fpu)))
6540 {
6541 if (pIemCpu->cInstructions != 1)
6542 {
6543 RTAssertMsg2Weak(" the FPU state differs\n");
6544 cDiffs++;
6545 }
6546 else
6547 RTAssertMsg2Weak(" the FPU state differs - happens the first time...\n");
6548 }
6549 CHECK_FIELD(rip);
6550 uint32_t fFlagsMask = UINT32_MAX;
6551 if (pIemCpu->fMulDivHack)
6552 fFlagsMask &= ~(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
6553 if (pIemCpu->fShlHack)
6554 fFlagsMask &= ~(X86_EFL_OF);
6555 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
6556 {
6557 RTAssertMsg2Weak(" rflags differs - iem=%08llx rem=%08llx\n", pDebugCtx->rflags.u, pOrgCtx->rflags.u);
6558 CHECK_BIT_FIELD(rflags.Bits.u1CF);
6559 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
6560 CHECK_BIT_FIELD(rflags.Bits.u1PF);
6561 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
6562 CHECK_BIT_FIELD(rflags.Bits.u1AF);
6563 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
6564 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
6565 CHECK_BIT_FIELD(rflags.Bits.u1SF);
6566 CHECK_BIT_FIELD(rflags.Bits.u1TF);
6567 CHECK_BIT_FIELD(rflags.Bits.u1IF);
6568 CHECK_BIT_FIELD(rflags.Bits.u1DF);
6569 CHECK_BIT_FIELD(rflags.Bits.u1OF);
6570 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
6571 CHECK_BIT_FIELD(rflags.Bits.u1NT);
6572 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
6573 CHECK_BIT_FIELD(rflags.Bits.u1RF);
6574 CHECK_BIT_FIELD(rflags.Bits.u1VM);
6575 CHECK_BIT_FIELD(rflags.Bits.u1AC);
6576 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
6577 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
6578 CHECK_BIT_FIELD(rflags.Bits.u1ID);
6579 }
6580
6581 if (pIemCpu->cIOReads != 1)
6582 CHECK_FIELD(rax);
6583 CHECK_FIELD(rcx);
6584 CHECK_FIELD(rdx);
6585 CHECK_FIELD(rbx);
6586 CHECK_FIELD(rsp);
6587 CHECK_FIELD(rbp);
6588 CHECK_FIELD(rsi);
6589 CHECK_FIELD(rdi);
6590 CHECK_FIELD(r8);
6591 CHECK_FIELD(r9);
6592 CHECK_FIELD(r10);
6593 CHECK_FIELD(r11);
6594 CHECK_FIELD(r12);
6595 CHECK_FIELD(r13);
6596 CHECK_FIELD(cs);
6597 CHECK_FIELD(csHid.u64Base);
6598 CHECK_FIELD(csHid.u32Limit);
6599 CHECK_FIELD(csHid.Attr.u);
6600 CHECK_FIELD(ss);
6601 CHECK_FIELD(ssHid.u64Base);
6602 CHECK_FIELD(ssHid.u32Limit);
6603 CHECK_FIELD(ssHid.Attr.u);
6604 CHECK_FIELD(ds);
6605 CHECK_FIELD(dsHid.u64Base);
6606 CHECK_FIELD(dsHid.u32Limit);
6607 CHECK_FIELD(dsHid.Attr.u);
6608 CHECK_FIELD(es);
6609 CHECK_FIELD(esHid.u64Base);
6610 CHECK_FIELD(esHid.u32Limit);
6611 CHECK_FIELD(esHid.Attr.u);
6612 CHECK_FIELD(fs);
6613 CHECK_FIELD(fsHid.u64Base);
6614 CHECK_FIELD(fsHid.u32Limit);
6615 CHECK_FIELD(fsHid.Attr.u);
6616 CHECK_FIELD(gs);
6617 CHECK_FIELD(gsHid.u64Base);
6618 CHECK_FIELD(gsHid.u32Limit);
6619 CHECK_FIELD(gsHid.Attr.u);
6620 CHECK_FIELD(cr0);
6621 CHECK_FIELD(cr2);
6622 CHECK_FIELD(cr3);
6623 CHECK_FIELD(cr4);
6624 CHECK_FIELD(dr[0]);
6625 CHECK_FIELD(dr[1]);
6626 CHECK_FIELD(dr[2]);
6627 CHECK_FIELD(dr[3]);
6628 CHECK_FIELD(dr[6]);
6629 CHECK_FIELD(dr[7]);
6630 CHECK_FIELD(gdtr.cbGdt);
6631 CHECK_FIELD(gdtr.pGdt);
6632 CHECK_FIELD(idtr.cbIdt);
6633 CHECK_FIELD(idtr.pIdt);
6634 CHECK_FIELD(ldtr);
6635 CHECK_FIELD(ldtrHid.u64Base);
6636 CHECK_FIELD(ldtrHid.u32Limit);
6637 CHECK_FIELD(ldtrHid.Attr.u);
6638 CHECK_FIELD(tr);
6639 CHECK_FIELD(trHid.u64Base);
6640 CHECK_FIELD(trHid.u32Limit);
6641 CHECK_FIELD(trHid.Attr.u);
6642 CHECK_FIELD(SysEnter.cs);
6643 CHECK_FIELD(SysEnter.eip);
6644 CHECK_FIELD(SysEnter.esp);
6645 CHECK_FIELD(msrEFER);
6646 CHECK_FIELD(msrSTAR);
6647 CHECK_FIELD(msrPAT);
6648 CHECK_FIELD(msrLSTAR);
6649 CHECK_FIELD(msrCSTAR);
6650 CHECK_FIELD(msrSFMASK);
6651 CHECK_FIELD(msrKERNELGSBASE);
6652
6653 if (cDiffs != 0)
6654 AssertFailed();
6655# undef CHECK_FIELD
6656# undef CHECK_BIT_FIELD
6657 }
6658
6659 /*
6660 * If the register state compared fine, check the verification event
6661 * records.
6662 */
6663 if (cDiffs == 0)
6664 {
6665 /*
6666         * Compare verification event records.
6667 * - I/O port accesses should be a 1:1 match.
6668 */
6669 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
6670 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
6671 while (pIemRec && pOtherRec)
6672 {
6673            /* Since we might miss RAM writes and reads, ignore reads and check
6674               that any written memory matches, skipping extra IEM records. */
6675 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
6676 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
6677 && pIemRec->pNext)
6678 {
6679 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
6680 iemVerifyWriteRecord(pIemCpu, pIemRec);
6681 pIemRec = pIemRec->pNext;
6682 }
6683
6684 /* Do the compare. */
6685 if (pIemRec->enmEvent != pOtherRec->enmEvent)
6686 {
6687 iemVerifyAssertRecords(pIemRec, pOtherRec, "Type mismatches");
6688 break;
6689 }
6690 bool fEquals;
6691 switch (pIemRec->enmEvent)
6692 {
6693 case IEMVERIFYEVENT_IOPORT_READ:
6694 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
6695 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
6696 break;
6697 case IEMVERIFYEVENT_IOPORT_WRITE:
6698 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
6699 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
6700 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
6701 break;
6702 case IEMVERIFYEVENT_RAM_READ:
6703 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
6704 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
6705 break;
6706 case IEMVERIFYEVENT_RAM_WRITE:
6707 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
6708 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
6709 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
6710 break;
6711 default:
6712 fEquals = false;
6713 break;
6714 }
6715 if (!fEquals)
6716 {
6717 iemVerifyAssertRecords(pIemRec, pOtherRec, "Mismatch");
6718 break;
6719 }
6720
6721 /* advance */
6722 pIemRec = pIemRec->pNext;
6723 pOtherRec = pOtherRec->pNext;
6724 }
6725
6726 /* Ignore extra writes and reads. */
6727 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
6728 {
6729 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
6730 iemVerifyWriteRecord(pIemCpu, pIemRec);
6731 pIemRec = pIemRec->pNext;
6732 }
6733 if (pIemRec != NULL)
6734 iemVerifyAssertRecord(pIemRec, "Extra IEM record!");
6735 else if (pOtherRec != NULL)
6736            iemVerifyAssertRecord(pOtherRec, "Extra Other record!");
6737 }
6738 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
6739# endif
6740}
6741
6742#endif /* IEM_VERIFICATION_MODE && IN_RING3 */
6743
6744
6745/**
6746 * Execute one instruction.
6747 *
6748 * @return Strict VBox status code.
6749 * @param pVCpu The current virtual CPU.
6750 */
6751VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
6752{
6753 PIEMCPU pIemCpu = &pVCpu->iem.s;
6754
6755#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
6756 iemExecVerificationModeSetup(pIemCpu);
6757#endif
6758#ifdef DEBUG
6759 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6760 char szInstr[256];
6761 uint32_t cbInstr = 0;
6762 DBGFR3DisasInstrEx(pVCpu->pVMR3, pVCpu->idCpu, 0, 0,
6763 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6764 szInstr, sizeof(szInstr), &cbInstr);
6765
6766 Log2(("**** "
6767 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
6768 " eip=%08x esp=%08x ebp=%08x iopl=%d\n"
6769 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
6770 " %s\n"
6771 ,
6772 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
6773 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL,
6774 (RTSEL)pCtx->cs, (RTSEL)pCtx->ss, (RTSEL)pCtx->ds, (RTSEL)pCtx->es,
6775 (RTSEL)pCtx->fs, (RTSEL)pCtx->gs, pCtx->eflags.u,
6776 szInstr));
6777#endif
6778
6779 /*
6780 * Do the decoding and emulation.
6781 */
6782 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
6783 if (rcStrict != VINF_SUCCESS)
6784 return rcStrict;
6785
6786 uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
6787 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
6788 if (rcStrict == VINF_SUCCESS)
6789 pIemCpu->cInstructions++;
6790//#ifdef DEBUG
6791// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
6792//#endif
6793
6794    /* Execute the next instruction as well if a sti, pop ss or
6795       mov ss, Gr has just completed successfully. */
6796 if ( rcStrict == VINF_SUCCESS
6797 && VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
6798 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
6799 {
6800 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
6801 if (rcStrict == VINF_SUCCESS)
6802 {
6803 b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
6804 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
6805 if (rcStrict == VINF_SUCCESS)
6806 pIemCpu->cInstructions++;
6807 }
6808 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
6809 }
6810
6811 /*
6812 * Assert some sanity.
6813 */
6814#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
6815 iemExecVerificationModeCheck(pIemCpu);
6816#endif
6817 return rcStrict;
6818}
6819