VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 36768

Last change on this file since 36768 was 36768, checked in by vboxsync, 14 years ago

IEM: Initial commit, work in progress.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 207.5 KB
1/* $Id: IEMAll.cpp 36768 2011-04-20 18:33:29Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered, however this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 */
43
44/*******************************************************************************
45* Header Files *
46*******************************************************************************/
47#define LOG_GROUP LOG_GROUP_EM /** @todo add log group */
48#include <VBox/vmm/iem.h>
49#include <VBox/vmm/pgm.h>
50#include <VBox/vmm/iom.h>
51#include <VBox/vmm/em.h>
52#include <VBox/vmm/dbgf.h>
53#ifdef IEM_VERIFICATION_MODE
54# include <VBox/vmm/rem.h>
55#endif
56#include "IEMInternal.h"
57#include <VBox/vmm/vm.h>
58#include <VBox/log.h>
59#include <VBox/err.h>
60#include <VBox/param.h>
61#include <VBox/x86.h>
62#include <iprt/assert.h>
63#include <iprt/string.h>
64
65
66/*******************************************************************************
67* Structures and Typedefs *
68*******************************************************************************/
69/** @typedef PFNIEMOP
70 * Pointer to an opcode decoder function.
71 */
72
73/** @def FNIEMOP_DEF
74 * Define an opcode decoder function.
75 *
76 * We're using macros for this so that adding and removing parameters as well as
77 * tweaking compiler-specific attributes become easier. See FNIEMOP_CALL.
78 *
79 * @param a_Name The function name.
80 */
81
82
83#if defined(__GNUC__) && defined(RT_ARCH_X86)
84typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
85# define FNIEMOP_DEF(a_Name) \
86 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name (PIEMCPU pIemCpu)
87# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
88 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
89# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
90 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
91
92#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
93typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
94# define FNIEMOP_DEF(a_Name) \
95 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
96# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
97 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
98# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
99 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
100
101#else
102typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
103# define FNIEMOP_DEF(a_Name) \
104 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
105# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
106 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
107# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
108 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
109
110#endif
111
112
113/**
114 * Function table for a binary operator providing implementation based on
115 * operand size.
116 */
117typedef struct IEMOPBINSIZES
118{
119 PFNIEMAIMPLBINU8 pfnNormalU8, pfnLockedU8;
120 PFNIEMAIMPLBINU16 pfnNormalU16, pfnLockedU16;
121 PFNIEMAIMPLBINU32 pfnNormalU32, pfnLockedU32;
122 PFNIEMAIMPLBINU64 pfnNormalU64, pfnLockedU64;
123} IEMOPBINSIZES;
124/** Pointer to a binary operator function table. */
125typedef IEMOPBINSIZES const *PCIEMOPBINSIZES;
126
127
128/**
129 * Function table for a unary operator providing implementation based on
130 * operand size.
131 */
132typedef struct IEMOPUNARYSIZES
133{
134 PFNIEMAIMPLUNARYU8 pfnNormalU8, pfnLockedU8;
135 PFNIEMAIMPLUNARYU16 pfnNormalU16, pfnLockedU16;
136 PFNIEMAIMPLUNARYU32 pfnNormalU32, pfnLockedU32;
137 PFNIEMAIMPLUNARYU64 pfnNormalU64, pfnLockedU64;
138} IEMOPUNARYSIZES;
139/** Pointer to a unary operator function table. */
140typedef IEMOPUNARYSIZES const *PCIEMOPUNARYSIZES;
141
142
143/**
144 * Function table for a shift operator providing implementation based on
145 * operand size.
146 */
147typedef struct IEMOPSHIFTSIZES
148{
149 PFNIEMAIMPLSHIFTU8 pfnNormalU8;
150 PFNIEMAIMPLSHIFTU16 pfnNormalU16;
151 PFNIEMAIMPLSHIFTU32 pfnNormalU32;
152 PFNIEMAIMPLSHIFTU64 pfnNormalU64;
153} IEMOPSHIFTSIZES;
154/** Pointer to a shift operator function table. */
155typedef IEMOPSHIFTSIZES const *PCIEMOPSHIFTSIZES;
156
157
158/**
159 * Function table for a multiplication or division operation.
160 */
161typedef struct IEMOPMULDIVSIZES
162{
163 PFNIEMAIMPLMULDIVU8 pfnU8;
164 PFNIEMAIMPLMULDIVU16 pfnU16;
165 PFNIEMAIMPLMULDIVU32 pfnU32;
166 PFNIEMAIMPLMULDIVU64 pfnU64;
167} IEMOPMULDIVSIZES;
168/** Pointer to a multiplication or division operation function table. */
169typedef IEMOPMULDIVSIZES const *PCIEMOPMULDIVSIZES;
170
171
172/**
173 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
174 */
175typedef union IEMSELDESC
176{
177 /** The legacy view. */
178 X86DESC Legacy;
179 /** The long mode view. */
180 X86DESC64 Long;
181} IEMSELDESC;
182/** Pointer to a selector descriptor table entry. */
183typedef IEMSELDESC *PIEMSELDESC;
184
185
186/*******************************************************************************
187* Defined Constants And Macros *
188*******************************************************************************/
189/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
190 * due to GCC lacking knowledge about the value range of a switch. */
191#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_INTERNAL_ERROR_4)
192
193/**
194 * Call an opcode decoder function.
195 *
196 * We're using macros for this so that adding and removing parameters can be
197 * done as we please. See FNIEMOP_DEF.
198 */
199#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
200
201/**
202 * Call a common opcode decoder function taking one extra argument.
203 *
204 * We're using macros for this so that adding and removing parameters can be
205 * done as we please. See FNIEMOP_DEF_1.
206 */
207#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
208
209/**
210 * Call a common opcode decoder function taking two extra arguments.
211 *
212 * We're using macros for this so that adding and removing parameters can be
213 * done as we please. See FNIEMOP_DEF_2.
214 */
215#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
216
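/*
 * Illustrative sketch (editor's addition, not part of the original file): how the
 * FNIEMOP_DEF / FNIEMOP_DEF_1 definition macros pair up with the FNIEMOP_CALL_*
 * invocation macros.  Both function names below are hypothetical.
 */
#if 0
/* A common worker taking one extra argument, defined with FNIEMOP_DEF_1... */
FNIEMOP_DEF_1(iemOpExampleCommon_push_greg, uint8_t, iReg)
{
    /* ... push the general register selected by iReg ... */
    return VINF_SUCCESS;
}

/* ...and an opcode decoder defined with FNIEMOP_DEF that forwards to it via FNIEMOP_CALL_1
   (pIemCpu is passed along implicitly by the macro). */
FNIEMOP_DEF(iemOp_example_push_rAX)
{
    return FNIEMOP_CALL_1(iemOpExampleCommon_push_greg, X86_GREG_xAX);
}
#endif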
217/**
218 * Check if we're currently executing in real or virtual 8086 mode.
219 *
220 * @returns @c true if it is, @c false if not.
221 * @param a_pIemCpu The IEM state of the current CPU.
222 */
223#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
224
225/**
226 * Check if we're currently executing in long mode.
227 *
228 * @returns @c true if it is, @c false if not.
229 * @param a_pIemCpu The IEM state of the current CPU.
230 */
231#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
232
233/**
234 * Check if we're currently executing in real mode.
235 *
236 * @returns @c true if it is, @c false if not.
237 * @param a_pIemCpu The IEM state of the current CPU.
238 */
239#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
240
241/**
242 * Tests if an AMD CPUID feature (extended) is marked present - ECX.
243 */
244#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
245
246/**
247 * Check if the address is canonical.
248 */
249#define IEM_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000))
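/*
 * Editor's note (not part of the original file): the check above relies on unsigned
 * wrap-around.  Adding 2^47 maps the two canonical ranges 0..0x00007fffffffffff and
 * 0xffff800000000000..0xffffffffffffffff onto the single contiguous range
 * 0..0x0000ffffffffffff, so one unsigned compare against 2^48 suffices.  For example:
 *     0x00007fffffffffff + 0x0000800000000000 = 0x0000ffffffffffff  -> canonical
 *     0x0000800000000000 + 0x0000800000000000 = 0x0001000000000000  -> not canonical
 */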
250
251
252/*******************************************************************************
253* Global Variables *
254*******************************************************************************/
255extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
256
257
258/** Function table for the ADD instruction. */
259static const IEMOPBINSIZES g_iemAImpl_add =
260{
261 iemAImpl_add_u8, iemAImpl_add_u8_locked,
262 iemAImpl_add_u16, iemAImpl_add_u16_locked,
263 iemAImpl_add_u32, iemAImpl_add_u32_locked,
264 iemAImpl_add_u64, iemAImpl_add_u64_locked
265};
266
267/** Function table for the ADC instruction. */
268static const IEMOPBINSIZES g_iemAImpl_adc =
269{
270 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
271 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
272 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
273 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
274};
275
276/** Function table for the SUB instruction. */
277static const IEMOPBINSIZES g_iemAImpl_sub =
278{
279 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
280 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
281 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
282 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
283};
284
285/** Function table for the SBB instruction. */
286static const IEMOPBINSIZES g_iemAImpl_sbb =
287{
288 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
289 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
290 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
291 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
292};
293
294/** Function table for the OR instruction. */
295static const IEMOPBINSIZES g_iemAImpl_or =
296{
297 iemAImpl_or_u8, iemAImpl_or_u8_locked,
298 iemAImpl_or_u16, iemAImpl_or_u16_locked,
299 iemAImpl_or_u32, iemAImpl_or_u32_locked,
300 iemAImpl_or_u64, iemAImpl_or_u64_locked
301};
302
303/** Function table for the XOR instruction. */
304static const IEMOPBINSIZES g_iemAImpl_xor =
305{
306 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
307 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
308 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
309 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
310};
311
312/** Function table for the AND instruction. */
313static const IEMOPBINSIZES g_iemAImpl_and =
314{
315 iemAImpl_and_u8, iemAImpl_and_u8_locked,
316 iemAImpl_and_u16, iemAImpl_and_u16_locked,
317 iemAImpl_and_u32, iemAImpl_and_u32_locked,
318 iemAImpl_and_u64, iemAImpl_and_u64_locked
319};
320
321/** Function table for the CMP instruction.
322 * @remarks Making operand order ASSUMPTIONS.
323 */
324static const IEMOPBINSIZES g_iemAImpl_cmp =
325{
326 iemAImpl_cmp_u8, NULL,
327 iemAImpl_cmp_u16, NULL,
328 iemAImpl_cmp_u32, NULL,
329 iemAImpl_cmp_u64, NULL
330};
331
332/** Function table for the TEST instruction.
333 * @remarks Making operand order ASSUMPTIONS.
334 */
335static const IEMOPBINSIZES g_iemAImpl_test =
336{
337 iemAImpl_test_u8, NULL,
338 iemAImpl_test_u16, NULL,
339 iemAImpl_test_u32, NULL,
340 iemAImpl_test_u64, NULL
341};
342
343/** Group 1 /r lookup table. */
344static const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
345{
346 &g_iemAImpl_add,
347 &g_iemAImpl_or,
348 &g_iemAImpl_adc,
349 &g_iemAImpl_sbb,
350 &g_iemAImpl_and,
351 &g_iemAImpl_sub,
352 &g_iemAImpl_xor,
353 &g_iemAImpl_cmp
354};
355
356/** Function table for the INC instruction. */
357static const IEMOPUNARYSIZES g_iemAImpl_inc =
358{
359 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
360 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
361 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
362 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
363};
364
365/** Function table for the DEC instruction. */
366static const IEMOPUNARYSIZES g_iemAImpl_dec =
367{
368 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
369 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
370 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
371 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
372};
373
374/** Function table for the NEG instruction. */
375static const IEMOPUNARYSIZES g_iemAImpl_neg =
376{
377 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
378 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
379 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
380 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
381};
382
383/** Function table for the NOT instruction. */
384static const IEMOPUNARYSIZES g_iemAImpl_not =
385{
386 iemAImpl_not_u8, iemAImpl_not_u8_locked,
387 iemAImpl_not_u16, iemAImpl_not_u16_locked,
388 iemAImpl_not_u32, iemAImpl_not_u32_locked,
389 iemAImpl_not_u64, iemAImpl_not_u64_locked
390};
391
392
393/** Function table for the ROL instruction. */
394static const IEMOPSHIFTSIZES g_iemAImpl_rol =
395{
396 iemAImpl_rol_u8,
397 iemAImpl_rol_u16,
398 iemAImpl_rol_u32,
399 iemAImpl_rol_u64
400};
401
402/** Function table for the ROR instruction. */
403static const IEMOPSHIFTSIZES g_iemAImpl_ror =
404{
405 iemAImpl_ror_u8,
406 iemAImpl_ror_u16,
407 iemAImpl_ror_u32,
408 iemAImpl_ror_u64
409};
410
411/** Function table for the RCL instruction. */
412static const IEMOPSHIFTSIZES g_iemAImpl_rcl =
413{
414 iemAImpl_rcl_u8,
415 iemAImpl_rcl_u16,
416 iemAImpl_rcl_u32,
417 iemAImpl_rcl_u64
418};
419
420/** Function table for the RCR instruction. */
421static const IEMOPSHIFTSIZES g_iemAImpl_rcr =
422{
423 iemAImpl_rcr_u8,
424 iemAImpl_rcr_u16,
425 iemAImpl_rcr_u32,
426 iemAImpl_rcr_u64
427};
428
429/** Function table for the SHL instruction. */
430static const IEMOPSHIFTSIZES g_iemAImpl_shl =
431{
432 iemAImpl_shl_u8,
433 iemAImpl_shl_u16,
434 iemAImpl_shl_u32,
435 iemAImpl_shl_u64
436};
437
438/** Function table for the SHR instruction. */
439static const IEMOPSHIFTSIZES g_iemAImpl_shr =
440{
441 iemAImpl_shr_u8,
442 iemAImpl_shr_u16,
443 iemAImpl_shr_u32,
444 iemAImpl_shr_u64
445};
446
447/** Function table for the SAR instruction. */
448static const IEMOPSHIFTSIZES g_iemAImpl_sar =
449{
450 iemAImpl_sar_u8,
451 iemAImpl_sar_u16,
452 iemAImpl_sar_u32,
453 iemAImpl_sar_u64
454};
455
456
457/** Function table for the MUL instruction. */
458static const IEMOPMULDIVSIZES g_iemAImpl_mul =
459{
460 iemAImpl_mul_u8,
461 iemAImpl_mul_u16,
462 iemAImpl_mul_u32,
463 iemAImpl_mul_u64
464};
465
466/** Function table for the IMUL instruction working implicitly on rAX. */
467static const IEMOPMULDIVSIZES g_iemAImpl_imul =
468{
469 iemAImpl_imul_u8,
470 iemAImpl_imul_u16,
471 iemAImpl_imul_u32,
472 iemAImpl_imul_u64
473};
474
475/** Function table for the DIV instruction. */
476static const IEMOPMULDIVSIZES g_iemAImpl_div =
477{
478 iemAImpl_div_u8,
479 iemAImpl_div_u16,
480 iemAImpl_div_u32,
481 iemAImpl_div_u64
482};
483
484/** Function table for the IDIV instruction. */
485static const IEMOPMULDIVSIZES g_iemAImpl_idiv =
486{
487 iemAImpl_idiv_u8,
488 iemAImpl_idiv_u16,
489 iemAImpl_idiv_u32,
490 iemAImpl_idiv_u64
491};
492
493
494/*******************************************************************************
495* Internal Functions *
496*******************************************************************************/
497static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
498static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
499static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
500static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
501static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
502
503
504/**
505 * Initializes the decoder state.
506 *
507 * @param pIemCpu The per CPU IEM state.
508 */
509DECLINLINE(void) iemInitDecode(PIEMCPU pIemCpu)
510{
511 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
512
513 pIemCpu->uCpl = CPUMGetGuestCPL(IEMCPU_TO_VMCPU(pIemCpu), CPUMCTX2CORE(pCtx));
514 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
515 ? IEMMODE_64BIT
516 : pCtx->csHid.Attr.n.u1DefBig /** @todo check if this is correct... */
517 ? IEMMODE_32BIT
518 : IEMMODE_16BIT;
519 pIemCpu->enmCpuMode = enmMode;
520 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
521 pIemCpu->enmEffAddrMode = enmMode;
522 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
523 pIemCpu->enmEffOpSize = enmMode;
524 pIemCpu->fPrefixes = 0;
525 pIemCpu->uRexReg = 0;
526 pIemCpu->uRexB = 0;
527 pIemCpu->uRexIndex = 0;
528 pIemCpu->iEffSeg = X86_SREG_DS;
529 pIemCpu->offOpcode = 0;
530 pIemCpu->cbOpcode = 0;
531 pIemCpu->cActiveMappings = 0;
532 pIemCpu->iNextMapping = 0;
533}
534
535
536/**
537 * Initializes the decoder and prefetches the first opcode bytes when starting execution.
538 *
539 * @returns Strict VBox status code.
540 * @param pIemCpu The IEM state.
541 */
542static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu)
543{
544 iemInitDecode(pIemCpu);
545
546 /*
547 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
548 *
549 * First translate CS:rIP to a physical address.
550 */
551 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
552 uint32_t cbToTryRead;
553 RTGCPTR GCPtrPC;
554 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
555 {
556 cbToTryRead = PAGE_SIZE;
557 GCPtrPC = pCtx->rip;
558 if (!IEM_IS_CANONICAL(GCPtrPC))
559 return iemRaiseGeneralProtectionFault0(pIemCpu);
560 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
561 }
562 else
563 {
564 uint32_t GCPtrPC32 = pCtx->eip;
565 Assert(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
566 if (GCPtrPC32 > pCtx->csHid.u32Limit)
567 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
568 cbToTryRead = pCtx->csHid.u32Limit - GCPtrPC32 + 1;
569 GCPtrPC = pCtx->csHid.u64Base + GCPtrPC32;
570 }
571
572 RTGCPHYS GCPhys;
573 uint64_t fFlags;
574 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
575 if (RT_FAILURE(rc))
576 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
577 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3) /* supervisor-only page accessed from user mode */
578 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
579 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
580 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
581 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
582 /** @todo Check reserved bits and such stuff. PGM is better at doing
583 * that, so do it when implementing the guest virtual address
584 * TLB... */
585
586 /*
587 * Read the bytes at this address.
588 */
589 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
590 if (cbToTryRead > cbLeftOnPage)
591 cbToTryRead = cbLeftOnPage;
592 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
593 cbToTryRead = sizeof(pIemCpu->abOpcode);
594 if (!pIemCpu->fByPassHandlers)
595 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, pIemCpu->abOpcode, cbToTryRead);
596 else
597 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pIemCpu->abOpcode, GCPhys, cbToTryRead);
598 if (rc != VINF_SUCCESS)
599 return rc;
600 pIemCpu->cbOpcode = cbToTryRead;
601
602 return VINF_SUCCESS;
603}
604
605
606/**
607 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
608 * exception if it fails.
609 *
610 * @returns Strict VBox status code.
611 * @param pIemCpu The IEM state.
612 * @param cbMin The minimum number of opcode bytes to fetch.
613 */
614static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
615{
616 /*
617 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
618 *
619 * First translate CS:rIP to a physical address.
620 */
621 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
622 uint32_t cbToTryRead;
623 RTGCPTR GCPtrNext;
624 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
625 {
626 cbToTryRead = PAGE_SIZE;
627 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
628 if (!IEM_IS_CANONICAL(GCPtrNext))
629 return iemRaiseGeneralProtectionFault0(pIemCpu);
630 cbToTryRead = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
631 Assert(cbToTryRead >= cbMin); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
632 }
633 else
634 {
635 uint32_t GCPtrNext32 = pCtx->eip;
636 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
637 GCPtrNext32 += pIemCpu->cbOpcode;
638 if (GCPtrNext32 > pCtx->csHid.u32Limit)
639 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
640 cbToTryRead = pCtx->csHid.u32Limit - GCPtrNext32 + 1;
641 if (cbToTryRead < cbMin)
642 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
643 GCPtrNext = pCtx->csHid.u64Base + GCPtrNext32;
644 }
645
646 RTGCPHYS GCPhys;
647 uint64_t fFlags;
648 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
649 if (RT_FAILURE(rc))
650 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
651 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3) /* supervisor-only page accessed from user mode */
652 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
653 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
654 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
655 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
656 /** @todo Check reserved bits and such stuff. PGM is better at doing
657 * that, so do it when implementing the guest virtual address
658 * TLB... */
659
660 /*
661 * Read the bytes at this address.
662 */
663 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
664 if (cbToTryRead > cbLeftOnPage)
665 cbToTryRead = cbLeftOnPage;
666 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
667 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
668 if (!pIemCpu->fByPassHandlers)
669 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);
670 else
671 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
672 if (rc != VINF_SUCCESS)
673 return rc;
674 pIemCpu->cbOpcode += cbToTryRead;
675
676 return VINF_SUCCESS;
677}
678
679
680/**
681 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
682 *
683 * @returns Strict VBox status code.
684 * @param pIemCpu The IEM state.
685 * @param pb Where to return the opcode byte.
686 */
687static VBOXSTRICTRC iemOpcodeGetNextByteSlow(PIEMCPU pIemCpu, uint8_t *pb)
688{
689 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
690 if (rcStrict == VINF_SUCCESS)
691 {
692 uint8_t offOpcode = pIemCpu->offOpcode;
693 *pb = pIemCpu->abOpcode[offOpcode];
694 pIemCpu->offOpcode = offOpcode + 1;
695 }
696 else
697 *pb = 0;
698 return rcStrict;
699}
700
701
702/**
703 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
704 *
705 * @returns Strict VBox status code.
706 * @param pIemCpu The IEM state.
707 * @param pu16 Where to return the sign-extended word.
708 */
709static VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
710{
711 uint8_t u8;
712 VBOXSTRICTRC rcStrict = iemOpcodeGetNextByteSlow(pIemCpu, &u8);
713 if (rcStrict == VINF_SUCCESS)
714 *pu16 = (int8_t)u8;
715 return rcStrict;
716}
717
718
719/**
720 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
721 *
722 * @returns Strict VBox status code.
723 * @param pIemCpu The IEM state.
724 * @param pu16 Where to return the opcode word.
725 */
726static VBOXSTRICTRC iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
727{
728 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
729 if (rcStrict == VINF_SUCCESS)
730 {
731 uint8_t offOpcode = pIemCpu->offOpcode;
732 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
733 pIemCpu->offOpcode = offOpcode + 2;
734 }
735 else
736 *pu16 = 0;
737 return rcStrict;
738}
739
740
741/**
742 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
743 *
744 * @returns Strict VBox status code.
745 * @param pIemCpu The IEM state.
746 * @param pu32 Where to return the opcode dword.
747 */
748static VBOXSTRICTRC iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
749{
750 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
751 if (rcStrict == VINF_SUCCESS)
752 {
753 uint8_t offOpcode = pIemCpu->offOpcode;
754 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
755 pIemCpu->abOpcode[offOpcode + 1],
756 pIemCpu->abOpcode[offOpcode + 2],
757 pIemCpu->abOpcode[offOpcode + 3]);
758 pIemCpu->offOpcode = offOpcode + 4;
759 }
760 else
761 *pu32 = 0;
762 return rcStrict;
763}
764
765
766/**
767 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
768 *
769 * @returns Strict VBox status code.
770 * @param pIemCpu The IEM state.
771 * @param pu64 Where to return the opcode qword.
772 */
773static VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
774{
775 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
776 if (rcStrict == VINF_SUCCESS)
777 {
778 uint8_t offOpcode = pIemCpu->offOpcode;
779 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
780 pIemCpu->abOpcode[offOpcode + 1],
781 pIemCpu->abOpcode[offOpcode + 2],
782 pIemCpu->abOpcode[offOpcode + 3]);
783 pIemCpu->offOpcode = offOpcode + 4;
784 }
785 else
786 *pu64 = 0;
787 return rcStrict;
788}
789
790
791/**
792 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
793 *
794 * @returns Strict VBox status code.
795 * @param pIemCpu The IEM state.
796 * @param pu64 Where to return the opcode qword.
797 */
798static VBOXSTRICTRC iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
799{
800 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
801 if (rcStrict == VINF_SUCCESS)
802 {
803 uint8_t offOpcode = pIemCpu->offOpcode;
804 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
805 pIemCpu->abOpcode[offOpcode + 1],
806 pIemCpu->abOpcode[offOpcode + 2],
807 pIemCpu->abOpcode[offOpcode + 3],
808 pIemCpu->abOpcode[offOpcode + 4],
809 pIemCpu->abOpcode[offOpcode + 5],
810 pIemCpu->abOpcode[offOpcode + 6],
811 pIemCpu->abOpcode[offOpcode + 7]);
812 pIemCpu->offOpcode = offOpcode + 8;
813 }
814 else
815 *pu64 = 0;
816 return rcStrict;
817}
818
819
820/**
821 * Fetches the next opcode byte.
822 *
823 * @returns Strict VBox status code.
824 * @param pIemCpu The IEM state.
825 * @param pu8 Where to return the opcode byte.
826 */
827DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
828{
829 uint8_t const offOpcode = pIemCpu->offOpcode;
830 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
831 return iemOpcodeGetNextByteSlow(pIemCpu, pu8);
832
833 *pu8 = pIemCpu->abOpcode[offOpcode];
834 pIemCpu->offOpcode = offOpcode + 1;
835 return VINF_SUCCESS;
836}
837
838/**
839 * Fetches the next opcode byte, returns automatically on failure.
840 *
841 * @param pIemCpu The IEM state.
842 * @param a_pu8 Where to return the opcode byte.
843 */
844#define IEM_OPCODE_GET_NEXT_BYTE(a_pIemCpu, a_pu8) \
845 do \
846 { \
847 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8((a_pIemCpu), (a_pu8)); \
848 if (rcStrict2 != VINF_SUCCESS) \
849 return rcStrict2; \
850 } while (0)
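/*
 * Editor's note (not part of the original file): the fetch helpers are split into an
 * inlined fast path and an out-of-line slow path.  iemOpcodeGetNextU8 serves bytes that
 * are already buffered in abOpcode and only falls back to iemOpcodeGetNextByteSlow (which
 * calls iemOpcodeFetchMoreBytes) on a buffer miss.  A decoder normally just uses the
 * IEM_OPCODE_GET_NEXT_* wrappers, e.g. (hypothetical opcode handler):
 */
#if 0
FNIEMOP_DEF(iemOp_example_mov_AL_Ib)
{
    uint8_t u8Imm;
    IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &u8Imm);  /* returns to the caller on any fetch failure */
    /* ... store u8Imm into AL and advance RIP ... */
    return VINF_SUCCESS;
}
#endif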
851
852
853/**
854 * Fetches the next signed byte from the opcode stream.
855 *
856 * @returns Strict VBox status code.
857 * @param pIemCpu The IEM state.
858 * @param pi8 Where to return the signed byte.
859 */
860DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
861{
862 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
863}
864
865/**
866 * Fetches the next signed byte from the opcode stream, returning automatically
867 * on failure.
868 *
869 * @param pIemCpu The IEM state.
870 * @param pi8 Where to return the signed byte.
871 */
872#define IEM_OPCODE_GET_NEXT_S8(a_pIemCpu, a_pi8) \
873 do \
874 { \
875 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8((a_pIemCpu), (a_pi8)); \
876 if (rcStrict2 != VINF_SUCCESS) \
877 return rcStrict2; \
878 } while (0)
879
880
881/**
882 * Fetches the next signed byte from the opcode stream, sign extending it to
883 * an unsigned 16-bit value.
884 *
885 * @returns Strict VBox status code.
886 * @param pIemCpu The IEM state.
887 * @param pu16 Where to return the unsigned word.
888 */
889DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
890{
891 uint8_t const offOpcode = pIemCpu->offOpcode;
892 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
893 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
894
895 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
896 pIemCpu->offOpcode = offOpcode + 1;
897 return VINF_SUCCESS;
898}
899
900
901/**
902 * Fetches the next signed byte from the opcode stream and sign-extends it to
903 * a word, returning automatically on failure.
904 *
905 * @param pIemCpu The IEM state.
906 * @param pu16 Where to return the word.
907 */
908#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pIemCpu, a_pu16) \
909 do \
910 { \
911 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16((a_pIemCpu), (a_pu16)); \
912 if (rcStrict2 != VINF_SUCCESS) \
913 return rcStrict2; \
914 } while (0)
915
916
917/**
918 * Fetches the next opcode word.
919 *
920 * @returns Strict VBox status code.
921 * @param pIemCpu The IEM state.
922 * @param pu16 Where to return the opcode word.
923 */
924DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
925{
926 uint8_t const offOpcode = pIemCpu->offOpcode;
927 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
928 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
929
930 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
931 pIemCpu->offOpcode = offOpcode + 2;
932 return VINF_SUCCESS;
933}
934
935/**
936 * Fetches the next opcode word, returns automatically on failure.
937 *
938 * @param pIemCpu The IEM state.
939 * @param a_pu16 Where to return the opcode word.
940 */
941#define IEM_OPCODE_GET_NEXT_U16(a_pIemCpu, a_pu16) \
942 do \
943 { \
944 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16((a_pIemCpu), (a_pu16)); \
945 if (rcStrict2 != VINF_SUCCESS) \
946 return rcStrict2; \
947 } while (0)
948
949
950/**
951 * Fetches the next opcode dword.
952 *
953 * @returns Strict VBox status code.
954 * @param pIemCpu The IEM state.
955 * @param pu32 Where to return the opcode double word.
956 */
957DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
958{
959 uint8_t const offOpcode = pIemCpu->offOpcode;
960 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
961 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
962
963 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
964 pIemCpu->abOpcode[offOpcode + 1],
965 pIemCpu->abOpcode[offOpcode + 2],
966 pIemCpu->abOpcode[offOpcode + 3]);
967 pIemCpu->offOpcode = offOpcode + 4;
968 return VINF_SUCCESS;
969}
970
971/**
972 * Fetches the next opcode dword, returns automatically on failure.
973 *
974 * @param pIemCpu The IEM state.
975 * @param a_pu32 Where to return the opcode dword.
976 */
977#define IEM_OPCODE_GET_NEXT_U32(a_pIemCpu, a_pu32) \
978 do \
979 { \
980 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32((a_pIemCpu), (a_pu32)); \
981 if (rcStrict2 != VINF_SUCCESS) \
982 return rcStrict2; \
983 } while (0)
984
985
986/**
987 * Fetches the next opcode dword, sign extending it into a quad word.
988 *
989 * @returns Strict VBox status code.
990 * @param pIemCpu The IEM state.
991 * @param pu64 Where to return the opcode quad word.
992 */
993DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
994{
995 uint8_t const offOpcode = pIemCpu->offOpcode;
996 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
997 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
998
999 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1000 pIemCpu->abOpcode[offOpcode + 1],
1001 pIemCpu->abOpcode[offOpcode + 2],
1002 pIemCpu->abOpcode[offOpcode + 3]);
1003 *pu64 = i32;
1004 pIemCpu->offOpcode = offOpcode + 4;
1005 return VINF_SUCCESS;
1006}
1007
1008/**
1009 * Fetches the next opcode double word and sign extends it to a quad word,
1010 * returns automatically on failure.
1011 *
1012 * @param pIemCpu The IEM state.
1013 * @param a_pu64 Where to return the opcode quad word.
1014 */
1015#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pIemCpu, a_pu64) \
1016 do \
1017 { \
1018 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64((a_pIemCpu), (a_pu64)); \
1019 if (rcStrict2 != VINF_SUCCESS) \
1020 return rcStrict2; \
1021 } while (0)
1022
1023
1024/**
1025 * Fetches the next opcode qword.
1026 *
1027 * @returns Strict VBox status code.
1028 * @param pIemCpu The IEM state.
1029 * @param pu64 Where to return the opcode qword.
1030 */
1031DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1032{
1033 uint8_t const offOpcode = pIemCpu->offOpcode;
1034 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1035 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1036
1037 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1038 pIemCpu->abOpcode[offOpcode + 1],
1039 pIemCpu->abOpcode[offOpcode + 2],
1040 pIemCpu->abOpcode[offOpcode + 3],
1041 pIemCpu->abOpcode[offOpcode + 4],
1042 pIemCpu->abOpcode[offOpcode + 5],
1043 pIemCpu->abOpcode[offOpcode + 6],
1044 pIemCpu->abOpcode[offOpcode + 7]);
1045 pIemCpu->offOpcode = offOpcode + 8;
1046 return VINF_SUCCESS;
1047}
1048
1049/**
1050 * Fetches the next opcode qword, returns automatically on failure.
1051 *
1052 * @param pIemCpu The IEM state.
1053 * @param a_pu64 Where to return the opcode qword.
1054 */
1055#define IEM_OPCODE_GET_NEXT_U64(a_pIemCpu, a_pu64) \
1056 do \
1057 { \
1058 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64((a_pIemCpu), (a_pu64)); \
1059 if (rcStrict2 != VINF_SUCCESS) \
1060 return rcStrict2; \
1061 } while (0)
1062
1063
1064/** @name Raising Exceptions.
1065 *
1066 * @{
1067 */
1068
1069static VBOXSTRICTRC iemRaiseDivideError(PIEMCPU pIemCpu)
1070{
1071 AssertFailed(/** @todo implement this */);
1072 return VERR_NOT_IMPLEMENTED;
1073}
1074
1075
1076static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
1077{
1078 AssertFailed(/** @todo implement this */);
1079 return VERR_NOT_IMPLEMENTED;
1080}
1081
1082
1083static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
1084{
1085 AssertFailed(/** @todo implement this */);
1086 return VERR_NOT_IMPLEMENTED;
1087}
1088
1089
1090static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
1091{
1092 AssertFailed(/** @todo implement this */);
1093 return VERR_NOT_IMPLEMENTED;
1094}
1095
1096
1097static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
1098{
1099 AssertFailed(/** @todo implement this */);
1100 return VERR_NOT_IMPLEMENTED;
1101}
1102
1103
1104static VBOXSTRICTRC iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
1105{
1106 AssertFailed(/** @todo implement this */);
1107 return VERR_NOT_IMPLEMENTED;
1108}
1109
1110
1111static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
1112{
1113 AssertFailed(/** @todo implement this */);
1114 return VERR_NOT_IMPLEMENTED;
1115}
1116
1117
1118static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
1119{
1120 AssertFailed(/** @todo implement this */);
1121 return VERR_NOT_IMPLEMENTED;
1122}
1123
1124
1125/**
1126 * Macro for calling iemCImplRaiseInvalidLockPrefix().
1127 *
1128 * This enables us to add/remove arguments and force different levels of
1129 * inlining as we wish.
1130 *
1131 * @return Strict VBox status code.
1132 */
1133#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
1134IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
1135{
1136 AssertFailed();
1137 return VERR_NOT_IMPLEMENTED;
1138}
1139
1140
1141/**
1142 * Macro for calling iemCImplRaiseInvalidOpcode().
1143 *
1144 * This enables us to add/remove arguments and force different levels of
1145 * inlining as we wish.
1146 *
1147 * @return Strict VBox status code.
1148 */
1149#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
1150IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
1151{
1152 AssertFailed();
1153 return VERR_NOT_IMPLEMENTED;
1154}
1155
1156
1157/** @} */
1158
1159
1160/*
1161 *
1162 * Helper routines.
1163 * Helper routines.
1164 * Helper routines.
1165 *
1166 */
1167
1168/**
1169 * Recalculates the effective operand size.
1170 *
1171 * @param pIemCpu The IEM state.
1172 */
1173static void iemRecalEffOpSize(PIEMCPU pIemCpu)
1174{
1175 switch (pIemCpu->enmCpuMode)
1176 {
1177 case IEMMODE_16BIT:
1178 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
1179 break;
1180 case IEMMODE_32BIT:
1181 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
1182 break;
1183 case IEMMODE_64BIT:
1184 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
1185 {
1186 case 0:
1187 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
1188 break;
1189 case IEM_OP_PRF_SIZE_OP:
1190 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
1191 break;
1192 case IEM_OP_PRF_SIZE_REX_W:
1193 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
1194 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
1195 break;
1196 }
1197 break;
1198 default:
1199 AssertFailed();
1200 }
1201}
1202
1203
1204/**
1205 * Sets the default operand size to 64-bit and recalculates the effective
1206 * operand size.
1207 *
1208 * @param pIemCpu The IEM state.
1209 */
1210static void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
1211{
1212 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
1213 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
1214 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
1215 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
1216 else
1217 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
1218}
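/*
 * Editor's note (not part of the original file): resulting effective operand sizes in
 * 64-bit mode, as computed by the two routines above:
 *
 *     prefixes           default 32-bit ops    default 64-bit ops (near branches, push/pop, ...)
 *     none               32-bit                64-bit
 *     66h                16-bit                16-bit
 *     REX.W              64-bit                64-bit
 *     REX.W + 66h        64-bit                64-bit  (REX.W takes precedence over 66h)
 */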
1219
1220
1221/*
1222 *
1223 * Common opcode decoders.
1224 * Common opcode decoders.
1225 * Common opcode decoders.
1226 *
1227 */
1228
1229/** Stubs an opcode. */
1230#define FNIEMOP_STUB(a_Name) \
1231 FNIEMOP_DEF(a_Name) \
1232 { \
1233 IEMOP_MNEMONIC(#a_Name); \
1234 AssertMsgFailed(("After %d instructions\n", pIemCpu->cInstructions)); \
1235 return VERR_NOT_IMPLEMENTED; \
1236 } \
1237 typedef int ignore_semicolon
1238
1239
1240
1241/** @name Register Access.
1242 * @{
1243 */
1244
1245/**
1246 * Gets a reference (pointer) to the specified hidden segment register.
1247 *
1248 * @returns Hidden register reference.
1249 * @param pIemCpu The per CPU data.
1250 * @param iSegReg The segment register.
1251 */
1252static PCPUMSELREGHID iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
1253{
1254 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1255 switch (iSegReg)
1256 {
1257 case X86_SREG_ES: return &pCtx->esHid;
1258 case X86_SREG_CS: return &pCtx->csHid;
1259 case X86_SREG_SS: return &pCtx->ssHid;
1260 case X86_SREG_DS: return &pCtx->dsHid;
1261 case X86_SREG_FS: return &pCtx->fsHid;
1262 case X86_SREG_GS: return &pCtx->gsHid;
1263 }
1264 AssertFailedReturn(NULL);
1265}
1266
1267
1268/**
1269 * Gets a reference (pointer) to the specified segment register (the selector
1270 * value).
1271 *
1272 * @returns Pointer to the selector variable.
1273 * @param pIemCpu The per CPU data.
1274 * @param iSegReg The segment register.
1275 */
1276static uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
1277{
1278 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1279 switch (iSegReg)
1280 {
1281 case X86_SREG_ES: return &pCtx->es;
1282 case X86_SREG_CS: return &pCtx->cs;
1283 case X86_SREG_SS: return &pCtx->ss;
1284 case X86_SREG_DS: return &pCtx->ds;
1285 case X86_SREG_FS: return &pCtx->fs;
1286 case X86_SREG_GS: return &pCtx->gs;
1287 }
1288 AssertFailedReturn(NULL);
1289}
1290
1291
1292/**
1293 * Fetches the selector value of a segment register.
1294 *
1295 * @returns The selector value.
1296 * @param pIemCpu The per CPU data.
1297 * @param iSegReg The segment register.
1298 */
1299static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
1300{
1301 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1302 switch (iSegReg)
1303 {
1304 case X86_SREG_ES: return pCtx->es;
1305 case X86_SREG_CS: return pCtx->cs;
1306 case X86_SREG_SS: return pCtx->ss;
1307 case X86_SREG_DS: return pCtx->ds;
1308 case X86_SREG_FS: return pCtx->fs;
1309 case X86_SREG_GS: return pCtx->gs;
1310 }
1311 AssertFailedReturn(0xffff);
1312}
1313
1314
1315/**
1316 * Gets a reference (pointer) to the specified general register.
1317 *
1318 * @returns Register reference.
1319 * @param pIemCpu The per CPU data.
1320 * @param iReg The general register.
1321 */
1322static void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
1323{
1324 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1325 switch (iReg)
1326 {
1327 case X86_GREG_xAX: return &pCtx->rax;
1328 case X86_GREG_xCX: return &pCtx->rcx;
1329 case X86_GREG_xDX: return &pCtx->rdx;
1330 case X86_GREG_xBX: return &pCtx->rbx;
1331 case X86_GREG_xSP: return &pCtx->rsp;
1332 case X86_GREG_xBP: return &pCtx->rbp;
1333 case X86_GREG_xSI: return &pCtx->rsi;
1334 case X86_GREG_xDI: return &pCtx->rdi;
1335 case X86_GREG_x8: return &pCtx->r8;
1336 case X86_GREG_x9: return &pCtx->r9;
1337 case X86_GREG_x10: return &pCtx->r10;
1338 case X86_GREG_x11: return &pCtx->r11;
1339 case X86_GREG_x12: return &pCtx->r12;
1340 case X86_GREG_x13: return &pCtx->r13;
1341 case X86_GREG_x14: return &pCtx->r14;
1342 case X86_GREG_x15: return &pCtx->r15;
1343 }
1344 AssertFailedReturn(NULL);
1345}
1346
1347
1348/**
1349 * Gets a reference (pointer) to the specified 8-bit general register.
1350 *
1351 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
1352 *
1353 * @returns Register reference.
1354 * @param pIemCpu The per CPU data.
1355 * @param iReg The register.
1356 */
1357static uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
1358{
1359 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
1360 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
1361
1362 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
1363 if (iReg >= 4)
1364 pu8Reg++;
1365 return pu8Reg;
1366}
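/*
 * Editor's note (not part of the original file): without any REX prefix, 8-bit register
 * indexes 4-7 encode AH/CH/DH/BH, i.e. byte 1 of rAX/rCX/rDX/rBX, which is why the code
 * above masks the index with 3 and bumps the byte pointer.  With a REX prefix present,
 * indexes 4-7 encode SPL/BPL/SIL/DIL instead and map straight onto iemGRegRef.
 */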
1367
1368
1369/**
1370 * Fetches the value of an 8-bit general register.
1371 *
1372 * @returns The register value.
1373 * @param pIemCpu The per CPU data.
1374 * @param iReg The register.
1375 */
1376static uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
1377{
1378 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
1379 return *pbSrc;
1380}
1381
1382
1383/**
1384 * Fetches the value of a 16-bit general register.
1385 *
1386 * @returns The register value.
1387 * @param pIemCpu The per CPU data.
1388 * @param iReg The register.
1389 */
1390static uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
1391{
1392 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
1393}
1394
1395
1396/**
1397 * Fetches the value of a 32-bit general register.
1398 *
1399 * @returns The register value.
1400 * @param pIemCpu The per CPU data.
1401 * @param iReg The register.
1402 */
1403static uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
1404{
1405 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
1406}
1407
1408
1409/**
1410 * Fetches the value of a 64-bit general register.
1411 *
1412 * @returns The register value.
1413 * @param pIemCpu The per CPU data.
1414 * @param iReg The register.
1415 */
1416static uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
1417{
1418 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
1419}
1420
1421
1422/**
1423 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
1424 *
1425 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
1426 * segment limit.
1427 *
1428 * @param pIemCpu The per CPU data.
1429 * @param offNextInstr The offset of the next instruction.
1430 */
1431static VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
1432{
1433 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1434 switch (pIemCpu->enmEffOpSize)
1435 {
1436 case IEMMODE_16BIT:
1437 {
1438 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
1439 if ( uNewIp > pCtx->csHid.u32Limit
1440 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
1441 return iemRaiseGeneralProtectionFault0(pIemCpu);
1442 pCtx->rip = uNewIp;
1443 break;
1444 }
1445
1446 case IEMMODE_32BIT:
1447 {
1448 Assert(pCtx->rip <= UINT32_MAX);
1449 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
1450
1451 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
1452 if (uNewEip > pCtx->csHid.u32Limit)
1453 return iemRaiseGeneralProtectionFault0(pIemCpu);
1454 pCtx->rip = uNewEip;
1455 break;
1456 }
1457
1458 case IEMMODE_64BIT:
1459 {
1460 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
1461
1462 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
1463 if (!IEM_IS_CANONICAL(uNewRip))
1464 return iemRaiseGeneralProtectionFault0(pIemCpu);
1465 pCtx->rip = uNewRip;
1466 break;
1467 }
1468
1469 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1470 }
1471
1472 return VINF_SUCCESS;
1473}
1474
1475
1476/**
1477 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
1478 *
1479 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
1480 * segment limit.
1481 *
1482 * @returns Strict VBox status code.
1483 * @param pIemCpu The per CPU data.
1484 * @param offNextInstr The offset of the next instruction.
1485 */
1486static VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
1487{
1488 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1489 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
1490
1491 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
1492 if ( uNewIp > pCtx->csHid.u32Limit
1493 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
1494 return iemRaiseGeneralProtectionFault0(pIemCpu);
1495 /** @todo Test 16-bit jump in 64-bit mode. */
1496 pCtx->rip = uNewIp;
1497
1498 return VINF_SUCCESS;
1499}
1500
1501
1502/**
1503 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
1504 *
1505 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
1506 * segment limit.
1507 *
1508 * @returns Strict VBox status code.
1509 * @param pIemCpu The per CPU data.
1510 * @param offNextInstr The offset of the next instruction.
1511 */
1512static VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
1513{
1514 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1515 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
1516
1517 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
1518 {
1519 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
1520
1521 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
1522 if (uNewEip > pCtx->csHid.u32Limit)
1523 return iemRaiseGeneralProtectionFault0(pIemCpu);
1524 pCtx->rip = uNewEip;
1525 }
1526 else
1527 {
1528 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
1529
1530 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
1531 if (!IEM_IS_CANONICAL(uNewRip))
1532 return iemRaiseGeneralProtectionFault0(pIemCpu);
1533 pCtx->rip = uNewRip;
1534 }
1535 return VINF_SUCCESS;
1536}
1537
1538
1539/**
1540 * Performs a near jump to the specified address.
1541 *
1542 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
1543 * segment limit.
1544 *
1545 * @param pIemCpu The per CPU data.
1546 * @param uNewRip The new RIP value.
1547 */
1548static VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
1549{
1550 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1551 switch (pIemCpu->enmEffOpSize)
1552 {
1553 case IEMMODE_16BIT:
1554 {
1555 Assert(uNewRip <= UINT16_MAX);
1556 if ( uNewRip > pCtx->csHid.u32Limit
1557 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
1558 return iemRaiseGeneralProtectionFault0(pIemCpu);
1559 /** @todo Test 16-bit jump in 64-bit mode. */
1560 pCtx->rip = uNewRip;
1561 break;
1562 }
1563
1564 case IEMMODE_32BIT:
1565 {
1566 Assert(uNewRip <= UINT32_MAX);
1567 Assert(pCtx->rip <= UINT32_MAX);
1568 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
1569
1570 if (uNewRip > pCtx->csHid.u32Limit)
1571 return iemRaiseGeneralProtectionFault0(pIemCpu);
1572 pCtx->rip = uNewRip;
1573 break;
1574 }
1575
1576 case IEMMODE_64BIT:
1577 {
1578 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
1579
1580 if (!IEM_IS_CANONICAL(uNewRip))
1581 return iemRaiseGeneralProtectionFault0(pIemCpu);
1582 pCtx->rip = uNewRip;
1583 break;
1584 }
1585
1586 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1587 }
1588
1589 return VINF_SUCCESS;
1590}
1591
1592
1593/**
1594 * Get the address of the top of the stack.
1595 *
1596 * @param pCtx The CPU context which SP/ESP/RSP should be
1597 * read.
1598 */
1599DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCCPUMCTX pCtx)
1600{
1601 if (pCtx->ssHid.Attr.n.u1Long)
1602 return pCtx->rsp;
1603 if (pCtx->ssHid.Attr.n.u1DefBig)
1604 return pCtx->esp;
1605 return pCtx->sp;
1606}
1607
1608
1609/**
1610 * Updates the RIP/EIP/IP to point to the next instruction.
1611 *
1612 * @param pIemCpu The per CPU data.
1613 * @param cbInstr The number of bytes to add.
1614 */
1615static void iemRegAddToRip(PIEMCPU pIemCpu, uint8_t cbInstr)
1616{
1617 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1618 switch (pIemCpu->enmCpuMode)
1619 {
1620 case IEMMODE_16BIT:
1621 Assert(pCtx->rip <= UINT16_MAX);
1622 pCtx->eip += cbInstr;
1623 pCtx->eip &= UINT32_C(0xffff);
1624 break;
1625
1626 case IEMMODE_32BIT:
1627 pCtx->eip += cbInstr;
1628 Assert(pCtx->rip <= UINT32_MAX);
1629 break;
1630
1631 case IEMMODE_64BIT:
1632 pCtx->rip += cbInstr;
1633 break;
1634 default: AssertFailed();
1635 }
1636}
1637
1638
1639/**
1640 * Updates the RIP/EIP/IP to point to the next instruction.
1641 *
1642 * @param pIemCpu The per CPU data.
1643 */
1644static void iemRegUpdateRip(PIEMCPU pIemCpu)
1645{
1646 return iemRegAddToRip(pIemCpu, pIemCpu->offOpcode);
1647}
1648
1649
1650/**
1651 * Adds to the stack pointer.
1652 *
1653 * @param pCtx The CPU context which SP/ESP/RSP should be
1654 * updated.
1655 * @param cbToAdd The number of bytes to add.
1656 */
1657DECLINLINE(void) iemRegAddToRsp(PCPUMCTX pCtx, uint8_t cbToAdd)
1658{
1659 if (pCtx->ssHid.Attr.n.u1Long)
1660 pCtx->rsp += cbToAdd;
1661 else if (pCtx->ssHid.Attr.n.u1DefBig)
1662 pCtx->esp += cbToAdd;
1663 else
1664 pCtx->sp += cbToAdd;
1665}
1666
1667
1668/**
1669 * Subtracts from the stack pointer.
1670 *
1671 * @param pCtx The CPU context which SP/ESP/RSP should be
1672 * updated.
1673 * @param cbToSub The number of bytes to subtract.
1674 */
1675DECLINLINE(void) iemRegSubFromRsp(PCPUMCTX pCtx, uint8_t cbToSub)
1676{
1677 if (pCtx->ssHid.Attr.n.u1Long)
1678 pCtx->rsp -= cbToSub;
1679 else if (pCtx->ssHid.Attr.n.u1DefBig)
1680 pCtx->esp -= cbToSub;
1681 else
1682 pCtx->sp -= cbToSub;
1683}
1684
1685
1686/**
1687 * Adds to the temporary stack pointer.
1688 *
1689 * @param pTmpRsp The temporary SP/ESP/RSP to update.
1690 * @param cbToAdd The number of bytes to add.
1691 * @param pCtx Where to get the current stack mode.
1692 */
1693DECLINLINE(void) iemRegAddToRspEx(PRTUINT64U pTmpRsp, uint8_t cbToAdd, PCCPUMCTX pCtx)
1694{
1695 if (pCtx->ssHid.Attr.n.u1Long)
1696 pTmpRsp->u += cbToAdd;
1697 else if (pCtx->ssHid.Attr.n.u1DefBig)
1698 pTmpRsp->DWords.dw0 += cbToAdd;
1699 else
1700 pTmpRsp->Words.w0 += cbToAdd;
1701}
1702
1703
1704/**
1705 * Subtracts from the temporary stack pointer.
1706 *
1707 * @param pTmpRsp The temporary SP/ESP/RSP to update.
1708 * @param cbToSub The number of bytes to subtract.
1709 * @param pCtx Where to get the current stack mode.
1710 */
1711DECLINLINE(void) iemRegSubFromRspEx(PRTUINT64U pTmpRsp, uint8_t cbToSub, PCCPUMCTX pCtx)
1712{
1713 if (pCtx->ssHid.Attr.n.u1Long)
1714 pTmpRsp->u -= cbToSub;
1715 else if (pCtx->ssHid.Attr.n.u1DefBig)
1716 pTmpRsp->DWords.dw0 -= cbToSub;
1717 else
1718 pTmpRsp->Words.w0 -= cbToSub;
1719}
1720
1721
1722/**
1723 * Calculates the effective stack address for a push of the specified size as
1724 * well as the new RSP value (upper bits may be masked).
1725 *
1726 * @returns Effective stack address for the push.
1727 * @param pCtx Where to get the current stack mode.
1728 * @param cbItem The size of the stack item to push.
1729 * @param puNewRsp Where to return the new RSP value.
1730 */
1731DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
1732{
1733 RTUINT64U uTmpRsp;
1734 RTGCPTR GCPtrTop;
1735 uTmpRsp.u = pCtx->rsp;
1736
1737 if (pCtx->ssHid.Attr.n.u1Long)
1738 GCPtrTop = uTmpRsp.u -= cbItem;
1739 else if (pCtx->ssHid.Attr.n.u1DefBig)
1740 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
1741 else
1742 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
1743 *puNewRsp = uTmpRsp.u;
1744 return GCPtrTop;
1745}
1746
1747
1748/**
1749 * Gets the current stack pointer and calculates the value after a pop of the
1750 * specified size.
1751 *
1752 * @returns Current stack pointer.
1753 * @param pCtx Where to get the current stack mode.
1754 * @param cbItem The size of the stack item to pop.
1755 * @param puNewRsp Where to return the new RSP value.
1756 */
1757DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
1758{
1759 RTUINT64U uTmpRsp;
1760 RTGCPTR GCPtrTop;
1761 uTmpRsp.u = pCtx->rsp;
1762
1763 if (pCtx->ssHid.Attr.n.u1Long)
1764 {
1765 GCPtrTop = uTmpRsp.u;
1766 uTmpRsp.u += cbItem;
1767 }
1768 else if (pCtx->ssHid.Attr.n.u1DefBig)
1769 {
1770 GCPtrTop = uTmpRsp.DWords.dw0;
1771 uTmpRsp.DWords.dw0 += cbItem;
1772 }
1773 else
1774 {
1775 GCPtrTop = uTmpRsp.Words.w0;
1776 uTmpRsp.Words.w0 += cbItem;
1777 }
1778 *puNewRsp = uTmpRsp.u;
1779 return GCPtrTop;
1780}
1781
1782
1783/**
1784 * Calculates the effective stack address for a push of the specified size as
1785 * well as the new temporary RSP value (upper bits may be masked).
1786 *
1787 * @returns Effective stack address for the push.
1788 * @param pTmpRsp The temporary stack pointer. This is updated.
1789 * @param cbItem The size of the stack item to push.
1790 * @param pCtx Where to get the current stack mode.
1791 */
1792DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
1793{
1794 RTGCPTR GCPtrTop;
1795
1796 if (pCtx->ssHid.Attr.n.u1Long)
1797 GCPtrTop = pTmpRsp->u -= cbItem;
1798 else if (pCtx->ssHid.Attr.n.u1DefBig)
1799 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
1800 else
1801 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
1802 return GCPtrTop;
1803}
1804
1805
1806/**
1807 * Gets the effective stack address for a pop of the specified size and
1808 * calculates and updates the temporary RSP.
1809 *
1810 * @returns Current stack pointer.
1811 * @param pTmpRsp The temporary stack pointer. This is updated.
1812 * @param pCtx Where to get the current stack mode.
1813 * @param cbItem The size of the stack item to pop.
1814 */
1815DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
1816{
1817 RTGCPTR GCPtrTop;
1818 if (pCtx->ssHid.Attr.n.u1Long)
1819 {
1820 GCPtrTop = pTmpRsp->u;
1821 pTmpRsp->u += cbItem;
1822 }
1823 else if (pCtx->ssHid.Attr.n.u1DefBig)
1824 {
1825 GCPtrTop = pTmpRsp->DWords.dw0;
1826 pTmpRsp->DWords.dw0 += cbItem;
1827 }
1828 else
1829 {
1830 GCPtrTop = pTmpRsp->Words.w0;
1831 pTmpRsp->Words.w0 += cbItem;
1832 }
1833 return GCPtrTop;
1834}
1835
1836
1837/**
1838 * Checks if an AMD CPUID feature bit is set.
1839 *
1840 * @returns true / false.
1841 *
1842 * @param pIemCpu The IEM per CPU data.
1843 * @param fEdx The EDX bit to test, or 0 if ECX.
1844 * @param fEcx The ECX bit to test, or 0 if EDX.
1845 * @remarks Used via IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX.
1846 */
1847static bool iemRegIsAmdCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
1848{
1849 uint32_t uEax, uEbx, uEcx, uEdx;
1850 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x80000001, &uEax, &uEbx, &uEcx, &uEdx);
1851 return (fEcx && (uEcx & fEcx))
1852 || (fEdx && (uEdx & fEdx));
1853}
1854
1855/** @} */
1856
1857
1858/** @name Memory access.
1859 *
1860 * @{
1861 */
1862
1863
1864/**
1865 * Checks if the given segment can be written to, raising the appropriate
1866 * exception if not.
1867 *
1868 * @returns VBox strict status code.
1869 *
1870 * @param pIemCpu The IEM per CPU data.
1871 * @param pHid Pointer to the hidden register.
1872 * @param iSegReg The register number.
1873 */
1874static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
1875{
1876 if (!pHid->Attr.n.u1Present)
1877 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
1878
1879 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
1880 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
1881 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
1882 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
1883
1884 /** @todo DPL/RPL/CPL? */
1885
1886 return VINF_SUCCESS;
1887}
1888
1889
1890/**
1891 * Checks if the given segment can be read from, raising the appropriate
1892 * exception if not.
1893 *
1894 * @returns VBox strict status code.
1895 *
1896 * @param pIemCpu The IEM per CPU data.
1897 * @param pHid Pointer to the hidden register.
1898 * @param iSegReg The register number.
1899 */
1900static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
1901{
1902 if (!pHid->Attr.n.u1Present)
1903 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
1904
1905 if ( (pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE
1906 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
1907 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
1908
1909 /** @todo DPL/RPL/CPL? */
1910
1911 return VINF_SUCCESS;
1912}
1913
1914
1915/**
1916 * Applies the segment limit, base and attributes.
1917 *
1918 * This may raise a \#GP or \#SS.
1919 *
1920 * @returns VBox strict status code.
1921 *
1922 * @param pIemCpu The IEM per CPU data.
1923 * @param fAccess The kind of access which is being performed.
1924 * @param iSegReg The index of the segment register to apply.
1925 * This is UINT8_MAX if none (for IDT, GDT, LDT,
1926 * TSS, ++).
 * @param cbMem The number of bytes being accessed.
1927 * @param pGCPtrMem Pointer to the guest memory address to apply
1928 * segmentation to. Input and output parameter.
1929 */
1930static VBOXSTRICTRC iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg,
1931 size_t cbMem, PRTGCPTR pGCPtrMem)
1932{
1933 if (iSegReg == UINT8_MAX)
1934 return VINF_SUCCESS;
1935
1936 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
1937 switch (pIemCpu->enmCpuMode)
1938 {
1939 case IEMMODE_16BIT:
1940 case IEMMODE_32BIT:
1941 {
1942 RTGCPTR32 GCPtrFirst32 = *pGCPtrMem;
1943 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + cbMem - 1;
1944
1945 Assert(pSel->Attr.n.u1Present);
1946 Assert(pSel->Attr.n.u1DescType);
1947 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
1948 {
1949 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
1950 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
1951 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
1952
1953 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1954 {
1955 /** @todo CPL check. */
1956 }
1957
1958 /*
1959 * There are two kinds of data selectors, normal and expand down.
1960 */
1961 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
1962 {
1963 if ( GCPtrFirst32 > pSel->u32Limit
1964 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
1965 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
1966
1967 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
1968 }
1969 else
1970 {
1971 /** @todo implement expand down segments. */
1972 AssertFailed(/** @todo implement this */);
1973 return VERR_NOT_IMPLEMENTED;
1974 }
1975 }
1976 else
1977 {
1978
1979 /*
1980 * Code selectors can usually be used to read through; writing is
1981 * only permitted in real and V8086 mode.
1982 */
1983 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
1984 || ( (fAccess & IEM_ACCESS_TYPE_READ)
1985 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
1986 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
1987 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
1988
1989 if ( GCPtrFirst32 > pSel->u32Limit
1990 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
1991 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
1992
1993 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1994 {
1995 /** @todo CPL check. */
1996 }
1997
1998 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
1999 }
2000 return VINF_SUCCESS;
2001 }
2002
2003 case IEMMODE_64BIT:
2004 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
2005 *pGCPtrMem += pSel->u64Base;
2006 return VINF_SUCCESS;
2007
2008 default:
2009 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
2010 }
2011}
2012
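/*
 * Worked example for iemMemApplySegment (illustrative only, standard x86
 * segmentation rules): take a writable 32-bit data segment with
 * u64Base = 0x00010000 and u32Limit = 0x0000ffff.  A 4 byte write at
 * *pGCPtrMem = 0x0000fffc gives GCPtrFirst32 = 0xfffc and
 * GCPtrLast32 = 0xffff; both are within the limit, so the address is rebased
 * to linear 0x0001fffc.  The same write at 0x0000fffd has
 * GCPtrLast32 = 0x10000 > u32Limit and ends up in iemRaiseSelectorBounds.
 */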
2013
2014/**
2015 * Translates a virtual address to a physical address and checks if we
2016 * can access the page as specified.
2017 *
 * @returns VBox strict status code.
 *
2018 * @param pIemCpu The IEM per CPU data.
2019 * @param GCPtrMem The virtual address.
2020 * @param fAccess The intended access.
2021 * @param pGCPhysMem Where to return the physical address.
2022 */
2023static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
2024 PRTGCPHYS pGCPhysMem)
2025{
2026 /** @todo Need a different PGM interface here. We're currently using
2027 * generic / REM interfaces. This won't cut it for R0 & RC. */
2028 RTGCPHYS GCPhys;
2029 uint64_t fFlags;
2030 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
2031 if (RT_FAILURE(rc))
2032 {
2033 /** @todo Check unassigned memory in unpaged mode. */
2034 *pGCPhysMem = NIL_RTGCPHYS;
2035 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
2036 }
2037
2038 if ( (fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US)
2039 && ( ( (fAccess & IEM_ACCESS_TYPE_WRITE) /* Write to read only memory? */
2040 && !(fFlags & X86_PTE_RW)
2041 && ( pIemCpu->uCpl != 0
2042 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)) )
2043 || ( !(fFlags & X86_PTE_US) /* Kernel memory */
2044 && pIemCpu->uCpl == 3)
2045 || ( (fAccess & IEM_ACCESS_TYPE_EXEC) /* Executing non-executable memory? */
2046 && (fFlags & X86_PTE_PAE_NX)
2047 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
2048 )
2049 )
2050 {
2051 *pGCPhysMem = NIL_RTGCPHYS;
2052 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
2053 }
2054
2055 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
2056 *pGCPhysMem = GCPhys;
2057 return VINF_SUCCESS;
2058}
2059
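/*
 * Note (illustrative): on success the returned physical address keeps the
 * page offset of the virtual address, e.g. GCPtrMem = 0x00401234 backed by
 * page frame 0x007c9000 yields *pGCPhysMem = 0x007c9234.  The check above
 * mirrors the usual paging rules: ring-3 may not touch supervisor pages,
 * writes to read-only pages fault for ring-3 and, when CR0.WP is set, for
 * ring-0 too, and NX only matters once EFER.NXE is enabled.
 */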
2060
2061
2062/**
2063 * Maps a physical page.
2064 *
2065 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
2066 * @param pIemCpu The IEM per CPU data.
2067 * @param GCPhysMem The physical address.
2068 * @param fAccess The intended access.
2069 * @param ppvMem Where to return the mapping address.
2070 */
2071static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem)
2072{
2073#ifdef IEM_VERIFICATION_MODE
2074 /* Force the alternative path so we can ignore writes. */
2075 if (fAccess & IEM_ACCESS_TYPE_WRITE)
2076 return VERR_PGM_PHYS_TLB_CATCH_ALL;
2077#endif
2078
2079 /*
2080 * If we can map the page without trouble, we do block processing
2081 * until the end of the current page.
2082 */
2083 /** @todo need some better API. */
2084 return PGMR3PhysTlbGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu),
2085 GCPhysMem,
2086 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
2087 ppvMem);
2088}
2089
2090
2091/**
2092 * Looks up a memory mapping entry.
2093 *
2094 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
2095 * @param pIemCpu The IEM per CPU data.
2096 * @param pvMem The memory address.
2097 * @param fAccess The access type and origin the mapping was made with.
2098 */
2099DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
2100{
2101 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
2102 if ( pIemCpu->aMemMappings[0].pv == pvMem
2103 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
2104 return 0;
2105 if ( pIemCpu->aMemMappings[1].pv == pvMem
2106 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
2107 return 1;
2108 if ( pIemCpu->aMemMappings[2].pv == pvMem
2109 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
2110 return 2;
2111 return VERR_NOT_FOUND;
2112}
2113
2114
2115/**
2116 * Finds a free memmap entry when using iNextMapping doesn't work.
2117 *
2118 * @returns Memory mapping index, 1024 on failure.
2119 * @param pIemCpu The IEM per CPU data.
2120 */
2121static unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
2122{
2123 /*
2124 * The easy case.
2125 */
2126 if (pIemCpu->cActiveMappings == 0)
2127 {
2128 pIemCpu->iNextMapping = 1;
2129 return 0;
2130 }
2131
2132 /* There should be enough mappings for all instructions. */
2133 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
2134
2135 AssertFailed(); /** @todo implement me. */
2136 return 1024;
2137
2138}
2139
2140
2141/**
2142 * Commits a bounce buffer that needs writing back and unmaps it.
2143 *
2144 * @returns Strict VBox status code.
2145 * @param pIemCpu The IEM per CPU data.
2146 * @param iMemMap The index of the buffer to commit.
2147 */
2148static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
2149{
2150 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
2151 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
2152
2153 /*
2154 * Do the writing.
2155 */
2156 int rc;
2157#ifndef IEM_VERIFICATION_MODE /* No memory changes in verification mode. */
2158 if (!pIemCpu->aMemBbMappings[iMemMap].fUnassigned)
2159 {
2160 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
2161 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
2162 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
2163 if (!pIemCpu->fByPassHandlers)
2164 {
2165 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
2166 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
2167 pbBuf,
2168 cbFirst);
2169 if (cbSecond && rc == VINF_SUCCESS)
2170 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
2171 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
2172 pbBuf + cbFirst,
2173 cbSecond);
2174 }
2175 else
2176 {
2177 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
2178 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
2179 pbBuf,
2180 cbFirst);
2181 if (cbSecond && rc == VINF_SUCCESS)
2182 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
2183 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
2184 pbBuf + cbFirst,
2185 cbSecond);
2186 }
2187 }
2188 else
2189#endif
2190 rc = VINF_SUCCESS;
2191
2192 /*
2193 * Free the mapping entry.
2194 */
2195 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
2196 Assert(pIemCpu->cActiveMappings != 0);
2197 pIemCpu->cActiveMappings--;
2198 return rc;
2199}
2200
2201
2202/**
2203 * iemMemMap worker that deals with a request crossing pages.
2204 */
2205static VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem,
2206 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
2207{
2208 /*
2209 * Do the address translations.
2210 */
2211 RTGCPHYS GCPhysFirst;
2212 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
2213 if (rcStrict != VINF_SUCCESS)
2214 return rcStrict;
2215
2216 RTGCPHYS GCPhysSecond;
2217 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
2218 if (rcStrict != VINF_SUCCESS)
2219 return rcStrict;
2220 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2221
2222 /*
2223 * Read in the current memory content if it's a read or execute access.
2224 */
2225 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
2226 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
2227 uint32_t const cbSecondPage = cbMem - cbFirstPage;
2228
2229 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC))
2230 {
2231 int rc;
2232 if (!pIemCpu->fByPassHandlers)
2233 {
2234 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbFirstPage);
2235 if (rc != VINF_SUCCESS)
2236 return rc;
2237 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage);
2238 if (rc != VINF_SUCCESS)
2239 return rc;
2240 }
2241 else
2242 {
2243 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbFirstPage);
2244 if (rc != VINF_SUCCESS)
2245 return rc;
2246 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
2247 if (rc != VINF_SUCCESS)
2248 return rc;
2249 }
2250 }
2251#ifdef VBOX_STRICT
2252 else
2253 memset(pbBuf, 0xcc, cbMem);
2254#endif
2255#ifdef VBOX_STRICT
2256 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
2257 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
2258#endif
2259
2260 /*
2261 * Commit the bounce buffer entry.
2262 */
2263 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
2264 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
2265 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
2266 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
2267 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
2268 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
2269 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
2270 pIemCpu->cActiveMappings++;
2271
2272 *ppvMem = pbBuf;
2273 return VINF_SUCCESS;
2274}
2275
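/*
 * Split example (illustrative): with 4K pages, mapping cbMem = 4 bytes at a
 * GCPtrFirst whose page offset is 0xffe translates both pages and splits the
 * access into cbFirstPage = 0x1000 - 0xffe = 2 bytes from the first page and
 * cbSecondPage = 2 bytes from the second, all served from the bounce buffer.
 */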
2276
2277/**
2278 * iemMemMap worker that deals with iemMemPageMap failures.
2279 */
2280static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
2281 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
2282{
2283 /*
2284 * Filter out conditions we can handle and the ones which shouldn't happen.
2285 */
2286 if ( rcMap != VINF_PGM_PHYS_TLB_CATCH_WRITE
2287 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
2288 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
2289 {
2290 AssertReturn(RT_FAILURE_NP(rcMap), VERR_INTERNAL_ERROR_3);
2291 return rcMap;
2292 }
2293 pIemCpu->cPotentialExits++;
2294
2295 /*
2296 * Read in the current memory content if it's a read or execute access.
2297 */
2298 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
2299 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC))
2300 {
2301 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
2302 memset(pbBuf, 0xff, cbMem);
2303 else
2304 {
2305 int rc;
2306 if (!pIemCpu->fByPassHandlers)
2307 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem);
2308 else
2309 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
2310 if (rc != VINF_SUCCESS)
2311 return rc;
2312 }
2313 }
2314#ifdef VBOX_STRICT
2315 else
2316 memset(pbBuf, 0xcc, cbMem);
2317#endif
2318#ifdef VBOX_STRICT
2319 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
2320 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
2321#endif
2322
2323 /*
2324 * Commit the bounce buffer entry.
2325 */
2326 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
2327 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
2328 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
2329 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
2330 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
2331 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
2332 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
2333 pIemCpu->cActiveMappings++;
2334
2335 *ppvMem = pbBuf;
2336 return VINF_SUCCESS;
2337}
2338
2339
2340
2341/**
2342 * Maps the specified guest memory for the given kind of access.
2343 *
2344 * This may be using bounce buffering of the memory if it's crossing a page
2345 * boundary or if there is an access handler installed for any of it. Because
2346 * of lock prefix guarantees, we're in for some extra clutter when this
2347 * happens.
2348 *
2349 * This may raise a \#GP, \#SS, \#PF or \#AC.
2350 *
2351 * @returns VBox strict status code.
2352 *
2353 * @param pIemCpu The IEM per CPU data.
2354 * @param ppvMem Where to return the pointer to the mapped
2355 * memory.
2356 * @param cbMem The number of bytes to map. This is usually 1,
2357 * 2, 4, 6, 8, 12 or 16. When used by string
2358 * operations it can be up to a page.
2359 * @param iSegReg The index of the segment register to use for
2360 * this access. The base and limits are checked.
2361 * Use UINT8_MAX to indicate that no segmentation
2362 * is required (for IDT, GDT and LDT accesses).
2363 * @param GCPtrMem The address of the guest memory.
2364 * @param fAccess How the memory is being accessed. The
2365 * IEM_ACCESS_TYPE_XXX bit is used to figure out
2366 * how to map the memory, while the
2367 * IEM_ACCESS_WHAT_XXX bit is used when raising
2368 * exceptions.
2369 */
2370static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
2371{
2372 /*
2373 * Check the input and figure out which mapping entry to use.
2374 */
2375 Assert(cbMem <= 32); /* popa/pusha (32-bit) map 32 bytes. */
2376 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
2377
2378 unsigned iMemMap = pIemCpu->iNextMapping;
2379 if (iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings))
2380 {
2381 iMemMap = iemMemMapFindFree(pIemCpu);
2382 AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3);
2383 }
2384
2385 /*
2386 * Map the memory, checking that we can actually access it. If something
2387 * slightly complicated happens, fall back on bounce buffering.
2388 */
2389 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
2390 if (rcStrict != VINF_SUCCESS)
2391 return rcStrict;
2392
2393 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
2394 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
2395
2396 RTGCPHYS GCPhysFirst;
2397 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
2398 if (rcStrict != VINF_SUCCESS)
2399 return rcStrict;
2400
2401 void *pvMem;
2402 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem);
2403 if (rcStrict != VINF_SUCCESS)
2404 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
2405
2406 /*
2407 * Fill in the mapping table entry.
2408 */
2409 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
2410 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
2411 pIemCpu->iNextMapping = iMemMap + 1;
2412 pIemCpu->cActiveMappings++;
2413
2414 *ppvMem = pvMem;
2415 return VINF_SUCCESS;
2416}
2417
2418
2419/**
2420 * Commits the guest memory if bounce buffered and unmaps it.
2421 *
2422 * @returns Strict VBox status code.
2423 * @param pIemCpu The IEM per CPU data.
2424 * @param pvMem The mapping.
2425 * @param fAccess The kind of access.
2426 */
2427static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
2428{
2429 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
2430 AssertReturn(iMemMap >= 0, iMemMap);
2431
2432 /*
2433 * If it's bounce buffered, we need to write back the buffer.
2434 */
2435 if ( (pIemCpu->aMemMappings[iMemMap].fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_TYPE_WRITE))
2436 == (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_TYPE_WRITE))
2437 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
2438
2439 /* Free the entry. */
2440 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
2441 Assert(pIemCpu->cActiveMappings != 0);
2442 pIemCpu->cActiveMappings--;
2443 return VINF_SUCCESS;
2444}
2445
2446
2447/**
2448 * Fetches a data byte.
2449 *
2450 * @returns Strict VBox status code.
2451 * @param pIemCpu The IEM per CPU data.
2452 * @param pu8Dst Where to return the byte.
2453 * @param iSegReg The index of the segment register to use for
2454 * this access. The base and limits are checked.
2455 * @param GCPtrMem The address of the guest memory.
2456 */
2457static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
2458{
2459 /* The lazy approach for now... */
2460 uint8_t const *pu8Src;
2461 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
2462 if (rc == VINF_SUCCESS)
2463 {
2464 *pu8Dst = *pu8Src;
2465 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
2466 }
2467 return rc;
2468}
2469
2470
2471/**
2472 * Fetches a data word.
2473 *
2474 * @returns Strict VBox status code.
2475 * @param pIemCpu The IEM per CPU data.
2476 * @param pu16Dst Where to return the word.
2477 * @param iSegReg The index of the segment register to use for
2478 * this access. The base and limits are checked.
2479 * @param GCPtrMem The address of the guest memory.
2480 */
2481static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
2482{
2483 /* The lazy approach for now... */
2484 uint16_t const *pu16Src;
2485 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
2486 if (rc == VINF_SUCCESS)
2487 {
2488 *pu16Dst = *pu16Src;
2489 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
2490 }
2491 return rc;
2492}
2493
2494
2495/**
2496 * Fetches a data dword.
2497 *
2498 * @returns Strict VBox status code.
2499 * @param pIemCpu The IEM per CPU data.
2500 * @param pu32Dst Where to return the dword.
2501 * @param iSegReg The index of the segment register to use for
2502 * this access. The base and limits are checked.
2503 * @param GCPtrMem The address of the guest memory.
2504 */
2505static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
2506{
2507 /* The lazy approach for now... */
2508 uint32_t const *pu32Src;
2509 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
2510 if (rc == VINF_SUCCESS)
2511 {
2512 *pu32Dst = *pu32Src;
2513 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
2514 }
2515 return rc;
2516}
2517
2518
2519/**
2520 * Fetches a data dword and sign extends it to a qword.
2521 *
2522 * @returns Strict VBox status code.
2523 * @param pIemCpu The IEM per CPU data.
2524 * @param pu64Dst Where to return the sign extended value.
2525 * @param iSegReg The index of the segment register to use for
2526 * this access. The base and limits are checked.
2527 * @param GCPtrMem The address of the guest memory.
2528 */
2529static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
2530{
2531 /* The lazy approach for now... */
2532 int32_t const *pi32Src;
2533 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
2534 if (rc == VINF_SUCCESS)
2535 {
2536 *pu64Dst = *pi32Src;
2537 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
2538 }
2539 return rc;
2540}
2541
2542
2543/**
2544 * Fetches a data qword.
2545 *
2546 * @returns Strict VBox status code.
2547 * @param pIemCpu The IEM per CPU data.
2548 * @param pu64Dst Where to return the qword.
2549 * @param iSegReg The index of the segment register to use for
2550 * this access. The base and limits are checked.
2551 * @param GCPtrMem The address of the guest memory.
2552 */
2553static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
2554{
2555 /* The lazy approach for now... */
2556 uint64_t const *pu64Src;
2557 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
2558 if (rc == VINF_SUCCESS)
2559 {
2560 *pu64Dst = *pu64Src;
2561 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
2562 }
2563 return rc;
2564}
2565
2566
2567/**
2568 * Fetches a descriptor register (lgdt, lidt).
2569 *
2570 * @returns Strict VBox status code.
2571 * @param pIemCpu The IEM per CPU data.
2572 * @param pcbLimit Where to return the limit.
2573 * @param pGCPtrBase Where to return the base.
2574 * @param iSegReg The index of the segment register to use for
2575 * this access. The base and limits are checked.
2576 * @param GCPtrMem The address of the guest memory.
2577 * @param enmOpSize The effective operand size.
2578 */
2579static VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase,
2580 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
2581{
2582 uint8_t const *pu8Src;
2583 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
2584 (void **)&pu8Src,
2585 enmOpSize == IEMMODE_64BIT
2586 ? 2 + 8
2587 : enmOpSize == IEMMODE_32BIT
2588 ? 2 + 4
2589 : 2 + 3,
2590 iSegReg,
2591 GCPtrMem,
2592 IEM_ACCESS_DATA_R);
2593 if (rcStrict == VINF_SUCCESS)
2594 {
2595 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
2596 switch (enmOpSize)
2597 {
2598 case IEMMODE_16BIT:
2599 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
2600 break;
2601 case IEMMODE_32BIT:
2602 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
2603 break;
2604 case IEMMODE_64BIT:
2605 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
2606 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
2607 break;
2608
2609 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2610 }
2611 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
2612 }
2613 return rcStrict;
2614}
2615
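/*
 * Operand layout example (illustrative): lgdt/lidt read a 2 byte limit
 * followed by the base.  Given the bytes ff 03 00 80 0b 00 at GCPtrMem,
 * *pcbLimit becomes 0x03ff and *pGCPtrBase becomes 0x000b8000 with a 32-bit
 * operand size; with a 16-bit operand size only three base bytes are used and
 * the top byte is forced to zero (the 80286 compatible 24-bit base).
 */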
2616
2617
2618/**
2619 * Stores a data byte.
2620 *
2621 * @returns Strict VBox status code.
2622 * @param pIemCpu The IEM per CPU data.
2623 * @param iSegReg The index of the segment register to use for
2624 * this access. The base and limits are checked.
2625 * @param GCPtrMem The address of the guest memory.
2626 * @param u8Value The value to store.
2627 */
2628static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
2629{
2630 /* The lazy approach for now... */
2631 uint8_t *pu8Dst;
2632 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
2633 if (rc == VINF_SUCCESS)
2634 {
2635 *pu8Dst = u8Value;
2636 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
2637 }
2638 return rc;
2639}
2640
2641
2642/**
2643 * Stores a data word.
2644 *
2645 * @returns Strict VBox status code.
2646 * @param pIemCpu The IEM per CPU data.
2647 * @param iSegReg The index of the segment register to use for
2648 * this access. The base and limits are checked.
2649 * @param GCPtrMem The address of the guest memory.
2650 * @param u16Value The value to store.
2651 */
2652static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
2653{
2654 /* The lazy approach for now... */
2655 uint16_t *pu16Dst;
2656 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
2657 if (rc == VINF_SUCCESS)
2658 {
2659 *pu16Dst = u16Value;
2660 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
2661 }
2662 return rc;
2663}
2664
2665
2666/**
2667 * Stores a data dword.
2668 *
2669 * @returns Strict VBox status code.
2670 * @param pIemCpu The IEM per CPU data.
2671 * @param iSegReg The index of the segment register to use for
2672 * this access. The base and limits are checked.
2673 * @param GCPtrMem The address of the guest memory.
2674 * @param u32Value The value to store.
2675 */
2676static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
2677{
2678 /* The lazy approach for now... */
2679 uint32_t *pu32Dst;
2680 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
2681 if (rc == VINF_SUCCESS)
2682 {
2683 *pu32Dst = u32Value;
2684 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
2685 }
2686 return rc;
2687}
2688
2689
2690/**
2691 * Stores a data qword.
2692 *
2693 * @returns Strict VBox status code.
2694 * @param pIemCpu The IEM per CPU data.
2695 * @param iSegReg The index of the segment register to use for
2696 * this access. The base and limits are checked.
2697 * @param GCPtrMem The address of the guest memory.
2698 * @param u64Value The value to store.
2699 */
2700static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
2701{
2702 /* The lazy approach for now... */
2703 uint64_t *pu64Dst;
2704 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
2705 if (rc == VINF_SUCCESS)
2706 {
2707 *pu64Dst = u64Value;
2708 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
2709 }
2710 return rc;
2711}
2712
2713
2714/**
2715 * Pushes a word onto the stack.
2716 *
2717 * @returns Strict VBox status code.
2718 * @param pIemCpu The IEM per CPU data.
2719 * @param u16Value The value to push.
2720 */
2721static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
2722{
2723 /* Decrement the stack pointer. */
2724 uint64_t uNewRsp;
2725 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2726 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 2, &uNewRsp);
2727
2728 /* Write the word the lazy way. */
2729 uint16_t *pu16Dst;
2730 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
2731 if (rc == VINF_SUCCESS)
2732 {
2733 *pu16Dst = u16Value;
2734 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
2735 }
2736
2737 /* Commit the new RSP value unless an access handler made trouble. */
2738 if (rc == VINF_SUCCESS)
2739 pCtx->rsp = uNewRsp;
2740
2741 return rc;
2742}
2743
2744
2745/**
2746 * Pushes a dword onto the stack.
2747 *
2748 * @returns Strict VBox status code.
2749 * @param pIemCpu The IEM per CPU data.
2750 * @param u32Value The value to push.
2751 */
2752static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
2753{
2754 /* Decrement the stack pointer. */
2755 uint64_t uNewRsp;
2756 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2757 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 4, &uNewRsp);
2758
2759 /* Write the dword the lazy way. */
2760 uint32_t *pu32Dst;
2761 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
2762 if (rc == VINF_SUCCESS)
2763 {
2764 *pu32Dst = u32Value;
2765 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
2766 }
2767
2768 /* Commit the new RSP value unless an access handler made trouble. */
2769 if (rc == VINF_SUCCESS)
2770 pCtx->rsp = uNewRsp;
2771
2772 return rc;
2773}
2774
2775
2776/**
2777 * Pushes a qword onto the stack.
2778 *
2779 * @returns Strict VBox status code.
2780 * @param pIemCpu The IEM per CPU data.
2781 * @param u64Value The value to push.
2782 */
2783static VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
2784{
2785 /* Decrement the stack pointer. */
2786 uint64_t uNewRsp;
2787 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2788 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 8, &uNewRsp);
2789
2790 /* Write the qword the lazy way. */
2791 uint64_t *pu64Dst;
2792 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
2793 if (rc == VINF_SUCCESS)
2794 {
2795 *pu64Dst = u64Value;
2796 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
2797 }
2798
2799 /* Commit the new RSP value unless an access handler made trouble. */
2800 if (rc == VINF_SUCCESS)
2801 pCtx->rsp = uNewRsp;
2802
2803 return rc;
2804}
2805
2806
2807/**
2808 * Pops a word from the stack.
2809 *
2810 * @returns Strict VBox status code.
2811 * @param pIemCpu The IEM per CPU data.
2812 * @param pu16Value Where to store the popped value.
2813 */
2814static VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
2815{
2816 /* Increment the stack pointer. */
2817 uint64_t uNewRsp;
2818 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2819 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 2, &uNewRsp);
2820
2821 /* Read the word the lazy way. */
2822 uint16_t const *pu16Src;
2823 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
2824 if (rc == VINF_SUCCESS)
2825 {
2826 *pu16Value = *pu16Src;
2827 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
2828
2829 /* Commit the new RSP value. */
2830 if (rc == VINF_SUCCESS)
2831 pCtx->rsp = uNewRsp;
2832 }
2833
2834 return rc;
2835}
2836
2837
2838/**
2839 * Pops a dword from the stack.
2840 *
2841 * @returns Strict VBox status code.
2842 * @param pIemCpu The IEM per CPU data.
2843 * @param pu32Value Where to store the popped value.
2844 */
2845static VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
2846{
2847 /* Increment the stack pointer. */
2848 uint64_t uNewRsp;
2849 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2850 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 4, &uNewRsp);
2851
2852 /* Read the dword the lazy way. */
2853 uint32_t const *pu32Src;
2854 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
2855 if (rc == VINF_SUCCESS)
2856 {
2857 *pu32Value = *pu32Src;
2858 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
2859
2860 /* Commit the new RSP value. */
2861 if (rc == VINF_SUCCESS)
2862 pCtx->rsp = uNewRsp;
2863 }
2864
2865 return rc;
2866}
2867
2868
2869/**
2870 * Pops a qword from the stack.
2871 *
2872 * @returns Strict VBox status code.
2873 * @param pIemCpu The IEM per CPU data.
2874 * @param pu64Value Where to store the popped value.
2875 */
2876static VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
2877{
2878 /* Increment the stack pointer. */
2879 uint64_t uNewRsp;
2880 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2881 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 8, &uNewRsp);
2882
2883 /* Read the qword the lazy way. */
2884 uint64_t const *pu64Src;
2885 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
2886 if (rc == VINF_SUCCESS)
2887 {
2888 *pu64Value = *pu64Src;
2889 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
2890
2891 /* Commit the new RSP value. */
2892 if (rc == VINF_SUCCESS)
2893 pCtx->rsp = uNewRsp;
2894 }
2895
2896 return rc;
2897}
2898
2899
2900/**
2901 * Pushes a word onto the stack, using a temporary stack pointer.
2902 *
2903 * @returns Strict VBox status code.
2904 * @param pIemCpu The IEM per CPU data.
2905 * @param u16Value The value to push.
2906 * @param pTmpRsp Pointer to the temporary stack pointer.
2907 */
2908static VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
2909{
2910 /* Decrement the stack pointer. */
2911 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2912 RTUINT64U NewRsp = *pTmpRsp;
2913 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 2, pCtx);
2914
2915 /* Write the word the lazy way. */
2916 uint16_t *pu16Dst;
2917 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
2918 if (rc == VINF_SUCCESS)
2919 {
2920 *pu16Dst = u16Value;
2921 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
2922 }
2923
2924 /* Commit the new RSP value unless an access handler made trouble. */
2925 if (rc == VINF_SUCCESS)
2926 *pTmpRsp = NewRsp;
2927
2928 return rc;
2929}
2930
2931
2932/**
2933 * Pushes a dword onto the stack, using a temporary stack pointer.
2934 *
2935 * @returns Strict VBox status code.
2936 * @param pIemCpu The IEM per CPU data.
2937 * @param u32Value The value to push.
2938 * @param pTmpRsp Pointer to the temporary stack pointer.
2939 */
2940static VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
2941{
2942 /* Decrement the stack pointer. */
2943 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2944 RTUINT64U NewRsp = *pTmpRsp;
2945 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 4, pCtx);
2946
2947 /* Write the dword the lazy way. */
2948 uint32_t *pu32Dst;
2949 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
2950 if (rc == VINF_SUCCESS)
2951 {
2952 *pu32Dst = u32Value;
2953 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
2954 }
2955
2956 /* Commit the new RSP value unless an access handler made trouble. */
2957 if (rc == VINF_SUCCESS)
2958 *pTmpRsp = NewRsp;
2959
2960 return rc;
2961}
2962
2963
2964/**
2965 * Pushes a qword onto the stack, using a temporary stack pointer.
2966 *
2967 * @returns Strict VBox status code.
2968 * @param pIemCpu The IEM per CPU data.
2969 * @param u64Value The value to push.
2970 * @param pTmpRsp Pointer to the temporary stack pointer.
2971 */
2972static VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
2973{
2974 /* Decrement the stack pointer. */
2975 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2976 RTUINT64U NewRsp = *pTmpRsp;
2977 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 8, pCtx);
2978
2979 /* Write the qword the lazy way. */
2980 uint64_t *pu64Dst;
2981 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
2982 if (rc == VINF_SUCCESS)
2983 {
2984 *pu64Dst = u64Value;
2985 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
2986 }
2987
2988 /* Commit the new RSP value unless an access handler made trouble. */
2989 if (rc == VINF_SUCCESS)
2990 *pTmpRsp = NewRsp;
2991
2992 return rc;
2993}
2994
2995
2996/**
2997 * Pops a word from the stack, using a temporary stack pointer.
2998 *
2999 * @returns Strict VBox status code.
3000 * @param pIemCpu The IEM per CPU data.
3001 * @param pu16Value Where to store the popped value.
3002 * @param pTmpRsp Pointer to the temporary stack pointer.
3003 */
3004static VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
3005{
3006 /* Increment the stack pointer. */
3007 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3008 RTUINT64U NewRsp = *pTmpRsp;
3009 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 2, pCtx);
3010
3011 /* Read the word the lazy way. */
3012 uint16_t const *pu16Src;
3013 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
3014 if (rc == VINF_SUCCESS)
3015 {
3016 *pu16Value = *pu16Src;
3017 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
3018
3019 /* Commit the new RSP value. */
3020 if (rc == VINF_SUCCESS)
3021 *pTmpRsp = NewRsp;
3022 }
3023
3024 return rc;
3025}
3026
3027
3028/**
3029 * Pops a dword from the stack, using a temporary stack pointer.
3030 *
3031 * @returns Strict VBox status code.
3032 * @param pIemCpu The IEM per CPU data.
3033 * @param pu32Value Where to store the popped value.
3034 * @param pTmpRsp Pointer to the temporary stack pointer.
3035 */
3036static VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
3037{
3038 /* Increment the stack pointer. */
3039 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3040 RTUINT64U NewRsp = *pTmpRsp;
3041 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 4, pCtx);
3042
3043 /* Read the dword the lazy way. */
3044 uint32_t const *pu32Src;
3045 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
3046 if (rc == VINF_SUCCESS)
3047 {
3048 *pu32Value = *pu32Src;
3049 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
3050
3051 /* Commit the new RSP value. */
3052 if (rc == VINF_SUCCESS)
3053 *pTmpRsp = NewRsp;
3054 }
3055
3056 return rc;
3057}
3058
3059
3060/**
3061 * Pops a qword from the stack, using a temporary stack pointer.
3062 *
3063 * @returns Strict VBox status code.
3064 * @param pIemCpu The IEM per CPU data.
3065 * @param pu64Value Where to store the popped value.
3066 * @param pTmpRsp Pointer to the temporary stack pointer.
3067 */
3068static VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
3069{
3070 /* Increment the stack pointer. */
3071 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3072 RTUINT64U NewRsp = *pTmpRsp;
3073 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);
3074
3075 /* Read the qword the lazy way. */
3076 uint64_t const *pu64Src;
3077 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
3078 if (rcStrict == VINF_SUCCESS)
3079 {
3080 *pu64Value = *pu64Src;
3081 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
3082
3083 /* Commit the new RSP value. */
3084 if (rcStrict == VINF_SUCCESS)
3085 *pTmpRsp = NewRsp;
3086 }
3087
3088 return rcStrict;
3089}
3090
3091
3092/**
3093 * Begin a special stack push (used by interrupts, exceptions and such).
3094 *
3095 * This will raise \#SS or \#PF if appropriate.
3096 *
3097 * @returns Strict VBox status code.
3098 * @param pIemCpu The IEM per CPU data.
3099 * @param cbMem The number of bytes to push onto the stack.
3100 * @param ppvMem Where to return the pointer to the stack memory.
3101 * As with the other memory functions this could be
3102 * direct access or bounce buffered access, so
3103 * don't commit register until the commit call
3104 * succeeds.
3105 * @param puNewRsp Where to return the new RSP value. This must be
3106 * passed unchanged to
3107 * iemMemStackPushCommitSpecial().
3108 */
3109static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
3110{
3111 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3112 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, cbMem, puNewRsp);
3113 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
3114}
3115
3116
3117/**
3118 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
3119 *
3120 * This will update the rSP.
3121 *
3122 * @returns Strict VBox status code.
3123 * @param pIemCpu The IEM per CPU data.
3124 * @param pvMem The pointer returned by
3125 * iemMemStackPushBeginSpecial().
3126 * @param uNewRsp The new RSP value returned by
3127 * iemMemStackPushBeginSpecial().
3128 */
3129static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
3130{
3131 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
3132 if (rcStrict == VINF_SUCCESS)
3133 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
3134 return rcStrict;
3135}
3136
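/*
 * Usage sketch for the special stack push (illustrative only; the 16-bit
 * frame and the uIp/uCs/uFlags locals are hypothetical, error handling
 * abbreviated):
 *
 *      uint64_t     uNewRsp;
 *      uint16_t    *pu16Frame;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          pu16Frame[0] = uIp;      // lowest address = new top of stack
 *          pu16Frame[1] = uCs;
 *          pu16Frame[2] = uFlags;
 *          rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
 *      }
 *      // RSP is only updated by the commit call, and only if it succeeds.
 */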
3137
3138/**
3139 * Begin a special stack pop (used by iret, retf and such).
3140 *
3141 * This will raise \#SS or \#PF if appropriate.
3142 *
3143 * @returns Strict VBox status code.
3144 * @param pIemCpu The IEM per CPU data.
3145 * @param cbMem The number of bytes to pop off the stack.
3146 * @param ppvMem Where to return the pointer to the stack memory.
3147 * @param puNewRsp Where to return the new RSP value. This must be
3148 * passed unchanged to
3149 * iemMemStackPopCommitSpecial().
3150 */
3151static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
3152{
3153 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3154 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, cbMem, puNewRsp);
3155 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
3156}
3157
3158
3159/**
3160 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
3161 *
3162 * This will update the rSP.
3163 *
3164 * @returns Strict VBox status code.
3165 * @param pIemCpu The IEM per CPU data.
3166 * @param pvMem The pointer returned by
3167 * iemMemStackPopBeginSpecial().
3168 * @param uNewRsp The new RSP value returned by
3169 * iemMemStackPopBeginSpecial().
3170 */
3171static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
3172{
3173 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
3174 if (rcStrict == VINF_SUCCESS)
3175 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
3176 return rcStrict;
3177}
3178
3179
3180/**
3181 * Fetches a descriptor table entry.
3182 *
3183 * @returns Strict VBox status code.
3184 * @param pIemCpu The IEM per CPU.
3185 * @param pDesc Where to return the descriptor table entry.
3186 * @param uSel The selector which table entry to fetch.
3187 */
3188static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel)
3189{
3190 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3191
3192 /** @todo did the 286 require all 8 bytes to be accessible? */
3193 /*
3194 * Get the selector table base and check bounds.
3195 */
3196 RTGCPTR GCPtrBase;
3197 if (uSel & X86_SEL_LDT)
3198 {
3199 if ( !pCtx->ldtrHid.Attr.n.u1Present
3200 || (uSel | 0x7) > pCtx->ldtrHid.u32Limit )
3201 {
3202 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
3203 uSel, pCtx->ldtrHid.u32Limit, pCtx->ldtr));
3204 /** @todo is this the right exception? */
3205 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
3206 }
3207
3208 Assert(pCtx->ldtrHid.Attr.n.u1Present);
3209 GCPtrBase = pCtx->ldtrHid.u64Base;
3210 }
3211 else
3212 {
3213 if ((uSel | 0x7) > pCtx->gdtr.cbGdt)
3214 {
3215 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
3216 /** @todo is this the right exception? */
3217 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
3218 }
3219 GCPtrBase = pCtx->gdtr.pGdt;
3220 }
3221
3222 /*
3223 * Read the legacy descriptor and maybe the long mode extensions if
3224 * required.
3225 */
3226 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
3227 if (rcStrict == VINF_SUCCESS)
3228 {
3229 if ( !IEM_IS_LONG_MODE(pIemCpu)
3230 || pDesc->Legacy.Gen.u1DescType)
3231 pDesc->Long.au64[1] = 0;
3232 else if ((uint32_t)(uSel & X86_SEL_MASK) + 15 < (uSel & X86_SEL_LDT ? pCtx->ldtrHid.u32Limit : pCtx->gdtr.cbGdt))
3233 rcStrict = iemMemFetchDataU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8);
3234 else
3235 {
3236 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
3237 /** @todo is this the right exception? */
3238 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
3239 }
3240 }
3241 return rcStrict;
3242}
3243
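/*
 * Selector decode example (illustrative): for uSel = 0x001b the RPL is 3,
 * the TI bit (X86_SEL_LDT) is clear so the GDT is used, and
 * (uSel & X86_SEL_MASK) = 0x18 selects the fourth entry, read from
 * gdtr.pGdt + 0x18.  The bounds check requires (uSel | 7) = 0x1f to be within
 * gdtr.cbGdt.  In long mode, system descriptors are 16 bytes, which is why
 * the second 8 byte read above is needed.
 */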
3244
3245/**
3246 * Marks the selector descriptor as accessed (only non-system descriptors).
3247 *
3248 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
3249 * will therefore skip the limit checks.
3250 *
3251 * @returns Strict VBox status code.
3252 * @param pIemCpu The IEM per CPU.
3253 * @param uSel The selector.
3254 */
3255static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
3256{
3257 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3258
3259 /*
3260 * Get the selector table base and check bounds.
3261 */
3262 RTGCPTR GCPtr = uSel & X86_SEL_LDT
3263 ? pCtx->ldtrHid.u64Base
3264 : pCtx->gdtr.pGdt;
3265 GCPtr += uSel & X86_SEL_MASK;
3266 GCPtr += 2 + 2;
3267 uint32_t volatile *pu32; /** @todo Does the CPU do a 32-bit or 8-bit access here? */
3268 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_DATA_RW);
3269 if (rcStrict == VINF_SUCCESS)
3270 {
3271 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is bit 0 of the type field, which starts at bit 8 of the dword mapped at offset 4. */
3272
3273 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_DATA_RW);
3274 }
3275
3276 return rcStrict;
3277}
3278
3279/** @} */
3280
3281
3282/** @name Misc Helpers
3283 * @{
3284 */
3285
3286/**
3287 * Checks if we are allowed to access the given I/O port, raising the
3288 * appropriate exceptions if we aren't (or if the I/O bitmap is not
3289 * accessible).
3290 *
3291 * @returns Strict VBox status code.
3292 *
3293 * @param pIemCpu The IEM per CPU data.
3294 * @param pCtx The register context.
3295 * @param u16Port The port number.
3296 * @param cbOperand The operand size.
3297 */
3298DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
3299{
3300 if ( (pCtx->cr0 & X86_CR0_PE)
3301 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
3302 || pCtx->eflags.Bits.u1VM) )
3303 {
3304 /** @todo I/O port permission bitmap check */
3305 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
3306 }
3307 return VINF_SUCCESS;
3308}
3309
3310/** @} */
3311
3312
3313/** @name C Implementations
3314 * @{
3315 */
3316
3317/**
3318 * Implements a 16-bit popa.
3319 */
3320IEM_CIMPL_DEF_0(iemCImpl_popa_16)
3321{
3322 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3323 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
3324 RTGCPTR GCPtrLast = GCPtrStart + 15;
3325 VBOXSTRICTRC rcStrict;
3326
3327 /*
3328 * The docs are a bit hard to comprehend here, but it looks like we wrap
3329 * around in real mode as long as none of the individual pops crosses the
3330 * end of the stack segment. In protected mode we check the whole access
3331 * in one go. For efficiency, only do the word-by-word thing if we're in
3332 * danger of wrapping around.
3333 */
3334 /** @todo do popa boundary / wrap-around checks. */
3335 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
3336 && (pCtx->csHid.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
3337 {
3338 /* word-by-word */
3339 RTUINT64U TmpRsp;
3340 TmpRsp.u = pCtx->rsp;
3341 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->di, &TmpRsp);
3342 if (rcStrict == VINF_SUCCESS)
3343 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->si, &TmpRsp);
3344 if (rcStrict == VINF_SUCCESS)
3345 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bp, &TmpRsp);
3346 if (rcStrict == VINF_SUCCESS)
3347 {
3348 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
3349 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bx, &TmpRsp);
3350 }
3351 if (rcStrict == VINF_SUCCESS)
3352 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->dx, &TmpRsp);
3353 if (rcStrict == VINF_SUCCESS)
3354 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->cx, &TmpRsp);
3355 if (rcStrict == VINF_SUCCESS)
3356 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->ax, &TmpRsp);
3357 if (rcStrict == VINF_SUCCESS)
3358 {
3359 pCtx->rsp = TmpRsp.u;
3360 iemRegAddToRip(pIemCpu, cbInstr);
3361 }
3362 }
3363 else
3364 {
3365 uint16_t const *pa16Mem = NULL;
3366 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
3367 if (rcStrict == VINF_SUCCESS)
3368 {
3369 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
3370 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
3371 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
3372 /* skip sp */
3373 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
3374 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
3375 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
3376 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
3377 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
3378 if (rcStrict == VINF_SUCCESS)
3379 {
3380 iemRegAddToRsp(pCtx, 16);
3381 iemRegAddToRip(pIemCpu, cbInstr);
3382 }
3383 }
3384 }
3385 return rcStrict;
3386}
3387
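/*
 * Layout note (illustrative): pusha stores AX, CX, DX, BX, SP, BP, SI, DI
 * from the highest stack address downwards, so in the mapped block DI sits at
 * the lowest offset.  Since X86_GREG_xAX..X86_GREG_xDI are 0..7, the
 * expression pa16Mem[7 - X86_GREG_xDI] picks index 0 for DI and
 * pa16Mem[7 - X86_GREG_xAX] picks index 7 for AX, matching that layout.
 */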
3388
3389/**
3390 * Implements a 32-bit popa.
3391 */
3392IEM_CIMPL_DEF_0(iemCImpl_popa_32)
3393{
3394 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3395 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
3396 RTGCPTR GCPtrLast = GCPtrStart + 31;
3397 VBOXSTRICTRC rcStrict;
3398
3399 /*
3400 * The docs are a bit hard to comprehend here, but it looks like we wrap
3401 * around in real mode as long as none of the individual pops crosses the
3402 * end of the stack segment. In protected mode we check the whole access
3403 * in one go. For efficiency, only do the word-by-word thing if we're in
3404 * danger of wrapping around.
3405 */
3406 /** @todo do popa boundary / wrap-around checks. */
3407 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
3408 && (pCtx->csHid.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
3409 {
3410 /* word-by-word */
3411 RTUINT64U TmpRsp;
3412 TmpRsp.u = pCtx->rsp;
3413 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edi, &TmpRsp);
3414 if (rcStrict == VINF_SUCCESS)
3415 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->esi, &TmpRsp);
3416 if (rcStrict == VINF_SUCCESS)
3417 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebp, &TmpRsp);
3418 if (rcStrict == VINF_SUCCESS)
3419 {
3420 iemRegAddToRspEx(&TmpRsp, 4, pCtx); /* skip esp */
3421 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebx, &TmpRsp);
3422 }
3423 if (rcStrict == VINF_SUCCESS)
3424 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edx, &TmpRsp);
3425 if (rcStrict == VINF_SUCCESS)
3426 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ecx, &TmpRsp);
3427 if (rcStrict == VINF_SUCCESS)
3428 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->eax, &TmpRsp);
3429 if (rcStrict == VINF_SUCCESS)
3430 {
3431#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
3432 pCtx->rdi &= UINT32_MAX;
3433 pCtx->rsi &= UINT32_MAX;
3434 pCtx->rbp &= UINT32_MAX;
3435 pCtx->rbx &= UINT32_MAX;
3436 pCtx->rdx &= UINT32_MAX;
3437 pCtx->rcx &= UINT32_MAX;
3438 pCtx->rax &= UINT32_MAX;
3439#endif
3440 pCtx->rsp = TmpRsp.u;
3441 iemRegAddToRip(pIemCpu, cbInstr);
3442 }
3443 }
3444 else
3445 {
3446 uint32_t const *pa32Mem;
3447 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
3448 if (rcStrict == VINF_SUCCESS)
3449 {
3450 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
3451 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
3452 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
3453 /* skip esp */
3454 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
3455 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
3456 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
3457 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
3458 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
3459 if (rcStrict == VINF_SUCCESS)
3460 {
3461 iemRegAddToRsp(pCtx, 32);
3462 iemRegAddToRip(pIemCpu, cbInstr);
3463 }
3464 }
3465 }
3466 return rcStrict;
3467}
3468
3469
3470/**
3471 * Implements a 16-bit pusha.
3472 */
3473IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
3474{
3475 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3476 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
3477 RTGCPTR GCPtrBottom = GCPtrTop - 15;
3478 VBOXSTRICTRC rcStrict;
3479
3480 /*
3481 * The docs are a bit hard to comprehend here, but it looks like we wrap
3482 * around in real mode as long as none of the individual pushes crosses the
3483 * end of the stack segment. In protected mode we check the whole access
3484 * in one go. For efficiency, only do the word-by-word thing if we're in
3485 * danger of wrapping around.
3486 */
3487 /** @todo do pusha boundary / wrap-around checks. */
3488 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
3489 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
3490 {
3491 /* word-by-word */
3492 RTUINT64U TmpRsp;
3493 TmpRsp.u = pCtx->rsp;
3494 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->ax, &TmpRsp);
3495 if (rcStrict == VINF_SUCCESS)
3496 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->cx, &TmpRsp);
3497 if (rcStrict == VINF_SUCCESS)
3498 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->dx, &TmpRsp);
3499 if (rcStrict == VINF_SUCCESS)
3500 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bx, &TmpRsp);
3501 if (rcStrict == VINF_SUCCESS)
3502 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->sp, &TmpRsp);
3503 if (rcStrict == VINF_SUCCESS)
3504 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bp, &TmpRsp);
3505 if (rcStrict == VINF_SUCCESS)
3506 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->si, &TmpRsp);
3507 if (rcStrict == VINF_SUCCESS)
3508 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->di, &TmpRsp);
3509 if (rcStrict == VINF_SUCCESS)
3510 {
3511 pCtx->rsp = TmpRsp.u;
3512 iemRegAddToRip(pIemCpu, cbInstr);
3513 }
3514 }
3515 else
3516 {
3517 uint16_t *pa16Mem = NULL;
3518 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
3519 if (rcStrict == VINF_SUCCESS)
3520 {
3521 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
3522 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
3523 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
3524 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
3525 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
3526 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
3527 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
3528 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
3529 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
3530 if (rcStrict == VINF_SUCCESS)
3531 {
3532 iemRegSubFromRsp(pCtx, 16);
3533 iemRegAddToRip(pIemCpu, cbInstr);
3534 }
3535 }
3536 }
3537 return rcStrict;
3538}
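/*
 * Illustrative sketch (not part of IEM): how the GCPtrBottom > GCPtrTop test
 * in the pusha/popa implementations above spots a stack that is about to wrap
 * in real mode.  With a small SP the bottom of the 16-byte frame ends up
 * numerically above the top, so the careful word-by-word path is taken.
 * The values below are made up for the demonstration.
 */
#if 0 /* standalone example */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t uSp         = 0x000a;          /* stack pointer before the PUSHA */
    uint16_t GCPtrTop    = uSp;
    uint16_t GCPtrBottom = GCPtrTop - 15;   /* wraps to 0xfffb in 16-bit arithmetic */
    printf("top=%#06x bottom=%#06x wrap=%d\n", GCPtrTop, GCPtrBottom, GCPtrBottom > GCPtrTop);
    return 0;
}
#endif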
3539
3540
3541/**
3542 * Implements a 32-bit pusha.
3543 */
3544IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
3545{
3546 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3547 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
3548 RTGCPTR GCPtrBottom = GCPtrTop - 31;
3549 VBOXSTRICTRC rcStrict;
3550
3551 /*
3552 * The docs are a bit hard to comprehend here, but it looks like we wrap
3553 * around in real mode as long as none of the individual pushes crosses the
3554 * end of the stack segment. In protected mode we check the whole access
3555 * in one go. For efficiency, only do the word-by-word thing if we're in
3556 * danger of wrapping around.
3557 */
3558 /** @todo do pusha boundary / wrap-around checks. */
3559 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
3560 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
3561 {
3562 /* word-by-word */
3563 RTUINT64U TmpRsp;
3564 TmpRsp.u = pCtx->rsp;
3565 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->eax, &TmpRsp);
3566 if (rcStrict == VINF_SUCCESS)
3567 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ecx, &TmpRsp);
3568 if (rcStrict == VINF_SUCCESS)
3569 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edx, &TmpRsp);
3570 if (rcStrict == VINF_SUCCESS)
3571 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebx, &TmpRsp);
3572 if (rcStrict == VINF_SUCCESS)
3573 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esp, &TmpRsp);
3574 if (rcStrict == VINF_SUCCESS)
3575 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebp, &TmpRsp);
3576 if (rcStrict == VINF_SUCCESS)
3577 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esi, &TmpRsp);
3578 if (rcStrict == VINF_SUCCESS)
3579 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edi, &TmpRsp);
3580 if (rcStrict == VINF_SUCCESS)
3581 {
3582 pCtx->rsp = TmpRsp.u;
3583 iemRegAddToRip(pIemCpu, cbInstr);
3584 }
3585 }
3586 else
3587 {
3588 uint32_t *pa32Mem;
3589 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
3590 if (rcStrict == VINF_SUCCESS)
3591 {
3592 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
3593 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
3594 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
3595 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
3596 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
3597 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
3598 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
3599 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
3600 rcStrict = iemMemCommitAndUnmap(pIemCpu, pa32Mem, IEM_ACCESS_STACK_W);
3601 if (rcStrict == VINF_SUCCESS)
3602 {
3603 iemRegSubFromRsp(pCtx, 32);
3604 iemRegAddToRip(pIemCpu, cbInstr);
3605 }
3606 }
3607 }
3608 return rcStrict;
3609}
3610
3611
3612/**
3613 * Implements pushf.
3614 *
3615 *
3616 * @param enmEffOpSize The effective operand size.
3617 */
3618IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
3619{
3620 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3621
3622 /*
3623 * If we're in V8086 mode some care is required (which is why we're
3624 * doing this in a C implementation).
3625 */
3626 uint32_t fEfl = pCtx->eflags.u;
3627 if ( (fEfl & X86_EFL_VM)
3628 && X86_EFL_GET_IOPL(fEfl) != 3 )
3629 {
3630 Assert(pCtx->cr0 & X86_CR0_PE);
3631 if ( enmEffOpSize != IEMMODE_16BIT
3632 || !(pCtx->cr4 & X86_CR4_VME))
3633 return iemRaiseGeneralProtectionFault0(pIemCpu);
3634 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
3635 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
3636 return iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
3637 }
3638
3639 /*
3640 * Ok, clear RF and VM and push the flags.
3641 */
3642 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
3643
3644 VBOXSTRICTRC rcStrict;
3645 switch (enmEffOpSize)
3646 {
3647 case IEMMODE_16BIT:
3648 rcStrict = iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
3649 break;
3650 case IEMMODE_32BIT:
3651 rcStrict = iemMemStackPushU32(pIemCpu, fEfl);
3652 break;
3653 case IEMMODE_64BIT:
3654 rcStrict = iemMemStackPushU64(pIemCpu, fEfl);
3655 break;
3656 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3657 }
3658 if (rcStrict != VINF_SUCCESS)
3659 return rcStrict;
3660
3661 iemRegAddToRip(pIemCpu, cbInstr);
3662 return VINF_SUCCESS;
3663}
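/*
 * Illustrative sketch (not part of IEM): the (19 - 9) shift used in the VME
 * path of pushf above.  VIF lives in EFLAGS bit 19 and IF in bit 9, so
 * shifting right by ten copies the virtual interrupt flag into the IF
 * position of the 16-bit image that gets pushed.
 */
#if 0 /* standalone example */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t const fVif = UINT32_C(1) << 19;    /* X86_EFL_VIF */
    uint32_t const fIf  = UINT32_C(1) << 9;     /* X86_EFL_IF  */
    uint32_t fEfl = fVif;                       /* VIF set, IF clear */
    fEfl &= ~fIf;                               /* mirror the code: drop the real IF... */
    fEfl |= (fEfl & fVif) >> (19 - 9);          /* ...and copy VIF into its place */
    printf("IF bit of the pushed image: %u\n", (fEfl >> 9) & 1);  /* prints 1 */
    return 0;
}
#endif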
3664
3665
3666/**
3667 * Implements popf.
3668 *
3669 * @param enmEffOpSize The effective operand size.
3670 */
3671IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
3672{
3673 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3674 uint32_t const fEflOld = pCtx->eflags.u;
3675 VBOXSTRICTRC rcStrict;
3676 uint32_t fEflNew;
3677
3678 /*
3679 * V8086 is special as usual.
3680 */
3681 if (fEflOld & X86_EFL_VM)
3682 {
3683 /*
3684 * Almost anything goes if IOPL is 3.
3685 */
3686 if (X86_EFL_GET_IOPL(fEflOld) == 3)
3687 {
3688 switch (enmEffOpSize)
3689 {
3690 case IEMMODE_16BIT:
3691 {
3692 uint16_t u16Value;
3693 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
3694 if (rcStrict != VINF_SUCCESS)
3695 return rcStrict;
3696 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
3697 break;
3698 }
3699 case IEMMODE_32BIT:
3700 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
3701 if (rcStrict != VINF_SUCCESS)
3702 return rcStrict;
3703 break;
3704 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3705 }
3706
3707 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
3708 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
3709 }
3710 /*
3711 * Interrupt flag virtualization with CR4.VME=1.
3712 */
3713 else if ( enmEffOpSize == IEMMODE_16BIT
3714 && (pCtx->cr4 & X86_CR4_VME) )
3715 {
3716 uint16_t u16Value;
3717 RTUINT64U TmpRsp;
3718 TmpRsp.u = pCtx->rsp;
3719 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
3720 if (rcStrict != VINF_SUCCESS)
3721 return rcStrict;
3722
3723 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
3724 * or before? */
3725 if ( ( (u16Value & X86_EFL_IF)
3726 && (fEflOld & X86_EFL_VIP))
3727 || (u16Value & X86_EFL_TF) )
3728 return iemRaiseGeneralProtectionFault0(pIemCpu);
3729
3730 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
3731 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
3732 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
3733 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
3734
3735 pCtx->rsp = TmpRsp.u;
3736 }
3737 else
3738 return iemRaiseGeneralProtectionFault0(pIemCpu);
3739
3740 }
3741 /*
3742 * Not in V8086 mode.
3743 */
3744 else
3745 {
3746 /* Pop the flags. */
3747 switch (enmEffOpSize)
3748 {
3749 case IEMMODE_16BIT:
3750 {
3751 uint16_t u16Value;
3752 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
3753 if (rcStrict != VINF_SUCCESS)
3754 return rcStrict;
3755 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
3756 break;
3757 }
3758 case IEMMODE_32BIT:
3759 case IEMMODE_64BIT:
3760 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
3761 if (rcStrict != VINF_SUCCESS)
3762 return rcStrict;
3763 break;
3764 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3765 }
3766
3767 /* Merge them with the current flags. */
3768 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
3769 || pIemCpu->uCpl == 0)
3770 {
3771 fEflNew &= X86_EFL_POPF_BITS;
3772 fEflNew |= ~X86_EFL_POPF_BITS & fEflOld;
3773 }
3774 else if (pIemCpu->uCpl <= X86_EFL_GET_IOPL(fEflOld))
3775 {
3776 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
3777 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
3778 }
3779 else
3780 {
3781 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
3782 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
3783 }
3784 }
3785
3786 /*
3787 * Commit the flags.
3788 */
3789 pCtx->eflags.u = fEflNew;
3790 iemRegAddToRip(pIemCpu, cbInstr);
3791
3792 return VINF_SUCCESS;
3793}
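/*
 * Illustrative sketch (not part of IEM): the mask-and-merge idiom popf uses
 * above.  Bits inside the writable mask come from the popped value and all
 * other bits are preserved from the old EFLAGS, i.e.
 * new = (popped & mask) | (old & ~mask).  The mask value here is made up.
 */
#if 0 /* standalone example */
#include <stdint.h>
#include <stdio.h>

static uint32_t MergeByMask(uint32_t uOld, uint32_t uPopped, uint32_t fWritable)
{
    uint32_t uNew = uPopped & fWritable;    /* take the writable bits from the popped value */
    uNew |= uOld & ~fWritable;              /* keep the protected bits from the old value */
    return uNew;
}

int main(void)
{
    /* Pretend only the low byte is writable: the upper byte of the popped value is ignored. */
    printf("%#06x\n", MergeByMask(0x1200 /*old*/, 0x00ff /*popped*/, 0x00ff /*mask*/)); /* 0x12ff */
    return 0;
}
#endif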
3794
3795
3796/**
3797 * Implements a 16-bit relative call.
3798 *
3799 *
3800 * @param offDisp The displacement offset.
3801 */
3802IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
3803{
3804 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3805 uint16_t OldPC = pCtx->ip + cbInstr;
3806 uint16_t NewPC = OldPC + offDisp;
3807 if (NewPC > pCtx->csHid.u32Limit)
3808 return iemRaiseGeneralProtectionFault0(pIemCpu);
3809
3810 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, OldPC);
3811 if (rcStrict != VINF_SUCCESS)
3812 return rcStrict;
3813
3814 pCtx->rip = NewPC;
3815 return VINF_SUCCESS;
3816}
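/*
 * Illustrative sketch (not part of IEM): because OldPC and NewPC above are
 * 16-bit, the target of a 16-bit relative call wraps modulo 64K before the
 * CS limit check is applied.  The values are made up for the demonstration.
 */
#if 0 /* standalone example */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t OldPC   = 0x0005;              /* IP of the instruction following the call */
    int16_t  offDisp = -16;                 /* backwards displacement from the opcode */
    uint16_t NewPC   = OldPC + offDisp;     /* wraps to 0xfff5 */
    printf("NewPC=%#06x\n", NewPC);
    return 0;
}
#endif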
3817
3818
3819/**
3820 * Implements a 32-bit relative call.
3821 *
3822 *
3823 * @param offDisp The displacement offset.
3824 */
3825IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
3826{
3827 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3828 uint32_t OldPC = pCtx->eip + cbInstr;
3829 uint32_t NewPC = OldPC + offDisp;
3830 if (NewPC > pCtx->csHid.u32Limit)
3831 return iemRaiseGeneralProtectionFault0(pIemCpu);
3832
3833 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, OldPC);
3834 if (rcStrict != VINF_SUCCESS)
3835 return rcStrict;
3836
3837 pCtx->rip = NewPC;
3838 return VINF_SUCCESS;
3839}
3840
3841
3842/**
3843 * Implements a 64-bit relative call.
3844 *
3845 *
3846 * @param offDisp The displacement offset.
3847 */
3848IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
3849{
3850 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3851 uint64_t OldPC = pCtx->rip + cbInstr;
3852
3853 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, OldPC);
3854 if (rcStrict != VINF_SUCCESS)
3855 return rcStrict;
3856
3857 pCtx->rip = OldPC + offDisp;
3858 return VINF_SUCCESS;
3859}
3860
3861
3862/**
3863 * Implements far jumps.
3864 *
3865 * @param uSel The selector.
3866 * @param offSeg The segment offset.
3867 */
3868IEM_CIMPL_DEF_2(iemCImpl_FarJmp, uint16_t, uSel, uint32_t, offSeg)
3869{
3870 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3871
3872 /*
3873 * Real mode and V8086 mode are easy. The only snag seems to be that
3874 * CS.limit doesn't change and the limit check is done against the current
3875 * limit.
3876 */
3877 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
3878 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3879 {
3880 if (offSeg > pCtx->csHid.u32Limit)
3881 return iemRaiseGeneralProtectionFault0(pIemCpu);
3882
3883 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
3884 pCtx->rip = offSeg;
3885 else
3886 pCtx->rip = offSeg & UINT16_MAX;
3887 pCtx->cs = uSel;
3888 pCtx->csHid.u64Base = (uint32_t)uSel << 4;
3889 /** @todo REM reset the accessed bit (see on jmp far16 after disabling
3890 * PE). Check with VT-x and AMD-V. */
3891#ifdef IEM_VERIFICATION_MODE
3892 pCtx->csHid.Attr.u &= ~X86_SEL_TYPE_ACCESSED;
3893#endif
3894 return VINF_SUCCESS;
3895 }
3896
3897 /*
3898 * Protected mode. Need to parse the specified descriptor...
3899 */
3900 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
3901 {
3902 Log(("jmpf %04x:%08x -> invalid selector, #GP(0)\n", uSel, offSeg));
3903 return iemRaiseGeneralProtectionFault0(pIemCpu);
3904 }
3905
3906 /* Fetch the descriptor. */
3907 IEMSELDESC Desc;
3908 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
3909 if (rcStrict != VINF_SUCCESS)
3910 return rcStrict;
3911
3912 /* Is it there? */
3913 if (!Desc.Legacy.Gen.u1Present)
3914 {
3915 Log(("jmpf %04x:%08x -> segment not present\n", uSel, offSeg));
3916 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
3917 }
3918
3919 /*
3920 * Deal with it according to its type.
3921 */
3922 if (Desc.Legacy.Gen.u1DescType)
3923 {
3924 /* Only code segments. */
3925 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3926 {
3927 Log(("jmpf %04x:%08x -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
3928 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
3929 }
3930
3931 /* L vs D. */
3932 if ( Desc.Legacy.Gen.u1Long
3933 && Desc.Legacy.Gen.u1DefBig
3934 && IEM_IS_LONG_MODE(pIemCpu))
3935 {
3936 Log(("jmpf %04x:%08x -> both L and D are set.\n", uSel, offSeg));
3937 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
3938 }
3939
3940 /* DPL/RPL/CPL check, where conforming segments make a difference. */
3941 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3942 {
3943 if (Desc.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3944 {
3945 Log(("jmpf %04x:%08x -> DPL violation (conforming); DPL=%d CPL=%u\n",
3946 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3947 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
3948 }
3949 }
3950 else
3951 {
3952 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
3953 {
3954 Log(("jmpf %04x:%08x -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3955 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
3956 }
3957 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
3958 {
3959 Log(("jmpf %04x:%08x -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
3960 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
3961 }
3962 }
3963
3964 /* Limit check. (Should alternatively check for non-canonical addresses
3965 here, but that is ruled out by offSeg being 32-bit, right?) */
3966 uint64_t u64Base;
3967 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
3968 if (Desc.Legacy.Gen.u1Granularity)
3969 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
3970 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3971 u64Base = 0;
3972 else
3973 {
3974 if (offSeg > cbLimit)
3975 {
3976 Log(("jmpf %04x:%08x -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
3977 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
3978 }
3979 u64Base = X86DESC_BASE(Desc.Legacy);
3980 }
3981
3982 /*
3983 * Ok, everything checked out fine. Now set the accessed bit before
3984 * committing the result into CS, CSHID and RIP.
3985 */
3986 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3987 {
3988 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
3989 if (rcStrict != VINF_SUCCESS)
3990 return rcStrict;
3991 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3992 }
3993
3994 /* commit */
3995 pCtx->rip = offSeg;
3996 pCtx->cs = uSel & (X86_SEL_MASK | X86_SEL_LDT);
3997 pCtx->cs |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
3998 pCtx->csHid.Attr.u = (Desc.Legacy.u >> (16+16+8)) & UINT32_C(0xf0ff);
3999 pCtx->csHid.u32Limit = cbLimit;
4000 pCtx->csHid.u64Base = u64Base;
4001 /** @todo check if the hidden bits are loaded correctly for 64-bit
4002 * mode. */
4003 return VINF_SUCCESS;
4004 }
4005
4006 /*
4007 * System selector.
4008 */
4009 if (IEM_IS_LONG_MODE(pIemCpu))
4010 switch (Desc.Legacy.Gen.u4Type)
4011 {
4012 case AMD64_SEL_TYPE_SYS_LDT:
4013 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
4014 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
4015 case AMD64_SEL_TYPE_SYS_CALL_GATE:
4016 case AMD64_SEL_TYPE_SYS_INT_GATE:
4017 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
4018 /* Call various functions to do the work. */
4019 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
4020 default:
4021 Log(("jmpf %04x:%08x -> wrong sys selector (64-bit): %d\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
4022 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
4023
4024 }
4025 switch (Desc.Legacy.Gen.u4Type)
4026 {
4027 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4028 case X86_SEL_TYPE_SYS_LDT:
4029 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4030 case X86_SEL_TYPE_SYS_TASK_GATE:
4031 case X86_SEL_TYPE_SYS_286_INT_GATE:
4032 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4033 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4034 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4035 case X86_SEL_TYPE_SYS_386_INT_GATE:
4036 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4037 /* Call various functions to do the work. */
4038 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
4039
4040 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4041 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4042 /* Call various functions to do the work. */
4043 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
4044
4045 default:
4046 Log(("jmpf %04x:%08x -> wrong sys selector (32-bit): %d\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
4047 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
4048 }
4049}
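/*
 * Illustrative sketch (not part of IEM): the granularity expansion used in the
 * far jump limit check above.  With the descriptor G bit set the 20-bit raw
 * limit is in 4 KiB units, so a raw limit of 0x0000f covers 0x0000ffff bytes.
 */
#if 0 /* standalone example; 12 is the x86 page shift, 0xfff the page offset mask */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t cbLimit = 0x0000f;                 /* raw 20-bit limit from the descriptor */
    int      fGran   = 1;                       /* descriptor G (granularity) bit */
    if (fGran)
        cbLimit = (cbLimit << 12) | 0xfff;      /* scale to pages and fill in the page offset */
    printf("effective limit: %#x\n", cbLimit);  /* 0xffff */
    return 0;
}
#endif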
4050
4051
4052/**
4053 * Implements far calls.
4054 *
4055 * @param uSel The selector.
4056 * @param offSeg The segment offset.
4057 * @param enmOpSize The operand size (in case we need it).
4058 */
4059IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmOpSize)
4060{
4061 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4062 VBOXSTRICTRC rcStrict;
4063 uint64_t uNewRsp;
4064 void *pvRet;
4065
4066 /*
4067 * Real mode and V8086 mode are easy. The only snag seems to be that
4068 * CS.limit doesn't change and the limit check is done against the current
4069 * limit.
4070 */
4071 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
4072 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4073 {
4074 Assert(enmOpSize == IEMMODE_16BIT || enmOpSize == IEMMODE_32BIT);
4075
4076 /* Check stack first - may #SS(0). */
4077 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmOpSize == IEMMODE_32BIT ? 8 : 4,
4078 &pvRet, &uNewRsp);
4079 if (rcStrict != VINF_SUCCESS)
4080 return rcStrict;
4081
4082 /* Check the target address range. */
4083 if (offSeg > UINT32_MAX)
4084 return iemRaiseGeneralProtectionFault0(pIemCpu);
4085
4086 /* Everything is fine, push the return address. */
4087 if (enmOpSize == IEMMODE_16BIT)
4088 {
4089 ((uint16_t *)pvRet)[0] = pCtx->ip + cbInstr;
4090 ((uint16_t *)pvRet)[1] = pCtx->cs;
4091 }
4092 else
4093 {
4094 ((uint32_t *)pvRet)[0] = pCtx->eip + cbInstr;
4095 ((uint16_t *)pvRet)[2] = pCtx->cs;
4096 }
4097 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pvRet, uNewRsp);
4098 if (rcStrict != VINF_SUCCESS)
4099 return rcStrict;
4100
4101 /* Branch. */
4102 pCtx->rip = offSeg;
4103 pCtx->cs = uSel;
4104 pCtx->csHid.u64Base = (uint32_t)uSel << 4;
4105 /** @todo Does REM reset the accessed bit here too? (See on jmp far16
4106 * after disabling PE.) Check with VT-x and AMD-V. */
4107#ifdef IEM_VERIFICATION_MODE
4108 pCtx->csHid.Attr.u &= ~X86_SEL_TYPE_ACCESSED;
4109#endif
4110 return VINF_SUCCESS;
4111 }
4112
4113 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
4114}
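/*
 * Illustrative sketch (not part of IEM): the real-mode far call return frame
 * written above.  With a 16-bit operand size the frame is IP then CS (four
 * bytes); with a 32-bit operand size it is EIP followed by CS in the low word
 * of the second dword (eight bytes), matching what retf pops further down.
 * The return address and selector values are made up.
 */
#if 0 /* standalone example */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t au32Frame[2] = { 0, 0 };           /* 8-byte frame for the 32-bit case */
    au32Frame[0] = 0x00001234;                  /* return EIP, bytes 0..3 */
    ((uint16_t *)au32Frame)[2] = 0x5678;        /* return CS, bytes 4..5; bytes 6..7 unused */
    uint8_t const *pb = (uint8_t const *)au32Frame;
    for (unsigned i = 0; i < 8; i++)
        printf("%02x ", pb[i]);                 /* 34 12 00 00 78 56 00 00 on little endian */
    printf("\n");
    return 0;
}
#endif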
4115
4116
4117/**
4118 * Implements retf.
4119 *
4120 * @param enmEffOpSize The effective operand size.
4121 * @param cbPop The amount of arguments to pop from the stack
4122 * (bytes).
4123 */
4124IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
4125{
4126 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4127 VBOXSTRICTRC rcStrict;
4128 uint64_t uNewRsp;
4129
4130 /*
4131 * Real mode and V8086 mode are easy.
4132 */
4133 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
4134 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4135 {
4136 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
4137 uint16_t const *pu16Frame;
4138 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 8 : 4,
4139 (void const **)&pu16Frame, &uNewRsp);
4140 if (rcStrict != VINF_SUCCESS)
4141 return rcStrict;
4142 uint32_t uNewEip;
4143 uint16_t uNewCs;
4144 if (enmEffOpSize == IEMMODE_32BIT)
4145 {
4146 uNewCs = pu16Frame[2];
4147 uNewEip = RT_MAKE_U32(pu16Frame[0], pu16Frame[1]);
4148 }
4149 else
4150 {
4151 uNewCs = pu16Frame[1];
4152 uNewEip = pu16Frame[0];
4153 }
4154 /** @todo check how this is supposed to work if sp=0xfffe. */
4155
4156 /* Check the limit of the new EIP. */
4157 /** @todo Intel pseudo code only does the limit check for 16-bit
4158 * operands, AMD does not make any distinction. What is right? */
4159 if (uNewEip > pCtx->csHid.u32Limit)
4160 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
4161
4162 /* commit the operation. */
4163 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
4164 if (rcStrict != VINF_SUCCESS)
4165 return rcStrict;
4166 pCtx->rip = uNewEip;
4167 pCtx->cs = uNewCs;
4168 pCtx->csHid.u64Base = (uint32_t)uNewCs << 4;
4169 /** @todo do we load attribs and limit as well? */
4170 if (cbPop)
4171 iemRegAddToRsp(pCtx, cbPop);
4172 return VINF_SUCCESS;
4173 }
4174
4175 AssertFailed();
4176 return VERR_NOT_IMPLEMENTED;
4177}
4178
4179
4180/**
4181 * Implements int3 and int XX.
4182 *
4183 * @param u8Int The interrupt vector number.
4184 * @param fIsBpInstr Is it the breakpoint instruction.
4185 */
4186IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr)
4187{
4188 /** @todo we should call TRPM to do this job. */
4189 VBOXSTRICTRC rcStrict;
4190 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4191
4192 /*
4193 * Real mode is easy.
4194 */
4195 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
4196 && IEM_IS_REAL_MODE(pIemCpu))
4197 {
4198 /* read the IDT entry. */
4199 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Int + 3)
4200 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Int << X86_TRAP_ERR_SEL_SHIFT));
4201 RTFAR16 Idte;
4202 rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Int);
4203 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4204 return rcStrict;
4205
4206 /* push the stack frame. */
4207 uint16_t *pu16Frame;
4208 uint64_t uNewRsp;
4209 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
4210 if (rcStrict != VINF_SUCCESS)
4211 return rcStrict;
4212
4213 pu16Frame[2] = (uint16_t)pCtx->eflags.u;
4214 pu16Frame[1] = (uint16_t)pCtx->cs;
4215 pu16Frame[0] = (uint16_t)(pCtx->ip + cbInstr);
4216 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
4217 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4218 return rcStrict;
4219
4220 /* load the vector address into cs:ip. */
4221 pCtx->cs = Idte.sel;
4222 pCtx->csHid.u64Base = (uint32_t)Idte.sel << 4;
4223 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
4224 pCtx->rip = Idte.off;
4225 return VINF_SUCCESS;
4226 }
4227
4228 AssertFailed();
4229 return VERR_NOT_IMPLEMENTED;
4230}
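/*
 * Illustrative sketch (not part of IEM): the real-mode IVT addressing used by
 * the int implementation above.  Vector n is a 4-byte offset:segment pair at
 * IDTR.base + 4*n, so the IDT limit must be at least 4*n + 3 for the whole
 * entry to be readable.  The vector number below is made up.
 */
#if 0 /* standalone example */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t uIdtBase = 0;              /* the real-mode IVT conventionally starts at 0 */
    uint16_t cbIdt    = 0x3ff;          /* default limit: 256 entries of 4 bytes, minus 1 */
    uint8_t  u8Int    = 0x21;           /* example vector */
    uint32_t offEntry = 4u * u8Int;     /* 0x84 */
    if (cbIdt < offEntry + 3)
        printf("vector %#x is outside the IVT -> #GP\n", u8Int);
    else
        printf("vector %#x: offset word at %#x, segment word at %#x\n",
               u8Int, uIdtBase + offEntry, uIdtBase + offEntry + 2);
    return 0;
}
#endif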
4231
4232
4233/**
4234 * Implements iret.
4235 *
4236 * @param enmEffOpSize The effective operand size.
4237 */
4238IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
4239{
4240 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4241 VBOXSTRICTRC rcStrict;
4242 uint64_t uNewRsp;
4243
4244 /*
4245 * Real mode is easy, V8086 mode is relatively similar.
4246 */
4247 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
4248 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4249 {
4250 /* iret throws an exception if VME isn't enabled. */
4251 if ( pCtx->eflags.Bits.u1VM
4252 && !(pCtx->cr4 & X86_CR4_VME))
4253 return iemRaiseGeneralProtectionFault0(pIemCpu);
4254
4255 /* Do the stack bits, but don't commit RSP before everything checks
4256 out right. */
4257 union
4258 {
4259 uint32_t const *pu32;
4260 uint16_t const *pu16;
4261 void const *pv;
4262 } uFrame;
4263 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
4264 uint16_t uNewCs;
4265 uint32_t uNewEip;
4266 uint32_t uNewFlags;
4267 if (enmEffOpSize == IEMMODE_32BIT)
4268 {
4269 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
4270 if (rcStrict != VINF_SUCCESS)
4271 return rcStrict;
4272 uNewEip = uFrame.pu32[0];
4273 uNewCs = (uint16_t)uFrame.pu32[1];
4274 uNewFlags = uFrame.pu32[2];
4275 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
4276 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
4277 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
4278 | X86_EFL_ID;
4279 uNewFlags |= pCtx->eflags.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP);
4280 }
4281 else
4282 {
4283 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
4284 if (rcStrict != VINF_SUCCESS)
4285 return rcStrict;
4286 uNewEip = uFrame.pu16[0];
4287 uNewCs = uFrame.pu16[1];
4288 uNewFlags = uFrame.pu16[2];
4289 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
4290 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
4291 uNewFlags |= pCtx->eflags.u & UINT32_C(0xffff0000);
4292 /** @todo The intel pseudo code does not indicate what happens to
4293 * reserved flags. We just ignore them. */
4294 }
4295 /** @todo Check how this is supposed to work if sp=0xfffe. */
4296
4297 /* Check the limit of the new EIP. */
4298 /** @todo Only the AMD pseudo code checks the limit here, what's
4299 * right? */
4300 if (uNewEip > pCtx->csHid.u32Limit)
4301 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
4302
4303 /* V8086 checks and flag adjustments */
4304 if (pCtx->eflags.Bits.u1VM)
4305 {
4306 if (pCtx->eflags.Bits.u2IOPL == 3)
4307 {
4308 /* Preserve IOPL and clear RF. */
4309 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
4310 uNewFlags |= pCtx->eflags.u & (X86_EFL_IOPL);
4311 }
4312 else if ( enmEffOpSize == IEMMODE_16BIT
4313 && ( !(uNewFlags & X86_EFL_IF)
4314 || !pCtx->eflags.Bits.u1VIP )
4315 && !(uNewFlags & X86_EFL_TF) )
4316 {
4317 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
4318 uNewFlags &= ~X86_EFL_VIF;
4319 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
4320 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
4321 uNewFlags |= pCtx->eflags.u & (X86_EFL_IF | X86_EFL_IOPL);
4322 }
4323 else
4324 return iemRaiseGeneralProtectionFault0(pIemCpu);
4325 }
4326
4327 /* commit the operation. */
4328 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
4329 if (rcStrict != VINF_SUCCESS)
4330 return rcStrict;
4331 pCtx->rip = uNewEip;
4332 pCtx->cs = uNewCs;
4333 pCtx->csHid.u64Base = (uint32_t)uNewCs << 4;
4334 /** @todo do we load attribs and limit as well? */
4335 pCtx->eflags.u = uNewFlags;
4336
4337 return VINF_SUCCESS;
4338 }
4339
4340
4341 AssertFailed();
4342 return VERR_NOT_IMPLEMENTED;
4343}
4344
4345
4346/**
4347 * Implements 'mov SReg, r/m'.
4348 *
4349 * @param iSegReg The segment register number (valid).
4350 * @param uSel The new selector value.
4351 */
4352IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
4353{
4354 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4355 uint16_t *pSel = iemSRegRef(pIemCpu, iSegReg);
4356 PCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iSegReg);
4357
4358 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
4359
4360 /*
4361 * Real mode and V8086 mode are easy.
4362 */
4363 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
4364 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4365 {
4366 *pSel = uSel;
4367 pHid->u64Base = (uint32_t)uSel << 4;
4368 /** @todo Does the CPU actually load limits and attributes in the
4369 * real/V8086 mode segment load case? It doesn't for CS in far
4370 * jumps... Affects unreal mode. */
4371 pHid->u32Limit = 0xffff;
4372 pHid->Attr.u = 0;
4373 pHid->Attr.n.u1Present = 1;
4374 pHid->Attr.n.u1DescType = 1;
4375 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
4376 ? X86_SEL_TYPE_RW
4377 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
4378
4379 iemRegAddToRip(pIemCpu, cbInstr);
4380 if (iSegReg == X86_SREG_SS)
4381 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
4382 return VINF_SUCCESS;
4383 }
4384
4385 /*
4386 * Protected mode.
4387 *
4388 * Check if it's a null segment selector value first, that's OK for DS, ES,
4389 * FS and GS. If not null, then we have to load and parse the descriptor.
4390 */
4391 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
4392 {
4393 if (iSegReg == X86_SREG_SS)
4394 {
4395 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
4396 || pIemCpu->uCpl != 0
4397 || uSel != 0) /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? */
4398 {
4399 Log(("load sreg -> invalid stack selector, #GP(0)\n", uSel));
4400 return iemRaiseGeneralProtectionFault0(pIemCpu);
4401 }
4402
4403 /* In 64-bit kernel mode, the stack can be 0 because of the way
4404 interrupts are dispatched when in kernel ctx. Just load the
4405 selector value into the register and leave the hidden bits
4406 as is. */
4407 *pSel = uSel;
4408 iemRegAddToRip(pIemCpu, cbInstr);
4409 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
4410 return VINF_SUCCESS;
4411 }
4412
4413 *pSel = uSel; /* Not RPL, remember :-) */
4414 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4415 {
4416 /** @todo figure out what this actually does, it works. Needs
4417 * testcase! */
4418 pHid->Attr.u = 0;
4419 pHid->Attr.n.u1Present = 1;
4420 pHid->Attr.n.u1Long = 1;
4421 pHid->Attr.n.u4Type = X86_SEL_TYPE_RW;
4422 pHid->Attr.n.u2Dpl = 3;
4423 pHid->u32Limit = 0;
4424 pHid->u64Base = 0;
4425 }
4426 else
4427 {
4428 pHid->Attr.u = 0;
4429 pHid->u32Limit = 0;
4430 pHid->u64Base = 0;
4431 }
4432 iemRegAddToRip(pIemCpu, cbInstr);
4433 return VINF_SUCCESS;
4434 }
4435
4436 /* Fetch the descriptor. */
4437 IEMSELDESC Desc;
4438 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
4439 if (rcStrict != VINF_SUCCESS)
4440 return rcStrict;
4441
4442 /* Check GPs first. */
4443 if (!Desc.Legacy.Gen.u1DescType)
4444 {
4445 Log(("load sreg %d - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
4446 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
4447 }
4448 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
4449 {
4450 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4451 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
4452 {
4453 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
4454 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
4455 }
4462 if ((uSel & X86_SEL_RPL) != pIemCpu->uCpl)
4463 {
4464 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pIemCpu->uCpl));
4465 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
4466 }
4467 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
4468 {
4469 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
4470 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
4471 }
4472 }
4473 else
4474 {
4475 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4476 {
4477 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
4478 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
4479 }
4480 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4481 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4482 {
4483#if 0 /* this is what intel says. */
4484 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
4485 && pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
4486 {
4487 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
4488 iSegReg, uSel, (uSel & X86_SEL_RPL), pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
4489 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
4490 }
4491#else /* this is what makes more sense. */
4492 if ((uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4493 {
4494 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
4495 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
4496 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
4497 }
4498 if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
4499 {
4500 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
4501 iSegReg, uSel, pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
4502 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
4503 }
4504#endif
4505 }
4506 }
4507
4508 /* Is it there? */
4509 if (!Desc.Legacy.Gen.u1Present)
4510 {
4511 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
4512 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
4513 }
4514
4515 /* The base and limit. */
4516 uint64_t u64Base;
4517 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
4518 if (Desc.Legacy.Gen.u1Granularity)
4519 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
4520
4521 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
4522 && iSegReg < X86_SREG_FS)
4523 u64Base = 0;
4524 else
4525 u64Base = X86DESC_BASE(Desc.Legacy);
4526
4527 /*
4528 * Ok, everything checked out fine. Now set the accessed bit before
4529 * committing the result into the registers.
4530 */
4531 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4532 {
4533 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
4534 if (rcStrict != VINF_SUCCESS)
4535 return rcStrict;
4536 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4537 }
4538
4539 /* commit */
4540 *pSel = uSel;
4541 pHid->Attr.u = (Desc.Legacy.u >> (16+16+8)) & UINT32_C(0xf0ff); /** @todo do we have a define for 0xf0ff? */
4542 pHid->u32Limit = cbLimit;
4543 pHid->u64Base = u64Base;
4544
4545 /** @todo check if the hidden bits are loaded correctly for 64-bit
4546 * mode. */
4547
4548 iemRegAddToRip(pIemCpu, cbInstr);
4549 if (iSegReg == X86_SREG_SS)
4550 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
4551 return VINF_SUCCESS;
4552}
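/*
 * Illustrative sketch (not part of IEM): the real/V8086 hidden base calculation
 * used in the segment register loads above.  The base is simply the 16-bit
 * selector shifted left by four, so selector 0xb800 yields linear base 0xb8000.
 */
#if 0 /* standalone example */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t uSel  = 0xb800;
    uint64_t uBase = (uint32_t)uSel << 4;
    printf("base=%#llx\n", (unsigned long long)uBase);  /* 0xb8000 */
    return 0;
}
#endif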
4553
4554
4555/**
4556 * Implements 'pop SReg'.
4557 *
4558 * @param iSegReg The segment register number (valid).
4559 * @param enmEffOpSize The effective operand size (valid).
4560 */
4561IEM_CIMPL_DEF_2(iemOpCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
4562{
4563 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4564 VBOXSTRICTRC rcStrict;
4565
4566 /*
4567 * Read the selector off the stack and join paths with mov ss, reg.
4568 */
4569 RTUINT64U TmpRsp;
4570 TmpRsp.u = pCtx->rsp;
4571 switch (enmEffOpSize)
4572 {
4573 case IEMMODE_16BIT:
4574 {
4575 uint16_t uSel;
4576 rcStrict = iemMemStackPopU16Ex(pIemCpu, &uSel, &TmpRsp);
4577 if (rcStrict == VINF_SUCCESS)
4578 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4579 break;
4580 }
4581
4582 case IEMMODE_32BIT:
4583 {
4584 uint32_t u32Value;
4585 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
4586 if (rcStrict == VINF_SUCCESS)
4587 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
4588 break;
4589 }
4590
4591 case IEMMODE_64BIT:
4592 {
4593 uint64_t u64Value;
4594 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
4595 if (rcStrict == VINF_SUCCESS)
4596 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
4597 break;
4598 }
4599 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4600 }
4601
4602 /*
4603 * Commit the stack on success.
4604 */
4605 if (rcStrict == VINF_SUCCESS)
4606 pCtx->rsp = TmpRsp.u;
4607 return rcStrict;
4608}
4609
4610
4611/**
4612 * Implements lgdt.
4613 *
4614 * @param iEffSeg The segment of the new gdtr contents
4615 * @param GCPtrEffSrc The address of the new gdtr contents.
4616 * @param enmEffOpSize The effective operand size.
4617 */
4618IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
4619{
4620 if (pIemCpu->uCpl != 0)
4621 return iemRaiseGeneralProtectionFault0(pIemCpu);
4622 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
4623
4624 /*
4625 * Fetch the limit and base address.
4626 */
4627 uint16_t cbLimit;
4628 RTGCPTR GCPtrBase;
4629 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
4630 if (rcStrict == VINF_SUCCESS)
4631 {
4632#ifndef IEM_VERIFICATION_MODE
4633 rcStrict = CPUMSetGuestGDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
4634#else
4635 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4636 pCtx->gdtr.cbGdt = cbLimit;
4637 pCtx->gdtr.pGdt = GCPtrBase;
4638#endif
4639 if (rcStrict == VINF_SUCCESS)
4640 iemRegAddToRip(pIemCpu, cbInstr);
4641 }
4642 return rcStrict;
4643}
4644
4645
4646/**
4647 * Implements lidt.
4648 *
4649 * @param iEffSeg The segment of the new idtr contents
4650 * @param GCPtrEffSrc The address of the new idtr contents.
4651 * @param enmEffOpSize The effective operand size.
4652 */
4653IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
4654{
4655 if (pIemCpu->uCpl != 0)
4656 return iemRaiseGeneralProtectionFault0(pIemCpu);
4657 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
4658
4659 /*
4660 * Fetch the limit and base address.
4661 */
4662 uint16_t cbLimit;
4663 RTGCPTR GCPtrBase;
4664 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
4665 if (rcStrict == VINF_SUCCESS)
4666 {
4667#ifndef IEM_VERIFICATION_MODE
4668 rcStrict = CPUMSetGuestIDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
4669#else
4670 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4671 pCtx->idtr.cbIdt = cbLimit;
4672 pCtx->idtr.pIdt = GCPtrBase;
4673#endif
4674 if (rcStrict == VINF_SUCCESS)
4675 iemRegAddToRip(pIemCpu, cbInstr);
4676 }
4677 return rcStrict;
4678}
4679
4680
4681/**
4682 * Implements mov GReg,CRx.
4683 *
4684 * @param iGReg The general register to store the CRx value in.
4685 * @param iCrReg The CRx register to read (valid).
4686 */
4687IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
4688{
4689 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4690 if (pIemCpu->uCpl != 0)
4691 return iemRaiseGeneralProtectionFault0(pIemCpu);
4692 Assert(!pCtx->eflags.Bits.u1VM);
4693
4694 /* read it */
4695 uint64_t crX;
4696 switch (iCrReg)
4697 {
4698 case 0: crX = pCtx->cr0; break;
4699 case 2: crX = pCtx->cr2; break;
4700 case 3: crX = pCtx->cr3; break;
4701 case 4: crX = pCtx->cr4; break;
4702 case 8:
4703#ifndef IEM_VERIFICATION_MODE
4704 AssertFailedReturn(VERR_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
4705#else
4706 crX = 0xff;
4707#endif
4708 break;
4709 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
4710 }
4711
4712 /* store it */
4713 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4714 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = crX;
4715 else
4716 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)crX;
4717
4718 iemRegAddToRip(pIemCpu, cbInstr);
4719 return VINF_SUCCESS;
4720}
4721
4722
4723/**
4724 * Implements mov CRx,GReg.
4725 *
4726 * @param iCrReg The CRx register to write (valid).
4727 * @param iGReg The general register that holds the new CRx value.
4728 */
4729IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
4730{
4731 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4732 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4733 VBOXSTRICTRC rcStrict;
4734 int rc;
4735
4736 if (pIemCpu->uCpl != 0)
4737 return iemRaiseGeneralProtectionFault0(pIemCpu);
4738 Assert(!pCtx->eflags.Bits.u1VM);
4739
4740 /*
4741 * Read the new value from the source register.
4742 */
4743 uint64_t NewCrX;
4744 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4745 NewCrX = iemGRegFetchU64(pIemCpu, iGReg);
4746 else
4747 NewCrX = iemGRegFetchU32(pIemCpu, iGReg);
4748
4749 /*
4750 * Try store it.
4751 * Unfortunately, CPUM only does a tiny bit of the work.
4752 */
4753 switch (iCrReg)
4754 {
4755 case 0:
4756 {
4757 /*
4758 * Perform checks.
4759 */
4760 uint64_t const OldCrX = pCtx->cr0;
4761 NewCrX |= X86_CR0_ET; /* hardcoded */
4762
4763 /* Check for reserved bits. */
4764 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
4765 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
4766 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
4767 if (NewCrX & ~(uint64_t)fValid)
4768 {
4769 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", NewCrX, NewCrX & ~(uint64_t)fValid));
4770 return iemRaiseGeneralProtectionFault0(pIemCpu);
4771 }
4772
4773 /* Check for invalid combinations. */
4774 if ( (NewCrX & X86_CR0_PG)
4775 && !(NewCrX & X86_CR0_PE) )
4776 {
4777 Log(("Trying to set CR0.PG without CR0.PE\n"));
4778 return iemRaiseGeneralProtectionFault0(pIemCpu);
4779 }
4780
4781 if ( !(NewCrX & X86_CR0_CD)
4782 && (NewCrX & X86_CR0_NW) )
4783 {
4784 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
4785 return iemRaiseGeneralProtectionFault0(pIemCpu);
4786 }
4787
4788 /* Long mode consistency checks. */
4789 if ( (NewCrX & X86_CR0_PG)
4790 && !(OldCrX & X86_CR0_PG)
4791 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
4792 {
4793 if (!(pCtx->cr4 & X86_CR4_PAE))
4794 {
4795 Log(("Trying to enabled long mode paging without CR4.PAE set\n"));
4796 return iemRaiseGeneralProtectionFault0(pIemCpu);
4797 }
4798 if (pCtx->csHid.Attr.n.u1Long)
4799 {
4800 Log(("Trying to enabled long mode paging with a long CS descriptor loaded.\n"));
4801 return iemRaiseGeneralProtectionFault0(pIemCpu);
4802 }
4803 }
4804
4805 /** @todo check reserved PDPTR bits as AMD states. */
4806
4807 /*
4808 * Change CR0.
4809 */
4810#ifndef IEM_VERIFICATION_MODE
4811 rc = CPUMSetGuestCR0(pVCpu, NewCrX);
4812 AssertRCSuccessReturn(rc, RT_FAILURE_NP(rc) ? rc : VERR_INTERNAL_ERROR_3);
4813#else
4814 pCtx->cr0 = NewCrX;
4815#endif
4816 Assert(pCtx->cr0 == NewCrX);
4817
4818 /*
4819 * Change EFER.LMA if entering or leaving long mode.
4820 */
4821 if ( (NewCrX & X86_CR0_PG) != (OldCrX & X86_CR0_PG)
4822 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
4823 {
4824 uint64_t NewEFER = pCtx->msrEFER;
4825 if (NewCrX & X86_CR0_PG)
4826 NewEFER |= MSR_K6_EFER_LME;
4827 else
4828 NewEFER &= ~MSR_K6_EFER_LME;
4829
4830#ifndef IEM_VERIFICATION_MODE
4831 CPUMSetGuestEFER(pVCpu, NewEFER);
4832#else
4833 pCtx->msrEFER = NewEFER;
4834#endif
4835 Assert(pCtx->msrEFER == NewEFER);
4836 }
4837
4838#ifndef IEM_VERIFICATION_MODE
4839 /*
4840 * Inform PGM.
4841 */
4842 if ( (NewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
4843 != (OldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
4844 {
4845 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
4846 AssertRCReturn(rc, rc);
4847 /* ignore informational status codes */
4848 }
4849 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
4850 /** @todo Status code management. */
4851#else
4852 rcStrict = VINF_SUCCESS;
4853#endif
4854 break;
4855 }
4856
4857 /*
4858 * CR2 can be changed without any restrictions.
4859 */
4860 case 2:
4861 pCtx->cr2 = NewCrX;
4862 break;
4863
4864 /*
4865 * CR3 is relatively simple, although AMD and Intel have different
4866 * accounts of how setting reserved bits is handled. We take Intel's
4867 * word for the lower bits and AMD's for the high bits (63:52).
4868 */
4869 /** @todo Testcase: Setting reserved bits in CR3, especially before
4870 * enabling paging. */
4871 case 3:
4872 {
4873 /* check / mask the value. */
4874 if (NewCrX & UINT64_C(0xfff0000000000000))
4875 {
4876 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", NewCrX));
4877 return iemRaiseGeneralProtectionFault0(pIemCpu);
4878 }
4879
4880 uint64_t fValid;
4881 if ( (pCtx->cr4 & X86_CR4_PAE)
4882 && (pCtx->msrEFER & MSR_K6_EFER_LME))
4883 fValid = UINT64_C(0x000ffffffffff014);
4884 else if (pCtx->cr4 & X86_CR4_PAE)
4885 fValid = UINT64_C(0xfffffff4);
4886 else
4887 fValid = UINT64_C(0xfffff014);
4888 if (NewCrX & ~fValid)
4889 {
4890 Log(("Automatically clearing reserved bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
4891 NewCrX, NewCrX & ~fValid));
4892 NewCrX &= fValid;
4893 }
4894
4895 /** @todo If we're in PAE mode we should check the PDPTRs for
4896 * invalid bits. */
4897
4898 /* Make the change. */
4899#ifndef IEM_VERIFICATION_MODE
4900 rc = CPUMSetGuestCR3(pVCpu, NewCrX);
4901 AssertRCSuccessReturn(rc, rc);
4902#else
4903 pCtx->cr3 = NewCrX;
4904#endif
4905
4906#ifndef IEM_VERIFICATION_MODE
4907 /* Inform PGM. */
4908 if (pCtx->cr0 & X86_CR0_PG)
4909 {
4910 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4911 AssertRCReturn(rc, rc);
4912 /* ignore informational status codes */
4913 /** @todo status code management */
4914 }
4915#endif
4916 rcStrict = VINF_SUCCESS;
4917 break;
4918 }
4919
4920 /*
4921 * CR4 is a bit more tedious as there are bits which cannot be cleared
4922 * under some circumstances and such.
4923 */
4924 case 4:
4925 {
4926 uint64_t const OldCrX = pCtx->cr4;
4927
4928 /* reserved bits */
4929 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
4930 | X86_CR4_TSD | X86_CR4_DE
4931 | X86_CR4_PSE | X86_CR4_PAE
4932 | X86_CR4_MCE | X86_CR4_PGE
4933 | X86_CR4_PCE | X86_CR4_OSFSXR
4934 | X86_CR4_OSXMMEEXCPT;
4935 //if (xxx)
4936 // fValid |= X86_CR4_VMXE;
4937 //if (xxx)
4938 // fValid |= X86_CR4_OSXSAVE;
4939 if (NewCrX & ~(uint64_t)fValid)
4940 {
4941 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", NewCrX, NewCrX & ~(uint64_t)fValid));
4942 return iemRaiseGeneralProtectionFault0(pIemCpu);
4943 }
4944
4945 /* long mode checks. */
4946 if ( (OldCrX & X86_CR4_PAE)
4947 && !(NewCrX & X86_CR4_PAE)
4948 && (pCtx->msrEFER & MSR_K6_EFER_LMA) )
4949 {
4950 Log(("Trying to set clear CR4.PAE while long mode is active\n"));
4951 return iemRaiseGeneralProtectionFault0(pIemCpu);
4952 }
4953
4954
4955 /*
4956 * Change it.
4957 */
4958#ifndef IEM_VERIFICATION_MODE
4959 rc = CPUMSetGuestCR4(pVCpu, NewCrX);
4960 AssertRCSuccessReturn(rc, rc);
4961#else
4962 pCtx->cr4 = NewCrX;
4963#endif
4964 Assert(pCtx->cr4 == NewCrX);
4965
4966 /*
4967 * Notify SELM and PGM.
4968 */
4969#ifndef IEM_VERIFICATION_MODE
4970 /* SELM - VME may change things wrt the TSS shadowing. */
4971 if ((NewCrX ^ OldCrX) & X86_CR4_VME)
4972 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
4973
4974 /* PGM - flushing and mode. */
4975 if ( (NewCrX & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
4976 != (OldCrX & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)) )
4977 {
4978 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
4979 AssertRCReturn(rc, rc);
4980 /* ignore informational status codes */
4981 }
4982 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
4983 /** @todo Status code management. */
4984#else
4985 rcStrict = VINF_SUCCESS;
4986#endif
4987 break;
4988 }
4989
4990 /*
4991 * CR8 maps to the APIC TPR.
4992 */
4993 case 8:
4994#ifndef IEM_VERIFICATION_MODE
4995 AssertFailedReturn(VERR_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
4996#else
4997 rcStrict = VINF_SUCCESS;
4998#endif
4999 break;
5000
5001 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5002 }
5003
5004 /*
5005 * Advance the RIP on success.
5006 */
5007 /** @todo Status code management. */
5008 if (rcStrict == VINF_SUCCESS)
5009 iemRegAddToRip(pIemCpu, cbInstr);
5010 return rcStrict;
5011}
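/*
 * Illustrative sketch (not part of IEM): the two CR0 combination rules the
 * mov CR0 path above enforces, PG may only be set together with PE, and NW may
 * only be set together with CD.  The bit values are the architectural CR0
 * positions; the helper name is made up.
 */
#if 0 /* standalone example */
#include <stdint.h>
#include <stdio.h>

#define CR0_PE  UINT64_C(0x00000001)
#define CR0_NW  UINT64_C(0x20000000)
#define CR0_CD  UINT64_C(0x40000000)
#define CR0_PG  UINT64_C(0x80000000)

static int IsValidCr0Combination(uint64_t uCr0)
{
    if ((uCr0 & CR0_PG) && !(uCr0 & CR0_PE))
        return 0;                           /* paging without protected mode -> #GP */
    if ((uCr0 & CR0_NW) && !(uCr0 & CR0_CD))
        return 0;                           /* not-write-through without cache disable -> #GP */
    return 1;
}

int main(void)
{
    printf("%d\n", IsValidCr0Combination(CR0_PG));          /* 0 */
    printf("%d\n", IsValidCr0Combination(CR0_PG | CR0_PE)); /* 1 */
    return 0;
}
#endif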
5012
5013
5014/**
5015 * Implements 'IN eAX, port'.
5016 *
5017 * @param u16Port The source port.
5018 * @param cbReg The register size.
5019 */
5020IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
5021{
5022 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5023
5024 /*
5025 * CPL check
5026 */
5027 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
5028 if (rcStrict != VINF_SUCCESS)
5029 return rcStrict;
5030
5031 /*
5032 * Perform the I/O.
5033 */
5034 uint32_t u32Value;
5035#ifndef IEM_VERIFICATION_MODE
5036 rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), u16Port, &u32Value, cbReg);
5037#else
5038 u32Value = 0xffffffff;
5039 rcStrict = VINF_SUCCESS;
5040 pIemCpu->cIOReads++;
5041#endif
5042 if (IOM_SUCCESS(rcStrict))
5043 {
5044 switch (cbReg)
5045 {
5046 case 1: pCtx->al = (uint8_t)u32Value; break;
5047 case 2: pCtx->ax = (uint16_t)u32Value; break;
5048 case 4: pCtx->rax = u32Value; break;
5049 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
5050 }
5051 iemRegAddToRip(pIemCpu, cbInstr);
5052 pIemCpu->cPotentialExits++;
5053 }
5054 /** @todo massage rcStrict. */
5055 return rcStrict;
5056}
5057
5058
5059/**
5060 * Implements 'IN eAX, DX'.
5061 *
5062 * @param cbReg The register size.
5063 */
5064IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
5065{
5066 return IEM_CIMPL_CALL_2(iemCImpl_in, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
5067}
5068
5069
5070/**
5071 * Implements 'OUT port, eAX'.
5072 *
5073 * @param u16Port The destination port.
5074 * @param cbReg The register size.
5075 */
5076IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
5077{
5078 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5079
5080 /*
5081 * CPL check
5082 */
5083 if ( (pCtx->cr0 & X86_CR0_PE)
5084 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
5085 || pCtx->eflags.Bits.u1VM) )
5086 {
5087 /** @todo I/O port permission bitmap check */
5088 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
5089 }
5090
5091 /*
5092 * Perform the I/O.
5093 */
5094 uint32_t u32Value;
5095 switch (cbReg)
5096 {
5097 case 1: u32Value = pCtx->al; break;
5098 case 2: u32Value = pCtx->ax; break;
5099 case 4: u32Value = pCtx->eax; break;
5100 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
5101 }
5102#ifndef IEM_VERIFICATION_MODE
5103 VBOXSTRICTRC rc = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), u16Port, u32Value, cbReg);
5104#else
5105 VBOXSTRICTRC rc = VINF_SUCCESS;
5106 pIemCpu->cIOWrites++;
5107#endif
5108 if (IOM_SUCCESS(rc))
5109 {
5110 iemRegAddToRip(pIemCpu, cbInstr);
5111 pIemCpu->cPotentialExits++;
5112 /** @todo massage rc. */
5113 }
5114 return rc;
5115}
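/*
 * Illustrative sketch (not part of IEM): the protected-mode I/O privilege rule
 * applied by the in/out implementations above.  Direct port access is allowed
 * when CPL <= IOPL; otherwise, and always in V8086 mode, the TSS I/O
 * permission bitmap has to be consulted.  The helper name is made up.
 */
#if 0 /* standalone example */
#include <stdio.h>

static int NeedsIoBitmapCheck(unsigned uCpl, unsigned uIopl, int fV86)
{
    return fV86 || uCpl > uIopl;
}

int main(void)
{
    printf("%d\n", NeedsIoBitmapCheck(0 /*CPL*/, 0 /*IOPL*/, 0)); /* 0: ring 0 goes straight through */
    printf("%d\n", NeedsIoBitmapCheck(3 /*CPL*/, 0 /*IOPL*/, 0)); /* 1: user mode needs the bitmap */
    return 0;
}
#endif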
5116
5117
5118/**
5119 * Implements 'OUT DX, eAX'.
5120 *
5121 * @param cbReg The register size.
5122 */
5123IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
5124{
5125 return IEM_CIMPL_CALL_2(iemCImpl_out, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
5126}
5127
5128
5129/**
5130 * Implements 'CLI'.
5131 */
5132IEM_CIMPL_DEF_0(iemCImpl_cli)
5133{
5134 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5135
5136 if (pCtx->cr0 & X86_CR0_PE)
5137 {
5138 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
5139 if (!pCtx->eflags.Bits.u1VM)
5140 {
5141 if (pIemCpu->uCpl <= uIopl)
5142 pCtx->eflags.Bits.u1IF = 0;
5143 else if ( pIemCpu->uCpl == 3
5144 && (pCtx->cr4 & X86_CR4_PVI) )
5145 pCtx->eflags.Bits.u1VIF = 0;
5146 else
5147 return iemRaiseGeneralProtectionFault0(pIemCpu);
5148 }
5149 /* V8086 */
5150 else if (uIopl == 3)
5151 pCtx->eflags.Bits.u1IF = 0;
5152 else if ( uIopl < 3
5153 && (pCtx->cr4 & X86_CR4_VME) )
5154 pCtx->eflags.Bits.u1VIF = 0;
5155 else
5156 return iemRaiseGeneralProtectionFault0(pIemCpu);
5157 }
5158 /* real mode */
5159 else
5160 pCtx->eflags.Bits.u1IF = 0;
5161 iemRegAddToRip(pIemCpu, cbInstr);
5162 return VINF_SUCCESS;
5163}
5164
5165
5166/**
5167 * Implements 'STI'.
5168 */
5169IEM_CIMPL_DEF_0(iemCImpl_sti)
5170{
5171 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5172
5173 if (pCtx->cr0 & X86_CR0_PE)
5174 {
5175 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
5176 if (!pCtx->eflags.Bits.u1VM)
5177 {
5178 if (pIemCpu->uCpl <= uIopl)
5179 pCtx->eflags.Bits.u1IF = 1;
5180 else if ( pIemCpu->uCpl == 3
5181 && (pCtx->cr4 & X86_CR4_PVI)
5182 && !pCtx->eflags.Bits.u1VIP )
5183 pCtx->eflags.Bits.u1VIF = 1;
5184 else
5185 return iemRaiseGeneralProtectionFault0(pIemCpu);
5186 }
5187 /* V8086 */
5188 else if (uIopl == 3)
5189 pCtx->eflags.Bits.u1IF = 1;
5190 else if ( uIopl < 3
5191 && (pCtx->cr4 & X86_CR4_VME)
5192 && !pCtx->eflags.Bits.u1VIP )
5193 pCtx->eflags.Bits.u1VIF = 1;
5194 else
5195 return iemRaiseGeneralProtectionFault0(pIemCpu);
5196 }
5197 /* real mode */
5198 else
5199 pCtx->eflags.Bits.u1IF = 1;
5200
5201 iemRegAddToRip(pIemCpu, cbInstr);
5202 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
5203 return VINF_SUCCESS;
5204}
5205
5206
5207/*
5208 * Instantiate the various string operation combinations.
5209 */
5210#define OP_SIZE 8
5211#define ADDR_SIZE 16
5212#include "IEMAllCImplStrInstr.cpp.h"
5213#define OP_SIZE 8
5214#define ADDR_SIZE 32
5215#include "IEMAllCImplStrInstr.cpp.h"
5216#define OP_SIZE 8
5217#define ADDR_SIZE 64
5218#include "IEMAllCImplStrInstr.cpp.h"
5219
5220#define OP_SIZE 16
5221#define ADDR_SIZE 16
5222#include "IEMAllCImplStrInstr.cpp.h"
5223#define OP_SIZE 16
5224#define ADDR_SIZE 32
5225#include "IEMAllCImplStrInstr.cpp.h"
5226#define OP_SIZE 16
5227#define ADDR_SIZE 64
5228#include "IEMAllCImplStrInstr.cpp.h"
5229
5230#define OP_SIZE 32
5231#define ADDR_SIZE 16
5232#include "IEMAllCImplStrInstr.cpp.h"
5233#define OP_SIZE 32
5234#define ADDR_SIZE 32
5235#include "IEMAllCImplStrInstr.cpp.h"
5236#define OP_SIZE 32
5237#define ADDR_SIZE 64
5238#include "IEMAllCImplStrInstr.cpp.h"
5239
5240#define OP_SIZE 64
5241#define ADDR_SIZE 32
5242#include "IEMAllCImplStrInstr.cpp.h"
5243#define OP_SIZE 64
5244#define ADDR_SIZE 64
5245#include "IEMAllCImplStrInstr.cpp.h"
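/*
 * Illustrative sketch (not part of IEM): the repeated-#include technique used
 * above.  IEMAllCImplStrInstr.cpp.h is written without an include guard, builds
 * its code from the OP_SIZE / ADDR_SIZE parameters and presumably undefines
 * them at the end (they are redefined for every inclusion), so each
 * #define/#include pair stamps out one specialised copy of the string
 * instruction workers.  A minimal, hypothetical two-file version of the same
 * idea (file name and helper names are made up):
 */
#if 0
/* --- sumimpl.h: hypothetical template body, no include guard ---------------
static uint64_t SUM_NAME(SUM_TYPE const *pa, size_t cElems)
{
    uint64_t uSum = 0;
    for (size_t i = 0; i < cElems; i++)
        uSum += pa[i];
    return uSum;
}
#undef SUM_NAME
#undef SUM_TYPE
--------------------------------------------------------------------------- */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define SUM_TYPE uint16_t
#define SUM_NAME SumU16
#include "sumimpl.h"                        /* emits SumU16() */

#define SUM_TYPE uint32_t
#define SUM_NAME SumU32
#include "sumimpl.h"                        /* emits SumU32() */

int main(void)
{
    uint16_t const au16[2] = { 1, 2 };
    uint32_t const au32[2] = { 3, 4 };
    printf("%u %u\n", (unsigned)SumU16(au16, 2), (unsigned)SumU32(au32, 2));  /* 3 7 */
    return 0;
}
#endif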
5246
5247
5248/** @} */
5249
5250
5251/** @name "Microcode" macros.
5252 *
5253 * The idea is that we should be able to use the same code to interpret
5254 * instructions as well as recompiler instructions. Thus this obfuscation.
5255 *
5256 * @{
5257 */
5258#define IEM_MC_BEGIN(cArgs, cLocals) {
5259#define IEM_MC_END() }
5260#define IEM_MC_PAUSE() do {} while (0)
5261#define IEM_MC_CONTINUE() do {} while (0)
5262
5263/** Internal macro. */
5264#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
5265 do \
5266 { \
5267 VBOXSTRICTRC rcStrict2 = a_Expr; \
5268 if (rcStrict2 != VINF_SUCCESS) \
5269 return rcStrict2; \
5270 } while (0)
5271
5272#define IEM_MC_ADVANCE_RIP() iemRegUpdateRip(pIemCpu)
5273#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
5274#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
5275#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
5276#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
5277#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
5278#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
5279
5280#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
5281
5282#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
5283#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
5284#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
5285#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
5286#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
5287#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
5288 uint32_t a_Name; \
5289 uint32_t *a_pName = &a_Name
5290#define IEM_MC_COMMIT_EFLAGS(a_EFlags) (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags)
5291
5292#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
5293
5294#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
5295#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
5296#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
5297#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
5298#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
5299#define IEM_MC_FETCH_SREG_U32_ZX(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
5300#define IEM_MC_FETCH_SREG_U64_ZX(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
5301#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
5302
5303#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
5304#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
5305#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
5306#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
5307
5308#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
5309#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
5310/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on
5311 * commit. */
5312#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
5313#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
5314#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
5315
5316#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
5317#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
5318#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
5319 do { \
5320 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
5321 *pu32Reg += (a_u32Value); \
5322 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
5323 } while (0)
5324#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
5325
5326#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
5327#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
5328#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
5329 do { \
5330 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
5331 *pu32Reg -= (a_u32Value); \
5332 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
5333 } while (0)
5334#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
5335
5336#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
5337#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
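/*
 * Illustrative sketch (not part of the build): how the register and EFLAGS
 * micro-ops above typically compose inside an opcode decoder body for a
 * register-to-register ADD. The IEM_MC_BEGIN/IEM_MC_END brackets, the
 * IEM_MC_ADVANCE_RIP micro-op and the iemAImpl_add_u16 worker are assumed
 * from elsewhere in this file and may differ in detail.
 *
 *     IEM_MC_BEGIN(3, 0);
 *     IEM_MC_ARG(uint16_t *, pu16Dst, 0);
 *     IEM_MC_ARG(uint16_t, u16Src, 1);
 *     IEM_MC_ARG(uint32_t *, pEFlags, 2);
 *     IEM_MC_FETCH_GREG_U16(u16Src, X86_GREG_xCX);  // read the source register
 *     IEM_MC_REF_GREG_U16(pu16Dst, X86_GREG_xAX);   // reference the destination in place
 *     IEM_MC_REF_EFLAGS(pEFlags);                   // let the worker update the flags directly
 *     IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */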
5338
5339
5340
5341#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
5342 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
5343#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
5344 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
5345#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
5346 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
5347#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5348 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
5349#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5350 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
5351
5352#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
5353 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
5354#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
5355 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
5356#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
5357 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
5358#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
5359 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
5360
5361#define IEM_MC_PUSH_U16(a_u16Value) \
5362 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
5363#define IEM_MC_PUSH_U32(a_u32Value) \
5364 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
5365#define IEM_MC_PUSH_U64(a_u64Value) \
5366 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
5367
5368#define IEM_MC_POP_U16(a_pu16Value) \
5369 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
5370#define IEM_MC_POP_U32(a_pu32Value) \
5371 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
5372#define IEM_MC_POP_U64(a_pu64Value) \
5373 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
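/*
 * Illustrative sketch (not part of the build): fetching a word operand from
 * memory and pushing it onto the stack, roughly what a PUSH Ev form would do.
 * The IEM_MC_BEGIN/IEM_MC_LOCAL/IEM_MC_ADVANCE_RIP/IEM_MC_END macros and the
 * already decoded bRm byte are assumed from the surrounding decoder code.
 *
 *     IEM_MC_BEGIN(0, 2);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);                     // may return on decode errors
 *     IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffSrc); // may return on #PF etc.
 *     IEM_MC_PUSH_U16(u16Value);                                     // may return as well
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */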
5374
5375/** Maps guest memory for direct or bounce buffered access.
5376 * The purpose is to pass it to an operand implementation, thus the a_iArg.
5377 * @remarks May return.
5378 */
5379#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
5380 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
5381
5382/** Maps guest memory for direct or bounce buffered access.
5383 * The purpose is to pass it to an operand implementation, thus the a_iArg.
5384 * @remarks May return.
5385 */
5386#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
5387 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
5388
5389/** Commits the memory and unmaps the guest memory.
5390 * @remarks May return.
5391 */
5392#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
5393 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
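/*
 * Illustrative sketch (not part of the build): a read-modify-write memory
 * operand handled via mapping, the pattern lock-capable instructions need.
 * The IEM_ACCESS_DATA_RW constant, the iemAImpl_or_u32 worker and the
 * GCPtrEffDst local are assumptions for the example only.
 *
 *     IEM_MC_ARG(uint32_t *, pu32Dst, 0);
 *     IEM_MC_ARG(uint32_t, u32Src, 1);
 *     IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
 *     IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
 *     IEM_MC_FETCH_EFLAGS(EFlags);
 *     IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_or_u32, pu32Dst, u32Src, pEFlags);
 *     IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW); // write back and release the mapping
 *     IEM_MC_COMMIT_EFLAGS(EFlags);
 */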
5394
5395/** Calculate efficient address from R/M. */
5396#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm) \
5397 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), &(a_GCPtrEff)))
5398
5399#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
5400#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
5401#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
5402
5403/**
5404 * Defers the rest of the instruction emulation to a C implementation routine
5405 * and returns, only taking the standard parameters.
5406 *
5407 * @param a_pfnCImpl The pointer to the C routine.
5408 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
5409 */
5410#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
5411
5412/**
5413 * Defers the rest of instruction emulation to a C implementation routine and
5414 * returns, taking one argument in addition to the standard ones.
5415 *
5416 * @param a_pfnCImpl The pointer to the C routine.
5417 * @param a0 The argument.
5418 */
5419#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
5420
5421/**
5422 * Defers the rest of the instruction emulation to a C implementation routine
5423 * and returns, taking two arguments in addition to the standard ones.
5424 *
5425 * @param a_pfnCImpl The pointer to the C routine.
5426 * @param a0 The first extra argument.
5427 * @param a1 The second extra argument.
5428 */
5429#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
5430
5431/**
5432 * Defers the rest of the instruction emulation to a C implementation routine
5433 * and returns, taking three arguments in addition to the standard ones.
5434 *
5435 * @param a_pfnCImpl The pointer to the C routine.
5436 * @param a0 The first extra argument.
5437 * @param a1 The second extra argument.
5438 * @param a2 The third extra argument.
5439 */
5440#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
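/*
 * Illustrative sketch (not part of the build): handing the remainder of an
 * instruction over to a C implementation routine. The iemCImpl_retn routine,
 * the decoded u16Imm value and the argument order are assumptions for the
 * example; the firm part is that IEM_MC_CALL_CIMPL_* returns and nothing
 * after it in the decoder body executes.
 *
 *     IEM_MC_BEGIN(2, 0);
 *     IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, pIemCpu->enmEffOpSize, 0);
 *     IEM_MC_ARG_CONST(uint16_t, cbPop, u16Imm, 1);
 *     IEM_MC_CALL_CIMPL_2(iemCImpl_retn, enmEffOpSize, cbPop); // does not fall through
 *     IEM_MC_END();
 */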
5441
5442/**
5443 * Defers the entire instruction emulation to a C implementation routine and
5444 * returns, only taking the standard parameters.
5445 *
5446 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
5447 *
5448 * @param a_pfnCImpl The pointer to the C routine.
5449 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
5450 */
5451#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
5452
5453/**
5454 * Defers the entire instruction emulation to a C implementation routine and
5455 * returns, taking one argument in addition to the standard ones.
5456 *
5457 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
5458 *
5459 * @param a_pfnCImpl The pointer to the C routine.
5460 * @param a0 The argument.
5461 */
5462#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
5463
5464/**
5465 * Defers the entire instruction emulation to a C implementation routine and
5466 * returns, taking two arguments in addition to the standard ones.
5467 *
5468 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
5469 *
5470 * @param a_pfnCImpl The pointer to the C routine.
5471 * @param a0 The first extra argument.
5472 * @param a1 The second extra argument.
5473 */
5474#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
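/*
 * Illustrative sketch (not part of the build): deferring a whole instruction
 * to a C implementation, used as the entire body of an opcode function with
 * no IEM_MC_BEGIN/IEM_MC_END bracket. The iemOp_hlt/iemCImpl_hlt names are
 * hypothetical and only serve the example.
 *
 *     FNIEMOP_DEF(iemOp_hlt)
 *     {
 *         IEMOP_HLP_NO_LOCK_PREFIX();
 *         return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 *     }
 */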
5475
5476#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
5477#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
5478#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
5479 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
5480 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
5481#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
5482 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
5483 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
5484 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
5485#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
5486#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
5487#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
5488#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
5489 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
5490 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5491#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
5492 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
5493 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5494#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
5495 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
5496 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5497#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
5498 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
5499 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5500#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
5501 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
5502 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5503#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
5504 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
5505 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5506#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
5507#define IEM_MC_ELSE() } else {
5508#define IEM_MC_ENDIF() } do {} while (0)
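/*
 * Illustrative sketch (not part of the build): the conditional micro-ops
 * above open a block that IEM_MC_ELSE/IEM_MC_ENDIF continue and close, e.g.
 * for a Jcc style conditional branch. IEM_MC_REL_JMP_S8, IEM_MC_ADVANCE_RIP
 * and the decoded i8Imm displacement are assumed from the surrounding code.
 *
 *     IEM_MC_BEGIN(0, 0);
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *         IEM_MC_REL_JMP_S8(i8Imm);  // taken: jump relative to the next instruction
 *     IEM_MC_ELSE()
 *         IEM_MC_ADVANCE_RIP();      // not taken: just step past the instruction
 *     IEM_MC_ENDIF();
 *     IEM_MC_END();
 */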
5509
5510/** @} */
5511
5512
5513/** @name Opcode Debug Helpers.
5514 * @{
5515 */
5516#ifdef DEBUG
5517# define IEMOP_MNEMONIC(a_szMnemonic) \
5518 Log2(("decode - %04x:%08RGv %s\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, a_szMnemonic))
5519# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
5520 Log2(("decode - %04x:%08RGv %s %s\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, a_szMnemonic, a_szOps))
5521#else
5522# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
5523# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
5524#endif
5525
5526/** @} */
5527
5528
5529/** @name Opcode Helpers.
5530 * @{
5531 */
5532
5533/** The instruction allows no lock prefixing (in this encoding), throw #UD if
5534 * lock prefixed. */
5535#define IEMOP_HLP_NO_LOCK_PREFIX() \
5536 do \
5537 { \
5538 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
5539 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
5540 } while (0)
5541
5542/** The instruction is not available in 64-bit mode, throw #UD if we're in
5543 * 64-bit mode. */
5544#define IEMOP_HLP_NO_64BIT() \
5545 do \
5546 { \
5547 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
5548 return IEMOP_RAISE_INVALID_OPCODE(); \
5549 } while (0)
5550
5551/** The instruction defaults to 64-bit operand size if 64-bit mode. */
5552#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
5553 do \
5554 { \
5555 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
5556 iemRecalEffOpSize64Default(pIemCpu); \
5557 } while (0)
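/*
 * Illustrative sketch (not part of the build): a typical opcode decoder
 * prologue combining the helpers above with IEMOP_MNEMONIC, here for a
 * hypothetical PUSH-style instruction that rejects LOCK and defaults to
 * 64-bit operand size in long mode. The function name is an example only.
 *
 *     FNIEMOP_DEF(iemOp_push_rAX)
 *     {
 *         IEMOP_MNEMONIC("push rAX");
 *         IEMOP_HLP_NO_LOCK_PREFIX();        // raise #UD if a LOCK prefix is present
 *         IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); // 66h still selects 16-bit, otherwise 64-bit
 *         ...
 *     }
 */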
5558
5559
5560
5561/**
5562 * Calculates the effective address of a ModR/M memory operand.
5563 *
5564 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
5565 *
5566 * @return Strict VBox status code.
5567 * @param pIemCpu The IEM per CPU data.
5568 * @param bRm The ModRM byte.
5569 * @param pGCPtrEff Where to return the effective address.
5570 */
5571static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, PRTGCPTR pGCPtrEff)
5572{
5573 LogFlow(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
5574 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5575#define SET_SS_DEF() \
5576 do \
5577 { \
5578 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
5579 pIemCpu->iEffSeg = X86_SREG_SS; \
5580 } while (0)
5581
5582/** @todo Check the effective address size crap! */
5583 switch (pIemCpu->enmEffAddrMode)
5584 {
5585 case IEMMODE_16BIT:
5586 {
5587 uint16_t u16EffAddr;
5588
5589 /* Handle the disp16 form with no registers first. */
5590 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
5591 IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16EffAddr);
5592 else
5593 {
5594 /* Get the displacement. */
5595 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
5596 {
5597 case 0: u16EffAddr = 0; break;
5598 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(pIemCpu, &u16EffAddr); break;
5599 case 2: IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16EffAddr); break;
5600 default: AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
5601 }
5602
5603 /* Add the base and index registers to the disp. */
5604 switch (bRm & X86_MODRM_RM_MASK)
5605 {
5606 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
5607 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
5608 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
5609 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
5610 case 4: u16EffAddr += pCtx->si; break;
5611 case 5: u16EffAddr += pCtx->di; break;
5612 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
5613 case 7: u16EffAddr += pCtx->bx; break;
5614 }
5615 }
5616
5617 *pGCPtrEff = u16EffAddr;
5618 LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#06RGv\n", *pGCPtrEff));
5619 return VINF_SUCCESS;
5620 }
5621
5622 case IEMMODE_32BIT:
5623 {
5624 uint32_t u32EffAddr;
5625
5626 /* Handle the disp32 form with no registers first. */
5627 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
5628 IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32EffAddr);
5629 else
5630 {
5631 /* Get the register (or SIB) value. */
5632 switch ((bRm & X86_MODRM_RM_MASK))
5633 {
5634 case 0: u32EffAddr = pCtx->eax; break;
5635 case 1: u32EffAddr = pCtx->ecx; break;
5636 case 2: u32EffAddr = pCtx->edx; break;
5637 case 3: u32EffAddr = pCtx->ebx; break;
5638 case 4: /* SIB */
5639 {
5640 uint8_t bSib; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bSib);
5641
5642 /* Get the index and scale it. */
5643 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
5644 {
5645 case 0: u32EffAddr = pCtx->eax; break;
5646 case 1: u32EffAddr = pCtx->ecx; break;
5647 case 2: u32EffAddr = pCtx->edx; break;
5648 case 3: u32EffAddr = pCtx->ebx; break;
5649 case 4: u32EffAddr = 0; /*none */ break;
5650 case 5: u32EffAddr = pCtx->ebp; break;
5651 case 6: u32EffAddr = pCtx->esi; break;
5652 case 7: u32EffAddr = pCtx->edi; break;
5653 }
5654 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
5655
5656 /* add base */
5657 switch (bSib & X86_SIB_BASE_MASK)
5658 {
5659 case 0: u32EffAddr += pCtx->eax; break;
5660 case 1: u32EffAddr += pCtx->ecx; break;
5661 case 2: u32EffAddr += pCtx->edx; break;
5662 case 3: u32EffAddr += pCtx->ebx; break;
5663 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
5664 case 5:
5665 if ((bRm & X86_MODRM_MOD_MASK) != 0)
5666 {
5667 u32EffAddr += pCtx->ebp;
5668 SET_SS_DEF();
5669 }
5670 else
5671 {
5672 uint32_t u32Disp;
5673 IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Disp);
5674 u32EffAddr += u32Disp;
5675 }
5676 break;
5677 case 6: u32EffAddr += pCtx->esi; break;
5678 case 7: u32EffAddr += pCtx->edi; break;
5679 }
5680 break;
5681 }
5682 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
5683 case 6: u32EffAddr = pCtx->esi; break;
5684 case 7: u32EffAddr = pCtx->edi; break;
5685 }
5686
5687 /* Get and add the displacement. */
5688 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
5689 {
5690 case 0:
5691 break;
5692 case 1:
5693 {
5694 int8_t i8Disp;
5695 IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Disp);
5696 u32EffAddr += i8Disp;
5697 break;
5698 }
5699 case 2:
5700 {
5701 uint32_t u32Disp;
5702 IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Disp);
5703 u32EffAddr += u32Disp;
5704 break;
5705 }
5706 default:
5707 AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
5708 }
5709
5710 }
5711 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
5712 *pGCPtrEff = u32EffAddr;
5713 else
5714 {
5715 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
5716 *pGCPtrEff = u32EffAddr & UINT16_MAX;
5717 }
5718 LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
5719 return VINF_SUCCESS;
5720 }
5721
5722 case IEMMODE_64BIT:
5723 {
5724 uint64_t u64EffAddr;
5725
5726 /* Handle the rip+disp32 form with no registers first. */
5727 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
5728 {
5729 IEM_OPCODE_GET_NEXT_S32_SX_U64(pIemCpu, &u64EffAddr);
5730 u64EffAddr += pCtx->rip + pIemCpu->offOpcode;
5731 }
5732 else
5733 {
5734 /* Get the register (or SIB) value. */
5735 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
5736 {
5737 case 0: u64EffAddr = pCtx->rax; break;
5738 case 1: u64EffAddr = pCtx->rcx; break;
5739 case 2: u64EffAddr = pCtx->rdx; break;
5740 case 3: u64EffAddr = pCtx->rbx; break;
5741 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
5742 case 6: u64EffAddr = pCtx->rsi; break;
5743 case 7: u64EffAddr = pCtx->rdi; break;
5744 case 8: u64EffAddr = pCtx->r8; break;
5745 case 9: u64EffAddr = pCtx->r9; break;
5746 case 10: u64EffAddr = pCtx->r10; break;
5747 case 11: u64EffAddr = pCtx->r11; break;
5748 case 13: u64EffAddr = pCtx->r13; break;
5749 case 14: u64EffAddr = pCtx->r14; break;
5750 case 15: u64EffAddr = pCtx->r15; break;
5751 /* SIB */
5752 case 4:
5753 case 12:
5754 {
5755 uint8_t bSib; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bSib);
5756
5757 /* Get the index and scale it. */
5758 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
5759 {
5760 case 0: u64EffAddr = pCtx->rax; break;
5761 case 1: u64EffAddr = pCtx->rcx; break;
5762 case 2: u64EffAddr = pCtx->rdx; break;
5763 case 3: u64EffAddr = pCtx->rbx; break;
5764 case 4: u64EffAddr = 0; /*none */ break;
5765 case 5: u64EffAddr = pCtx->rbp; break;
5766 case 6: u64EffAddr = pCtx->rsi; break;
5767 case 7: u64EffAddr = pCtx->rdi; break;
5768 case 8: u64EffAddr = pCtx->r8; break;
5769 case 9: u64EffAddr = pCtx->r9; break;
5770 case 10: u64EffAddr = pCtx->r10; break;
5771 case 11: u64EffAddr = pCtx->r11; break;
5772 case 12: u64EffAddr = pCtx->r12; break;
5773 case 13: u64EffAddr = pCtx->r13; break;
5774 case 14: u64EffAddr = pCtx->r14; break;
5775 case 15: u64EffAddr = pCtx->r15; break;
5776 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5777 }
5778 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
5779
5780 /* add base */
5781 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
5782 {
5783 case 0: u64EffAddr += pCtx->rax; break;
5784 case 1: u64EffAddr += pCtx->rcx; break;
5785 case 2: u64EffAddr += pCtx->rdx; break;
5786 case 3: u64EffAddr += pCtx->rbx; break;
5787 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
5788 case 6: u64EffAddr += pCtx->rsi; break;
5789 case 7: u64EffAddr += pCtx->rdi; break;
5790 case 8: u64EffAddr += pCtx->r8; break;
5791 case 9: u64EffAddr += pCtx->r9; break;
5792 case 10: u64EffAddr += pCtx->r10; break;
5793 case 11: u64EffAddr += pCtx->r11; break;
case 12: u64EffAddr += pCtx->r12; break;
5794 case 14: u64EffAddr += pCtx->r14; break;
5795 case 15: u64EffAddr += pCtx->r15; break;
5796 /* complicated encodings */
5797 case 5:
5798 case 13:
5799 if ((bRm & X86_MODRM_MOD_MASK) != 0)
5800 {
5801 if (!pIemCpu->uRexB)
5802 {
5803 u64EffAddr += pCtx->rbp;
5804 SET_SS_DEF();
5805 }
5806 else
5807 u64EffAddr += pCtx->r13;
5808 }
5809 else
5810 {
5811 uint32_t u32Disp;
5812 IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Disp);
5813 u64EffAddr += (int32_t)u32Disp;
5814 }
5815 break;
5816 }
5817 break;
5818 }
5819 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5820 }
5821
5822 /* Get and add the displacement. */
5823 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
5824 {
5825 case 0:
5826 break;
5827 case 1:
5828 {
5829 int8_t i8Disp;
5830 IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Disp);
5831 u64EffAddr += i8Disp;
5832 break;
5833 }
5834 case 2:
5835 {
5836 uint32_t u32Disp;
5837 IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Disp);
5838 u64EffAddr += (int32_t)u32Disp;
5839 break;
5840 }
5841 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
5842 }
5843
5844 }
5845 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
5846 *pGCPtrEff = u64EffAddr;
5847 else
5848 *pGCPtrEff = u64EffAddr & UINT32_MAX; /* in 64-bit mode the only alternative address size is 32-bit */
5849 LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
5850 return VINF_SUCCESS;
5851 }
5852 }
5853
5854 AssertFailedReturn(VERR_INTERNAL_ERROR_3);
5855}
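/*
 * Worked example (informational): with a 16-bit effective address size,
 * bRm = 0x42 decodes as mod=1, rm=2, i.e. [bp+si+disp8]. If bp=0x0100,
 * si=0x0020 and the disp8 byte is 0x10, the function returns
 * *pGCPtrEff = 0x0130 and, absent a segment prefix, SET_SS_DEF() makes SS
 * the default segment for the access.
 */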
5856
5857/** @} */
5858
5859
5860
5861/*
5862 * Include the instructions
5863 */
5864#include "IEMAllInstructions.cpp.h"
5865
5866
5867
5868
5869#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
5870
5871/**
5872 * Sets up execution verification mode.
5873 */
5874static void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
5875{
5876 static CPUMCTX s_DebugCtx; /* Ugly! */
5877
5878 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
5879 s_DebugCtx = *pOrgCtx;
5880 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
5881 pIemCpu->cIOReads = 0;
5882 pIemCpu->cIOWrites = 0;
5883 pIemCpu->fMulDivHack = false;
5884 pIemCpu->fShlHack = false;
5885
5886}
5887
5888/**
5889 * Performs the post-execution verification checks.
5890 */
5891static void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
5892{
5893 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
5894 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
5895 Assert(pOrgCtx != pDebugCtx);
5896 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
5897
5898 int rc = REMR3EmulateInstruction(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu));
5899 AssertRC(rc);
5900
5901 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
5902 {
5903 Log(("REM and IEM ends up with different registers!\n"));
5904 unsigned cDiffs = 0;
5905
5906# define CHECK_FIELD(a_Field) \
5907 do \
5908 { \
5909 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
5910 { \
5911 switch (sizeof(pOrgCtx->a_Field)) \
5912 { \
5913 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
5914 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - rem=%04x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
5915 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - rem=%08x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
5916 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - rem=%016llx\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
5917 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
5918 } \
5919 cDiffs++; \
5920 } \
5921 } while (0)
5922
5923# define CHECK_BIT_FIELD(a_Field) \
5924 do \
5925 { \
5926 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
5927 { \
5928 RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); \
5929 cDiffs++; \
5930 } \
5931 } while (0)
5932
5933 if (memcmp(&pOrgCtx->fpu, &pDebugCtx->fpu, sizeof(pDebugCtx->fpu)))
5934 {
5935 if (pIemCpu->cInstructions != 1)
5936 {
5937 RTAssertMsg2Weak(" the FPU state differs\n");
5938 cDiffs++;
5939 }
5940 else
5941 RTAssertMsg2Weak(" the FPU state differs - happends the first time...\n");
5942 }
5943 CHECK_FIELD(rip);
5944 uint32_t fFlagsMask = UINT32_MAX;
5945 if (pIemCpu->fMulDivHack)
5946 fFlagsMask &= ~(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
5947 if (pIemCpu->fShlHack)
5948 fFlagsMask &= ~(X86_EFL_OF);
5949 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
5950 {
5951 RTAssertMsg2Weak(" rflags differs - iem=%08llx rem=%08llx\n", pDebugCtx->rflags.u, pOrgCtx->rflags.u);
5952 CHECK_BIT_FIELD(rflags.Bits.u1CF);
5953 /*CHECK_BIT_FIELD(rflags.Bits.u1Reserved0); */ /** @todo why does REM set this? */
5954 CHECK_BIT_FIELD(rflags.Bits.u1PF);
5955 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
5956 CHECK_BIT_FIELD(rflags.Bits.u1AF);
5957 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
5958 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
5959 CHECK_BIT_FIELD(rflags.Bits.u1SF);
5960 CHECK_BIT_FIELD(rflags.Bits.u1TF);
5961 CHECK_BIT_FIELD(rflags.Bits.u1IF);
5962 CHECK_BIT_FIELD(rflags.Bits.u1DF);
5963 CHECK_BIT_FIELD(rflags.Bits.u1OF);
5964 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
5965 CHECK_BIT_FIELD(rflags.Bits.u1NT);
5966 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
5967 CHECK_BIT_FIELD(rflags.Bits.u1RF);
5968 CHECK_BIT_FIELD(rflags.Bits.u1VM);
5969 CHECK_BIT_FIELD(rflags.Bits.u1AC);
5970 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
5971 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
5972 CHECK_BIT_FIELD(rflags.Bits.u1ID);
5973 }
5974
5975 if (pIemCpu->cIOReads != 1)
5976 CHECK_FIELD(rax);
5977 CHECK_FIELD(rcx);
5978 CHECK_FIELD(rdx);
5979 CHECK_FIELD(rbx);
5980 CHECK_FIELD(rsp);
5981 CHECK_FIELD(rbp);
5982 CHECK_FIELD(rsi);
5983 CHECK_FIELD(rdi);
5984 CHECK_FIELD(r8);
5985 CHECK_FIELD(r9);
5986 CHECK_FIELD(r10);
5987 CHECK_FIELD(r11);
5988 CHECK_FIELD(r12);
5989 CHECK_FIELD(r13);
CHECK_FIELD(r14);
CHECK_FIELD(r15);
5990 CHECK_FIELD(cs);
5991 CHECK_FIELD(csHid.u64Base);
5992 CHECK_FIELD(csHid.u32Limit);
5993 CHECK_FIELD(csHid.Attr.u);
5994 CHECK_FIELD(ss);
5995 CHECK_FIELD(ssHid.u64Base);
5996 CHECK_FIELD(ssHid.u32Limit);
5997 CHECK_FIELD(ssHid.Attr.u);
5998 CHECK_FIELD(ds);
5999 CHECK_FIELD(dsHid.u64Base);
6000 CHECK_FIELD(dsHid.u32Limit);
6001 CHECK_FIELD(dsHid.Attr.u);
6002 CHECK_FIELD(es);
6003 CHECK_FIELD(esHid.u64Base);
6004 CHECK_FIELD(esHid.u32Limit);
6005 CHECK_FIELD(esHid.Attr.u);
6006 CHECK_FIELD(fs);
6007 CHECK_FIELD(fsHid.u64Base);
6008 CHECK_FIELD(fsHid.u32Limit);
6009 CHECK_FIELD(fsHid.Attr.u);
6010 CHECK_FIELD(gs);
6011 CHECK_FIELD(gsHid.u64Base);
6012 CHECK_FIELD(gsHid.u32Limit);
6013 CHECK_FIELD(gsHid.Attr.u);
6014 CHECK_FIELD(cr0);
6015 CHECK_FIELD(cr2);
6016 CHECK_FIELD(cr3);
6017 CHECK_FIELD(cr4);
6018 CHECK_FIELD(dr[0]);
6019 CHECK_FIELD(dr[1]);
6020 CHECK_FIELD(dr[2]);
6021 CHECK_FIELD(dr[3]);
6022 CHECK_FIELD(dr[6]);
6023 CHECK_FIELD(dr[7]);
6024 CHECK_FIELD(gdtr.cbGdt);
6025 CHECK_FIELD(gdtr.pGdt);
6026 CHECK_FIELD(idtr.cbIdt);
6027 CHECK_FIELD(idtr.pIdt);
6028 CHECK_FIELD(ldtr);
6029 CHECK_FIELD(ldtrHid.u64Base);
6030 CHECK_FIELD(ldtrHid.u32Limit);
6031 CHECK_FIELD(ldtrHid.Attr.u);
6032 CHECK_FIELD(tr);
6033 CHECK_FIELD(trHid.u64Base);
6034 CHECK_FIELD(trHid.u32Limit);
6035 CHECK_FIELD(trHid.Attr.u);
6036 CHECK_FIELD(SysEnter.cs);
6037 CHECK_FIELD(SysEnter.eip);
6038 CHECK_FIELD(SysEnter.esp);
6039 CHECK_FIELD(msrEFER);
6040 CHECK_FIELD(msrSTAR);
6041 CHECK_FIELD(msrPAT);
6042 CHECK_FIELD(msrLSTAR);
6043 CHECK_FIELD(msrCSTAR);
6044 CHECK_FIELD(msrSFMASK);
6045 CHECK_FIELD(msrKERNELGSBASE);
6046
6047 if (cDiffs != 0)
6048 AssertFailed();
6049# undef CHECK_FIELD
# undef CHECK_BIT_FIELD
6050 }
6051 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
6052}
6053
6054#endif /* IEM_VERIFICATION_MODE && IN_RING3 */
6055
6056
6057/**
6058 * Execute one instruction.
6059 *
6060 * @return Strict VBox status code.
6061 * @param pVCpu The current virtual CPU.
6062 */
6063VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
6064{
6065 PIEMCPU pIemCpu = &pVCpu->iem.s;
6066#ifdef DEBUG
6067 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6068 char szInstr[256];
6069 uint32_t cbInstr = 0;
6070 DBGFR3DisasInstrEx(pVCpu->pVMR3, pVCpu->idCpu, 0, 0,
6071 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6072 szInstr, sizeof(szInstr), &cbInstr);
6073
6074 Log2(("**** "
6075 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
6076 " eip=%08x esp=%08x ebp=%08x iopl=%d\n"
6077 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
6078 " %s\n"
6079 ,
6080 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
6081 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL,
6082 (RTSEL)pCtx->cs, (RTSEL)pCtx->ss, (RTSEL)pCtx->ds, (RTSEL)pCtx->es,
6083 (RTSEL)pCtx->fs, (RTSEL)pCtx->gs, pCtx->eflags.u,
6084 szInstr));
6085#endif
6086#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
6087 iemExecVerificationModeSetup(pIemCpu);
6088#endif
6089
6090 /*
6091 * Do the decoding and emulation.
6092 */
6093 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
6094 if (rcStrict != VINF_SUCCESS)
6095 return rcStrict;
6096
6097 uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
6098 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
6099 if (rcStrict == VINF_SUCCESS)
6100 pIemCpu->cInstructions++;
6101
6102 /* Execute the next instruction as well if a cli, pop ss or
6103 mov ss, Gr has just completed successfully. */
6104 if ( rcStrict == VINF_SUCCESS
6105 && VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
6106 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
6107 {
6108 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
6109 if (rcStrict == VINF_SUCCESS)
6110 {
6111 IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
6112 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
6113 if (rcStrict == VINF_SUCCESS)
6114 pIemCpu->cInstructions++;
6115 }
6116 }
6117
6118 /*
6119 * Assert some sanity.
6120 */
6121#ifdef DEBUG
6122 AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
6123#endif
6124#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
6125 iemExecVerificationModeCheck(pIemCpu);
6126#endif
6127 return rcStrict;
6128}
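/*
 * Illustrative sketch (not part of the build): how a caller such as EM might
 * drive IEMExecOne for a single instruction. The status code handling shown
 * here is an assumption for the example, not EM's actual code.
 *
 *     VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *     if (rcStrict == VINF_SUCCESS)
 *         return VINF_SUCCESS;            // guest state advanced by one instruction
 *     return VBOXSTRICTRC_TODO(rcStrict); // propagate traps / informational statuses
 */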
6129