VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsInterpretOnly.cpp@ 95516

Last change on this file since 95516 was 95516, checked in by vboxsync, 2 years ago

VMM/IEM: Eliminated the IEMOPMEDIAF2 function tables. Not needed because SSE and MMX are never implemented by the same decoder functions. bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 55.8 KB
Line 
1/* $Id: IEMAllInstructionsInterpretOnly.cpp 95516 2022-07-05 14:26:12Z vboxsync $ */
2/** @file
3 * IEM - Instruction Decoding and Emulation.
4 */
5
6/*
7 * Copyright (C) 2011-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#ifndef LOG_GROUP /* defined when included by tstIEMCheckMc.cpp */
23# define LOG_GROUP LOG_GROUP_IEM
24#endif
25#define VMCPU_INCL_CPUM_GST_CTX
26#include <VBox/vmm/iem.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/apic.h>
29#include <VBox/vmm/pdm.h>
30#include <VBox/vmm/pgm.h>
31#include <VBox/vmm/iom.h>
32#include <VBox/vmm/em.h>
33#include <VBox/vmm/hm.h>
34#include <VBox/vmm/nem.h>
35#include <VBox/vmm/gim.h>
36#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
37# include <VBox/vmm/em.h>
38# include <VBox/vmm/hm_svm.h>
39#endif
40#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
41# include <VBox/vmm/hmvmxinline.h>
42#endif
43#include <VBox/vmm/tm.h>
44#include <VBox/vmm/dbgf.h>
45#include <VBox/vmm/dbgftrace.h>
46#ifndef TST_IEM_CHECK_MC
47# include "IEMInternal.h"
48#endif
49#include <VBox/vmm/vmcc.h>
50#include <VBox/log.h>
51#include <VBox/err.h>
52#include <VBox/param.h>
53#include <VBox/dis.h>
54#include <VBox/disopcode.h>
55#include <iprt/asm-math.h>
56#include <iprt/assert.h>
57#include <iprt/string.h>
58#include <iprt/x86.h>
59
60#ifndef TST_IEM_CHECK_MC
61# include "IEMInline.h"
62# include "IEMOpHlp.h"
63# include "IEMMc.h"
64#endif
65
66
67#ifdef _MSC_VER
68# pragma warning(push)
69# pragma warning(disable: 4702) /* Unreachable code like return in iemOp_Grp6_lldt. */
70#endif
71
72
73/*********************************************************************************************************************************
74* Global Variables *
75*********************************************************************************************************************************/
76#ifndef TST_IEM_CHECK_MC
77/** Function table for the ADD instruction. */
78IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
79{
80 iemAImpl_add_u8, iemAImpl_add_u8_locked,
81 iemAImpl_add_u16, iemAImpl_add_u16_locked,
82 iemAImpl_add_u32, iemAImpl_add_u32_locked,
83 iemAImpl_add_u64, iemAImpl_add_u64_locked
84};
85
86/** Function table for the ADC instruction. */
87IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
88{
89 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
90 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
91 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
92 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
93};
94
95/** Function table for the SUB instruction. */
96IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
97{
98 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
99 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
100 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
101 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
102};
103
104/** Function table for the SBB instruction. */
105IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
106{
107 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
108 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
109 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
110 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
111};
112
113/** Function table for the OR instruction. */
114IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
115{
116 iemAImpl_or_u8, iemAImpl_or_u8_locked,
117 iemAImpl_or_u16, iemAImpl_or_u16_locked,
118 iemAImpl_or_u32, iemAImpl_or_u32_locked,
119 iemAImpl_or_u64, iemAImpl_or_u64_locked
120};
121
122/** Function table for the XOR instruction. */
123IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
124{
125 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
126 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
127 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
128 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
129};
130
131/** Function table for the AND instruction. */
132IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
133{
134 iemAImpl_and_u8, iemAImpl_and_u8_locked,
135 iemAImpl_and_u16, iemAImpl_and_u16_locked,
136 iemAImpl_and_u32, iemAImpl_and_u32_locked,
137 iemAImpl_and_u64, iemAImpl_and_u64_locked
138};
139
140/** Function table for the CMP instruction.
141 * @remarks Making operand order ASSUMPTIONS.
142 */
143IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
144{
145 iemAImpl_cmp_u8, NULL,
146 iemAImpl_cmp_u16, NULL,
147 iemAImpl_cmp_u32, NULL,
148 iemAImpl_cmp_u64, NULL
149};
150
151/** Function table for the TEST instruction.
152 * @remarks Making operand order ASSUMPTIONS.
153 */
154IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
155{
156 iemAImpl_test_u8, NULL,
157 iemAImpl_test_u16, NULL,
158 iemAImpl_test_u32, NULL,
159 iemAImpl_test_u64, NULL
160};
161
162
163/** Function table for the BT instruction. */
164IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
165{
166 NULL, NULL,
167 iemAImpl_bt_u16, NULL,
168 iemAImpl_bt_u32, NULL,
169 iemAImpl_bt_u64, NULL
170};
171
172/** Function table for the BTC instruction. */
173IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
174{
175 NULL, NULL,
176 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
177 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
178 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
179};
180
181/** Function table for the BTR instruction. */
182IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
183{
184 NULL, NULL,
185 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
186 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
187 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
188};
189
190/** Function table for the BTS instruction. */
191IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
192{
193 NULL, NULL,
194 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
195 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
196 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
197};
198
199/** Function table for the BSF instruction. */
200IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
201{
202 NULL, NULL,
203 iemAImpl_bsf_u16, NULL,
204 iemAImpl_bsf_u32, NULL,
205 iemAImpl_bsf_u64, NULL
206};
207
208/** Function table for the BSF instruction, AMD EFLAGS variant. */
209IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf_amd =
210{
211 NULL, NULL,
212 iemAImpl_bsf_u16_amd, NULL,
213 iemAImpl_bsf_u32_amd, NULL,
214 iemAImpl_bsf_u64_amd, NULL
215};
216
217/** Function table for the BSF instruction, Intel EFLAGS variant. */
218IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf_intel =
219{
220 NULL, NULL,
221 iemAImpl_bsf_u16_intel, NULL,
222 iemAImpl_bsf_u32_intel, NULL,
223 iemAImpl_bsf_u64_intel, NULL
224};
225
226/** EFLAGS variation selection table for the BSF instruction. */
227IEM_STATIC const IEMOPBINSIZES * const g_iemAImpl_bsf_eflags[] =
228{
229 &g_iemAImpl_bsf,
230 &g_iemAImpl_bsf_intel,
231 &g_iemAImpl_bsf_amd,
232 &g_iemAImpl_bsf,
233};
234
235/** Function table for the BSR instruction. */
236IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
237{
238 NULL, NULL,
239 iemAImpl_bsr_u16, NULL,
240 iemAImpl_bsr_u32, NULL,
241 iemAImpl_bsr_u64, NULL
242};
243
244/** Function table for the BSR instruction, AMD EFLAGS variant. */
245IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr_amd =
246{
247 NULL, NULL,
248 iemAImpl_bsr_u16_amd, NULL,
249 iemAImpl_bsr_u32_amd, NULL,
250 iemAImpl_bsr_u64_amd, NULL
251};
252
253/** Function table for the BSR instruction, Intel EFLAGS variant. */
254IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr_intel =
255{
256 NULL, NULL,
257 iemAImpl_bsr_u16_intel, NULL,
258 iemAImpl_bsr_u32_intel, NULL,
259 iemAImpl_bsr_u64_intel, NULL
260};
261
262/** EFLAGS variation selection table for the BSR instruction. */
263IEM_STATIC const IEMOPBINSIZES * const g_iemAImpl_bsr_eflags[] =
264{
265 &g_iemAImpl_bsr,
266 &g_iemAImpl_bsr_intel,
267 &g_iemAImpl_bsr_amd,
268 &g_iemAImpl_bsr,
269};
270
271/** Function table for the IMUL instruction. */
272IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
273{
274 NULL, NULL,
275 iemAImpl_imul_two_u16, NULL,
276 iemAImpl_imul_two_u32, NULL,
277 iemAImpl_imul_two_u64, NULL
278};
279
280/** Function table for the IMUL instruction, AMD EFLAGS variant. */
281IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two_amd =
282{
283 NULL, NULL,
284 iemAImpl_imul_two_u16_amd, NULL,
285 iemAImpl_imul_two_u32_amd, NULL,
286 iemAImpl_imul_two_u64_amd, NULL
287};
288
289/** Function table for the IMUL instruction, Intel EFLAGS variant. */
290IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two_intel =
291{
292 NULL, NULL,
293 iemAImpl_imul_two_u16_intel, NULL,
294 iemAImpl_imul_two_u32_intel, NULL,
295 iemAImpl_imul_two_u64_intel, NULL
296};
297
298/** EFLAGS variation selection table for the IMUL instruction. */
299IEM_STATIC const IEMOPBINSIZES * const g_iemAImpl_imul_two_eflags[] =
300{
301 &g_iemAImpl_imul_two,
302 &g_iemAImpl_imul_two_intel,
303 &g_iemAImpl_imul_two_amd,
304 &g_iemAImpl_imul_two,
305};
306
307/** EFLAGS variation selection table for the 16-bit IMUL instruction. */
308IEM_STATIC PFNIEMAIMPLBINU16 const g_iemAImpl_imul_two_u16_eflags[] =
309{
310 iemAImpl_imul_two_u16,
311 iemAImpl_imul_two_u16_intel,
312 iemAImpl_imul_two_u16_amd,
313 iemAImpl_imul_two_u16,
314};
315
316/** EFLAGS variation selection table for the 32-bit IMUL instruction. */
317IEM_STATIC PFNIEMAIMPLBINU32 const g_iemAImpl_imul_two_u32_eflags[] =
318{
319 iemAImpl_imul_two_u32,
320 iemAImpl_imul_two_u32_intel,
321 iemAImpl_imul_two_u32_amd,
322 iemAImpl_imul_two_u32,
323};
324
325/** EFLAGS variation selection table for the 64-bit IMUL instruction. */
326IEM_STATIC PFNIEMAIMPLBINU64 const g_iemAImpl_imul_two_u64_eflags[] =
327{
328 iemAImpl_imul_two_u64,
329 iemAImpl_imul_two_u64_intel,
330 iemAImpl_imul_two_u64_amd,
331 iemAImpl_imul_two_u64,
332};
333
334/** Group 1 /r lookup table. */
335IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
336{
337 &g_iemAImpl_add,
338 &g_iemAImpl_or,
339 &g_iemAImpl_adc,
340 &g_iemAImpl_sbb,
341 &g_iemAImpl_and,
342 &g_iemAImpl_sub,
343 &g_iemAImpl_xor,
344 &g_iemAImpl_cmp
345};
346
347/** Function table for the INC instruction. */
348IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
349{
350 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
351 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
352 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
353 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
354};
355
356/** Function table for the DEC instruction. */
357IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
358{
359 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
360 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
361 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
362 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
363};
364
365/** Function table for the NEG instruction. */
366IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
367{
368 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
369 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
370 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
371 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
372};
373
374/** Function table for the NOT instruction. */
375IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
376{
377 iemAImpl_not_u8, iemAImpl_not_u8_locked,
378 iemAImpl_not_u16, iemAImpl_not_u16_locked,
379 iemAImpl_not_u32, iemAImpl_not_u32_locked,
380 iemAImpl_not_u64, iemAImpl_not_u64_locked
381};
382
383
384/** Function table for the ROL instruction. */
385IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
386{
387 iemAImpl_rol_u8,
388 iemAImpl_rol_u16,
389 iemAImpl_rol_u32,
390 iemAImpl_rol_u64
391};
392
393/** Function table for the ROL instruction, AMD EFLAGS variant. */
394IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol_amd =
395{
396 iemAImpl_rol_u8_amd,
397 iemAImpl_rol_u16_amd,
398 iemAImpl_rol_u32_amd,
399 iemAImpl_rol_u64_amd
400};
401
402/** Function table for the ROL instruction, Intel EFLAGS variant. */
403IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol_intel =
404{
405 iemAImpl_rol_u8_intel,
406 iemAImpl_rol_u16_intel,
407 iemAImpl_rol_u32_intel,
408 iemAImpl_rol_u64_intel
409};
410
411/** EFLAGS variation selection table for the ROL instruction. */
412IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_rol_eflags[] =
413{
414 &g_iemAImpl_rol,
415 &g_iemAImpl_rol_intel,
416 &g_iemAImpl_rol_amd,
417 &g_iemAImpl_rol,
418};
419
420
421/** Function table for the ROR instruction. */
422IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
423{
424 iemAImpl_ror_u8,
425 iemAImpl_ror_u16,
426 iemAImpl_ror_u32,
427 iemAImpl_ror_u64
428};
429
430/** Function table for the ROR instruction, AMD EFLAGS variant. */
431IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror_amd =
432{
433 iemAImpl_ror_u8_amd,
434 iemAImpl_ror_u16_amd,
435 iemAImpl_ror_u32_amd,
436 iemAImpl_ror_u64_amd
437};
438
439/** Function table for the ROR instruction, Intel EFLAGS variant. */
440IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror_intel =
441{
442 iemAImpl_ror_u8_intel,
443 iemAImpl_ror_u16_intel,
444 iemAImpl_ror_u32_intel,
445 iemAImpl_ror_u64_intel
446};
447
448/** EFLAGS variation selection table for the ROR instruction. */
449IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_ror_eflags[] =
450{
451 &g_iemAImpl_ror,
452 &g_iemAImpl_ror_intel,
453 &g_iemAImpl_ror_amd,
454 &g_iemAImpl_ror,
455};
456
457
458/** Function table for the RCL instruction. */
459IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
460{
461 iemAImpl_rcl_u8,
462 iemAImpl_rcl_u16,
463 iemAImpl_rcl_u32,
464 iemAImpl_rcl_u64
465};
466
467/** Function table for the RCL instruction, AMD EFLAGS variant. */
468IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl_amd =
469{
470 iemAImpl_rcl_u8_amd,
471 iemAImpl_rcl_u16_amd,
472 iemAImpl_rcl_u32_amd,
473 iemAImpl_rcl_u64_amd
474};
475
476/** Function table for the RCL instruction, Intel EFLAGS variant. */
477IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl_intel =
478{
479 iemAImpl_rcl_u8_intel,
480 iemAImpl_rcl_u16_intel,
481 iemAImpl_rcl_u32_intel,
482 iemAImpl_rcl_u64_intel
483};
484
485/** EFLAGS variation selection table for the RCL instruction. */
486IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_rcl_eflags[] =
487{
488 &g_iemAImpl_rcl,
489 &g_iemAImpl_rcl_intel,
490 &g_iemAImpl_rcl_amd,
491 &g_iemAImpl_rcl,
492};
493
494
495/** Function table for the RCR instruction. */
496IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
497{
498 iemAImpl_rcr_u8,
499 iemAImpl_rcr_u16,
500 iemAImpl_rcr_u32,
501 iemAImpl_rcr_u64
502};
503
504/** Function table for the RCR instruction, AMD EFLAGS variant. */
505IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr_amd =
506{
507 iemAImpl_rcr_u8_amd,
508 iemAImpl_rcr_u16_amd,
509 iemAImpl_rcr_u32_amd,
510 iemAImpl_rcr_u64_amd
511};
512
513/** Function table for the RCR instruction, Intel EFLAGS variant. */
514IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr_intel =
515{
516 iemAImpl_rcr_u8_intel,
517 iemAImpl_rcr_u16_intel,
518 iemAImpl_rcr_u32_intel,
519 iemAImpl_rcr_u64_intel
520};
521
522/** EFLAGS variation selection table for the RCR instruction. */
523IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_rcr_eflags[] =
524{
525 &g_iemAImpl_rcr,
526 &g_iemAImpl_rcr_intel,
527 &g_iemAImpl_rcr_amd,
528 &g_iemAImpl_rcr,
529};
530
531
532/** Function table for the SHL instruction. */
533IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
534{
535 iemAImpl_shl_u8,
536 iemAImpl_shl_u16,
537 iemAImpl_shl_u32,
538 iemAImpl_shl_u64
539};
540
541/** Function table for the SHL instruction, AMD EFLAGS variant. */
542IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl_amd =
543{
544 iemAImpl_shl_u8_amd,
545 iemAImpl_shl_u16_amd,
546 iemAImpl_shl_u32_amd,
547 iemAImpl_shl_u64_amd
548};
549
550/** Function table for the SHL instruction, Intel EFLAGS variant. */
551IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl_intel =
552{
553 iemAImpl_shl_u8_intel,
554 iemAImpl_shl_u16_intel,
555 iemAImpl_shl_u32_intel,
556 iemAImpl_shl_u64_intel
557};
558
559/** EFLAGS variation selection table for the SHL instruction. */
560IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_shl_eflags[] =
561{
562 &g_iemAImpl_shl,
563 &g_iemAImpl_shl_intel,
564 &g_iemAImpl_shl_amd,
565 &g_iemAImpl_shl,
566};
567
568
569/** Function table for the SHR instruction. */
570IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
571{
572 iemAImpl_shr_u8,
573 iemAImpl_shr_u16,
574 iemAImpl_shr_u32,
575 iemAImpl_shr_u64
576};
577
578/** Function table for the SHR instruction, AMD EFLAGS variant. */
579IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr_amd =
580{
581 iemAImpl_shr_u8_amd,
582 iemAImpl_shr_u16_amd,
583 iemAImpl_shr_u32_amd,
584 iemAImpl_shr_u64_amd
585};
586
587/** Function table for the SHR instruction, Intel EFLAGS variant. */
588IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr_intel =
589{
590 iemAImpl_shr_u8_intel,
591 iemAImpl_shr_u16_intel,
592 iemAImpl_shr_u32_intel,
593 iemAImpl_shr_u64_intel
594};
595
596/** EFLAGS variation selection table for the SHR instruction. */
597IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_shr_eflags[] =
598{
599 &g_iemAImpl_shr,
600 &g_iemAImpl_shr_intel,
601 &g_iemAImpl_shr_amd,
602 &g_iemAImpl_shr,
603};
604
605
606/** Function table for the SAR instruction. */
607IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
608{
609 iemAImpl_sar_u8,
610 iemAImpl_sar_u16,
611 iemAImpl_sar_u32,
612 iemAImpl_sar_u64
613};
614
615/** Function table for the SAR instruction, AMD EFLAGS variant. */
616IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar_amd =
617{
618 iemAImpl_sar_u8_amd,
619 iemAImpl_sar_u16_amd,
620 iemAImpl_sar_u32_amd,
621 iemAImpl_sar_u64_amd
622};
623
624/** Function table for the SAR instruction, Intel EFLAGS variant. */
625IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar_intel =
626{
627 iemAImpl_sar_u8_intel,
628 iemAImpl_sar_u16_intel,
629 iemAImpl_sar_u32_intel,
630 iemAImpl_sar_u64_intel
631};
632
633/** EFLAGS variation selection table for the SAR instruction. */
634IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_sar_eflags[] =
635{
636 &g_iemAImpl_sar,
637 &g_iemAImpl_sar_intel,
638 &g_iemAImpl_sar_amd,
639 &g_iemAImpl_sar,
640};
641
642
643/** Function table for the MUL instruction. */
644IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
645{
646 iemAImpl_mul_u8,
647 iemAImpl_mul_u16,
648 iemAImpl_mul_u32,
649 iemAImpl_mul_u64
650};
651
652/** Function table for the MUL instruction, AMD EFLAGS variation. */
653IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul_amd =
654{
655 iemAImpl_mul_u8_amd,
656 iemAImpl_mul_u16_amd,
657 iemAImpl_mul_u32_amd,
658 iemAImpl_mul_u64_amd
659};
660
661/** Function table for the MUL instruction, Intel EFLAGS variation. */
662IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul_intel =
663{
664 iemAImpl_mul_u8_intel,
665 iemAImpl_mul_u16_intel,
666 iemAImpl_mul_u32_intel,
667 iemAImpl_mul_u64_intel
668};
669
670/** EFLAGS variation selection table for the MUL instruction. */
671IEM_STATIC const IEMOPMULDIVSIZES * const g_iemAImpl_mul_eflags[] =
672{
673 &g_iemAImpl_mul,
674 &g_iemAImpl_mul_intel,
675 &g_iemAImpl_mul_amd,
676 &g_iemAImpl_mul,
677};
678
679/** EFLAGS variation selection table for the 8-bit MUL instruction. */
680IEM_STATIC PFNIEMAIMPLMULDIVU8 const g_iemAImpl_mul_u8_eflags[] =
681{
682 iemAImpl_mul_u8,
683 iemAImpl_mul_u8_intel,
684 iemAImpl_mul_u8_amd,
685 iemAImpl_mul_u8
686};
687
688
689/** Function table for the IMUL instruction working implicitly on rAX. */
690IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
691{
692 iemAImpl_imul_u8,
693 iemAImpl_imul_u16,
694 iemAImpl_imul_u32,
695 iemAImpl_imul_u64
696};
697
698/** Function table for the IMUL instruction working implicitly on rAX, AMD EFLAGS variation. */
699IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul_amd =
700{
701 iemAImpl_imul_u8_amd,
702 iemAImpl_imul_u16_amd,
703 iemAImpl_imul_u32_amd,
704 iemAImpl_imul_u64_amd
705};
706
707/** Function table for the IMUL instruction working implicitly on rAX, Intel EFLAGS variation. */
708IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul_intel =
709{
710 iemAImpl_imul_u8_intel,
711 iemAImpl_imul_u16_intel,
712 iemAImpl_imul_u32_intel,
713 iemAImpl_imul_u64_intel
714};
715
716/** EFLAGS variation selection table for the IMUL instruction. */
717IEM_STATIC const IEMOPMULDIVSIZES * const g_iemAImpl_imul_eflags[] =
718{
719 &g_iemAImpl_imul,
720 &g_iemAImpl_imul_intel,
721 &g_iemAImpl_imul_amd,
722 &g_iemAImpl_imul,
723};
724
725/** EFLAGS variation selection table for the 8-bit IMUL instruction. */
726IEM_STATIC PFNIEMAIMPLMULDIVU8 const g_iemAImpl_imul_u8_eflags[] =
727{
728 iemAImpl_imul_u8,
729 iemAImpl_imul_u8_intel,
730 iemAImpl_imul_u8_amd,
731 iemAImpl_imul_u8
732};
733
734
735/** Function table for the DIV instruction. */
736IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
737{
738 iemAImpl_div_u8,
739 iemAImpl_div_u16,
740 iemAImpl_div_u32,
741 iemAImpl_div_u64
742};
743
744/** Function table for the DIV instruction, AMD EFLAGS variation. */
745IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div_amd =
746{
747 iemAImpl_div_u8_amd,
748 iemAImpl_div_u16_amd,
749 iemAImpl_div_u32_amd,
750 iemAImpl_div_u64_amd
751};
752
753/** Function table for the DIV instruction, Intel EFLAGS variation. */
754IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div_intel =
755{
756 iemAImpl_div_u8_intel,
757 iemAImpl_div_u16_intel,
758 iemAImpl_div_u32_intel,
759 iemAImpl_div_u64_intel
760};
761
762/** EFLAGS variation selection table for the DIV instruction. */
763IEM_STATIC const IEMOPMULDIVSIZES * const g_iemAImpl_div_eflags[] =
764{
765 &g_iemAImpl_div,
766 &g_iemAImpl_div_intel,
767 &g_iemAImpl_div_amd,
768 &g_iemAImpl_div,
769};
770
771/** EFLAGS variation selection table for the 8-bit DIV instruction. */
772IEM_STATIC PFNIEMAIMPLMULDIVU8 const g_iemAImpl_div_u8_eflags[] =
773{
774 iemAImpl_div_u8,
775 iemAImpl_div_u8_intel,
776 iemAImpl_div_u8_amd,
777 iemAImpl_div_u8
778};
779
780
781/** Function table for the IDIV instruction. */
782IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
783{
784 iemAImpl_idiv_u8,
785 iemAImpl_idiv_u16,
786 iemAImpl_idiv_u32,
787 iemAImpl_idiv_u64
788};
789
790/** Function table for the IDIV instruction, AMD EFLAGS variation. */
791IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv_amd =
792{
793 iemAImpl_idiv_u8_amd,
794 iemAImpl_idiv_u16_amd,
795 iemAImpl_idiv_u32_amd,
796 iemAImpl_idiv_u64_amd
797};
798
799/** Function table for the IDIV instruction, Intel EFLAGS variation. */
800IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv_intel =
801{
802 iemAImpl_idiv_u8_intel,
803 iemAImpl_idiv_u16_intel,
804 iemAImpl_idiv_u32_intel,
805 iemAImpl_idiv_u64_intel
806};
807
808/** EFLAGS variation selection table for the IDIV instruction. */
809IEM_STATIC const IEMOPMULDIVSIZES * const g_iemAImpl_idiv_eflags[] =
810{
811 &g_iemAImpl_idiv,
812 &g_iemAImpl_idiv_intel,
813 &g_iemAImpl_idiv_amd,
814 &g_iemAImpl_idiv,
815};
816
817/** EFLAGS variation selection table for the 8-bit IDIV instruction. */
818IEM_STATIC PFNIEMAIMPLMULDIVU8 const g_iemAImpl_idiv_u8_eflags[] =
819{
820 iemAImpl_idiv_u8,
821 iemAImpl_idiv_u8_intel,
822 iemAImpl_idiv_u8_amd,
823 iemAImpl_idiv_u8
824};
825
826
827/** Function table for the SHLD instruction. */
828IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
829{
830 iemAImpl_shld_u16,
831 iemAImpl_shld_u32,
832 iemAImpl_shld_u64,
833};
834
835/** Function table for the SHLD instruction, AMD EFLAGS variation. */
836IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld_amd =
837{
838 iemAImpl_shld_u16_amd,
839 iemAImpl_shld_u32_amd,
840 iemAImpl_shld_u64_amd
841};
842
843/** Function table for the SHLD instruction, Intel EFLAGS variation. */
844IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld_intel =
845{
846 iemAImpl_shld_u16_intel,
847 iemAImpl_shld_u32_intel,
848 iemAImpl_shld_u64_intel
849};
850
851/** EFLAGS variation selection table for the SHLD instruction. */
852IEM_STATIC const IEMOPSHIFTDBLSIZES * const g_iemAImpl_shld_eflags[] =
853{
854 &g_iemAImpl_shld,
855 &g_iemAImpl_shld_intel,
856 &g_iemAImpl_shld_amd,
857 &g_iemAImpl_shld
858};
859
860/** Function table for the SHRD instruction. */
861IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
862{
863 iemAImpl_shrd_u16,
864 iemAImpl_shrd_u32,
865 iemAImpl_shrd_u64
866};
867
868/** Function table for the SHRD instruction, AMD EFLAGS variation. */
869IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd_amd =
870{
871 iemAImpl_shrd_u16_amd,
872 iemAImpl_shrd_u32_amd,
873 iemAImpl_shrd_u64_amd
874};
875
876/** Function table for the SHRD instruction, Intel EFLAGS variation. */
877IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd_intel =
878{
879 iemAImpl_shrd_u16_intel,
880 iemAImpl_shrd_u32_intel,
881 iemAImpl_shrd_u64_intel
882};
883
884/** EFLAGS variation selection table for the SHRD instruction. */
885IEM_STATIC const IEMOPSHIFTDBLSIZES * const g_iemAImpl_shrd_eflags[] =
886{
887 &g_iemAImpl_shrd,
888 &g_iemAImpl_shrd_intel,
889 &g_iemAImpl_shrd_amd,
890 &g_iemAImpl_shrd
891};
892
893
894# ifndef IEM_WITHOUT_ASSEMBLY
895/** Function table for the VPSHUFB instruction. */
896IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpshufb = { iemAImpl_vpshufb_u128, iemAImpl_vpshufb_u256 };
897/** Function table for the VPXOR instruction */
898IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpand = { iemAImpl_vpand_u128, iemAImpl_vpand_u256 };
899/** Function table for the VPXORN instruction */
900IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpandn = { iemAImpl_vpandn_u128, iemAImpl_vpandn_u256 };
901/** Function table for the VPOR instruction */
902IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpor = { iemAImpl_vpor_u128, iemAImpl_vpor_u256 };
903/** Function table for the VPXOR instruction */
904IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpxor = { iemAImpl_vpxor_u128, iemAImpl_vpxor_u256 };
905/** Function table for the VPCMPEQB instruction */
906IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpcmpeqb = { iemAImpl_vpcmpeqb_u128, iemAImpl_vpcmpeqb_u256 };
907/** Function table for the VPCMPEQW instruction */
908IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpcmpeqw = { iemAImpl_vpcmpeqw_u128, iemAImpl_vpcmpeqw_u256 };
909/** Function table for the VPCMPEQD instruction */
910IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpcmpeqd = { iemAImpl_vpcmpeqd_u128, iemAImpl_vpcmpeqd_u256 };
911/** Function table for the VPCMPEQQ instruction */
912IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpcmpeqq = { iemAImpl_vpcmpeqq_u128, iemAImpl_vpcmpeqq_u256 };
913/** Function table for the VPCMPGTB instruction */
914IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpcmpgtb = { iemAImpl_vpcmpgtb_u128, iemAImpl_vpcmpgtb_u256 };
915/** Function table for the VPCMPGTW instruction */
916IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpcmpgtw = { iemAImpl_vpcmpgtw_u128, iemAImpl_vpcmpgtw_u256 };
917/** Function table for the VPCMPGTD instruction */
918IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpcmpgtd = { iemAImpl_vpcmpgtd_u128, iemAImpl_vpcmpgtd_u256 };
919/** Function table for the VPCMPGTQ instruction */
920IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpcmpgtq = { iemAImpl_vpcmpgtq_u128, iemAImpl_vpcmpgtq_u256 };
921/** Function table for the VPADDB instruction */
922IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpaddb = { iemAImpl_vpaddb_u128, iemAImpl_vpaddb_u256 };
923/** Function table for the VPADDW instruction */
924IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpaddw = { iemAImpl_vpaddw_u128, iemAImpl_vpaddw_u256 };
925/** Function table for the VPADDD instruction */
926IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpaddd = { iemAImpl_vpaddd_u128, iemAImpl_vpaddd_u256 };
927/** Function table for the VPADDQ instruction */
928IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpaddq = { iemAImpl_vpaddq_u128, iemAImpl_vpaddq_u256 };
929/** Function table for the VPSUBB instruction */
930IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpsubb = { iemAImpl_vpsubb_u128, iemAImpl_vpsubb_u256 };
931/** Function table for the VPSUBW instruction */
932IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpsubw = { iemAImpl_vpsubw_u128, iemAImpl_vpsubw_u256 };
933/** Function table for the VPSUBD instruction */
934IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpsubd = { iemAImpl_vpsubd_u128, iemAImpl_vpsubd_u256 };
935/** Function table for the VPSUBQ instruction */
936IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpsubq = { iemAImpl_vpsubq_u128, iemAImpl_vpsubq_u256 };
937# endif
938
939/** Function table for the VPSHUFB instruction, software fallback. */
940IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpshufb_fallback = { iemAImpl_vpshufb_u128_fallback, iemAImpl_vpshufb_u256_fallback };
941/** Function table for the VPAND instruction, software fallback. */
942IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpand_fallback = { iemAImpl_vpand_u128_fallback, iemAImpl_vpand_u256_fallback };
943/** Function table for the VPANDN instruction, software fallback. */
944IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpandn_fallback= { iemAImpl_vpandn_u128_fallback, iemAImpl_vpandn_u256_fallback };
945/** Function table for the VPOR instruction, software fallback. */
946IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpor_fallback = { iemAImpl_vpor_u128_fallback, iemAImpl_vpor_u256_fallback };
947/** Function table for the VPXOR instruction, software fallback. */
948IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpxor_fallback = { iemAImpl_vpxor_u128_fallback, iemAImpl_vpxor_u256_fallback };
949/** Function table for the VPCMPEQB instruction, software fallback. */
950IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpcmpeqb_fallback = { iemAImpl_vpcmpeqb_u128_fallback, iemAImpl_vpcmpeqb_u256_fallback };
/** Function table for the VPCMPEQW instruction, software fallback. */
IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpcmpeqw_fallback = { iemAImpl_vpcmpeqw_u128_fallback, iemAImpl_vpcmpeqw_u256_fallback };
/** Function table for the VPCMPEQD instruction, software fallback. */
IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpcmpeqd_fallback = { iemAImpl_vpcmpeqd_u128_fallback, iemAImpl_vpcmpeqd_u256_fallback };
/** Function table for the VPCMPEQQ instruction, software fallback. */
IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpcmpeqq_fallback = { iemAImpl_vpcmpeqq_u128_fallback, iemAImpl_vpcmpeqq_u256_fallback };
/** Function table for the VPCMPGTB instruction, software fallback. */
IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpcmpgtb_fallback = { iemAImpl_vpcmpgtb_u128_fallback, iemAImpl_vpcmpgtb_u256_fallback };
/** Function table for the VPCMPGTW instruction, software fallback. */
IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpcmpgtw_fallback = { iemAImpl_vpcmpgtw_u128_fallback, iemAImpl_vpcmpgtw_u256_fallback };
/** Function table for the VPCMPGTD instruction, software fallback. */
IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpcmpgtd_fallback = { iemAImpl_vpcmpgtd_u128_fallback, iemAImpl_vpcmpgtd_u256_fallback };
/** Function table for the VPCMPGTQ instruction, software fallback. */
IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpcmpgtq_fallback = { iemAImpl_vpcmpgtq_u128_fallback, iemAImpl_vpcmpgtq_u256_fallback };
/** Function table for the VPADDB instruction, software fallback. */
IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpaddb_fallback = { iemAImpl_vpaddb_u128_fallback, iemAImpl_vpaddb_u256_fallback };
/** Function table for the VPADDW instruction, software fallback. */
IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpaddw_fallback = { iemAImpl_vpaddw_u128_fallback, iemAImpl_vpaddw_u256_fallback };
/** Function table for the VPADDD instruction, software fallback. */
IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpaddd_fallback = { iemAImpl_vpaddd_u128_fallback, iemAImpl_vpaddd_u256_fallback };
/** Function table for the VPADDQ instruction, software fallback. */
IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpaddq_fallback = { iemAImpl_vpaddq_u128_fallback, iemAImpl_vpaddq_u256_fallback };
/** Function table for the VPSUBB instruction, software fallback. */
IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpsubb_fallback = { iemAImpl_vpsubb_u128_fallback, iemAImpl_vpsubb_u256_fallback };
/** Function table for the VPSUBW instruction, software fallback. */
IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpsubw_fallback = { iemAImpl_vpsubw_u128_fallback, iemAImpl_vpsubw_u256_fallback };
/** Function table for the VPSUBD instruction, software fallback. */
IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpsubd_fallback = { iemAImpl_vpsubd_u128_fallback, iemAImpl_vpsubd_u256_fallback };
/** Function table for the VPSUBQ instruction, software fallback. */
IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpsubq_fallback = { iemAImpl_vpsubq_u128_fallback, iemAImpl_vpsubq_u256_fallback };
/* Note: Each IEMOPMEDIAF3 table above pairs the 128-bit and 256-bit software
   fallback workers for the corresponding VEX-encoded instruction. */
981
982#endif /* !TST_IEM_CHECK_MC */
983
984
/**
 * Common worker for instructions like ADD, AND, OR, ++ with a byte
 * memory/register as the destination.
 *
 * Fetches and decodes the ModR/M byte itself, handling both the
 * register-destination and memory-destination (incl. LOCK prefixed) forms.
 *
 * @param   pImpl   Pointer to the instruction implementation (assembly).
 *                  pfnLockedU8 is NULL for instructions without a locked
 *                  variant (CMP, TEST), in which case the memory operand is
 *                  mapped read-only.
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_r8, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if (IEM_IS_MODRM_REG_MODE(bRm))
    {
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        IEM_MC_FETCH_GREG_U8(u8Src, IEM_GET_MODRM_REG(pVCpu, bRm));
        IEM_MC_REF_GREG_U8(pu8Dst, IEM_GET_MODRM_RM(pVCpu, bRm));
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         * Note! We're putting the eflags on the stack here so we can commit them
         *       after the memory.
         */
        /* No locked worker means the operand is never written (CMP, TEST). */
        uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R; /* CMP,TEST */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *,  pu8Dst,           0);
        IEM_MC_ARG(uint8_t,    u8Src,            1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        if (!pImpl->pfnLockedU8) /* No locked variant: reject a LOCK prefix. */
            IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
        IEM_MC_MEM_MAP(pu8Dst, fAccess, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_GREG_U8(u8Src, IEM_GET_MODRM_REG(pVCpu, bRm));
        IEM_MC_FETCH_EFLAGS(EFlags);
        if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, fAccess);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1047
1048
/**
 * Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with
 * memory/register as the destination.
 *
 * Fetches and decodes the ModR/M byte itself and dispatches on the effective
 * operand size, handling register-destination and memory-destination
 * (incl. LOCK prefixed) forms.
 *
 * @param   pImpl   Pointer to the instruction implementation (assembly).
 *                  The pfnLockedUxx members are NULL for instructions without
 *                  a locked variant (CMP, TEST), in which case the memory
 *                  operand is mapped read-only.
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_rv, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if (IEM_IS_MODRM_REG_MODE(bRm))
    {
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

        switch (pVCpu->iem.s.enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t,   u16Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U16(u16Src, IEM_GET_MODRM_REG(pVCpu, bRm));
                IEM_MC_REF_GREG_U16(pu16Dst, IEM_GET_MODRM_RM(pVCpu, bRm));
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t,   u32Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U32(u32Src, IEM_GET_MODRM_REG(pVCpu, bRm));
                IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_RM(pVCpu, bRm));
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

                /* Writing a 32-bit GPR zero-extends into the full 64-bit
                   register; TEST doesn't write its destination, so skip it. */
                if (pImpl != &g_iemAImpl_test)
                    IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t,   u64Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U64(u64Src, IEM_GET_MODRM_REG(pVCpu, bRm));
                IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_RM(pVCpu, bRm));
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're accessing memory.
         * Note! We're putting the eflags on the stack here so we can commit them
         *       after the memory.
         */
        /* Checks the U8 member like the byte worker does — presumably the
           locked workers are all-or-nothing across operand sizes; TODO confirm. */
        uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R /* CMP,TEST */;
        switch (pVCpu->iem.s.enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst,          0);
                IEM_MC_ARG(uint16_t,   u16Src,           1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                if (!pImpl->pfnLockedU16) /* No locked variant: reject a LOCK prefix. */
                    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MEM_MAP(pu16Dst, fAccess, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U16(u16Src, IEM_GET_MODRM_REG(pVCpu, bRm));
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst,          0);
                IEM_MC_ARG(uint32_t,   u32Src,           1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                if (!pImpl->pfnLockedU32)
                    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MEM_MAP(pu32Dst, fAccess, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U32(u32Src, IEM_GET_MODRM_REG(pVCpu, bRm));
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst,          0);
                IEM_MC_ARG(uint64_t,   u64Src,           1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                if (!pImpl->pfnLockedU64)
                    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MEM_MAP(pu64Dst, fAccess, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U64(u64Src, IEM_GET_MODRM_REG(pVCpu, bRm));
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    return VINF_SUCCESS;
}
1201
1202
/**
 * Common worker for byte instructions like ADD, AND, OR, ++ with a register as
 * the destination.
 *
 * Since the destination is always a register, the memory operand is only ever
 * read; there is no LOCK prefixed form (a LOCK prefix raises \#UD via
 * IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX).
 *
 * @param   pImpl   Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_r8_rm, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if (IEM_IS_MODRM_REG_MODE(bRm))
    {
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        IEM_MC_FETCH_GREG_U8(u8Src, IEM_GET_MODRM_RM(pVCpu, bRm));
        IEM_MC_REF_GREG_U8(pu8Dst, IEM_GET_MODRM_REG(pVCpu, bRm));
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         */
        IEM_MC_BEGIN(3, 1);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
        IEM_MC_FETCH_MEM_U8(u8Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
        IEM_MC_REF_GREG_U8(pu8Dst, IEM_GET_MODRM_REG(pVCpu, bRm));
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1255
1256
/**
 * Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with a
 * register as the destination.
 *
 * Since the destination is always a register, the memory operand is only ever
 * read; there is no LOCK prefixed form (a LOCK prefix raises \#UD via
 * IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX).
 *
 * @param   pImpl   Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rv_rm, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if (IEM_IS_MODRM_REG_MODE(bRm))
    {
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
        switch (pVCpu->iem.s.enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t,   u16Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U16(u16Src, IEM_GET_MODRM_RM(pVCpu, bRm));
                IEM_MC_REF_GREG_U16(pu16Dst, IEM_GET_MODRM_REG(pVCpu, bRm));
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t,   u32Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U32(u32Src, IEM_GET_MODRM_RM(pVCpu, bRm));
                IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_REG(pVCpu, bRm));
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

                /* Writing a 32-bit GPR zero-extends into the full 64-bit register. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t,   u64Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U64(u64Src, IEM_GET_MODRM_RM(pVCpu, bRm));
                IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_REG(pVCpu, bRm));
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're accessing memory.
         */
        switch (pVCpu->iem.s.enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t,   u16Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_FETCH_MEM_U16(u16Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U16(pu16Dst, IEM_GET_MODRM_REG(pVCpu, bRm));
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t,   u32Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_FETCH_MEM_U32(u32Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_REG(pVCpu, bRm));
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

                /* Writing a 32-bit GPR zero-extends into the full 64-bit register. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t,   u64Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_FETCH_MEM_U64(u64Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_REG(pVCpu, bRm));
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    return VINF_SUCCESS;
}
1387
1388
/**
 * Common worker for instructions like ADD, AND, OR, ++ with working on AL with
 * a byte immediate.
 *
 * @param   pImpl   Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_AL_Ib, PCIEMOPBINSIZES, pImpl)
{
    /* Fetch the imm8 operand, then decoding is complete. */
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(3, 0);
    IEM_MC_ARG(uint8_t *,       pu8Dst,             0);
    IEM_MC_ARG_CONST(uint8_t,   u8Src,/*=*/ u8Imm,  1);
    IEM_MC_ARG(uint32_t *,      pEFlags,            2);

    IEM_MC_REF_GREG_U8(pu8Dst, X86_GREG_xAX); /* AL = low byte of rAX. */
    IEM_MC_REF_EFLAGS(pEFlags);
    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
1413
1414
/**
 * Common worker for instructions like ADD, AND, OR, ++ with working on
 * AX/EAX/RAX with a word/dword immediate (Iz).
 *
 * In 64-bit mode the immediate is a dword sign-extended to 64 bits, per the
 * Iz operand encoding (see IEM_OPCODE_GET_NEXT_S32_SX_U64 below).
 *
 * @param   pImpl   Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rAX_Iz, PCIEMOPBINSIZES, pImpl)
{
    switch (pVCpu->iem.s.enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
            IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint16_t *,      pu16Dst,                0);
            IEM_MC_ARG_CONST(uint16_t,  u16Src,/*=*/ u16Imm,    1);
            IEM_MC_ARG(uint32_t *,      pEFlags,                2);

            IEM_MC_REF_GREG_U16(pu16Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
            IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint32_t *,      pu32Dst,                0);
            IEM_MC_ARG_CONST(uint32_t,  u32Src,/*=*/ u32Imm,    1);
            IEM_MC_ARG(uint32_t *,      pEFlags,                2);

            IEM_MC_REF_GREG_U32(pu32Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

            /* Writing a 32-bit GPR zero-extends into the full 64-bit
               register; TEST doesn't write its destination, so skip it. */
            if (pImpl != &g_iemAImpl_test)
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:
        {
            /* Iz in 64-bit mode: 32-bit immediate, sign-extended to 64 bits. */
            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
            IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint64_t *,      pu64Dst,                0);
            IEM_MC_ARG_CONST(uint64_t,  u64Src,/*=*/ u64Imm,    1);
            IEM_MC_ARG(uint32_t *,      pEFlags,                2);

            IEM_MC_REF_GREG_U64(pu64Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* supplies the default: case */
    }
}
1487
1488
/** Opcodes 0xf1, 0xd6 - invalid opcode, raises \#UD without consuming any
 *  further bytes. */
FNIEMOP_DEF(iemOp_Invalid)
{
    IEMOP_MNEMONIC(Invalid, "Invalid");
    return IEMOP_RAISE_INVALID_OPCODE();
}
1495
1496
/** Invalid with RM byte. The caller has already fetched the ModR/M byte;
 *  no further bytes are consumed before raising \#UD. */
FNIEMOPRM_DEF(iemOp_InvalidWithRM)
{
    RT_NOREF_PV(bRm);
    IEMOP_MNEMONIC(InvalidWithRm, "InvalidWithRM");
    return IEMOP_RAISE_INVALID_OPCODE();
}
1504
1505
/** Invalid with RM byte where intel decodes any additional address encoding
 * bytes (SIB and displacement) before raising \#UD; non-Intel vendors skip
 * them. */
FNIEMOPRM_DEF(iemOp_InvalidWithRMNeedDecode)
{
    IEMOP_MNEMONIC(InvalidWithRMNeedDecode, "InvalidWithRMNeedDecode");
    if (pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
    {
#ifndef TST_IEM_CHECK_MC
        if (IEM_IS_MODRM_MEM_MODE(bRm))
        {
            /* Consume SIB/displacement bytes; the resulting address is unused. */
            RTGCPTR      GCPtrEff;
            VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
        }
#endif
    }
    IEMOP_HLP_DONE_DECODING();
    return IEMOP_RAISE_INVALID_OPCODE();
}
1526
1527
/** Invalid with RM byte where both AMD and Intel decodes any additional
 * address encoding bytes (SIB and displacement) before raising \#UD. */
FNIEMOPRM_DEF(iemOp_InvalidWithRMAllNeeded)
{
    IEMOP_MNEMONIC(InvalidWithRMAllNeeded, "InvalidWithRMAllNeeded");
#ifndef TST_IEM_CHECK_MC
    if (IEM_IS_MODRM_MEM_MODE(bRm))
    {
        /* Consume SIB/displacement bytes; the resulting address is unused. */
        RTGCPTR      GCPtrEff;
        VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
    }
#endif
    IEMOP_HLP_DONE_DECODING();
    return IEMOP_RAISE_INVALID_OPCODE();
}
1545
1546
/** Invalid with RM byte where intel requires a 1-byte immediate (imm8).
 * Intel will also need SIB and displacement if bRm indicates memory. */
FNIEMOPRM_DEF(iemOp_InvalidWithRMNeedImm8)
{
    IEMOP_MNEMONIC(InvalidWithRMNeedImm8, "InvalidWithRMNeedImm8");
    if (pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
    {
#ifndef TST_IEM_CHECK_MC
        if (IEM_IS_MODRM_MEM_MODE(bRm))
        {
            /* Consume SIB/displacement bytes; the resulting address is unused. */
            RTGCPTR      GCPtrEff;
            VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
        }
#endif
        uint8_t bImm8; IEM_OPCODE_GET_NEXT_U8(&bImm8); RT_NOREF(bRm);
    }
    IEMOP_HLP_DONE_DECODING();
    return IEMOP_RAISE_INVALID_OPCODE();
}
1568
1569
/** Invalid with RM byte where both AMD and Intel require a 1-byte immediate
 * (imm8) as well as SIB and displacement according to bRm. */
FNIEMOPRM_DEF(iemOp_InvalidWithRMAllNeedImm8)
{
    IEMOP_MNEMONIC(InvalidWithRMAllNeedImm8, "InvalidWithRMAllNeedImm8");
#ifndef TST_IEM_CHECK_MC
    if (IEM_IS_MODRM_MEM_MODE(bRm))
    {
        /* Consume SIB/displacement bytes; the resulting address is unused. */
        RTGCPTR      GCPtrEff;
        VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
    }
#endif
    uint8_t bImm8; IEM_OPCODE_GET_NEXT_U8(&bImm8); RT_NOREF(bRm);
    IEMOP_HLP_DONE_DECODING();
    return IEMOP_RAISE_INVALID_OPCODE();
}
1588
1589
/** Invalid opcode where intel requires a full Mod R/M sequence (ModR/M byte
 * plus any SIB/displacement) to be consumed before raising \#UD. */
FNIEMOP_DEF(iemOp_InvalidNeedRM)
{
    IEMOP_MNEMONIC(InvalidNeedRM, "InvalidNeedRM");
    if (pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
    {
        uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); RT_NOREF(bRm);
#ifndef TST_IEM_CHECK_MC
        if (IEM_IS_MODRM_MEM_MODE(bRm))
        {
            /* Consume SIB/displacement bytes; the resulting address is unused. */
            RTGCPTR      GCPtrEff;
            VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
        }
#endif
    }
    IEMOP_HLP_DONE_DECODING();
    return IEMOP_RAISE_INVALID_OPCODE();
}
1610
1611
/** Invalid opcode where both AMD and Intel require a full Mod R/M sequence
 * (ModR/M byte plus any SIB/displacement) to be consumed before raising \#UD. */
FNIEMOP_DEF(iemOp_InvalidAllNeedRM)
{
    IEMOP_MNEMONIC(InvalidAllNeedRM, "InvalidAllNeedRM");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); RT_NOREF(bRm);
#ifndef TST_IEM_CHECK_MC
    if (IEM_IS_MODRM_MEM_MODE(bRm))
    {
        /* Consume SIB/displacement bytes; the resulting address is unused. */
        RTGCPTR      GCPtrEff;
        VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
    }
#endif
    IEMOP_HLP_DONE_DECODING();
    return IEMOP_RAISE_INVALID_OPCODE();
}
1629
1630
/** Invalid opcode where intel requires a Mod R/M sequence and a 1-byte
 * immediate (imm8). */
FNIEMOP_DEF(iemOp_InvalidNeedRMImm8)
{
    IEMOP_MNEMONIC(InvalidNeedRMImm8, "InvalidNeedRMImm8");
    if (pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
    {
        uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); RT_NOREF(bRm);
#ifndef TST_IEM_CHECK_MC
        if (IEM_IS_MODRM_MEM_MODE(bRm))
        {
            /* Consume SIB/displacement bytes; the resulting address is unused. */
            RTGCPTR      GCPtrEff;
            VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
        }
#endif
        uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm); RT_NOREF(bImm);
    }
    IEMOP_HLP_DONE_DECODING();
    return IEMOP_RAISE_INVALID_OPCODE();
}
1653
1654
/** Invalid opcode where intel requires a 3rd escape byte and a Mod R/M
 * sequence to be consumed before raising \#UD. */
FNIEMOP_DEF(iemOp_InvalidNeed3ByteEscRM)
{
    IEMOP_MNEMONIC(InvalidNeed3ByteEscRM, "InvalidNeed3ByteEscRM");
    if (pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
    {
        uint8_t b3rd; IEM_OPCODE_GET_NEXT_U8(&b3rd); RT_NOREF(b3rd);
        uint8_t bRm;  IEM_OPCODE_GET_NEXT_U8(&bRm);  RT_NOREF(bRm);
#ifndef TST_IEM_CHECK_MC
        if (IEM_IS_MODRM_MEM_MODE(bRm))
        {
            /* Consume SIB/displacement bytes; the resulting address is unused. */
            RTGCPTR      GCPtrEff;
            VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
        }
#endif
    }
    IEMOP_HLP_DONE_DECODING();
    return IEMOP_RAISE_INVALID_OPCODE();
}
1677
1678
/** Invalid opcode where intel requires a 3rd escape byte, Mod R/M sequence,
 * and a 1-byte immediate (imm8). */
FNIEMOP_DEF(iemOp_InvalidNeed3ByteEscRMImm8)
{
    IEMOP_MNEMONIC(InvalidNeed3ByteEscRMImm8, "InvalidNeed3ByteEscRMImm8");
    if (pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
    {
        uint8_t b3rd; IEM_OPCODE_GET_NEXT_U8(&b3rd); RT_NOREF(b3rd);
        uint8_t bRm;  IEM_OPCODE_GET_NEXT_U8(&bRm);  RT_NOREF(bRm);
#ifndef TST_IEM_CHECK_MC
        if (IEM_IS_MODRM_MEM_MODE(bRm))
        {
            /* NOTE(review): cbImm is 1 here, unlike 0 in the sibling helpers,
               presumably so the effective-address calc accounts for the
               trailing imm8 — confirm the asymmetry is intentional. */
            RTGCPTR      GCPtrEff;
            VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 1, &GCPtrEff);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
        }
#endif
        uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm); RT_NOREF(bImm);
        /* NOTE(review): unlike the other Invalid* helpers, DONE_DECODING is
           only reached on the Intel path here — verify that's intentional. */
        IEMOP_HLP_DONE_DECODING();
    }
    return IEMOP_RAISE_INVALID_OPCODE();
}
1702
1703
1704/** Repeats a_fn four times. For decoding tables. */
1705#define IEMOP_X4(a_fn) a_fn, a_fn, a_fn, a_fn
1706
1707/*
1708 * Include the tables.
1709 */
1710#ifdef IEM_WITH_3DNOW
1711# include "IEMAllInstructions3DNow.cpp.h"
1712#endif
1713#ifdef IEM_WITH_THREE_0F_38
1714# include "IEMAllInstructionsThree0f38.cpp.h"
1715#endif
1716#ifdef IEM_WITH_THREE_0F_3A
1717# include "IEMAllInstructionsThree0f3a.cpp.h"
1718#endif
1719#include "IEMAllInstructionsTwoByte0f.cpp.h"
1720#ifdef IEM_WITH_VEX
1721# include "IEMAllInstructionsVexMap1.cpp.h"
1722# include "IEMAllInstructionsVexMap2.cpp.h"
1723# include "IEMAllInstructionsVexMap3.cpp.h"
1724#endif
1725#include "IEMAllInstructionsOneByte.cpp.h"
1726
1727
1728#ifdef _MSC_VER
1729# pragma warning(pop)
1730#endif
1731
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette