/*
 * Autogenerated by $Id: IEMAllIntprA64Tables-armv8.cpp 108958 2025-04-12 00:16:40Z vboxsync $
 * from the open source v9Ap6-A specs, build 406 (5e0a212688c6bd7aee92394b6f5e491b4d0fee1d)
 * dated Sun Dec 15 22:18:44 2024 UTC.
 *
 * Do not edit!
 */

/*
 * Copyright (C) 2025 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses/>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */

#define LOG_GROUP LOG_GROUP_IEM
#define VMCPU_INCL_CPUM_GST_CTX
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include "VBox/err.h"
#include "iprt/armv8.h"
#include "IEMMc.h"
#include "IEMAllIntprA64Tables-armv8.h"
#include "IEMAllInstrA64Impl.h"


/** Invalid instruction decoder function. */
FNIEMOP_DEF_1(iemDecodeA64_Invalid, uint32_t, uOpcode)
{
    Log(("Invalid instruction %#x at %x\n", uOpcode, pVCpu->cpum.GstCtx.Pc.u64));
    IEMOP_RAISE_INVALID_OPCODE_RET();
}


/* fffffc00/5ac02000: ABS <Wd>, <Wn>
   Instruction Set: A64
   Groups: dp_1src, dpreg */
FNIEMOP_DEF_1(iemDecodeA64_ABS_32_dp_1src, uint32_t, uOpcode)
{
    uint32_t const Rd = (uOpcode >> 0) & 0x0000001f;
    uint32_t const Rn = (uOpcode >> 5) & 0x0000001f;
    if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fCssc /*FEAT_CSSC*/)
    {
        LogFlow(("%018x/%010x: ABS_32_dp_1src Rd=%#x Rn=%#x\n", pVCpu->cpum.GstCtx.Pc.u64, uOpcode, Rd, Rn));
#ifdef IEM_INSTR_IMPL_A64__ABS_32_dp_1src
        IEM_INSTR_IMPL_A64__ABS_32_dp_1src(Rd, Rn);
#else
        RT_NOREF(Rd, Rn, pVCpu, uOpcode);
        return VERR_IEM_INSTR_NOT_IMPLEMENTED;
#endif
    }
    Log(("Invalid instruction %#x at %x (cond)\n", uOpcode, pVCpu->cpum.GstCtx.Pc.u64));
    IEMOP_RAISE_INVALID_OPCODE_RET();
}


/* fffffc00/dac02000: ABS <Xd>, <Xn>
   Instruction Set: A64
   Groups: dp_1src, dpreg */
FNIEMOP_DEF_1(iemDecodeA64_ABS_64_dp_1src, uint32_t, uOpcode)
{
    uint32_t const Rd = (uOpcode >> 0) & 0x0000001f;
    uint32_t const Rn = (uOpcode >> 5) & 0x0000001f;
    if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fCssc /*FEAT_CSSC*/)
    {
        LogFlow(("%018x/%010x: ABS_64_dp_1src Rd=%#x Rn=%#x\n", pVCpu->cpum.GstCtx.Pc.u64, uOpcode, Rd, Rn));
#ifdef IEM_INSTR_IMPL_A64__ABS_64_dp_1src
        IEM_INSTR_IMPL_A64__ABS_64_dp_1src(Rd, Rn);
#else
        RT_NOREF(Rd, Rn, pVCpu, uOpcode);
        return VERR_IEM_INSTR_NOT_IMPLEMENTED;
#endif
    }
    Log(("Invalid instruction %#x at %x (cond)\n", uOpcode, pVCpu->cpum.GstCtx.Pc.u64));
    IEMOP_RAISE_INVALID_OPCODE_RET();
}
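
/*
 * Every decoder entry in this table follows the same generated shape: the
 * mask/value pair in the lead comment names the fixed opcode bits, the body
 * extracts the variable fields by shift-and-mask, checks any guest CPU
 * feature gate, and then either expands the IEM_INSTR_IMPL_A64__* hook or
 * returns VERR_IEM_INSTR_NOT_IMPLEMENTED.  The disabled sketch below
 * restates that shape for the ABS entry above; iemExampleIsAbs32 is a
 * hypothetical helper, not part of the generated table.
 */
#if 0 /* illustrative sketch only */
static bool iemExampleIsAbs32(uint32_t uOpcode, uint32_t *pRd, uint32_t *pRn)
{
    /* Fixed bits: (uOpcode & 0xfffffc00) == 0x5ac02000 selects ABS <Wd>, <Wn>. */
    if ((uOpcode & UINT32_C(0xfffffc00)) != UINT32_C(0x5ac02000))
        return false;
    *pRd = (uOpcode >> 0) & 0x1f;   /* bits [4:0]: destination register */
    *pRn = (uOpcode >> 5) & 0x1f;   /* bits [9:5]: source register      */
    return true;
}
#endif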
/* bf3ffc00/0e20b800: ABS <Vd>.<T>, <Vn>.<T>
   Instruction Set: A64
   Groups: asimdmisc, simd_dp */
FNIEMOP_DEF_1(iemDecodeA64_ABS_asimdmisc_R, uint32_t, uOpcode)
{
    uint32_t const Rd = (uOpcode >> 0) & 0x0000001f;
    uint32_t const Rn = (uOpcode >> 5) & 0x0000001f;
    uint32_t const size = (uOpcode >> 22) & 0x00000003;
    uint32_t const Q = (uOpcode >> 30) & 0x00000001;
    if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAdvSimd /*FEAT_AdvSIMD*/)
    {
        LogFlow(("%018x/%010x: ABS_asimdmisc_R Rd=%#x Rn=%#x size=%u Q=%u\n", pVCpu->cpum.GstCtx.Pc.u64, uOpcode, Rd, Rn, size, Q));
#ifdef IEM_INSTR_IMPL_A64__ABS_asimdmisc_R
        IEM_INSTR_IMPL_A64__ABS_asimdmisc_R(Rd, Rn, size, Q);
#else
        RT_NOREF(Rd, Rn, size, Q, pVCpu, uOpcode);
        return VERR_IEM_INSTR_NOT_IMPLEMENTED;
#endif
    }
    Log(("Invalid instruction %#x at %x (cond)\n", uOpcode, pVCpu->cpum.GstCtx.Pc.u64));
    IEMOP_RAISE_INVALID_OPCODE_RET();
}


/* fffffc00/5ee0b800: ABS D<d>, D<n>
   Instruction Set: A64
   Groups: asisdmisc, simd_dp */
FNIEMOP_DEF_1(iemDecodeA64_ABS_asisdmisc_R, uint32_t, uOpcode)
{
    uint32_t const Rd = (uOpcode >> 0) & 0x0000001f;
    uint32_t const Rn = (uOpcode >> 5) & 0x0000001f;
    if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAdvSimd /*FEAT_AdvSIMD*/)
    {
        LogFlow(("%018x/%010x: ABS_asisdmisc_R Rd=%#x Rn=%#x\n", pVCpu->cpum.GstCtx.Pc.u64, uOpcode, Rd, Rn));
#ifdef IEM_INSTR_IMPL_A64__ABS_asisdmisc_R
        IEM_INSTR_IMPL_A64__ABS_asisdmisc_R(Rd, Rn);
#else
        RT_NOREF(Rd, Rn, pVCpu, uOpcode);
        return VERR_IEM_INSTR_NOT_IMPLEMENTED;
#endif
    }
    Log(("Invalid instruction %#x at %x (cond)\n", uOpcode, pVCpu->cpum.GstCtx.Pc.u64));
    IEMOP_RAISE_INVALID_OPCODE_RET();
}


/* ffe0fc00/3a000000: ADCS <Wd>, <Wn>, <Wm>
   Instruction Set: A64
   Groups: addsub_carry, dpreg */
FNIEMOP_DEF_1(iemDecodeA64_ADCS_32_addsub_carry, uint32_t, uOpcode)
{
    uint32_t const Rd = (uOpcode >> 0) & 0x0000001f;
    uint32_t const Rn = (uOpcode >> 5) & 0x0000001f;
    uint32_t const Rm = (uOpcode >> 16) & 0x0000001f;
    LogFlow(("%018x/%010x: ADCS_32_addsub_carry Rd=%#x Rn=%#x Rm=%#x\n", pVCpu->cpum.GstCtx.Pc.u64, uOpcode, Rd, Rn, Rm));
#ifdef IEM_INSTR_IMPL_A64__ADCS_32_addsub_carry
    IEM_INSTR_IMPL_A64__ADCS_32_addsub_carry(Rd, Rn, Rm);
#else
    RT_NOREF(Rd, Rn, Rm, pVCpu, uOpcode);
    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
#endif
}


/* ffe0fc00/ba000000: ADCS <Xd>, <Xn>, <Xm>
   Instruction Set: A64
   Groups: addsub_carry, dpreg */
FNIEMOP_DEF_1(iemDecodeA64_ADCS_64_addsub_carry, uint32_t, uOpcode)
{
    uint32_t const Rd = (uOpcode >> 0) & 0x0000001f;
    uint32_t const Rn = (uOpcode >> 5) & 0x0000001f;
    uint32_t const Rm = (uOpcode >> 16) & 0x0000001f;
    LogFlow(("%018x/%010x: ADCS_64_addsub_carry Rd=%#x Rn=%#x Rm=%#x\n", pVCpu->cpum.GstCtx.Pc.u64, uOpcode, Rd, Rn, Rm));
#ifdef IEM_INSTR_IMPL_A64__ADCS_64_addsub_carry
    IEM_INSTR_IMPL_A64__ADCS_64_addsub_carry(Rd, Rn, Rm);
#else
    RT_NOREF(Rd, Rn, Rm, pVCpu, uOpcode);
    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
#endif
}


/* ffe0fc00/1a000000: ADC <Wd>, <Wn>, <Wm>
   Instruction Set: A64
   Groups: addsub_carry, dpreg */
FNIEMOP_DEF_1(iemDecodeA64_ADC_32_addsub_carry, uint32_t, uOpcode)
{
    uint32_t const Rd = (uOpcode >> 0) & 0x0000001f;
    uint32_t const Rn = (uOpcode >> 5) & 0x0000001f;
    uint32_t const Rm = (uOpcode >> 16) & 0x0000001f;
    LogFlow(("%018x/%010x: ADC_32_addsub_carry Rd=%#x Rn=%#x Rm=%#x\n", pVCpu->cpum.GstCtx.Pc.u64, uOpcode, Rd, Rn, Rm));
#ifdef IEM_INSTR_IMPL_A64__ADC_32_addsub_carry
    IEM_INSTR_IMPL_A64__ADC_32_addsub_carry(Rd, Rn, Rm);
#else
    RT_NOREF(Rd, Rn, Rm, pVCpu, uOpcode);
    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
#endif
}
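
/*
 * Not every entry is feature gated: the ADC/ADCS entries here are base A64
 * instructions and dispatch unconditionally, whereas ABS is only
 * architecturally valid with FEAT_CSSC and the vector forms require
 * FEAT_AdvSIMD.  A gated entry that fails its
 * IEM_GET_GUEST_CPU_FEATURES(pVCpu) check falls through to the "(cond)"
 * invalid-opcode path and raises an invalid-opcode exception for the guest.
 */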
/* ffe0fc00/9a000000: ADC <Xd>, <Xn>, <Xm>
   Instruction Set: A64
   Groups: addsub_carry, dpreg */
FNIEMOP_DEF_1(iemDecodeA64_ADC_64_addsub_carry, uint32_t, uOpcode)
{
    uint32_t const Rd = (uOpcode >> 0) & 0x0000001f;
    uint32_t const Rn = (uOpcode >> 5) & 0x0000001f;
    uint32_t const Rm = (uOpcode >> 16) & 0x0000001f;
    LogFlow(("%018x/%010x: ADC_64_addsub_carry Rd=%#x Rn=%#x Rm=%#x\n", pVCpu->cpum.GstCtx.Pc.u64, uOpcode, Rd, Rn, Rm));
#ifdef IEM_INSTR_IMPL_A64__ADC_64_addsub_carry
    IEM_INSTR_IMPL_A64__ADC_64_addsub_carry(Rd, Rn, Rm);
#else
    RT_NOREF(Rd, Rn, Rm, pVCpu, uOpcode);
    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
#endif
}


/* ffc0c000/91800000: ADDG <Xd|SP>, <Xn|SP>, #<uimm6>, #<uimm4>
   Instruction Set: A64
   Groups: addsub_immtags, dpimm */
FNIEMOP_DEF_1(iemDecodeA64_ADDG_64_addsub_immtags, uint32_t, uOpcode)
{
    uint32_t const Rd = (uOpcode >> 0) & 0x0000001f;
    uint32_t const Rn = (uOpcode >> 5) & 0x0000001f;
    uint32_t const imm4 = (uOpcode >> 10) & 0x0000000f;
    uint32_t const imm6 = (uOpcode >> 16) & 0x0000003f;
    if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMte /*FEAT_MTE*/)
    {
        LogFlow(("%018x/%010x: ADDG_64_addsub_immtags Rd=%#x Rn=%#x imm4=%#x imm6=%#x\n", pVCpu->cpum.GstCtx.Pc.u64, uOpcode, Rd, Rn, imm4, imm6));
#ifdef IEM_INSTR_IMPL_A64__ADDG_64_addsub_immtags
        IEM_INSTR_IMPL_A64__ADDG_64_addsub_immtags(Rd, Rn, imm4, imm6);
#else
        RT_NOREF(Rd, Rn, imm4, imm6, pVCpu, uOpcode);
        return VERR_IEM_INSTR_NOT_IMPLEMENTED;
#endif
    }
    Log(("Invalid instruction %#x at %x (cond)\n", uOpcode, pVCpu->cpum.GstCtx.Pc.u64));
    IEMOP_RAISE_INVALID_OPCODE_RET();
}


/* bf20fc00/0e204000: ADDHN2 <Vd>.<Tb>, <Vn>.<Ta>, <Vm>.<Ta>
   Instruction Set: A64
   Groups: asimddiff, simd_dp */
FNIEMOP_DEF_1(iemDecodeA64_ADDHN_asimddiff_N, uint32_t, uOpcode)
{
    if ((uOpcode & UINT32_C(0xbf20fc00)) == UINT32_C(0x0e204000))
    {
        uint32_t const Rd = (uOpcode >> 0) & 0x0000001f;
        uint32_t const Rn = (uOpcode >> 5) & 0x0000001f;
        uint32_t const o1 = (uOpcode >> 13) & 0x00000001;
        uint32_t const Rm = (uOpcode >> 16) & 0x0000001f;
        uint32_t const size = (uOpcode >> 22) & 0x00000003;
        uint32_t const Q = (uOpcode >> 30) & 0x00000001;
        if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAdvSimd /*FEAT_AdvSIMD*/)
        {
            LogFlow(("%018x/%010x: ADDHN_asimddiff_N Rd=%#x Rn=%#x o1=%u Rm=%#x size=%u Q=%u\n", pVCpu->cpum.GstCtx.Pc.u64, uOpcode, Rd, Rn, o1, Rm, size, Q));
#ifdef IEM_INSTR_IMPL_A64__ADDHN_asimddiff_N
            IEM_INSTR_IMPL_A64__ADDHN_asimddiff_N(Rd, Rn, o1, Rm, size, Q);
#else
            RT_NOREF(Rd, Rn, o1, Rm, size, Q, pVCpu, uOpcode);
            return VERR_IEM_INSTR_NOT_IMPLEMENTED;
#endif
        }
        Log(("Invalid instruction %#x at %x (cond)\n", uOpcode, pVCpu->cpum.GstCtx.Pc.u64));
        IEMOP_RAISE_INVALID_OPCODE_RET();
    }
    Log(("Invalid instruction %#x at %x\n", uOpcode, pVCpu->cpum.GstCtx.Pc.u64));
    IEMOP_RAISE_INVALID_OPCODE_RET();
}


/* ffe0e000/9a002000: ADDPT <Xd|SP>, <Xn|SP>, <Xm>{, LSL #<amount>}
   Instruction Set: A64
   Groups: addsub_pt, dpreg */
FNIEMOP_DEF_1(iemDecodeA64_ADDPT_64_addsub_pt, uint32_t, uOpcode)
{
    if ((uOpcode & UINT32_C(0xffe0e000)) == UINT32_C(0x9a002000))
    {
        uint32_t const Rd = (uOpcode >> 0) & 0x0000001f;
        uint32_t const Rn = (uOpcode >> 5) & 0x0000001f;
        uint32_t const imm3 = (uOpcode >> 10) & 0x00000007;
        uint32_t const Rm = (uOpcode >> 16) & 0x0000001f;
        if (false /** @todo IEM_GET_GUEST_CPU_FEATURES(pVCpu)->FEAT_CPA*/)
        {
            LogFlow(("%018x/%010x: ADDPT_64_addsub_pt Rd=%#x Rn=%#x imm3=%u Rm=%#x\n", pVCpu->cpum.GstCtx.Pc.u64, uOpcode, Rd, Rn, imm3, Rm));
#ifdef IEM_INSTR_IMPL_A64__ADDPT_64_addsub_pt
            IEM_INSTR_IMPL_A64__ADDPT_64_addsub_pt(Rd, Rn, imm3, Rm);
#else
            RT_NOREF(Rd, Rn, imm3, Rm, pVCpu, uOpcode);
            return VERR_IEM_INSTR_NOT_IMPLEMENTED;
#endif
        }
        Log(("Invalid instruction %#x at %x (cond)\n", uOpcode, pVCpu->cpum.GstCtx.Pc.u64));
        IEMOP_RAISE_INVALID_OPCODE_RET();
    }
    Log(("Invalid instruction %#x at %x\n", uOpcode, pVCpu->cpum.GstCtx.Pc.u64));
    IEMOP_RAISE_INVALID_OPCODE_RET();
}
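
/*
 * ADDHN2 and ADDPT above add an extra step before field extraction: they
 * re-verify the fixed opcode bits ((uOpcode & fMask) == fValue), presumably
 * because their decode-table slot also covers bit patterns that belong to
 * other encodings; a mismatch takes the unconditional invalid-opcode path
 * at the bottom of the function rather than the "(cond)" one.
 */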
/* bf20fc00/0e20bc00: ADDP <Vd>.<T>, <Vn>.<T>, <Vm>.<T>
   Instruction Set: A64
   Groups: asimdsame, simd_dp */
FNIEMOP_DEF_1(iemDecodeA64_ADDP_asimdsame_only, uint32_t, uOpcode)
{
    uint32_t const Rd = (uOpcode >> 0) & 0x0000001f;
    uint32_t const Rn = (uOpcode >> 5) & 0x0000001f;
    uint32_t const Rm = (uOpcode >> 16) & 0x0000001f;
    uint32_t const size = (uOpcode >> 22) & 0x00000003;
    uint32_t const Q = (uOpcode >> 30) & 0x00000001;
    if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAdvSimd /*FEAT_AdvSIMD*/)
    {
        LogFlow(("%018x/%010x: ADDP_asimdsame_only Rd=%#x Rn=%#x Rm=%#x size=%u Q=%u\n", pVCpu->cpum.GstCtx.Pc.u64, uOpcode, Rd, Rn, Rm, size, Q));
#ifdef IEM_INSTR_IMPL_A64__ADDP_asimdsame_only
        IEM_INSTR_IMPL_A64__ADDP_asimdsame_only(Rd, Rn, Rm, size, Q);
#else
        RT_NOREF(Rd, Rn, Rm, size, Q, pVCpu, uOpcode);
        return VERR_IEM_INSTR_NOT_IMPLEMENTED;
#endif
    }
    Log(("Invalid instruction %#x at %x (cond)\n", uOpcode, pVCpu->cpum.GstCtx.Pc.u64));
    IEMOP_RAISE_INVALID_OPCODE_RET();
}


/* fffffc00/5ef1b800: ADDP D<d>, <Vn>.2D
   Instruction Set: A64
   Groups: asisdpair, simd_dp */
FNIEMOP_DEF_1(iemDecodeA64_ADDP_asisdpair_only, uint32_t, uOpcode)
{
    uint32_t const Rd = (uOpcode >> 0) & 0x0000001f;
    uint32_t const Rn = (uOpcode >> 5) & 0x0000001f;
    if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAdvSimd /*FEAT_AdvSIMD*/)
    {
        LogFlow(("%018x/%010x: ADDP_asisdpair_only Rd=%#x Rn=%#x\n", pVCpu->cpum.GstCtx.Pc.u64, uOpcode, Rd, Rn));
#ifdef IEM_INSTR_IMPL_A64__ADDP_asisdpair_only
        IEM_INSTR_IMPL_A64__ADDP_asisdpair_only(Rd, Rn);
#else
        RT_NOREF(Rd, Rn, pVCpu, uOpcode);
        return VERR_IEM_INSTR_NOT_IMPLEMENTED;
#endif
    }
    Log(("Invalid instruction %#x at %x (cond)\n", uOpcode, pVCpu->cpum.GstCtx.Pc.u64));
    IEMOP_RAISE_INVALID_OPCODE_RET();
}


/* ffe00000/2b200000: ADDS <Wd>, <Wn|WSP>, <Wm>{, <extend> {#<amount>}}
   Instruction Set: A64
   Groups: addsub_ext, dpreg */
FNIEMOP_DEF_1(iemDecodeA64_ADDS_32S_addsub_ext, uint32_t, uOpcode)
{
    if ((uOpcode & UINT32_C(0xffe00000)) == UINT32_C(0x2b200000))
    {
        uint32_t const Rd = (uOpcode >> 0) & 0x0000001f;
        uint32_t const Rn = (uOpcode >> 5) & 0x0000001f;
        uint32_t const imm3 = (uOpcode >> 10) & 0x00000007;
        uint32_t const option = (uOpcode >> 13) & 0x00000007;
        uint32_t const Rm = (uOpcode >> 16) & 0x0000001f;
        LogFlow(("%018x/%010x: ADDS_32S_addsub_ext Rd=%#x Rn=%#x imm3=%u option=%u Rm=%#x\n", pVCpu->cpum.GstCtx.Pc.u64, uOpcode, Rd, Rn, imm3, option, Rm));
#ifdef IEM_INSTR_IMPL_A64__ADDS_32S_addsub_ext
        IEM_INSTR_IMPL_A64__ADDS_32S_addsub_ext(Rd, Rn, imm3, option, Rm);
#else
        RT_NOREF(Rd, Rn, imm3, option, Rm, pVCpu, uOpcode);
        return VERR_IEM_INSTR_NOT_IMPLEMENTED;
#endif
    }
    Log(("Invalid instruction %#x at %x\n", uOpcode, pVCpu->cpum.GstCtx.Pc.u64));
    IEMOP_RAISE_INVALID_OPCODE_RET();
}


/* ff800000/31000000: ADDS <Wd>, <Wn|WSP>, #<imm>{, <shift>}
   Instruction Set: A64
   Groups: addsub_imm, dpimm */
FNIEMOP_DEF_1(iemDecodeA64_ADDS_32S_addsub_imm, uint32_t, uOpcode)
{
    uint32_t const Rd = (uOpcode >> 0) & 0x0000001f;
    uint32_t const Rn = (uOpcode >> 5) & 0x0000001f;
    uint32_t const imm12 = (uOpcode >> 10) & 0x00000fff;
    uint32_t const sh = (uOpcode >> 22) & 0x00000001;
    LogFlow(("%018x/%010x: ADDS_32S_addsub_imm Rd=%#x Rn=%#x imm12=%#x sh=%u\n", pVCpu->cpum.GstCtx.Pc.u64, uOpcode, Rd, Rn, imm12, sh));
#ifdef IEM_INSTR_IMPL_A64__ADDS_32S_addsub_imm
    IEM_INSTR_IMPL_A64__ADDS_32S_addsub_imm(Rd, Rn, imm12, sh);
#else
    RT_NOREF(Rd, Rn, imm12, sh, pVCpu, uOpcode);
    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
#endif
}
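
/*
 * For the extended-register forms above, option (bits [15:13]) selects the
 * extend operation (0 = UXTB ... 3 = UXTX/LSL, 4 = SXTB ... 7 = SXTX) and
 * imm3 (bits [12:10]) is a left-shift amount of 0..4.  The disabled example
 * below assembles ADDS W0, W1, W2, UXTB #2 against the ADDS_32S_addsub_ext
 * encoding (ffe00000/2b200000); illustrative only.
 */
#if 0 /* illustrative sketch only */
uint32_t const uExampleAddsExt = UINT32_C(0x2b200000)
                               | (UINT32_C(2) << 16)  /* Rm     = W2   */
                               | (UINT32_C(0) << 13)  /* option = UXTB */
                               | (UINT32_C(2) << 10)  /* imm3   = #2   */
                               | (UINT32_C(1) <<  5)  /* Rn     = W1   */
                               |  UINT32_C(0);        /* Rd     = W0   */
#endif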
/* ff200000/2b000000: ADDS <Wd>, <Wn>, <Wm>{, <shift> #<amount>}
   Instruction Set: A64
   Groups: addsub_shift, dpreg */
FNIEMOP_DEF_1(iemDecodeA64_ADDS_32_addsub_shift, uint32_t, uOpcode)
{
    uint32_t const Rd = (uOpcode >> 0) & 0x0000001f;
    uint32_t const Rn = (uOpcode >> 5) & 0x0000001f;
    uint32_t const imm6 = (uOpcode >> 10) & 0x0000003f;
    uint32_t const Rm = (uOpcode >> 16) & 0x0000001f;
    uint32_t const shift = (uOpcode >> 22) & 0x00000003;
    LogFlow(("%018x/%010x: ADDS_32_addsub_shift Rd=%#x Rn=%#x imm6=%#x Rm=%#x shift=%u\n", pVCpu->cpum.GstCtx.Pc.u64, uOpcode, Rd, Rn, imm6, Rm, shift));
#ifdef IEM_INSTR_IMPL_A64__ADDS_32_addsub_shift
    IEM_INSTR_IMPL_A64__ADDS_32_addsub_shift(Rd, Rn, imm6, Rm, shift);
#else
    RT_NOREF(Rd, Rn, imm6, Rm, shift, pVCpu, uOpcode);
    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
#endif
}


/* ffe00000/ab200000: ADDS <Xd>, <Xn|SP>, <R><m>{, <extend> {#<amount>}}
   Instruction Set: A64
   Groups: addsub_ext, dpreg */
FNIEMOP_DEF_1(iemDecodeA64_ADDS_64S_addsub_ext, uint32_t, uOpcode)
{
    if ((uOpcode & UINT32_C(0xffe00000)) == UINT32_C(0xab200000))
    {
        uint32_t const Rd = (uOpcode >> 0) & 0x0000001f;
        uint32_t const Rn = (uOpcode >> 5) & 0x0000001f;
        uint32_t const imm3 = (uOpcode >> 10) & 0x00000007;
        uint32_t const option = (uOpcode >> 13) & 0x00000007;
        uint32_t const Rm = (uOpcode >> 16) & 0x0000001f;
        LogFlow(("%018x/%010x: ADDS_64S_addsub_ext Rd=%#x Rn=%#x imm3=%u option=%u Rm=%#x\n", pVCpu->cpum.GstCtx.Pc.u64, uOpcode, Rd, Rn, imm3, option, Rm));
#ifdef IEM_INSTR_IMPL_A64__ADDS_64S_addsub_ext
        IEM_INSTR_IMPL_A64__ADDS_64S_addsub_ext(Rd, Rn, imm3, option, Rm);
#else
        RT_NOREF(Rd, Rn, imm3, option, Rm, pVCpu, uOpcode);
        return VERR_IEM_INSTR_NOT_IMPLEMENTED;
#endif
    }
    Log(("Invalid instruction %#x at %x\n", uOpcode, pVCpu->cpum.GstCtx.Pc.u64));
    IEMOP_RAISE_INVALID_OPCODE_RET();
}


/* ff800000/b1000000: ADDS <Xd>, <Xn|SP>, #<imm>{, <shift>}
   Instruction Set: A64
   Groups: addsub_imm, dpimm */
FNIEMOP_DEF_1(iemDecodeA64_ADDS_64S_addsub_imm, uint32_t, uOpcode)
{
    uint32_t const Rd = (uOpcode >> 0) & 0x0000001f;
    uint32_t const Rn = (uOpcode >> 5) & 0x0000001f;
    uint32_t const imm12 = (uOpcode >> 10) & 0x00000fff;
    uint32_t const sh = (uOpcode >> 22) & 0x00000001;
    LogFlow(("%018x/%010x: ADDS_64S_addsub_imm Rd=%#x Rn=%#x imm12=%#x sh=%u\n", pVCpu->cpum.GstCtx.Pc.u64, uOpcode, Rd, Rn, imm12, sh));
#ifdef IEM_INSTR_IMPL_A64__ADDS_64S_addsub_imm
    IEM_INSTR_IMPL_A64__ADDS_64S_addsub_imm(Rd, Rn, imm12, sh);
#else
    RT_NOREF(Rd, Rn, imm12, sh, pVCpu, uOpcode);
    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
#endif
}


/* ff200000/ab000000: ADDS <Xd>, <Xn>, <Xm>{, <shift> #<amount>}
   Instruction Set: A64
   Groups: addsub_shift, dpreg */
FNIEMOP_DEF_1(iemDecodeA64_ADDS_64_addsub_shift, uint32_t, uOpcode)
{
    uint32_t const Rd = (uOpcode >> 0) & 0x0000001f;
    uint32_t const Rn = (uOpcode >> 5) & 0x0000001f;
    uint32_t const imm6 = (uOpcode >> 10) & 0x0000003f;
    uint32_t const Rm = (uOpcode >> 16) & 0x0000001f;
    uint32_t const shift = (uOpcode >> 22) & 0x00000003;
    LogFlow(("%018x/%010x: ADDS_64_addsub_shift Rd=%#x Rn=%#x imm6=%#x Rm=%#x shift=%u\n", pVCpu->cpum.GstCtx.Pc.u64, uOpcode, Rd, Rn, imm6, Rm, shift));
#ifdef IEM_INSTR_IMPL_A64__ADDS_64_addsub_shift
    IEM_INSTR_IMPL_A64__ADDS_64_addsub_shift(Rd, Rn, imm6, Rm, shift);
#else
    RT_NOREF(Rd, Rn, imm6, Rm, shift, pVCpu, uOpcode);
    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
#endif
}
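
/*
 * For the shifted-register forms above, shift (bits [23:22]) selects the
 * shift type (0 = LSL, 1 = LSR, 2 = ASR) and imm6 (bits [15:10]) the shift
 * amount.  The disabled example below assembles ADDS X0, X1, X2, LSL #1
 * against the ADDS_64_addsub_shift encoding (ff200000/ab000000);
 * illustrative only.
 */
#if 0 /* illustrative sketch only */
uint32_t const uExampleAddsShift = UINT32_C(0xab000000)
                                 | (UINT32_C(0) << 22)  /* shift = LSL */
                                 | (UINT32_C(2) << 16)  /* Rm    = X2  */
                                 | (UINT32_C(1) << 10)  /* imm6  = #1  */
                                 | (UINT32_C(1) <<  5)  /* Rn    = X1  */
                                 |  UINT32_C(0);        /* Rd    = X0  */
#endif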
/* bf3ffc00/0e31b800: ADDV <V><d>, <Vn>.<T>
   Instruction Set: A64
   Groups: asimdall, simd_dp */
FNIEMOP_DEF_1(iemDecodeA64_ADDV_asimdall_only, uint32_t, uOpcode)
{
    uint32_t const Rd = (uOpcode >> 0) & 0x0000001f;
    uint32_t const Rn = (uOpcode >> 5) & 0x0000001f;
    uint32_t const size = (uOpcode >> 22) & 0x00000003;
    uint32_t const Q = (uOpcode >> 30) & 0x00000001;
    if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAdvSimd /*FEAT_AdvSIMD*/)
    {
        LogFlow(("%018x/%010x: ADDV_asimdall_only Rd=%#x Rn=%#x size=%u Q=%u\n", pVCpu->cpum.GstCtx.Pc.u64, uOpcode, Rd, Rn, size, Q));
#ifdef IEM_INSTR_IMPL_A64__ADDV_asimdall_only
        IEM_INSTR_IMPL_A64__ADDV_asimdall_only(Rd, Rn, size, Q);
#else
        RT_NOREF(Rd, Rn, size, Q, pVCpu, uOpcode);
        return VERR_IEM_INSTR_NOT_IMPLEMENTED;
#endif
    }
    Log(("Invalid instruction %#x at %x (cond)\n", uOpcode, pVCpu->cpum.GstCtx.Pc.u64));
    IEMOP_RAISE_INVALID_OPCODE_RET();
}


/* ffe00000/0b200000: ADD <Wd|WSP>, <Wn|WSP>, <Wm>{, <extend> {#<amount>}}
   Instruction Set: A64
   Groups: addsub_ext, dpreg */
FNIEMOP_DEF_1(iemDecodeA64_ADD_32_addsub_ext, uint32_t, uOpcode)
{
    if ((uOpcode & UINT32_C(0xffe00000)) == UINT32_C(0x0b200000))
    {
        uint32_t const Rd = (uOpcode >> 0) & 0x0000001f;
        uint32_t const Rn = (uOpcode >> 5) & 0x0000001f;
        uint32_t const imm3 = (uOpcode >> 10) & 0x00000007;
        uint32_t const option = (uOpcode >> 13) & 0x00000007;
        uint32_t const Rm = (uOpcode >> 16) & 0x0000001f;
        LogFlow(("%018x/%010x: ADD_32_addsub_ext Rd=%#x Rn=%#x imm3=%u option=%u Rm=%#x\n", pVCpu->cpum.GstCtx.Pc.u64, uOpcode, Rd, Rn, imm3, option, Rm));
#ifdef IEM_INSTR_IMPL_A64__ADD_32_addsub_ext
        IEM_INSTR_IMPL_A64__ADD_32_addsub_ext(Rd, Rn, imm3, option, Rm);
#else
        RT_NOREF(Rd, Rn, imm3, option, Rm, pVCpu, uOpcode);
        return VERR_IEM_INSTR_NOT_IMPLEMENTED;
#endif
    }
    Log(("Invalid instruction %#x at %x\n", uOpcode, pVCpu->cpum.GstCtx.Pc.u64));
    IEMOP_RAISE_INVALID_OPCODE_RET();
}


/* ff800000/11000000: ADD <Wd|WSP>, <Wn|WSP>, #<imm>{, <shift>}
   Instruction Set: A64
   Groups: addsub_imm, dpimm */
FNIEMOP_DEF_1(iemDecodeA64_ADD_32_addsub_imm, uint32_t, uOpcode)
{
    uint32_t const Rd = (uOpcode >> 0) & 0x0000001f;
    uint32_t const Rn = (uOpcode >> 5) & 0x0000001f;
    uint32_t const imm12 = (uOpcode >> 10) & 0x00000fff;
    uint32_t const sh = (uOpcode >> 22) & 0x00000001;
    LogFlow(("%018x/%010x: ADD_32_addsub_imm Rd=%#x Rn=%#x imm12=%#x sh=%u\n", pVCpu->cpum.GstCtx.Pc.u64, uOpcode, Rd, Rn, imm12, sh));
#ifdef IEM_INSTR_IMPL_A64__ADD_32_addsub_imm
    IEM_INSTR_IMPL_A64__ADD_32_addsub_imm(Rd, Rn, imm12, sh);
#else
    RT_NOREF(Rd, Rn, imm12, sh, pVCpu, uOpcode);
    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
#endif
}


/* ff200000/0b000000: ADD <Wd>, <Wn>, <Wm>{, <shift> #<amount>}
   Instruction Set: A64
   Groups: addsub_shift, dpreg */
FNIEMOP_DEF_1(iemDecodeA64_ADD_32_addsub_shift, uint32_t, uOpcode)
{
    uint32_t const Rd = (uOpcode >> 0) & 0x0000001f;
    uint32_t const Rn = (uOpcode >> 5) & 0x0000001f;
    uint32_t const imm6 = (uOpcode >> 10) & 0x0000003f;
    uint32_t const Rm = (uOpcode >> 16) & 0x0000001f;
    uint32_t const shift = (uOpcode >> 22) & 0x00000003;
    LogFlow(("%018x/%010x: ADD_32_addsub_shift Rd=%#x Rn=%#x imm6=%#x Rm=%#x shift=%u\n", pVCpu->cpum.GstCtx.Pc.u64, uOpcode, Rd, Rn, imm6, Rm, shift));
#ifdef IEM_INSTR_IMPL_A64__ADD_32_addsub_shift
    IEM_INSTR_IMPL_A64__ADD_32_addsub_shift(Rd, Rn, imm6, Rm, shift);
#else
    RT_NOREF(Rd, Rn, imm6, Rm, shift, pVCpu, uOpcode);
    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
#endif
}
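
/*
 * For the immediate forms above, imm12 (bits [21:10]) is a 12-bit unsigned
 * immediate and sh (bit 22) applies LSL #12 to it when set.  The disabled
 * example below assembles ADD W0, W1, #0x123 against the ADD_32_addsub_imm
 * encoding (ff800000/11000000); illustrative only.
 */
#if 0 /* illustrative sketch only */
uint32_t const uExampleAddImm = UINT32_C(0x11000000)
                              | (UINT32_C(0)     << 22)  /* sh    = LSL #0 */
                              | (UINT32_C(0x123) << 10)  /* imm12 = 0x123  */
                              | (UINT32_C(1)     <<  5)  /* Rn    = W1     */
                              |  UINT32_C(0);            /* Rd    = W0     */
#endif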
/* ffe00000/8b200000: ADD <Xd|SP>, <Xn|SP>, <R><m>{, <extend> {#<amount>}}
   Instruction Set: A64
   Groups: addsub_ext, dpreg */
FNIEMOP_DEF_1(iemDecodeA64_ADD_64_addsub_ext, uint32_t, uOpcode)
{
    uint32_t const Rd = (uOpcode >> 0) & 0x0000001f;
    uint32_t const Rn = (uOpcode >> 5) & 0x0000001f;
    uint32_t const imm3 = (uOpcode >> 10) & 0x00000007;
    uint32_t const option = (uOpcode >> 13) & 0x00000007;
    uint32_t const Rm = (uOpcode >> 16) & 0x0000001f;
    LogFlow(("%018x/%010x: ADD_64_addsub_ext Rd=%#x Rn=%#x imm3=%u option=%u Rm=%#x\n", pVCpu->cpum.GstCtx.Pc.u64, uOpcode, Rd, Rn, imm3, option, Rm));
#ifdef IEM_INSTR_IMPL_A64__ADD_64_addsub_ext
    IEM_INSTR_IMPL_A64__ADD_64_addsub_ext(Rd, Rn, imm3, option, Rm);
#else
    RT_NOREF(Rd, Rn, imm3, option, Rm, pVCpu, uOpcode);
    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
#endif
}


/* ff800000/91000000: ADD <Xd|SP>, <Xn|SP>, #<imm>{, <shift>}
   Instruction Set: A64
   Groups: addsub_imm, dpimm */
FNIEMOP_DEF_1(iemDecodeA64_ADD_64_addsub_imm, uint32_t, uOpcode)
{
    uint32_t const Rd = (uOpcode >> 0) & 0x0000001f;
    uint32_t const Rn = (uOpcode >> 5) & 0x0000001f;
    uint32_t const imm12 = (uOpcode >> 10) & 0x00000fff;
    uint32_t const sh = (uOpcode >> 22) & 0x00000001;
    LogFlow(("%018x/%010x: ADD_64_addsub_imm Rd=%#x Rn=%#x imm12=%#x sh=%u\n", pVCpu->cpum.GstCtx.Pc.u64, uOpcode, Rd, Rn, imm12, sh));
#ifdef IEM_INSTR_IMPL_A64__ADD_64_addsub_imm
    IEM_INSTR_IMPL_A64__ADD_64_addsub_imm(Rd, Rn, imm12, sh);
#else
    RT_NOREF(Rd, Rn, imm12, sh, pVCpu, uOpcode);
    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
#endif
}


/* ff200000/8b000000: ADD <Xd>, <Xn>, <Xm>{, <shift> #<amount>}
   Instruction Set: A64
   Groups: addsub_shift, dpreg */
FNIEMOP_DEF_1(iemDecodeA64_ADD_64_addsub_shift, uint32_t, uOpcode)
{
    uint32_t const Rd = (uOpcode >> 0) & 0x0000001f;
    uint32_t const Rn = (uOpcode >> 5) & 0x0000001f;
    uint32_t const imm6 = (uOpcode >> 10) & 0x0000003f;
    uint32_t const Rm = (uOpcode >> 16) & 0x0000001f;
    uint32_t const shift = (uOpcode >> 22) & 0x00000003;
    LogFlow(("%018x/%010x: ADD_64_addsub_shift Rd=%#x Rn=%#x imm6=%#x Rm=%#x shift=%u\n", pVCpu->cpum.GstCtx.Pc.u64, uOpcode, Rd, Rn, imm6, Rm, shift));
#ifdef IEM_INSTR_IMPL_A64__ADD_64_addsub_shift
    IEM_INSTR_IMPL_A64__ADD_64_addsub_shift(Rd, Rn, imm6, Rm, shift);
#else
    RT_NOREF(Rd, Rn, imm6, Rm, shift, pVCpu, uOpcode);
    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
#endif
}
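
/*
 * The vector entries that follow decode two additional fields: Q (bit 30)
 * selects the 64-bit (Q=0) or 128-bit (Q=1) register form, and size
 * (bits [23:22]) the element width, so size/Q together name the <T>
 * arrangement: size=0/Q=0 -> 8B, size=0/Q=1 -> 16B, ..., size=3/Q=1 -> 2D.
 */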
/* bf20fc00/0e208400: ADD <Vd>.<T>, <Vn>.<T>, <Vm>.<T>
   Instruction Set: A64
   Groups: asimdsame, simd_dp */
FNIEMOP_DEF_1(iemDecodeA64_ADD_asimdsame_only, uint32_t, uOpcode)
{
    uint32_t const Rd = (uOpcode >> 0) & 0x0000001f;
    uint32_t const Rn = (uOpcode >> 5) & 0x0000001f;
    uint32_t const Rm = (uOpcode >> 16) & 0x0000001f;
    uint32_t const size = (uOpcode >> 22) & 0x00000003;
    uint32_t const Q = (uOpcode >> 30) & 0x00000001;
    if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAdvSimd /*FEAT_AdvSIMD*/)
    {
        LogFlow(("%018x/%010x: ADD_asimdsame_only Rd=%#x Rn=%#x Rm=%#x size=%u Q=%u\n", pVCpu->cpum.GstCtx.Pc.u64, uOpcode, Rd, Rn, Rm, size, Q));
#ifdef IEM_INSTR_IMPL_A64__ADD_asimdsame_only
        IEM_INSTR_IMPL_A64__ADD_asimdsame_only(Rd, Rn, Rm, size, Q);
#else
        RT_NOREF(Rd, Rn, Rm, size, Q, pVCpu, uOpcode);
        return VERR_IEM_INSTR_NOT_IMPLEMENTED;
#endif
    }
    Log(("Invalid instruction %#x at %x (cond)\n", uOpcode, pVCpu->cpum.GstCtx.Pc.u64));
    IEMOP_RAISE_INVALID_OPCODE_RET();
}


/* ffe0fc00/5ee08400: ADD D<d>, D<n>, D<m>
   Instruction Set: A64
   Groups: asisdsame, simd_dp */
FNIEMOP_DEF_1(iemDecodeA64_ADD_asisdsame_only, uint32_t, uOpcode)
{
    uint32_t const Rd = (uOpcode >> 0) & 0x0000001f;
    uint32_t const Rn = (uOpcode >> 5) & 0x0000001f;
    uint32_t const Rm = (uOpcode >> 16) & 0x0000001f;
    if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAdvSimd /*FEAT_AdvSIMD*/)
    {
        LogFlow(("%018x/%010x: ADD_asisdsame_only Rd=%#x Rn=%#x Rm=%#x\n", pVCpu->cpum.GstCtx.Pc.u64, uOpcode, Rd, Rn, Rm));
#ifdef IEM_INSTR_IMPL_A64__ADD_asisdsame_only
        IEM_INSTR_IMPL_A64__ADD_asisdsame_only(Rd, Rn, Rm);
#else
        RT_NOREF(Rd, Rn, Rm, pVCpu, uOpcode);
        return VERR_IEM_INSTR_NOT_IMPLEMENTED;
#endif
    }
    Log(("Invalid instruction %#x at %x (cond)\n", uOpcode, pVCpu->cpum.GstCtx.Pc.u64));
    IEMOP_RAISE_INVALID_OPCODE_RET();
}


/* 9f000000/90000000: ADRP <Xd>, <label>