VirtualBox

Timestamp:
Mar 24, 2025 9:34:46 AM
Author:
vboxsync
svn:sync-xref-src-repo-rev:
168127
Message:

VMM/IEM: More ARM target work. jiraref:VBP-1598

File:
1 copied

Legend:

Unmodified lines are shown with a leading space, removed lines with "-", and added lines with "+".
  • trunk/src/VBox/VMM/VMMAll/target-armv8/IEMAll-armv8.cpp

r108707 → r108710

 /* $Id$ */
 /** @file
- * IEM - Interpreted Execution Manager - x86 target, miscellaneous.
+ * IEM - Interpreted Execution Manager - ARMv8 target, miscellaneous.
  */
 
[...]
 #define LOG_GROUP   LOG_GROUP_IEM
 #define VMCPU_INCL_CPUM_GST_CTX
-#ifdef IN_RING0
-# define VBOX_VMM_TARGET_X86
-#endif
 #include <VBox/vmm/iem.h>
 #include <VBox/vmm/cpum.h>
[...]
 #include <VBox/vmm/vmcc.h>
 #include <VBox/log.h>
-#include <VBox/param.h>
 #include <iprt/assert.h>
-#include <iprt/errcore.h>
-#include <iprt/string.h>
-#include <iprt/x86.h>
+#include <iprt/armv8.h>
 
-#include "IEMInline-x86.h" /* iemRegFinishClearingRF */
 
 
 /**
- * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
- * path.
- *
- * This will also invalidate TLB entries for any pages with active data
- * breakpoints on them.
- *
- * @returns IEM_F_BRK_PENDING_XXX or zero.
- * @param   pVCpu               The cross context virtual CPU structure of the
- *                              calling thread.
- *
- * @note    Don't call directly, use iemCalcExecDbgFlags instead.
+ * Slow iemCalcExecDbgFlags() code path.
  */
 uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
 {
     uint32_t fExec = 0;
 
+#if 0 /** @todo ARM hardware breakpoints/watchpoints. */
     /*
      * Helper for invalidate the data TLB for breakpoint addresses.
[...]
         PROCESS_ONE_BP(fHyperDr7, 3, DBGFBpGetDR3(pVM));
     }
+#else
+    RT_NOREF(pVCpu);
+#endif
 
     return fExec;
 }
 
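With this change the ARMv8 build keeps iemCalcExecDbgFlagsSlow() but disables the x86 hardware-breakpoint scan until ARM breakpoints/watchpoints are implemented. Reconstructed from the new-side lines of the hunk above, the resulting function body reads roughly as follows (a sketch; the x86 scan disabled by the #if 0 block is elided):

    uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
    {
        uint32_t fExec = 0;
    #if 0 /** @todo ARM hardware breakpoints/watchpoints. */
        /* The x86-style DBGF/DR7 breakpoint scan lives here, disabled for the ARM target. */
    #else
        RT_NOREF(pVCpu);    /* Parameter unused until the ARM path is implemented. */
    #endif
        return fExec;       /* No IEM_F_BRK_PENDING_XXX flags are set for now. */
    }

The rest of the changeset removes the x86 register-access helpers (the RIP-relative jump finishers) from the ARMv8 copy: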
-
-/** @name   Register Access.
- * @{
- */
-
-/**
- * Adds a 8-bit signed jump offset to RIP/EIP/IP.
- *
- * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
- * segment limit.
- *
- * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
- * @param   cbInstr             Instruction size.
- * @param   offNextInstr        The offset of the next instruction.
- * @param   enmEffOpSize        Effective operand size.
- */
-VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
-                                                        IEMMODE enmEffOpSize) RT_NOEXCEPT
-{
-    switch (enmEffOpSize)
-    {
-        case IEMMODE_16BIT:
-        {
-            uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
-            if (RT_LIKELY(   uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
-                          || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
-                pVCpu->cpum.GstCtx.rip = uNewIp;
-            else
-                return iemRaiseGeneralProtectionFault0(pVCpu);
-            break;
-        }
-
-        case IEMMODE_32BIT:
-        {
-            Assert(!IEM_IS_64BIT_CODE(pVCpu));
-            Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
-
-            uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
-            if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
-                pVCpu->cpum.GstCtx.rip = uNewEip;
-            else
-                return iemRaiseGeneralProtectionFault0(pVCpu);
-            break;
-        }
-
-        case IEMMODE_64BIT:
-        {
-            Assert(IEM_IS_64BIT_CODE(pVCpu));
-
-            uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
-            if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
-                pVCpu->cpum.GstCtx.rip = uNewRip;
-            else
-                return iemRaiseGeneralProtectionFault0(pVCpu);
-            break;
-        }
-
-        IEM_NOT_REACHED_DEFAULT_CASE_RET();
-    }
-
-#ifndef IEM_WITH_CODE_TLB
-    /* Flush the prefetch buffer. */
-    pVCpu->iem.s.cbOpcode = cbInstr;
-#endif
-
-    /*
-     * Clear RF and finish the instruction (maybe raise #DB).
-     */
-    return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
-}
-
-
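For context on the 16-bit operand-size path of the helper removed above: the new IP is computed with natural uint16_t wrap-around and only then checked against the CS segment limit (the check is skipped for 64-bit code). A minimal standalone sketch of that arithmetic, using hypothetical values rather than VirtualBox types:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t ip      = 0xfffc;   /* current IP near the top of the segment (hypothetical) */
        uint8_t  cbInstr = 2;        /* size of the jump instruction */
        int8_t   offNext = 0x10;     /* signed 8-bit displacement */
        uint32_t csLimit = 0xffff;   /* CS segment limit (hypothetical) */

        /* The sum wraps modulo 2^16, just like the uint16_t addition in the removed helper. */
        uint16_t uNewIp = (uint16_t)(ip + cbInstr + (int16_t)offNext);

        if (uNewIp <= csLimit)
            printf("jump to %#06x\n", (unsigned)uNewIp);   /* prints "jump to 0x000e" */
        else
            printf("raise #GP(0)\n");
        return 0;
    }

Because the addition wraps before the limit check, a 16-bit near jump can legally wrap around inside a 64 KiB code segment, which matches the behaviour of the removed helper. The removal continues with the 16-bit and 32-bit displacement variants: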
-/**
- * Adds a 16-bit signed jump offset to RIP/EIP/IP.
- *
- * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
- * segment limit.
- *
- * @returns Strict VBox status code.
- * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
- * @param   cbInstr             Instruction size.
- * @param   offNextInstr        The offset of the next instruction.
- */
-VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
-{
-    Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
-
-    uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
-    if (RT_LIKELY(   uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
-                  || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
-        pVCpu->cpum.GstCtx.rip = uNewIp;
-    else
-        return iemRaiseGeneralProtectionFault0(pVCpu);
-
-#ifndef IEM_WITH_CODE_TLB
-    /* Flush the prefetch buffer. */
-    pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
-#endif
-
-    /*
-     * Clear RF and finish the instruction (maybe raise #DB).
-     */
-    return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
-}
-
-
-/**
- * Adds a 32-bit signed jump offset to RIP/EIP/IP.
- *
- * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
- * segment limit.
- *
- * @returns Strict VBox status code.
- * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
- * @param   cbInstr             Instruction size.
- * @param   offNextInstr        The offset of the next instruction.
- * @param   enmEffOpSize        Effective operand size.
- */
-VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
-                                                         IEMMODE enmEffOpSize) RT_NOEXCEPT
-{
-    if (enmEffOpSize == IEMMODE_32BIT)
-    {
-        Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
-
-        uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
-        if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
-            pVCpu->cpum.GstCtx.rip = uNewEip;
-        else
-            return iemRaiseGeneralProtectionFault0(pVCpu);
-    }
-    else
-    {
-        Assert(enmEffOpSize == IEMMODE_64BIT);
-
-        uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
-        if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
-            pVCpu->cpum.GstCtx.rip = uNewRip;
-        else
-            return iemRaiseGeneralProtectionFault0(pVCpu);
-    }
-
-#ifndef IEM_WITH_CODE_TLB
-    /* Flush the prefetch buffer. */
-    pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
-#endif
-
-    /*
-     * Clear RF and finish the instruction (maybe raise #DB).
-     */
-    return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
-}
-
-/** @}  */
-
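The 64-bit branches of the removed helpers accept a new RIP only when IEM_IS_CANONICAL() holds. As a rough standalone illustration of such a test (assuming 48-bit virtual addresses; this is a sketch, not the VirtualBox macro itself):

    #include <stdbool.h>
    #include <stdint.h>

    /* A 64-bit address is canonical under 48-bit virtual addressing when
       bits 63:47 are all copies of bit 47, i.e. sign-extending the low
       48 bits reproduces the original value. */
    static bool IsCanonical48(uint64_t uAddr)
    {
        return (uint64_t)((int64_t)(uAddr << 16) >> 16) == uAddr;
    }

    /* IsCanonical48(UINT64_C(0x00007fffffffffff)) -> true
       IsCanonical48(UINT64_C(0x0000800000000000)) -> false */

Addresses that fail this test cause the removed 64-bit paths to raise #GP(0) via iemRaiseGeneralProtectionFault0().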