VirtualBox

Changeset 94712 in vbox


Timestamp:  Apr 26, 2022 11:49:13 PM
Author:     vboxsync
Message:    VMM/IEM: Some TLB compile and doc tweaks. bugref:9898
Location:   trunk/src/VBox/VMM
Files:      2 edited

  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h

    (r94051 → r94712)

    @@ -25,14 +25,16 @@
      * relative offsets.
      */
    -# ifdef IEM_WITH_CODE_TLB
    -#  define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm)         do { } while (0)
    -#  define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib)               do { } while (0)
    -#  define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp)         do { } while (0)
    -#  define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp)   do { } while (0)
    -#  define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp)         do { } while (0)
    -#  define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp)   do { } while (0)
    -#  define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp)  do { } while (0)
    -#  define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp)   do { } while (0)
    -#  error "Implement me: Getting ModR/M, SIB, displacement needs to work even when instruction crosses a page boundary."
    +# ifdef IEM_WITH_CODE_TLB /** @todo IEM TLB */
    +#  define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm)         do { a_bModRm  = 0; RT_NOREF(a_offModRm); } while (0)
    +#  define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib)               do { a_bSib    = 0; RT_NOREF(a_offSib);  } while (0)
    +#  define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp)         do { a_u16Disp = 0; RT_NOREF(a_offDisp); } while (0)
    +#  define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp)   do { a_u16Disp = 0; RT_NOREF(a_offDisp); } while (0)
    +#  define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp)         do { a_u32Disp = 0; RT_NOREF(a_offDisp); } while (0)
    +#  define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp)   do { a_u32Disp = 0; RT_NOREF(a_offDisp); } while (0)
    +#  define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp)  do { a_u64Disp = 0; RT_NOREF(a_offDisp); } while (0)
    +#  define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp)   do { a_u64Disp = 0; RT_NOREF(a_offDisp); } while (0)
    +#  if 1
    +#   error "Implement me: Getting ModR/M, SIB, displacement needs to work even when instruction crosses a page boundary."
    +#  endif
     # else  /* !IEM_WITH_CODE_TLB */
     #  define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) \
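
The stubbed getters above zero their outputs and RT_NOREF the offsets so the
IEM_WITH_CODE_TLB build compiles cleanly once the (now #if 1 wrapped) #error is
disabled for experiments.  The problem the #error names is that with the code
TLB the decoder reads directly out of the mapped guest code page rather than a
flat per-instruction opcode buffer, so a ModR/M, SIB or displacement byte that
lands past the end of the current page cannot be fetched with a plain buffer
index.  Below is a minimal sketch of the kind of boundary-aware getter the
error message asks for; the names (iemExampleGetInstrByte, pbInstrBuf,
cbInstrBufLeft, pfnFetchSlow) are illustrative assumptions, not IEM's real
interfaces.

        #include <stddef.h>
        #include <stdint.h>

        /* Sketch only: fetch one instruction byte (e.g. the ModR/M byte) when the
           instruction may straddle a page boundary.  All names are hypothetical. */
        static uint8_t iemExampleGetInstrByte(uint8_t const *pbInstrBuf,     /* bytes mapped from the current code page */
                                              size_t         cbInstrBufLeft, /* number of valid bytes in that mapping */
                                              size_t         offByte,        /* offset of the wanted byte in the instruction */
                                              uint8_t      (*pfnFetchSlow)(size_t offByte)) /* cross-page fallback */
        {
            if (offByte < cbInstrBufLeft)
                return pbInstrBuf[offByte];   /* fast path: byte is on the current page */
            return pfnFetchSlow(offByte);     /* slow path: byte lives on the next page */
        }
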
  • trunk/src/VBox/VMM/include/IEMInternal.h

    (r94423 → r94712)

    @@ -75,5 +75,6 @@
     
     
    -//#define IEM_WITH_CODE_TLB// - work in progress
    +//#define IEM_WITH_CODE_TLB // - work in progress
    +//#define IEM_WITH_DATA_TLB // - incomplete in progress
     
     
     
    @@ -212,5 +213,5 @@
     
             ; Check access.
    -        movsx   rax, ACCESS_FLAGS | MAPPING_R3_NOT_VALID | 0xffffff00
    +        mov     rax, ACCESS_FLAGS | MAPPING_R3_NOT_VALID | 0xffffff00
             and     rax, [rcx + IEMTLBENTRY.fFlagsAndPhysRev]
             cmp     rax, [uTlbPhysRev]
     
    @@ -239,5 +240,7 @@
     {
         /** The TLB entry tag.
    -     * Bits 35 thru 0 are made up of the virtual address shifted right 12 bits.
    +     * Bits 35 thru 0 are made up of the virtual address shifted right 12 bits, this
    +     * is ASSUMING a virtual address width of 48 bits.
    +     *
          * Bits 63 thru 36 are made up of the TLB revision (zero means invalid).
          *
     
    @@ -248,4 +251,11 @@
          * @note    Try use SHRD instruction?  After seeing
          *          https://gmplib.org/~tege/x86-timing.pdf, maybe not.
    +     *
    +     * @todo    This will need to be reorganized for 57-bit wide virtual address and
    +     *          PCID (currently 12 bits) and ASID (currently 6 bits) support.  We'll
    +     *          have to move the TLB entry versioning entirely to the
    +     *          fFlagsAndPhysRev member then, 57 bit wide VAs means we'll only have
    +     *          19 bits left (64 - 57 + 12 = 19) and they'll almost entire be
    +     *          consumed by PCID and ASID (12 + 6 = 18).
          */
        uint64_t                uTag;
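
The widened comment pins down the assumption behind the tag layout: with a
48-bit virtual address, the address shifted right by 12 fits exactly into bits
35:0 of uTag, leaving bits 63:36 for the TLB revision, and the new @todo notes
that 57-bit addresses plus PCID/ASID will no longer fit there.  A rough sketch
of how a tag on that layout could be composed and checked (helper names are
illustrative, not the actual IEM macros):

        #include <stdint.h>

        /* Sketch of the uTag layout described above: bits 35:0 hold the 48-bit
           virtual address shifted right by 12, bits 63:36 hold the TLB revision.
           uTlbRevision is assumed to be pre-shifted into bits 63:36 and non-zero
           (zero marks an invalid entry). */
        static uint64_t iemExampleTlbCalcTag(uint64_t GCPtr, uint64_t uTlbRevision)
        {
            /* The left shift drops the canonical sign-extension in bits 63:48,
               matching the 48-bit address width the comment assumes. */
            return ((GCPtr << 16) >> (16 + 12)) | uTlbRevision;
        }

        static int iemExampleTlbTagMatches(uint64_t uTag, uint64_t GCPtr, uint64_t uTlbRevision)
        {
            return uTag == iemExampleTlbCalcTag(GCPtr, uTlbRevision);
        }

One consequence of keeping the revision inside the tag is that bumping the
revision value invalidates every existing entry at once, since no stored tag
can compare equal to one built with the new revision bits.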
     
    @@ -290,6 +300,6 @@
     #define IEMTLBE_F_PG_NO_WRITE       RT_BIT_64(3) /**< Phys page:   Not writable (access handler, ROM, whatever). */
     #define IEMTLBE_F_PG_NO_READ        RT_BIT_64(4) /**< Phys page:   Not readable (MMIO / access handler, ROM) */
    -#define IEMTLBE_F_PT_NO_DIRTY       RT_BIT_64(5) /**< Page tables: Not dirty (needs to be made dirty on write). */
    -#define IEMTLBE_F_NO_MAPPINGR3      RT_BIT_64(6) /**< TLB entry:   The IEMTLBENTRY::pMappingR3 member is invalid. */
    +#define IEMTLBE_F_PT_NO_DIRTY       RT_BIT_64(6) /**< Page tables: Not dirty (needs to be made dirty on write). */
    +#define IEMTLBE_F_NO_MAPPINGR3      RT_BIT_64(7) /**< TLB entry:   The IEMTLBENTRY::pMappingR3 member is invalid. */
     #define IEMTLBE_F_PHYS_REV          UINT64_C(0xffffffffffffff00) /**< Physical revision mask. */
     /** @} */
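
All of the flag bits stay below bit 8, so IEMTLBE_F_PHYS_REV can reserve bits
63:8 of fFlagsAndPhysRev for the physical revision.  That packing is what
allows the single and/cmp access check shown in the assembly comment earlier in
the file.  A rough C equivalent, assuming (hypothetically) that uTlbPhysRev
holds the current physical revision with all of its low flag bits clear:

        #include <stdint.h>

        /* Bit values copied from the defines above (only the ones the sketch uses). */
        #define EX_IEMTLBE_F_PG_NO_READ  (UINT64_C(1) << 4)            /* page not readable */
        #define EX_IEMTLBE_F_PHYS_REV    UINT64_C(0xffffffffffffff00)  /* physical revision bits */

        /* Sketch of the access check: mask the flags that must be clear together
           with the revision bits and compare against the expected revision.  The
           check passes only if the page is readable and the entry is still
           current.  Names are illustrative, not IEM's real API. */
        static int iemExampleTlbCanRead(uint64_t fFlagsAndPhysRev, uint64_t uTlbPhysRev)
        {
            return (fFlagsAndPhysRev & (EX_IEMTLBE_F_PG_NO_READ | EX_IEMTLBE_F_PHYS_REV))
                == uTlbPhysRev;
        }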