VirtualBox

Changeset 61993 in vbox for trunk/src


Timestamp:
Jul 3, 2016 5:38:02 PM
Author:
vboxsync
Message:

IEM: Early TLB plans.

File:
1 edited

  • trunk/src/VBox/VMM/include/IEMInternal.h

    r61968 → r61993

 #endif /* IEM_VERIFICATION_MODE_FULL */
+
+
+/**
+ * IEM TLB entry.
+ *
+ * Lookup assembly:
+ * @code{.asm}
+        ; Calculate tag.
+        mov     rax, [VA]
+        shl     rax, 16
+        shr     rax, 16 + X86_PAGE_SHIFT
+        or      rax, [uTlbRevision]
+
+        ; Do indexing.
+        movzx   ecx, al
+        shl     ecx, 5              ; each IEMTLBENTRY is 32 bytes
+        lea     rcx, [pTlbEntries + rcx]
+
+        ; Check tag.
+        cmp     [rcx + IEMTLBENTRY.uTag], rax
+        jne     .TlbMiss
+
+        ; Check access.
+        mov     rax, ACCESS_FLAGS | MAPPING_R3_NOT_VALID | 0xffffffffffffff00
+        and     rax, [rcx + IEMTLBENTRY.fFlagsAndPhysRev]
+        cmp     rax, [uTlbPhysRev]
+        jne     .TlbMiss
+
+        ; Calc address and we're done.
+        mov     eax, X86_PAGE_OFFSET_MASK
+        and     eax, [VA]
+        or      rax, [rcx + IEMTLBENTRY.pMappingR3]
+    %ifdef VBOX_WITH_STATISTICS
+        inc     qword [cTlbHits]
+    %endif
+        jmp     .Done
+
+    .TlbMiss:
+        mov     r8d, ACCESS_FLAGS
+        mov     rdx, [VA]
+        mov     rcx, [pIemCpu]
+        call    iemTlbTypeMiss
+    .Done:
+
+   @endcode
+ *
+ */
+typedef struct IEMTLBENTRY
+{
+    /** The TLB entry tag.
+     * Bits 35 thru 0 are made up of the virtual address shifted right 12 bits.
+     * Bits 63 thru 36 are made up of the TLB revision (zero means invalid).
+     *
+     * The TLB lookup code uses the current TLB revision, which won't ever be zero,
+     * enabling an extremely cheap TLB invalidation most of the time.  When the TLB
+     * revision wraps around though, the tags need to be zeroed.
+     *
+     * @note    Try using the SHRD instruction?  After seeing
+     *          https://gmplib.org/~tege/x86-timing.pdf, maybe not.
+     */
+    uint64_t                uTag;
+    /** Access flags and physical TLB revision.
+     *
+     * - Bit  0 - page tables   - not executable (X86_PTE_PAE_NX).
+     * - Bit  1 - page tables   - not writable (complemented X86_PTE_RW).
+     * - Bit  2 - page tables   - not user (complemented X86_PTE_US).
+     * - Bit  3 - pgm phys/virt - not directly writable.
+     * - Bit  4 - pgm phys page - not directly readable.
+     * - Bit  5 - tlb entry     - HCPhys member not valid.
+     * - Bit  6 - page tables   - not dirty (complemented X86_PTE_D).
+     * - Bit  7 - tlb entry     - pMappingR3 member not valid.
+     * - Bits 63 thru 8 are used for the physical TLB revision number.
+     *
+     * We're using complemented bit meanings here because it makes it easy to check
+     * whether special action is required.  For instance a user mode write access
+     * would do a "TEST fFlags, (X86_PTE_RW | X86_PTE_US | X86_PTE_D)" and a
+     * non-zero result would mean special handling needed because either it wasn't
+     * writable, or it wasn't user, or the page wasn't dirty.  A user mode read
+     * access would do "TEST fFlags, X86_PTE_US"; and a kernel mode read wouldn't
+     * need to check any PTE flag.
+     */
+    uint64_t                fFlagsAndPhysRev;
+    /** The host physical page address (for raw-mode and maybe ring-0). */
+    RTHCPHYS                HCPhys;
+    /** Pointer to the ring-3 mapping. */
+    R3PTRTYPE(uint8_t *)    pMappingR3;
+#if HC_ARCH_BITS == 32
+    uint32_t                u32Padding1;
+#endif
+} IEMTLBENTRY;
+AssertCompileSize(IEMTLBENTRY, 32);
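To make the lookup above concrete, here is a minimal C sketch of the same steps: tag calculation, indexing with the low 8 bits of the tag, the tag check, the combined access-flag/physical-revision check, and the final address calculation. The MYTLB/MYTLBENTRY types, the tlbLookup name and the fAccessFlags/fNoMappingR3 parameters are simplified stand-ins (mirroring the structures in this changeset and the ACCESS_FLAGS/MAPPING_R3_NOT_VALID placeholders in the assembly), not IEM code.

    #include <stddef.h>
    #include <stdint.h>

    #define MY_PAGE_SHIFT        12                   /* stand-in for X86_PAGE_SHIFT */
    #define MY_PAGE_OFFSET_MASK  UINT64_C(0xfff)      /* stand-in for X86_PAGE_OFFSET_MASK */

    typedef struct MYTLBENTRY
    {
        uint64_t uTag;              /* VA >> 12 in bits 0..35, TLB revision in bits 36..63. */
        uint64_t fFlagsAndPhysRev;  /* Access flags in bits 0..7, physical revision in bits 8..63. */
        uint64_t HCPhys;            /* Host physical address (not used by this sketch). */
        uint8_t *pMappingR3;        /* Ring-3 mapping of the guest page. */
    } MYTLBENTRY;

    typedef struct MYTLB
    {
        MYTLBENTRY aEntries[256];   /* 256 entries so the low 8 bits of the tag are the index. */
        uint64_t   uTlbRevision;    /* Virtual revision (bits 36..63), never zero while in use. */
        uint64_t   uTlbPhysRev;     /* Physical revision (bits 8..63), low flag bits zero. */
        uint64_t   cTlbHits;        /* Statistics (VBOX_WITH_STATISTICS only in the real code). */
    } MYTLB;

    /* Returns the host address for a guest virtual address, or NULL on a TLB miss
       (where the assembly above would call a miss routine such as iemTlbTypeMiss). */
    static uint8_t *tlbLookup(MYTLB *pTlb, uint64_t uVa, uint64_t fAccessFlags, uint64_t fNoMappingR3)
    {
        /* Calculate the tag: VA bits 12..47 moved to bits 0..35, OR'ed with the current revision. */
        uint64_t const uTag = ((uVa << 16) >> (16 + MY_PAGE_SHIFT)) | pTlb->uTlbRevision;

        /* Do the indexing with the low 8 bits of the tag ('movzx ecx, al' above). */
        MYTLBENTRY *pEntry = &pTlb->aEntries[(uint8_t)uTag];

        /* Check the tag. */
        if (pEntry->uTag != uTag)
            return NULL;                        /* .TlbMiss */

        /* Check access: the flag bits of interest must be clear and the physical
           revision bits (8..63) must equal the current uTlbPhysRev. */
        uint64_t const fMask = fAccessFlags | fNoMappingR3 | UINT64_C(0xffffffffffffff00);
        if ((pEntry->fFlagsAndPhysRev & fMask) != pTlb->uTlbPhysRev)
            return NULL;                        /* .TlbMiss */

        /* Calc address and we're done. */
        pTlb->cTlbHits++;
        return pEntry->pMappingR3 + (uVa & MY_PAGE_OFFSET_MASK);
    }

Folding the revision into the tag is what makes invalidation cheap: a single 64-bit compare both matches the page and rejects anything cached before the last revision bump.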
+
+
+/**
+ * An IEM TLB.
+ *
+ * We've got two of these, one for data and one for instructions.
+ */
+typedef struct IEMTLB
+{
+    /** The TLB entries.
+     * We've chosen 256 because that way we can obtain the result directly from an
+     * 8-bit register without an additional AND instruction. */
+    IEMTLBENTRY         aEntries[256];
+    /** The TLB revision.
+     * This is actually only 28 bits wide (see IEMTLBENTRY::uTag) and is incremented
+     * by adding RT_BIT_64(36) to it.  When it wraps around and becomes zero, all
+     * the tags in the TLB must be zeroed and the revision set to RT_BIT_64(36).
+     * (The revision zero indicates an invalid TLB entry.)
+     *
+     * The initial value is chosen to cause an early wraparound. */
+    uint64_t            uTlbRevision;
+    /** The TLB physical address revision - shadow of PGM variable.
+     * This is actually only 56 bits wide (see IEMTLBENTRY::fFlagsAndPhysRev) and is
+     * incremented by adding RT_BIT_64(8).  When it wraps around and becomes zero,
+     * a rendezvous is called and each CPU wipes the IEMTLBENTRY::pMappingR3,
+     * IEMTLBENTRY::HCPhys and bits 3, 4 and 8-63 in IEMTLBENTRY::fFlagsAndPhysRev.
+     *
+     * The initial value is chosen to cause an early wraparound. */
+    uint64_t volatile   uTlbPhysRev;
+
+    /* Statistics: */
+
+    /** TLB hits (VBOX_WITH_STATISTICS only). */
+    uint64_t            cTlbHits;
+    /** TLB misses. */
+    uint32_t            cTlbMisses;
+    /** TLB misses because of tag mismatch. */
+    uint32_t            cTlbMissesTag;
+    /** TLB misses because of virtual access violation. */
+    uint32_t            cTlbMissesVirtAccess;
+    /** TLB misses because of dirty bit. */
+    uint32_t            cTlbMissesDirty;
+    /** TLB misses because of MMIO. */
+    uint32_t            cTlbMissesMmio;
+    /** TLB misses because of write access handlers. */
+    uint32_t            cTlbMissesWriteHandler;
+    /** Alignment padding. */
+    uint32_t            au32Padding[4];
+} IEMTLB;
+AssertCompileSizeAlignment(IEMTLB, 64);
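The uTlbRevision comment describes invalidating the whole TLB by bumping the revision instead of clearing all 256 entries. A sketch of that scheme, reusing the simplified types from the lookup sketch above (tlbInvalidateAll is an invented name, not an IEM function):

    /* Invalidate every entry by moving to a new revision; only the rare 28-bit
       wraparound pays for touching all 256 tags. */
    static void tlbInvalidateAll(MYTLB *pTlb)
    {
        pTlb->uTlbRevision += UINT64_C(1) << 36;        /* RT_BIT_64(36) */
        if (pTlb->uTlbRevision == 0)                    /* Revision field wrapped around. */
        {
            /* Zeroed tags can never match a lookup, since the live revision is never zero. */
            for (size_t i = 0; i < 256; i++)
                pTlb->aEntries[i].uTag = 0;
            pTlb->uTlbRevision = UINT64_C(1) << 36;
        }
    }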
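The uTlbPhysRev comment likewise implies a rare slow path when the physical revision wraps: a rendezvous in which each CPU scrubs the physically derived state of its entries. A sketch of that per-CPU wipe, again with the simplified types and an invented function name (the rendezvous machinery itself is not shown):

    /* Per-CPU wipe for a physical-revision wraparound: drop the host mapping and
       physical address, and clear bits 3, 4 and 8..63 of fFlagsAndPhysRev, as the
       uTlbPhysRev comment describes; bits 0..2, 5, 6 and 7 are left untouched. */
    static void tlbWipePhysicalState(MYTLB *pTlb)
    {
        for (size_t i = 0; i < 256; i++)
        {
            MYTLBENTRY *pEntry = &pTlb->aEntries[i];
            pEntry->pMappingR3        = NULL;
            pEntry->HCPhys            = 0;
            pEntry->fFlagsAndPhysRev &= ~(UINT64_C(0xffffffffffffff00) | (1u << 3) | (1u << 4));
        }
    }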
