VirtualBox

Changeset 91283 in vbox


Timestamp:
Sep 16, 2021 1:58:36 PM
Author:
vboxsync
Message:

VMM/CPUM: Moved the host's extended state (XState) from the hyper heap and into CPUMCTX. bugref:10093
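
A rough sketch of what the move amounts to: before, CPUMHOSTCTX only held ring-0/ring-3 pointers to an XSAVE area allocated from the hyper heap; after, the save area is embedded in the structure itself. The types below are simplified stand-ins for illustration, not the VBox definitions:

    #include <stdint.h>

    /* Stand-in for X86XSAVEAREA, for illustration only. */
    typedef struct X86XSAVEAREA_SKETCH { uint8_t ab[512]; } X86XSAVEAREA_SKETCH;

    /* Before (simplified): the host save area lived on the hyper heap and was
     * reached through context-specific pointers. */
    typedef struct CPUMHOSTCTX_OLD_SKETCH
    {
        X86XSAVEAREA_SKETCH *pXStateR0;   /* ring-0 mapping of the heap block */
        X86XSAVEAREA_SKETCH *pXStateR3;   /* ring-3 mapping of the same block */
    } CPUMHOSTCTX_OLD_SKETCH;

    /* After (simplified): the save area is embedded, so there is no separate
     * allocation and no R0/R3 address translation; &Host.u.XState is the address. */
    typedef struct CPUMHOSTCTX_NEW_SKETCH
    {
        union
        {
            X86XSAVEAREA_SKETCH XState;                   /* typed view            */
            uint8_t             abXState[0x4000 - 0x300]; /* byte view, fixes size */
        } u;
    } CPUMHOSTCTX_NEW_SKETCH;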

Location:
trunk
Files:
9 edited

  • trunk/include/VBox/vmm/vm.h

    r91281 → r91283

    @@ -305,4 +305,4 @@
             CPUMCTX             GstCtx;
     #endif
    -        uint8_t             padding[20480];      /* multiple of 4096 */
    +        uint8_t             padding[36864];      /* multiple of 4096 */
         } cpum;
  • trunk/include/VBox/vmm/vm.mac

    r91281 → r91283

    @@ -91,5 +91,5 @@
         .pgm                    resb 4096+28672
         alignb 4096
    -    .cpum                   resb 20480
    +    .cpum                   resb 36864
     %define VMCPU.cpum.GstCtx   VMCPU.cpum
         alignb 4096
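
    Both the C header (vm.h) and its assembly mirror (vm.mac) grow the space reserved for the cpum substructure from 20480 to 36864 bytes so the embedded host XState fits; the two values must stay identical and remain a whole number of 4 KiB pages. A hedged illustration of the kind of compile-time guard that keeps such a constant honest (the macro name is hypothetical, not VBox's):

        #include <assert.h>

        #define CPUM_RESERVATION_SKETCH 36864   /* must match vm.h and vm.mac */

        /* 36864 = 9 * 4096, so the reservation stays page-granular. */
        static_assert(CPUM_RESERVATION_SKETCH % 4096 == 0,
                      "cpum reservation must be a multiple of 4096");
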
  • trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp

    r87361 → r91283

    @@ -186,4 +186,5 @@
     {
         LogFlow(("CPUMR0Init: %p\n", pVM));
    +    AssertCompile(sizeof(pVM->aCpus[0].cpum.s.Host.abXState) >= sizeof(pVM->aCpus[0].cpum.s.Guest.abXState));
     
         /*
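
    The new AssertCompile turns the size relationship between the host and guest save areas into a build-time guarantee instead of something checked at runtime. A generic C11 equivalent (the struct sizes are stand-ins; only the host size is taken from this changeset):

        #include <assert.h>
        #include <stdint.h>

        typedef struct { uint8_t abXState[0x4000 - 0x300]; } HostCtxSketch;   /* size from the diff */
        typedef struct { uint8_t abXState[0x4000 - 0x300]; } GuestCtxSketch;  /* hypothetical size  */

        /* Fail the build if the host buffer ever becomes smaller than the guest's. */
        static_assert(sizeof(HostCtxSketch) >= sizeof(GuestCtxSketch),
                      "host XState area must cover the guest XState area");
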
  • trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm

    r91281 → r91283

    @@ -144,5 +144,5 @@
     %ifdef VBOX_WITH_KERNEL_USING_XMM
             ; If we didn't save the host state, we must save the non-volatile XMM registers.
    -        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
    +        lea     pXState, [pCpumCpu + CPUMCPU.Host.XState]
             stmxcsr [pXState + X86FXSTATE.MXCSR]
             movdqa  [pXState + X86FXSTATE.xmm6 ], xmm6

    @@ -166,5 +166,5 @@
     %ifdef VBOX_WITH_KERNEL_USING_XMM
             ; Restore the non-volatile xmm registers. ASSUMING 64-bit host.
    -        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
    +        lea     pXState, [pCpumCpu + CPUMCPU.Host.XState]
             movdqa  xmm6,  [pXState + X86FXSTATE.xmm6]
             movdqa  xmm7,  [pXState + X86FXSTATE.xmm7]

    @@ -242,5 +242,5 @@
             ; them while saving the guest state (we've gotta do this anyway).
             ;
    -        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
    +        lea     pXState, [pCpumCpu + CPUMCPU.Host.XState]
             stmxcsr [pXState + X86FXSTATE.MXCSR]
             movdqa  [pXState + X86FXSTATE.xmm6], xmm6
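
    With the save area embedded in CPUMCPU, the assembly no longer loads a stored ring-0 pointer (mov from CPUMCPU.Host.pXStateR0) but computes the member's address directly (lea of CPUMCPU.Host.XState). The same idea expressed in C, as a sketch with stand-in types:

        #include <stdint.h>

        typedef struct { uint32_t MXCSR; uint8_t abRest[508]; } FxStateSketch;  /* stand-in */
        typedef struct
        {
            struct
            {
                FxStateSketch  XState;      /* new: embedded save area          */
                FxStateSketch *pXStateR0;   /* old: pointer into the hyper heap */
            } Host;
        } CpumCpuSketch;

        static FxStateSketch *GetHostXStateOld(CpumCpuSketch *pCpumCpu)
        {
            return pCpumCpu->Host.pXStateR0;   /* 'mov pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]' */
        }

        static FxStateSketch *GetHostXStateNew(CpumCpuSketch *pCpumCpu)
        {
            return &pCpumCpu->Host.XState;     /* 'lea pXState, [pCpumCpu + CPUMCPU.Host.XState]'    */
        }
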
  • trunk/src/VBox/VMM/VMMR3/CPUM.cpp

    r91281 → r91283

    @@ -2282,13 +2282,11 @@
         /*
    -     * Allocate memory for the extended CPU state and initialize the host XSAVE/XRSTOR mask.
    +     * Initialize the host XSAVE/XRSTOR mask.
          */
         uint32_t cbMaxXState = pVM->cpum.s.HostFeatures.cbMaxExtendedState;
         cbMaxXState = RT_ALIGN(cbMaxXState, 128);
    -    AssertLogRelReturn(cbMaxXState >= sizeof(X86FXSTATE) && cbMaxXState <= _8K, VERR_CPUM_IPE_2);
    -
    -    uint8_t *pbXStates;
    -    rc = MMR3HyperAllocOnceNoRelEx(pVM, cbMaxXState * pVM->cCpus, PAGE_SIZE, MM_TAG_CPUM_CTX,
    -                                   MMHYPER_AONR_FLAGS_KERNEL_MAPPING, (void **)&pbXStates);
    -    AssertLogRelRCReturn(rc, rc);
    +    AssertLogRelReturn(   pVM->cpum.s.HostFeatures.cbMaxExtendedState >= sizeof(X86FXSTATE)
    +                       && pVM->cpum.s.HostFeatures.cbMaxExtendedState <= sizeof(pVM->apCpusR3[0]->cpum.s.Host.XState)
    +                       && pVM->cpum.s.HostFeatures.cbMaxExtendedState <= sizeof(pVM->apCpusR3[0]->cpum.s.Guest.XState)
    +                       , VERR_CPUM_IPE_2);
     
         for (VMCPUID i = 0; i < pVM->cCpus; i++)

    @@ -2296,10 +2294,5 @@
             PVMCPU pVCpu = pVM->apCpusR3[i];
     
    -        pVCpu->cpum.s.Host.pXStateR3  = (PX86XSAVEAREA)pbXStates;
    -        pVCpu->cpum.s.Host.pXStateR0  = MMHyperR3ToR0(pVM, pbXStates);
    -        pbXStates += cbMaxXState;
    -
    -        pVCpu->cpum.s.Host.fXStateMask = fXStateHostMask;
    -
    +        pVCpu->cpum.s.Host.fXStateMask       = fXStateHostMask;
             pVCpu->cpum.s.hNestedVmxPreemptTimer = NIL_TMTIMERHANDLE;
         }
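
    The ring-3 init code no longer carves one XSAVE area per VCPU out of the hyper heap and no longer stores R0/R3 pointers to it; it only validates that the host's maximum extended-state size fits in the embedded buffers and records the XSAVE/XRSTOR mask. A condensed sketch of the new flow (types, names and return values are stand-ins):

        #include <stdint.h>

        typedef struct { uint8_t abXState[0x4000 - 0x300]; uint64_t fXStateMask; } HostCtxSketch;
        typedef struct { HostCtxSketch Host; } VCpuSketch;

        static int InitHostXStateSketch(VCpuSketch *paCpus, uint32_t cCpus,
                                        uint32_t cbMaxExtendedState, uint64_t fXStateHostMask)
        {
            /* Equivalent of the AssertLogRelReturn above: the embedded buffer must
             * be large enough for everything the host can XSAVE. */
            if (cbMaxExtendedState > sizeof(paCpus[0].Host.abXState))
                return -1;                                  /* VERR_CPUM_IPE_2 in the real code */

            for (uint32_t i = 0; i < cCpus; i++)
                paCpus[i].Host.fXStateMask = fXStateHostMask;   /* no allocation needed */
            return 0;                                       /* VINF_SUCCESS */
        }
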
  • trunk/src/VBox/VMM/VMMRZ/CPUMRZA.asm

    r91281 → r91283

    @@ -160,4 +160,4 @@
             ; Save caller's XMM registers.
    -        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
    +        lea     pXState, [pCpumCpu + CPUMCPU.Host.XState]
             movdqa  [pXState + X86FXSTATE.xmm6 ], xmm6
             movdqa  [pXState + X86FXSTATE.xmm7 ], xmm7

    @@ -196,4 +196,4 @@
             ; Restore caller's XMM registers.
    -        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
    +        lea     pXState, [pCpumCpu + CPUMCPU.Host.XState]
             movdqa  xmm6,  [pXState + X86FXSTATE.xmm6 ]
             movdqa  xmm7,  [pXState + X86FXSTATE.xmm7 ]

    @@ -249,5 +249,5 @@
     %ifndef VBOX_WITH_KERNEL_USING_XMM
             ;
    -        ; Load xCX with the guest pXStateR0.
    +        ; Load xCX with the guest pXState.
             ;
      %ifdef ASM_CALL64_GCC

    @@ -324,4 +324,4 @@
             ;
    -        ; Load xCX with the guest pXStateR0.
    +        ; Load xCX with the guest pXState.
             ;
     %ifdef ASM_CALL64_GCC
  • trunk/src/VBox/VMM/include/CPUMInternal.h

    r91271 → r91283

    @@ -203,4 +203,13 @@
     typedef struct CPUMHOSTCTX
     {
    +    /** The extended state (FPU/SSE/AVX/AVX-2/XXXX). Must be aligned on 64 bytes. */
    +    union /* no tag */
    +    {
    +        X86XSAVEAREA    XState;
    +        /** Byte view for simple indexing and space allocation.
    +         * @note Must match or exceed the size of CPUMCTX::abXState. */
    +        uint8_t         abXState[0x4000 - 0x300];
    +    } CPUM_UNION_NM(u);
    +
         /** General purpose register, selectors, flags and more
          * @{ */

    @@ -284,20 +293,15 @@
         /** @} */
     
    +    /** The XCR0 register. */
    +    uint64_t        xcr0;
    +    /** The mask to pass to XSAVE/XRSTOR in EDX:EAX.  If zero we use
    +     *  FXSAVE/FXRSTOR (since bit 0 will always be set, we only need to test it). */
    +    uint64_t        fXStateMask;
    +
         /* padding to get 64byte aligned size */
    -    uint8_t         auPadding[8];
    -
    +    uint8_t         auPadding[24];
     #if HC_ARCH_BITS != 64
     # error HC_ARCH_BITS not defined or unsupported
     #endif
    -
    -    /** Pointer to the FPU/SSE/AVX/XXXX state ring-0 mapping. */
    -    R0PTRTYPE(PX86XSAVEAREA)    pXStateR0;
    -    /** Pointer to the FPU/SSE/AVX/XXXX state ring-3 mapping. */
    -    R3PTRTYPE(PX86XSAVEAREA)    pXStateR3;
    -    /** The XCR0 register. */
    -    uint64_t                    xcr0;
    -    /** The mask to pass to XSAVE/XRSTOR in EDX:EAX.  If zero we use
    -     *  FXSAVE/FXRSTOR (since bit 0 will always be set, we only need to test it). */
    -    uint64_t                    fXStateMask;
     } CPUMHOSTCTX;
     #ifndef VBOX_FOR_DTRACE_LIB

    @@ -481,4 +485,7 @@
     #endif
     } CPUMCPU;
    +#ifndef VBOX_FOR_DTRACE_LIB
    +AssertCompileMemberAlignment(CPUMCPU, Host, 64);
    +#endif
     /** Pointer to the CPUMCPU instance data residing in the shared VMCPU structure. */
     typedef CPUMCPU *PCPUMCPU;
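
    The embedded area is declared as a union so the same storage can be accessed through the typed X86XSAVEAREA view or as a flat byte array whose size (0x4000 - 0x300 bytes) reserves room for whatever XSAVE components the host exposes. A minimal, self-contained illustration of the pattern (types are stand-ins):

        #include <stdint.h>
        #include <string.h>

        typedef struct { uint8_t ab[512]; } XSaveAreaSketch;   /* stand-in for X86XSAVEAREA */

        typedef struct
        {
            union
            {
                XSaveAreaSketch XState;                   /* typed view for field access   */
                uint8_t         abXState[0x4000 - 0x300]; /* byte view that fixes the size */
            } u;
        } HostCtxSketch;

        /* Example use: the byte view makes zeroing or indexing by raw XSAVE
         * component offsets straightforward. */
        static void ZeroXStateSketch(HostCtxSketch *pCtx)
        {
            memset(pCtx->u.abXState, 0, sizeof(pCtx->u.abXState));
        }
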
  • trunk/src/VBox/VMM/include/CPUMInternal.mac

    r91281 → r91283

    @@ -286,4 +286,6 @@
         alignb 64
         .Host                resb    0
    +    .Host.abXState          resb    0x4000-0x300
    +    .Host.XState            EQU     .Host.abXState
         ;.Host.rax            resq    1 - scratch
         .Host.rbx            resq    1

    @@ -346,8 +348,4 @@
         .Host.GSbase         resq    1
         .Host.efer           resq    1
    -    .Host.auPadding      resb    4
    -    alignb RTR0PTR_CB
    -    .Host.pXStateR0 RTR0PTR_RES  1
    -    .Host.pXStateR3 RTR3PTR_RES  1
         alignb 8
         .Host.xcr0           resq    1

    @@ -478,8 +476,4 @@
             ; Load a couple of registers we'll use later in all branches.
             ;
    - %ifdef IN_RING0
    -        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
    - %else
    -  %error "Unsupported context!"
    - %endif
    +        lea     pXState, [pCpumCpu + CPUMCPU.Host.XState]
             mov     eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]

    @@ -523,8 +517,4 @@
             ; Load a couple of registers we'll use later in all branches.
             ;
    - %ifdef IN_RING0
    -        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
    - %else
    -  %error "Unsupported context!"
    - %endif
    +        lea     pXState, [pCpumCpu + CPUMCPU.Host.XState]
             mov     eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]

    @@ -679,8 +669,4 @@
             ; Load a couple of registers we'll use later in all branches.
             ;
    - %ifdef IN_RING0
             lea     pXState, [pCpumCpu + CPUMCPU.Guest.XState]
    - %else
    -  %error "Unsupported context!"
    - %endif
             mov     eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]
  • trunk/src/VBox/VMM/testcase/tstVMStruct.h

    r91281 → r91283

    @@ -58,5 +58,5 @@
         GEN_CHECK_SIZE(CPUMHOSTCTX);
    -    GEN_CHECK_OFF(CPUMHOSTCTX, pXStateR3);
    -    GEN_CHECK_OFF(CPUMHOSTCTX, pXStateR0);
    +    GEN_CHECK_OFF(CPUMHOSTCTX, XState);
    +    GEN_CHECK_OFF(CPUMHOSTCTX, abXState);
     #if HC_ARCH_BITS == 64
         GEN_CHECK_OFF(CPUMHOSTCTX, rbx);
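
    tstVMStruct cross-checks member offsets between the C structures and their assembly mirrors, so its GEN_CHECK_OFF entries simply follow the rename from the old pointer members to the embedded XState/abXState. A generic offsetof-based analogue of such a check (the layout and expected value below are illustrative, not VBox's):

        #include <assert.h>
        #include <stddef.h>
        #include <stdint.h>

        typedef struct
        {
            union { uint8_t abXState[0x4000 - 0x300]; } u;   /* stand-in layout */
            uint64_t xcr0;
        } HostCtxSketch;

        /* In VBox the expected offsets come from the .mac definitions; here the
         * value is just what this sketch's layout implies. */
        static_assert(offsetof(HostCtxSketch, xcr0) == 0x4000 - 0x300,
                      "xcr0 is expected to follow the XState byte view immediately");
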