VirtualBox changeset r87330

Timestamp:
Jan 20, 2021 7:02:24 PM
Author:
vboxsync
Message:

VMM/HMR0A.asm: Reduced the number of parameters passed to SVMR0VMRun. Required extending HMInternal.mac quite a bit.
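In practical terms, both SVM entry points drop to register-only argument lists. The following before/after prototypes are a sketch reconstructed from the @cproto and @param comments in the hunks below (the actual header declarations are not part of this changeset):

    ; Before (r87321) -- tail parameters land on the stack under the MSC x64 ABI:
    ;   DECLASM(int) SVMR0VMRun(RTHCPHYS HCPhysVmcbHost, RTHCPHYS HCPhysVmcb, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
    ;   DECLASM(int) hmR0SVMRunWrapXMM(RTHCPHYS HCPhysVmcbHost, RTHCPHYS HCPhysVmcb, PCPUMCTX pCtx, PVM pVM,
    ;                                  PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
    ; After (r87330) -- everything register-passed; HCPhysVmcbHost is now fetched
    ; from HMCPUSVM inside VMCPU (hence the HMInternal.mac extension):
    ;   DECLASM(int) SVMR0VMRun(PVM pVM, PVMCPU pVCpu, RTHCPHYS HCPhysVmcb);
    ;   DECLASM(int) hmR0SVMRunWrapXMM(PVM pVM, PVMCPU pVCpu, RTHCPHYS HCPhysVmcb, PFNHMSVMVMRUN pfnVMRun);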

File:
1 edited

Legend: unchanged lines are shown unprefixed, removed lines are prefixed with '-', added lines with '+'.
  • trunk/src/VBox/VMM/VMMR0/HMR0A.asm

--- trunk/src/VBox/VMM/VMMR0/HMR0A.asm (r87321)
+++ trunk/src/VBox/VMM/VMMR0/HMR0A.asm (r87330)
@@ -192,6 +192,21 @@
 ; @param    1   How to address CPUMCTX.
 ; @param    2   Which flag to test for (CPUMCTX_WSF_IBPB_ENTRY or CPUMCTX_WSF_IBPB_EXIT)
+%macro INDIRECT_BRANCH_PREDICTION_BARRIER_OLD 2
+    test    byte [%1 + CPUMCTX.fWorldSwitcher], %2
+    jz      %%no_indirect_branch_barrier
+    mov     ecx, MSR_IA32_PRED_CMD
+    mov     eax, MSR_IA32_PRED_CMD_F_IBPB
+    xor     edx, edx
+    wrmsr
+%%no_indirect_branch_barrier:
+%endmacro
+
+;;
+; Creates an indirect branch prediction barrier on CPUs that need and supports that.
+; @clobbers eax, edx, ecx
+; @param    1   How to address VMCPU.
+; @param    2   Which flag to test for (CPUMCTX_WSF_IBPB_ENTRY or CPUMCTX_WSF_IBPB_EXIT)
 %macro INDIRECT_BRANCH_PREDICTION_BARRIER 2
-    test    byte [%1 + CPUMCTX.fWorldSwitcher], %2
+    test    byte [%1 + VMCPU.cpum.GstCtx + CPUMCTX.fWorldSwitcher], %2
     jz      %%no_indirect_branch_barrier
     mov     ecx, MSR_IA32_PRED_CMD
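For orientation: both macro variants emit the same IBPB barrier, a write of the IBPB command bit to the IA32_PRED_CMD MSR; only the addressing of the fWorldSwitcher flags differs. A rough expansion of the new macro at the entry-side call site further down (assuming rsi = pVCpu):

    ; INDIRECT_BRANCH_PREDICTION_BARRIER rsi, CPUMCTX_WSF_IBPB_ENTRY expands to roughly:
    test    byte [rsi + VMCPU.cpum.GstCtx + CPUMCTX.fWorldSwitcher], CPUMCTX_WSF_IBPB_ENTRY
    jz      %%no_indirect_branch_barrier        ; flag clear: no barrier needed/supported here
    mov     ecx, MSR_IA32_PRED_CMD              ; MSR index
    mov     eax, MSR_IA32_PRED_CMD_F_IBPB       ; low dword: the IBPB command bit
    xor     edx, edx                            ; high dword must be zero
    wrmsr                                       ; issue the barrier (trashes eax, ecx, edx)
%%no_indirect_branch_barrier: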
     
@@ -575,15 +590,12 @@
 ; load the guest ones when necessary.
 ;
-; @cproto       DECLASM(int) hmR0SVMRunWrapXMM(RTHCPHYS HCPhysVmcbHost, RTHCPHYS HCPhysVmcb, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu,
-;                                              PFNHMSVMVMRUN pfnVMRun);
+; @cproto       DECLASM(int) hmR0SVMRunWrapXMM(PVM pVM, PVMCPU pVCpu, RTHCPHYS HCPhysVmcb, PFNHMSVMVMRUN pfnVMRun);
 ;
 ; @returns      eax
 ;
-; @param        HCPhysVmcbHost  msc:rcx
-; @param        HCPhysVmcb      msc:rdx
-; @param        pCtx            msc:r8
-; @param        pVM             msc:r9
-; @param        pVCpu           msc:[rbp+30h]   The cross context virtual CPU structure of the calling EMT.
-; @param        pfnVMRun        msc:[rbp+38h]
+; @param        pVM             msc:rcx
+; @param        pVCpu           msc:rdx        The cross context virtual CPU structure of the calling EMT.
+; @param        HCPhysVmcb      msc:r8
+; @param        pfnVMRun        msc:r9
 ;
 ; @remarks      This is essentially the same code as hmR0VMXStartVMWrapXMM, only the parameters differ a little bit.
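The register assignments above follow directly from the MSC x64 calling convention; a sketch of why the parameter reduction matters here:

    ; MSC x64 calling convention, as it applies to this wrapper:
    ;   arg1 -> rcx, arg2 -> rdx, arg3 -> r8, arg4 -> r9, arg5+ -> stack
    ; Old 6-arg form: pVCpu and pfnVMRun arrived on the caller's stack, read
    ; from [rbp + 30h] and [rbp + 38h] after the prologue, and pVCpu had to be
    ; copied to [xSP + 020h] as the callee's 5th stack argument.
    ; New 4-arg form: every argument is register-passed, so the body only
    ; spills them into its own frame (see the hunk that follows).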
     
@@ -596,5 +608,5 @@
 ;
 ; ASSUMING 64-bit and windows for now.
-ALIGNCODE(16)
+ALIGNCODE(64)
 BEGINPROC hmR0SVMRunWrapXMM
         push    xBP
     
@@ -602,25 +614,28 @@
         sub     xSP, 0b0h + 040h        ; don't bother optimizing the frame size
 
+%ifndef ASM_CALL64_MSC
+ %error "MSC only"
+%endif
         ; Spill input parameters.
-        mov     [xBP + 010h], rcx       ; HCPhysVmcbHost
-        mov     [xBP + 018h], rdx       ; HCPhysVmcb
-        mov     [xBP + 020h], r8        ; pCtx
-        mov     [xBP + 028h], r9        ; pVM
+        mov     [xBP + 010h], rcx       ; pVM
+        mov     [xBP + 018h], rdx       ; pVCpu
+        mov     [xBP + 020h], r8        ; HCPhysVmcb
+        mov     [xBP + 028h], r9        ; pfnVMRun
 
         ; Ask CPUM whether we've started using the FPU yet.
-        mov     rcx, [xBP + 30h]        ; pVCpu
+;; @todo implement this in assembly, it's just checking a couple of things. Or have the C code do it.
+        mov     rcx, rdx                ; pVCpu
         call    NAME(CPUMIsGuestFPUStateActive)
         test    al, al
+
+        mov     rcx, [xBP + 010h]       ; pVM
+        mov     rdx, [xBP + 018h]       ; pVCpu
+        mov     r8,  [xBP + 020h]       ; HCPhysVmcb
+        mov     r9,  [xBP + 028h]       ; pfnVMRun
+
         jnz     .guest_fpu_state_active
 
         ; No need to mess with XMM registers just call the start routine and return.
-        mov     r11, [xBP + 38h]        ; pfnVMRun
-        mov     r10, [xBP + 30h]        ; pVCpu
-        mov     [xSP + 020h], r10
-        mov     rcx, [xBP + 010h]       ; HCPhysVmcbHost
-        mov     rdx, [xBP + 018h]       ; HCPhysVmcb
-        mov     r8,  [xBP + 020h]       ; pCtx
-        mov     r9,  [xBP + 028h]       ; pVM
-        call    r11
+        call    r9
 
         leave
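A note on the reload block added after the CPUMIsGuestFPUStateActive call:

        ; Volatile under the MSC x64 ABI (may be trashed by the C call):
        ;   rax, rcx, rdx, r8, r9, r10, r11
        ; Hence the four reloads from the spill area before 'call r9'. The
        ; callee (pfnVMRun, i.e. SVMR0VMRun) takes pVM/pVCpu/HCPhysVmcb in
        ; rcx/rdx/r8, exactly the registers they were reloaded into, so the
        ; no-FPU fast path collapses to a single indirect call.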
     
@@ -630,4 +645,5 @@
 .guest_fpu_state_active:
         ; Save the non-volatile host XMM registers.
+;; @todo change to rbp relative addressing as that saves a byte per instruction!
         movdqa  [rsp + 040h + 000h], xmm6
         movdqa  [rsp + 040h + 010h], xmm7
     
@@ -642,6 +658,6 @@
         stmxcsr [rsp + 040h + 0a0h]
 
-        mov     r10, [xBP + 020h]       ; pCtx
-        mov     eax, [r10 + CPUMCTX.fXStateMask]
+        mov     r11, rdx                ; r11 = pVCpu (rdx may get trashed)
+        mov     eax, [rdx + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask]
         test    eax, eax
         jz      .guest_fpu_state_manually
     
@@ -652,31 +668,26 @@
         and     eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
         xor     edx, edx
-        mov     r10, [r10 + CPUMCTX.pXStateR0]
+        mov     r10, [r11 + VMCPU.cpum.GstCtx + CPUMCTX.pXStateR0]
         xrstor  [r10]
 
         ; Make the call (same as in the other case).
-        mov     r11, [xBP + 38h]        ; pfnVMRun
-        mov     r10, [xBP + 30h]        ; pVCpu
-        mov     [xSP + 020h], r10
-        mov     rcx, [xBP + 010h]       ; HCPhysVmcbHost
-        mov     rdx, [xBP + 018h]       ; HCPhysVmcb
-        mov     r8,  [xBP + 020h]       ; pCtx
-        mov     r9,  [xBP + 028h]       ; pVM
-        call    r11
-
-        mov     r11d, eax               ; save return value (xsave below uses eax)
+        mov     rdx, r11                ; restore pVCpu to rdx
+        call    r9
+
+        mov     r10d, eax               ; save return value (xsave below uses eax)
 
         ; Save the guest XMM registers.
-        mov     r10, [xBP + 020h]       ; pCtx
-        mov     eax, [r10 + CPUMCTX.fXStateMask]
+        mov     rcx, [xBP + 018h]       ; pVCpu
+        mov     eax, [rcx + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask]
         and     eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
+        mov     rcx, [rcx + VMCPU.cpum.GstCtx + CPUMCTX.pXStateR0]
         xor     edx, edx
-        mov     r10, [r10 + CPUMCTX.pXStateR0]
-        xsave  [r10]
-
-        mov     eax, r11d               ; restore return value
+        xsave   [rcx]
+
+        mov     eax, r10d               ; restore return value
 
 .restore_non_volatile_host_xmm_regs:
         ; Load the non-volatile host XMM registers.
+;; @todo change to rbp relative addressing as that saves a byte per instruction!
         movdqa  xmm6,  [rsp + 040h + 000h]
         movdqa  xmm7,  [rsp + 040h + 010h]
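For orientation, the component-selection pattern used on both sides of the call above: xrstor and xsave take a component bitmap in edx:eax, so masking the guest's fXStateMask with CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS restricts the restore/save to the state the ABI allows the callee to clobber. Roughly:

        ; xrstor/xsave component selection (mask in edx:eax):
        mov     eax, [rcx + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask]   ; guest's enabled components
        and     eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS              ; only the ABI-volatile ones
        xor     edx, edx                                               ; mask bits 63:32 = 0
        xsave   [rcx]                                                  ; rcx = pXStateR0 save area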
     
@@ -696,55 +707,50 @@
         ; No XSAVE, load and save the guest XMM registers manually.
         ;
+ALIGNCODE(8)
 .guest_fpu_state_manually:
         ; Load the full guest XMM register state.
-        mov     r10, [r10 + CPUMCTX.pXStateR0]
-        movdqa  xmm0,  [r10 + XMM_OFF_IN_X86FXSTATE + 000h]
-        movdqa  xmm1,  [r10 + XMM_OFF_IN_X86FXSTATE + 010h]
-        movdqa  xmm2,  [r10 + XMM_OFF_IN_X86FXSTATE + 020h]
-        movdqa  xmm3,  [r10 + XMM_OFF_IN_X86FXSTATE + 030h]
-        movdqa  xmm4,  [r10 + XMM_OFF_IN_X86FXSTATE + 040h]
-        movdqa  xmm5,  [r10 + XMM_OFF_IN_X86FXSTATE + 050h]
-        movdqa  xmm6,  [r10 + XMM_OFF_IN_X86FXSTATE + 060h]
-        movdqa  xmm7,  [r10 + XMM_OFF_IN_X86FXSTATE + 070h]
-        movdqa  xmm8,  [r10 + XMM_OFF_IN_X86FXSTATE + 080h]
-        movdqa  xmm9,  [r10 + XMM_OFF_IN_X86FXSTATE + 090h]
-        movdqa  xmm10, [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h]
-        movdqa  xmm11, [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h]
-        movdqa  xmm12, [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h]
-        movdqa  xmm13, [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h]
-        movdqa  xmm14, [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h]
-        movdqa  xmm15, [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h]
-        ldmxcsr        [r10 + X86FXSTATE.MXCSR]
+        mov     rdx, [r11 + VMCPU.cpum.GstCtx + CPUMCTX.pXStateR0]
+        movdqa  xmm0,  [rdx + XMM_OFF_IN_X86FXSTATE + 000h]
+        movdqa  xmm1,  [rdx + XMM_OFF_IN_X86FXSTATE + 010h]
+        movdqa  xmm2,  [rdx + XMM_OFF_IN_X86FXSTATE + 020h]
+        movdqa  xmm3,  [rdx + XMM_OFF_IN_X86FXSTATE + 030h]
+        movdqa  xmm4,  [rdx + XMM_OFF_IN_X86FXSTATE + 040h]
+        movdqa  xmm5,  [rdx + XMM_OFF_IN_X86FXSTATE + 050h]
+        movdqa  xmm6,  [rdx + XMM_OFF_IN_X86FXSTATE + 060h]
+        movdqa  xmm7,  [rdx + XMM_OFF_IN_X86FXSTATE + 070h]
+        movdqa  xmm8,  [rdx + XMM_OFF_IN_X86FXSTATE + 080h]
+        movdqa  xmm9,  [rdx + XMM_OFF_IN_X86FXSTATE + 090h]
+        movdqa  xmm10, [rdx + XMM_OFF_IN_X86FXSTATE + 0a0h]
+        movdqa  xmm11, [rdx + XMM_OFF_IN_X86FXSTATE + 0b0h]
+        movdqa  xmm12, [rdx + XMM_OFF_IN_X86FXSTATE + 0c0h]
+        movdqa  xmm13, [rdx + XMM_OFF_IN_X86FXSTATE + 0d0h]
+        movdqa  xmm14, [rdx + XMM_OFF_IN_X86FXSTATE + 0e0h]
+        movdqa  xmm15, [rdx + XMM_OFF_IN_X86FXSTATE + 0f0h]
+        ldmxcsr        [rdx + X86FXSTATE.MXCSR]
 
         ; Make the call (same as in the other case).
-        mov     r11, [xBP + 38h]        ; pfnVMRun
-        mov     r10, [xBP + 30h]        ; pVCpu
-        mov     [xSP + 020h], r10
-        mov     rcx, [xBP + 010h]       ; HCPhysVmcbHost
-        mov     rdx, [xBP + 018h]       ; HCPhysVmcb
-        mov     r8,  [xBP + 020h]       ; pCtx
-        mov     r9,  [xBP + 028h]       ; pVM
-        call    r11
+        mov     rdx, r11                ; restore pVCpu to rdx
+        call    r9
 
         ; Save the guest XMM registers.
-        mov     r10, [xBP + 020h]       ; pCtx
-        mov     r10, [r10 + CPUMCTX.pXStateR0]
-        stmxcsr [r10 + X86FXSTATE.MXCSR]
-        movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
-        movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
-        movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
-        movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
-        movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
-        movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
-        movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
-        movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
-        movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
-        movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
-        movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
-        movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
-        movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
-        movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
-        movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
-        movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
+        mov     rdx, [xBP + 018h]       ; pVCpu
+        mov     rdx, [rdx + VMCPU.cpum.GstCtx + CPUMCTX.pXStateR0]
+        stmxcsr [rdx + X86FXSTATE.MXCSR]
+        movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
+        movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
+        movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
+        movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
+        movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
+        movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
+        movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
+        movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
+        movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
+        movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
+        movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
+        movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
+        movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
+        movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
+        movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
+        movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
         jmp     .restore_non_volatile_host_xmm_regs
 ENDPROC   hmR0SVMRunWrapXMM
     
@@ -810,5 +816,5 @@
 
     ; Fight spectre.
-    INDIRECT_BRANCH_PREDICTION_BARRIER xDI, CPUMCTX_WSF_IBPB_EXIT
+    INDIRECT_BRANCH_PREDICTION_BARRIER_OLD xDI, CPUMCTX_WSF_IBPB_EXIT
 
  %ifndef VMX_SKIP_TR
     
@@ -1040,29 +1046,11 @@
 ;
 ; @returns  VBox status code
-; @param    HCPhysVmcbHost  msc:rcx,gcc:rdi     Physical address of host VMCB.
-; @param    HCPhysVmcb      msc:rdx,gcc:rsi     Physical address of guest VMCB.
-; @param    pCtx            msc:r8,gcc:rdx      Pointer to the guest-CPU context.
-; @param    pVM             msc:r9,gcc:rcx      The cross context VM structure.
-; @param    pVCpu           msc:[rsp+28],gcc:r8 The cross context virtual CPU structure of the calling EMT.
-;
-ALIGNCODE(16)
+; @param    pVM             msc:rcx,gcc:rdi     The cross context VM structure (unused).
+; @param    pVCpu           msc:rdx,gcc:rsi     The cross context virtual CPU structure of the calling EMT.
+; @param    HCPhysVmcb      msc:r8, gcc:rdx     Physical address of guest VMCB.
+;
+ALIGNCODE(64)
 BEGINPROC SVMR0VMRun
-    ; Fake a cdecl stack frame
- %ifdef ASM_CALL64_GCC
-    push    r8                ; pVCpu
-    push    rcx               ; pVM
-    push    rdx               ; pCtx
-    push    rsi               ; HCPhysVmcb
-    push    rdi               ; HCPhysVmcbHost
- %else
-    mov     rax, [rsp + 28h]
-    push    rax               ; rbp + 30h pVCpu
-    push    r9                ; rbp + 28h pVM
-    push    r8                ; rbp + 20h pCtx
-    push    rdx               ; rbp + 18h HCPhysVmcb
-    push    rcx               ; rbp + 10h HCPhysVmcbHost
-  %endif
-    push    0                 ; rbp + 08h "fake ret addr"
-    push    rbp               ; rbp + 00h
+    push    rbp
     mov     rbp, rsp
     pushf
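The deleted fake-cdecl prologue existed solely so the body could address its arguments rbp-relative. With three register-passed parameters (and HCPhysVmcbHost fetched from VMCPU instead) the frame shrinks to a plain push; a side-by-side sketch of the layouts, taken from the removed comments:

    ; Old frame (after the fake-cdecl pushes):      New frame:
    ;   rbp + 30h  pVCpu                              rbp + 08h  return address
    ;   rbp + 28h  pVM                                rbp + 00h  saved rbp
    ;   rbp + 20h  pCtx
    ;   rbp + 18h  HCPhysVmcb
    ;   rbp + 10h  HCPhysVmcbHost
    ;   rbp + 08h  "fake ret addr"
    ;   rbp + 00h  saved rbp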
     
@@ -1080,20 +1068,23 @@
     PUSH_CALLEE_PRESERVED_REGISTERS
 
-    ; Load pCtx into xSI.
-    mov     xSI, [rbp + xCB * 2 + RTHCPHYS_CB * 2]
+    ; Shuffle parameter registers so that r8=HCPhysVmcb and rsi=pVCpu.  (rdx & rcx will soon be trashed.)
+%ifdef ASM_CALL64_GCC
+    mov     r8, rdx                         ; Put HCPhysVmcb in r8 like on MSC as rdx is trashed below.
+%else
+    mov     rsi, rdx                        ; Put pVCpu in rsi like on GCC as rdx is trashed below.
+    ;mov     rdi, rcx                        ; Put pVM in rdi like on GCC as rcx is trashed below.
+%endif
 
     ; Save the host XCR0 and load the guest one if necessary.
-    mov     rax, [xBP + 30h]                ; pVCpu
-    test    byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
+    test    byte [rsi + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
     jz      .xcr0_before_skip
 
     xor     ecx, ecx
     xgetbv                                  ; save the host XCR0 on the stack
-    push    xDX
-    push    xAX
-
-    mov     xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2]  ; pCtx
-    mov     eax, [xSI + CPUMCTX.aXcr]       ; load the guest XCR0
-    mov     edx, [xSI + CPUMCTX.aXcr + 4]
+    push    rdx
+    push    rax
+
+    mov     eax, [rsi + VMCPU.cpum.GstCtx + CPUMCTX.aXcr] ; load the guest XCR0
+    mov     edx, [rsi + VMCPU.cpum.GstCtx + CPUMCTX.aXcr + 4]
     xor     ecx, ecx                        ; paranoia
     xsetbv
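For orientation, the XCR0 swap pattern above: xgetbv/xsetbv with ecx = 0 select extended control register 0 and move its value through edx:eax, and the host value is parked on the stack so the exit path can pop it back:

    xor     ecx, ecx        ; XCR index 0 = XCR0
    xgetbv                  ; edx:eax <- host XCR0
    push    rdx             ; stash host XCR0 on the stack...
    push    rax             ; ...the exit path pops it and xsetbv's it back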
     
@@ -1106,33 +1097,33 @@
 .xcr0_before_done:
 
-    ; Save guest CPU-context pointer for simplifying saving of the GPRs afterwards.
+    ; Save pVCpu pointer for simplifying saving of the GPRs afterwards.
     push    rsi
 
     ; Save host fs, gs, sysenter msr etc.
-    mov     rax, [rbp + xCB * 2]            ; HCPhysVmcbHost (64 bits physical address; x86: take low dword only)
+    mov     rax, [rsi + VMCPU.hm + HMCPU.u + HMCPUSVM.HCPhysVmcbHost]
     push    rax                             ; save for the vmload after vmrun
     vmsave
 
-    ; Fight spectre.
-    INDIRECT_BRANCH_PREDICTION_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY
+    ; Fight spectre (trashes rax, rdx and rcx).
+    INDIRECT_BRANCH_PREDICTION_BARRIER rsi, CPUMCTX_WSF_IBPB_ENTRY
 
     ; Setup rax for VMLOAD.
-    mov     rax, [rbp + xCB * 2 + RTHCPHYS_CB] ; HCPhysVmcb (64 bits physical address; take low dword only)
+    mov     rax, r8                        ; HCPhysVmcb (64 bits physical address; take low dword only)
 
     ; Load guest general purpose registers (rax is loaded from the VMCB by VMRUN).
-    mov     rbx, qword [xSI + CPUMCTX.ebx]
-    mov     rcx, qword [xSI + CPUMCTX.ecx]
-    mov     rdx, qword [xSI + CPUMCTX.edx]
-    mov     rdi, qword [xSI + CPUMCTX.edi]
-    mov     rbp, qword [xSI + CPUMCTX.ebp]
-    mov     r8,  qword [xSI + CPUMCTX.r8]
-    mov     r9,  qword [xSI + CPUMCTX.r9]
-    mov     r10, qword [xSI + CPUMCTX.r10]
-    mov     r11, qword [xSI + CPUMCTX.r11]
-    mov     r12, qword [xSI + CPUMCTX.r12]
-    mov     r13, qword [xSI + CPUMCTX.r13]
-    mov     r14, qword [xSI + CPUMCTX.r14]
-    mov     r15, qword [xSI + CPUMCTX.r15]
-    mov     rsi, qword [xSI + CPUMCTX.esi]
+    mov     rbx, qword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.ebx]
+    mov     rcx, qword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.ecx]
+    mov     rdx, qword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.edx]
+    mov     rdi, qword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.edi]
+    mov     rbp, qword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.ebp]
+    mov     r8,  qword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.r8]
+    mov     r9,  qword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.r9]
+    mov     r10, qword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.r10]
+    mov     r11, qword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.r11]
+    mov     r12, qword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.r12]
+    mov     r13, qword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.r13]
+    mov     r14, qword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.r14]
+    mov     r15, qword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.r15]
+    mov     rsi, qword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.esi]
 
     ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
     
@@ -1157,45 +1148,45 @@
     stgi
 
-    ; Pop the context pointer (pushed above) and save the guest GPRs (sans RSP and RAX).
+    ; Pop pVCpu (pushed above) and save the guest GPRs (sans RSP and RAX).
     pop     rax
 
-    mov     qword [rax + CPUMCTX.ebx], rbx
+    mov     qword [rax + VMCPU.cpum.GstCtx + CPUMCTX.ebx], rbx
     mov     rbx, SPECTRE_FILLER
-    mov     qword [rax + CPUMCTX.ecx], rcx
+    mov     qword [rax + VMCPU.cpum.GstCtx + CPUMCTX.ecx], rcx
     mov     rcx, rbx
-    mov     qword [rax + CPUMCTX.edx], rdx
+    mov     qword [rax + VMCPU.cpum.GstCtx + CPUMCTX.edx], rdx
     mov     rdx, rbx
-    mov     qword [rax + CPUMCTX.esi], rsi
+    mov     qword [rax + VMCPU.cpum.GstCtx + CPUMCTX.esi], rsi
     mov     rsi, rbx
-    mov     qword [rax + CPUMCTX.edi], rdi
+    mov     qword [rax + VMCPU.cpum.GstCtx + CPUMCTX.edi], rdi
     mov     rdi, rbx
-    mov     qword [rax + CPUMCTX.ebp], rbp
+    mov     qword [rax + VMCPU.cpum.GstCtx + CPUMCTX.ebp], rbp
     mov     rbp, rbx
-    mov     qword [rax + CPUMCTX.r8],  r8
+    mov     qword [rax + VMCPU.cpum.GstCtx + CPUMCTX.r8],  r8
     mov     r8, rbx
-    mov     qword [rax + CPUMCTX.r9],  r9
+    mov     qword [rax + VMCPU.cpum.GstCtx + CPUMCTX.r9],  r9
     mov     r9, rbx
-    mov     qword [rax + CPUMCTX.r10], r10
+    mov     qword [rax + VMCPU.cpum.GstCtx + CPUMCTX.r10], r10
     mov     r10, rbx
-    mov     qword [rax + CPUMCTX.r11], r11
+    mov     qword [rax + VMCPU.cpum.GstCtx + CPUMCTX.r11], r11
    mov     r11, rbx
-    mov     qword [rax + CPUMCTX.r12], r12
+    mov     qword [rax + VMCPU.cpum.GstCtx + CPUMCTX.r12], r12
     mov     r12, rbx
-    mov     qword [rax + CPUMCTX.r13], r13
+    mov     qword [rax + VMCPU.cpum.GstCtx + CPUMCTX.r13], r13
     mov     r13, rbx
-    mov     qword [rax + CPUMCTX.r14], r14
+    mov     qword [rax + VMCPU.cpum.GstCtx + CPUMCTX.r14], r14
     mov     r14, rbx
-    mov     qword [rax + CPUMCTX.r15], r15
+    mov     qword [rax + VMCPU.cpum.GstCtx + CPUMCTX.r15], r15
     mov     r15, rbx
 
-    ; Fight spectre.  Note! Trashes rax!
+    ; Fight spectre.  Note! Trashes rax, rdx and rcx!
     INDIRECT_BRANCH_PREDICTION_BARRIER rax, CPUMCTX_WSF_IBPB_EXIT
 
     ; Restore the host xcr0 if necessary.
-    pop     xCX
+    pop     rcx
     test    ecx, ecx
     jnz     .xcr0_after_skip
-    pop     xAX
-    pop     xDX
+    pop     rax
+    pop     rdx
     xsetbv                              ; ecx is already zero
 .xcr0_after_skip:
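The save sequence above follows a deliberate two-instruction scrub pattern: each guest GPR is stored to the context and then immediately overwritten with SPECTRE_FILLER, so no guest-controlled value lingers in a register where host-side speculative execution could use it. The pattern, for one register:

    ; store guest value, then poison the register against spectre gadgets
    mov     qword [rax + VMCPU.cpum.GstCtx + CPUMCTX.ebx], rbx
    mov     rbx, SPECTRE_FILLER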
     
@@ -1207,6 +1198,5 @@
 
     popf
-    pop     rbp
-    add     rsp, 6 * xCB
+    pop     rbp                         ; Do not use leave! rbp is trashed.
     ret
 ENDPROC SVMR0VMRun
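Why the "Do not use leave!" warning: leave is shorthand for the sequence below, and its first step would copy a non-frame value into rsp, since rbp was loaded with the guest's value before VMRUN and then overwritten with the spectre filler. The plain pop works because rsp already points at the saved host rbp by the time the epilogue runs.

    ; 'leave' is equivalent to:
    mov     rsp, rbp        ; would use the trashed (guest/filler) rbp!
    pop     rbp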