VirtualBox

Changeset 87451 in vbox for trunk/src/VBox/VMM/VMMR0


Timestamp: Jan 27, 2021 10:47:54 AM (4 years ago)
Author: vboxsync
svn:sync-xref-src-repo-rev: 142469
Message: VMM/HMR0A.asm: Moved the RESTORE_STATE_VMX macro out of the hmR0VmxStartVmTemplate macro to shut up warnings.
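
The structural change is easy to picture with a toy example. The NASM-style sketch below (hypothetical names, nothing here is taken from HMR0A.asm) shows the post-change layout: the helper macro lives at file scope and is defined exactly once, while the template macro, which is expanded once per generated code variant, merely invokes it. Before the change the helper was defined inside the template body, so every expansion of the template re-emitted the helper's definition, which is presumably what produced the assembler warnings the commit message refers to.

        BITS 64

;; Helper macro at file scope: defined exactly once (the post-change arrangement).
%macro MY_RESTORE_HELPER 1
 %if %1 != 0
        xor     eax, eax                ; stand-in for error-path-only work
 %endif
        ret
%endmacro

;; Template macro, expanded once per code variant; it now only invokes the helper
;; instead of (re)defining it in its body.
%macro MY_START_TEMPLATE 1
myStartVariant%1:
        MY_RESTORE_HELPER 0
%endmacro

MY_START_TEMPLATE 1
MY_START_TEMPLATE 2                     ; further expansions no longer redefine the helper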

File: 1 edited

Legend:

  ' ' Unmodified
  '+' Added
  '-' Removed
  • trunk/src/VBox/VMM/VMMR0/HMR0A.asm

--- trunk/src/VBox/VMM/VMMR0/HMR0A.asm (r87444)
+++ trunk/src/VBox/VMM/VMMR0/HMR0A.asm (r87451)
@@ -559,4 +559,122 @@
         ret
 ENDPROC VMXDispatchHostNmi
+
+
+;;
+; Common restore logic for success and error paths.  We duplicate this because we
+; don't want to waste writing the VINF_SUCCESS return value to the stack in the
+; regular code path.
+;
+; @param    1   Zero if regular return, non-zero if error return.  Controls label emission.
+; @param    2   fLoadSaveGuestXcr0 value
+; @param    3   The (CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_L1D_ENTRY | CPUMCTX_WSF_MDS_ENTRY) + CPUMCTX_WSF_IBPB_EXIT value.
+;               The entry values are either all set or not at all, as we're too lazy to flesh out all the variants.
+; @param    4   The SSE saving/restoring: 0 to do nothing, 1 to do it manually, 2 to use xsave/xrstor.
+;
+; @note Important that this does not modify cbFrame or rsp.
+%macro RESTORE_STATE_VMX 4
+        ; Restore base and limit of the IDTR & GDTR.
+ %ifndef VMX_SKIP_IDTR
+        lidt    [rsp + cbFrame + frm_saved_idtr]
+ %endif
+ %ifndef VMX_SKIP_GDTR
+        lgdt    [rsp + cbFrame + frm_saved_gdtr]
+ %endif
+
+        ; Save the guest state and restore the non-volatile registers.  We use rax=pGstCtx here.
+        mov     [rsp + cbFrame + frm_guest_rax], rax
+        mov     rax, [rsp + cbFrame + frm_pGstCtx]
+
+        mov     qword [rax + CPUMCTX.ebp], rbp
+        lea     rbp, [rsp + cbFrame]    ; re-establish the frame pointer as early as possible.
+        mov     qword [rax + CPUMCTX.ecx], rcx
+        mov     rcx, SPECTRE_FILLER
+        mov     qword [rax + CPUMCTX.edx], rdx
+        mov     rdx, [rbp + frm_guest_rax]
+        mov     qword [rax + CPUMCTX.eax], rdx
+        mov     rdx, rcx
+        mov     qword [rax + CPUMCTX.r8],  r8
+        mov     r8, rcx
+        mov     qword [rax + CPUMCTX.r9],  r9
+        mov     r9, rcx
+        mov     qword [rax + CPUMCTX.r10], r10
+        mov     r10, rcx
+        mov     qword [rax + CPUMCTX.r11], r11
+        mov     r11, rcx
+        mov     qword [rax + CPUMCTX.esi], rsi
+ %ifdef ASM_CALL64_MSC
+        mov     rsi, [rbp + frm_saved_rsi]
+ %else
+        mov     rbx, rcx
+ %endif
+        mov     qword [rax + CPUMCTX.edi], rdi
+ %ifdef ASM_CALL64_MSC
+        mov     rdi, [rbp + frm_saved_rdi]
+ %else
+        mov     rbx, rcx
+ %endif
+        mov     qword [rax + CPUMCTX.ebx], rbx
+        mov     rbx, [rbp + frm_saved_rbx]
+        mov     qword [rax + CPUMCTX.r12], r12
+        mov     r12,  [rbp + frm_saved_r12]
+        mov     qword [rax + CPUMCTX.r13], r13
+        mov     r13,  [rbp + frm_saved_r13]
+        mov     qword [rax + CPUMCTX.r14], r14
+        mov     r14,  [rbp + frm_saved_r14]
+        mov     qword [rax + CPUMCTX.r15], r15
+        mov     r15,  [rbp + frm_saved_r15]
+
+        mov     rdx, cr2
+        mov     qword [rax + CPUMCTX.cr2], rdx
+        mov     rdx, rcx
+
+ %if %4 != 0
+        ; Save the context pointer in r8 for the SSE save/restore.
+        mov     r8, rax
+ %endif
+
+ %if %3 & CPUMCTX_WSF_IBPB_EXIT
+        ; Fight spectre (trashes rax, rdx and rcx).
+  %if %1 = 0 ; Skip this in failure branch (=> guru)
+        mov     ecx, MSR_IA32_PRED_CMD
+        mov     eax, MSR_IA32_PRED_CMD_F_IBPB
+        xor     edx, edx
+        wrmsr
+  %endif
+ %endif
+
+ %ifndef VMX_SKIP_TR
+        ; Restore TSS selector; must mark it as not busy before using ltr!
+        ; ASSUME that this is supposed to be 'BUSY' (saves 20-30 ticks on the T42p).
+  %ifndef VMX_SKIP_GDTR
+        lgdt    [rbp + frm_saved_gdtr]
+  %endif
+        movzx   eax, word [rbp + frm_saved_tr]
+        mov     ecx, eax
+        and     eax, X86_SEL_MASK_OFF_RPL           ; mask away TI and RPL bits leaving only the descriptor offset
+        add     rax, [rbp + frm_saved_gdtr + 2]     ; eax <- GDTR.address + descriptor offset
+        and     dword [rax + 4], ~RT_BIT(9)         ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
+        ltr     cx
+ %endif
+        movzx   edx, word [rbp + frm_saved_ldtr]
+        test    edx, edx
+        jz      %%skip_ldt_write
+        lldt    dx
+%%skip_ldt_write:
+
+ %if %1 != 0
+.return_after_vmwrite_error:
+ %endif
+        ; Restore segment registers.
+        ;POP_RELEVANT_SEGMENT_REGISTERS rax, ax - currently broken.
+
+ %if %2 != 0
+        ; Restore the host XCR0.
+        xor     ecx, ecx
+        mov     eax, [rbp + frm_uHostXcr0]
+        mov     edx, [rbp + frm_uHostXcr0 + 4]
+        xsetbv
+ %endif
+%endmacro ; RESTORE_STATE_VMX
 
 
@@ -867,123 +985,4 @@
 ALIGNCODE(64)
 GLOBALNAME RT_CONCAT(hmR0VmxStartVmHostRIP,%1)
-
- ;;
- ; Common restore logic for success and error paths.  We duplicate this because we
- ; don't want to waste writing the VINF_SUCCESS return value to the stack in the
- ; regular code path.
- ;
- ; @param    1   Zero if regular return, non-zero if error return.  Controls label emission.
- ; @param    2   fLoadSaveGuestXcr0 value
- ; @param    3   The (CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_L1D_ENTRY | CPUMCTX_WSF_MDS_ENTRY) + CPUMCTX_WSF_IBPB_EXIT value.
- ;               The entry values are either all set or not at all, as we're too lazy to flesh out all the variants.
- ; @param    4   The SSE saving/restoring: 0 to do nothing, 1 to do it manually, 2 to use xsave/xrstor.
- ;
- ; @note Important that this does not modify cbFrame or rsp.
- %macro RESTORE_STATE_VMX 4
-        ; Restore base and limit of the IDTR & GDTR.
-  %ifndef VMX_SKIP_IDTR
-        lidt    [rsp + cbFrame + frm_saved_idtr]
-  %endif
-  %ifndef VMX_SKIP_GDTR
-        lgdt    [rsp + cbFrame + frm_saved_gdtr]
-  %endif
-
-        ; Save the guest state and restore the non-volatile registers.  We use rax=pGstCtx here.
-        mov     [rsp + cbFrame + frm_guest_rax], rax
-        mov     rax, [rsp + cbFrame + frm_pGstCtx]
-
-        mov     qword [rax + CPUMCTX.ebp], rbp
-        lea     rbp, [rsp + cbFrame]    ; re-establish the frame pointer as early as possible.
-        mov     qword [rax + CPUMCTX.ecx], rcx
-        mov     rcx, SPECTRE_FILLER
-        mov     qword [rax + CPUMCTX.edx], rdx
-        mov     rdx, [rbp + frm_guest_rax]
-        mov     qword [rax + CPUMCTX.eax], rdx
-        mov     rdx, rcx
-        mov     qword [rax + CPUMCTX.r8],  r8
-        mov     r8, rcx
-        mov     qword [rax + CPUMCTX.r9],  r9
-        mov     r9, rcx
-        mov     qword [rax + CPUMCTX.r10], r10
-        mov     r10, rcx
-        mov     qword [rax + CPUMCTX.r11], r11
-        mov     r11, rcx
-        mov     qword [rax + CPUMCTX.esi], rsi
-  %ifdef ASM_CALL64_MSC
-        mov     rsi, [rbp + frm_saved_rsi]
-  %else
-        mov     rbx, rcx
-  %endif
-        mov     qword [rax + CPUMCTX.edi], rdi
-  %ifdef ASM_CALL64_MSC
-        mov     rdi, [rbp + frm_saved_rdi]
-  %else
-        mov     rbx, rcx
-  %endif
-        mov     qword [rax + CPUMCTX.ebx], rbx
-        mov     rbx, [rbp + frm_saved_rbx]
-        mov     qword [rax + CPUMCTX.r12], r12
-        mov     r12,  [rbp + frm_saved_r12]
-        mov     qword [rax + CPUMCTX.r13], r13
-        mov     r13,  [rbp + frm_saved_r13]
-        mov     qword [rax + CPUMCTX.r14], r14
-        mov     r14,  [rbp + frm_saved_r14]
-        mov     qword [rax + CPUMCTX.r15], r15
-        mov     r15,  [rbp + frm_saved_r15]
-
-        mov     rdx, cr2
-        mov     qword [rax + CPUMCTX.cr2], rdx
-        mov     rdx, rcx
-
-  %if %4 != 0
-        ; Save the context pointer in r8 for the SSE save/restore.
-        mov     r8, rax
-  %endif
-
-  %if %3 & CPUMCTX_WSF_IBPB_EXIT
-        ; Fight spectre (trashes rax, rdx and rcx).
-   %if %1 = 0 ; Skip this in failure branch (=> guru)
-        mov     ecx, MSR_IA32_PRED_CMD
-        mov     eax, MSR_IA32_PRED_CMD_F_IBPB
-        xor     edx, edx
-        wrmsr
-   %endif
-  %endif
-
-  %ifndef VMX_SKIP_TR
-        ; Restore TSS selector; must mark it as not busy before using ltr!
-        ; ASSUME that this is supposed to be 'BUSY' (saves 20-30 ticks on the T42p).
-   %ifndef VMX_SKIP_GDTR
-        lgdt    [rbp + frm_saved_gdtr]
-   %endif
-        movzx   eax, word [rbp + frm_saved_tr]
-        mov     ecx, eax
-        and     eax, X86_SEL_MASK_OFF_RPL           ; mask away TI and RPL bits leaving only the descriptor offset
-        add     rax, [rbp + frm_saved_gdtr + 2]     ; eax <- GDTR.address + descriptor offset
-        and     dword [rax + 4], ~RT_BIT(9)         ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
-        ltr     cx
-  %endif
-        movzx   edx, word [rbp + frm_saved_ldtr]
-        test    edx, edx
-        jz      %%skip_ldt_write
-        lldt    dx
-%%skip_ldt_write:
-
-  %if %1 != 0
-.return_after_vmwrite_error:
-  %endif
-        ; Restore segment registers.
-        ;POP_RELEVANT_SEGMENT_REGISTERS rax, ax - currently broken.
-
-  %if %2 != 0
-        ; Restore the host XCR0.
-        xor     ecx, ecx
-        mov     eax, [rbp + frm_uHostXcr0]
-        mov     edx, [rbp + frm_uHostXcr0 + 4]
-        xsetbv
-  %endif
- %endmacro ; RESTORE_STATE_VMX
-
-
         RESTORE_STATE_VMX 0, %2, %3, %4
         mov     eax, VINF_SUCCESS