Changeset 87451 in vbox for trunk/src/VBox/VMM/VMMR0
Timestamp: Jan 27, 2021 10:47:54 AM
svn:sync-xref-src-repo-rev: 142469
File: 1 edited
trunk/src/VBox/VMM/VMMR0/HMR0A.asm
r87444 → r87451

Unmodified:
        ret
ENDPROC VMXDispatchHostNmi

Added (the RESTORE_STATE_VMX macro, moved up here from its old location further down in the file; see the removed block below):

;;
; Common restore logic for success and error paths.  We duplicate this because we
; don't want to waste writing the VINF_SUCCESS return value to the stack in the
; regular code path.
;
; @param 1  Zero if regular return, non-zero if error return.  Controls label emission.
; @param 2  fLoadSaveGuestXcr0 value
; @param 3  The (CPUMCTX_WSF_IBPB_ENTRY | CPUMCTX_WSF_L1D_ENTRY | CPUMCTX_WSF_MDS_ENTRY) + CPUMCTX_WSF_IBPB_EXIT value.
;           The entry values are either all set or not at all, as we're too lazy to flesh out all the variants.
; @param 4  The SSE saving/restoring: 0 to do nothing, 1 to do it manually, 2 to use xsave/xrstor.
;
; @note Important that this does not modify cbFrame or rsp.
%macro RESTORE_STATE_VMX 4
        ; Restore base and limit of the IDTR & GDTR.
 %ifndef VMX_SKIP_IDTR
        lidt    [rsp + cbFrame + frm_saved_idtr]
 %endif
 %ifndef VMX_SKIP_GDTR
        lgdt    [rsp + cbFrame + frm_saved_gdtr]
 %endif

        ; Save the guest state and restore the non-volatile registers.  We use rax=pGstCtx here.
        mov     [rsp + cbFrame + frm_guest_rax], rax
        mov     rax, [rsp + cbFrame + frm_pGstCtx]

        mov     qword [rax + CPUMCTX.ebp], rbp
        lea     rbp, [rsp + cbFrame]            ; re-establish the frame pointer as early as possible.
        mov     qword [rax + CPUMCTX.ecx], rcx
        mov     rcx, SPECTRE_FILLER
        mov     qword [rax + CPUMCTX.edx], rdx
        mov     rdx, [rbp + frm_guest_rax]
        mov     qword [rax + CPUMCTX.eax], rdx
        mov     rdx, rcx
        mov     qword [rax + CPUMCTX.r8], r8
        mov     r8, rcx
        mov     qword [rax + CPUMCTX.r9], r9
        mov     r9, rcx
        mov     qword [rax + CPUMCTX.r10], r10
        mov     r10, rcx
        mov     qword [rax + CPUMCTX.r11], r11
        mov     r11, rcx
        mov     qword [rax + CPUMCTX.esi], rsi
 %ifdef ASM_CALL64_MSC
        mov     rsi, [rbp + frm_saved_rsi]
 %else
        mov     rsi, rcx
 %endif
        mov     qword [rax + CPUMCTX.edi], rdi
 %ifdef ASM_CALL64_MSC
        mov     rdi, [rbp + frm_saved_rdi]
 %else
        mov     rdi, rcx
 %endif
        mov     qword [rax + CPUMCTX.ebx], rbx
        mov     rbx, [rbp + frm_saved_rbx]
        mov     qword [rax + CPUMCTX.r12], r12
        mov     r12, [rbp + frm_saved_r12]
        mov     qword [rax + CPUMCTX.r13], r13
        mov     r13, [rbp + frm_saved_r13]
        mov     qword [rax + CPUMCTX.r14], r14
        mov     r14, [rbp + frm_saved_r14]
        mov     qword [rax + CPUMCTX.r15], r15
        mov     r15, [rbp + frm_saved_r15]

        mov     rdx, cr2
        mov     qword [rax + CPUMCTX.cr2], rdx
        mov     rdx, rcx

 %if %4 != 0
        ; Save the context pointer in r8 for the SSE save/restore.
        mov     r8, rax
 %endif

 %if %3 & CPUMCTX_WSF_IBPB_EXIT
        ; Fight spectre (trashes rax, rdx and rcx).
  %if %1 = 0 ; Skip this in failure branch (=> guru)
        mov     ecx, MSR_IA32_PRED_CMD
        mov     eax, MSR_IA32_PRED_CMD_F_IBPB
        xor     edx, edx
        wrmsr
  %endif
 %endif

 %ifndef VMX_SKIP_TR
        ; Restore TSS selector; must mark it as not busy before using ltr!
        ; ASSUME that this is supposed to be 'BUSY' (saves 20-30 ticks on the T42p).
  %ifndef VMX_SKIP_GDTR
        lgdt    [rbp + frm_saved_gdtr]
  %endif
        movzx   eax, word [rbp + frm_saved_tr]
        mov     ecx, eax
        and     eax, X86_SEL_MASK_OFF_RPL       ; mask away TI and RPL bits leaving only the descriptor offset
        add     rax, [rbp + frm_saved_gdtr + 2] ; eax <- GDTR.address + descriptor offset
        and     dword [rax + 4], ~RT_BIT(9)     ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
        ltr     cx
 %endif
        movzx   edx, word [rbp + frm_saved_ldtr]
        test    edx, edx
        jz      %%skip_ldt_write
        lldt    dx
%%skip_ldt_write:

 %if %1 != 0
.return_after_vmwrite_error:
 %endif
        ; Restore segment registers.
        ;POP_RELEVANT_SEGMENT_REGISTERS rax, ax - currently broken.

 %if %2 != 0
        ; Restore the host XCR0.
        xor     ecx, ecx
        mov     eax, [rbp + frm_uHostXcr0]
        mov     edx, [rbp + frm_uHostXcr0 + 4]
        xsetbv
 %endif
%endmacro ; RESTORE_STATE_VMX

…

Unmodified:
ALIGNCODE(64)
GLOBALNAME RT_CONCAT(hmR0VmxStartVmHostRIP,%1)

Removed: the identical RESTORE_STATE_VMX macro definition, which previously sat here between GLOBALNAME hmR0VmxStartVmHostRIP and its first use; the deleted body matches the added block above line for line.

Unmodified:
        RESTORE_STATE_VMX 0, %2, %3, %4
        mov     eax, VINF_SUCCESS
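A note on the Spectre mitigation in the macro: when CPUMCTX_WSF_IBPB_EXIT is set in parameter 3, the success path issues an indirect branch prediction barrier (IBPB) by writing MSR IA32_PRED_CMD (index 0x49, per the Intel SDM) before returning to the host. As a rough illustration only, assuming GCC-style inline assembly (the helper name and MY_-prefixed macros are hypothetical, not VirtualBox code), the equivalent ring-0 C is:

    #include <stdint.h>

    #define MY_MSR_IA32_PRED_CMD         0x00000049U /* IA32_PRED_CMD, Intel SDM */
    #define MY_MSR_IA32_PRED_CMD_F_IBPB  0x00000001U /* bit 0: indirect branch prediction barrier */

    /* Hypothetical helper, ring-0 only: flush indirect-branch predictor state. */
    static inline void issueIbpb(void)
    {
        __asm__ __volatile__("wrmsr"
                             : /* no outputs */
                             : "c" (MY_MSR_IA32_PRED_CMD),        /* ecx = MSR index         */
                               "a" (MY_MSR_IA32_PRED_CMD_F_IBPB), /* eax = value, low dword  */
                               "d" (0U));                         /* edx = value, high dword */
    }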
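The TSS step deserves a word: ltr raises #GP if the referenced TSS descriptor is already marked busy, so the macro first masks the TI and RPL bits off the saved TR selector to get the descriptor's byte offset, adds the GDT base (stored 2 bytes into the saved GDTR image, after the 16-bit limit field), and clears bit 9 of the descriptor's second dword. A minimal C sketch of that arithmetic, with hypothetical names (not VirtualBox code):

    #include <stdint.h>

    /* Hypothetical sketch: clear the 'busy' flag of the TSS descriptor that
       selector uTr refers to, so a following ltr can reload it without #GP.
       uGdtBase stands for the 8-byte base field of the saved GDTR image. */
    static void clearTssBusyBit(uint64_t uGdtBase, uint16_t uTr)
    {
        uint32_t offDesc = uTr & ~(uint32_t)7;   /* drop TI + RPL, like X86_SEL_MASK_OFF_RPL */
        volatile uint32_t *pu32TypeDword =
            (volatile uint32_t *)(uintptr_t)(uGdtBase + offDesc + 4);
        *pu32TypeDword &= ~(1U << 9);            /* bit 9 of the second dword = TSS busy bit */
    }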
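Finally, the XCR0 restore: xsetbv expects the extended control register index in ecx and the 64-bit value split across edx:eax, which is why the macro loads the saved frm_uHostXcr0 as two separate dwords. A hedged C equivalent (hypothetical helper, GCC-style inline assembly, ring-0 with CR4.OSXSAVE set):

    #include <stdint.h>

    /* Hypothetical sketch, ring-0 only: reload XCR0 with the saved host value. */
    static inline void restoreHostXcr0(uint64_t uHostXcr0)
    {
        __asm__ __volatile__("xsetbv"
                             : /* no outputs */
                             : "c" (0U),                            /* ecx = XCR index 0     */
                               "a" ((uint32_t)uHostXcr0),           /* eax = value, low dword */
                               "d" ((uint32_t)(uHostXcr0 >> 32)));  /* edx = value, high dword */
    }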