- Changeset: r87310
- Timestamp: Jan 19, 2021 11:39:19 PM
- Location: trunk/src/VBox/VMM
- Files: 2 edited, 1 copied
trunk/src/VBox/VMM/Makefile.kmk (r87134 -> r87310)

    The new helper file is added to the VMMR0 source list:

         VMMR0/HMR0.cpp \
         VMMR0/HMR0A.asm \
    +    VMMR0/HMR0UtilA.asm \
         VMMR0/HMVMXR0.cpp \
         VMMR0/HMSVMR0.cpp \
trunk/src/VBox/VMM/VMMR0/HMR0A.asm (r83067 -> r87310)

    The RT_OS_OS2 workaround is replaced by a hard architecture check right after the
    %include lines:

         %include "HMInternal.mac"

    -    %ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
    -     %macro vmwrite 2,
    -        int3
    -     %endmacro
    -     %define vmlaunch int3
    -     %define vmresume int3
    -     %define vmsave   int3
    -     %define vmload   int3
    -     %define vmrun    int3
    -     %define clgi     int3
    -     %define stgi     int3
    -     %macro invlpga 2,
    -        int3
    -     %endmacro
    -    %endif
    +    %ifndef RT_ARCH_AMD64
    +     %error AMD64 only.
    +    %endif

    The instruction-wrapper routines that sat between the header section and the
    %ifdef VBOX_WITH_KERNEL_USING_XMM block (old lines 449-878) are removed; they move
    to the new HMR0UtilA.asm:

    - VMXWriteVmcs64 / VMXWriteVmcs32: execute VMWRITE on a 64-bit or 32-bit VMCS field,
      returning VERR_VMX_INVALID_VMCS_PTR (carry set) or VERR_VMX_INVALID_VMCS_FIELD
      (zero set) on failure.
    - VMXReadVmcs64 / VMXReadVmcs32: execute VMREAD into a caller-supplied buffer, with
      the same error mapping.
    - VMXEnable: executes VMXON, returning VERR_VMX_INVALID_VMXON_PTR or
      VERR_VMX_VMXON_FAILED on failure; VMXDisable: executes VMXOFF.
    - VMXClearVmcs, VMXLoadVmcs, VMXGetCurrentVmcs: execute VMCLEAR, VMPTRLD and VMPTRST
      on a VMCS physical address (VMXGetCurrentVmcs returns VERR_NOT_SUPPORTED on
      RT_OS_OS2).
    - VMXR0InvEPT, VMXR0InvVPID: invalidate EPT/VPID mappings using the hand-encoded
      INVEPT/INVVPID byte sequences (DB 0x66, 0x0F, 0x38, 0x80/0x81, ...), returning
      VERR_VMX_INVALID_VMCS_PTR or VERR_INVALID_PARAMETER on failure.
    - SVMR0InvlpgA: invalidates a guest page with INVLPGA; separate variants for
      GC_ARCH_BITS == 64 and != 64.

    The %ifdef VBOX_WITH_KERNEL_USING_XMM block that follows is unchanged.
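The routines listed above keep the DECLASM prototypes quoted in their comment blocks (for example DECLASM(int) VMXReadVmcs64(uint32_t idxField, uint64_t *pData)), so C callers in VMMR0 only see the symbols move to another object file. As a rough illustration of how such a wrapper is consumed, here is a minimal C sketch; the helper function and its field indices are hypothetical, the prototypes and status codes are the ones quoted in the diff, and the include paths are assumed from the VirtualBox tree:

```c
#include <iprt/types.h>  /* uint32_t, uint64_t and DECLASM (assumed include) */
#include <VBox/err.h>    /* VINF_SUCCESS and the VERR_VMX_* status codes (assumed include) */

/* Prototypes as quoted in the assembly comments above. */
DECLASM(int) VMXReadVmcs64(uint32_t idxField, uint64_t *pData);
DECLASM(int) VMXWriteVmcs64(uint32_t idxField, uint64_t u64Data);

/*
 * Hypothetical helper: read one 64-bit VMCS field and write the value to another.
 * The two failure paths mirror the flag checks the assembly performs after
 * VMREAD/VMWRITE: carry set means no current VMCS (VERR_VMX_INVALID_VMCS_PTR),
 * zero set means an unsupported field encoding (VERR_VMX_INVALID_VMCS_FIELD).
 */
static int sketchCopyVmcsField64(uint32_t idxFieldSrc, uint32_t idxFieldDst)
{
    uint64_t u64Value = 0;
    int rc = VMXReadVmcs64(idxFieldSrc, &u64Value);
    if (rc == VINF_SUCCESS)
        rc = VMXWriteVmcs64(idxFieldDst, u64Value);
    return rc; /* VINF_SUCCESS, VERR_VMX_INVALID_VMCS_PTR or VERR_VMX_INVALID_VMCS_FIELD */
}
```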
trunk/src/VBox/VMM/VMMR0/HMR0UtilA.asm (copied from HMR0A.asm r87305, edited in r87310)

    The copy is cut down to the client-independent instruction wrappers
    (VMXWriteVmcs64/VMXWriteVmcs32, VMXReadVmcs64/VMXReadVmcs32, VMXEnable, VMXDisable,
    VMXClearVmcs, VMXLoadVmcs, VMXGetCurrentVmcs, VMXR0InvEPT, VMXR0InvVPID and
    SVMR0InvlpgA); everything tied to the world switch is stripped out.

    File description:

    -    ; HM - Ring-0 VMX, SVM world-switch and helper routines.
    +    ; HM - Ring-0 VMX & SVM Helpers.

    Includes trimmed to what the helpers need:

         %include "VBox/err.mac"
         %include "VBox/vmm/hm_vmx.mac"
    -    %include "VBox/vmm/cpum.mac"
    -    %include "VBox/vmm/vm.mac"
         %include "iprt/x86.mac"
    -    %include "HMInternal.mac"

    Removed from the copy (all of it remains in HMR0A.asm):

    - the RT_OS_OS2 int3 stand-ins for vmwrite, vmlaunch, vmresume, vmsave, vmload,
      vmrun, clgi, stgi and invlpga;
    - the "Defined Constants And Macros" section: XMM_OFF_IN_X86FXSTATE, the
      SPECTRE_FILLER32/SPECTRE_FILLER64/SPECTRE_FILLER fillers, the VMX_SKIP_GDTR,
      VMX_SKIP_IDTR, VMX_SKIP_TR, VBOX_SKIP_RESTORE_SEG and HM_64_BIT_USE_NULL_SEL
      defines, the MYPUSHAD/MYPOPAD and MYPUSHSEGS/MYPOPSEGS macros (32-bit and
      64-bit GCC/MSC variants), INDIRECT_BRANCH_PREDICTION_BARRIER and
      INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER;
    - the external declaration of CPUMIsGuestFPUStateActive;
    - VMXRestoreHostState and VMXDispatchHostNmi;
    - the VBOX_WITH_KERNEL_USING_XMM wrappers hmR0VMXStartVMWrapXMM and
      hmR0SVMRunWrapXMM;
    - the RESTORE_STATE_VM64 macro and VMXR0StartVM64;
    - hmR0MdsClear and SVMR0VMRun.
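Everything stripped from the copy stays on the world-switch side in HMR0A.asm, including the speculation barriers (INDIRECT_BRANCH_PREDICTION_BARRIER and INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER) used around VMLAUNCH/VMRESUME and VMRUN. As a reading aid, here is a loose C rendering of the entry-side barrier's control flow; the wrmsr/verw stand-ins and the flag bit values are assumptions for illustration, while the MSR command names and the rule that an L1D flush makes a separate MDS buffer flush unnecessary come from the assembly quoted above:

```c
#include <stdint.h>

/* MSR numbers and command bits behind the names used by the assembly
   (it asserts MSR_IA32_PRED_CMD_F_IBPB == MSR_IA32_FLUSH_CMD_F_L1D). */
#define MSR_IA32_PRED_CMD        0x00000049u
#define MSR_IA32_PRED_CMD_F_IBPB 0x00000001u
#define MSR_IA32_FLUSH_CMD       0x0000010bu
#define MSR_IA32_FLUSH_CMD_F_L1D 0x00000001u

/* Illustrative flag bits for CPUMCTX.fWorldSwitcher; the real values live in
   the CPUM headers and are not part of this diff. */
#define WSF_IBPB_ENTRY 0x01u
#define WSF_L1D_ENTRY  0x02u
#define WSF_MDS_ENTRY  0x04u

/* Stand-ins for the privileged wrmsr and verw instructions (no-ops here). */
static void sketchWrMsr(uint32_t idMsr, uint64_t uValue) { (void)idMsr; (void)uValue; }
static void sketchVerw(void)                             { }

void sketchEntryBarriers(uint8_t fWorldSwitcher)
{
    /* One combined test so the common all-disabled case costs a single branch. */
    if (!(fWorldSwitcher & (WSF_IBPB_ENTRY | WSF_L1D_ENTRY | WSF_MDS_ENTRY)))
        return;

    if (fWorldSwitcher & WSF_IBPB_ENTRY)       /* indirect branch prediction barrier */
        sketchWrMsr(MSR_IA32_PRED_CMD, MSR_IA32_PRED_CMD_F_IBPB);

    if (fWorldSwitcher & WSF_L1D_ENTRY)        /* L1D flush; also clears the MDS buffers */
        sketchWrMsr(MSR_IA32_FLUSH_CMD, MSR_IA32_FLUSH_CMD_F_L1D);
    else if (fWorldSwitcher & WSF_MDS_ENTRY)   /* MDS buffer flush via VERW on the DS selector value */
        sketchVerw();
}
```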