Changeset 87330 in vbox
- Timestamp: Jan 20, 2021 7:02:24 PM (4 years ago)
- Location: trunk
- Files: 8 edited
Legend: lines prefixed with + were added, lines prefixed with - were removed, unprefixed lines are unchanged context; a lone … marks omitted unchanged lines between hunks.
trunk/include/VBox/vmm/vm.mac (r86098 → r87330)

     alignb 4096
     .cpum                   resb 4096
+    %define VMCPU.cpum.GstCtx VMCPU.cpum
     alignb 4096
     .em                     resb 40960
trunk/src/VBox/VMM/VMMR0/HMR0A.asm (r87321 → r87330)

 ; @param    1   How to address CPUMCTX.
 ; @param    2   Which flag to test for (CPUMCTX_WSF_IBPB_ENTRY or CPUMCTX_WSF_IBPB_EXIT)
+%macro INDIRECT_BRANCH_PREDICTION_BARRIER_OLD 2
+    test    byte [%1 + CPUMCTX.fWorldSwitcher], %2
+    jz      %%no_indirect_branch_barrier
+    mov     ecx, MSR_IA32_PRED_CMD
+    mov     eax, MSR_IA32_PRED_CMD_F_IBPB
+    xor     edx, edx
+    wrmsr
+%%no_indirect_branch_barrier:
+%endmacro
+
+;;
+; Creates an indirect branch prediction barrier on CPUs that need and supports that.
+; @clobbers eax, edx, ecx
+; @param    1   How to address VMCPU.
+; @param    2   Which flag to test for (CPUMCTX_WSF_IBPB_ENTRY or CPUMCTX_WSF_IBPB_EXIT)
 %macro INDIRECT_BRANCH_PREDICTION_BARRIER 2
-    test    byte [%1 + CPUMCTX.fWorldSwitcher], %2
+    test    byte [%1 + VMCPU.cpum.GstCtx + CPUMCTX.fWorldSwitcher], %2
     jz      %%no_indirect_branch_barrier
     mov     ecx, MSR_IA32_PRED_CMD
…
 ; load the guest ones when necessary.
 ;
-; @cproto DECLASM(int) hmR0SVMRunWrapXMM(RTHCPHYS HCPhysVmcbHost, RTHCPHYS HCPhysVmcb, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu,
-;                                        PFNHMSVMVMRUN pfnVMRun);
+; @cproto DECLASM(int) hmR0SVMRunWrapXMM(PVM pVM, PVMCPU pVCpu, RTHCPHYS HCPhysVmcb, PFNHMSVMVMRUN pfnVMRun);
 ;
 ; @returns eax
 ;
-; @param    HCPhysVmcbHost  msc:rcx
-; @param    HCPhysVmcb      msc:rdx
-; @param    pCtx            msc:r8
-; @param    pVM             msc:r9
-; @param    pVCpu           msc:[rbp+30h]   The cross context virtual CPU structure of the calling EMT.
-; @param    pfnVMRun        msc:[rbp+38h]
+; @param    pVM             msc:rcx
+; @param    pVCpu           msc:rdx     The cross context virtual CPU structure of the calling EMT.
+; @param    HCPhysVmcb      msc:r8
+; @param    pfnVMRun        msc:r9
 ;
 ; @remarks This is essentially the same code as hmR0VMXStartVMWrapXMM, only the parameters differ a little bit.
…
 ;
 ; ASSUMING 64-bit and windows for now.
-ALIGNCODE(16)
+ALIGNCODE(64)
 BEGINPROC hmR0SVMRunWrapXMM
        push    xBP
        mov     xBP, xSP
        sub     xSP, 0b0h + 040h        ; don't bother optimizing the frame size

+%ifndef ASM_CALL64_MSC
+ %error "MSC only"
+%endif
        ; Spill input parameters.
-       mov     [xBP + 010h], rcx       ; HCPhysVmcbHost
-       mov     [xBP + 018h], rdx       ; HCPhysVmcb
-       mov     [xBP + 020h], r8        ; pCtx
-       mov     [xBP + 028h], r9        ; pVM
+       mov     [xBP + 010h], rcx       ; pVM
+       mov     [xBP + 018h], rdx       ; pVCpu
+       mov     [xBP + 020h], r8        ; HCPhysVmcb
+       mov     [xBP + 028h], r9        ; pfnVMRun

        ; Ask CPUM whether we've started using the FPU yet.
-       mov     rcx, [xBP + 30h]        ; pVCpu
+       ;; @todo implement this in assembly, it's just checking a couple of things. Or have the C code do it.
+       mov     rcx, rdx                ; pVCpu
        call    NAME(CPUMIsGuestFPUStateActive)
        test    al, al
+
+       mov     rcx, [xBP + 010h]       ; pVM
+       mov     rdx, [xBP + 018h]       ; pVCpu
+       mov     r8,  [xBP + 020h]       ; HCPhysVmcb
+       mov     r9,  [xBP + 028h]       ; pfnVMRun
+
        jnz     .guest_fpu_state_active

        ; No need to mess with XMM registers just call the start routine and return.
-       mov     r11, [xBP + 38h]        ; pfnVMRun
-       mov     r10, [xBP + 30h]        ; pVCpu
-       mov     [xSP + 020h], r10
-       mov     rcx, [xBP + 010h]       ; HCPhysVmcbHost
-       mov     rdx, [xBP + 018h]       ; HCPhysVmcb
-       mov     r8,  [xBP + 020h]       ; pCtx
-       mov     r9,  [xBP + 028h]       ; pVM
-       call    r11
+       call    r9

        leave
…
 .guest_fpu_state_active:
        ; Save the non-volatile host XMM registers.
+       ;; @todo change to rbp relative addressing as that saves a byte per instruction!
        movdqa  [rsp + 040h + 000h], xmm6
        movdqa  [rsp + 040h + 010h], xmm7
…
        stmxcsr [rsp + 040h + 0a0h]

-       mov     r10, [xBP + 020h]       ; pCtx
-       mov     eax, [r10 + CPUMCTX.fXStateMask]
+       mov     r11, rdx                ; r11 = pVCpu (rdx may get trashed)
+       mov     eax, [rdx + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask]
        test    eax, eax
        jz      .guest_fpu_state_manually
…
        and     eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
        xor     edx, edx
-       mov     r10, [r10 + CPUMCTX.pXStateR0]
+       mov     r10, [r11 + VMCPU.cpum.GstCtx + CPUMCTX.pXStateR0]
        xrstor  [r10]

        ; Make the call (same as in the other case).
-       mov     r11, [xBP + 38h]        ; pfnVMRun
-       mov     r10, [xBP + 30h]        ; pVCpu
-       mov     [xSP + 020h], r10
-       mov     rcx, [xBP + 010h]       ; HCPhysVmcbHost
-       mov     rdx, [xBP + 018h]       ; HCPhysVmcb
-       mov     r8,  [xBP + 020h]       ; pCtx
-       mov     r9,  [xBP + 028h]       ; pVM
-       call    r11
-
-       mov     r11d, eax               ; save return value (xsave below uses eax)
+       mov     rdx, r11                ; restore pVCpu to rdx
+       call    r9
+
+       mov     r10d, eax               ; save return value (xsave below uses eax)

        ; Save the guest XMM registers.
-       mov     r10, [xBP + 020h]       ; pCtx
-       mov     eax, [r10 + CPUMCTX.fXStateMask]
+       mov     rcx, [xBP + 018h]       ; pVCpu
+       mov     eax, [rcx + VMCPU.cpum.GstCtx + CPUMCTX.fXStateMask]
        and     eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
+       mov     rcx, [rcx + VMCPU.cpum.GstCtx + CPUMCTX.pXStateR0]
        xor     edx, edx
-       mov     r10, [r10 + CPUMCTX.pXStateR0]
-       xsave   [r10]
-
-       mov     eax, r11d               ; restore return value
+       xsave   [rcx]
+
+       mov     eax, r10d               ; restore return value

 .restore_non_volatile_host_xmm_regs:
        ; Load the non-volatile host XMM registers.
+       ;; @todo change to rbp relative addressing as that saves a byte per instruction!
        movdqa  xmm6, [rsp + 040h + 000h]
        movdqa  xmm7, [rsp + 040h + 010h]
…
 ; No XSAVE, load and save the guest XMM registers manually.
 ;
+ALIGNCODE(8)
 .guest_fpu_state_manually:
        ; Load the full guest XMM register state.
-       mov     r10, [r10 + CPUMCTX.pXStateR0]
-       movdqa  xmm0,  [r10 + XMM_OFF_IN_X86FXSTATE + 000h]
-       movdqa  xmm1,  [r10 + XMM_OFF_IN_X86FXSTATE + 010h]
-       movdqa  xmm2,  [r10 + XMM_OFF_IN_X86FXSTATE + 020h]
-       movdqa  xmm3,  [r10 + XMM_OFF_IN_X86FXSTATE + 030h]
-       movdqa  xmm4,  [r10 + XMM_OFF_IN_X86FXSTATE + 040h]
-       movdqa  xmm5,  [r10 + XMM_OFF_IN_X86FXSTATE + 050h]
-       movdqa  xmm6,  [r10 + XMM_OFF_IN_X86FXSTATE + 060h]
-       movdqa  xmm7,  [r10 + XMM_OFF_IN_X86FXSTATE + 070h]
-       movdqa  xmm8,  [r10 + XMM_OFF_IN_X86FXSTATE + 080h]
-       movdqa  xmm9,  [r10 + XMM_OFF_IN_X86FXSTATE + 090h]
-       movdqa  xmm10, [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h]
-       movdqa  xmm11, [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h]
-       movdqa  xmm12, [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h]
-       movdqa  xmm13, [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h]
-       movdqa  xmm14, [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h]
-       movdqa  xmm15, [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h]
-       ldmxcsr [r10 + X86FXSTATE.MXCSR]
+       mov     rdx, [r11 + VMCPU.cpum.GstCtx + CPUMCTX.pXStateR0]
+       movdqa  xmm0,  [rdx + XMM_OFF_IN_X86FXSTATE + 000h]
+       movdqa  xmm1,  [rdx + XMM_OFF_IN_X86FXSTATE + 010h]
+       movdqa  xmm2,  [rdx + XMM_OFF_IN_X86FXSTATE + 020h]
+       movdqa  xmm3,  [rdx + XMM_OFF_IN_X86FXSTATE + 030h]
+       movdqa  xmm4,  [rdx + XMM_OFF_IN_X86FXSTATE + 040h]
+       movdqa  xmm5,  [rdx + XMM_OFF_IN_X86FXSTATE + 050h]
+       movdqa  xmm6,  [rdx + XMM_OFF_IN_X86FXSTATE + 060h]
+       movdqa  xmm7,  [rdx + XMM_OFF_IN_X86FXSTATE + 070h]
+       movdqa  xmm8,  [rdx + XMM_OFF_IN_X86FXSTATE + 080h]
+       movdqa  xmm9,  [rdx + XMM_OFF_IN_X86FXSTATE + 090h]
+       movdqa  xmm10, [rdx + XMM_OFF_IN_X86FXSTATE + 0a0h]
+       movdqa  xmm11, [rdx + XMM_OFF_IN_X86FXSTATE + 0b0h]
+       movdqa  xmm12, [rdx + XMM_OFF_IN_X86FXSTATE + 0c0h]
+       movdqa  xmm13, [rdx + XMM_OFF_IN_X86FXSTATE + 0d0h]
+       movdqa  xmm14, [rdx + XMM_OFF_IN_X86FXSTATE + 0e0h]
+       movdqa  xmm15, [rdx + XMM_OFF_IN_X86FXSTATE + 0f0h]
+       ldmxcsr [rdx + X86FXSTATE.MXCSR]

        ; Make the call (same as in the other case).
-       mov     r11, [xBP + 38h]        ; pfnVMRun
-       mov     r10, [xBP + 30h]        ; pVCpu
-       mov     [xSP + 020h], r10
-       mov     rcx, [xBP + 010h]       ; HCPhysVmcbHost
-       mov     rdx, [xBP + 018h]       ; HCPhysVmcb
-       mov     r8,  [xBP + 020h]       ; pCtx
-       mov     r9,  [xBP + 028h]       ; pVM
-       call    r11
+       mov     rdx, r11                ; restore pVCpu to rdx
+       call    r9

        ; Save the guest XMM registers.
-       mov     r10, [xBP + 020h]       ; pCtx
-       mov     r10, [r10 + CPUMCTX.pXStateR0]
-       stmxcsr [r10 + X86FXSTATE.MXCSR]
-       movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
-       movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
-       movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
-       movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
-       movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
-       movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
-       movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
-       movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
-       movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
-       movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
-       movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
-       movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
-       movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
-       movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
-       movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
-       movdqa  [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
+       mov     rdx, [xBP + 018h]       ; pVCpu
+       mov     rdx, [rdx + VMCPU.cpum.GstCtx + CPUMCTX.pXStateR0]
+       stmxcsr [rdx + X86FXSTATE.MXCSR]
+       movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
+       movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
+       movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
+       movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
+       movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
+       movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
+       movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
+       movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
+       movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
+       movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
+       movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
+       movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
+       movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
+       movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
+       movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
+       movdqa  [rdx + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
        jmp     .restore_non_volatile_host_xmm_regs
 ENDPROC   hmR0SVMRunWrapXMM
…
        ; Fight spectre.
-       INDIRECT_BRANCH_PREDICTION_BARRIER xDI, CPUMCTX_WSF_IBPB_EXIT
+       INDIRECT_BRANCH_PREDICTION_BARRIER_OLD xDI, CPUMCTX_WSF_IBPB_EXIT

 %ifndef VMX_SKIP_TR
…
 ;
 ; @returns  VBox status code
-; @param    HCPhysVmcbHost  msc:rcx,gcc:rdi     Physical address of host VMCB.
-; @param    HCPhysVmcb      msc:rdx,gcc:rsi     Physical address of guest VMCB.
-; @param    pCtx            msc:r8,gcc:rdx      Pointer to the guest-CPU context.
-; @param    pVM             msc:r9,gcc:rcx      The cross context VM structure.
-; @param    pVCpu           msc:[rsp+28],gcc:r8 The cross context virtual CPU structure of the calling EMT.
-;
-ALIGNCODE(16)
+; @param    pVM             msc:rcx,gcc:rdi     The cross context VM structure (unused).
+; @param    pVCpu           msc:rdx,gcc:rsi     The cross context virtual CPU structure of the calling EMT.
+; @param    HCPhysVmcb      msc:r8, gcc:rdx     Physical address of guest VMCB.
+;
+ALIGNCODE(64)
 BEGINPROC SVMR0VMRun
-       ; Fake a cdecl stack frame
-%ifdef ASM_CALL64_GCC
-       push    r8              ; pVCpu
-       push    rcx             ; pVM
-       push    rdx             ; pCtx
-       push    rsi             ; HCPhysVmcb
-       push    rdi             ; HCPhysVmcbHost
-%else
-       mov     rax, [rsp + 28h]
-       push    rax             ; rbp + 30h pVCpu
-       push    r9              ; rbp + 28h pVM
-       push    r8              ; rbp + 20h pCtx
-       push    rdx             ; rbp + 18h HCPhysVmcb
-       push    rcx             ; rbp + 10h HCPhysVmcbHost
-%endif
-       push    0               ; rbp + 08h "fake ret addr"
-       push    rbp             ; rbp + 00h
+       push    rbp
        mov     rbp, rsp
        pushf
…
        PUSH_CALLEE_PRESERVED_REGISTERS

-       ; Load pCtx into xSI.
-       mov     xSI, [rbp + xCB * 2 + RTHCPHYS_CB * 2]
+       ; Shuffle parameter registers so that r8=HCPhysVmcb and rsi=pVCpu.  (rdx & rcx will soon be trashed.)
+%ifdef ASM_CALL64_GCC
+       mov     r8, rdx                 ; Put HCPhysVmcb in r8 like on MSC as rdx is trashed below.
+%else
+       mov     rsi, rdx                ; Put pVCpu in rsi like on GCC as rdx is trashed below.
+       ;mov     rdi, rcx                ; Put pVM in rdi like on GCC as rcx is trashed below.
+%endif

        ; Save the host XCR0 and load the guest one if necessary.
-       mov     rax, [xBP + 30h]        ; pVCpu
-       test    byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
+       test    byte [rsi + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
        jz      .xcr0_before_skip

        xor     ecx, ecx
        xgetbv                          ; save the host XCR0 on the stack
-       push    xDX
-       push    xAX
-
-       mov     xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
-       mov     eax, [xSI + CPUMCTX.aXcr]       ; load the guest XCR0
-       mov     edx, [xSI + CPUMCTX.aXcr + 4]
+       push    rdx
+       push    rax
+
+       mov     eax, [rsi + VMCPU.cpum.GstCtx + CPUMCTX.aXcr]       ; load the guest XCR0
+       mov     edx, [rsi + VMCPU.cpum.GstCtx + CPUMCTX.aXcr + 4]
        xor     ecx, ecx                ; paranoia
        xsetbv
…
 .xcr0_before_done:

-       ; Save guest CPU-context pointer for simplifying saving of the GPRs afterwards.
+       ; Save pVCpu pointer for simplifying saving of the GPRs afterwards.
        push    rsi

        ; Save host fs, gs, sysenter msr etc.
-       mov     rax, [rbp + xCB * 2]    ; HCPhysVmcbHost (64 bits physical address; x86: take low dword only)
+       mov     rax, [rsi + VMCPU.hm + HMCPU.u + HMCPUSVM.HCPhysVmcbHost]
        push    rax                     ; save for the vmload after vmrun
        vmsave

-       ; Fight spectre.
-       INDIRECT_BRANCH_PREDICTION_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY
+       ; Fight spectre (trashes rax, rdx and rcx).
+       INDIRECT_BRANCH_PREDICTION_BARRIER rsi, CPUMCTX_WSF_IBPB_ENTRY

        ; Setup rax for VMLOAD.
-       mov     rax, [rbp + xCB * 2 + RTHCPHYS_CB] ; HCPhysVmcb (64 bits physical address; take low dword only)
+       mov     rax, r8                 ; HCPhysVmcb (64 bits physical address; take low dword only)

        ; Load guest general purpose registers (rax is loaded from the VMCB by VMRUN).
-       mov     rbx, qword [xSI + CPUMCTX.ebx]
-       mov     rcx, qword [xSI + CPUMCTX.ecx]
-       mov     rdx, qword [xSI + CPUMCTX.edx]
-       mov     rdi, qword [xSI + CPUMCTX.edi]
-       mov     rbp, qword [xSI + CPUMCTX.ebp]
-       mov     r8,  qword [xSI + CPUMCTX.r8]
-       mov     r9,  qword [xSI + CPUMCTX.r9]
-       mov     r10, qword [xSI + CPUMCTX.r10]
-       mov     r11, qword [xSI + CPUMCTX.r11]
-       mov     r12, qword [xSI + CPUMCTX.r12]
-       mov     r13, qword [xSI + CPUMCTX.r13]
-       mov     r14, qword [xSI + CPUMCTX.r14]
-       mov     r15, qword [xSI + CPUMCTX.r15]
-       mov     rsi, qword [xSI + CPUMCTX.esi]
+       mov     rbx, qword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.ebx]
+       mov     rcx, qword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.ecx]
+       mov     rdx, qword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.edx]
+       mov     rdi, qword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.edi]
+       mov     rbp, qword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.ebp]
+       mov     r8,  qword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.r8]
+       mov     r9,  qword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.r9]
+       mov     r10, qword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.r10]
+       mov     r11, qword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.r11]
+       mov     r12, qword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.r12]
+       mov     r13, qword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.r13]
+       mov     r14, qword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.r14]
+       mov     r15, qword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.r15]
+       mov     rsi, qword [rsi + VMCPU.cpum.GstCtx + CPUMCTX.esi]

        ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
…
        stgi

-       ; Pop the context pointer (pushed above) and save the guest GPRs (sans RSP and RAX).
+       ; Pop pVCpu (pushed above) and save the guest GPRs (sans RSP and RAX).
        pop     rax

-       mov     qword [rax + CPUMCTX.ebx], rbx
+       mov     qword [rax + VMCPU.cpum.GstCtx + CPUMCTX.ebx], rbx
        mov     rbx, SPECTRE_FILLER
-       mov     qword [rax + CPUMCTX.ecx], rcx
+       mov     qword [rax + VMCPU.cpum.GstCtx + CPUMCTX.ecx], rcx
        mov     rcx, rbx
-       mov     qword [rax + CPUMCTX.edx], rdx
+       mov     qword [rax + VMCPU.cpum.GstCtx + CPUMCTX.edx], rdx
        mov     rdx, rbx
-       mov     qword [rax + CPUMCTX.esi], rsi
+       mov     qword [rax + VMCPU.cpum.GstCtx + CPUMCTX.esi], rsi
        mov     rsi, rbx
-       mov     qword [rax + CPUMCTX.edi], rdi
+       mov     qword [rax + VMCPU.cpum.GstCtx + CPUMCTX.edi], rdi
        mov     rdi, rbx
-       mov     qword [rax + CPUMCTX.ebp], rbp
+       mov     qword [rax + VMCPU.cpum.GstCtx + CPUMCTX.ebp], rbp
        mov     rbp, rbx
-       mov     qword [rax + CPUMCTX.r8], r8
+       mov     qword [rax + VMCPU.cpum.GstCtx + CPUMCTX.r8], r8
        mov     r8, rbx
-       mov     qword [rax + CPUMCTX.r9], r9
+       mov     qword [rax + VMCPU.cpum.GstCtx + CPUMCTX.r9], r9
        mov     r9, rbx
-       mov     qword [rax + CPUMCTX.r10], r10
+       mov     qword [rax + VMCPU.cpum.GstCtx + CPUMCTX.r10], r10
        mov     r10, rbx
-       mov     qword [rax + CPUMCTX.r11], r11
+       mov     qword [rax + VMCPU.cpum.GstCtx + CPUMCTX.r11], r11
        mov     r11, rbx
-       mov     qword [rax + CPUMCTX.r12], r12
+       mov     qword [rax + VMCPU.cpum.GstCtx + CPUMCTX.r12], r12
        mov     r12, rbx
-       mov     qword [rax + CPUMCTX.r13], r13
+       mov     qword [rax + VMCPU.cpum.GstCtx + CPUMCTX.r13], r13
        mov     r13, rbx
-       mov     qword [rax + CPUMCTX.r14], r14
+       mov     qword [rax + VMCPU.cpum.GstCtx + CPUMCTX.r14], r14
        mov     r14, rbx
-       mov     qword [rax + CPUMCTX.r15], r15
+       mov     qword [rax + VMCPU.cpum.GstCtx + CPUMCTX.r15], r15
        mov     r15, rbx

-       ; Fight spectre.  Note! Trashes rax!
+       ; Fight spectre.  Note! Trashes rax, rdx and rcx!
        INDIRECT_BRANCH_PREDICTION_BARRIER rax, CPUMCTX_WSF_IBPB_EXIT

        ; Restore the host xcr0 if necessary.
-       pop     xCX
+       pop     rcx
        test    ecx, ecx
        jnz     .xcr0_after_skip
-       pop     xAX
-       pop     xDX
+       pop     rax
+       pop     rdx
        xsetbv                          ; ecx is already zero
 .xcr0_after_skip:
…
        popf
-       pop     rbp
-       add     rsp, 6 * xCB
+       pop     rbp                     ; Do not use leave! rbp is trashed.
        ret
 ENDPROC SVMR0VMRun
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r87309 → r87330)

 {
     /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */
-    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
-    pCtx->fExtrn |= HMSVM_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM;
+    pVCpu->cpum.GstCtx.fExtrn |= HMSVM_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM;

     /*
…
     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
 #ifdef VBOX_WITH_KERNEL_USING_XMM
-    return hmR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, HCPhysVmcb, pCtx, pVM, pVCpu, pVCpu->hm.s.svm.pfnVMRun);
+    return hmR0SVMRunWrapXMM(pVM, pVCpu, HCPhysVmcb, pVCpu->hm.s.svm.pfnVMRun);
+    //pVCpu->hm.s.svm.HCPhysVmcbHost, HCPhysVmcb, pVM, pVCpu, pVCpu->hm.s.svm.pfnVMRun);
 #else
-    return pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, HCPhysVmcb, pCtx, pVM, pVCpu);
+    return pVCpu->hm.s.svm.pfnVMRun(pVM, pvCpu, HCPhysVmcb);
 #endif
 }
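Note on the new calling convention: the VMRun entry point drops from five arguments (of which the fifth, pVCpu, had to be fetched from the stack at [rsp+28h] under the Microsoft x64 ABI) to three register arguments; the guest context and the host VMCB address are now both reached through pVCpu. A minimal C++ sketch of the two signatures, using simplified stand-in types rather than the real VBox structures:

    #include <cstdint>

    typedef uint64_t RTHCPHYS;          // host-context physical address
    struct VM;                          // stand-in for the cross context VM structure
    struct VMCPU;                       // stand-in for the cross context virtual CPU structure

    // Old convention: five parameters; under the MSC x64 ABI only the first
    // four travel in registers, so pVCpu spilled to the stack at [rsp+28h].
    typedef int FNVMRUNOLD(RTHCPHYS HCPhysVmcbHost, RTHCPHYS HCPhysVmcb,
                           void *pCtx, VM *pVM, VMCPU *pVCpu);

    // New convention: three parameters, all in registers on both MSC and GCC;
    // pCtx and HCPhysVmcbHost are reached through pVCpu instead.
    typedef int FNVMRUNNEW(VM *pVM, VMCPU *pVCpu, RTHCPHYS HCPhysVmcb);

This is why the assembly above no longer needs the fake cdecl stack frame: every input is already in a register on entry.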
trunk/src/VBox/VMM/VMMR0/HMSVMR0.h (r83066 → r87330)

 *
 * @returns VBox status code.
- * @param   pVMCBHostPhys   Physical address of host VMCB.
- * @param   pVMCBPhys       Physical address of the VMCB.
- * @param   pCtx            Pointer to the guest CPU context.
 * @param   pVM             The cross context VM structure. (Not used.)
- * @param   pVCpu           The cross context virtual CPU structure. (Not used.)
+ * @param   pVCpu           The cross context virtual CPU structure.
+ * @param   HCPhyspVMCB     Physical address of the VMCB.
 */
-DECLASM(int) SVMR0VMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVMCC pVM, PVMCPUCC pVCpu);
+DECLASM(int) SVMR0VMRun(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB);

 /**
trunk/src/VBox/VMM/include/HMInternal.h (r86183 → r87330)

  * non-standard C++), we have a little hack to make them palatable.
  */
+/** @def HM_NAMELESS_UNION_TAG
+ * For tagging a nameless union so tstASMStructs.cpp can find check the nested
+ * structures within the union.
+ */
 #ifdef VBOX_FOR_DTRACE_LIB
-# define HM_UNION_NM(a_Nm)  a_Nm
-# define HM_STRUCT_NM(a_Nm) a_Nm
+# define HM_UNION_NM(a_Nm)              a_Nm
+# define HM_STRUCT_NM(a_Nm)             a_Nm
+# define HM_NAMELESS_UNION_TAG(a_Tag)
 #elif defined(IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS)
-# define HM_UNION_NM(a_Nm)  a_Nm
-# define HM_STRUCT_NM(a_Nm) a_Nm
+# define HM_UNION_NM(a_Nm)              a_Nm
+# define HM_STRUCT_NM(a_Nm)             a_Nm
+# define HM_NAMELESS_UNION_TAG(a_Tag)   a_Tag
 #else
 # define HM_UNION_NM(a_Nm)
 # define HM_STRUCT_NM(a_Nm)
+# define HM_NAMELESS_UNION_TAG(a_Tag)
 #endif
…
 typedef R0PTRTYPE(FNHMVMXSTARTVM *) PFNHMVMXSTARTVM;

-/** SVM VMRun function. */
-typedef DECLCALLBACKTYPE(int, FNHMSVMVMRUN,(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVMCC pVM, PVMCPUCC pVCpu));
+/** SVM VMRun function, see SVMR0VMRun(). */
+typedef DECLCALLBACKTYPE(int, FNHMSVMVMRUN,(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB));
 /** Pointer to a SVM VMRun function. */
 typedef R0PTRTYPE(FNHMSVMVMRUN *) PFNHMSVMVMRUN;
…
     uint64_t                    fCtxChanged;

-    union /* no tag! */
+    union HM_NAMELESS_UNION_TAG(HMCPUUNION) /* no tag! */
     {
         /** VT-x data. */
-        struct
+        struct HM_NAMELESS_UNION_TAG(HMCPUVMX)
         {
             /** @name Guest information.
…

         /** SVM data. */
-        struct
+        struct HM_NAMELESS_UNION_TAG(HMCPUSVM)
         {
             /** Ring 0 handlers for VT-x. */
…
             /** R0 memory object for the host VMCB which holds additional host-state. */
             RTR0MEMOBJ                  hMemObjVmcbHost;
-            /** Padding. */
+            /** Padding.
+             * @todo remove, pointless now */
             R0PTRTYPE(void *)           pvPadding;

…
 DECLASM(int)  hmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, void *pvUnused, PVMCC pVM, PVMCPUCC pVCpu,
                                     PFNHMVMXSTARTVM pfnStartVM);
-DECLASM(int)  hmR0SVMRunWrapXMM(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVMCC pVM, PVMCPUCC pVCpu,
-                                PFNHMSVMVMRUN pfnVMRun);
+DECLASM(int)  hmR0SVMRunWrapXMM(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhyspVMCB, PFNHMSVMVMRUN pfnVMRun);
 # endif
 DECLASM(void) hmR0MdsClear(void);
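The HM_NAMELESS_UNION_TAG hack above gives the otherwise anonymous union and its nested structs tag names when IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS is defined, so an external testcase can name the nested types. A self-contained C++ sketch of the same trick, with invented names (Outer, OuterUnion, InnerA/InnerB are not the real HM structures):

    // Define WANT_TAGS to emit tag names (mirrors IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS).
    #define WANT_TAGS 1
    #ifdef WANT_TAGS
    # define NAMELESS_UNION_TAG(a_Tag) a_Tag
    #else
    # define NAMELESS_UNION_TAG(a_Tag)      /* expands to nothing in normal builds */
    #endif

    struct Outer
    {
        union NAMELESS_UNION_TAG(OuterUnion)
        {
            struct NAMELESS_UNION_TAG(InnerA) { int  x; } a;
            struct NAMELESS_UNION_TAG(InnerB) { long y; } b;
        } u;
    };

    #ifdef WANT_TAGS
    // With the tags in place the nested types can be named from outside the
    // struct, which is what tstAsmStructs.cpp does for HMCPUVMX and HMCPUSVM:
    typedef Outer::OuterUnion::InnerA InnerA;
    static_assert(sizeof(InnerA) == sizeof(int), "nested type is now nameable");
    #endif

In normal builds the macro expands to nothing and the union stays nameless, so the tags cost nothing outside the testcase configuration.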
trunk/src/VBox/VMM/include/HMInternal.mac (r82968 → r87330)

 ; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 ;
+
+%ifndef VMX_VMCS_GUEST_FIELD_ES
+ %include "VBox/vmm/hm_vmx.mac"        ; For VMXRESTOREHOST
+%endif
+
+
+struc VMXVMCSINFO
+    .pfnStartVM             RTR0PTR_RES 1
+    .HCPhysEPTP             RTHCPHYS_RES 1
+    .fVmcsState             resd 1
+    .fShadowVmcsState       resd 1
+    .idHostCpuState         resd 1
+    .idHostCpuExec          resd 1
+    .cEntryMsrLoad          resd 1
+    .cExitMsrStore          resd 1
+    .cExitMsrLoad           resd 1
+
+    .u32PinCtls             resd 1
+    .u32ProcCtls            resd 1
+    .u32ProcCtls2           resd 1
+    .u32EntryCtls           resd 1
+    .u32ExitCtls            resd 1
+    .u32XcptBitmap          resd 1
+    .u32XcptPFMask          resd 1
+    .u32XcptPFMatch         resd 1
+
+    alignb 8
+    .u64TscOffset           resq 1
+    .u64VmcsLinkPtr         resq 1
+    .u64Cr0Mask             resq 1
+    .u64Cr4Mask             resq 1
+
+    .pvVmcs                 RTR0PTR_RES 1
+    .pvShadowVmcs           RTR0PTR_RES 1
+    .pbVirtApic             RTR0PTR_RES 1
+    .pvMsrBitmap            RTR0PTR_RES 1
+    .pvGuestMsrLoad         RTR0PTR_RES 1
+    .pvGuestMsrStore        RTR0PTR_RES 1
+    .pvHostMsrLoad          RTR0PTR_RES 1
+
+    .fWasInRealMode         resb 1
+    .fSwitchedTo64on32Obsolete resb 1
+    alignb 8
+    .RealMode.AttrCS        resd 1
+    .RealMode.AttrDS        resd 1
+    .RealMode.AttrES        resd 1
+    .RealMode.AttrFS        resd 1
+    .RealMode.AttrGS        resd 1
+    .RealMode.AttrSS        resd 1
+    .RealMode.Eflags        resd 1      ; should be EFlags?
+    .RealMode.fRealOnV86Active resb 1
+
+    alignb 8
+    .HCPhysVmcs             RTHCPHYS_RES 1
+    .HCPhysShadowVmcs       RTHCPHYS_RES 1
+    .HCPhysVirtApic         RTHCPHYS_RES 1
+    .HCPhysMsrBitmap        RTHCPHYS_RES 1
+    .HCPhysGuestMsrLoad     RTHCPHYS_RES 1
+    .HCPhysGuestMsrStore    RTHCPHYS_RES 1
+    .HCPhysHostMsrLoad      RTHCPHYS_RES 1
+
+    .hMemObj                RTR0PTR_RES 1
+
+    alignb 8
+    .au64LbrFromIpMsr       resq 32
+    .au64LbrToIpMsr         resq 32
+    .u64LbrTosMsr           resq 1
+endstruc
+
+struc HMCPUVMX
+    .VmcsInfo               resb VMXVMCSINFO_size
+    .VmcsInfoNstGst         resb VMXVMCSINFO_size
+    .fSwitchedToNstGstVmcs  resb 1
+    .fMergedNstGstCtls      resb 1
+    .fCopiedNstGstToShadowVmcs resb 1
+    .fSwitchedNstGstFlushTlb resb 1
+
+    alignb 8
+    .u64GstMsrApicBase      resq 1
+
+    .u64HostMsrLStar        resq 1
+    .u64HostMsrStar         resq 1
+    .u64HostMsrSfMask       resq 1
+    .u64HostMsrKernelGsBase resq 1
+    .fLazyMsrs              resd 1
+    .fUpdatedHostAutoMsrs   resb 1
+    alignb 4
+    .fRestoreHostFlags      resd 1
+    alignb 8
+    .RestoreHost            resb VMXRESTOREHOST_size
+
+    .LastError.idCurrentCpu     resd 1
+    .LastError.idEnteredCpu     resd 1
+    .LastError.HCPhysCurrentVmcs resq 1
+    .LastError.u32VmcsRev       resd 1
+    .LastError.u32InstrError    resd 1
+    .LastError.u32ExitReason    resd 1
+    .LastError.u32GuestIntrState resd 1
+endstruc
+
+struc HMCPUSVM
+    .pfnVMRun               RTR0PTR_RES 1
+    .HCPhysVmcbHost         RTHCPHYS_RES 1
+
+    .hMemObjVmcbHost        RTR0PTR_RES 1
+    .pvPadding              RTR0PTR_RES 1   ; pointless padding
+
+    .HCPhysVmcb             RTHCPHYS_RES 1
+    .hMemObjVmcb            RTR0PTR_RES 1
+    .pVmcb                  RTR0PTR_RES 1
+
+    .HCPhysMsrBitmap        RTHCPHYS_RES 1
+    .hMemObjMsrBitmap       RTR0PTR_RES 1
+    .pvMsrBitmap            RTR0PTR_RES 1
+
+    .fSyncVTpr              resb 1
+    .fEmulateLongModeSysEnterExit resb 1
+
+    alignb 8
+    .u64HostTscAux          resq 1
+
+    .NstGstVmcbCache        resb 40
+endstruc

 struc HMCPU
…
     .u32HMError             resd 1
     .rcLastExitToR3         resd 1
+    alignb 8
     .fCtxChanged            resq 1

-    ; incomplete to save unnecessary pain...
+    alignb 8
+%if HMCPUVMX_size > HMCPUSVM_size
+    .u                      resb HMCPUVMX_size
+%else
+    .u                      resb HMCPUSVM_size
+%endif
+
+    .Event.fPending         resd 1
+    .Event.u32ErrCode       resd 1
+    .Event.cbInstr          resd 1
+    alignb 8
+    .Event.u64IntInfo       resq 1
+    .Event.GCPtrFaultAddress RTGCPTR_RES 1
+
+    .idEnteredCpu           resd 1
+    .enmShadowMode          resd 1
+    alignb 8
+    .aPdpes                 resq 4
+
+    ; The remainer is disassembly state and statistics.
 endstruc
trunk/src/VBox/VMM/testcase/tstAsmStructs.cpp (r82968 → r87330)

 #include <VBox/vmm/cpum.h>
 #include "CPUMInternal.h"
+#define IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS 1 /* For HMInternal */
 #include "HMInternal.h"
+#undef IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS    /* probably not necessary */
 #include "VMMInternal.h"
 #include <VBox/vmm/vm.h>
…
 #include <stdio.h>

+/* Hack for validating nested HMCPU structures. */
+typedef HMCPU::HMCPUUNION::HMCPUVMX HMCPUVMX;
+typedef HMCPU::HMCPUUNION::HMCPUSVM HMCPUSVM;

 /* For sup.mac simplifications. */
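The typedefs above let the testcase check the offsets of the nested HMCPU structures against the assembly strucs now defined in HMInternal.mac. A rough C++ sketch of the kind of cross-check tstAsmStructs performs (the structure, field, and expected offset here are invented for illustration; the real test compares against offsets emitted by the assembler):

    #include <cstddef>
    #include <cstdio>

    struct SAMPLECPU { unsigned fFlags; unsigned long long u64Value; };

    // In the real testcase this value comes from assembling the .mac struc;
    // 8 is simply what natural alignment gives us for this sample layout.
    static const size_t g_offAsmU64Value = 8;

    int main()
    {
        if (offsetof(SAMPLECPU, u64Value) != g_offAsmU64Value)
        {
            std::fprintf(stderr, "offset mismatch: C++ %zu vs asm %zu\n",
                         offsetof(SAMPLECPU, u64Value), g_offAsmU64Value);
            return 1;
        }
        return 0;
    }

If the C++ compiler and the assembler ever disagree on a layout, code like SVMR0VMRun above would read garbage through VMCPU.cpum.GstCtx, so this check is what keeps the two views in sync.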
trunk/src/VBox/VMM/testcase/tstAsmStructsAsm.asm (r82968 → r87330)

 %include "VBox/vmm/cpum.mac"
 %include "VBox/vmm/vm.mac"
-%include "VBox/vmm/hm_vmx.mac"
 %include "VBox/sup.mac"
 %ifdef DO_GLOBALS