Changeset 55290 in vbox for trunk/src/VBox
- Timestamp: Apr 15, 2015 3:04:30 PM (10 years ago)
- svn:sync-xref-src-repo-rev: 99609
- Location: trunk/src/VBox/VMM
- Files: 8 edited
trunk/src/VBox/VMM/VMMR0/HMR0A.asm
(r55048 → r55290)

  ;
- ; Copyright (C) 2006-2013 Oracle Corporation
+ ; Copyright (C) 2006-2015 Oracle Corporation
  ;
  ; This file is part of VirtualBox Open Source Edition (OSE), as
…
  %include "VBox/vmm/hm_vmx.mac"
  %include "VBox/vmm/cpum.mac"
+ %include "VBox/vmm/vm.mac"
  %include "iprt/x86.mac"
  %include "HMInternal.mac"
…
  ;
- ; DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache /*, PVM pVM, PVMCPU pVCpu*/);
+ ; DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
  ALIGNCODE(16)
  BEGINPROC VMXR0StartVM32
…
      mov     esi, [rsp + 20h + 18h]      ; pCtx
      mov     edx, [rsp + 20h + 1Ch]      ; pCache
+     mov     ecx, [rsp + 20h + 20h]      ; pVM
+     mov     r8,  [rsp + 20h + 24h]      ; pVCpu
      call    NAME(VMXR0StartVM32_64)
      add     esp, 20h
…
- ; DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache /*, PVM pVM, PVMCPU pVCpu*/);
+ ; DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
  ALIGNCODE(16)
  BEGINPROC VMXR0StartVM64
…
      mov     esi, [rsp + 20h + 18h]      ; pCtx
      mov     edx, [rsp + 20h + 1Ch]      ; pCache
+     mov     ecx, [rsp + 20h + 20h]      ; pVM
+     mov     r8,  [rsp + 20h + 24h]      ; pVCpu
      call    NAME(VMXR0StartVM64_64)
      add     esp, 20h
…
  ENDPROC VMXR0StartVM64

- ;DECLASM(int) SVMR0VMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx /*, PVM pVM, PVMCPU pVCpu*/);
+ ;DECLASM(int) SVMR0VMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
  ALIGNCODE(16)
  BEGINPROC SVMR0VMRun
…
      mov     rsi, [rsp + 20h + 1Ch]      ; pVMCBPhys
      mov     edx, [rsp + 20h + 24h]      ; pCtx
+     mov     ecx, [rsp + 20h + 20h]      ; pVM
+     mov     r8,  [rsp + 20h + 24h]      ; pVCpu
      call    NAME(SVMR0VMRun_64)
      add     esp, 20h
…
- ; DECLASM(int) SVMR0VMRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx /*, PVM pVM, PVMCPU pVCpu*/);
+ ; DECLASM(int) SVMR0VMRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
  ALIGNCODE(16)
  BEGINPROC SVMR0VMRun64
…
      mov     rsi, [rbp + 20h + 1Ch]      ; pVMCBPhys
      mov     edx, [rbp + 20h + 24h]      ; pCtx
+     mov     ecx, [rsp + 20h + 20h]      ; pVM
+     mov     r8,  [rsp + 20h + 24h]      ; pVCpu
      call    NAME(SVMR0VMRun64_64)
      add     esp, 20h
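The substance of this file's changes: the thunks that hand over to the 64-bit worker code now forward two extra arguments, pVM and pVCpu, so the worker can reach the per-VCPU XCR0 flag (see HMR0Mixed.mac below). The prototypes below are the ones documented in the assembly comments above; the calling function is only a hypothetical sketch, not code from this changeset.

    /* Prototypes as documented in the assembly above. */
    DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
    DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
    DECLASM(int) SVMR0VMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);

    /* Hypothetical VT-x call site: every caller now has to supply pVM and pVCpu as well. */
    static int hmSketchRunGuestVmx(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PVMCSCACHE pCache, bool fResumeVM)
    {
        return VMXR0StartVM64(fResumeVM, pCtx, pCache, pVM, pVCpu);
    }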
trunk/src/VBox/VMM/VMMR0/HMR0Mixed.mac
(r50428 → r55290)

      MYPOPSEGS xAX, ax

+     ; Restore the host XCR0 if necessary.
+     pop     xCX
+     test    ecx, ecx
+     jnz     %%xcr0_after_skip
+     pop     xAX
+     pop     xDX
+     xsetbv                              ; ecx is already zero.
+ %%xcr0_after_skip:
+
      ; Restore general purpose registers.
      MYPOPAD
…
- ;/**
- ; * Prepares for and executes VMLAUNCH/VMRESUME (32 bits guest mode)
- ; *
- ; * @returns VBox status code
- ; * @param fResume    x86:[ebp+8], msc:rcx,gcc:rdi     Whether to use vmlauch/vmresume.
- ; * @param pCtx       x86:[ebp+c], msc:rdx,gcc:rsi     Pointer to the guest-CPU context.
- ; * @param pCache     x86:[esp+10],msc:r8, gcc:rdx     Pointer to the VMCS cache.
- ; */
+ ;;
+ ; Prepares for and executes VMLAUNCH/VMRESUME (32 bits guest mode)
+ ;
+ ; @returns VBox status code
+ ; @param fResume    x86:[ebp+8], msc:rcx,gcc:rdi       Whether to use vmlauch/vmresume.
+ ; @param pCtx       x86:[ebp+c], msc:rdx,gcc:rsi       Pointer to the guest-CPU context.
+ ; @param pCache     x86:[ebp+10],msc:r8, gcc:rdx       Pointer to the VMCS cache.
+ ; @param pVM        x86:[ebp+14],msc:r9, gcc:rcx       Pointer to the cross context VM structure.
+ ; @param pVCpu      x86:[ebp+18],msc:[ebp+30],gcc:r8   Pointer to the cross context VMCPU structure.
+ ;
  ALIGNCODE(16)
  BEGINPROC MY_NAME(VMXR0StartVM32)
…
      cli

+     ;
      ; Save all general purpose host registers.
+     ;
      MYPUSHAD

-     ; First we have to save some final CPU context registers.
+     ;
+     ; First we have to write some final guest CPU context registers.
+     ;
      mov     eax, VMX_VMCS_HOST_RIP
  %ifdef RT_ARCH_AMD64
…
      ; Note: assumes success!

-     ; Save guest-CPU context pointer.
+     ;
+     ; Unify input parameter registers.
+     ;
  %ifdef RT_ARCH_AMD64
   %ifdef ASM_CALL64_GCC
…
  %endif

+     ;
+     ; Save the host XCR0 and load the guest one if necessary.
+     ; Note! Trashes rdx and rcx.
+     ;
+ %ifdef ASM_CALL64_MSC
+     mov     rax, [xBP + 30h]            ; pVCpu
+ %elifdef ASM_CALL64_GCC
+     mov     rax, r8                     ; pVCpu
+ %else
+     mov     eax, [xBP + 18h]            ; pVCpu
+ %endif
+     test    byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
+     jz      .xcr0_before_skip
+
+     xor     ecx, ecx
+     xgetbv                              ; Save the host one on the stack.
+     push    xDX
+     push    xAX
+
+     mov     eax, [xSI + CPUMCTX.aXcr]   ; Load the guest one.
+     mov     edx, [xSI + CPUMCTX.aXcr + 4]
+     xor     ecx, ecx                    ; paranoia
+     xsetbv
+
+     push    0                           ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
+     jmp     .xcr0_before_done
+
+ .xcr0_before_skip:
+     push    3fh                         ; indicate that we need not.
+ .xcr0_before_done:
+
+     ;
      ; Save segment registers.
-     ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case).
+     ; Note! Trashes rdx & rcx, so we moved it here (amd64 case).
+     ;
      MYPUSHSEGS xAX, ax
…
      MYPOPSEGS xAX, ax

+     ; Restore the host XCR0 if necessary.
+     pop     xCX
+     test    ecx, ecx
+     jnz     %%xcr0_after_skip
+     pop     xAX
+     pop     xDX
+     xsetbv                              ; ecx is already zero.
+ %%xcr0_after_skip:
+
      ; Restore general purpose registers.
      MYPOPAD
…
- ;/**
- ; * Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode)
- ; *
- ; * @returns VBox status code
- ; * @param fResume    msc:rcx, gcc:rdi     Whether to use vmlauch/vmresume.
- ; * @param pCtx       msc:rdx, gcc:rsi     Pointer to the guest-CPU context.
- ; * @param pCache     msc:r8,  gcc:rdx     Pointer to the VMCS cache.
- ; */
+ ;;
+ ; Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode)
+ ;
+ ; @returns VBox status code
+ ; @param fResume    msc:rcx, gcc:rdi       Whether to use vmlauch/vmresume.
+ ; @param pCtx       msc:rdx, gcc:rsi       Pointer to the guest-CPU context.
+ ; @param pCache     msc:r8,  gcc:rdx       Pointer to the VMCS cache.
+ ; @param pVM        msc:r9,  gcc:rcx       Pointer to the cross context VM structure.
+ ; @param pVCpu      msc:[ebp+30], gcc:r8   Pointer to the cross context VMCPU structure.
+ ;
  ALIGNCODE(16)
  BEGINPROC MY_NAME(VMXR0StartVM64)
…
      ; Note: assumes success!

-     ; Save guest-CPU context pointer.
+     ;
+     ; Unify the input parameter registers.
+     ;
  %ifdef ASM_CALL64_GCC
      ; fResume already in rdi
…
  %endif

+     ;
+     ; Save the host XCR0 and load the guest one if necessary.
+     ; Note! Trashes rdx and rcx.
+     ;
+ %ifdef ASM_CALL64_MSC
+     mov     rax, [xBP + 30h]            ; pVCpu
+ %else
+     mov     rax, r8                     ; pVCpu
+ %endif
+     test    byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
+     jz      .xcr0_before_skip
+
+     xor     ecx, ecx
+     xgetbv                              ; Save the host one on the stack.
+     push    xDX
+     push    xAX
+
+     mov     eax, [xSI + CPUMCTX.aXcr]   ; Load the guest one.
+     mov     edx, [xSI + CPUMCTX.aXcr + 4]
+     xor     ecx, ecx                    ; paranoia
+     xsetbv
+
+     push    0                           ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
+     jmp     .xcr0_before_done
+
+ .xcr0_before_skip:
+     push    3fh                         ; indicate that we need not.
+ .xcr0_before_done:
+
+     ;
      ; Save segment registers.
-     ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case).
+     ; Note! Trashes rdx & rcx, so we moved it here (amd64 case).
+     ;
      MYPUSHSEGS xAX, ax
…
- ;/**
- ; * Prepares for and executes VMRUN (32 bits guests)
- ; *
- ; * @returns VBox status code
- ; * @param HCPhysVMCB     Physical address of host VMCB.
- ; * @param HCPhysVMCB     Physical address of guest VMCB.
- ; * @param pCtx           Pointer to the guest CPU-context.
- ; */
+ ;;
+ ; Prepares for and executes VMRUN (32 bits guests)
+ ;
+ ; @returns VBox status code
+ ; @param HCPhysVMCB     Physical address of host VMCB.
+ ; @param HCPhysVMCB     Physical address of guest VMCB.
+ ; @param pCtx           Pointer to the guest CPU-context.
+ ; @param pVM            msc:r9, gcc:rcx        Pointer to the cross context VM structure.
+ ; @param pVCpu          msc:[rsp+28],gcc:r8    Pointer to the cross context VMCPU structure.
+ ;
  ALIGNCODE(16)
  BEGINPROC MY_NAME(SVMR0VMRun)
  %ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
   %ifdef ASM_CALL64_GCC
+     push    r8
+     push    rcx
      push    rdx
      push    rsi
      push    rdi
   %else
-     push    r8
-     push    rdx
-     push    rcx
+     mov     rax, [rsp + 28h]
+     push    rax                         ; pVCpu
+     push    r9                          ; pVM
+     push    r8                          ; pCtx
+     push    rdx                         ; HCPHYSGuestVMCB
+     push    rcx                         ; HCPhysHostVMCB
   %endif
      push    0
…
      pushf

+     ;
      ; Save all general purpose host registers.
+     ;
      MYPUSHAD

-     ; Save guest CPU-context pointer.
+     ;
+     ; Load pCtx into xSI.
+     ;
      mov     xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2]   ; pCtx
-     push    xSI                         ; push for saving the state at the end
+
+     ;
+     ; Save the host XCR0 and load the guest one if necessary.
+     ;
+     mov     xAX, [xBP + xCB * 2 + RTHCPHYS_CB * 2 + xCB * 2]   ; pVCpu
+     test    byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
+     jz      .xcr0_before_skip
+
+     xor     ecx, ecx
+     xgetbv                              ; Save the host one on the stack.
+     push    xDX
+     push    xAX
+
+     mov     xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2]   ; pCtx
+     mov     eax, [xSI + CPUMCTX.aXcr]   ; Load the guest one.
+     mov     edx, [xSI + CPUMCTX.aXcr + 4]
+     xor     ecx, ecx                    ; paranoia
+     xsetbv
+
+     push    0                           ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
+     jmp     .xcr0_before_done
+
+ .xcr0_before_skip:
+     push    3fh                         ; indicate that we need not.
+ .xcr0_before_done:
+
+     ;
+     ; Save guest CPU-context pointer for simplifying saving of the GPRs afterwards.
+     ;
+     push    xSI

      ; Save host fs, gs, sysenter msr etc.
…
      stgi

-     pop     xAX                         ; pCtx
+     ;
+     ; Pop the context pointer (pushed above) and save the guest GPRs (sans RSP and RAX).
+     ;
+     pop     xAX

      mov     [ss:xAX + CPUMCTX.ebx], ebx
…
      mov     [ss:xAX + CPUMCTX.ebp], ebp

+     ;
+     ; Restore the host xcr0 if necessary.
+     ;
+     pop     xCX
+     test    ecx, ecx
+     jnz     .xcr0_after_skip
+     pop     xAX
+     pop     xDX
+     xsetbv                              ; ecx is already zero.
+ .xcr0_after_skip:
+
+     ;
      ; Restore host general purpose registers.
+     ;
      MYPOPAD
…
      pop     xBP
  %ifdef RT_ARCH_AMD64
-     add     xSP, 4*xCB
+     add     xSP, 6*xCB
  %endif
      ret
…
  %ifdef RT_ARCH_AMD64
- ;/**
- ; * Prepares for and executes VMRUN (64 bits guests)
- ; *
- ; * @returns VBox status code
- ; * @param HCPhysVMCB     Physical address of host VMCB.
- ; * @param HCPhysVMCB     Physical address of guest VMCB.
- ; * @param pCtx           Pointer to the guest-CPU context.
- ; */
+ ;;
+ ; Prepares for and executes VMRUN (64 bits guests)
+ ;
+ ; @returns VBox status code
+ ; @param HCPhysVMCB     Physical address of host VMCB.
+ ; @param HCPhysVMCB     Physical address of guest VMCB.
+ ; @param pCtx           Pointer to the guest-CPU context.
+ ; @param pVM            msc:r9, gcc:rcx        Pointer to the cross context VM structure.
+ ; @param pVCpu          msc:[rsp+28],gcc:r8    Pointer to the cross context VMCPU structure.
+ ;
  ALIGNCODE(16)
  BEGINPROC MY_NAME(SVMR0VMRun64)
      ; Fake a cdecl stack frame
   %ifdef ASM_CALL64_GCC
+     push    r8
+     push    rcx
      push    rdx
      push    rsi
      push    rdi
   %else
-     push    r8
-     push    rdx
-     push    rcx
- %endif
-     push    0
-     push    rbp
+     mov     rax, [rsp + 28h]
+     push    rax                         ; rbp + 30h   pVCpu
+     push    r9                          ; rbp + 28h   pVM
+     push    r8                          ; rbp + 20h   pCtx
+     push    rdx                         ; rbp + 18h   HCPHYSGuestVMCB
+     push    rcx                         ; rbp + 10h   HCPhysHostVMCB
+ %endif
+     push    0                           ; rbp + 08h   "fake ret addr"
+     push    rbp                         ; rbp + 00h
      mov     rbp, rsp
      pushf
…
      ;

+     ;
      ; Save all general purpose host registers.
+     ;
      MYPUSHAD

-     ; Save guest CPU-context pointer.
-     mov     rsi, [rbp + xCB * 2 + RTHCPHYS_CB * 2]   ; pCtx
-     push    rsi                         ; push for saving the state at the end
-
+     ;
+     ; Load pCtx into xSI.
+     ;
+     mov     xSI, [rbp + xCB * 2 + RTHCPHYS_CB * 2]
+
+     ;
+     ; Save the host XCR0 and load the guest one if necessary.
+     ;
+     mov     rax, [xBP + 30h]            ; pVCpu
+     test    byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
+     jz      .xcr0_before_skip
+
+     xor     ecx, ecx
+     xgetbv                              ; Save the host one on the stack.
+     push    xDX
+     push    xAX
+
+     mov     xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2]   ; pCtx
+     mov     eax, [xSI + CPUMCTX.aXcr]   ; Load the guest one.
+     mov     edx, [xSI + CPUMCTX.aXcr + 4]
+     xor     ecx, ecx                    ; paranoia
+     xsetbv
+
+     push    0                           ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
+     jmp     .xcr0_before_done
+
+ .xcr0_before_skip:
+     push    3fh                         ; indicate that we need not.
+ .xcr0_before_done:
+
+     ;
+     ; Save guest CPU-context pointer for simplifying saving of the GPRs afterwards.
+     ;
+     push    rsi
+
+     ;
      ; Save host fs, gs, sysenter msr etc.
+     ;
      mov     rax, [rbp + xCB * 2]        ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
      push    rax                         ; Save for the vmload after vmrun
…
      vmsave

+     ;
      ; Load host fs, gs, sysenter msr etc.
+     ;
      pop     rax                         ; pushed above
      vmload

+     ;
      ; Set the global interrupt flag again, but execute cli to make sure IF=0.
+     ;
      cli
      stgi

-     pop     rax                         ; pCtx
+     ;
+     ; Pop the context pointer (pushed above) and save the guest GPRs (sans RSP and RAX).
+     ;
+     pop     rax

      mov     qword [rax + CPUMCTX.ebx], rbx
…
      mov     qword [rax + CPUMCTX.r15], r15

+     ;
+     ; Restore the host xcr0 if necessary.
+     ;
+     pop     xCX
+     test    ecx, ecx
+     jnz     .xcr0_after_skip
+     pop     xAX
+     pop     xDX
+     xsetbv                              ; ecx is already zero.
+ .xcr0_after_skip:
+
+     ;
      ; Restore host general purpose registers.
+     ;
      MYPOPAD
…
      popf
      pop     rbp
-     add     rsp, 4 * xCB
+     add     rsp, 6 * xCB
      ret
  ENDPROC MY_NAME(SVMR0VMRun64)
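What the new xgetbv/xsetbv sequences in this file do, in C terms: before entering the guest, save the host XCR0 and install the guest value whenever HMCPU.fLoadSaveGuestXcr0 says they differ; after the world switch, put the host value back. A minimal sketch under these assumptions: ASMGetXcr0() appears in the HMVMXR0.cpp hunks further down, ASMSetXcr0() is assumed to be its IPRT counterpart, and the struct merely stands in for the stack slots the assembly uses.

    /* Sketch only; the real code is the assembly above. */
    struct HMSKETCHXCR0
    {
        bool     fRestore;      /* assembly: marker word 0 pushed (restore) vs 3fh (skip) */
        uint64_t uHostXcr0;     /* assembly: the two words pushed after xgetbv            */
    };

    static HMSKETCHXCR0 hmSketchLoadGuestXcr0(PVMCPU pVCpu, PCPUMCTX pCtx)
    {
        HMSKETCHXCR0 Saved = { false, 0 };
        if (pVCpu->hm.s.fLoadSaveGuestXcr0)     /* set only when guest and host XCR0 differ */
        {
            Saved.fRestore  = true;
            Saved.uHostXcr0 = ASMGetXcr0();     /* xgetbv with ecx = 0 */
            ASMSetXcr0(pCtx->aXcr[0]);          /* xsetbv with ecx = 0 */
        }
        return Saved;
    }

    static void hmSketchRestoreHostXcr0(HMSKETCHXCR0 const *pSaved)
    {
        if (pSaved->fRestore)
            ASMSetXcr0(pSaved->uHostXcr0);
    }

The assembly keeps this state on the stack rather than in a structure because no general purpose register survives the guest run: the exit path pops one marker word, and only if it is zero does it pop and reload the two words holding the saved host XCR0; the 3fh marker means nothing further was pushed.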
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
(r55131 → r55290)

  DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS HCPhysVmcbHost, RTHCPHYS HCPhysVmcb, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu)
  {
-     uint32_t aParam[4];
+     uint32_t aParam[8];
      aParam[0] = (uint32_t)(HCPhysVmcbHost);                 /* Param 1: HCPhysVmcbHost - Lo. */
      aParam[1] = (uint32_t)(HCPhysVmcbHost >> 32);           /* Param 1: HCPhysVmcbHost - Hi. */
      aParam[2] = (uint32_t)(HCPhysVmcb);                     /* Param 2: HCPhysVmcb - Lo. */
      aParam[3] = (uint32_t)(HCPhysVmcb >> 32);               /* Param 2: HCPhysVmcb - Hi. */
-
-     return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_SVMRCVMRun64, 4, &aParam[0]);
+     aParam[4] = VM_RC_ADDR(pVM, pVM);
+     aParam[5] = 0;
+     aParam[6] = VM_RC_ADDR(pVM, pVCpu);
+     aParam[7] = 0;
+
+     return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_SVMRCVMRun64, RT_ELEMENTS(aParam), &aParam[0]);
  }

…
   * @param   pCtx        Pointer to the guest-CPU context.
   * @param   enmOp       The operation to perform.
-  * @param   cbParam     Number of parameters.
+  * @param   cParams     Number of parameters.
   * @param   paParam     Array of 32-bit parameters.
   */
- VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
-                                          uint32_t *paParam)
+ VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp,
+                                          uint32_t cParams, uint32_t *paParam)
  {
      AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
…
      CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
      CPUMSetHyperEIP(pVCpu, enmOp);
-     for (int i = (int)cbParam - 1; i >= 0; i--)
+     for (int i = (int)cParams - 1; i >= 0; i--)
          CPUMPushHyper(pVCpu, paParam[i]);
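The 64-on-32 handler receives its arguments as an array of 32-bit words that CPUMPushHyper() pushes one at a time, so each 64-bit argument is stored as a lo/hi pair and the 32-bit raw-mode addresses produced by VM_RC_ADDR() get an explicit zero upper half; that is why aParam grows from 4 to 8 entries here (and from 6 to 10 in HMVMXR0.cpp below). A hypothetical helper spelling out that layout:

    /* Illustrative only, not part of the changeset. */
    static unsigned hmSketchAddU64Param(uint32_t *paParam, unsigned iParam, uint64_t u64)
    {
        paParam[iParam++] = (uint32_t)u64;          /* lo dword first, as above */
        paParam[iParam++] = (uint32_t)(u64 >> 32);  /* hi dword second          */
        return iParam;
    }

Renaming cbParam to cParams in SVMR0Execute64BitsHandler (and its VT-x twin) makes it explicit that the count is in parameters rather than bytes, and passing RT_ELEMENTS(aParam) at the call site keeps that count tied to the array size.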
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
(r55257 → r55290)

   * @param   pCtx        Pointer to the guest CPU context.
   * @param   enmOp       The operation to perform.
-  * @param   cbParam     Number of parameters.
+  * @param   cParams     Number of parameters.
   * @param   paParam     Array of 32-bit parameters.
   */
- VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
-                                          uint32_t *paParam)
+ VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp,
+                                          uint32_t cParams, uint32_t *paParam)
  {
      int rc, rc2;
…
      CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
      CPUMSetHyperEIP(pVCpu, enmOp);
-     for (int i = (int)cbParam - 1; i >= 0; i--)
+     for (int i = (int)cParams - 1; i >= 0; i--)
          CPUMPushHyper(pVCpu, paParam[i]);
…
  DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
  {
-     uint32_t aParam[6];
      PHMGLOBALCPUINFO pCpu = NULL;
      RTHCPHYS HCPhysCpuPage = 0;
…
  #endif

+     uint32_t aParam[10];
      aParam[0] = (uint32_t)(HCPhysCpuPage);                  /* Param 1: VMXON physical address - Lo. */
      aParam[1] = (uint32_t)(HCPhysCpuPage >> 32);            /* Param 1: VMXON physical address - Hi. */
…
      aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache);
      aParam[5] = 0;
+     aParam[6] = VM_RC_ADDR(pVM, pVM);
+     aParam[7] = 0;
+     aParam[8] = VM_RC_ADDR(pVM, pVCpu);
+     aParam[9] = 0;

  #ifdef VBOX_WITH_CRASHDUMP_MAGIC
…
      *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
  #endif
-     rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_VMXRCStartVM64, 6, &aParam[0]);
+     rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_VMXRCStartVM64, RT_ELEMENTS(aParam), &aParam[0]);

  #ifdef VBOX_WITH_CRASHDUMP_MAGIC
…
  /**
   * Saves the entire guest state from the currently active VMCS into the
-  * guest-CPU context. This essentially VMREADs all guest-data.
+  * guest-CPU context.
+  *
+  * This essentially VMREADs all guest-data.
   *
   * @returns VBox status code.
…
      VMMR0LogFlushEnable(pVCpu);

+     return VINF_SUCCESS;
+ }
+
+
+ /**
+  * Saves basic guest registers needed for IEM instruction execution.
+  *
+  * @returns VBox status code (OR-able).
+  * @param   pVCpu       Pointer to the cross context CPU data for the calling
+  *                      EMT.
+  * @param   pMixedCtx   Pointer to the CPU context of the guest.
+  * @param   fMemory     Whether the instruction being executed operates on
+  *                      memory or not.  Only CR0 is synced up if clear.
+  * @param   fNeedRsp    Need RSP (any instruction working on GPRs or stack).
+  */
+ static int hmR0VmxSaveGuestRegsForIemExec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fMemory, bool fNeedRsp)
+ {
+     /*
+      * We assume all general purpose registers other than RSP are available.
+      *
+      * RIP is a must as it will be incremented or otherwise changed.
+      *
+      * RFLAGS are always required to figure the CPL.
+      *
+      * RSP isn't always required, however it's a GPR so frequently required.
+      *
+      * SS and CS are the only segment register needed if IEM doesn't do memory
+      * access (CPL + 16/32/64-bit mode), but we can only get all segment registers.
+      *
+      * CR0 is always required by IEM for the CPL, while CR3 and CR4 will only
+      * be required for memory accesses.
+      *
+      * Note! Before IEM dispatches an exception, it will call us to sync in everything.
+      */
+     int rc  = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
+     rc     |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
+     if (fNeedRsp)
+         rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx);
+     rc     |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
+     if (!fMemory)
+         rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
+     else
+         rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
      return rc;
  }
…
      HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();

-     /* We expose XSETBV to the guest, fallback to the interpreter for emulation. */
-     /** @todo check if XSETBV is supported by the recompiler. */
-     return VERR_EM_INTERPRETER;
+     int rc  = hmR0VmxReadEntryInstrLenVmcs(pVmxTransient);
+     rc     |= hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, false /*fNeedRsp*/);
+     rc     |= hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
+     AssertRCReturn(rc, rc);
+
+     VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbInstr);
+     HMCPU_CF_SET(pVCpu, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS : HM_CHANGED_ALL_GUEST);
+
+     pVCpu->hm.s.fLoadSaveGuestXcr0 = (pMixedCtx->cr4 & X86_CR4_OSXSAVE) && pMixedCtx->aXcr[0] != ASMGetXcr0();
+
+     return VBOXSTRICTRC_VAL(rcStrict);
  }
…
      PVM pVM = pVCpu->CTX_SUFF(pVM);
      VBOXSTRICTRC rcStrict;
-     rc  = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
-     rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);    /* Only really need CS+SS. */
+     rc = hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, true /*fNeedRsp*/);
      switch (uAccessType)
      {
…
              break;
          case 4: /* CR4 */
+             pVCpu->hm.s.fLoadSaveGuestXcr0 = (pMixedCtx->cr4 & X86_CR4_OSXSAVE) && pMixedCtx->aXcr[0] != ASMGetXcr0();
              HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR4);
-             Log4(("CRX CR4 write rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr4));
+             Log4(("CRX CR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n",
+                   VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));
              break;
          case 8: /* CR8 */
…
          case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS:        /* CLTS (Clear Task-Switch Flag in CR0) */
          {
-             rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
              AssertRCReturn(rc, rc);
              rcStrict = IEMExecDecodedClts(pVCpu, pVmxTransient->cbInstr);
…
          case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW:        /* LMSW (Load Machine-Status Word into CR0) */
          {
-             rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
              AssertRCReturn(rc, rc);
              rcStrict = IEMExecDecodedLmsw(pVCpu, pVmxTransient->cbInstr,
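Both the new XSETBV exit handler and the CR4 write path above end by recomputing whether the world switcher must swap XCR0 at all. The expression is the one the changeset adds; factoring it into a helper is a hypothetical illustration only:

    /* Hypothetical helper; the open-coded expression is what the changeset adds. */
    static bool hmSketchNeedGuestXcr0(PCPUMCTX pMixedCtx)
    {
        return (pMixedCtx->cr4 & X86_CR4_OSXSAVE)
            && pMixedCtx->aXcr[0] != ASMGetXcr0();
    }

Keeping fLoadSaveGuestXcr0 false whenever the guest never enables OSXSAVE, or runs with the same XCR0 as the host, keeps the extra xgetbv/xsetbv pair out of the VM-entry and VM-exit hot path.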
trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac
(r55148 → r55290)

  ; * @param HCPhysVmcs     VMCS physical address                      [rsp+16]
  ; * @param pCache         VMCS cache                                 [rsp+24]
+ ; * @param pVM            The VM handle                              [rbp+28h]
+ ; * @param pVM            The VMCPU handle.                          [rbp+30h]
  ; * @param pCtx           Guest context (rsi)
  ; */
…
  ; Manual save and restore:
  ;  - General purpose registers except RIP, RSP
+ ;  - XCR0
  ;
  ; Trashed:
…
      mov     qword [rbx + VMCSCACHE.uPos], 5
  %endif

+     ;
+     ; Save the host XCR0 and load the guest one if necessary.
+     ; Note! Trashes rdx and rcx.
+     ;
+     mov     rax, [rbp + 30h]            ; pVCpu
+     test    byte [rax + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
+     jz      .xcr0_before_skip
+
+     xor     ecx, ecx
+     xgetbv                              ; Save the host one on the stack.
+     push    rdx
+     push    rax
+
+     mov     eax, [xSI + CPUMCTX.aXcr]   ; Load the guest one.
+     mov     edx, [xSI + CPUMCTX.aXcr + 4]
+     xor     ecx, ecx                    ; paranoia
+     xsetbv
+
+     push    0                           ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
+     jmp     .xcr0_before_done
+
+ .xcr0_before_skip:
+     push    3fh                         ; indicate that we need not.
+ .xcr0_before_done:

      ; Save the pCtx pointer
…
      pop     rsi                         ; pCtx (needed in rsi by the macros below)

+     ; Restore the host xcr0 if necessary.
+     pop     rcx
+     test    ecx, ecx
+     jnz     .xcr0_after_skip
+     pop     rax
+     pop     rdx
+     xsetbv                              ; ecx is already zero.
+ .xcr0_after_skip:
+
  %ifdef VMX_USE_CACHED_VMCS_ACCESSES
      pop     rdi                         ; Saved pCache
…
      pop     rsi                         ; pCtx (needed in rsi by the macros below)

+     ; Restore the host xcr0 if necessary.
+     pop     rcx
+     test    ecx, ecx
+     jnz     .xcr0_after_skip2
+     pop     rax
+     pop     rdx
+     xsetbv                              ; ecx is already zero.
+ .xcr0_after_skip2:
+
  %ifdef VMX_USE_CACHED_VMCS_ACCESSES
      pop     rdi                         ; pCache
…
  .vmstart64_start_failed:
      pop     rsi                         ; pCtx (needed in rsi by the macros below)
+
+     ; Restore the host xcr0 if necessary.
+     pop     rcx
+     test    ecx, ecx
+     jnz     .xcr0_after_skip3
+     pop     rax
+     pop     rdx
+     xsetbv                              ; ecx is already zero.
+ .xcr0_after_skip3:

  %ifdef VMX_USE_CACHED_VMCS_ACCESSES
…
  ; * @param HCPhysVMCB     Physical address of host VMCB (rsp+8)
  ; * @param HCPhysVMCB     Physical address of guest VMCB (rsp+16)
+ ; * @param pVM            The VM handle (rbp+18h)
+ ; * @param pVM            The VMCPU handle. (rbp+20h)
  ; * @param pCtx           Guest context (rsi)
  ; */
…
      ; Save the Guest CPU context pointer.
      push    rsi                         ; Push for saving the state at the end

+     ;
+     ; Save the host XCR0 and load the guest one if necessary.
+     ;
+     mov     rax, [rbx + 20h]            ; pVCpu
+     test    byte [rax + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
+     jz      .xcr0_before_skip
+
+     xor     ecx, ecx
+     xgetbv                              ; Save the host one on the stack.
+     push    rdx
+     push    rax
+
+     mov     eax, [xSI + CPUMCTX.aXcr]   ; Load the guest one.
+     mov     edx, [xSI + CPUMCTX.aXcr + 4]
+     xor     ecx, ecx                    ; paranoia
+     xsetbv
+
+     push    0                           ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
+     jmp     .xcr0_before_done
+
+ .xcr0_before_skip:
+     push    3fh                         ; indicate that we need not.
+ .xcr0_before_done:

      ; Save host fs, gs, sysenter msr etc
…
      mov     qword [rax + CPUMCTX.r14], r14
      mov     qword [rax + CPUMCTX.r15], r15

+     ;
+     ; Restore the host xcr0 if necessary.
+     ;
+     pop     rcx
+     test    ecx, ecx
+     jnz     .xcr0_after_skip
+     pop     rax
+     pop     rdx
+     xsetbv                              ; ecx is already zero.
+ .xcr0_after_skip:

      mov     eax, VINF_SUCCESS
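The 32-bit-host switcher has three separate exit paths for VT-x (the normal exit plus the VMRESUME and VMLAUNCH failure paths), and each of them repeats the XCR0 restore at its own label (.xcr0_after_skip, .xcr0_after_skip2, .xcr0_after_skip3). In C++ the equivalent "restore on every path" guarantee would normally be expressed as a scope guard; the class below is purely illustrative and not part of the sources, and ASMSetXcr0() is again assumed to be the IPRT counterpart of ASMGetXcr0().

    /* Illustrative scope guard: put the host XCR0 back on every way out of a scope. */
    class HmSketchXcr0Guard
    {
    public:
        HmSketchXcr0Guard(bool fLoadGuest, uint64_t uGuestXcr0)
            : m_fRestore(fLoadGuest)
            , m_uHostXcr0(fLoadGuest ? ASMGetXcr0() : 0)
        {
            if (fLoadGuest)
                ASMSetXcr0(uGuestXcr0);     /* load the guest value before entry */
        }
        ~HmSketchXcr0Guard()
        {
            if (m_fRestore)
                ASMSetXcr0(m_uHostXcr0);    /* runs no matter which branch returns */
        }
    private:
        bool     m_fRestore;
        uint64_t m_uHostXcr0;
    };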
trunk/src/VBox/VMM/include/HMInternal.h
(r55129 → r55290)

      /** Whether to preload the guest-FPU state to avoid #NM VM-exit overhead. */
      bool                        fPreloadGuestFpu;
+     /** Set if XCR0 needs to be loaded and saved when entering and exiting guest
+      * code execution. */
+     bool                        fLoadSaveGuestXcr0;

      /** Whether #UD needs to be intercepted (required by certain GIM providers). */
…
      /** Whether paravirt. hypercalls are enabled. */
      bool                        fHypercallsEnabled;
-     uint8_t                     u8Alignment0[6];
+     uint8_t                     u8Alignment0[5];

      /** World switch exit counter. */
trunk/src/VBox/VMM/include/HMInternal.mac
(r49523 → r55290)

  endstruc

+
+ struc HMCPU
+     .fForceTLBFlush         resb 1
+     .fActive                resb 1
+     .fCheckedTLBFlush       resb 1
+     .fSingleInstruction     resb 1
+     .fClearTrapFlag         resb 1
+     .fLeaveDone             resb 1
+     .fUsingHyperDR7         resb 1
+     .fPreloadGuestFpu       resb 1
+     .fLoadSaveGuestXcr0     resb 1
+     .fGIMTrapXcptUD         resb 1
+     .fHypercallsEnabled     resb 1
+     alignb 8
+     .cWorldSwitchExits      resd 1
+     .fContextUseFlags       resd 1
+     .idLastCpu              resd 1
+     .cTlbFlushes            resd 1
+     .uCurrentAsid           resd 1
+     .u32HMError             resd 1
+     alignb 8
+     .u64HostTscAux          resq 1
+
+     ; incomplete to save unnecessary pain...
+ endstruc
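The NASM struc above mirrors only the leading fields of HMCPU from HMInternal.h; the one the new assembly actually reads is fLoadSaveGuestXcr0, whose byte offset follows directly from the resb declarations. Spelling that out (the enum is a hypothetical illustration, not code from the tree):

    /* Offsets implied by the resb sequence above: eight one-byte flags precede
       fLoadSaveGuestXcr0, so HMCPU.fLoadSaveGuestXcr0 resolves to offset 8. */
    enum HmSketchHmcpuOffsets
    {
        kHmSketchOff_fForceTLBFlush     = 0,
        kHmSketchOff_fPreloadGuestFpu   = 7,
        kHmSketchOff_fLoadSaveGuestXcr0 = 8
    };

Because the mirror is deliberately left incomplete, the generated-size comparison for HMCPU is excluded from the testcase makefile below rather than being kept in sync.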
trunk/src/VBox/VMM/testcase/Makefile.kmk
(r55044 → r55290)

      -e '/VMMCPU_size$$/d' \
      -e '/SUPDRVTRACERUSRCTX32_size$$/d' \
+     -e '/HMCPU_size$$/d' \
      \
      -e '/00[0-9a-fA-F]* [aA] [^_.]*_size$$/!d' \