Changeset 83067 in vbox for trunk/src/VBox/VMM/VMMR0
- Timestamp:
- Feb 13, 2020 4:39:07 AM (5 years ago)
- svn:sync-xref-src-repo-rev:
- 136107
- File:
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMR0/HMR0A.asm
r83066 r83067 1 1 ; $Id$ 2 2 ;; @file 3 ; HM - Ring-0 VMX, SVM world-switch and helper routines 3 ; HM - Ring-0 VMX, SVM world-switch and helper routines. 4 4 ; 5 5 … … 65 65 66 66 ;; 67 ; Determine skipping restoring of GDTR, IDTR, TR across VMX non-root operation 67 ; Determine skipping restoring of GDTR, IDTR, TR across VMX non-root operation. 68 68 ; 69 69 %ifdef RT_ARCH_AMD64 … … 83 83 84 84 ;; @def MYPUSHAD 85 ; Macro generating an equivalent to pushad85 ; Macro generating an equivalent to PUSHAD instruction. 86 86 87 87 ;; @def MYPOPAD 88 ; Macro generating an equivalent to popad88 ; Macro generating an equivalent to POPAD instruction. 89 89 90 90 ;; @def MYPUSHSEGS 91 91 ; Macro saving all segment registers on the stack. 92 ; @param 1 full width register name92 ; @param 1 Full width register name. 93 93 ; @param 2 16-bit register name for \a 1. 94 94 95 95 ;; @def MYPOPSEGS 96 ; Macro restoring all segment registers on the stack 97 ; @param 1 full width register name96 ; Macro restoring all segment registers on the stack. 97 ; @param 1 Full width register name. 98 98 ; @param 2 16-bit register name for \a 1. 99 99 … … 142 142 %endmacro 143 143 %else ; !VBOX_SKIP_RESTORE_SEG 144 ; trashes, rax, rdx & rcx144 ; Trashes, rax, rdx & rcx. 145 145 %macro MYPUSHSEGS64 2 146 146 %ifndef HM_64_BIT_USE_NULL_SEL … … 151 151 %endif 152 152 153 ; Special case for FS; Windows and Linux either don't use it or restore it when leaving kernel mode, Solaris OTOH doesn't and we must save it. 153 ; Special case for FS; Windows and Linux either don't use it or restore it when leaving kernel mode, 154 ; Solaris OTOH doesn't and we must save it. 154 155 mov ecx, MSR_K8_FS_BASE 155 156 rdmsr … … 160 161 %endif 161 162 162 ; Special case for GS; OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel. The same happens on exit 163 ; Special case for GS; OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel. 
164 ; The same happens on exit. 163 165 mov ecx, MSR_K8_GS_BASE 164 166 rdmsr … … 283 285 mov ecx, MSR_IA32_FLUSH_CMD 284 286 wrmsr 285 jmp %%no_mds_buffer_flushing ; MDS flushing is included in L1D_FLUSH .287 jmp %%no_mds_buffer_flushing ; MDS flushing is included in L1D_FLUSH 286 288 %%no_cache_flush_barrier: 287 289 … … 310 312 311 313 312 ; /**313 ; *Restores host-state fields.314 ; *315 ; *@returns VBox status code316 ; *@param f32RestoreHost x86: [ebp + 08h] msc: ecx gcc: edi RestoreHost flags.317 ; *@param pRestoreHost x86: [ebp + 0ch] msc: rdx gcc: rsi Pointer to the RestoreHost struct.318 ; */314 ;; 315 ; Restores host-state fields. 316 ; 317 ; @returns VBox status code 318 ; @param f32RestoreHost x86: [ebp + 08h] msc: ecx gcc: edi RestoreHost flags. 319 ; @param pRestoreHost x86: [ebp + 0ch] msc: rdx gcc: rsi Pointer to the RestoreHost struct. 320 ; 319 321 ALIGNCODE(16) 320 322 BEGINPROC VMXRestoreHostState … … 357 359 mov dx, [rsi + VMXRESTOREHOST.uHostSelTR] 358 360 mov ax, dx 359 and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.361 and eax, X86_SEL_MASK_OFF_RPL ; mask away TI and RPL bits leaving only the descriptor offset 360 362 test edi, VMX_RESTORE_HOST_GDT_READ_ONLY | VMX_RESTORE_HOST_GDT_NEED_WRITABLE 361 363 jnz .gdt_readonly 362 364 add rax, qword [rsi + VMXRESTOREHOST.HostGdtr + 2] ; xAX <- descriptor offset + GDTR.pGdt. 
363 and dword [rax + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).365 and dword [rax + 4], ~RT_BIT(9) ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit) 364 366 ltr dx 365 367 jmp short .test_fs … … 372 374 and rcx, ~X86_CR0_WP 373 375 mov cr0, rcx 374 and dword [rax + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).376 and dword [rax + 4], ~RT_BIT(9) ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit) 375 377 ltr dx 376 378 mov cr0, r9 377 379 jmp short .test_fs 378 380 .gdt_readonly_need_writable: 379 add rax, qword [rsi + VMXRESTOREHOST.HostGdtrRw + 2] ; xAX <- descriptor offset + GDTR.pGdtRw .380 and dword [rax + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).381 add rax, qword [rsi + VMXRESTOREHOST.HostGdtrRw + 2] ; xAX <- descriptor offset + GDTR.pGdtRw 382 and dword [rax + 4], ~RT_BIT(9) ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit) 381 383 lgdt [rsi + VMXRESTOREHOST.HostGdtrRw] 382 384 ltr dx 383 lgdt [rsi + VMXRESTOREHOST.HostGdtr] ; Load the original GDT385 lgdt [rsi + VMXRESTOREHOST.HostGdtr] ; load the original GDT 384 386 385 387 .test_fs: … … 434 436 435 437 436 ; /**437 ; *Dispatches an NMI to the host.438 ; */438 ;; 439 ; Dispatches an NMI to the host. 440 ; 439 441 ALIGNCODE(16) 440 442 BEGINPROC VMXDispatchHostNmi 441 int 2 ; NMI is always vector 2. The IDT[2] IRQ handler cannot be anything else. See Intel spec. 6.3.1 "External Interrupts". 443 ; NMI is always vector 2. The IDT[2] IRQ handler cannot be anything else. See Intel spec. 6.3.1 "External Interrupts". 
444 int 2 442 445 ret 443 446 ENDPROC VMXDispatchHostNmi 444 447 445 448 446 ; /**447 ; *Executes VMWRITE, 64-bit value.448 ; *449 ; *@returns VBox status code.450 ; *@param idxField x86: [ebp + 08h] msc: rcx gcc: rdi VMCS index.451 ; *@param u64Data x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value.452 ; */449 ;; 450 ; Executes VMWRITE, 64-bit value. 451 ; 452 ; @returns VBox status code. 453 ; @param idxField x86: [ebp + 08h] msc: rcx gcc: rdi VMCS index. 454 ; @param u64Data x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value. 455 ; 453 456 ALIGNCODE(16) 454 457 BEGINPROC VMXWriteVmcs64 … … 485 488 486 489 487 ; /**488 ; *Executes VMREAD, 64-bit value.489 ; *490 ; *@returns VBox status code.491 ; *@param idxField VMCS index.492 ; *@param pData Where to store VM field value.493 ; */490 ;; 491 ; Executes VMREAD, 64-bit value. 492 ; 493 ; @returns VBox status code. 494 ; @param idxField VMCS index. 495 ; @param pData Where to store VM field value. 496 ; 494 497 ;DECLASM(int) VMXReadVmcs64(uint32_t idxField, uint64_t *pData); 495 498 ALIGNCODE(16) … … 527 530 528 531 529 ; /**530 ; *Executes VMREAD, 32-bit value.531 ; *532 ; *@returns VBox status code.533 ; *@param idxField VMCS index.534 ; *@param pu32Data Where to store VM field value.535 ; */532 ;; 533 ; Executes VMREAD, 32-bit value. 534 ; 535 ; @returns VBox status code. 536 ; @param idxField VMCS index. 537 ; @param pu32Data Where to store VM field value. 538 ; 536 539 ;DECLASM(int) VMXReadVmcs32(uint32_t idxField, uint32_t *pu32Data); 537 540 ALIGNCODE(16) … … 566 569 567 570 568 ; /**569 ; *Executes VMWRITE, 32-bit value.570 ; *571 ; *@returns VBox status code.572 ; *@param idxField VMCS index.573 ; *@param u32Data Where to store VM field value.574 ; */571 ;; 572 ; Executes VMWRITE, 32-bit value. 573 ; 574 ; @returns VBox status code. 575 ; @param idxField VMCS index. 576 ; @param u32Data Where to store VM field value. 
577 ; 575 578 ;DECLASM(int) VMXWriteVmcs32(uint32_t idxField, uint32_t u32Data); 576 579 ALIGNCODE(16) … … 605 608 606 609 607 ; /**608 ; *Executes VMXON.609 ; *610 ; *@returns VBox status code.611 ; *@param HCPhysVMXOn Physical address of VMXON structure.612 ; */610 ;; 611 ; Executes VMXON. 612 ; 613 ; @returns VBox status code. 614 ; @param HCPhysVMXOn Physical address of VMXON structure. 615 ; 613 616 ;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn); 614 617 BEGINPROC VMXEnable … … 641 644 642 645 643 ; /**644 ; *Executes VMXOFF.645 ; */646 ;; 647 ; Executes VMXOFF. 648 ; 646 649 ;DECLASM(void) VMXDisable(void); 647 650 BEGINPROC VMXDisable … … 652 655 653 656 654 ; /**655 ; *Executes VMCLEAR.656 ; *657 ; *@returns VBox status code.658 ; *@param HCPhysVmcs Physical address of VM control structure.659 ; */657 ;; 658 ; Executes VMCLEAR. 659 ; 660 ; @returns VBox status code. 661 ; @param HCPhysVmcs Physical address of VM control structure. 662 ; 660 663 ;DECLASM(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs); 661 664 ALIGNCODE(16) … … 683 686 684 687 685 ; /**686 ; *Executes VMPTRLD.687 ; *688 ; *@returns VBox status code.689 ; *@param HCPhysVmcs Physical address of VMCS structure.690 ; */688 ;; 689 ; Executes VMPTRLD. 690 ; 691 ; @returns VBox status code. 692 ; @param HCPhysVmcs Physical address of VMCS structure. 693 ; 691 694 ;DECLASM(int) VMXLoadVmcs(RTHCPHYS HCPhysVmcs); 692 695 ALIGNCODE(16) … … 714 717 715 718 716 ; /**717 ; *Executes VMPTRST.718 ; *719 ; *@returns VBox status code.720 ; *@param [esp + 04h] gcc:rdi msc:rcx Param 1 - First parameter - Address that will receive the current pointer.721 ; */719 ;; 720 ; Executes VMPTRST. 721 ; 722 ; @returns VBox status code. 723 ; @param [esp + 04h] gcc:rdi msc:rcx Param 1 - First parameter - Address that will receive the current pointer. 
724 ; 722 725 ;DECLASM(int) VMXGetCurrentVmcs(RTHCPHYS *pVMCS); 723 726 BEGINPROC VMXGetCurrentVmcs … … 741 744 ENDPROC VMXGetCurrentVmcs 742 745 743 ;/** 744 ; * Invalidate a page using INVEPT. 746 ;; 747 ; Invalidate a page using INVEPT. 748 ; 745 749 ; @param enmTlbFlush msc:ecx gcc:edi x86:[esp+04] Type of flush. 746 750 ; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer. 747 ; */751 ; 748 752 ;DECLASM(int) VMXR0InvEPT(VMXTLBFLUSHEPT enmTlbFlush, uint64_t *pDescriptor); 749 753 BEGINPROC VMXR0InvEPT … … 778 782 779 783 780 ;/** 781 ; * Invalidate a page using invvpid 784 ;; 785 ; Invalidate a page using INVVPID. 786 ; 782 787 ; @param enmTlbFlush msc:ecx gcc:edi x86:[esp+04] Type of flush 783 788 ; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer 784 ; */789 ; 785 790 ;DECLASM(int) VMXR0InvVPID(VMXTLBFLUSHVPID enmTlbFlush, uint64_t *pDescriptor); 786 791 BEGINPROC VMXR0InvVPID … … 817 822 %if GC_ARCH_BITS == 64 818 823 ;; 819 ; Executes INVLPGA 824 ; Executes INVLPGA. 820 825 ; 821 826 ; @param pPageGC msc:rcx gcc:rdi x86:[esp+04] Virtual page to invalidate … … 855 860 %else 856 861 ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf: 857 ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register858 ; 859 ; there is no need for an instruction movzlq.''862 ; "Perhaps unexpectedly, instructions that move or generate 32-bit register 863 ; values also set the upper 32 bits of the register to zero. Consequently 864 ; there is no need for an instruction movzlq." 860 865 mov eax, ecx 861 866 mov ecx, edx … … 906 911 sub xSP, 0b0h + 040h ; Don't bother optimizing the frame size. 907 912 908 ; spill input parameters.913 ; Spill input parameters. 909 914 mov [xBP + 010h], rcx ; fResumeVM 910 915 mov [xBP + 018h], rdx ; pCtx … … 959 964 xrstor [r10] 960 965 961 ; Make the call (same as in the other case 966 ; Make the call (same as in the other case). 
962 967 mov r11, [xBP + 38h] ; pfnStartVM 963 968 mov r10, [xBP + 30h] ; pVCpu … … 979 984 xsave [r10] 980 985 981 mov eax, r11d ; restore return value .986 mov eax, r11d ; restore return value 982 987 983 988 .restore_non_volatile_host_xmm_regs: … … 1021 1026 ldmxcsr [r10 + X86FXSTATE.MXCSR] 1022 1027 1023 ; Make the call (same as in the other case 1028 ; Make the call (same as in the other case). 1024 1029 mov r11, [xBP + 38h] ; pfnStartVM 1025 1030 mov r10, [xBP + 30h] ; pVCpu … … 1083 1088 push xBP 1084 1089 mov xBP, xSP 1085 sub xSP, 0b0h + 040h ; Don't bother optimizing the frame size.1086 1087 ; spill input parameters.1090 sub xSP, 0b0h + 040h ; don't bother optimizing the frame size 1091 1092 ; Spill input parameters. 1088 1093 mov [xBP + 010h], rcx ; HCPhysVmcbHost 1089 1094 mov [xBP + 018h], rdx ; HCPhysVmcb … … 1138 1143 xrstor [r10] 1139 1144 1140 ; Make the call (same as in the other case 1145 ; Make the call (same as in the other case). 1141 1146 mov r11, [xBP + 38h] ; pfnVMRun 1142 1147 mov r10, [xBP + 30h] ; pVCpu … … 1158 1163 xsave [r10] 1159 1164 1160 mov eax, r11d ; restore return value .1165 mov eax, r11d ; restore return value 1161 1166 1162 1167 .restore_non_volatile_host_xmm_regs: … … 1200 1205 ldmxcsr [r10 + X86FXSTATE.MXCSR] 1201 1206 1202 ; Make the call (same as in the other case 1207 ; Make the call (same as in the other case). 1203 1208 mov r11, [xBP + 38h] ; pfnVMRun 1204 1209 mov r10, [xBP + 30h] ; pVCpu … … 1242 1247 ; 1243 1248 %macro RESTORE_STATE_VM64 0 1244 ; Restore base and limit of the IDTR & GDTR 1249 ; Restore base and limit of the IDTR & GDTR. 1245 1250 %ifndef VMX_SKIP_IDTR 1246 1251 lidt [xSP] … … 1297 1302 1298 1303 %ifndef VMX_SKIP_TR 1299 ; Restore TSS selector; must mark it as not busy before using ltr (!)1300 ; ASSUME that this is supposed to be 'BUSY' .(saves 20-30 ticks on the T42p).1304 ; Restore TSS selector; must mark it as not busy before using ltr! 
1305 ; ASSUME that this is supposed to be 'BUSY' (saves 20-30 ticks on the T42p). 1301 1306 ; @todo get rid of sgdt 1302 1307 pop xBX ; Saved TR … … 1304 1309 sgdt [xSP] 1305 1310 mov xAX, xBX 1306 and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.1307 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset .1308 and dword [xAX + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).1311 and eax, X86_SEL_MASK_OFF_RPL ; mask away TI and RPL bits leaving only the descriptor offset 1312 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset 1313 and dword [xAX + 4], ~RT_BIT(9) ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit) 1309 1314 ltr bx 1310 1315 add xSP, xCB * 2 … … 1340 1345 ; 1341 1346 ; @returns VBox status code 1342 ; @param fResume msc:rcx, gcc:rdi Whether to use vmlauch/vmresume.1343 ; @param pCtx msc:rdx, gcc:rsi Pointer to the guest-CPU context.1344 ; @param pvUnused msc:r8, gcc:rdx Unused argument.1345 ; @param pVM msc:r9, gcc:rcx The cross context VM structure.1346 ; @param pVCpu msc:[ebp+30], gcc:r8 The cross context virtual CPU structure of the calling EMT.1347 ; @param fResume msc:rcx, gcc:rdi Whether to use vmlauch/vmresume. 1348 ; @param pCtx msc:rdx, gcc:rsi Pointer to the guest-CPU context. 1349 ; @param pvUnused msc:r8, gcc:rdx Unused argument. 1350 ; @param pVM msc:r9, gcc:rcx The cross context VM structure. 1351 ; @param pVCpu msc:[ebp+30], gcc:r8 The cross context virtual CPU structure of the calling EMT. 1347 1352 ; 1348 1353 ALIGNCODE(16) … … 1359 1364 ; First we have to save some final CPU context registers. 1360 1365 lea r10, [.vmlaunch64_done wrt rip] 1361 mov rax, VMX_VMCS_HOST_RIP ; Return address (too difficult to continue after VMLAUNCH?).1366 mov rax, VMX_VMCS_HOST_RIP ; return address (too difficult to continue after VMLAUNCH?) 1362 1367 vmwrite rax, r10 1363 ; Note: assumessuccess!1368 ; Note: ASSUMES success! 
1364 1369 1365 1370 ; … … 1389 1394 1390 1395 xor ecx, ecx 1391 xgetbv ; Save the host one on the stack.1396 xgetbv ; save the host one on the stack 1392 1397 push xDX 1393 1398 push xAX 1394 1399 1395 mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.1400 mov eax, [xSI + CPUMCTX.aXcr] ; load the guest one 1396 1401 mov edx, [xSI + CPUMCTX.aXcr + 4] 1397 1402 xor ecx, ecx ; paranoia 1398 1403 xsetbv 1399 1404 1400 push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).1405 push 0 ; indicate that we must restore XCR0 (popped into ecx, thus 0) 1401 1406 jmp .xcr0_before_done 1402 1407 1403 1408 .xcr0_before_skip: 1404 push 3fh ; indicate that we need not .1409 push 3fh ; indicate that we need not 1405 1410 .xcr0_before_done: 1406 1411 … … 1445 1450 mov eax, VMX_VMCS_HOST_RSP 1446 1451 vmwrite xAX, xSP 1447 ; Note: assumessuccess!1452 ; Note: ASSUMES success! 1448 1453 ; Don't mess with ESP anymore!!! 1449 1454 … … 1478 1483 jc near .vmxstart64_invalid_vmcs_ptr 1479 1484 jz near .vmxstart64_start_failed 1480 jmp .vmlaunch64_done; ; Here if vmresume detected a failure.1485 jmp .vmlaunch64_done; ; here if vmresume detected a failure 1481 1486 1482 1487 .vmlaunch64_launch: … … 1484 1489 jc near .vmxstart64_invalid_vmcs_ptr 1485 1490 jz near .vmxstart64_start_failed 1486 jmp .vmlaunch64_done; ; Here if vmlaunch detected a failure.1491 jmp .vmlaunch64_done; ; here if vmlaunch detected a failure 1487 1492 1488 1493 ALIGNCODE(16) … … 1536 1541 ; Fake a cdecl stack frame 1537 1542 %ifdef ASM_CALL64_GCC 1538 push r8 ; pVCpu1539 push rcx ; pVM1540 push rdx ; pCtx1541 push rsi ; HCPhysVmcb1542 push rdi ; HCPhysVmcbHost1543 push r8 ; pVCpu 1544 push rcx ; pVM 1545 push rdx ; pCtx 1546 push rsi ; HCPhysVmcb 1547 push rdi ; HCPhysVmcbHost 1543 1548 %else 1544 1549 mov rax, [rsp + 28h] … … 1575 1580 1576 1581 xor ecx, ecx 1577 xgetbv ; save the host XCR0 on the stack .1582 xgetbv ; save the host XCR0 on the stack 1578 1583 push xDX 1579 1584 push xAX … … 1596 1601 
1597 1602 ; Save host fs, gs, sysenter msr etc. 1598 mov rax, [rbp + xCB * 2] 1599 push rax 1603 mov rax, [rbp + xCB * 2] ; HCPhysVmcbHost (64 bits physical address; x86: take low dword only) 1604 push rax ; save for the vmload after vmrun 1600 1605 vmsave 1601 1606 … … 1604 1609 1605 1610 ; Setup rax for VMLOAD. 1606 mov rax, [rbp + xCB * 2 + RTHCPHYS_CB] 1611 mov rax, [rbp + xCB * 2 + RTHCPHYS_CB] ; HCPhysVmcb (64 bits physical address; take low dword only) 1607 1612 1608 1613 ; Load guest general purpose registers (rax is loaded from the VMCB by VMRUN).
Note:
See TracChangeset
for help on using the changeset viewer.