Changeset 87417 in vbox for trunk/src/VBox/VMM/VMMR0
- Timestamp: Jan 25, 2021 1:31:11 PM
- svn:sync-xref-src-repo-rev: 142405
- File: 1 edited
trunk/src/VBox/VMM/VMMR0/HMR0A.asm
r87412 → r87417

%macro PUSH_RELEVANT_SEGMENT_REGISTERS 2
 %ifndef HM_64_BIT_USE_NULL_SEL
        mov     %2, es
        push    %1
        mov     %2, ds
        push    %1
 %endif

        ; Special case for FS; Windows and Linux either don't use it or restore it when leaving kernel mode,
        ; Solaris OTOH doesn't and we must save it.
        mov     ecx, MSR_K8_FS_BASE
        rdmsr
        push    rdx
        push    rax
 %ifndef HM_64_BIT_USE_NULL_SEL
        push    fs
 %endif

        ; Special case for GS; OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel.
        ; The same happens on exit.
        mov     ecx, MSR_K8_GS_BASE
        rdmsr
        push    rdx
        push    rax
 %ifndef HM_64_BIT_USE_NULL_SEL
        push    gs
 %endif
%endmacro

…

; trashes, rax, rdx & rcx
%macro POP_RELEVANT_SEGMENT_REGISTERS 2
        ; Note: do not step through this code with a debugger!
 %ifndef HM_64_BIT_USE_NULL_SEL
        xor     eax, eax
        mov     ds, ax
        mov     es, ax
        mov     fs, ax
        mov     gs, ax
 %endif

 %ifndef HM_64_BIT_USE_NULL_SEL
        pop     gs
 %endif
        pop     rax
        pop     rdx
        mov     ecx, MSR_K8_GS_BASE
        wrmsr

 %ifndef HM_64_BIT_USE_NULL_SEL
        pop     fs
 %endif
        pop     rax
        pop     rdx
        mov     ecx, MSR_K8_FS_BASE
        wrmsr
        ; Now it's safe to step again

 %ifndef HM_64_BIT_USE_NULL_SEL
        pop     %1
        mov     ds, %2
        pop     %1
        mov     es, %2
 %endif
%endmacro

…

; @param 2    Which flag to test for (CPUMCTX_WSF_IBPB_ENTRY or CPUMCTX_WSF_IBPB_EXIT)
%macro INDIRECT_BRANCH_PREDICTION_BARRIER_CTX 2
        test    byte [%1 + CPUMCTX.fWorldSwitcher], %2
        jz      %%no_indirect_branch_barrier
        mov     ecx, MSR_IA32_PRED_CMD
        mov     eax, MSR_IA32_PRED_CMD_F_IBPB
        xor     edx, edx
        wrmsr
%%no_indirect_branch_barrier:
%endmacro

…

; @param 2    Which flag to test for (CPUMCTX_WSF_IBPB_ENTRY or CPUMCTX_WSF_IBPB_EXIT)
%macro INDIRECT_BRANCH_PREDICTION_BARRIER 2
        test    byte [%1 + VMCPU.cpum.GstCtx + CPUMCTX.fWorldSwitcher], %2
        jz      %%no_indirect_branch_barrier
        mov     ecx, MSR_IA32_PRED_CMD
        mov     eax, MSR_IA32_PRED_CMD_F_IBPB
        xor     edx, edx
        wrmsr
%%no_indirect_branch_barrier:
%endmacro

…
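For illustration, this is roughly what the context-pointer variant expands to at its VM-exit use later in this file (INDIRECT_BRANCH_PREDICTION_BARRIER_CTX xDI, CPUMCTX_WSF_IBPB_EXIT). It is a sketch only: the real macro uses a %%-local label, shown here as an ordinary local label for readability.

        ; Illustrative expansion (sketch), not part of the changeset.
        test    byte [xDI + CPUMCTX.fWorldSwitcher], CPUMCTX_WSF_IBPB_EXIT
        jz      .no_ibpb                        ; flag clear -> mitigation not configured, skip the WRMSR
        mov     ecx, MSR_IA32_PRED_CMD          ; IA32_PRED_CMD command MSR
        mov     eax, MSR_IA32_PRED_CMD_F_IBPB   ; bit 0: issue an indirect branch prediction barrier
        xor     edx, edx
        wrmsr
.no_ibpb:
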
; @param 4    Which MDS flag to test for (CPUMCTX_WSF_MDS_ENTRY)
%macro INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER 4
        ; Only one test+jmp when disabled CPUs.
        test    byte [%1 + CPUMCTX.fWorldSwitcher], (%2 | %3 | %4)
        jz      %%no_barrier_needed

        ; The eax:edx value is the same for both.
        AssertCompile(MSR_IA32_PRED_CMD_F_IBPB == MSR_IA32_FLUSH_CMD_F_L1D)
        mov     eax, MSR_IA32_PRED_CMD_F_IBPB
        xor     edx, edx

        ; Indirect branch barrier.
        test    byte [%1 + CPUMCTX.fWorldSwitcher], %2
        jz      %%no_indirect_branch_barrier
        mov     ecx, MSR_IA32_PRED_CMD
        wrmsr
%%no_indirect_branch_barrier:

        ; Level 1 data cache flush.
        test    byte [%1 + CPUMCTX.fWorldSwitcher], %3
        jz      %%no_cache_flush_barrier
        mov     ecx, MSR_IA32_FLUSH_CMD
        wrmsr
        jmp     %%no_mds_buffer_flushing        ; MDS flushing is included in L1D_FLUSH
%%no_cache_flush_barrier:

        ; MDS buffer flushing.
        test    byte [%1 + CPUMCTX.fWorldSwitcher], %4
        jz      %%no_mds_buffer_flushing
        sub     xSP, xCB
        mov     [xSP], ds
        verw    [xSP]
        add     xSP, xCB
%%no_mds_buffer_flushing:

…
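A note on the AssertCompile above: both command MSRs use bit 0 for their respective action, so one eax:edx pair can be loaded once and written to whichever MSR ends up in ecx. The values below are the architectural ones from the Intel SDM and are shown purely as an illustration; the real definitions come from the VBox headers.

        ; Illustrative values (sketch), not part of the changeset.
%define MSR_IA32_PRED_CMD           49h         ; write-only command MSR
%define MSR_IA32_PRED_CMD_F_IBPB    1           ; bit 0: indirect branch prediction barrier
%define MSR_IA32_FLUSH_CMD          10bh        ; write-only command MSR
%define MSR_IA32_FLUSH_CMD_F_L1D    1           ; bit 0: writeback + invalidate the L1 data cache
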
BEGINPROC VMXRestoreHostState
%ifndef ASM_CALL64_GCC
        ; Use GCC's input registers since we'll be needing both rcx and rdx further
        ; down with the wrmsr instruction. Use the R10 and R11 register for saving
        ; RDI and RSI since MSC preserve the two latter registers.
        mov     r10, rdi
        mov     r11, rsi
        mov     rdi, rcx
        mov     rsi, rdx
%endif
        SEH64_END_PROLOGUE

.restore_gdtr:
        test    edi, VMX_RESTORE_HOST_GDTR
        jz      .restore_idtr
        lgdt    [rsi + VMXRESTOREHOST.HostGdtr]

.restore_idtr:
        test    edi, VMX_RESTORE_HOST_IDTR
        jz      .restore_ds
        lidt    [rsi + VMXRESTOREHOST.HostIdtr]

.restore_ds:
        test    edi, VMX_RESTORE_HOST_SEL_DS
        jz      .restore_es
        mov     ax, [rsi + VMXRESTOREHOST.uHostSelDS]
        mov     ds, eax

.restore_es:
        test    edi, VMX_RESTORE_HOST_SEL_ES
        jz      .restore_tr
        mov     ax, [rsi + VMXRESTOREHOST.uHostSelES]
        mov     es, eax

.restore_tr:
        test    edi, VMX_RESTORE_HOST_SEL_TR
        jz      .restore_fs
        ; When restoring the TR, we must first clear the busy flag or we'll end up faulting.
        mov     dx, [rsi + VMXRESTOREHOST.uHostSelTR]
        mov     ax, dx
        and     eax, X86_SEL_MASK_OFF_RPL                       ; mask away TI and RPL bits leaving only the descriptor offset
        test    edi, VMX_RESTORE_HOST_GDT_READ_ONLY | VMX_RESTORE_HOST_GDT_NEED_WRITABLE
        jnz     .gdt_readonly_or_need_writable
        add     rax, qword [rsi + VMXRESTOREHOST.HostGdtr + 2]  ; xAX <- descriptor offset + GDTR.pGdt.
        and     dword [rax + 4], ~RT_BIT(9)                     ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
        ltr     dx
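        ; Background: the TSS descriptor's busy bit is bit 1 of its type field, i.e. bit 41 of
        ; the descriptor, which is bit 9 of the dword at offset +4 -- hence ~RT_BIT(9) above.
        ; LTR raises #GP if the descriptor is already marked busy (type 1011b), so it is
        ; flipped back to 'available' (1001b) before TR is reloaded.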

.restore_fs:
        ;
        ; When restoring the selector values for FS and GS, we'll temporarily trash
        ; the base address (at least the high 32-bit bits, but quite possibly the
        ; whole base address), the wrmsr will restore it correctly. (VT-x actually
        ; restores the base correctly when leaving guest mode, but not the selector
        ; value, so there is little problem with interrupts being enabled prior to
        ; this restore job.)
        ; We'll disable ints once for both FS and GS as that's probably faster.
        ;
        test    edi, VMX_RESTORE_HOST_SEL_FS | VMX_RESTORE_HOST_SEL_GS
        jz      .restore_success
        pushfq
        cli                                 ; (see above)

        test    edi, VMX_RESTORE_HOST_CAN_USE_WRFSBASE_AND_WRGSBASE
        jz      .restore_fs_using_wrmsr

.restore_fs_using_wrfsbase:
        test    edi, VMX_RESTORE_HOST_SEL_FS
        jz      .restore_gs_using_wrgsbase
        mov     rax, qword [rsi + VMXRESTOREHOST.uHostFSBase]
        mov     cx, word [rsi + VMXRESTOREHOST.uHostSelFS]
        mov     fs, ecx
        wrfsbase rax

.restore_gs_using_wrgsbase:
        test    edi, VMX_RESTORE_HOST_SEL_GS
        jz      .restore_flags
        mov     rax, qword [rsi + VMXRESTOREHOST.uHostGSBase]
        mov     cx, word [rsi + VMXRESTOREHOST.uHostSelGS]
        mov     gs, ecx
        wrgsbase rax

.restore_flags:
        popfq

.restore_success:
        mov     eax, VINF_SUCCESS
%ifndef ASM_CALL64_GCC
        ; Restore RDI and RSI on MSC.
        mov     rdi, r10
        mov     rsi, r11
%endif
        ret

ALIGNCODE(8)
.gdt_readonly_or_need_writable:
        test    edi, VMX_RESTORE_HOST_GDT_NEED_WRITABLE
        jnz     .gdt_readonly_need_writable
.gdt_readonly:
        mov     rcx, cr0
        mov     r9, rcx
        add     rax, qword [rsi + VMXRESTOREHOST.HostGdtr + 2]  ; xAX <- descriptor offset + GDTR.pGdt.
        and     rcx, ~X86_CR0_WP
        mov     cr0, rcx
        and     dword [rax + 4], ~RT_BIT(9)                     ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
        ltr     dx
        mov     cr0, r9
        jmp     .restore_fs

ALIGNCODE(8)
.gdt_readonly_need_writable:
        add     rax, qword [rsi + VMXRESTOREHOST.HostGdtrRw + 2] ; xAX <- descriptor offset + GDTR.pGdtRw
        and     dword [rax + 4], ~RT_BIT(9)                     ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
        lgdt    [rsi + VMXRESTOREHOST.HostGdtrRw]
        ltr     dx
        lgdt    [rsi + VMXRESTOREHOST.HostGdtr]                 ; load the original GDT
        jmp     .restore_fs

ALIGNCODE(8)
.restore_fs_using_wrmsr:
        test    edi, VMX_RESTORE_HOST_SEL_FS
        jz      .restore_gs_using_wrmsr
        mov     eax, dword [rsi + VMXRESTOREHOST.uHostFSBase]       ; uHostFSBase - Lo
        mov     edx, dword [rsi + VMXRESTOREHOST.uHostFSBase + 4h]  ; uHostFSBase - Hi
        mov     cx, word [rsi + VMXRESTOREHOST.uHostSelFS]
        mov     fs, ecx
        mov     ecx, MSR_K8_FS_BASE
        wrmsr

.restore_gs_using_wrmsr:
        test    edi, VMX_RESTORE_HOST_SEL_GS
        jz      .restore_flags
        mov     eax, dword [rsi + VMXRESTOREHOST.uHostGSBase]       ; uHostGSBase - Lo
        mov     edx, dword [rsi + VMXRESTOREHOST.uHostGSBase + 4h]  ; uHostGSBase - Hi
        mov     cx, word [rsi + VMXRESTOREHOST.uHostSelGS]
        mov     gs, ecx
        mov     ecx, MSR_K8_GS_BASE
        wrmsr
        jmp     .restore_flags
ENDPROC VMXRestoreHostState

…

ALIGNCODE(16)
BEGINPROC VMXDispatchHostNmi
        ; NMI is always vector 2. The IDT[2] IRQ handler cannot be anything else. See Intel spec. 6.3.1 "External Interrupts".
        SEH64_END_PROLOGUE
        int 2
        ret
ENDPROC VMXDispatchHostNmi

…
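The two restore paths above differ only in how the hidden FS/GS base is written back: WRFSBASE/WRGSBASE when the host can use them (the instructions #UD unless CR4.FSGSBASE is set, hence the VMX_RESTORE_HOST_CAN_USE_WRFSBASE_AND_WRGSBASE check), otherwise a WRMSR to the architectural base MSRs. As a reading aid, a sketch of the MSR numbers behind the MSR_K8_*_BASE names; these are the standard AMD64 values, not taken from the changeset, and the real constants come from the VBox headers.

        ; Sketch only -- architectural MSR numbers behind the names used above.
%define MSR_K8_FS_BASE          0c0000100h      ; FS.base
%define MSR_K8_GS_BASE          0c0000101h      ; GS.base (swapgs exchanges it with KERNEL_GS_BASE, 0c0000102h)
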
;
%macro RESTORE_STATE_VM64 0
        ; Restore base and limit of the IDTR & GDTR.
 %ifndef VMX_SKIP_IDTR
        lidt    [xSP]
        add     xSP, xCB * 2
 %endif
 %ifndef VMX_SKIP_GDTR
        lgdt    [xSP]
        add     xSP, xCB * 2
 %endif

        ; Save the guest state.
        push    xDI
 %ifndef VMX_SKIP_TR
        mov     xDI, [xSP + xCB * 3]        ; pCtx (*3 to skip the saved xDI, TR, LDTR)
 %else
        mov     xDI, [xSP + xCB * 2]        ; pCtx (*2 to skip the saved xDI, LDTR)
 %endif

        mov     qword [xDI + CPUMCTX.eax], rax
        mov     rax, SPECTRE_FILLER
        mov     qword [xDI + CPUMCTX.ebx], rbx
        mov     rbx, rax
        mov     qword [xDI + CPUMCTX.ecx], rcx
        mov     rcx, rax
        mov     qword [xDI + CPUMCTX.edx], rdx
        mov     rdx, rax
        mov     qword [xDI + CPUMCTX.esi], rsi
        mov     rsi, rax
        mov     qword [xDI + CPUMCTX.ebp], rbp
        mov     rbp, rax
        mov     qword [xDI + CPUMCTX.r8], r8
        mov     r8, rax
        mov     qword [xDI + CPUMCTX.r9], r9
        mov     r9, rax
        mov     qword [xDI + CPUMCTX.r10], r10
        mov     r10, rax
        mov     qword [xDI + CPUMCTX.r11], r11
        mov     r11, rax
        mov     qword [xDI + CPUMCTX.r12], r12
        mov     r12, rax
        mov     qword [xDI + CPUMCTX.r13], r13
        mov     r13, rax
        mov     qword [xDI + CPUMCTX.r14], r14
        mov     r14, rax
        mov     qword [xDI + CPUMCTX.r15], r15
        mov     r15, rax
        mov     rax, cr2
        mov     qword [xDI + CPUMCTX.cr2], rax

        pop     xAX                         ; The guest rdi we pushed above
        mov     qword [xDI + CPUMCTX.edi], rax

        ; Fight spectre.
        INDIRECT_BRANCH_PREDICTION_BARRIER_CTX xDI, CPUMCTX_WSF_IBPB_EXIT
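        ; Background on the pattern above: each guest register is stored into the CPUMCTX and
        ; then immediately overwritten with SPECTRE_FILLER, so no guest-controlled values are
        ; left sitting in host registers (speculation fodder) once host code runs again.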

 %ifndef VMX_SKIP_TR
        ; Restore TSS selector; must mark it as not busy before using ltr!
        ; ASSUME that this is supposed to be 'BUSY' (saves 20-30 ticks on the T42p).
        ; @todo get rid of sgdt
        pop     xBX                         ; Saved TR
        sub     xSP, xCB * 2
        sgdt    [xSP]
        mov     xAX, xBX
        and     eax, X86_SEL_MASK_OFF_RPL   ; mask away TI and RPL bits leaving only the descriptor offset
        add     xAX, [xSP + 2]              ; eax <- GDTR.address + descriptor offset
        and     dword [xAX + 4], ~RT_BIT(9) ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
        ltr     bx
        add     xSP, xCB * 2
 %endif

        pop     xAX                         ; Saved LDTR
        cmp     eax, 0
        je      %%skip_ldt_write64
        lldt    ax

%%skip_ldt_write64:
        pop     xSI                         ; pCtx (needed in rsi by the macros below)

        ; Restore segment registers.
        POP_RELEVANT_SEGMENT_REGISTERS xAX, ax

        ; Restore the host XCR0 if necessary.
        pop     xCX
        test    ecx, ecx
        jnz     %%xcr0_after_skip
        pop     xAX
        pop     xDX
        xsetbv                              ; ecx is already zero.
%%xcr0_after_skip:

        ; Restore general purpose registers.
        POP_CALLEE_PRESERVED_REGISTERS
%endmacro

…
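RESTORE_STATE_VM64 only works because hmR0VMXStartVM (below) builds the stack frame it unwinds. As a reading aid, here is a sketch of that frame from the top of the stack at .vmlaunch64_done downwards, derived from the push sequence shown below; the exact contents vary with the VMX_SKIP_* and HM_64_BIT_USE_NULL_SEL build options.

        ; xSP -> IDTR image (2 stack cells)             %ifndef VMX_SKIP_IDTR
        ;        GDTR image (2 stack cells)             %ifndef VMX_SKIP_GDTR
        ;        saved host TR                          %ifndef VMX_SKIP_TR
        ;        saved host LDTR
        ;        pCtx (&pVCpu->cpum.GstCtx)
        ;        host segment registers + FS/GS base    (PUSH_RELEVANT_SEGMENT_REGISTERS)
        ;        XCR0-restore indicator (0 = restore, 3fh = skip)
        ;        saved host XCR0 (2 cells, only when the indicator is 0)
        ;        callee-preserved host registers        (PUSH_CALLEE_PRESERVED_REGISTERS)
        ;        host RFLAGS (pushf) and the saved xBP
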
ALIGNCODE(64)
BEGINPROC hmR0VMXStartVM
        push    xBP
        mov     xBP, xSP

        pushf
        cli

        ; Save all general purpose host registers.
%assign cbFrame 8
        PUSH_CALLEE_PRESERVED_REGISTERS
        SEH64_END_PROLOGUE

        ; First we have to save some final CPU context registers.
        lea     r10, [.vmlaunch64_done wrt rip]
        mov     rax, VMX_VMCS_HOST_RIP      ; return address (too difficult to continue after VMLAUNCH?)
        vmwrite rax, r10
        ; Note: ASSUMES success!

        ;
        ; Unify the input parameter registers: rsi=pVCpu, bl=fResume, rdi=&pVCpu->cpum.GstCtx;
        ;
%ifdef ASM_CALL64_GCC
        mov     ebx, edx                    ; fResume
%else
        mov     rsi, rdx                    ; pVCpu
        mov     ebx, r8d                    ; fResume
%endif
        lea     rdi, [rsi + VMCPU.cpum.GstCtx]

        ;
        ; Save the host XCR0 and load the guest one if necessary.
        ; Note! Trashes rdx and rcx.
        ;
        test    byte [rsi + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
        jz      .xcr0_before_skip

        xor     ecx, ecx
        xgetbv                              ; save the host one on the stack
        push    xDX
        push    xAX

        mov     eax, [rdi + CPUMCTX.aXcr]   ; load the guest one
        mov     edx, [rdi + CPUMCTX.aXcr + 4]
        xor     ecx, ecx                    ; paranoia
        xsetbv

        push    0                           ; indicate that we must restore XCR0 (popped into ecx, thus 0)
        jmp     .xcr0_before_done

.xcr0_before_skip:
        push    3fh                         ; indicate that we need not
.xcr0_before_done:
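        ; Background: the value pushed here is both a flag and, when zero, the XCR0 index for
        ; the later xsetbv. RESTORE_STATE_VM64 pops it into ecx: 0 means the host XCR0 follows
        ; on the stack and must be restored (and ecx already holds the right register index),
        ; while 3fh is simply an arbitrary non-zero value meaning there is nothing to restore.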

        ;
        ; Save segment registers.
        ; Note! Trashes rdx & rcx, so we moved it here (amd64 case).
        ;
        PUSH_RELEVANT_SEGMENT_REGISTERS xAX, ax

        ; Save the pCtx pointer.
        push    rdi

        ; Save host LDTR.
        xor     eax, eax
        sldt    ax
        push    xAX

%ifndef VMX_SKIP_TR
        ; The host TR limit is reset to 0x67; save & restore it manually.
        str     eax
        push    xAX
%endif

%ifndef VMX_SKIP_GDTR
        ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
        sub     xSP, xCB * 2
        sgdt    [xSP]
%endif
%ifndef VMX_SKIP_IDTR
        sub     xSP, xCB * 2
        sidt    [xSP]
%endif

        ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
        mov     rcx, qword [rdi + CPUMCTX.cr2]
        mov     rdx, cr2
        cmp     rcx, rdx
        je      .skip_cr2_write
        mov     cr2, rcx

.skip_cr2_write:
        mov     eax, VMX_VMCS_HOST_RSP
        vmwrite xAX, xSP
        ; Note: ASSUMES success!
        ; Don't mess with ESP anymore!!!
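        ; Background: VMX_VMCS_HOST_RSP is the exact stack pointer the CPU reloads on VM-exit,
        ; which then resumes at .vmlaunch64_done (the VMX_VMCS_HOST_RIP written earlier). That
        ; is why nothing may be pushed or popped from here on: RESTORE_STATE_VM64 starts
        ; unwinding from precisely this RSP value.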

        ; Fight spectre and similar.
        INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER rdi, CPUMCTX_WSF_IBPB_ENTRY, CPUMCTX_WSF_L1D_ENTRY, CPUMCTX_WSF_MDS_ENTRY

        ; Resume or start VM?
        cmp     bl, 0                       ; fResume

        ; Load guest general purpose registers.
        mov     rax, qword [rdi + CPUMCTX.eax]
        mov     rbx, qword [rdi + CPUMCTX.ebx]
        mov     rcx, qword [rdi + CPUMCTX.ecx]
        mov     rdx, qword [rdi + CPUMCTX.edx]
        mov     rbp, qword [rdi + CPUMCTX.ebp]
        mov     rsi, qword [rdi + CPUMCTX.esi]
        mov     r8, qword [rdi + CPUMCTX.r8]
        mov     r9, qword [rdi + CPUMCTX.r9]
        mov     r10, qword [rdi + CPUMCTX.r10]
        mov     r11, qword [rdi + CPUMCTX.r11]
        mov     r12, qword [rdi + CPUMCTX.r12]
        mov     r13, qword [rdi + CPUMCTX.r13]
        mov     r14, qword [rdi + CPUMCTX.r14]
        mov     r15, qword [rdi + CPUMCTX.r15]
        mov     rdi, qword [rdi + CPUMCTX.edi]

        je      .vmlaunch64_launch

        vmresume
        jc      near .vmxstart64_invalid_vmcs_ptr
        jz      near .vmxstart64_start_failed
        jmp     .vmlaunch64_done            ; here if vmresume detected a failure

.vmlaunch64_launch:
        vmlaunch
        jc      near .vmxstart64_invalid_vmcs_ptr
        jz      near .vmxstart64_start_failed
        jmp     .vmlaunch64_done            ; here if vmlaunch detected a failure

ALIGNCODE(64)
.vmlaunch64_done:
        RESTORE_STATE_VM64
        mov     eax, VINF_SUCCESS

.vmstart64_end:
        popf
        pop     xBP
        ret

.vmxstart64_invalid_vmcs_ptr:
        RESTORE_STATE_VM64
        mov     eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
        jmp     .vmstart64_end

.vmxstart64_start_failed:
        RESTORE_STATE_VM64
        mov     eax, VERR_VMX_UNABLE_TO_START_VM
        jmp     .vmstart64_end
ENDPROC hmR0VMXStartVM
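The jc/jz pairs after vmlaunch and vmresume rely on the VMX instruction error convention: VMfailInvalid (no valid current VMCS pointer) sets CF, VMfailValid sets ZF and records a reason code in the VMCS VM-instruction error field, and plain success continues into the guest until the next VM-exit returns control at .vmlaunch64_done. A minimal sketch of that pattern, with hypothetical labels that are not part of the changeset:

        ; Sketch of the check pattern used above (labels are illustrative only).
        vmlaunch                            ; or vmresume for an already-launched VMCS
        jc      .vmcs_pointer_invalid       ; CF=1: VMfailInvalid -- no/invalid current VMCS
        jz      .vm_instruction_error       ; ZF=1: VMfailValid -- reason in the VM-instruction error field
        ; success: the guest runs; the host resumes at the RIP/RSP previously written into the VMCS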