Changeset 14802 in vbox for trunk/src/VBox
- Timestamp: Nov 29, 2008, 1:37:55 AM
- Location: trunk/src/VBox/VMM/VMMR0
- Files: 1 edited, 1 copied
trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm
r14799 → r14802

A new "Defined Constants And Macros" block is added near the top of the file; it defines MAYBE_64_BIT when either RT_ARCH_AMD64 or VBOX_WITH_HYBIRD_32BIT_KERNEL is set, and the LOADGUESTMSR / LOADHOSTMSREX / LOADHOSTMSR macros are now guarded by %ifdef MAYBE_64_BIT instead of %ifdef RT_ARCH_AMD64:

    ;*******************************************************************************
    ;*  Defined Constants And Macros                                               *
    ;*******************************************************************************
    %ifdef RT_ARCH_AMD64
     %define MAYBE_64_BIT
    %endif
    %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
     %define MAYBE_64_BIT
    %endif

The push/pop helper macros are no longer selected with %ifdef RT_ARCH_AMD64 / %else; both flavours are always defined, under new names:

    MYPUSHAD    ->  MYPUSHAD64   and MYPUSHAD32
    MYPOPAD     ->  MYPOPAD64    and MYPOPAD32
    MYPUSHSEGS  ->  MYPUSHSEGS64 and MYPUSHSEGS32
    MYPOPSEGS   ->  MYPOPSEGS64  and MYPOPSEGS32

The 64-bit versions keep the ASM_CALL64_GCC / ASM_CALL64_MSC register lists and the MSR_K8_FS_BASE / MSR_K8_GS_BASE save and restore sequences; the 32-bit versions keep the plain pushad/popad and the ds/es/fs/gs push/pop sequences.

The hybrid-kernel flag becomes a global symbol with a new name:

    -g_fIs64bit:
    +GLOBALNAME g_fVMXIs64bitHost
         dd NAME(SUPR0AbsIs64bit)

and every "cmp byte [g_fIs64bit], 0" test in VMXWriteVMCS64, VMXReadVMCS64, VMXReadVMCS32, VMXWriteVMCS32, VMXEnable, VMXDisable, VMXClearVMCS, VMXActivateVMCS, VMXGetActivateVMCS, VMXR0InvEPT and VMXR0InvVPID is updated accordingly:

    -    cmp byte [g_fIs64bit], 0
    +    cmp byte [NAME(g_fVMXIs64bitHost)], 0
         jne .longmode

The world-switch routines VMXR0StartVM32, VMXR0StartVM64, SVMR0VMRun and SVMR0VMRun64 are removed from this file; they move into the new HWACCMR0Mixed.mac (see below). The closing %else of the 32-bit SVMR0InvlpgA variant gains a clarifying comment ("%else ; GC_ARCH_BITS != 64").

At the end of the file, the default setup of the StartVM routines is added, followed by the include of the moved code:

    ;
    ; The default setup of the StartVM routines.
    ;
    %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
     %define MY_NAME(name)   name %+ _32
    %else
     %define MY_NAME(name)   name
    %endif
    %ifdef RT_ARCH_AMD64
     %define MYPUSHAD        MYPUSHAD64
     %define MYPOPAD         MYPOPAD64
     %define MYPUSHSEGS      MYPUSHSEGS64
     %define MYPOPSEGS       MYPOPSEGS64
    %else
     %define MYPUSHAD        MYPUSHAD32
     %define MYPOPAD         MYPOPAD32
     %define MYPUSHSEGS      MYPUSHSEGS32
     %define MYPOPSEGS       MYPOPSEGS32
    %endif

    %include "HWACCMR0Mixed.mac"

For VBOX_WITH_HYBIRD_32BIT_KERNEL builds, wrapper procedures are written after the include. VMXR0StartVM32 is representative:

    ; DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx);
    BEGINPROC VMXR0StartVM32
        cmp     byte [NAME(g_fVMXIs64bitHost)], 0
        je      near NAME(VMXR0StartVM32_32)

        ; stack frame.
        push    ebp
        mov     ebp, esp
        push    esi
        push    edi
        and     esp, 0fffffff0h

        ; retf frame (64 -> 32).
        push    0
        push    cs
        push    0
        push    .thunk32

        ; jmp far .thunk64
        db      0xea
        dd      .thunk64, NAME(SUPR0Abs64bitKernelCS)
    BITS 64
    .thunk64:
        and     esp, 0ffffffffh
        and     ebp, 0ffffffffh
        mov     edi, [rbp + 8]          ; fResume
        mov     esi, [rbp + 12]         ; pCtx
        sub     rsp, 20h
        call    NAME(VMXR0StartVM32_64)
        add     rsp, 20h
        retf
    BITS 32
    .thunk32:
        mov     esi, [ebp - 4]
        mov     edi, [ebp - 8]
        leave
        ret
    ENDPROC VMXR0StartVM32

SVMR0VMRun follows the same pattern, dispatching to NAME(SVMR0VMRun_32) on a 32-bit host or, through the thunk, to NAME(SVMR0VMRun_64) with pVMCBHostPhys in rdi, pVMCBPhys in rsi and pCtx in edx. VMXR0StartVM64 and SVMR0VMRun64 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE when the host is not in long mode and otherwise thunk to NAME(VMXR0StartVM64_64) and NAME(SVMR0VMRun64_64) respectively.

The file then includes the mixed code a second time, pretending to be a 64-bit host:

    ;
    ; Do it a second time pretending we're a 64-bit host.
    ;
    ; This *HAS* to be done at the very end of the file to avoid restoring
    ; macros. So, add new code *BEFORE* this mess.
    ;
    BITS 64
    %undef  RT_ARCH_X86
    %define RT_ARCH_AMD64
    %define xS              8
    %define xSP             rsp
    %define xBP             rbp
    %define xAX             rax
    %define xBX             rbx
    %define xCX             rcx
    %define xDX             rdx
    %define xDI             rdi
    %define xSI             rsi
    %define MY_NAME(name)   name %+ _64
    %define MYPUSHAD        MYPUSHAD64
    %define MYPOPAD         MYPOPAD64
    %define MYPUSHSEGS      MYPUSHSEGS64
    %define MYPOPSEGS       MYPOPSEGS64

    %include "HWACCMR0Mixed.mac"
    %endif ; VBOX_WITH_HYBIRD_32BIT_KERNEL
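Since the changeset only renames the flag, a short aside on its layout may help: the dword is initialised with the absolute symbol SUPR0AbsIs64bit, so choosing between the 32-bit and 64-bit paths costs one memory compare and clobbers no register. Below is a minimal stand-alone sketch of the same idea; the symbol names are hypothetical stand-ins, not the real SUPDrv exports.

    ; modeflag.asm - illustrative only; kIs64bitHost stands in for the real
    ; SUPR0AbsIs64bit absolute symbol exported by the support driver.
    kIs64bitHost    equ     1                 ; 1 when the darwin kernel runs a 64-bit host

    section .data
    g_fIs64bitHost: dd      kIs64bitHost      ; materialise the absolute value in a dword

    section .text
    global  select_mode
    select_mode:
            cmp     byte [g_fIs64bitHost], 0  ; the test itself touches no register
            jne     .longmode
            mov     eax, 0                    ; 32-bit host path would continue here
            ret
    .longmode:
            mov     eax, 1                    ; 64-bit thunk path would continue here
            ret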
trunk/src/VBox/VMM/VMMR0/HWACCMR0Mixed.mac
r14799 → r14802 (copied from HWACCMR0A.asm and trimmed)

The file header comment is updated for its new role:

    -; VMXM - R0 vmx helpers
    +; HWACCMR0Mixed.mac - Stuff that darwin needs to build two versions of.
    +;
    +; Included by HWACCMR0A.asm with RT_ARCH_AMD64 defined or or undefined.

Everything that stays behind in HWACCMR0A.asm is removed from the copy: the header-file includes (asmdefs.mac, err.mac, hwacc_vmx.mac, cpum.mac, x86.mac), the RT_OS_OS2 instruction workarounds, the VBOX_WITH_DR6_EXPERIMENT note, the LOADGUESTMSR / LOADHOSTMSREX / LOADHOSTMSR and MYPUSHAD / MYPOPAD / MYPUSHSEGS / MYPOPSEGS macro definitions, the external-symbol and global-variable sections (g_fIs64bit), BEGINCODE, and the helper procedures VMXWriteVMCS64, VMXReadVMCS64, VMXReadVMCS32, VMXWriteVMCS32, VMXEnable, VMXDisable, VMXClearVMCS, VMXActivateVMCS, VMXGetActivateVMCS, VMXR0InvEPT, VMXR0InvVPID and SVMR0InvlpgA.

What remains are the world-switch routines, now emitted under configurable names so the file can be assembled twice:

    -BEGINPROC VMXR0StartVM32
    +BEGINPROC MY_NAME(VMXR0StartVM32)
    ...
    -ENDPROC VMXR0StartVM32
    +ENDPROC MY_NAME(VMXR0StartVM32)

and likewise for VMXR0StartVM64, SVMR0VMRun and SVMR0VMRun64.

In both VMX start routines the return address written to VMX_VMCS_HOST_RIP is now computed RIP-relative instead of through a 64-bit absolute immediate:

    -    mov rax, qword .vmlaunch_done
    +    lea rax, [.vmlaunch_done wrt rip]
         push rax

(the same change is made for .vmlaunch64_done in VMXR0StartVM64).