Changeset 46099 in vbox for trunk/src/VBox/VMM
- Timestamp: May 15, 2013 2:23:49 PM (12 years ago)
- Location: trunk/src/VBox/VMM
- Files: 2 edited
trunk/src/VBox/VMM/VMMR0/HMR0Mixed.mac
r45875 r46099 35 35 cli 36 36 37 ; /* Save all general purpose host registers. */37 ; Save all general purpose host registers. 38 38 MYPUSHAD 39 39 40 ; /* First we have to save some final CPU context registers. */40 ; First we have to save some final CPU context registers. 41 41 mov eax, VMX_VMCS_HOST_RIP 42 42 %ifdef RT_ARCH_AMD64 … … 47 47 vmwrite eax, ecx 48 48 %endif 49 ;/* Note: assumes success... */ 50 51 ;/* Manual save and restore: 52 ; * - General purpose registers except RIP, RSP 53 ; * 54 ; * Trashed: 55 ; * - CR2 (we don't care) 56 ; * - LDTR (reset to 0) 57 ; * - DRx (presumably not changed at all) 58 ; * - DR7 (reset to 0x400) 59 ; * - EFLAGS (reset to RT_BIT(1); not relevant) 60 ; * 61 ; */ 62 63 ;/* Save the Guest CPU context pointer. */ 49 ; Note: assumes success! 50 51 ; Manual save and restore: 52 ; - General purpose registers except RIP, RSP. 53 ; 54 ; Trashed: 55 ; - CR2 (we don't care). 56 ; - LDTR (reset to 0). 57 ; - DRx (presumably not changed at all). 58 ; - DR7 (reset to 0x400). 59 ; - EFLAGS (reset to RT_BIT(1); not relevant). 60 61 ; Save the Guest CPU context pointer. 64 62 %ifdef RT_ARCH_AMD64 65 63 %ifdef ASM_CALL64_GCC … … 78 76 %endif 79 77 80 ; /* Save segment registers */81 ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case) 78 ; Save segment registers. 79 ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case). 82 80 MYPUSHSEGS xAX, ax 83 81 … … 101 99 .no_cached_writes: 102 100 103 ; Save the pCache pointer 101 ; Save the pCache pointer. 104 102 push xBX 105 103 %endif 106 104 107 ; Save the pCtx pointer 105 ; Save the pCtx pointer. 108 106 push xSI 109 107 110 ; Save LDTR 108 ; Save LDTR. 111 109 xor eax, eax 112 110 sldt ax 113 111 push xAX 114 112 115 ; The TR limit is reset to 0x67; restore it manually 113 ; The TR limit is reset to 0x67; restore it manually. 116 114 str eax 117 115 push xAX 118 116 119 ; V MXonly saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!117 ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly! 120 118 sub xSP, xS*2 121 119 sgdt [xSP] … … 125 123 126 124 %ifdef VBOX_WITH_DR6_EXPERIMENT 127 ; RestoreDR6 - experiment, not safe!125 ; Load DR6 - experiment, not safe! 128 126 mov xBX, [xSI + CPUMCTX.dr6] 129 127 mov dr6, xBX 130 128 %endif 131 129 132 ; Restore CR2130 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction). 133 131 mov xBX, [xSI + CPUMCTX.cr2] 134 132 mov xDX, cr2 … … 140 138 mov eax, VMX_VMCS_HOST_RSP 141 139 vmwrite xAX, xSP 142 ; /* Note: assumes success... */143 ; /* Don't mess with ESP anymore!! */144 145 ; /* Restore Guest's general purpose registers. */140 ; Note: assumes success! 141 ; Don't mess with ESP anymore!!! 142 143 ; Load Guest's general purpose registers. 146 144 mov eax, [xSI + CPUMCTX.eax] 147 145 mov ebx, [xSI + CPUMCTX.ebx] … … 150 148 mov ebp, [xSI + CPUMCTX.ebp] 151 149 152 ; resume or start?150 ; Resume or start? 153 151 cmp xDI, 0 ; fResume 154 je .vmlau ch_lauch155 156 ; /* Restore edi & esi. */152 je .vmlaunch_launch 153 154 ; Restore edi & esi. 157 155 mov edi, [xSI + CPUMCTX.edi] 158 156 mov esi, [xSI + CPUMCTX.esi] 159 157 160 158 vmresume 161 jmp .vmlaunch_done; ; /* here if vmresume detected a failure. */162 163 .vmlau ch_lauch:164 ; /* Restore edi & esi. */159 jmp .vmlaunch_done; ; Here if vmresume detected a failure. 160 161 .vmlaunch_launch: 162 ; Restore edi & esi. 
165 163 mov edi, [xSI + CPUMCTX.edi] 166 164 mov esi, [xSI + CPUMCTX.esi] 167 165 168 166 vmlaunch 169 jmp .vmlaunch_done; ; /* here if vmlaunch detected a failure. */167 jmp .vmlaunch_done; ; Here if vmlaunch detected a failure. 170 168 171 169 ALIGNCODE(16) ;; @todo YASM BUG - this alignment is wrong on darwin, it's 1 byte off. … … 174 172 jz near .vmxstart_start_failed 175 173 176 ; Restore base and limit of the IDTR & GDTR 174 ; Restore base and limit of the IDTR & GDTR. 177 175 lidt [xSP] 178 176 add xSP, xS*2 … … 181 179 182 180 push xDI 183 mov xDI, [xSP + xS * 3] ; pCtx (*3 to skip the saved LDTR + TR) 181 mov xDI, [xSP + xS * 3] ; pCtx (*3 to skip the saved LDTR + TR). 184 182 185 183 mov [ss:xDI + CPUMCTX.eax], eax … … 195 193 196 194 %ifdef RT_ARCH_AMD64 197 pop xAX ; the guest edi we pushed above195 pop xAX ; The guest edi we pushed above. 198 196 mov dword [ss:xDI + CPUMCTX.edi], eax 199 197 %else 200 pop dword [ss:xDI + CPUMCTX.edi] ; the guest edi we pushed above198 pop dword [ss:xDI + CPUMCTX.edi] ; The guest edi we pushed above. 201 199 %endif 202 200 203 201 %ifdef VBOX_WITH_DR6_EXPERIMENT 204 ; Save DR6 - experiment, not safe!202 ; Restore DR6 - experiment, not safe! 205 203 mov xAX, dr6 206 204 mov [ss:xDI + CPUMCTX.dr6], xAX … … 210 208 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p) 211 209 ; @todo get rid of sgdt 212 pop xBX ; saved TR210 pop xBX ; Saved TR 213 211 sub xSP, xS*2 214 212 sgdt [xSP] 215 213 mov xAX, xBX 216 and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.214 and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset. 217 215 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset. 218 and dword [ss:xAX + 4], ~0200h ; clear busy flag (2nd type2 bit)216 and dword [ss:xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit). 219 217 ltr bx 220 218 add xSP, xS*2 221 219 222 pop xAX ; saved LDTR220 pop xAX ; Saved LDTR 223 221 lldt ax 224 222 225 add xSP, xS 223 add xSP, xS ; pCtx 226 224 227 225 %ifdef VMX_USE_CACHED_VMCS_ACCESSES 228 pop xDX ; saved pCache226 pop xDX ; Saved pCache 229 227 230 228 mov ecx, [ss:xDX + VMCSCACHE.Read.cValidEntries] 231 cmp ecx, 0 ; can't happen229 cmp ecx, 0 ; Can't happen 232 230 je .no_cached_reads 233 231 jmp .cached_read … … 243 241 244 242 %ifdef VBOX_WITH_OLD_VTX_CODE 245 ; Save CR2 for EPT243 ; Restore CR2 into VMCS-cache field (for EPT). 246 244 mov xAX, cr2 247 245 mov [ss:xDX + VMCSCACHE.cr2], xAX … … 249 247 %endif 250 248 251 ; Restore segment registers 249 ; Restore segment registers. 252 250 MYPOPSEGS xAX, ax 253 251 254 ; Restore general purpose registers 252 ; Restore general purpose registers. 255 253 MYPOPAD 256 254 … … 273 271 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p) 274 272 ; @todo get rid of sgdt 275 pop xBX ; saved TR273 pop xBX ; Saved TR 276 274 sub xSP, xS*2 277 275 sgdt [xSP] 278 276 mov xAX, xBX 279 and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.277 and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset. 280 278 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset. 281 and dword [ss:xAX + 4], ~0200h ; clear busy flag (2nd type2 bit)279 and dword [ss:xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit). 282 280 ltr bx 283 281 add xSP, xS*2 284 282 285 pop xAX ; saved LDTR283 pop xAX ; Saved LDTR 286 284 lldt ax 287 285 … … 292 290 %endif 293 291 294 ; Restore segment registers 292 ; Restore segment registers. 
295 293 MYPOPSEGS xAX, ax 296 294 … … 301 299 302 300 .vmxstart_start_failed: 303 ; Restore base and limit of the IDTR & GDTR 301 ; Restore base and limit of the IDTR & GDTR. 304 302 lidt [xSP] 305 303 add xSP, xS*2 … … 310 308 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p) 311 309 ; @todo get rid of sgdt 312 pop xBX ; saved TR310 pop xBX ; Saved TR 313 311 sub xSP, xS*2 314 312 sgdt [xSP] 315 313 mov xAX, xBX 316 and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.314 and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset. 317 315 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset. 318 and dword [ss:xAX + 4], ~0200h ; clear busy flag (2nd type2 bit)316 and dword [ss:xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit). 319 317 ltr bx 320 318 add xSP, xS*2 321 319 322 pop xAX ; saved LDTR320 pop xAX ; Saved LDTR 323 321 lldt ax 324 322 … … 329 327 %endif 330 328 331 ; Restore segment registers 329 ; Restore segment registers. 332 330 MYPOPSEGS xAX, ax 333 331 … … 356 354 cli 357 355 358 ; /* Save all general purpose host registers. */356 ; Save all general purpose host registers. 359 357 MYPUSHAD 360 358 361 ; /* First we have to save some final CPU context registers. */359 ; First we have to save some final CPU context registers. 362 360 lea r10, [.vmlaunch64_done wrt rip] 363 mov rax, VMX_VMCS_HOST_RIP ;/* return address (too difficult to continue after VMLAUNCH?) */361 mov rax, VMX_VMCS_HOST_RIP ; Return address (too difficult to continue after VMLAUNCH?). 364 362 vmwrite rax, r10 365 ;/* Note: assumes success... */ 366 367 ;/* Manual save and restore: 368 ; * - General purpose registers except RIP, RSP 369 ; * 370 ; * Trashed: 371 ; * - CR2 (we don't care) 372 ; * - LDTR (reset to 0) 373 ; * - DRx (presumably not changed at all) 374 ; * - DR7 (reset to 0x400) 375 ; * - EFLAGS (reset to RT_BIT(1); not relevant) 376 ; * 377 ; */ 378 379 ;/* Save the Guest CPU context pointer. */ 363 ; Note: assumes success! 364 365 ; Manual save and restore: 366 ; - General purpose registers except RIP, RSP. 367 ; 368 ; Trashed: 369 ; - CR2 (we don't care). 370 ; - LDTR (reset to 0). 371 ; - DRx (presumably not changed at all). 372 ; - DR7 (reset to 0x400). 373 ; - EFLAGS (reset to RT_BIT(1); not relevant). 374 375 ; Save the Guest CPU context pointer. 380 376 %ifdef ASM_CALL64_GCC 381 377 ; fResume already in rdi … … 388 384 %endif 389 385 390 ; /* Save segment registers */391 ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case) 386 ; Save segment registers. 387 ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case). 392 388 MYPUSHSEGS xAX, ax 393 389 … … 411 407 .no_cached_writes: 412 408 413 ; Save the pCache pointer 409 ; Save the pCache pointer. 414 410 push xBX 415 411 %endif 416 412 417 413 %ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE 418 ; Save the host MSRs and load the guest MSRs 414 ; Save the host MSRs and load the guest MSRs. 419 415 LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR 420 416 LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR … … 428 424 %endif 429 425 430 ; Save the pCtx pointer 426 ; Save the pCtx pointer. 431 427 push xSI 432 428 433 ; Save LDTR 429 ; Save LDTR. 434 430 xor eax, eax 435 431 sldt ax 436 432 push xAX 437 433 438 ; The TR limit is reset to 0x67; restore it manually 434 ; The TR limit is reset to 0x67; restore it manually. 
439 435 str eax 440 436 push xAX 441 437 442 ; V MXonly saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!438 ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly! 443 439 sub xSP, xS*2 444 440 sgdt [xSP] … … 448 444 449 445 %ifdef VBOX_WITH_DR6_EXPERIMENT 450 ; RestoreDR6 - experiment, not safe!446 ; Load DR6 - experiment, not safe! 451 447 mov xBX, [xSI + CPUMCTX.dr6] 452 448 mov dr6, xBX 453 449 %endif 454 450 455 ; Restore CR2451 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction). 456 452 mov rbx, qword [xSI + CPUMCTX.cr2] 457 453 mov rdx, cr2 … … 463 459 mov eax, VMX_VMCS_HOST_RSP 464 460 vmwrite xAX, xSP 465 ; /* Note: assumes success... */466 ; /* Don't mess with ESP anymore!! */467 468 ; /* Restore Guest's general purpose registers. */461 ; Note: assumes success! 462 ; Don't mess with ESP anymore!!! 463 464 ; Restore Guest's general purpose registers. 469 465 mov rax, qword [xSI + CPUMCTX.eax] 470 466 mov rbx, qword [xSI + CPUMCTX.ebx] … … 481 477 mov r15, qword [xSI + CPUMCTX.r15] 482 478 483 ; resume or start?479 ; Resume or start? 484 480 cmp xDI, 0 ; fResume 485 je .vmlau ch64_lauch486 487 ; /* Restore edi & esi. */481 je .vmlaunch64_launch 482 483 ; Restore edi & esi. 488 484 mov rdi, qword [xSI + CPUMCTX.edi] 489 485 mov rsi, qword [xSI + CPUMCTX.esi] 490 486 491 487 vmresume 492 jmp .vmlaunch64_done; ; /* here if vmresume detected a failure. */493 494 .vmlau ch64_lauch:495 ; /* Restore rdi & rsi. */488 jmp .vmlaunch64_done; ; Here if vmresume detected a failure. 489 490 .vmlaunch64_launch: 491 ; Restore rdi & rsi. 496 492 mov rdi, qword [xSI + CPUMCTX.edi] 497 493 mov rsi, qword [xSI + CPUMCTX.esi] 498 494 499 495 vmlaunch 500 jmp .vmlaunch64_done; ; /* here if vmlaunch detected a failure. */496 jmp .vmlaunch64_done; ; Here if vmlaunch detected a failure. 501 497 502 498 ALIGNCODE(16) … … 533 529 %endif 534 530 535 pop xAX ; the guest edi we pushed above531 pop xAX ; The guest edi we pushed above 536 532 mov qword [xDI + CPUMCTX.edi], rax 537 533 538 534 %ifdef VBOX_WITH_DR6_EXPERIMENT 539 ; Save DR6 - experiment, not safe!535 ; Restore DR6 - experiment, not safe! 540 536 mov xAX, dr6 541 537 mov [xDI + CPUMCTX.dr6], xAX … … 543 539 544 540 ; Restore TSS selector; must mark it as not busy before using ltr (!) 545 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p) 541 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p). 546 542 ; @todo get rid of sgdt 547 pop xBX ; saved TR543 pop xBX ; Saved TR 548 544 sub xSP, xS*2 549 545 sgdt [xSP] 550 546 mov xAX, xBX 551 and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.547 and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset. 552 548 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset. 553 and dword [xAX + 4], ~0200h ; clear busy flag (2nd type2 bit)549 and dword [xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit). 554 550 ltr bx 555 551 add xSP, xS*2 556 552 557 pop xAX ; saved LDTR553 pop xAX ; Saved LDTR 558 554 lldt ax 559 555 … … 561 557 562 558 %ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE 563 ; Save the guest MSRs and load the host MSRs 559 ; Save the guest MSRs and load the host MSRs. 
564 560 LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE 565 561 LOADHOSTMSREX MSR_K8_SF_MASK, CPUMCTX.msrSFMASK … … 574 570 575 571 %ifdef VMX_USE_CACHED_VMCS_ACCESSES 576 pop xDX ; saved pCache572 pop xDX ; Saved pCache 577 573 578 574 mov ecx, [xDX + VMCSCACHE.Read.cValidEntries] 579 cmp ecx, 0 ; can't happen575 cmp ecx, 0 ; Can't happen 580 576 je .no_cached_reads 581 577 jmp .cached_read … … 591 587 592 588 %ifdef VBOX_WITH_OLD_VTX_CODE 593 ; Save CR2 for EPT589 ; Restore CR2 into VMCS-cache field (for EPT). 594 590 mov xAX, cr2 595 591 mov [xDX + VMCSCACHE.cr2], xAX … … 597 593 %endif 598 594 599 ; Restore segment registers 595 ; Restore segment registers. 600 596 MYPOPSEGS xAX, ax 601 597 602 ; Restore general purpose registers 598 ; Restore general purpose registers. 603 599 MYPOPAD 604 600 … … 612 608 613 609 .vmxstart64_invalid_vmcs_ptr: 614 ; Restore base and limit of the IDTR & GDTR 610 ; Restore base and limit of the IDTR & GDTR. 615 611 lidt [xSP] 616 612 add xSP, xS*2 … … 619 615 620 616 ; Restore TSS selector; must mark it as not busy before using ltr (!) 621 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p) 617 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p). 622 618 ; @todo get rid of sgdt 623 pop xBX ; saved TR619 pop xBX ; Saved TR 624 620 sub xSP, xS*2 625 621 sgdt [xSP] 626 622 mov xAX, xBX 627 and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.623 and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset. 628 624 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset. 629 and dword [xAX + 4], ~0200h ; clear busy flag (2nd type2 bit)625 and dword [xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit). 630 626 ltr bx 631 627 add xSP, xS*2 632 628 633 pop xAX ; saved LDTR629 pop xAX ; Saved LDTR 634 630 lldt ax 635 631 … … 637 633 638 634 %ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE 639 ; Load the host MSRs 635 ; Load the host MSRs. Don't bother saving the guest MSRs as vmlaunch/vmresume failed. 640 636 LOADHOSTMSR MSR_K8_KERNEL_GS_BASE 641 637 LOADHOSTMSR MSR_K8_SF_MASK … … 653 649 %endif 654 650 655 ; Restore segment registers 651 ; Restore segment registers. 656 652 MYPOPSEGS xAX, ax 657 653 … … 662 658 663 659 .vmxstart64_start_failed: 664 ; Restore base and limit of the IDTR & GDTR 660 ; Restore base and limit of the IDTR & GDTR. 665 661 lidt [xSP] 666 662 add xSP, xS*2 … … 669 665 670 666 ; Restore TSS selector; must mark it as not busy before using ltr (!) 671 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p) 667 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p). 672 668 ; @todo get rid of sgdt 673 pop xBX ; saved TR669 pop xBX ; Saved TR 674 670 sub xSP, xS*2 675 671 sgdt [xSP] 676 672 mov xAX, xBX 677 and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.673 and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset. 678 674 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset. 679 and dword [xAX + 4], ~0200h ; clear busy flag (2nd type2 bit)675 and dword [xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit). 680 676 ltr bx 681 677 add xSP, xS*2 682 678 683 pop xAX ; saved LDTR679 pop xAX ; Saved LDTR 684 680 lldt ax 685 681 686 pop xSI ; pCtx (needed in rsi by the macros below) 682 pop xSI ; pCtx (needed in rsi by the macros below). 687 683 688 684 %ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE 689 ; Load the host MSRs 685 ; Load the host MSRs. Don't bother saving the guest MSRs as vmlaunch/vmresume failed. 
690 686 LOADHOSTMSR MSR_K8_KERNEL_GS_BASE 691 687 LOADHOSTMSR MSR_K8_SF_MASK … … 703 699 %endif 704 700 705 ; Restore segment registers 701 ; Restore segment registers. 706 702 MYPOPSEGS xAX, ax 707 703 … … 740 736 pushf 741 737 742 ;/* Manual save and restore: 743 ; * - General purpose registers except RIP, RSP, RAX 744 ; * 745 ; * Trashed: 746 ; * - CR2 (we don't care) 747 ; * - LDTR (reset to 0) 748 ; * - DRx (presumably not changed at all) 749 ; * - DR7 (reset to 0x400) 750 ; */ 751 752 ;/* Save all general purpose host registers. */ 738 ; Manual save and restore: 739 ; - General purpose registers except RIP, RSP, RAX 740 ; 741 ; Trashed: 742 ; - CR2 (we don't care) 743 ; - LDTR (reset to 0) 744 ; - DRx (presumably not changed at all) 745 ; - DR7 (reset to 0x400) 746 747 ; Save all general purpose host registers. 753 748 MYPUSHAD 754 749 755 ; /* Save the Guest CPU context pointer. */750 ; Save the Guest CPU context pointer. 756 751 mov xSI, [xBP + xS*2 + RTHCPHYS_CB*2] ; pCtx 757 push xSI ; push for saving the state at the end758 759 ; save host fs, gs, sysenter msr etc752 push xSI ; push for saving the state at the end 753 754 ; Save host fs, gs, sysenter msr etc. 760 755 mov xAX, [xBP + xS*2] ; pVMCBHostPhys (64 bits physical address; x86: take low dword only) 761 756 push xAX ; save for the vmload after vmrun 762 757 vmsave 763 758 764 ; setup eax for VMLOAD759 ; Setup eax for VMLOAD. 765 760 mov xAX, [xBP + xS*2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only) 766 761 767 ; /* Restore Guest's general purpose registers. */768 ; /* EAX is loaded from the VMCB by VMRUN */762 ; Restore Guest's general purpose registers. 763 ; eax is loaded from the VMCB by VMRUN. 769 764 mov ebx, [xSI + CPUMCTX.ebx] 770 765 mov ecx, [xSI + CPUMCTX.ecx] … … 774 769 mov esi, [xSI + CPUMCTX.esi] 775 770 776 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch 771 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch. 777 772 clgi 778 773 sti 779 774 780 ; load guest fs, gs, sysenter msr etc775 ; Load guest fs, gs, sysenter msr etc. 781 776 vmload 782 ; run the VM777 ; Run the VM. 783 778 vmrun 784 779 785 ; /* EAX is in the VMCB already; we can use it here. */786 787 ; save guest fs, gs, sysenter msr etc780 ; eax is in the VMCB already; we can use it here. 781 782 ; Save guest fs, gs, sysenter msr etc. 788 783 vmsave 789 784 790 ; load host fs, gs, sysenter msr etc791 pop xAX ; pushed above785 ; Load host fs, gs, sysenter msr etc. 786 pop xAX ; Pushed above 792 787 vmload 793 788 … … 805 800 mov [ss:xAX + CPUMCTX.ebp], ebp 806 801 807 ; Restore general purpose registers 802 ; Restore general purpose registers. 808 803 MYPOPAD 809 804 … … 829 824 ALIGNCODE(16) 830 825 BEGINPROC MY_NAME(SVMR0VMRun64) 831 ; fake a cdecl stack frame826 ; Fake a cdecl stack frame 832 827 %ifdef ASM_CALL64_GCC 833 828 push rdx … … 844 839 pushf 845 840 846 ; /*Manual save and restore:847 ; *- General purpose registers except RIP, RSP, RAX848 ; *849 ; *Trashed:850 ; *- CR2 (we don't care)851 ; *- LDTR (reset to 0)852 ; *- DRx (presumably not changed at all)853 ; *- DR7 (reset to 0x400)854 ; */855 856 ; /* Save all general purpose host registers. 
*/841 ; Manual save and restore: 842 ; - General purpose registers except RIP, RSP, RAX 843 ; 844 ; Trashed: 845 ; - CR2 (we don't care) 846 ; - LDTR (reset to 0) 847 ; - DRx (presumably not changed at all) 848 ; - DR7 (reset to 0x400) 849 ; 850 851 ; Save all general purpose host registers. 857 852 MYPUSHAD 858 853 859 ; /* Save the Guest CPU context pointer. */854 ; Save the Guest CPU context pointer. 860 855 mov rsi, [rbp + xS*2 + RTHCPHYS_CB*2] ; pCtx 861 856 push rsi ; push for saving the state at the end 862 857 863 ; save host fs, gs, sysenter msr etc858 ; Save host fs, gs, sysenter msr etc. 864 859 mov rax, [rbp + xS*2] ; pVMCBHostPhys (64 bits physical address; x86: take low dword only) 865 push rax ; save for the vmload after vmrun860 push rax ; Save for the vmload after vmrun 866 861 vmsave 867 862 868 ; setup eax for VMLOAD863 ; Setup eax for VMLOAD. 869 864 mov rax, [rbp + xS*2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only) 870 865 871 ; /* Restore Guest's general purpose registers. */872 ; /* RAX is loaded from the VMCB by VMRUN */866 ; Restore Guest's general purpose registers. 867 ; rax is loaded from the VMCB by VMRUN. 873 868 mov rbx, qword [xSI + CPUMCTX.ebx] 874 869 mov rcx, qword [xSI + CPUMCTX.ecx] … … 886 881 mov rsi, qword [xSI + CPUMCTX.esi] 887 882 888 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch 883 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch. 889 884 clgi 890 885 sti 891 886 892 ; load guest fs, gs, sysenter msr etc887 ; Load guest fs, gs, sysenter msr etc. 893 888 vmload 894 ; run the VM889 ; Run the VM. 895 890 vmrun 896 891 897 ; /* RAX is in the VMCB already; we can use it here. */898 899 ; save guest fs, gs, sysenter msr etc892 ; rax is in the VMCB already; we can use it here. 893 894 ; Save guest fs, gs, sysenter msr etc. 900 895 vmsave 901 896 902 ; load host fs, gs, sysenter msr etc897 ; Load host fs, gs, sysenter msr etc. 903 898 pop rax ; pushed above 904 899 vmload … … 925 920 mov qword [rax + CPUMCTX.r15], r15 926 921 927 ; Restore general purpose registers 922 ; Restore general purpose registers. 928 923 MYPOPAD 929 924 -
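The sequence that several of the hunks above re-comment is the TR restore: ltr faults if the target TSS descriptor is already marked busy, so the code clears the busy bit in the GDT entry before reloading the task register. Pulled out of the crammed diff for readability, the pattern reads roughly as follows; this is a condensed sketch using the file's own width-neutral names (xSP, xS, xAX, xBX) and assumes the previously saved TR selector has already been popped into bx, as in the hunks above.

    ; Sketch of the TR-restore pattern shown in the hunks above (not new code).
    sub     xSP, xS*2               ; make room for sgdt's limit:base
    sgdt    [xSP]                   ; current GDTR
    mov     xAX, xBX                ; saved TR selector
    and     al, 0F8h                ; mask TI/RPL bits -> descriptor offset
    add     xAX, [xSP + 2]          ; GDT base + descriptor offset
    and     dword [xAX + 4], ~0200h ; clear the TSS busy bit (2nd type bit)
    ltr     bx                      ; now safe to reload TR
    add     xSP, xS*2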
trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac
r45875 r46099 588 588 mov rbp, rsp 589 589 590 ; Make sure VT-x instructions are allowed 590 ; Make sure VT-x instructions are allowed. 591 591 mov rax, cr4 592 592 or rax, X86_CR4_VMXE 593 593 mov cr4, rax 594 594 595 ; /* Enter VMX Root Mode */595 ; Enter VMX Root Mode. 596 596 vmxon [rbp + 8 + 8] 597 597 jnc .vmxon_success … … 621 621 push qword [rbp + 16 + 8]; 622 622 623 ; /* Save segment registers */623 ; Save segment registers. 624 624 MYPUSHSEGS rax 625 625 626 626 %ifdef VMX_USE_CACHED_VMCS_ACCESSES 627 ; Flush the VMCS write cache first (before any other vmreads/vmwrites!) 627 ; Flush the VMCS write cache first (before any other vmreads/vmwrites!). 628 628 mov rbx, [rbp + 24 + 8] ; pCache 629 629 … … 662 662 mov qword [rbx + VMCSCACHE.uPos], 3 663 663 %endif 664 ; Save the pCache pointer 664 ; Save the pCache pointer. 665 665 push rbx 666 666 %endif 667 667 668 ; Save the host state that's relevant in the temporary 64 bits mode668 ; Save the host state that's relevant in the temporary 64-bit mode. 669 669 mov rdx, cr0 670 670 mov eax, VMX_VMCS_HOST_CR0 … … 697 697 %endif 698 698 699 ; hopefully we can ignore TR (we restore it anyway on the way back to 32 bits mode)700 701 ; /* First we have to save some final CPU context registers. */699 ; Hopefully we can ignore TR (we restore it anyway on the way back to 32-bit mode). 700 701 ; First we have to save some final CPU context registers. 702 702 lea rdx, [.vmlaunch64_done wrt rip] 703 mov rax, VMX_VMCS_HOST_RIP ; /* return address (too difficult to continue after VMLAUNCH?) */703 mov rax, VMX_VMCS_HOST_RIP ; Return address (too difficult to continue after VMLAUNCH?). 704 704 vmwrite rax, rdx 705 ;/* Note: assumes success... */ 706 707 ;/* Manual save and restore: 708 ; * - General purpose registers except RIP, RSP 709 ; * 710 ; * Trashed: 711 ; * - CR2 (we don't care) 712 ; * - LDTR (reset to 0) 713 ; * - DRx (presumably not changed at all) 714 ; * - DR7 (reset to 0x400) 715 ; * - EFLAGS (reset to RT_BIT(1); not relevant) 716 ; * 717 ; */ 705 ; Note: assumes success! 706 707 ; Manual save and restore: 708 ; - General purpose registers except RIP, RSP 709 ; 710 ; Trashed: 711 ; - CR2 (we don't care) 712 ; - LDTR (reset to 0) 713 ; - DRx (presumably not changed at all) 714 ; - DR7 (reset to 0x400) 715 ; - EFLAGS (reset to RT_BIT(1); not relevant) 718 716 719 717 %ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE 720 ; Load the guest LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs 718 ; Load the guest LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs. 721 719 LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR 722 720 LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR … … 737 735 push rsi 738 736 739 ; Restore CR2737 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction). 740 738 mov rbx, qword [rsi + CPUMCTX.cr2] 741 739 mov rdx, cr2 … … 747 745 mov eax, VMX_VMCS_HOST_RSP 748 746 vmwrite rax, rsp 749 ; /* Note: assumes success... */750 ; /* Don't mess with ESP anymore!! */751 752 ; /* Restore Guest's general purpose registers. */747 ; Note: assumes success! 748 ; Don't mess with ESP anymore!!! 749 750 ; Save Guest's general purpose registers. 753 751 mov rax, qword [rsi + CPUMCTX.eax] 754 752 mov rbx, qword [rsi + CPUMCTX.ebx] … … 765 763 mov r15, qword [rsi + CPUMCTX.r15] 766 764 767 ; /* Restore rdi & rsi. */765 ; Save rdi & rsi. 768 766 mov rdi, qword [rsi + CPUMCTX.edi] 769 767 mov rsi, qword [rsi + CPUMCTX.esi] 770 768 771 769 vmlaunch 772 jmp .vmlaunch64_done; ; /* here if vmlaunch detected a failure. 
*/770 jmp .vmlaunch64_done; ; Here if vmlaunch detected a failure. 773 771 774 772 ALIGNCODE(16) … … 799 797 %endif 800 798 801 pop rax ; the guest edi we pushed above799 pop rax ; The guest edi we pushed above 802 800 mov qword [rdi + CPUMCTX.edi], rax 803 801 … … 817 815 818 816 %ifdef VMX_USE_CACHED_VMCS_ACCESSES 819 pop rdi ; saved pCache817 pop rdi ; Saved pCache 820 818 821 819 %ifdef VBOX_WITH_CRASHDUMP_MAGIC … … 830 828 831 829 mov ecx, [rdi + VMCSCACHE.Read.cValidEntries] 832 cmp ecx, 0 ; can't happen830 cmp ecx, 0 ; Can't happen 833 831 je .no_cached_reads 834 832 jmp .cached_read … … 844 842 845 843 %ifdef VBOX_WITH_OLD_VTX_CODE 846 ; Save CR2 for EPT844 ; Restore CR2 into VMCS-cache field (for EPT). 847 845 mov rax, cr2 848 846 mov [rdi + VMCSCACHE.cr2], rax … … 853 851 %endif 854 852 855 ; Restore segment registers 853 ; Restore segment registers. 856 854 MYPOPSEGS rax 857 855 … … 870 868 %endif 871 869 872 ; Write back the data and disable the VMCS 873 vmclear qword [rsp] ; Pushed pVMCS870 ; Write back the data and disable the VMCS. 871 vmclear qword [rsp] ; Pushed pVMCS 874 872 add rsp, 8 875 873 876 874 .vmstart64_vmxoff_end: 877 ; Disable VMX root mode 875 ; Disable VMX root mode. 878 876 vmxoff 879 877 .vmstart64_vmxon_failed: … … 911 909 %endif 912 910 913 ; Restore segment registers 911 ; Restore segment registers. 914 912 MYPOPSEGS rax 915 913 … … 933 931 %endif 934 932 935 ; Restore segment registers 933 ; Restore segment registers. 936 934 MYPOPSEGS rax 937 935 … … 955 953 pushf 956 954 957 ;/* Manual save and restore: 958 ; * - General purpose registers except RIP, RSP, RAX 959 ; * 960 ; * Trashed: 961 ; * - CR2 (we don't care) 962 ; * - LDTR (reset to 0) 963 ; * - DRx (presumably not changed at all) 964 ; * - DR7 (reset to 0x400) 965 ; */ 966 967 ;/* Save the Guest CPU context pointer. */ 968 push rsi ; push for saving the state at the end 969 970 ; save host fs, gs, sysenter msr etc 955 ; Manual save and restore: 956 ; - General purpose registers except RIP, RSP, RAX 957 ; 958 ; Trashed: 959 ; - CR2 (we don't care) 960 ; - LDTR (reset to 0) 961 ; - DRx (presumably not changed at all) 962 ; - DR7 (reset to 0x400) 963 964 ; Save the Guest CPU context pointer. 965 push rsi ; Push for saving the state at the end 966 967 ; Save host fs, gs, sysenter msr etc 971 968 mov rax, [rbp + 8 + 8] ; pVMCBHostPhys (64 bits physical address) 972 push rax ; save for the vmload after vmrun969 push rax ; Save for the vmload after vmrun 973 970 vmsave 974 971 975 ; setup eax for VMLOAD972 ; Setup eax for VMLOAD 976 973 mov rax, [rbp + 8 + 8 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address) 977 974 978 ; /* Restore Guest's general purpose registers. */979 ; /* RAX is loaded from the VMCB by VMRUN */975 ; Restore Guest's general purpose registers. 976 ; rax is loaded from the VMCB by VMRUN. 980 977 mov rbx, qword [rsi + CPUMCTX.ebx] 981 978 mov rcx, qword [rsi + CPUMCTX.ecx] … … 993 990 mov rsi, qword [rsi + CPUMCTX.esi] 994 991 995 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch 992 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch. 996 993 clgi 997 994 sti 998 995 999 ; load guest fs, gs, sysenter msr etc996 ; Load guest fs, gs, sysenter msr etc 1000 997 vmload 1001 ; run the VM998 ; Run the VM 1002 999 vmrun 1003 1000 1004 ; /* RAX is in the VMCB already; we can use it here. 
*/1005 1006 ; save guest fs, gs, sysenter msr etc1001 ; rax is in the VMCB already; we can use it here. 1002 1003 ; Save guest fs, gs, sysenter msr etc. 1007 1004 vmsave 1008 1005 1009 ; load host fs, gs, sysenter msr etc1010 pop rax ; pushed above1006 ; Load host fs, gs, sysenter msr etc. 1007 pop rax ; Pushed above 1011 1008 vmload 1012 1009
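For orientation, the SVM run path that the second file's hunks clean up brackets vmrun with vmsave/vmload pairs for the host and guest VMCBs. Condensed from the hunks above into one readable sequence (a sketch only, reusing the offsets and names shown there, with the guest GPR loads and the post-exit state saving omitted):

    mov     rax, [rbp + 8 + 8]                ; pVMCBHostPhys
    push    rax                               ; kept for the vmload after vmrun
    vmsave                                    ; save host fs, gs, sysenter MSRs etc.
    mov     rax, [rbp + 8 + 8 + RTHCPHYS_CB]  ; pVMCBPhys (guest VMCB)
    clgi                                      ; clear GIF; with sti set, external
    sti                                       ;  interrupts force a world switch
    vmload                                    ; load guest fs, gs, sysenter MSRs etc.
    vmrun                                     ; run the guest; rax comes from the VMCB
    vmsave                                    ; save guest fs, gs, sysenter MSRs etc.
    pop     rax                               ; pVMCBHostPhys again
    vmload                                    ; restore host fs, gs, sysenter MSRs etc.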