Changeset 41328 in vbox for trunk/src/VBox/VMM/VMMR0
- Timestamp: May 16, 2012, 10:57:35 AM (13 years ago)
- File: trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp (1 edited)
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
r41327 r41328 61 61 #endif 62 62 63 63 64 /******************************************************************************* 64 65 * Global Variables * … … 73 74 #endif 74 75 76 75 77 /******************************************************************************* 76 78 * Local Functions * 77 79 *******************************************************************************/ 78 static void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc, PCPUMCTX pCtx);79 80 static DECLCALLBACK(void) hmR0VmxSetupTLBEPT(PVM pVM, PVMCPU pVCpu); 80 81 static DECLCALLBACK(void) hmR0VmxSetupTLBVPID(PVM pVM, PVMCPU pVCpu); … … 85 86 static void hmR0VmxUpdateExceptionBitmap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx); 86 87 static void hmR0VmxSetMSRPermission(PVMCPU pVCpu, unsigned ulMSR, bool fRead, bool fWrite); 87 88 88 static void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc, PCPUMCTX pCtx); 89 90 91 /** 92 * Updates error from VMCS to HWACCMCPU's lasterror record. 93 * 94 * @param pVM Pointer to the VM. 95 * @param pVCpu Pointer to the VMCPU. 96 * @param rc The error code. 97 */ 89 98 static void hmR0VmxCheckError(PVM pVM, PVMCPU pVCpu, int rc) 90 99 { … … 99 108 } 100 109 110 101 111 /** 102 * Sets up and activates VT-x on the current CPU 112 * Sets up and activates VT-x on the current CPU. 103 113 * 104 114 * @returns VBox status code. 105 * @param pCpu CPU info struct106 * @param pVM The VM to operate on. (can be NULL after a resume!!)107 * @param pvCpuPage Pointer to the global cpupage.108 * @param HCPhysCpuPage Physical address of the global cpupage.115 * @param pCpu Pointer to the CPU info struct. 116 * @param pVM Pointer to the VM. (can be NULL after a resume!!) 117 * @param pvCpuPage Pointer to the global CPU page. 118 * @param HCPhysCpuPage Physical address of the global CPU page. 109 119 */ 110 120 VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage) … … 112 122 AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER); 113 123 AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER); 114 NOREF(pCpu);115 124 116 125 if (pVM) … … 127 136 return VERR_VMX_IN_VMX_ROOT_MODE; 128 137 129 /* Make sure the VMX instructions don't cause #UD faults. */ 130 ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE); 131 132 /* Enter VMX Root Mode. */ 138 ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE); /* Make sure the VMX instructions don't cause #UD faults. */ 139 140 /* 141 * Enter VM root mode. 142 */ 133 143 int rc = VMXEnable(HCPhysCpuPage); 134 144 if (RT_FAILURE(rc)) … … 156 166 } 157 167 168 158 169 /** 159 * Deactivates VT-x on the current CPU 170 * Deactivates VT-x on the current CPU. 160 171 * 161 172 * @returns VBox status code. 162 * @param pCpu CPU info struct163 * @param pvCpuPage Pointer to the global cpupage.164 * @param HCPhysCpuPage Physical address of the global cpupage.173 * @param pCpu Pointer to the CPU info struct. 174 * @param pvCpuPage Pointer to the global CPU page. 175 * @param HCPhysCpuPage Physical address of the global CPU page. 165 176 */ 166 177 VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage) … … 182 193 } 183 194 195 184 196 /** 185 * Does Ring-0 per VM VT-x init .197 * Does Ring-0 per VM VT-x initialization. 186 198 * 187 199 * @returns VBox status code. 188 * @param pVM The VM to operate on.200 * @param pVM Pointer to the VM. 
189 201 */ 190 202 VMMR0DECL(int) VMXR0InitVM(PVM pVM) … … 309 321 } 310 322 323 311 324 /** 312 325 * Does Ring-0 per VM VT-x termination. 313 326 * 314 327 * @returns VBox status code. 315 * @param pVM The VM to operate on.328 * @param pVM Pointer to the VM. 316 329 */ 317 330 VMMR0DECL(int) VMXR0TermVM(PVM pVM) … … 379 392 } 380 393 394 381 395 /** 382 * Sets up VT-x for the specified VM 396 * Sets up VT-x for the specified VM. 383 397 * 384 398 * @returns VBox status code. 385 * @param pVM The VM to operate on.399 * @param pVM Pointer to the VM. 386 400 */ 387 401 VMMR0DECL(int) VMXR0SetupVM(PVM pVM) … … 445 459 *(uint32_t *)pVCpu->hwaccm.s.vmx.pvVMCS = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info); 446 460 447 /* Clear VM Control Structure. */ 461 /* 462 * Clear and activate the VMCS. 463 */ 448 464 Log(("HCPhysVMCS = %RHp\n", pVCpu->hwaccm.s.vmx.HCPhysVMCS)); 449 465 rc = VMXClearVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS); … … 451 467 goto vmx_end; 452 468 453 /* Activate the VM Control Structure. */454 469 rc = VMXActivateVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS); 455 470 if (RT_FAILURE(rc)) 456 471 goto vmx_end; 457 472 458 /* VMX_VMCS_CTRL_PIN_EXEC_CONTROLS 473 /* 474 * VMX_VMCS_CTRL_PIN_EXEC_CONTROLS 459 475 * Set required bits to one and zero according to the MSR capabilities. 460 476 */ 461 val = pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0; 462 /* External and non-maskable interrupts cause VM-exits. */ 463 val |= VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT | VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT; 464 /* enable the preemption timer. */ 477 val = pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0; 478 val |= VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT /* External interrupts */ 479 | VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT; /* Non-maskable interrupts */ 480 481 /* 482 * Enable the VMX preemption timer. 483 */ 465 484 if (pVM->hwaccm.s.vmx.fUsePreemptTimer) 466 485 val |= VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER; … … 470 489 AssertRC(rc); 471 490 472 /* VMX_VMCS_CTRL_PROC_EXEC_CONTROLS 491 /* 492 * VMX_VMCS_CTRL_PROC_EXEC_CONTROLS 473 493 * Set required bits to one and zero according to the MSR capabilities. 474 494 */ 475 495 val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.disallowed0; 476 496 /* Program which event cause VM-exits and which features we want to use. */ 477 val = val | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT 478 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET 479 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT 480 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT 481 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT 482 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT 483 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT; /* don't execute mwait or else we'll idle inside the guest (host thinks the cpu load is high) */ 497 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT 498 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET 499 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT 500 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT 501 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT 502 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT 503 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT; /* don't execute mwait or else we'll idle inside 504 the guest (host thinks the cpu load is high) */ 484 505 485 506 /* Without nested paging we should intercept invlpg and cr3 mov instructions. 
*/ 486 507 if (!pVM->hwaccm.s.fNestedPaging) 487 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT 488 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT 489 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT; 490 491 /* Note: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT might cause a vmlaunch failure with an invalid control fields error. (combined with some other exit reasons) */ 508 { 509 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT 510 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT 511 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT; 512 } 513 514 /* 515 * VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT might cause a vmlaunch 516 * failure with an invalid control fields error. (combined with some other exit reasons) 517 */ 492 518 if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW) 493 519 { … … 519 545 if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL) 520 546 { 521 /* VMX_VMCS_CTRL_PROC_EXEC_CONTROLS2 547 /* 548 * VMX_VMCS_CTRL_PROC_EXEC_CONTROLS2 522 549 * Set required bits to one and zero according to the MSR capabilities. 523 550 */ … … 545 572 } 546 573 547 /* VMX_VMCS_CTRL_CR3_TARGET_COUNT 574 /* 575 * VMX_VMCS_CTRL_CR3_TARGET_COUNT 548 576 * Set required bits to one and zero according to the MSR capabilities. 549 577 */ … … 551 579 AssertRC(rc); 552 580 553 /* Forward all exception except #NM & #PF to the guest. 581 /* 582 * Forward all exception except #NM & #PF to the guest. 554 583 * We always need to check pagefaults since our shadow page table can be out of sync. 555 * And we always lazily sync the FPU & XMM state. 584 * And we always lazily sync the FPU & XMM state. . 556 585 */ 557 586 … … 564 593 */ 565 594 566 /* Don't filter page faults; all of them should cause a switch. */ 595 /* 596 * Don't filter page faults, all of them should cause a world switch. 597 */ 567 598 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PAGEFAULT_ERROR_MASK, 0); 568 rc |= VMXWriteVMCS(VMX_VMCS_CTRL_PAGEFAULT_ERROR_MATCH, 0);569 599 AssertRC(rc); 570 571 /* Init TSC offset to zero. */ 600 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PAGEFAULT_ERROR_MATCH, 0); 601 AssertRC(rc); 602 572 603 rc = VMXWriteVMCS64(VMX_VMCS_CTRL_TSC_OFFSET_FULL, 0); 573 604 AssertRC(rc); 574 575 605 rc = VMXWriteVMCS64(VMX_VMCS_CTRL_IO_BITMAP_A_FULL, 0); 576 606 AssertRC(rc); 577 578 607 rc = VMXWriteVMCS64(VMX_VMCS_CTRL_IO_BITMAP_B_FULL, 0); 579 608 AssertRC(rc); 580 609 581 /* Set the MSR bitmap address. */ 610 /* 611 * Set the MSR bitmap address. 612 */ 582 613 if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS) 583 614 { … … 600 631 601 632 #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE 602 /* Set the guest & host MSR load/store physical addresses. */ 633 /* 634 * Set the guest & host MSR load/store physical addresses. 635 */ 603 636 Assert(pVCpu->hwaccm.s.vmx.pGuestMSRPhys); 604 637 rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL, pVCpu->hwaccm.s.vmx.pGuestMSRPhys); … … 614 647 rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_MSR_LOAD_COUNT, 0); 615 648 AssertRC(rc); 616 617 649 rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_STORE_COUNT, 0); 618 650 AssertRC(rc); … … 635 667 AssertRC(rc); 636 668 637 /* Clear VM Control Structure. Marking it inactive, clearing implementation specific data and writing back VMCS data to memory. */ 669 /* 670 * Clear VMCS, marking it inactive. Clear implementation specific data and writing back 671 * VMCS data back to memory. 
672 */ 638 673 rc = VMXClearVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS); 639 674 AssertRC(rc); 640 675 641 /* Configure the VMCS read cache. */ 676 /* 677 * Configure the VMCS read cache. 678 */ 642 679 PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache; 643 680 … … 659 696 VMXSetupCachedReadVMCS(pCache, VMX_VMCS64_GUEST_IDTR_BASE); 660 697 661 VMX_SETUP_SELREG(ES, pCache);662 VMX_SETUP_SELREG(SS, pCache);663 VMX_SETUP_SELREG(CS, pCache);664 VMX_SETUP_SELREG(DS, pCache);665 VMX_SETUP_SELREG(FS, pCache);666 VMX_SETUP_SELREG(GS, pCache);698 VMX_SETUP_SELREG(ES, pCache); 699 VMX_SETUP_SELREG(SS, pCache); 700 VMX_SETUP_SELREG(CS, pCache); 701 VMX_SETUP_SELREG(DS, pCache); 702 VMX_SETUP_SELREG(FS, pCache); 703 VMX_SETUP_SELREG(GS, pCache); 667 704 VMX_SETUP_SELREG(LDTR, pCache); 668 VMX_SETUP_SELREG(TR, pCache); 669 670 /* Status code VMCS reads. */ 705 VMX_SETUP_SELREG(TR, pCache); 706 707 /* 708 * Status code VMCS reads. 709 */ 671 710 VMXSetupCachedReadVMCS(pCache, VMX_VMCS32_RO_EXIT_REASON); 672 711 VMXSetupCachedReadVMCS(pCache, VMX_VMCS32_RO_VM_INSTR_ERROR); … … 689 728 } /* for each VMCPU */ 690 729 691 /* Choose the right TLB setup function. */ 730 /* 731 * Setup the right TLB function based on CPU capabilities. 732 */ 692 733 if (pVM->hwaccm.s.fNestedPaging && pVM->hwaccm.s.vmx.fVPID) 693 734 pVM->hwaccm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBBoth; … … 704 745 } 705 746 747 706 748 /** 707 * Sets the permission bits for the specified MSR 749 * Sets the permission bits for the specified MSR. 708 750 * 709 * @param pVCpu The VMCPU to operate on.710 * @param ulMSR MSR value711 * @param fRead Reading allowed/disallowed712 * @param fWrite W riting allowed/disallowed751 * @param pVCpu Pointer to the VMCPU. 752 * @param ulMSR The MSR value. 753 * @param fRead Whether reading is allowed. 754 * @param fWrite Whether writing is allowed. 713 755 */ 714 756 static void hmR0VmxSetMSRPermission(PVMCPU pVCpu, unsigned ulMSR, bool fRead, bool fWrite) … … 717 759 uint8_t *pMSRBitmap = (uint8_t *)pVCpu->hwaccm.s.vmx.pMSRBitmap; 718 760 719 /* Layout: 761 /* 762 * Layout: 720 763 * 0x000 - 0x3ff - Low MSR read bits 721 764 * 0x400 - 0x7ff - High MSR read bits … … 729 772 } 730 773 else 731 if ( 732 && 774 if ( ulMSR >= 0xC0000000 775 && ulMSR <= 0xC0001FFF) 733 776 { 734 777 /* AMD Sixth Generation x86 Processor MSRs */ … … 756 799 757 800 /** 758 * Injects an event (trap or external interrupt) 801 * Injects an event (trap or external interrupt). 759 802 * 760 803 * @returns VBox status code. Note that it may return VINF_EM_RESET to 761 804 * indicate a triple fault when injecting X86_XCPT_DF. 762 805 * 763 * @param pVM The VM to operate on.764 * @param pVCpu The VMCPU to operate on.765 * @param pCtx CPU Context766 * @param intInfo VMX interrupt info 767 * @param cbInstr Opcode length of faulting instruction 768 * @param errCode Error code (optional) 806 * @param pVM Pointer to the VM. 807 * @param pVCpu Pointer to the VMCPU. 808 * @param pCtx Pointer to the guest CPU Context. 809 * @param intInfo VMX interrupt info. 810 * @param cbInstr Opcode length of faulting instruction. 811 * @param errCode Error code (optional). 
769 812 */ 770 813 static int hmR0VmxInjectEvent(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t intInfo, uint32_t cbInstr, uint32_t errCode) … … 779 822 #ifdef VBOX_STRICT 780 823 if (iGate == 0xE) 781 LogFlow(("hmR0VmxInjectEvent: Injecting interrupt %d at %RGv error code=%08x CR2=%RGv intInfo=%08x\n", iGate, (RTGCPTR)pCtx->rip, errCode, pCtx->cr2, intInfo)); 782 else 783 if (iGate < 0x20) 784 LogFlow(("hmR0VmxInjectEvent: Injecting interrupt %d at %RGv error code=%08x\n", iGate, (RTGCPTR)pCtx->rip, errCode)); 824 { 825 LogFlow(("hmR0VmxInjectEvent: Injecting interrupt %d at %RGv error code=%08x CR2=%RGv intInfo=%08x\n", iGate, 826 (RTGCPTR)pCtx->rip, errCode, pCtx->cr2, intInfo)); 827 } 828 else if (iGate < 0x20) 829 { 830 LogFlow(("hmR0VmxInjectEvent: Injecting interrupt %d at %RGv error code=%08x\n", iGate, (RTGCPTR)pCtx->rip, 831 errCode)); 832 } 785 833 else 786 834 { … … 800 848 RTSEL sel; 801 849 802 /* Injecting events doesn't work right with real mode emulation. 850 /* 851 * Injecting events doesn't work right with real mode emulation. 803 852 * (#GP if we try to inject external hardware interrupts) 804 853 * Inject the interrupt or trap directly instead. … … 808 857 Log(("Manual interrupt/trap '%x' inject (real mode)\n", iGate)); 809 858 810 /* Check if the interrupt handler is present. */ 859 /* 860 * Check if the interrupt handler is present. 861 */ 811 862 if (iGate * 4 + 3 > pCtx->idtr.cbIdt) 812 863 { … … 835 886 ip = pCtx->ip; 836 887 837 /* Read the selector:offset pair of the interrupt handler. */ 888 /* 889 * Read the selector:offset pair of the interrupt handler. 890 */ 838 891 GCPhysHandler = (RTGCPHYS)pCtx->idtr.pIdt + iGate * 4; 839 892 rc = PGMPhysSimpleReadGCPhys(pVM, &offset, GCPhysHandler, sizeof(offset)); AssertRC(rc); … … 842 895 LogFlow(("IDT handler %04X:%04X\n", sel, offset)); 843 896 844 /* Construct the stack frame. */ 845 /** @todo should check stack limit. */ 897 /* 898 * Construct the stack frame. 899 */ 900 /** @todo Check stack limit. */ 846 901 pCtx->sp -= 2; 847 902 LogFlow(("ss:sp %04X:%04X eflags=%x\n", pCtx->ss, pCtx->sp, pCtx->eflags.u)); … … 854 909 rc = PGMPhysSimpleWriteGCPhys(pVM, pCtx->ssHid.u64Base + pCtx->sp, &ip, sizeof(ip)); AssertRC(rc); 855 910 856 /* Update the CPU state for executing the handler. */ 911 /* 912 * Update the CPU state for executing the handler. 913 */ 857 914 pCtx->rip = offset; 858 915 pCtx->cs = sel; 859 916 pCtx->csHid.u64Base = sel << 4; 860 pCtx->eflags.u &= ~(X86_EFL_IF |X86_EFL_TF|X86_EFL_RF|X86_EFL_AC);917 pCtx->eflags.u &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC); 861 918 862 919 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_SEGMENT_REGS; … … 864 921 } 865 922 866 /* Set event injection state. */ 923 /* 924 * Set event injection state. 925 */ 867 926 rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_IRQ_INFO, intInfo | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT)); 868 869 927 rc |= VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_INSTR_LENGTH, cbInstr); 870 928 rc |= VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_EXCEPTION_ERRCODE, errCode); … … 876 934 877 935 /** 878 * Checks for pending guest interrupts and injects them 936 * Checks for pending guest interrupts and injects them into the guest. 879 937 * 880 938 * @returns VBox status code. 881 * @param pVM The VM to operate on.882 * @param pVCpu The VMCPU to operate on.883 * @param pCtx CPU Context939 * @param pVM Pointer to the VM. 940 * @param pVCpu Pointer to the VMCPU. 941 * @param pCtx Pointer to the guest CPU context. 
884 942 */ 885 943 static int hmR0VmxCheckPendingInterrupt(PVM pVM, PVMCPU pVCpu, CPUMCTX *pCtx) … … 887 945 int rc; 888 946 889 /* Dispatch any pending interrupts. (injected before, but a VM exit occurred prematurely) */ 947 /* 948 * Dispatch any pending interrupts (injected before, but a VM exit occurred prematurely). 949 */ 890 950 if (pVCpu->hwaccm.s.Event.fPending) 891 951 { 892 Log(("CPU%d: Reinjecting event %RX64 %08x at %RGv cr2=%RX64\n", pVCpu->idCpu, pVCpu->hwaccm.s.Event.intInfo, pVCpu->hwaccm.s.Event.errCode, (RTGCPTR)pCtx->rip, pCtx->cr2)); 952 Log(("CPU%d: Reinjecting event %RX64 %08x at %RGv cr2=%RX64\n", pVCpu->idCpu, pVCpu->hwaccm.s.Event.intInfo, 953 pVCpu->hwaccm.s.Event.errCode, (RTGCPTR)pCtx->rip, pCtx->cr2)); 893 954 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatIntReinject); 894 955 rc = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, pVCpu->hwaccm.s.Event.intInfo, 0, pVCpu->hwaccm.s.Event.errCode); … … 899 960 } 900 961 901 /* If an active trap is already pending, then we must forward it first! */ 962 /* 963 * If an active trap is already pending, we must forward it first! 964 */ 902 965 if (!TRPMHasTrap(pVCpu)) 903 966 { … … 918 981 } 919 982 920 /* @todo SMI interrupts. */ 921 922 /* When external interrupts are pending, we should exit the VM when IF is set. */ 983 /** @todo SMI interrupts. */ 984 985 /* 986 * When external interrupts are pending, we should exit the VM when IF is set. 987 */ 923 988 if (VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC))) 924 989 { … … 934 999 /* else nothing to do but wait */ 935 1000 } 936 else 937 if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 1001 else if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 938 1002 { 939 1003 uint8_t u8Interrupt; 940 1004 941 1005 rc = PDMGetInterrupt(pVCpu, &u8Interrupt); 942 Log(("CPU%d: Dispatch interrupt: u8Interrupt=%x (%d) rc=%Rrc cs:rip=%04X:%RGv\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc, pCtx->cs, (RTGCPTR)pCtx->rip)); 1006 Log(("CPU%d: Dispatch interrupt: u8Interrupt=%x (%d) rc=%Rrc cs:rip=%04X:%RGv\n", pVCpu->idCpu, 1007 u8Interrupt, u8Interrupt, rc, pCtx->cs, (RTGCPTR)pCtx->rip)); 943 1008 if (RT_SUCCESS(rc)) 944 1009 { … … 962 1027 if (TRPMHasTrap(pVCpu)) 963 1028 { 964 uint8_t 1029 uint8_t u8Vector; 965 1030 rc = TRPMQueryTrapAll(pVCpu, &u8Vector, 0, 0, 0); 966 1031 AssertRC(rc); … … 968 1033 #endif 969 1034 970 if ( 1035 if ( (pCtx->eflags.u32 & X86_EFL_IF) 971 1036 && (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 972 1037 && TRPMHasTrap(pVCpu) … … 978 1043 RTGCUINT errCode; 979 1044 980 /* If a new event is pending, then dispatch it now. */ 1045 /* 1046 * If a new event is pending, dispatch it now. 1047 */ 981 1048 rc = TRPMQueryTrapAll(pVCpu, &u8Vector, &enmType, &errCode, 0); 982 1049 AssertRC(rc); … … 984 1051 Assert(enmType != TRPM_SOFTWARE_INT); 985 1052 986 /* Clear the pending trap. */ 1053 /* 1054 * Clear the pending trap. 1055 */ 987 1056 rc = TRPMResetTrap(pVCpu); 988 1057 AssertRC(rc); … … 993 1062 if (enmType == TRPM_TRAP) 994 1063 { 995 switch (u8Vector) { 996 case X86_XCPT_DF: 997 case X86_XCPT_TS: 998 case X86_XCPT_NP: 999 case X86_XCPT_SS: 1000 case X86_XCPT_GP: 1001 case X86_XCPT_PF: 1002 case X86_XCPT_AC: 1003 /* Valid error codes. 
*/ 1004 intInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID; 1005 break; 1006 default: 1007 break; 1064 switch (u8Vector) 1065 { 1066 case X86_XCPT_DF: 1067 case X86_XCPT_TS: 1068 case X86_XCPT_NP: 1069 case X86_XCPT_SS: 1070 case X86_XCPT_GP: 1071 case X86_XCPT_PF: 1072 case X86_XCPT_AC: 1073 { 1074 /* Valid error codes. */ 1075 intInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID; 1076 break; 1077 } 1078 1079 default: 1080 break; 1008 1081 } 1009 if (u8Vector == X86_XCPT_BP || u8Vector == X86_XCPT_OF) 1082 1083 if ( u8Vector == X86_XCPT_BP 1084 || u8Vector == X86_XCPT_OF) 1085 { 1010 1086 intInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SWEXCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT); 1087 } 1011 1088 else 1012 1089 intInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HWEXCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT); … … 1023 1100 } 1024 1101 1102 1025 1103 /** 1026 * Save the host state 1104 * Save the host state into the VMCS. 1027 1105 * 1028 1106 * @returns VBox status code. 1029 * @param pVM The VM to operate on.1030 * @param pVCpu The VMCPU to operate on.1107 * @param pVM Pointer to the VM. 1108 * @param pVCpu Pointer to the VMCPU. 1031 1109 */ 1032 1110 VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu) … … 1036 1114 1037 1115 /* 1038 * Host CPU Context 1116 * Host CPU Context. 1039 1117 */ 1040 1118 if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_HOST_CONTEXT) … … 1049 1127 uint64_t cr3; 1050 1128 1051 /* Control registers */ 1052 rc = VMXWriteVMCS(VMX_VMCS_HOST_CR0, ASMGetCR0()); 1129 /* 1130 * Control registers. 1131 */ 1132 rc = VMXWriteVMCS(VMX_VMCS_HOST_CR0, ASMGetCR0()); 1133 Log2(("VMX_VMCS_HOST_CR0 %08x\n", ASMGetCR0())); 1053 1134 #ifdef VBOX_WITH_HYBRID_32BIT_KERNEL 1054 1135 if (VMX_IS_64BIT_HOST_MODE()) 1055 1136 { 1056 1137 cr3 = hwaccmR0Get64bitCR3(); 1057 rc |= VMXWriteVMCS64(VMX_VMCS_HOST_CR3, 1138 rc |= VMXWriteVMCS64(VMX_VMCS_HOST_CR3, cr3); 1058 1139 } 1059 1140 else … … 1061 1142 { 1062 1143 cr3 = ASMGetCR3(); 1063 rc |= VMXWriteVMCS(VMX_VMCS_HOST_CR3, cr3); 1064 } 1065 rc |= VMXWriteVMCS(VMX_VMCS_HOST_CR4, ASMGetCR4()); 1144 rc |= VMXWriteVMCS(VMX_VMCS_HOST_CR3, cr3); 1145 } 1146 Log2(("VMX_VMCS_HOST_CR3 %08RX64\n", cr3)); 1147 rc |= VMXWriteVMCS(VMX_VMCS_HOST_CR4, ASMGetCR4()); 1148 Log2(("VMX_VMCS_HOST_CR4 %08x\n", ASMGetCR4())); 1066 1149 AssertRC(rc); 1067 Log2(("VMX_VMCS_HOST_CR0 %08x\n", ASMGetCR0())); 1068 Log2(("VMX_VMCS_HOST_CR3 %08RX64\n", cr3)); 1069 Log2(("VMX_VMCS_HOST_CR4 %08x\n", ASMGetCR4())); 1070 1071 /* Selector registers. */ 1150 1151 /* 1152 * Selector registers. 1153 */ 1072 1154 #ifdef VBOX_WITH_HYBRID_32BIT_KERNEL 1073 1155 if (VMX_IS_64BIT_HOST_MODE()) … … 1111 1193 Log2(("VMX_VMCS_HOST_FIELD_TR %08x\n", ASMGetTR())); 1112 1194 1113 /* GDTR & IDTR */ 1195 /* 1196 * GDTR & IDTR. 1197 */ 1114 1198 #ifdef VBOX_WITH_HYBRID_32BIT_KERNEL 1115 1199 if (VMX_IS_64BIT_HOST_MODE()) … … 1137 1221 } 1138 1222 1139 /* Save the base address of the TR selector. */ 1223 /* 1224 * Save the base address of the TR selector. 1225 */ 1140 1226 if (SelTR > gdtr.cbGdt) 1141 1227 { … … 1166 1252 } 1167 1253 1168 /* FS and GS base. */ 1254 /* 1255 * FS base and GS base. 
1256 */ 1169 1257 #if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) 1170 1258 if (VMX_IS_64BIT_HOST_MODE()) … … 1172 1260 Log2(("MSR_K8_FS_BASE = %RX64\n", ASMRdMsr(MSR_K8_FS_BASE))); 1173 1261 Log2(("MSR_K8_GS_BASE = %RX64\n", ASMRdMsr(MSR_K8_GS_BASE))); 1174 rc = VMXWriteVMCS64(VMX_VMCS_HOST_FS_BASE, ASMRdMsr(MSR_K8_FS_BASE));1175 rc |= VMXWriteVMCS64(VMX_VMCS_HOST_GS_BASE, ASMRdMsr(MSR_K8_GS_BASE));1262 rc = VMXWriteVMCS64(VMX_VMCS_HOST_FS_BASE, ASMRdMsr(MSR_K8_FS_BASE)); 1263 rc |= VMXWriteVMCS64(VMX_VMCS_HOST_GS_BASE, ASMRdMsr(MSR_K8_GS_BASE)); 1176 1264 } 1177 1265 #endif 1178 1266 AssertRC(rc); 1179 1267 1180 /* Sysenter MSRs. */ 1268 /* 1269 * Sysenter MSRs. 1270 */ 1181 1271 /** @todo expensive!! */ 1182 rc = VMXWriteVMCS(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));1183 Log2(("VMX_VMCS_HOST_SYSENTER_CS %08x\n", ASMRdMsr_Low(MSR_IA32_SYSENTER_CS)));1272 rc = VMXWriteVMCS(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS)); 1273 Log2(("VMX_VMCS_HOST_SYSENTER_CS %08x\n", ASMRdMsr_Low(MSR_IA32_SYSENTER_CS))); 1184 1274 #ifdef VBOX_WITH_HYBRID_32BIT_KERNEL 1185 1275 if (VMX_IS_64BIT_HOST_MODE()) … … 1192 1282 else 1193 1283 { 1194 rc |= VMXWriteVMCS(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));1195 rc |= VMXWriteVMCS(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));1196 Log2(("VMX_VMCS_HOST_SYSENTER_EIP %RX32\n", ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP)));1197 Log2(("VMX_VMCS_HOST_SYSENTER_ESP %RX32\n", ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP)));1284 rc |= VMXWriteVMCS(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP)); 1285 rc |= VMXWriteVMCS(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP)); 1286 Log2(("VMX_VMCS_HOST_SYSENTER_EIP %RX32\n", ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP))); 1287 Log2(("VMX_VMCS_HOST_SYSENTER_ESP %RX32\n", ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP))); 1198 1288 } 1199 1289 #elif HC_ARCH_BITS == 32 1200 rc |= VMXWriteVMCS(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));1201 rc |= VMXWriteVMCS(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));1202 Log2(("VMX_VMCS_HOST_SYSENTER_EIP %RX32\n", ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP)));1203 Log2(("VMX_VMCS_HOST_SYSENTER_ESP %RX32\n", ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP)));1290 rc |= VMXWriteVMCS(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP)); 1291 rc |= VMXWriteVMCS(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP)); 1292 Log2(("VMX_VMCS_HOST_SYSENTER_EIP %RX32\n", ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP))); 1293 Log2(("VMX_VMCS_HOST_SYSENTER_ESP %RX32\n", ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP))); 1204 1294 #else 1205 Log2(("VMX_VMCS_HOST_SYSENTER_EIP %RX64\n", ASMRdMsr(MSR_IA32_SYSENTER_EIP)));1206 Log2(("VMX_VMCS_HOST_SYSENTER_ESP %RX64\n", ASMRdMsr(MSR_IA32_SYSENTER_ESP)));1207 rc |= VMXWriteVMCS64(VMX_VMCS_HOST_SYSENTER_ESP,ASMRdMsr(MSR_IA32_SYSENTER_ESP));1208 rc |= VMXWriteVMCS64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));1295 Log2(("VMX_VMCS_HOST_SYSENTER_EIP %RX64\n", ASMRdMsr(MSR_IA32_SYSENTER_EIP))); 1296 Log2(("VMX_VMCS_HOST_SYSENTER_ESP %RX64\n", ASMRdMsr(MSR_IA32_SYSENTER_ESP))); 1297 rc |= VMXWriteVMCS64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP)); 1298 rc |= VMXWriteVMCS64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP)); 1209 1299 #endif 1210 1300 AssertRC(rc); 1211 1301 1302 1212 1303 #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE 1213 /* Store all host MSRs in the VM-Exit load area, so they will be reloaded after the 
world switch back to the host. */ 1304 /* 1305 * Store all host MSRs in the VM-Exit load area, so they will be reloaded after 1306 * the world switch back to the host. 1307 */ 1214 1308 PVMXMSR pMsr = (PVMXMSR)pVCpu->hwaccm.s.vmx.pHostMSR; 1215 1309 unsigned idxMsr = 0; 1216 1310 1217 /* EFER MSR present? */ 1311 /* 1312 * Check if EFER MSR present. 1313 */ 1218 1314 if (ASMCpuId_EDX(0x80000001) & (X86_CPUID_AMD_FEATURE_EDX_NX|X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)) 1219 1315 { … … 1231 1327 if (CPUMIsGuestInLongMode(pVCpu)) 1232 1328 { 1233 /* Must match the efervalue in our 64 bits switcher. */1329 /* Must match the EFER value in our 64 bits switcher. */ 1234 1330 pMsr->u64Value = ASMRdMsr(MSR_K6_EFER) | MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_NXE; 1235 1331 } … … 1266 1362 } 1267 1363 1364 1268 1365 /** 1269 1366 * Loads the 4 PDPEs into the guest state when nested paging is used and the 1270 1367 * guest operates in PAE mode. 1271 1368 * 1272 * @returns V INF_SUCCESS or fatal error.1273 * @param pVCpu The VMCPU to operate on.1274 * @param pCtx Guest context1369 * @returns VBox status code. 1370 * @param pVCpu Pointer to the VMCPU. 1371 * @param pCtx Pointer to the guest CPU context. 1275 1372 */ 1276 1373 static int hmR0VmxLoadPaePdpes(PVMCPU pVCpu, PCPUMCTX pCtx) … … 1290 1387 } 1291 1388 1389 1292 1390 /** 1293 1391 * Saves the 4 PDPEs into the guest state when nested paging is used and the 1294 1392 * guest operates in PAE mode. 1295 1393 * 1296 * @returns V INF_SUCCESS or fatal error.1297 * @param pVCpu The VMCPU to operate on.1298 * @param pCtx Guest context1394 * @returns VBox status code. 1395 * @param pVCpu Pointer to the VM CPU. 1396 * @param pCtx Pointer to the guest CPU context. 1299 1397 * 1300 1398 * @remarks Tell PGM about CR3 changes before calling this helper. … … 1319 1417 1320 1418 /** 1321 * Update the exception bitmap according to the current CPU state 1419 * Update the exception bitmap according to the current CPU state. 1322 1420 * 1323 * @param pVM The VM to operate on.1324 * @param pVCpu The VMCPU to operate on.1325 * @param pCtx Guest context1421 * @param pVM Pointer to the VM. 1422 * @param pVCpu Pointer to the VMCPU. 1423 * @param pCtx Pointer to the guest CPU context. 1326 1424 */ 1327 1425 static void hmR0VmxUpdateExceptionBitmap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) … … 1330 1428 Assert(pCtx); 1331 1429 1332 /* Set up a mask for intercepting traps. */ 1430 /* 1431 * Set up a mask for intercepting traps. 1432 */ 1333 1433 /** @todo Do we really need to always intercept #DB? */ 1334 1434 u32TrapMask = RT_BIT(X86_XCPT_DB) … … 1350 1450 ; 1351 1451 1452 /* 1453 * Without nested paging, #PF must be intercepted to implement shadow paging. 1454 */ 1352 1455 /** @todo NP state won't change so maybe we should build the initial trap mask up front? */ 1353 /* Without nested paging, #PF must be intercepted to implement shadow paging. */1354 1456 if (!pVM->hwaccm.s.fNestedPaging) 1355 1457 u32TrapMask |= RT_BIT(X86_XCPT_PF); 1356 1458 1357 /* Also catch floating point exceptions if we need to report them to the guest in a different way. */1459 /* Catch floating point exceptions if we need to report them to the guest in a different way. */ 1358 1460 if (!(pCtx->cr0 & X86_CR0_NE)) 1359 {1360 1461 u32TrapMask |= RT_BIT(X86_XCPT_MF); 1361 }1362 1462 1363 1463 #ifdef VBOX_STRICT … … 1365 1465 #endif 1366 1466 1367 /* Intercept all exceptions in real mode as none of them can be injected directly (#GP otherwise). 
*/ 1467 /* 1468 * Intercept all exceptions in real mode as none of them can be injected directly (#GP otherwise). 1469 */ 1368 1470 /** @todo Despite the claim to intercept everything, with NP we do not intercept #PF. Should we? */ 1369 1471 if ( CPUMIsGuestInRealModeEx(pCtx) 1370 1472 && pVM->hwaccm.s.vmx.pRealModeTSS) 1473 { 1371 1474 u32TrapMask |= RT_BIT(X86_XCPT_DE) 1372 1475 | RT_BIT(X86_XCPT_DB) … … 1387 1490 | RT_BIT(X86_XCPT_XF) 1388 1491 ; 1492 } 1389 1493 1390 1494 int rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXCEPTION_BITMAP, u32TrapMask); … … 1392 1496 } 1393 1497 1498 1394 1499 /** 1395 * Loads a minimal guest state 1500 * Loads a minimal guest state. 1396 1501 * 1397 1502 * NOTE: Don't do anything here that can cause a jump back to ring 3!!!!! 1398 1503 * 1399 * @param pVM The VM to operate on.1400 * @param pVCpu The VMCPU to operate on.1401 * @param pCtx Guest context1504 * @param pVM Pointer to the VM. 1505 * @param pVCpu Pointer to the VMCPU. 1506 * @param pCtx Pointer to the guest CPU context. 1402 1507 */ 1403 1508 VMMR0DECL(void) VMXR0LoadMinimalGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) … … 1408 1513 Assert(!(pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_ALL_GUEST)); 1409 1514 1410 /* EIP, ESP and EFLAGS */ 1515 /* 1516 * Load EIP, ESP and EFLAGS. 1517 */ 1411 1518 rc = VMXWriteVMCS64(VMX_VMCS64_GUEST_RIP, pCtx->rip); 1412 1519 rc |= VMXWriteVMCS64(VMX_VMCS64_GUEST_RSP, pCtx->rsp); 1413 1520 AssertRC(rc); 1414 1521 1415 /* Bits 22-31, 15, 5 & 3 must be zero. Bit 1 must be 1. */ 1522 /* 1523 * Bits 22-31, 15, 5 & 3 must be zero. Bit 1 must be 1. 1524 */ 1416 1525 eflags = pCtx->eflags; 1417 1526 eflags.u32 &= VMX_EFLAGS_RESERVED_0; 1418 1527 eflags.u32 |= VMX_EFLAGS_RESERVED_1; 1419 1528 1420 /* Real mode emulation using v86 mode. */ 1529 /* 1530 * Check if real mode emulation using v86 mode. 1531 */ 1421 1532 if ( CPUMIsGuestInRealModeEx(pCtx) 1422 1533 && pVM->hwaccm.s.vmx.pRealModeTSS) … … 1431 1542 } 1432 1543 1544 1433 1545 /** 1434 * Loads the guest state 1546 * Loads the guest state. 1435 1547 * 1436 1548 * NOTE: Don't do anything here that can cause a jump back to ring 3!!!!! 1437 1549 * 1438 1550 * @returns VBox status code. 1439 * @param pVM The VM to operate on.1440 * @param pVCpu The VMCPU to operate on.1441 * @param pCtx Guest context1551 * @param pVM Pointer to the VM. 1552 * @param pVCpu Pointer to the VMCPU. 1553 * @param pCtx Pointer to the guest CPU context. 1442 1554 */ 1443 1555 VMMR0DECL(int) VMXR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) … … 1446 1558 RTGCUINTPTR val; 1447 1559 1448 /* VMX_VMCS_CTRL_ENTRY_CONTROLS 1560 /* 1561 * VMX_VMCS_CTRL_ENTRY_CONTROLS 1449 1562 * Set required bits to one and zero according to the MSR capabilities. 1450 1563 */ 1451 1564 val = pVM->hwaccm.s.vmx.msr.vmx_entry.n.disallowed0; 1452 /* Load guest debug controls (dr7 & IA32_DEBUGCTL_MSR) (forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs) */ 1565 1566 /* 1567 * Load guest debug controls (DR7 & IA32_DEBUGCTL_MSR). 1568 * Forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs 1569 */ 1453 1570 val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG; 1454 /* 64 bits guest mode? */ 1571 1455 1572 if (CPUMIsGuestInLongModeEx(pCtx)) 1456 1573 val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE; 1457 1574 /* else Must be zero when AMD64 is not available. */ 1458 1575 1459 /* Mask away the bits that the CPU doesn't support */ 1576 /* 1577 * Mask away the bits that the CPU doesn't support. 
1578 */ 1460 1579 val &= pVM->hwaccm.s.vmx.msr.vmx_entry.n.allowed1; 1461 1580 rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, val); 1462 1581 AssertRC(rc); 1463 1582 1464 /* VMX_VMCS_CTRL_EXIT_CONTROLS 1583 /* 1584 * VMX_VMCS_CTRL_EXIT_CONTROLS 1465 1585 * Set required bits to one and zero according to the MSR capabilities. 1466 1586 */ 1467 1587 val = pVM->hwaccm.s.vmx.msr.vmx_exit.n.disallowed0; 1468 1588 1469 /* Save debug controls (dr7 & IA32_DEBUGCTL_MSR) (forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs) */ 1589 /* 1590 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR) 1591 * Forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs 1592 */ 1470 1593 val |= VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG; 1471 1594 … … 1473 1596 if (VMX_IS_64BIT_HOST_MODE()) 1474 1597 val |= VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64; 1475 /* else :Must be zero when AMD64 is not available. */1598 /* else Must be zero when AMD64 is not available. */ 1476 1599 #elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) 1477 1600 if (CPUMIsGuestInLongModeEx(pCtx)) … … 1481 1604 #endif 1482 1605 val &= pVM->hwaccm.s.vmx.msr.vmx_exit.n.allowed1; 1483 /* Don't acknowledge external interrupts on VM-exit. */ 1606 1607 /* 1608 * Don't acknowledge external interrupts on VM-exit. 1609 */ 1484 1610 rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_CONTROLS, val); 1485 1611 AssertRC(rc); 1486 1612 1487 /* Guest CPU context: ES, CS, SS, DS, FS, GS. */ 1613 /* 1614 * Guest CPU context: ES, CS, SS, DS, FS, GS. 1615 */ 1488 1616 if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SEGMENT_REGS) 1489 1617 { … … 1493 1621 if (pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode != enmGuestMode) 1494 1622 { 1495 /* Correct weird requirements for switching to protected mode. */ 1623 /* 1624 * Correct weird requirements for switching to protected mode. 1625 */ 1496 1626 if ( pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL 1497 1627 && enmGuestMode >= PGMMODE_PROTECTED) 1498 1628 { 1499 1629 #ifdef VBOX_WITH_REM 1500 /* Flush the recompiler code cache as it's not unlikely1501 * the guest will rewrite code it will later execute in real1502 * mode (OpenBSD 4.0 is one such example)1630 /* 1631 * Flush the recompiler code cache as it's not unlikely the guest will rewrite code 1632 * it will later execute in real mode (OpenBSD 4.0 is one such example) 1503 1633 */ 1504 1634 REMFlushTBs(pVM); 1505 1635 #endif 1506 1636 1507 /* DPL of all hidden selector registers must match the current CPL (0). */ 1637 /* 1638 * DPL of all hidden selector registers must match the current CPL (0). 1639 */ 1508 1640 pCtx->csHid.Attr.n.u2Dpl = 0; 1509 1641 pCtx->csHid.Attr.n.u4Type = X86_SEL_TYPE_CODE | X86_SEL_TYPE_RW_ACC; … … 1517 1649 pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode = enmGuestMode; 1518 1650 } 1519 else 1520 /* VT-x will fail with a guest invalid state otherwise... (CPU state after a reset) */ 1521 if ( CPUMIsGuestInRealModeEx(pCtx) 1522 && pCtx->csHid.u64Base == 0xffff0000) 1651 else if ( CPUMIsGuestInRealModeEx(pCtx) 1652 && pCtx->csHid.u64Base == 0xffff0000) 1523 1653 { 1654 /* VT-x will fail with a guest invalid state otherwise... (CPU state after a reset) */ 1524 1655 pCtx->csHid.u64Base = 0xf0000; 1525 1656 pCtx->cs = 0xf000; … … 1546 1677 } 1547 1678 1548 /* Guest CPU context: LDTR. */ 1679 /* 1680 * Guest CPU context: LDTR. 
1681 */ 1549 1682 if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_LDTR) 1550 1683 { … … 1566 1699 AssertRC(rc); 1567 1700 } 1568 /* Guest CPU context: TR. */ 1701 1702 /* 1703 * Guest CPU context: TR. 1704 */ 1569 1705 if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_TR) 1570 1706 { 1571 /* Real mode emulation using v86 mode with CR4.VME (interrupt redirection using the int bitmap in the TSS) */ 1707 /* 1708 * Real mode emulation using v86 mode with CR4.VME (interrupt redirection 1709 * using the int bitmap in the TSS). 1710 */ 1572 1711 if ( CPUMIsGuestInRealModeEx(pCtx) 1573 1712 && pVM->hwaccm.s.vmx.pRealModeTSS) … … 1575 1714 RTGCPHYS GCPhys; 1576 1715 1577 /* We convert it here every time as pciregions could be reconfigured. */1716 /* We convert it here every time as PCI regions could be reconfigured. */ 1578 1717 rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hwaccm.s.vmx.pRealModeTSS, &GCPhys); 1579 1718 AssertRC(rc); … … 1612 1751 AssertRC(rc); 1613 1752 } 1614 /* Guest CPU context: GDTR. */ 1753 1754 /* 1755 * Guest CPU context: GDTR. 1756 */ 1615 1757 if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_GDTR) 1616 1758 { … … 1619 1761 AssertRC(rc); 1620 1762 } 1621 /* Guest CPU context: IDTR. */ 1763 1764 /* 1765 * Guest CPU context: IDTR. 1766 */ 1622 1767 if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_IDTR) 1623 1768 { … … 1628 1773 1629 1774 /* 1630 * Sysenter MSRs 1775 * Sysenter MSRs. 1631 1776 */ 1632 1777 if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_MSR) … … 1638 1783 } 1639 1784 1640 /* Control registers */ 1785 /* 1786 * Guest CPU context: Control registers. 1787 */ 1641 1788 if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR0) 1642 1789 { … … 1665 1812 if (CPUMIsGuestInPagedProtectedModeEx(pCtx)) 1666 1813 { 1667 /* Disable cr3 read/write monitoring as we don't need it for EPT. */1814 /* Disable CR3 read/write monitoring as we don't need it for EPT. */ 1668 1815 pVCpu->hwaccm.s.vmx.proc_ctls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT 1669 1816 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT); … … 1671 1818 else 1672 1819 { 1673 /* Reenable cr3 read/write monitoring as our identity mapped page table is active. */1820 /* Reenable CR3 read/write monitoring as our identity mapped page table is active. */ 1674 1821 pVCpu->hwaccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT 1675 1822 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT; … … 1689 1836 rc |= VMXWriteVMCS64(VMX_VMCS64_GUEST_CR0, val); 1690 1837 Log2(("Guest CR0 %08x\n", val)); 1691 /* CR0 flags owned by the host; if the guests attempts to change them, then 1692 * the VM will exit. 1838 1839 /* 1840 * CR0 flags owned by the host; if the guests attempts to change them, then the VM will exit. 1693 1841 */ 1694 1842 val = X86_CR0_PE /* Must monitor this bit (assumptions are made for real mode emulation) */ … … 1696 1844 | X86_CR0_PG /* Must monitor this bit (assumptions are made for real mode & protected mode without paging emulation) */ 1697 1845 | X86_CR0_CD /* Bit not restored during VM-exit! */ 1698 | X86_CR0_NW /* Bit not restored during VM-exit! */1846 | X86_CR0_NW /* Bit not restored during VM-exit! */ 1699 1847 | X86_CR0_NE; 1700 1848 1701 /* When the guest's FPU state is active, then we no longer care about1702 * the FPU related bits.1849 /* 1850 * When the guest's FPU state is active, then we no longer care about the FPU related bits. 
1703 1851 */ 1704 1852 if (CPUMIsGuestFPUStateActive(pVCpu) == false) … … 1711 1859 AssertRC(rc); 1712 1860 } 1861 1713 1862 if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR4) 1714 1863 { 1715 /* CR4 */1716 1864 rc = VMXWriteVMCS(VMX_VMCS_CTRL_CR4_READ_SHADOW, pCtx->cr4); 1717 1865 Log2(("Guest CR4-shadow %08x\n", pCtx->cr4)); … … 1723 1871 switch(pVCpu->hwaccm.s.enmShadowMode) 1724 1872 { 1725 case PGMMODE_REAL: /* Real mode -> emulated using v86 mode */1726 case PGMMODE_PROTECTED: /* Protected mode, no paging -> emulated using identity mapping. */1727 case PGMMODE_32_BIT: /* 32-bit paging. */1728 val &= ~X86_CR4_PAE;1729 break;1730 1731 case PGMMODE_PAE: /* PAE paging. */1732 case PGMMODE_PAE_NX: /* PAE paging with NX enabled. */1733 /** Must use PAE paging as we could use physical memory > 4 GB */1734 val |= X86_CR4_PAE;1735 break;1736 1737 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */1738 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */1873 case PGMMODE_REAL: /* Real mode -> emulated using v86 mode */ 1874 case PGMMODE_PROTECTED: /* Protected mode, no paging -> emulated using identity mapping. */ 1875 case PGMMODE_32_BIT: /* 32-bit paging. */ 1876 val &= ~X86_CR4_PAE; 1877 break; 1878 1879 case PGMMODE_PAE: /* PAE paging. */ 1880 case PGMMODE_PAE_NX: /* PAE paging with NX enabled. */ 1881 /** Must use PAE paging as we could use physical memory > 4 GB */ 1882 val |= X86_CR4_PAE; 1883 break; 1884 1885 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */ 1886 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */ 1739 1887 #ifdef VBOX_ENABLE_64_BITS_GUESTS 1740 break;1888 break; 1741 1889 #else 1742 AssertFailed();1743 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;1744 #endif 1745 default: /* shut up gcc */1746 AssertFailed();1747 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;1890 AssertFailed(); 1891 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE; 1892 #endif 1893 default: /* shut up gcc */ 1894 AssertFailed(); 1895 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE; 1748 1896 } 1749 1897 } 1750 else 1751 if ( !CPUMIsGuestInPagedProtectedModeEx(pCtx) 1752 && !pVM->hwaccm.s.vmx.fUnrestrictedGuest) 1898 else if ( !CPUMIsGuestInPagedProtectedModeEx(pCtx) 1899 && !pVM->hwaccm.s.vmx.fUnrestrictedGuest) 1753 1900 { 1754 1901 /* We use 4 MB pages in our identity mapping page table for real and protected mode without paging. */ … … 1758 1905 } 1759 1906 1760 /* Turn off VME if we're in emulated real mode. */ 1907 /* 1908 * Turn off VME if we're in emulated real mode. 1909 */ 1761 1910 if ( CPUMIsGuestInRealModeEx(pCtx) 1762 1911 && pVM->hwaccm.s.vmx.pRealModeTSS) 1912 { 1763 1913 val &= ~X86_CR4_VME; 1914 } 1764 1915 1765 1916 rc |= VMXWriteVMCS64(VMX_VMCS64_GUEST_CR4, val); 1766 1917 Log2(("Guest CR4 %08x\n", val)); 1767 /* CR4 flags owned by the host; if the guests attempts to change them, then 1768 * the VM will exit. 1918 1919 /* 1920 * CR4 flags owned by the host; if the guests attempts to change them, then the VM will exit. 1769 1921 */ 1770 1922 val = 0 … … 1812 1964 RTGCPHYS GCPhys; 1813 1965 1814 /* We convert it here every time as pciregions could be reconfigured. */1966 /* We convert it here every time as PCI regions could be reconfigured. 
*/ 1815 1967 rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys); 1816 1968 AssertMsgRC(rc, ("pNonPagingModeEPTPageTable = %RGv\n", pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable)); 1817 1969 1818 /* We use our identity mapping page table here as we need to map guest virtual to guest physical addresses; EPT will 1819 * take care of the translation to host physical addresses. 1970 /* 1971 * We use our identity mapping page table here as we need to map guest virtual to 1972 * guest physical addresses; EPT will take care of the translation to host physical addresses. 1820 1973 */ 1821 1974 val = GCPhys; … … 1840 1993 } 1841 1994 1842 /* Debug registers. */ 1995 /* 1996 * Guest CPU context: Debug registers. 1997 */ 1843 1998 if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_DEBUG) 1844 1999 { … … 1879 2034 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDRxArmed); 1880 2035 1881 /* Disable drx move intercepts. */2036 /* Disable DRx move intercepts. */ 1882 2037 pVCpu->hwaccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT; 1883 2038 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls); … … 1894 2049 1895 2050 /** @todo do we really ever need this? */ 1896 rc |= VMXWriteVMCS(VMX_VMCS_GUEST_DEBUG_EXCEPTIONS, 2051 rc |= VMXWriteVMCS(VMX_VMCS_GUEST_DEBUG_EXCEPTIONS, 0); 1897 2052 AssertRC(rc); 1898 2053 } 1899 2054 1900 /* 64 bits guest mode? */ 2055 /* 2056 * 64-bit guest mode. 2057 */ 1901 2058 if (CPUMIsGuestInLongModeEx(pCtx)) 1902 2059 { … … 1994 2151 else 1995 2152 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hwaccm.s.vmx.u64TSCOffset); 2153 1996 2154 if (fOffsettedTsc) 1997 2155 { … … 2011 2169 { 2012 2170 /* Fall back to rdtsc emulation as we would otherwise pass decreasing tsc values to the guest. */ 2013 LogFlow(("TSC %RX64 offset %RX64 time=%RX64 last=%RX64 (diff=%RX64, virt_tsc=%RX64)\n", u64CurTSC, pVCpu->hwaccm.s.vmx.u64TSCOffset, u64CurTSC + pVCpu->hwaccm.s.vmx.u64TSCOffset, TMCpuTickGetLastSeen(pVCpu), TMCpuTickGetLastSeen(pVCpu) - u64CurTSC - pVCpu->hwaccm.s.vmx.u64TSCOffset, TMCpuTickGet(pVCpu))); 2171 LogFlow(("TSC %RX64 offset %RX64 time=%RX64 last=%RX64 (diff=%RX64, virt_tsc=%RX64)\n", u64CurTSC, 2172 pVCpu->hwaccm.s.vmx.u64TSCOffset, u64CurTSC + pVCpu->hwaccm.s.vmx.u64TSCOffset, 2173 TMCpuTickGetLastSeen(pVCpu), TMCpuTickGetLastSeen(pVCpu) - u64CurTSC - pVCpu->hwaccm.s.vmx.u64TSCOffset, 2174 TMCpuTickGet(pVCpu))); 2014 2175 pVCpu->hwaccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT; 2015 2176 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls); … … 2029 2190 pVCpu->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_ALL_GUEST; 2030 2191 2031 /* Minimal guest state update ( esp, eip, eflagsmostly) */2192 /* Minimal guest state update (ESP, EIP, EFLAGS mostly) */ 2032 2193 VMXR0LoadMinimalGuestState(pVM, pVCpu, pCtx); 2033 2194 return rc; 2034 2195 } 2035 2196 2197 2036 2198 /** 2037 * Syncs back the guest state 2199 * Syncs back the guest state from VMCS. 2038 2200 * 2039 2201 * @returns VBox status code. 2040 * @param pVM The VM to operate on.2041 * @param pVCpu The VMCPU to operate on.2042 * @param pCtx Guest context2202 * @param pVM Pointer to the VM. 2203 * @param pVCpu Pointer to the VMCPU. 2204 * @param pCtx Pointer the guest CPU context. 2043 2205 */ 2044 2206 DECLINLINE(int) VMXR0SaveGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) … … 2048 2210 int rc; 2049 2211 2050 /* Let's first sync back eip, esp, and eflags. 
*/2212 /* First sync back EIP, ESP, and EFLAGS. */ 2051 2213 rc = VMXReadCachedVMCS(VMX_VMCS64_GUEST_RIP, &val); 2052 2214 AssertRC(rc); … … 2082 2244 CPUMSetGuestCR4(pVCpu, val); 2083 2245 2084 /* Note: no reason to sync back the CRx registers. They can't be changed by the guest. */ 2085 /* Note: only in the nested paging case can CR3 & CR4 be changed by the guest. */ 2086 if ( pVM->hwaccm.s.fNestedPaging 2087 && CPUMIsGuestInPagedProtectedModeEx(pCtx)) /** @todo check if we will always catch mode switches and such... */ 2246 /* 2247 * No reason to sync back the CRx registers. They can't be changed by the guest unless in 2248 * the nested paging case where CR3 & CR4 can be changed by the guest. 2249 */ 2250 if ( pVM->hwaccm.s.fNestedPaging 2251 && CPUMIsGuestInPagedProtectedModeEx(pCtx)) /** @todo check if we will always catch mode switches and such... */ 2088 2252 { 2089 2253 PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache; … … 2103 2267 } 2104 2268 2105 /* Sync back DR7 here. */2269 /* Sync back DR7. */ 2106 2270 VMXReadCachedVMCS(VMX_VMCS64_GUEST_DR7, &val); 2107 2271 pCtx->dr[7] = val; … … 2115 2279 VMX_READ_SELREG(GS, gs); 2116 2280 2117 /* 2118 * System MSRs 2119 */ 2281 /* System MSRs */ 2120 2282 VMXReadCachedVMCS(VMX_VMCS32_GUEST_SYSENTER_CS, &val); 2121 2283 pCtx->SysEnter.cs = val; … … 2180 2342 case MSR_K6_EFER: 2181 2343 /* EFER can't be changed without causing a VM-exit. */ 2182 // Assert(pCtx->msrEFER == pMsr->u64Value); 2344 /* Assert(pCtx->msrEFER == pMsr->u64Value); */ 2183 2345 break; 2184 2346 default: … … 2191 2353 } 2192 2354 2355 2193 2356 /** 2194 * Dummy placeholder 2357 * Dummy placeholder for TLB flush handling before VM-entry. Used in the case 2358 * where neither EPT nor VPID is supported by the CPU. 2195 2359 * 2196 * @param pVM The VM to operate on.2197 * @param pVCpu The VMCPU to operate on.2360 * @param pVM Pointer to the VM. 2361 * @param pVCpu Pointer to the VMCPU. 2198 2362 */ 2199 2363 static DECLCALLBACK(void) hmR0VmxSetupTLBDummy(PVM pVM, PVMCPU pVCpu) … … 2210 2374 * Setup the tagged TLB for EPT+VPID. 2211 2375 * 2212 * @param pVM The VM to operate on.2213 * @param pVCpu The VMCPU to operate on.2376 * @param pVM Pointer to the VM. 2377 * @param pVCpu Pointer to the VMCPU. 2214 2378 */ 2215 2379 static DECLCALLBACK(void) hmR0VmxSetupTLBBoth(PVM pVM, PVMCPU pVCpu) … … 2333 2497 * 2334 2498 * @returns VBox status code. 2335 * @param pVM The VM to operate on.2336 * @param pVCpu The VMCPU to operate on.2499 * @param pVM Pointer to the VM. 2500 * @param pVCpu Pointer to the VMCPU. 2337 2501 */ 2338 2502 static DECLCALLBACK(void) hmR0VmxSetupTLBEPT(PVM pVM, PVMCPU pVCpu) … … 2343 2507 Assert(!pVM->hwaccm.s.vmx.fVPID); 2344 2508 2345 /* Deal with tagged TLBs if VPID or EPT is supported. */2346 2509 pCpu = HWACCMR0GetCurrentCpu(); 2347 /* Force a TLB flush for the first world switch if the current cpu differs from the one we ran on last. */ 2348 /* Note that this can happen both for start and resume due to long jumps back to ring 3. */ 2510 2511 /* 2512 * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last 2513 * This can happen both for start & resume due to long jumps back to ring-3. 2514 * If the TLB flush count shouldn't really change in this EPT-only case. 2515 */ 2349 2516 if ( pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu 2350 /* if the tlb flush count has changed, another VM has flushed the TLB of this cpu, so we can't use our current ASID anymore. 
*/2351 2517 || pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes) 2352 2518 { 2353 /* Force a TLB flush on VM entry. */2354 2519 pVCpu->hwaccm.s.fForceTLBFlush = true; 2355 2520 } 2356 2521 2357 /* Check for tlb shootdown flushes. */ 2522 /* 2523 * Check for explicit TLB shootdown flushes. 2524 */ 2358 2525 if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH)) 2359 2526 pVCpu->hwaccm.s.fForceTLBFlush = true; … … 2390 2557 2391 2558 /** 2392 * Setup the tagged TLB for VPID 2559 * Setup the tagged TLB for VPID. 2393 2560 * 2394 2561 * @returns VBox status code. 2395 * @param pVM The VM to operate on.2396 * @param pVCpu The VMCPU to operate on.2562 * @param pVM Pointer to the VM. 2563 * @param pVCpu Pointer to the VMCPU. 2397 2564 */ 2398 2565 static DECLCALLBACK(void) hmR0VmxSetupTLBVPID(PVM pVM, PVMCPU pVCpu) … … 2403 2570 Assert(!pVM->hwaccm.s.fNestedPaging); 2404 2571 2405 /* Deal with tagged TLBs if VPID or EPT is supported. */2406 2572 pCpu = HWACCMR0GetCurrentCpu(); 2407 /* Force a TLB flush for the first world switch if the current cpu differs from the one we ran on last. */ 2408 /* Note that this can happen both for start and resume due to long jumps back to ring 3. */ 2573 2574 /* 2575 * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last 2576 * This can happen both for start & resume due to long jumps back to ring-3. 2577 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB, 2578 * so we cannot reuse the current ASID anymore. 2579 */ 2409 2580 if ( pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu 2410 /* if the tlb flush count has changed, another VM has flushed the TLB of this cpu, so we can't use our current ASID anymore. */2411 2581 || pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes) 2412 2582 { … … 2415 2585 } 2416 2586 2417 pVCpu->hwaccm.s.idLastCpu = pCpu->idCpu;2418 2419 /* Check for tlb shootdown flushes.*/2587 /* 2588 * Check for explicit TLB shootdown flushes. 2589 */ 2420 2590 if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH)) 2421 2591 pVCpu->hwaccm.s.fForceTLBFlush = true; 2422 2592 2423 /* Make sure we flush the TLB when required. */ 2593 pVCpu->hwaccm.s.idLastCpu = pCpu->idCpu; 2594 2424 2595 if (pVCpu->hwaccm.s.fForceTLBFlush) 2425 2596 { … … 2485 2656 * 2486 2657 * @returns VBox status code. 2487 * @param pVM The VM to operate on.2488 * @param pVCpu The VMCPU to operate on.2489 * @param pCtx Guest context2658 * @param pVM Pointer to the VM. 2659 * @param pVCpu Pointer to the VMCPU. 2660 * @param pCtx Pointer to the guest CPU context. 2490 2661 */ 2491 2662 VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) … … 2518 2689 Assert(!(pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC) || (pVCpu->hwaccm.s.vmx.pbVAPIC && pVM->hwaccm.s.vmx.pAPIC)); 2519 2690 2520 /* Check if we need to use TPR shadowing. */ 2691 /* 2692 * Check if we need to use TPR shadowing. 
2693 */ 2521 2694 if ( CPUMIsGuestInLongModeEx(pCtx) 2522 2695 || ( ((pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC) || pVM->hwaccm.s.fTRPPatchingAllowed) … … 2535 2708 rc2 = VMXReadVMCS(VMX_VMCS_CTRL_PIN_EXEC_CONTROLS, &val2); 2536 2709 AssertRC(rc2); 2537 Log2(("VMX_VMCS_CTRL_PIN_EXEC_CONTROLS = %08x\n", val2));2710 Log2(("VMX_VMCS_CTRL_PIN_EXEC_CONTROLS = %08x\n", val2)); 2538 2711 2539 2712 /* allowed zero */ … … 2547 2720 rc2 = VMXReadVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, &val2); 2548 2721 AssertRC(rc2); 2549 Log2(("VMX_VMCS_CTRL_PROC_EXEC_CONTROLS = %08x\n", val2)); 2550 2551 /* Must be set according to the MSR, but can be cleared in case of EPT. */ 2722 Log2(("VMX_VMCS_CTRL_PROC_EXEC_CONTROLS = %08x\n", val2)); 2723 2724 /* 2725 * Must be set according to the MSR, but can be cleared if nested paging is used. 2726 */ 2552 2727 if (pVM->hwaccm.s.fNestedPaging) 2728 { 2553 2729 val2 |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT 2554 2730 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT 2555 2731 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT; 2732 } 2556 2733 2557 2734 /* allowed zero */ … … 2565 2742 rc2 = VMXReadVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, &val2); 2566 2743 AssertRC(rc2); 2567 Log2(("VMX_VMCS_CTRL_ENTRY_CONTROLS = %08x\n", val2));2744 Log2(("VMX_VMCS_CTRL_ENTRY_CONTROLS = %08x\n", val2)); 2568 2745 2569 2746 /* allowed zero */ … … 2577 2754 rc2 = VMXReadVMCS(VMX_VMCS_CTRL_EXIT_CONTROLS, &val2); 2578 2755 AssertRC(rc2); 2579 Log2(("VMX_VMCS_CTRL_EXIT_CONTROLS = %08x\n", val2));2756 Log2(("VMX_VMCS_CTRL_EXIT_CONTROLS = %08x\n", val2)); 2580 2757 2581 2758 /* allowed zero */ … … 2594 2771 #endif 2595 2772 2596 /* We can jump to this point to resume execution after determining that a VM-exit is innocent. 2773 /* 2774 * We can jump to this point to resume execution after determining that a VM-exit is innocent. 2597 2775 */ 2598 2776 ResumeExecution: … … 2606 2784 Assert(fWasInLongMode == CPUMIsGuestInLongModeEx(pCtx)); 2607 2785 2608 /* Safety precaution; looping for too long here can have a very bad effect on the host */ 2786 /* 2787 * Safety precaution; looping for too long here can have a very bad effect on the host. 2788 */ 2609 2789 if (RT_UNLIKELY(++cResume > pVM->hwaccm.s.cMaxResumeLoops)) 2610 2790 { … … 2614 2794 } 2615 2795 2616 /* Check for irq inhibition due to instruction fusing (sti, mov ss). */ 2796 /* 2797 * Check for IRQ inhibition due to instruction fusing (sti, mov ss). 2798 */ 2617 2799 if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 2618 2800 { … … 2620 2802 if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu)) 2621 2803 { 2622 /* Note: we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here. 2804 /* 2805 * Note: we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here. 2623 2806 * Before we are able to execute this instruction in raw mode (iret to guest code) an external interrupt might 2624 2807 * force a world switch again. Possibly allowing a guest interrupt to be dispatched in the process. This could … … 2651 2834 #endif 2652 2835 2653 /* Check for pending actions that force us to go back to ring 3. */ 2836 /* 2837 * Check for pending actions that force us to go back to ring-3. 
2838 */ 2654 2839 if ( VM_FF_ISPENDING(pVM, VM_FF_HWACCM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA) 2655 2840 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HWACCM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_REQUEST)) … … 2725 2910 #endif 2726 2911 2727 /* When external interrupts are pending, we should exit the VM when IF is set. */ 2728 /* Note! *After* VM_FF_INHIBIT_INTERRUPTS check!!! */ 2912 /* 2913 * When external interrupts are pending, we should exit the VM when IF is et. 2914 * Note: *After* VM_FF_INHIBIT_INTERRUPTS check! 2915 */ 2729 2916 rc = hmR0VmxCheckPendingInterrupt(pVM, pVCpu, pCtx); 2730 2917 if (RT_FAILURE(rc)) … … 2733 2920 /** @todo check timers?? */ 2734 2921 2735 /* TPR caching using CR8 is only available in 64 bits mode */ 2736 /* Note the 32 bits exception for AMD (X86_CPUID_AMD_FEATURE_ECX_CR8L), but that appears missing in Intel CPUs */ 2737 /* Note: we can't do this in LoadGuestState as PDMApicGetTPR can jump back to ring 3 (lock)!!!!! (no longer true) */ 2738 /** 2739 * @todo query and update the TPR only when it could have been changed (mmio access & wrmsr (x2apic)) 2740 */ 2922 /* 2923 * TPR caching using CR8 is only available in 64-bit mode. 2924 * Note: The 32-bit exception for AMD (X86_CPUID_AMD_FEATURE_ECX_CR8L), but this appears missing in Intel CPUs. 2925 * Note: We can't do this in LoadGuestState() as PDMApicGetTPR can jump back to ring-3 (lock)!! (no longer true) . 2926 */ 2927 /** @todo query and update the TPR only when it could have been changed (mmio 2928 * access & wrsmr (x2apic) */ 2741 2929 if (fSetupTPRCaching) 2742 2930 { … … 2749 2937 pVCpu->hwaccm.s.vmx.pbVAPIC[0x80] = u8LastTPR; 2750 2938 2751 /* Two options here: 2939 /* 2940 * Two options here: 2752 2941 * - external interrupt pending, but masked by the TPR value. 2753 2942 * -> a CR8 update that lower the current TPR value should cause an exit … … 2771 2960 else 2772 2961 { 2773 /* No interrupts are pending, so we don't need to be explicitely notified. 2962 /* 2963 * No interrupts are pending, so we don't need to be explicitely notified. 2774 2964 * There are enough world switches for detecting pending interrupts. 2775 2965 */ … … 2801 2991 2802 2992 /* 2803 * NOTE: DO NOT DO ANYTHING AFTER THIS POINT THAT MIGHT JUMP BACK TO RING 2993 * NOTE: DO NOT DO ANYTHING AFTER THIS POINT THAT MIGHT JUMP BACK TO RING-3! 2804 2994 * (until the actual world switch) 2805 2995 */ … … 2810 3000 VMMR0LogFlushDisable(pVCpu); 2811 3001 #endif 2812 /* Save the host state first. */ 3002 3003 /* 3004 * Save the host state first. 3005 */ 2813 3006 if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_HOST_CONTEXT) 2814 3007 { … … 2821 3014 } 2822 3015 2823 /* Load the guest state */ 3016 /* 3017 * Load the guest state. 3018 */ 2824 3019 if (!pVCpu->hwaccm.s.fContextUseFlags) 2825 3020 { … … 2839 3034 2840 3035 #ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION 2841 /* Disable interrupts to make sure a poke will interrupt execution. 3036 /* 3037 * Disable interrupts to make sure a poke will interrupt execution. 2842 3038 * This must be done *before* we check for TLB flushes; TLB shootdowns rely on this. 
2843 3039 */ … … 2856 3052 pVM->hwaccm.s.vmx.pfnSetupTaggedTLB(pVM, pVCpu); 2857 3053 2858 /* Manual save and restore: 3054 /* 3055 * Manual save and restore: 2859 3056 * - General purpose registers except RIP, RSP 2860 3057 * … … 2865 3062 * - DR7 (reset to 0x400) 2866 3063 * - EFLAGS (reset to RT_BIT(1); not relevant) 2867 *2868 3064 */ 2869 3065 … … 2877 3073 #endif 2878 3074 2879 /* Save the current TPR value in the LSTAR msr so our patches can access it. */ 3075 /* 3076 * Save the current TPR value in the LSTAR MSR so our patches can access it. 3077 */ 2880 3078 if (pVM->hwaccm.s.fTPRPatchingActive) 2881 3079 { … … 2893 3091 ASMAtomicWriteBool(&pVCpu->hwaccm.s.fCheckedTLBFlush, false); 2894 3092 ASMAtomicIncU32(&pVCpu->hwaccm.s.cWorldSwitchExits); 2895 /* Possibly the last TSC value seen by the guest (too high) (only when we're in tscoffset mode). */3093 /* Possibly the last TSC value seen by the guest (too high) (only when we're in TSC offset mode). */ 2896 3094 if (!(pVCpu->hwaccm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT)) 2897 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVCpu->hwaccm.s.vmx.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */); 3095 { 3096 TMCpuTickSetLastSeen(pVCpu, 3097 ASMReadTSC() + pVCpu->hwaccm.s.vmx.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */); 3098 } 2898 3099 2899 3100 TMNotifyEndOfExecution(pVCpu); … … 2901 3102 Assert(!(ASMGetFlags() & X86_EFL_IF)); 2902 3103 2903 /* Restore the host LSTAR msr if the guest could have changed it. */ 3104 /* 3105 * Restore the host LSTAR MSR if the guest could have changed it. 3106 */ 2904 3107 if (pVM->hwaccm.s.fTPRPatchingActive) 2905 3108 { … … 2950 3153 AssertRC(rc2); 2951 3154 2952 /* Sync back the guest state */ 3155 /* 3156 * Sync back the guest state. 3157 */ 2953 3158 rc2 = VMXR0SaveGuestState(pVM, pVCpu, pCtx); 2954 3159 AssertRC(rc2); … … 2961 3166 #endif 2962 3167 2963 /* Check if an injected event was interrupted prematurely. */ 3168 /* 3169 * Check if an injected event was interrupted prematurely. 3170 */ 2964 3171 rc2 = VMXReadCachedVMCS(VMX_VMCS32_RO_IDT_INFO, &val); 2965 3172 AssertRC(rc2); … … 2979 3186 AssertRC(rc2); 2980 3187 pVCpu->hwaccm.s.Event.errCode = val; 2981 Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv pending error=%RX64\n", pVCpu->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification, val)); 3188 Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv pending error=%RX64\n", 3189 pVCpu->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification, val)); 2982 3190 } 2983 3191 else 2984 3192 { 2985 Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv\n", pVCpu->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification)); 3193 Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv\n", pVCpu->hwaccm.s.Event.intInfo, 3194 (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification)); 2986 3195 pVCpu->hwaccm.s.Event.errCode = 0; 2987 3196 } 2988 3197 } 2989 3198 #ifdef VBOX_STRICT 2990 else 2991 if ( VMX_EXIT_INTERRUPTION_INFO_VALID(pVCpu->hwaccm.s.Event.intInfo)2992 /* Ignore software exceptions (such as int3) as they're reoccur when we restart the instruction anyway. 
*/2993 && VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hwaccm.s.Event.intInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SWEXCPT)2994 {2995 Log(("Ignore pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv\n",pVCpu->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification));3199 else if ( VMX_EXIT_INTERRUPTION_INFO_VALID(pVCpu->hwaccm.s.Event.intInfo) 3200 /* Ignore software exceptions (such as int3) as they're reoccur when we restart the instruction anyway. */ 3201 && VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hwaccm.s.Event.intInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SWEXCPT) 3202 { 3203 Log(("Ignore pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv\n", 3204 pVCpu->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification)); 2996 3205 } 2997 3206 … … 3006 3215 Log2(("IntInfo = %08x\n", (uint32_t)intInfo)); 3007 3216 3008 /* Sync back the TPR if it was changed. */ 3217 /* 3218 * Sync back the TPR if it was changed. 3219 */ 3009 3220 if ( fSetupTPRCaching 3010 3221 && u8LastTPR != pVCpu->hwaccm.s.vmx.pbVAPIC[0x80]) … … 3083 3294 Log(("Forward #NM fault to the guest\n")); 3084 3295 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestNM); 3085 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, 0); 3296 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), 3297 cbInstr, 0); 3086 3298 AssertRC(rc2); 3087 3299 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3); … … 3093 3305 #ifdef VBOX_ALWAYS_TRAP_PF 3094 3306 if (pVM->hwaccm.s.fNestedPaging) 3095 { /* A genuine pagefault. 3096 * Forward the trap to the guest by injecting the exception and resuming execution. 3307 { 3308 /* 3309 * A genuine pagefault. Forward the trap to the guest by injecting the exception and resuming execution. 3097 3310 */ 3098 Log(("Guest page fault at %RGv cr2=%RGv error code %RGv rsp=%RGv\n", (RTGCPTR)pCtx->rip, exitQualification, errCode, (RTGCPTR)pCtx->rsp)); 3311 Log(("Guest page fault at %RGv cr2=%RGv error code %RGv rsp=%RGv\n", (RTGCPTR)pCtx->rip, exitQualification, 3312 errCode, (RTGCPTR)pCtx->rsp)); 3099 3313 3100 3314 Assert(CPUMIsGuestInPagedProtectedModeEx(pCtx)); … … 3104 3318 /* Now we must update CR2. */ 3105 3319 pCtx->cr2 = exitQualification; 3106 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode); 3320 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), 3321 cbInstr, errCode); 3107 3322 AssertRC(rc2); 3108 3323 … … 3110 3325 goto ResumeExecution; 3111 3326 } 3112 #e ndif3327 #else 3113 3328 Assert(!pVM->hwaccm.s.fNestedPaging); 3329 #endif 3114 3330 3115 3331 #ifdef VBOX_HWACCM_WITH_GUEST_PATCHING … … 3124 3340 { 3125 3341 RTGCPHYS GCPhysApicBase, GCPhys; 3126 PDMApicGetBase(pVM, &GCPhysApicBase); /* @todo cache this */3342 PDMApicGetBase(pVM, &GCPhysApicBase); /** @todo cache this */ 3127 3343 GCPhysApicBase &= PAGE_BASE_GC_MASK; 3128 3344 … … 3181 3397 goto ResumeExecution; 3182 3398 } 3183 else 3184 if (rc == VINF_EM_RAW_GUEST_TRAP)3185 { /* A genuine pagefault.3186 * Forward the trap to the guest by injecting the exception and resuming execution.3399 else if (rc == VINF_EM_RAW_GUEST_TRAP) 3400 { 3401 /* 3402 * A genuine pagefault. Forward the trap to the guest by injecting the exception and resuming execution. 
3187 3403 */ 3188 3404 Log2(("Forward page fault to the guest\n")); … … 3196 3412 /* Now we must update CR2. */ 3197 3413 pCtx->cr2 = exitQualification; 3198 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode); 3414 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), 3415 cbInstr, errCode); 3199 3416 AssertRC(rc2); 3200 3417 … … 3223 3440 } 3224 3441 Log(("Trap %x at %04X:%RGv\n", vector, pCtx->cs, (RTGCPTR)pCtx->rip)); 3225 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode); 3442 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), 3443 cbInstr, errCode); 3226 3444 AssertRC(rc2); 3227 3445 … … 3234 3452 uint64_t uDR6; 3235 3453 3236 /* DR6, DR7.GD and IA32_DEBUGCTL.LBR are not updated yet. 3454 /* 3455 * DR6, DR7.GD and IA32_DEBUGCTL.LBR are not updated yet. 3237 3456 * 3238 3457 * Exit qualification bits: … … 3259 3478 ASMSetDR6(pCtx->dr[6]); 3260 3479 3261 /* X86_DR7_GD will be cleared if drx accesses should be trapped inside the guest. */3480 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */ 3262 3481 pCtx->dr[7] &= ~X86_DR7_GD; 3263 3482 … … 3271 3490 AssertRC(rc2); 3272 3491 3273 Log(("Trap %x (debug) at %RGv exit qualification %RX64 dr6=%x dr7=%x\n", vector, (RTGCPTR)pCtx->rip, exitQualification, (uint32_t)pCtx->dr[6], (uint32_t)pCtx->dr[7])); 3274 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode); 3492 Log(("Trap %x (debug) at %RGv exit qualification %RX64 dr6=%x dr7=%x\n", vector, (RTGCPTR)pCtx->rip, 3493 exitQualification, (uint32_t)pCtx->dr[6], (uint32_t)pCtx->dr[7])); 3494 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), 3495 cbInstr, errCode); 3275 3496 AssertRC(rc2); 3276 3497 … … 3290 3511 { 3291 3512 Log(("Guest #BP at %04x:%RGv\n", pCtx->cs, pCtx->rip)); 3292 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode); 3513 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), 3514 cbInstr, errCode); 3293 3515 AssertRC(rc2); 3294 3516 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3); … … 3304 3526 } 3305 3527 3306 case X86_XCPT_GP: /* General protection failure exception. */3528 case X86_XCPT_GP: /* General protection failure exception. 
*/ 3307 3529 { 3308 3530 uint32_t cbOp; … … 3315 3537 { 3316 3538 Log(("Trap %x at %04X:%RGv errorCode=%RGv\n", vector, pCtx->cs, (RTGCPTR)pCtx->rip, errCode)); 3317 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode); 3539 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), 3540 cbInstr, errCode); 3318 3541 AssertRC(rc2); 3319 3542 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3); … … 3343 3566 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip + pDis->opsize); 3344 3567 Assert(VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)); 3345 rc2 = VMXWriteVMCS(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI); 3568 rc2 = VMXWriteVMCS(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, 3569 VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI); 3346 3570 AssertRC(rc2); 3347 3571 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitSti); … … 3415 3639 } 3416 3640 3417 rc2 = SELMToFlatEx(pVCpu, DIS_SELREG_SS, CPUMCTX2CORE(pCtx), (pCtx->esp - cbParm) & uMask, 0, &GCPtrStack); 3641 rc2 = SELMToFlatEx(pVCpu, DIS_SELREG_SS, CPUMCTX2CORE(pCtx), (pCtx->esp - cbParm) & uMask, 0, 3642 &GCPtrStack); 3418 3643 if (RT_FAILURE(rc2)) 3419 3644 { … … 3466 3691 pCtx->cs = aIretFrame[1]; 3467 3692 pCtx->csHid.u64Base = pCtx->cs << 4; 3468 pCtx->eflags.u = (pCtx->eflags.u & ~(X86_EFL_POPF_BITS & uMask)) | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask); 3693 pCtx->eflags.u = (pCtx->eflags.u & ~(X86_EFL_POPF_BITS & uMask)) 3694 | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask); 3469 3695 pCtx->sp += sizeof(aIretFrame); 3470 3696 … … 3537 3763 pCtx->rip += cbOp; /* Move on to the next instruction. */ 3538 3764 3539 /* lidt, lgdt can end up here. In the future crx changes as well. Just reload the whole context to be done with it. */ 3765 /* 3766 * LIDT, LGDT can end up here. In the future CRx changes as well. Just reload the 3767 * whole context to be done with it. 
3768 */ 3540 3769 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL; 3541 3770 … … 3561 3790 switch(vector) 3562 3791 { 3563 case X86_XCPT_DE:3564 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestDE);3565 break;3566 case X86_XCPT_UD:3567 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestUD);3568 break;3569 case X86_XCPT_SS:3570 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestSS);3571 break;3572 case X86_XCPT_NP:3573 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestNP);3574 break;3575 case X86_XCPT_XF:3576 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestXF);3577 break;3792 case X86_XCPT_DE: 3793 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestDE); 3794 break; 3795 case X86_XCPT_UD: 3796 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestUD); 3797 break; 3798 case X86_XCPT_SS: 3799 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestSS); 3800 break; 3801 case X86_XCPT_NP: 3802 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestNP); 3803 break; 3804 case X86_XCPT_XF: 3805 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestXF); 3806 break; 3578 3807 } 3579 3808 3580 3809 Log(("Trap %x at %04X:%RGv\n", vector, pCtx->cs, (RTGCPTR)pCtx->rip)); 3581 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode); 3810 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), 3811 cbInstr, errCode); 3582 3812 AssertRC(rc2); 3583 3813 … … 3592 3822 { 3593 3823 Log(("Real Mode Trap %x at %04x:%04X error code %x\n", vector, pCtx->cs, pCtx->eip, errCode)); 3594 rc = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode); 3824 rc = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), 3825 cbInstr, errCode); 3595 3826 AssertRC(VBOXSTRICTRC_VAL(rc)); /* Strict RC check below. */ 3596 3827 3597 /* Go back to ring 3 in case of a triple fault. */ 3598 if ( vector == X86_XCPT_DF 3599 && rc == VINF_EM_RESET) 3828 /* Go back to ring-3 in case of a triple fault. */ 3829 if ( vector == X86_XCPT_DF 3830 && rc == VINF_EM_RESET) 3831 { 3600 3832 break; 3833 } 3601 3834 3602 3835 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3); … … 3620 3853 } 3621 3854 3622 case VMX_EXIT_EPT_VIOLATION: /* 48 EPT violation. An attempt to access memory with a guest-physical address was disallowed by the configuration of the EPT paging structures. */ 3855 /* 3856 * 48 EPT violation. An attemp to access memory with a guest-physical address was disallowed 3857 * by the configuration of the EPT paging structures. 3858 */ 3859 case VMX_EXIT_EPT_VIOLATION: 3623 3860 { 3624 3861 RTGCPHYS GCPhys; … … 3640 3877 /* If the page is present, then it's a page level protection fault. */ 3641 3878 if (exitQualification & VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT) 3642 {3643 3879 errCode |= X86_TRAP_PF_P; 3644 }3645 3880 else 3646 3881 { 3647 3882 /* Shortcut for APIC TPR reads and writes. */ 3648 3883 if ( (GCPhys & 0xfff) == 0x080 3649 && GCPhys > 0x1000000 /* to skip VGA frame buffer accesses */3884 && GCPhys > 0x1000000 /* to skip VGA frame buffer accesses */ 3650 3885 && fSetupTPRCaching 3651 3886 && (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)) … … 3678 3913 || rc == VERR_PAGE_TABLE_NOT_PRESENT 3679 3914 || rc == VERR_PAGE_NOT_PRESENT) 3680 { /* We've successfully synced our shadow pages, so let's just continue execution. */ 3915 { 3916 /* We've successfully synced our shadow pages, so let's just continue execution. 
*/ 3681 3917 Log2(("Shadow page fault at %RGv cr2=%RGp error code %x\n", (RTGCPTR)pCtx->rip, exitQualification , errCode)); 3682 3918 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitReasonNPF); … … 3707 3943 /* Shortcut for APIC TPR reads and writes. */ 3708 3944 if ( (GCPhys & 0xfff) == 0x080 3709 && GCPhys > 0x1000000 /* to skip VGA frame buffer accesses */3945 && GCPhys > 0x1000000 /* to skip VGA frame buffer accesses */ 3710 3946 && fSetupTPRCaching 3711 3947 && (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)) … … 3745 3981 case VMX_EXIT_IRQ_WINDOW: /* 7 Interrupt window. */ 3746 3982 /* Clear VM-exit on IF=1 change. */ 3747 LogFlow(("VMX_EXIT_IRQ_WINDOW %RGv pending=%d IF=%d\n", (RTGCPTR)pCtx->rip, VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)), pCtx->eflags.Bits.u1IF)); 3983 LogFlow(("VMX_EXIT_IRQ_WINDOW %RGv pending=%d IF=%d\n", (RTGCPTR)pCtx->rip, 3984 VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)), pCtx->eflags.Bits.u1IF)); 3748 3985 pVCpu->hwaccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT; 3749 3986 rc2 = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls); … … 3869 4106 STAM_COUNTER_INC((exitReason == VMX_EXIT_RDMSR) ? &pVCpu->hwaccm.s.StatExitRdmsr : &pVCpu->hwaccm.s.StatExitWrmsr); 3870 4107 3871 /* Note: the intel manual claims there's a REX version of RDMSR that's slightly different, so we play safe by completely disassembling the instruction. */ 4108 /* 4109 * Note: The Intel spec. claims there's an REX version of RDMSR that's slightly different, 4110 * so we play safe by completely disassembling the instruction. 4111 */ 3872 4112 Log2(("VMX: %s\n", (exitReason == VMX_EXIT_RDMSR) ? "rdmsr" : "wrmsr")); 3873 4113 rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0); … … 3875 4115 { 3876 4116 /* EIP has been updated already. */ 3877 3878 4117 /* Only resume if successful. */ 3879 4118 goto ResumeExecution; 3880 4119 } 3881 AssertMsg(rc == VERR_EM_INTERPRETER, ("EMU: %s failed with %Rrc\n", (exitReason == VMX_EXIT_RDMSR) ? "rdmsr" : "wrmsr", VBOXSTRICTRC_VAL(rc))); 4120 AssertMsg(rc == VERR_EM_INTERPRETER, ("EMU: %s failed with %Rrc\n", 4121 (exitReason == VMX_EXIT_RDMSR) ? 
"rdmsr" : "wrmsr", VBOXSTRICTRC_VAL(rc))); 3882 4122 break; 3883 4123 } … … 3889 4129 switch (VMX_EXIT_QUALIFICATION_CRX_ACCESS(exitQualification)) 3890 4130 { 3891 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE: 3892 Log2(("VMX: %RGv mov cr%d, x\n", (RTGCPTR)pCtx->rip, VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification))); 3893 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]); 3894 rc = EMInterpretCRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx), 3895 VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification), 3896 VMX_EXIT_QUALIFICATION_CRX_GENREG(exitQualification)); 3897 3898 switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)) 4131 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE: 3899 4132 { 3900 case 0: 3901 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_GUEST_CR3; 3902 break; 3903 case 2: 3904 break; 3905 case 3: 3906 Assert(!pVM->hwaccm.s.fNestedPaging || !CPUMIsGuestInPagedProtectedModeEx(pCtx)); 3907 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3; 3908 break; 3909 case 4: 3910 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4; 3911 break; 3912 case 8: 3913 /* CR8 contains the APIC TPR */ 3914 Assert(!(pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)); 3915 break; 3916 3917 default: 3918 AssertFailed(); 4133 Log2(("VMX: %RGv mov cr%d, x\n", (RTGCPTR)pCtx->rip, VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification))); 4134 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]); 4135 rc = EMInterpretCRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx), 4136 VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification), 4137 VMX_EXIT_QUALIFICATION_CRX_GENREG(exitQualification)); 4138 switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)) 4139 { 4140 case 0: 4141 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_GUEST_CR3; 4142 break; 4143 case 2: 4144 break; 4145 case 3: 4146 Assert(!pVM->hwaccm.s.fNestedPaging || !CPUMIsGuestInPagedProtectedModeEx(pCtx)); 4147 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3; 4148 break; 4149 case 4: 4150 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4; 4151 break; 4152 case 8: 4153 /* CR8 contains the APIC TPR */ 4154 Assert(!(pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 4155 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)); 4156 break; 4157 4158 default: 4159 AssertFailed(); 4160 break; 4161 } 3919 4162 break; 3920 4163 } 3921 break; 3922 3923 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ: 3924 Log2(("VMX: mov x, crx\n")); 3925 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]); 3926 3927 Assert(!pVM->hwaccm.s.fNestedPaging || !CPUMIsGuestInPagedProtectedModeEx(pCtx) || VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification) != USE_REG_CR3); 3928 3929 /* CR8 reads only cause an exit when the TPR shadow feature isn't present. 
*/ 3930 Assert(VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification) != 8 || !(pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)); 3931 3932 rc = EMInterpretCRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx), 3933 VMX_EXIT_QUALIFICATION_CRX_GENREG(exitQualification), 3934 VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)); 3935 break; 3936 3937 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS: 3938 Log2(("VMX: clts\n")); 3939 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCLTS); 3940 rc = EMInterpretCLTS(pVM, pVCpu); 3941 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0; 3942 break; 3943 3944 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW: 3945 Log2(("VMX: lmsw %x\n", VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification))); 3946 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitLMSW); 3947 rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification)); 3948 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0; 3949 break; 4164 4165 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ: 4166 { 4167 Log2(("VMX: mov x, crx\n")); 4168 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]); 4169 4170 Assert( !pVM->hwaccm.s.fNestedPaging 4171 || !CPUMIsGuestInPagedProtectedModeEx(pCtx) 4172 || VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification) != USE_REG_CR3); 4173 4174 /* CR8 reads only cause an exit when the TPR shadow feature isn't present. */ 4175 Assert( VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification) != 8 4176 || !(pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)); 4177 4178 rc = EMInterpretCRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx), 4179 VMX_EXIT_QUALIFICATION_CRX_GENREG(exitQualification), 4180 VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)); 4181 break; 4182 } 4183 4184 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS: 4185 { 4186 Log2(("VMX: clts\n")); 4187 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCLTS); 4188 rc = EMInterpretCLTS(pVM, pVCpu); 4189 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0; 4190 break; 4191 } 4192 4193 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW: 4194 { 4195 Log2(("VMX: lmsw %x\n", VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification))); 4196 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitLMSW); 4197 rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification)); 4198 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0; 4199 break; 4200 } 3950 4201 } 3951 4202 … … 3970 4221 && !CPUMIsHyperDebugStateActive(pVCpu)) 3971 4222 { 3972 /* Disable drx move intercepts. */4223 /* Disable DRx move intercepts. 
*/ 3973 4224 pVCpu->hwaccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT; 3974 4225 rc2 = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls); … … 3981 4232 #ifdef LOG_ENABLED 3982 4233 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(exitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE) 3983 Log(("VMX_EXIT_DRX_MOVE: write DR%d genreg %d\n", VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification), VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification))); 4234 { 4235 Log(("VMX_EXIT_DRX_MOVE: write DR%d genreg %d\n", VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification), 4236 VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification))); 4237 } 3984 4238 else 3985 4239 Log(("VMX_EXIT_DRX_MOVE: read DR%d\n", VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification))); … … 3997 4251 } 3998 4252 3999 /** @todo clear VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT after the first time and restore drx registers afterwards */ 4253 /** @todo clear VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT after the first 4254 * time and restore DRx registers afterwards */ 4000 4255 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(exitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE) 4001 4256 { 4002 Log2(("VMX: mov drx%d, genreg%d\n", VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification), VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification))); 4257 Log2(("VMX: mov DRx%d, genreg%d\n", VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification), 4258 VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification))); 4003 4259 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitDRxWrite); 4004 4260 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx), … … 4010 4266 else 4011 4267 { 4012 Log2(("VMX: mov x, drx\n"));4268 Log2(("VMX: mov x, DRx\n")); 4013 4269 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitDRxRead); 4014 4270 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx), … … 4033 4289 { 4034 4290 STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatExit2Sub1, y1); 4291 uint32_t uPort; 4035 4292 uint32_t uIOWidth = VMX_EXIT_QUALIFICATION_IO_WIDTH(exitQualification); 4036 uint32_t uPort;4037 4293 bool fIOWrite = (VMX_EXIT_QUALIFICATION_IO_DIRECTION(exitQualification) == VMX_EXIT_QUALIFICATION_IO_DIRECTION_OUT); 4038 4294 4039 4295 /** @todo necessary to make the distinction? */ 4040 4296 if (VMX_EXIT_QUALIFICATION_IO_ENCODING(exitQualification) == VMX_EXIT_QUALIFICATION_IO_ENCODING_DX) 4041 {4042 4297 uPort = pCtx->edx & 0xffff; 4043 }4044 4298 else 4045 4299 uPort = VMX_EXIT_QUALIFICATION_IO_PORT(exitQualification); /* Immediate encoding. */ 4046 4300 4047 /* paranoia */ 4048 if (RT_UNLIKELY(uIOWidth == 2 || uIOWidth >= 4)) 4301 if (RT_UNLIKELY(uIOWidth == 2 || uIOWidth >= 4)) /* paranoia */ 4049 4302 { 4050 4303 rc = fIOWrite ? VINF_IOM_R3_IOPORT_WRITE : VINF_IOM_R3_IOPORT_READ; … … 4054 4307 4055 4308 uint32_t cbSize = g_aIOSize[uIOWidth]; 4056 4057 4309 if (VMX_EXIT_QUALIFICATION_IO_STRING(exitQualification)) 4058 4310 { … … 4084 4336 else 4085 4337 { 4086 /* normal in/out */4338 /* Normal in/out */ 4087 4339 uint32_t uAndVal = g_aIOOpAnd[uIOWidth]; 4088 4340 … … 4112 4364 } 4113 4365 } 4366 4114 4367 /* 4115 4368 * Handled the I/O return codes. 
… … 4126 4379 { 4127 4380 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDRxIOCheck); 4128 for (unsigned i =0;i<4;i++)4381 for (unsigned i = 0; i < 4; i++) 4129 4382 { 4130 4383 unsigned uBPLen = g_aIOSize[X86_DR7_GET_LEN(pCtx->dr[7], i)]; … … 4144 4397 uDR6 |= (uint64_t)RT_BIT(i); 4145 4398 4146 /* Note: AMD64 Architecture Programmer's Manual 13.1: 4147 * Bits 15:13 of the DR6 register is never cleared by the processor and must be cleared by software after 4148 * the contents have been read. 4399 /* 4400 * Note: AMD64 Architecture Programmer's Manual 13.1: 4401 * Bits 15:13 of the DR6 register is never cleared by the processor and must 4402 * be cleared by software after the contents have been read. 4149 4403 */ 4150 4404 ASMSetDR6(uDR6); 4151 4405 4152 /* X86_DR7_GD will be cleared if drx accesses should be trapped inside the guest. */4406 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */ 4153 4407 pCtx->dr[7] &= ~X86_DR7_GD; 4154 4408 … … 4168 4422 4169 4423 Log(("Inject IO debug trap at %RGv\n", (RTGCPTR)pCtx->rip)); 4170 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), 0, 0); 4424 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), 4425 0 /* cbInstr */, 0 /* errCode */); 4171 4426 AssertRC(rc2); 4172 4427 … … 4189 4444 Assert(fIOWrite); 4190 4445 else 4191 AssertMsg(RT_FAILURE(rc) || rc == VINF_EM_RAW_EMULATE_INSTR || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", VBOXSTRICTRC_VAL(rc))); 4446 { 4447 AssertMsg( RT_FAILURE(rc) 4448 || rc == VINF_EM_RAW_EMULATE_INSTR 4449 || rc == VINF_EM_RAW_GUEST_TRAP 4450 || rc == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", VBOXSTRICTRC_VAL(rc))); 4451 } 4192 4452 #endif 4193 4453 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub1, y1); … … 4200 4460 goto ResumeExecution; 4201 4461 4202 case VMX_EXIT_APIC_ACCESS: /* 44 APIC access. Guest software attempted to access memory at a physical address on the APIC-access page. */ 4462 case VMX_EXIT_APIC_ACCESS: /* 44 APIC access. Guest software attempted to access memory at a physical address 4463 on the APIC-access page. */ 4203 4464 { 4204 4465 LogFlow(("VMX_EXIT_APIC_ACCESS\n")); … … 4207 4468 switch(uAccessType) 4208 4469 { 4209 case VMX_APIC_ACCESS_TYPE_LINEAR_READ: 4210 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE: 4211 { 4212 RTGCPHYS GCPhys; 4213 PDMApicGetBase(pVM, &GCPhys); 4214 GCPhys &= PAGE_BASE_GC_MASK; 4215 GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(exitQualification); 4216 4217 LogFlow(("Apic access at %RGp\n", GCPhys)); 4218 rc = IOMMMIOPhysHandler(pVM, (uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ) ? 0 : X86_TRAP_PF_RW, CPUMCTX2CORE(pCtx), GCPhys); 4219 if (rc == VINF_SUCCESS) 4220 goto ResumeExecution; /* rip already updated */ 4221 break; 4222 } 4223 4224 default: 4225 rc = VINF_EM_RAW_EMULATE_INSTR; 4226 break; 4470 case VMX_APIC_ACCESS_TYPE_LINEAR_READ: 4471 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE: 4472 { 4473 RTGCPHYS GCPhys; 4474 PDMApicGetBase(pVM, &GCPhys); 4475 GCPhys &= PAGE_BASE_GC_MASK; 4476 GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(exitQualification); 4477 4478 LogFlow(("Apic access at %RGp\n", GCPhys)); 4479 rc = IOMMMIOPhysHandler(pVM, (uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ) ? 
0 : X86_TRAP_PF_RW, 4480 CPUMCTX2CORE(pCtx), GCPhys); 4481 if (rc == VINF_SUCCESS) 4482 goto ResumeExecution; /* rip already updated */ 4483 break; 4484 } 4485 4486 default: 4487 rc = VINF_EM_RAW_EMULATE_INSTR; 4488 break; 4227 4489 } 4228 4490 break; … … 4240 4502 } 4241 4503 4242 /* Note: the guest state isn't entirely synced back at this stage. */ 4504 4505 /* 4506 * Note: The guest state is not entirely synced back at this stage! 4507 */ 4243 4508 4244 4509 /* Investigate why there was a VM-exit. (part 2) */ … … 4369 4634 4370 4635 case VMX_EXIT_TPR: /* 43 TPR below threshold. Guest software executed MOV to CR8. */ 4371 case VMX_EXIT_APIC_ACCESS: /* 44 APIC access. Guest software attempted to access memory at a physical address on the APIC-access page. */4372 4636 case VMX_EXIT_RDMSR: /* 31 RDMSR. Guest software attempted to execute RDMSR. */ 4373 4637 case VMX_EXIT_WRMSR: /* 32 WRMSR. Guest software attempted to execute WRMSR. */ 4374 4638 case VMX_EXIT_PAUSE: /* 40 Guest software attempted to execute PAUSE. */ 4375 4639 case VMX_EXIT_MONITOR: /* 39 Guest software attempted to execute MONITOR. */ 4376 /* Note: If we decide to emulate them here, then we must sync the MSRs that could have been changed (sysenter, fs/gs base)!!! */ 4640 case VMX_EXIT_APIC_ACCESS: /* 44 APIC access. Guest software attempted to access memory at a physical address 4641 on the APIC-access page. */ 4642 { 4643 /* 4644 * If we decided to emulate them here, then we must sync the MSRs that could have been changed (sysenter, FS/GS base) 4645 */ 4377 4646 rc = VERR_EM_INTERPRETER; 4378 4647 break; 4648 } 4379 4649 4380 4650 case VMX_EXIT_IRQ_WINDOW: /* 7 Interrupt window. */ … … 4404 4674 Log(("VMX_VMCS_GUEST_RFLAGS %08x\n", val2)); 4405 4675 4406 VMX_LOG_SELREG(CS, "CS",val2);4407 VMX_LOG_SELREG(DS, "DS",val2);4408 VMX_LOG_SELREG(ES, "ES",val2);4409 VMX_LOG_SELREG(FS, "FS",val2);4410 VMX_LOG_SELREG(GS, "GS",val2);4411 VMX_LOG_SELREG(SS, "SS",val2);4412 VMX_LOG_SELREG(TR, "TR",val2);4676 VMX_LOG_SELREG(CS, "CS", val2); 4677 VMX_LOG_SELREG(DS, "DS", val2); 4678 VMX_LOG_SELREG(ES, "ES", val2); 4679 VMX_LOG_SELREG(FS, "FS", val2); 4680 VMX_LOG_SELREG(GS, "GS", val2); 4681 VMX_LOG_SELREG(SS, "SS", val2); 4682 VMX_LOG_SELREG(TR, "TR", val2); 4413 4683 VMX_LOG_SELREG(LDTR, "LDTR", val2); 4414 4684 … … 4430 4700 4431 4701 } 4702 4432 4703 end: 4433 4434 4704 /* We now going back to ring-3, so clear the action flag. */ 4435 4705 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3); 4436 4706 4437 /* Signal changes for the recompiler. */ 4438 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_LDTR | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_TR | CPUM_CHANGED_HIDDEN_SEL_REGS); 4439 4440 /* If we executed vmlaunch/vmresume and an external irq was pending, then we don't have to do a full sync the next time. */ 4707 /* 4708 * Signal changes for the recompiler. 4709 */ 4710 CPUMSetChangedFlags(pVCpu, 4711 CPUM_CHANGED_SYSENTER_MSR 4712 | CPUM_CHANGED_LDTR 4713 | CPUM_CHANGED_GDTR 4714 | CPUM_CHANGED_IDTR 4715 | CPUM_CHANGED_TR 4716 | CPUM_CHANGED_HIDDEN_SEL_REGS); 4717 4718 /* 4719 * If we executed vmlaunch/vmresume and an external IRQ was pending, then we don't have to do a full sync the next time. 
4720 */ 4441 4721 if ( exitReason == VMX_EXIT_EXTERNAL_IRQ 4442 4722 && !VMX_EXIT_INTERRUPTION_INFO_VALID(intInfo)) … … 4454 4734 } 4455 4735 4456 /* translate into a less severe return code */4736 /* Translate into a less severe return code */ 4457 4737 if (rc == VERR_EM_INTERPRETER) 4458 4738 rc = VINF_EM_RAW_EMULATE_INSTR; 4459 else 4460 /* Try to extract more information about what might have gone wrong here. */ 4461 if (rc == VERR_VMX_INVALID_VMCS_PTR) 4462 { 4739 else if (rc == VERR_VMX_INVALID_VMCS_PTR) 4740 { 4741 /* Try to extract more information about what might have gone wrong here. */ 4463 4742 VMXGetActivateVMCS(&pVCpu->hwaccm.s.vmx.lasterror.u64VMCSPhys); 4464 4743 pVCpu->hwaccm.s.vmx.lasterror.ulVMCSRevision = *(uint32_t *)pVCpu->hwaccm.s.vmx.pvVMCS; … … 4471 4750 4472 4751 #ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION 4473 /* Restore interrupts if we exit ted after disabling them. */4752 /* Restore interrupts if we exited after disabling them. */ 4474 4753 if (uOldEFlags != ~(RTCCUINTREG)0) 4475 4754 ASMSetFlags(uOldEFlags); … … 4485 4764 4486 4765 /** 4487 * Enters the VT-x session 4766 * Enters the VT-x session. 4488 4767 * 4489 4768 * @returns VBox status code. 4490 * @param pVM The VM to operate on.4491 * @param pVCpu The VMCPU to operate on.4492 * @param pCpu CPU info struct4769 * @param pVM Pointer to the VM. 4770 * @param pVCpu Pointer to the VMCPU. 4771 * @param pCpu Pointer to the CPU info struct. 4493 4772 */ 4494 4773 VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu) … … 4504 4783 } 4505 4784 4506 /* Activate the VM Control Structure. */4785 /* Activate the VMCS. */ 4507 4786 int rc = VMXActivateVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS); 4508 4787 if (RT_FAILURE(rc)) … … 4515 4794 4516 4795 /** 4517 * Leaves the VT-x session 4796 * Leaves the VT-x session. 4518 4797 * 4519 4798 * @returns VBox status code. 4520 * @param pVM The VM to operate on.4521 * @param pVCpu The VMCPU to operate on.4522 * @param pCtx CPU context4799 * @param pVM Pointer to the VM. 4800 * @param pVCpu Pointer to the VMCPU. 4801 * @param pCtx Pointer to the guests CPU context. 4523 4802 */ 4524 4803 VMMR0DECL(int) VMXR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) … … 4534 4813 else 4535 4814 #endif 4536 /* Save the guest debug state if necessary. */ 4815 4816 /* 4817 * Save the guest debug state if necessary. 4818 */ 4537 4819 if (CPUMIsGuestDebugStateActive(pVCpu)) 4538 4820 { 4539 4821 CPUMR0SaveGuestDebugState(pVM, pVCpu, pCtx, true /* save DR6 */); 4540 4822 4541 /* Enable drx move intercepts again. */4823 /* Enable DRx move intercepts again. */ 4542 4824 pVCpu->hwaccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT; 4543 4825 int rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls); … … 4550 4832 Assert(pVCpu->hwaccm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT); 4551 4833 4552 /* Clear VM Control Structure. Marking it inactive, clearing implementation specific data and writing back VMCS data to memory. */ 4834 /* 4835 * Clear VMCS, marking it inactive, clearing implementation-specific data and writing 4836 * VMCS data back to memory. 4837 */ 4553 4838 int rc = VMXClearVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS); 4554 4839 AssertRC(rc); … … 4562 4847 * 4563 4848 * @returns VBox status code. 4564 * @param pVM The VM to operate on.4565 * @param pVCpu The VM CPU to operate on.4849 * @param pVM Pointer to the VM. 4850 * @param pVCpu Pointer to the VMCPU. 4566 4851 * @param enmFlush Type of flush. 
4567 4852 */ … … 4583 4868 * 4584 4869 * @returns VBox status code. 4585 * @param pVM The VM to operate on.4586 * @param pVCpu The VM CPU to operate on(can be NULL depending on @a4870 * @param pVM Pointer to the VM. 4871 * @param pVCpu Pointer to the VMCPU (can be NULL depending on @a 4587 4872 * enmFlush). 4588 4873 * @param enmFlush Type of flush. … … 4593 4878 { 4594 4879 #if HC_ARCH_BITS == 32 4595 /* If we get a flush in 64 bits guest mode, then force a full TLB flush. Invvpid probably takes only 32 bits addresses. (@todo) */ 4880 /* 4881 * If we get a flush in 64-bit guest mode, then force a full TLB flush. invvpid probably takes only 32-bit addresses. 4882 */ 4596 4883 if ( CPUMIsGuestInLongMode(pVCpu) 4597 4884 && !VMX_IS_64BIT_HOST_MODE()) … … 4629 4916 * 4630 4917 * @returns VBox status code. 4631 * @param pVM The VM to operate on.4632 * @param pVCpu The VM CPU to operate on.4633 * @param GCVirt Page to invalidate.4918 * @param pVM Pointer to the VM. 4919 * @param pVCpu Pointer to the VMCPU. 4920 * @param GCVirt Guest virtual address of the page to invalidate. 4634 4921 */ 4635 4922 VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt) … … 4671 4958 * 4672 4959 * @returns VBox status code. 4673 * @param pVM The VM to operate on.4960 * @param pVM Pointer to the VM. 4674 4961 * @param pVCpu The VM CPU to operate on. 4675 * @param GCPhys Page to invalidate.4962 * @param GCPhys Guest physical address of the page to invalidate. 4676 4963 */ 4677 4964 VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys) … … 4690 4977 4691 4978 /** 4692 * Report world switch error and dump some useful debug info 4979 * Report world switch error and dump some useful debug info. 4693 4980 * 4694 * @param pVM The VM to operate on.4695 * @param pVCpu The VMCPU to operate on.4696 * @param rc Return code 4697 * @param pCtx Current CPU context (not updated)4981 * @param pVM Pointer to the VM. 4982 * @param pVCpu Pointer to the VMCPU. 4983 * @param rc Return code. 4984 * @param pCtx Pointer to the current guest CPU context (not updated). 4698 4985 */ 4699 4986 static void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc, PCPUMCTX pCtx) … … 4703 4990 switch (VBOXSTRICTRC_VAL(rc)) 4704 4991 { 4705 case VERR_VMX_INVALID_VMXON_PTR: 4706 AssertFailed(); 4707 break; 4708 4709 case VERR_VMX_UNABLE_TO_START_VM: 4710 case VERR_VMX_UNABLE_TO_RESUME_VM: 4711 { 4712 int rc2; 4713 RTCCUINTREG exitReason, instrError; 4714 4715 rc2 = VMXReadVMCS(VMX_VMCS32_RO_EXIT_REASON, &exitReason); 4716 rc2 |= VMXReadVMCS(VMX_VMCS32_RO_VM_INSTR_ERROR, &instrError); 4717 AssertRC(rc2); 4718 if (rc2 == VINF_SUCCESS) 4719 { 4720 Log(("Unable to start/resume VM for reason: %x. Instruction error %x\n", (uint32_t)exitReason, (uint32_t)instrError)); 4721 Log(("Current stack %08x\n", &rc2)); 4722 4723 pVCpu->hwaccm.s.vmx.lasterror.ulInstrError = instrError; 4724 pVCpu->hwaccm.s.vmx.lasterror.ulExitReason = exitReason; 4992 case VERR_VMX_INVALID_VMXON_PTR: 4993 AssertFailed(); 4994 break; 4995 4996 case VERR_VMX_UNABLE_TO_START_VM: 4997 case VERR_VMX_UNABLE_TO_RESUME_VM: 4998 { 4999 int rc2; 5000 RTCCUINTREG exitReason, instrError; 5001 5002 rc2 = VMXReadVMCS(VMX_VMCS32_RO_EXIT_REASON, &exitReason); 5003 rc2 |= VMXReadVMCS(VMX_VMCS32_RO_VM_INSTR_ERROR, &instrError); 5004 AssertRC(rc2); 5005 if (rc2 == VINF_SUCCESS) 5006 { 5007 Log(("Unable to start/resume VM for reason: %x. 
Instruction error %x\n", (uint32_t)exitReason, 5008 (uint32_t)instrError)); 5009 Log(("Current stack %08x\n", &rc2)); 5010 5011 pVCpu->hwaccm.s.vmx.lasterror.ulInstrError = instrError; 5012 pVCpu->hwaccm.s.vmx.lasterror.ulExitReason = exitReason; 4725 5013 4726 5014 #ifdef VBOX_STRICT 4727 RTGDTR gdtr; 4728 PCX86DESCHC pDesc; 4729 RTCCUINTREG val; 4730 4731 ASMGetGDTR(&gdtr); 4732 4733 VMXReadVMCS(VMX_VMCS64_GUEST_RIP, &val); 4734 Log(("Old eip %RGv new %RGv\n", (RTGCPTR)pCtx->rip, (RTGCPTR)val)); 4735 VMXReadVMCS(VMX_VMCS_CTRL_PIN_EXEC_CONTROLS, &val); 4736 Log(("VMX_VMCS_CTRL_PIN_EXEC_CONTROLS %08x\n", val)); 4737 VMXReadVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, &val); 4738 Log(("VMX_VMCS_CTRL_PROC_EXEC_CONTROLS %08x\n", val)); 4739 VMXReadVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, &val); 4740 Log(("VMX_VMCS_CTRL_ENTRY_CONTROLS %08x\n", val)); 4741 VMXReadVMCS(VMX_VMCS_CTRL_EXIT_CONTROLS, &val); 4742 Log(("VMX_VMCS_CTRL_EXIT_CONTROLS %08x\n", val)); 4743 4744 VMXReadVMCS(VMX_VMCS_HOST_CR0, &val); 4745 Log(("VMX_VMCS_HOST_CR0 %08x\n", val)); 4746 4747 VMXReadVMCS(VMX_VMCS_HOST_CR3, &val); 4748 Log(("VMX_VMCS_HOST_CR3 %08x\n", val)); 4749 4750 VMXReadVMCS(VMX_VMCS_HOST_CR4, &val); 4751 Log(("VMX_VMCS_HOST_CR4 %08x\n", val)); 4752 4753 VMXReadVMCS(VMX_VMCS16_HOST_FIELD_CS, &val); 4754 Log(("VMX_VMCS_HOST_FIELD_CS %08x\n", val)); 4755 4756 VMXReadVMCS(VMX_VMCS_GUEST_RFLAGS, &val); 4757 Log(("VMX_VMCS_GUEST_RFLAGS %08x\n", val)); 4758 4759 if (val < gdtr.cbGdt) 4760 { 4761 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK)); 4762 HWACCMR0DumpDescriptor(pDesc, val, "CS: "); 4763 } 4764 4765 VMXReadVMCS(VMX_VMCS16_HOST_FIELD_DS, &val); 4766 Log(("VMX_VMCS_HOST_FIELD_DS %08x\n", val)); 4767 if (val < gdtr.cbGdt) 4768 { 4769 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK)); 4770 HWACCMR0DumpDescriptor(pDesc, val, "DS: "); 4771 } 4772 4773 VMXReadVMCS(VMX_VMCS16_HOST_FIELD_ES, &val); 4774 Log(("VMX_VMCS_HOST_FIELD_ES %08x\n", val)); 4775 if (val < gdtr.cbGdt) 4776 { 4777 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK)); 4778 HWACCMR0DumpDescriptor(pDesc, val, "ES: "); 4779 } 4780 4781 VMXReadVMCS(VMX_VMCS16_HOST_FIELD_FS, &val); 4782 Log(("VMX_VMCS16_HOST_FIELD_FS %08x\n", val)); 4783 if (val < gdtr.cbGdt) 4784 { 4785 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK)); 4786 HWACCMR0DumpDescriptor(pDesc, val, "FS: "); 4787 } 4788 4789 VMXReadVMCS(VMX_VMCS16_HOST_FIELD_GS, &val); 4790 Log(("VMX_VMCS16_HOST_FIELD_GS %08x\n", val)); 4791 if (val < gdtr.cbGdt) 4792 { 4793 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK)); 4794 HWACCMR0DumpDescriptor(pDesc, val, "GS: "); 4795 } 4796 4797 VMXReadVMCS(VMX_VMCS16_HOST_FIELD_SS, &val); 4798 Log(("VMX_VMCS16_HOST_FIELD_SS %08x\n", val)); 4799 if (val < gdtr.cbGdt) 4800 { 4801 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK)); 4802 HWACCMR0DumpDescriptor(pDesc, val, "SS: "); 4803 } 4804 4805 VMXReadVMCS(VMX_VMCS16_HOST_FIELD_TR, &val); 4806 Log(("VMX_VMCS16_HOST_FIELD_TR %08x\n", val)); 4807 if (val < gdtr.cbGdt) 4808 { 4809 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK)); 4810 HWACCMR0DumpDescriptor(pDesc, val, "TR: "); 4811 } 4812 4813 VMXReadVMCS(VMX_VMCS_HOST_TR_BASE, &val); 4814 Log(("VMX_VMCS_HOST_TR_BASE %RHv\n", val)); 4815 4816 VMXReadVMCS(VMX_VMCS_HOST_GDTR_BASE, &val); 4817 Log(("VMX_VMCS_HOST_GDTR_BASE %RHv\n", val)); 4818 VMXReadVMCS(VMX_VMCS_HOST_IDTR_BASE, &val); 4819 Log(("VMX_VMCS_HOST_IDTR_BASE %RHv\n", val)); 4820 4821 VMXReadVMCS(VMX_VMCS32_HOST_SYSENTER_CS, &val); 4822 Log(("VMX_VMCS_HOST_SYSENTER_CS 
%08x\n", val)); 4823 4824 VMXReadVMCS(VMX_VMCS_HOST_SYSENTER_EIP, &val); 4825 Log(("VMX_VMCS_HOST_SYSENTER_EIP %RHv\n", val)); 4826 4827 VMXReadVMCS(VMX_VMCS_HOST_SYSENTER_ESP, &val); 4828 Log(("VMX_VMCS_HOST_SYSENTER_ESP %RHv\n", val)); 4829 4830 VMXReadVMCS(VMX_VMCS_HOST_RSP, &val); 4831 Log(("VMX_VMCS_HOST_RSP %RHv\n", val)); 4832 VMXReadVMCS(VMX_VMCS_HOST_RIP, &val); 4833 Log(("VMX_VMCS_HOST_RIP %RHv\n", val)); 4834 5015 RTGDTR gdtr; 5016 PCX86DESCHC pDesc; 5017 RTCCUINTREG val; 5018 5019 ASMGetGDTR(&gdtr); 5020 5021 VMXReadVMCS(VMX_VMCS64_GUEST_RIP, &val); 5022 Log(("Old eip %RGv new %RGv\n", (RTGCPTR)pCtx->rip, (RTGCPTR)val)); 5023 VMXReadVMCS(VMX_VMCS_CTRL_PIN_EXEC_CONTROLS, &val); 5024 Log(("VMX_VMCS_CTRL_PIN_EXEC_CONTROLS %08x\n", val)); 5025 VMXReadVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, &val); 5026 Log(("VMX_VMCS_CTRL_PROC_EXEC_CONTROLS %08x\n", val)); 5027 VMXReadVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, &val); 5028 Log(("VMX_VMCS_CTRL_ENTRY_CONTROLS %08x\n", val)); 5029 VMXReadVMCS(VMX_VMCS_CTRL_EXIT_CONTROLS, &val); 5030 Log(("VMX_VMCS_CTRL_EXIT_CONTROLS %08x\n", val)); 5031 5032 VMXReadVMCS(VMX_VMCS_HOST_CR0, &val); 5033 Log(("VMX_VMCS_HOST_CR0 %08x\n", val)); 5034 VMXReadVMCS(VMX_VMCS_HOST_CR3, &val); 5035 Log(("VMX_VMCS_HOST_CR3 %08x\n", val)); 5036 VMXReadVMCS(VMX_VMCS_HOST_CR4, &val); 5037 Log(("VMX_VMCS_HOST_CR4 %08x\n", val)); 5038 5039 VMXReadVMCS(VMX_VMCS16_HOST_FIELD_CS, &val); 5040 Log(("VMX_VMCS_HOST_FIELD_CS %08x\n", val)); 5041 VMXReadVMCS(VMX_VMCS_GUEST_RFLAGS, &val); 5042 Log(("VMX_VMCS_GUEST_RFLAGS %08x\n", val)); 5043 5044 if (val < gdtr.cbGdt) 5045 { 5046 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK)); 5047 HWACCMR0DumpDescriptor(pDesc, val, "CS: "); 5048 } 5049 5050 VMXReadVMCS(VMX_VMCS16_HOST_FIELD_DS, &val); 5051 Log(("VMX_VMCS_HOST_FIELD_DS %08x\n", val)); 5052 if (val < gdtr.cbGdt) 5053 { 5054 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK)); 5055 HWACCMR0DumpDescriptor(pDesc, val, "DS: "); 5056 } 5057 5058 VMXReadVMCS(VMX_VMCS16_HOST_FIELD_ES, &val); 5059 Log(("VMX_VMCS_HOST_FIELD_ES %08x\n", val)); 5060 if (val < gdtr.cbGdt) 5061 { 5062 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK)); 5063 HWACCMR0DumpDescriptor(pDesc, val, "ES: "); 5064 } 5065 5066 VMXReadVMCS(VMX_VMCS16_HOST_FIELD_FS, &val); 5067 Log(("VMX_VMCS16_HOST_FIELD_FS %08x\n", val)); 5068 if (val < gdtr.cbGdt) 5069 { 5070 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK)); 5071 HWACCMR0DumpDescriptor(pDesc, val, "FS: "); 5072 } 5073 5074 VMXReadVMCS(VMX_VMCS16_HOST_FIELD_GS, &val); 5075 Log(("VMX_VMCS16_HOST_FIELD_GS %08x\n", val)); 5076 if (val < gdtr.cbGdt) 5077 { 5078 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK)); 5079 HWACCMR0DumpDescriptor(pDesc, val, "GS: "); 5080 } 5081 5082 VMXReadVMCS(VMX_VMCS16_HOST_FIELD_SS, &val); 5083 Log(("VMX_VMCS16_HOST_FIELD_SS %08x\n", val)); 5084 if (val < gdtr.cbGdt) 5085 { 5086 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK)); 5087 HWACCMR0DumpDescriptor(pDesc, val, "SS: "); 5088 } 5089 5090 VMXReadVMCS(VMX_VMCS16_HOST_FIELD_TR, &val); 5091 Log(("VMX_VMCS16_HOST_FIELD_TR %08x\n", val)); 5092 if (val < gdtr.cbGdt) 5093 { 5094 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK)); 5095 HWACCMR0DumpDescriptor(pDesc, val, "TR: "); 5096 } 5097 5098 VMXReadVMCS(VMX_VMCS_HOST_TR_BASE, &val); 5099 Log(("VMX_VMCS_HOST_TR_BASE %RHv\n", val)); 5100 VMXReadVMCS(VMX_VMCS_HOST_GDTR_BASE, &val); 5101 Log(("VMX_VMCS_HOST_GDTR_BASE %RHv\n", val)); 5102 VMXReadVMCS(VMX_VMCS_HOST_IDTR_BASE, &val); 5103 Log(("VMX_VMCS_HOST_IDTR_BASE 
%RHv\n", val)); 5104 VMXReadVMCS(VMX_VMCS32_HOST_SYSENTER_CS, &val); 5105 Log(("VMX_VMCS_HOST_SYSENTER_CS %08x\n", val)); 5106 VMXReadVMCS(VMX_VMCS_HOST_SYSENTER_EIP, &val); 5107 Log(("VMX_VMCS_HOST_SYSENTER_EIP %RHv\n", val)); 5108 VMXReadVMCS(VMX_VMCS_HOST_SYSENTER_ESP, &val); 5109 Log(("VMX_VMCS_HOST_SYSENTER_ESP %RHv\n", val)); 5110 VMXReadVMCS(VMX_VMCS_HOST_RSP, &val); 5111 Log(("VMX_VMCS_HOST_RSP %RHv\n", val)); 5112 VMXReadVMCS(VMX_VMCS_HOST_RIP, &val); 5113 Log(("VMX_VMCS_HOST_RIP %RHv\n", val)); 4835 5114 # if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) 4836 if (VMX_IS_64BIT_HOST_MODE())4837 {4838 Log(("MSR_K6_EFER = %RX64\n", ASMRdMsr(MSR_K6_EFER)));4839 Log(("MSR_K6_STAR = %RX64\n", ASMRdMsr(MSR_K6_STAR)));4840 Log(("MSR_K8_LSTAR = %RX64\n", ASMRdMsr(MSR_K8_LSTAR)));4841 Log(("MSR_K8_CSTAR = %RX64\n", ASMRdMsr(MSR_K8_CSTAR)));4842 Log(("MSR_K8_SF_MASK = %RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));4843 }5115 if (VMX_IS_64BIT_HOST_MODE()) 5116 { 5117 Log(("MSR_K6_EFER = %RX64\n", ASMRdMsr(MSR_K6_EFER))); 5118 Log(("MSR_K6_STAR = %RX64\n", ASMRdMsr(MSR_K6_STAR))); 5119 Log(("MSR_K8_LSTAR = %RX64\n", ASMRdMsr(MSR_K8_LSTAR))); 5120 Log(("MSR_K8_CSTAR = %RX64\n", ASMRdMsr(MSR_K8_CSTAR))); 5121 Log(("MSR_K8_SF_MASK = %RX64\n", ASMRdMsr(MSR_K8_SF_MASK))); 5122 } 4844 5123 # endif 4845 5124 #endif /* VBOX_STRICT */ 4846 }4847 break;4848 }4849 4850 default:4851 /* impossible */4852 AssertMsgFailed(("%Rrc (%#x)\n", VBOXSTRICTRC_VAL(rc), VBOXSTRICTRC_VAL(rc)));4853 break;5125 } 5126 break; 5127 } 5128 5129 default: 5130 /* impossible */ 5131 AssertMsgFailed(("%Rrc (%#x)\n", VBOXSTRICTRC_VAL(rc), VBOXSTRICTRC_VAL(rc))); 5132 break; 4854 5133 } 4855 5134 } 4856 5135 5136 4857 5137 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) 4858 4859 5138 /** 4860 * Prepares for and executes VMLAUNCH (64 bits guest mode) 5139 * Prepares for and executes VMLAUNCH (64 bits guest mode). 4861 5140 * 4862 * @returns VBox status code 4863 * @param fResume vmlauch/vmresume4864 * @param pCtx Guest context4865 * @param pCache VMCS cache4866 * @param pVM The VM to operate on.4867 * @param pVCpu The VMCPU to operate on.5141 * @returns VBox status code. 5142 * @param fResume Whether to vmlauch/vmresume. 5143 * @param pCtx Pointer to the guest CPU context. 5144 * @param pCache Pointer to the VMCS cache. 5145 * @param pVM Pointer to the VM. 5146 * @param pVCpu Pointer to the VMCPU. 
4868 5147 */
4869 5148 DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
… …
4914 5193 #ifdef DEBUG
4915 5194 AssertMsg(pCache->TestIn.HCPhysCpuPage== HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
4916 AssertMsg(pCache->TestIn.HCPhysVMCS == pVCpu->hwaccm.s.vmx.HCPhysVMCS, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVMCS, pVCpu->hwaccm.s.vmx.HCPhysVMCS));
4917 AssertMsg(pCache->TestIn.HCPhysVMCS == pCache->TestOut.HCPhysVMCS, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVMCS, pCache->TestOut.HCPhysVMCS));
4918 AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache, pCache->TestOut.pCache));
4919 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hwaccm.s.vmx.VMCSCache), ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hwaccm.s.vmx.VMCSCache)));
4920 AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx, pCache->TestOut.pCtx));
5195 AssertMsg(pCache->TestIn.HCPhysVMCS == pVCpu->hwaccm.s.vmx.HCPhysVMCS, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVMCS,
5196 pVCpu->hwaccm.s.vmx.HCPhysVMCS));
5197 AssertMsg(pCache->TestIn.HCPhysVMCS == pCache->TestOut.HCPhysVMCS, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVMCS,
5198 pCache->TestOut.HCPhysVMCS));
5199 AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
5200 pCache->TestOut.pCache));
5201 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hwaccm.s.vmx.VMCSCache),
5202 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hwaccm.s.vmx.VMCSCache)));
5203 AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
5204 pCache->TestOut.pCtx));
4921 5205 Assert(!(pCache->TestOut.eflags & X86_EFL_IF));
4922 5206 #endif
… …
4924 5208 }
4925 5209
5210
4926 5211 # ifdef VBOX_STRICT
4927
4928 5212 static bool hmR0VmxIsValidReadField(uint32_t idxField)
4929 5213 {
4930 switch (idxField)
4931 {
4932 case VMX_VMCS64_GUEST_RIP:
4933 case VMX_VMCS64_GUEST_RSP:
4934 case VMX_VMCS_GUEST_RFLAGS:
4935 case VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE:
4936 case VMX_VMCS_CTRL_CR0_READ_SHADOW:
4937 case VMX_VMCS64_GUEST_CR0:
4938 case VMX_VMCS_CTRL_CR4_READ_SHADOW:
4939 case VMX_VMCS64_GUEST_CR4:
4940 case VMX_VMCS64_GUEST_DR7:
4941 case VMX_VMCS32_GUEST_SYSENTER_CS:
4942 case VMX_VMCS64_GUEST_SYSENTER_EIP:
4943 case VMX_VMCS64_GUEST_SYSENTER_ESP:
4944 case VMX_VMCS32_GUEST_GDTR_LIMIT:
4945 case VMX_VMCS64_GUEST_GDTR_BASE:
4946 case VMX_VMCS32_GUEST_IDTR_LIMIT:
4947 case VMX_VMCS64_GUEST_IDTR_BASE:
4948 case VMX_VMCS16_GUEST_FIELD_CS:
4949 case VMX_VMCS32_GUEST_CS_LIMIT:
4950 case VMX_VMCS64_GUEST_CS_BASE:
4951 case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
4952 case VMX_VMCS16_GUEST_FIELD_DS:
4953 case VMX_VMCS32_GUEST_DS_LIMIT:
4954 case VMX_VMCS64_GUEST_DS_BASE:
4955 case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
4956 case VMX_VMCS16_GUEST_FIELD_ES:
4957 case VMX_VMCS32_GUEST_ES_LIMIT:
4958 case VMX_VMCS64_GUEST_ES_BASE:
4959 case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
4960 case VMX_VMCS16_GUEST_FIELD_FS:
4961 case VMX_VMCS32_GUEST_FS_LIMIT:
4962 case VMX_VMCS64_GUEST_FS_BASE:
4963 case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
4964 case VMX_VMCS16_GUEST_FIELD_GS:
4965 case VMX_VMCS32_GUEST_GS_LIMIT:
4966 case VMX_VMCS64_GUEST_GS_BASE:
4967 case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
4968 case VMX_VMCS16_GUEST_FIELD_SS:
4969 case VMX_VMCS32_GUEST_SS_LIMIT:
4970 case VMX_VMCS64_GUEST_SS_BASE:
4971 case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
4972 case VMX_VMCS16_GUEST_FIELD_LDTR:
4973 case VMX_VMCS32_GUEST_LDTR_LIMIT:
4974 case VMX_VMCS64_GUEST_LDTR_BASE:
4975 case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
4976 case VMX_VMCS16_GUEST_FIELD_TR:
4977 case VMX_VMCS32_GUEST_TR_LIMIT:
4978 case VMX_VMCS64_GUEST_TR_BASE:
4979 case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
4980 case VMX_VMCS32_RO_EXIT_REASON:
4981 case VMX_VMCS32_RO_VM_INSTR_ERROR:
4982 case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
4983 case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERRCODE:
4984 case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
4985 case VMX_VMCS32_RO_EXIT_INSTR_INFO:
4986 case VMX_VMCS_RO_EXIT_QUALIFICATION:
4987 case VMX_VMCS32_RO_IDT_INFO:
4988 case VMX_VMCS32_RO_IDT_ERRCODE:
4989 case VMX_VMCS64_GUEST_CR3:
4990 case VMX_VMCS_EXIT_PHYS_ADDR_FULL:
4991 return true;
5214 switch (idxField)
5215 {
5216 case VMX_VMCS64_GUEST_RIP:
5217 case VMX_VMCS64_GUEST_RSP:
5218 case VMX_VMCS_GUEST_RFLAGS:
5219 case VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE:
5220 case VMX_VMCS_CTRL_CR0_READ_SHADOW:
5221 case VMX_VMCS64_GUEST_CR0:
5222 case VMX_VMCS_CTRL_CR4_READ_SHADOW:
5223 case VMX_VMCS64_GUEST_CR4:
5224 case VMX_VMCS64_GUEST_DR7:
5225 case VMX_VMCS32_GUEST_SYSENTER_CS:
5226 case VMX_VMCS64_GUEST_SYSENTER_EIP:
5227 case VMX_VMCS64_GUEST_SYSENTER_ESP:
5228 case VMX_VMCS32_GUEST_GDTR_LIMIT:
5229 case VMX_VMCS64_GUEST_GDTR_BASE:
5230 case VMX_VMCS32_GUEST_IDTR_LIMIT:
5231 case VMX_VMCS64_GUEST_IDTR_BASE:
5232 case VMX_VMCS16_GUEST_FIELD_CS:
5233 case VMX_VMCS32_GUEST_CS_LIMIT:
5234 case VMX_VMCS64_GUEST_CS_BASE:
5235 case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
5236 case VMX_VMCS16_GUEST_FIELD_DS:
5237 case VMX_VMCS32_GUEST_DS_LIMIT:
5238 case VMX_VMCS64_GUEST_DS_BASE:
5239 case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
5240 case VMX_VMCS16_GUEST_FIELD_ES:
5241 case VMX_VMCS32_GUEST_ES_LIMIT:
5242 case VMX_VMCS64_GUEST_ES_BASE:
5243 case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
5244 case VMX_VMCS16_GUEST_FIELD_FS:
5245 case VMX_VMCS32_GUEST_FS_LIMIT:
5246 case VMX_VMCS64_GUEST_FS_BASE:
5247 case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
5248 case VMX_VMCS16_GUEST_FIELD_GS:
5249 case VMX_VMCS32_GUEST_GS_LIMIT:
5250 case VMX_VMCS64_GUEST_GS_BASE:
5251 case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
5252 case VMX_VMCS16_GUEST_FIELD_SS:
5253 case VMX_VMCS32_GUEST_SS_LIMIT:
5254 case VMX_VMCS64_GUEST_SS_BASE:
5255 case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
5256 case VMX_VMCS16_GUEST_FIELD_LDTR:
5257 case VMX_VMCS32_GUEST_LDTR_LIMIT:
5258 case VMX_VMCS64_GUEST_LDTR_BASE:
5259 case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
5260 case VMX_VMCS16_GUEST_FIELD_TR:
5261 case VMX_VMCS32_GUEST_TR_LIMIT:
5262 case VMX_VMCS64_GUEST_TR_BASE:
5263 case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
5264 case VMX_VMCS32_RO_EXIT_REASON:
5265 case VMX_VMCS32_RO_VM_INSTR_ERROR:
5266 case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
5267 case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERRCODE:
5268 case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
5269 case VMX_VMCS32_RO_EXIT_INSTR_INFO:
5270 case VMX_VMCS_RO_EXIT_QUALIFICATION:
5271 case VMX_VMCS32_RO_IDT_INFO:
5272 case VMX_VMCS32_RO_IDT_ERRCODE:
5273 case VMX_VMCS64_GUEST_CR3:
5274 case VMX_VMCS_EXIT_PHYS_ADDR_FULL:
5275 return true;
4992 5276 }
4993 5277 return false;
4994 5278 }
4995 5279
5280
4996 5281 static bool hmR0VmxIsValidWriteField(uint32_t idxField)
4997 5282 {
4998 switch (idxField)
4999 {
5000 case VMX_VMCS64_GUEST_LDTR_BASE:
5001 case VMX_VMCS64_GUEST_TR_BASE:
5002 case VMX_VMCS64_GUEST_GDTR_BASE:
5003 case VMX_VMCS64_GUEST_IDTR_BASE:
5004 case VMX_VMCS64_GUEST_SYSENTER_EIP:
5005 case VMX_VMCS64_GUEST_SYSENTER_ESP:
5006 case VMX_VMCS64_GUEST_CR0:
5007 case VMX_VMCS64_GUEST_CR4:
5008 case VMX_VMCS64_GUEST_CR3:
5009 case VMX_VMCS64_GUEST_DR7:
5010 case VMX_VMCS64_GUEST_RIP:
5011 case VMX_VMCS64_GUEST_RSP:
5012 case VMX_VMCS64_GUEST_CS_BASE:
5013 case VMX_VMCS64_GUEST_DS_BASE:
5014 case VMX_VMCS64_GUEST_ES_BASE:
5015 case VMX_VMCS64_GUEST_FS_BASE:
5016 case VMX_VMCS64_GUEST_GS_BASE:
5017 case VMX_VMCS64_GUEST_SS_BASE:
5018 return true;
5283 switch (idxField)
5284 {
5285 case VMX_VMCS64_GUEST_LDTR_BASE:
5286 case VMX_VMCS64_GUEST_TR_BASE:
5287 case VMX_VMCS64_GUEST_GDTR_BASE:
5288 case VMX_VMCS64_GUEST_IDTR_BASE:
5289 case VMX_VMCS64_GUEST_SYSENTER_EIP:
5290 case VMX_VMCS64_GUEST_SYSENTER_ESP:
5291 case VMX_VMCS64_GUEST_CR0:
5292 case VMX_VMCS64_GUEST_CR4:
5293 case VMX_VMCS64_GUEST_CR3:
5294 case VMX_VMCS64_GUEST_DR7:
5295 case VMX_VMCS64_GUEST_RIP:
5296 case VMX_VMCS64_GUEST_RSP:
5297 case VMX_VMCS64_GUEST_CS_BASE:
5298 case VMX_VMCS64_GUEST_DS_BASE:
5299 case VMX_VMCS64_GUEST_ES_BASE:
5300 case VMX_VMCS64_GUEST_FS_BASE:
5301 case VMX_VMCS64_GUEST_GS_BASE:
5302 case VMX_VMCS64_GUEST_SS_BASE:
5303 return true;
5019 5304 }
5020 5305 return false;
5021 5306 }
5022
5023 5307 # endif /* VBOX_STRICT */
5024 5308
5309
5025 5310 /**
5026 * Executes the specified handler in 64 mode
5311 * Executes the specified handler in 64-bit mode.
5027 5312 *
5028 5313 * @returns VBox status code.
5029 * @param pVM The VM to operate on.
5030 * @param pVCpu The VMCPU to operate on.
5031 * @param pCtx Guest context
5032 * @param pfnHandler RC handler
5033 * @param cbParam Number of parameters
5034 * @param paParam Array of 32 bits parameters
5314 * @param pVM Pointer to the VM.
5315 * @param pVCpu Pointer to the VMCPU.
5316 * @param pCtx Pointer to the guest CPU context.
5317 * @param pfnHandler Pointer to the RC handler function.
5318 * @param cbParam Number of parameters.
5319 * @param paParam Array of 32-bit parameters.
5035 5320 */
5036 VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam, uint32_t *paParam)
5321 VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam,
5322 uint32_t *paParam)
5037 5323 {
5038 5324 int rc, rc2;
… …
5060 5346 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
5061 5347
5062 /* Clear VM Control Structure. Marking it inactive, clearing implementation specific data and writing back VMCS datato memory. */
5348 /* Clear VMCS. Marking it inactive, clearing implementation-specific data and writing VMCS data back to memory. */
5063 5349 VMXClearVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
5064 5350
… …
5074 5360
5075 5361 STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
5362
5076 5363 /* Call switcher. */
5077 5364 rc = pVM->hwaccm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
… …
5096 5383 return rc;
5097 5384 }
5098
5099 5385 #endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
5100 5386
… …
5102 5388 #if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
5103 5389 /**
5104 * Executes VMWRITE
5390 * Executes VMWRITE.
5105 5391 *
5106 5392 * @returns VBox status code
5107 * @param pVCpu The VMCPU to operate on.
5108 * @param idxField VMCS index
5109 * @param u64Val 16, 32 or 64 bits value
5393 * @param pVCpu Pointer to the VMCPU.
5394 * @param idxField VMCS field index.
5395 * @param u64Val 16, 32 or 64 bits value.
5110 5396 */
5111 5397 VMMR0DECL(int) VMXWriteVMCS64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
5112 5398 {
5113 5399 int rc;
5114
5115 5400 switch (idxField)
5116 5401 {
5117 case VMX_VMCS_CTRL_TSC_OFFSET_FULL:
5118 case VMX_VMCS_CTRL_IO_BITMAP_A_FULL:
5119 case VMX_VMCS_CTRL_IO_BITMAP_B_FULL:
5120 case VMX_VMCS_CTRL_MSR_BITMAP_FULL:
5121 case VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL:
5122 case VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL:
5123 case VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL:
5124 case VMX_VMCS_CTRL_VAPIC_PAGEADDR_FULL:
5125 case VMX_VMCS_CTRL_APIC_ACCESSADDR_FULL:
5126 case VMX_VMCS_GUEST_LINK_PTR_FULL:
5127 case VMX_VMCS_GUEST_PDPTR0_FULL:
5128 case VMX_VMCS_GUEST_PDPTR1_FULL:
5129 case VMX_VMCS_GUEST_PDPTR2_FULL:
5130 case VMX_VMCS_GUEST_PDPTR3_FULL:
5131 case VMX_VMCS_GUEST_DEBUGCTL_FULL:
5132 case VMX_VMCS_GUEST_EFER_FULL:
5133 case VMX_VMCS_CTRL_EPTP_FULL:
5134 /* These fields consist of two parts, which are both writable in 32 bits mode. */
5135 rc = VMXWriteVMCS32(idxField, u64Val);
5136 rc |= VMXWriteVMCS32(idxField + 1, (uint32_t)(u64Val >> 32ULL));
5137 AssertRC(rc);
5138 return rc;
5139
5140 case VMX_VMCS64_GUEST_LDTR_BASE:
5141 case VMX_VMCS64_GUEST_TR_BASE:
5142 case VMX_VMCS64_GUEST_GDTR_BASE:
5143 case VMX_VMCS64_GUEST_IDTR_BASE:
5144 case VMX_VMCS64_GUEST_SYSENTER_EIP:
5145 case VMX_VMCS64_GUEST_SYSENTER_ESP:
5146 case VMX_VMCS64_GUEST_CR0:
5147 case VMX_VMCS64_GUEST_CR4:
5148 case VMX_VMCS64_GUEST_CR3:
5149 case VMX_VMCS64_GUEST_DR7:
5150 case VMX_VMCS64_GUEST_RIP:
5151 case VMX_VMCS64_GUEST_RSP:
5152 case VMX_VMCS64_GUEST_CS_BASE:
5153 case VMX_VMCS64_GUEST_DS_BASE:
5154 case VMX_VMCS64_GUEST_ES_BASE:
5155 case VMX_VMCS64_GUEST_FS_BASE:
5156 case VMX_VMCS64_GUEST_GS_BASE:
5157 case VMX_VMCS64_GUEST_SS_BASE:
5158 /* Queue a 64 bits value as we can't set it in 32 bits host mode. */
5159 if (u64Val >> 32ULL)
5160 rc = VMXWriteCachedVMCSEx(pVCpu, idxField, u64Val);
5161 else
5162 rc = VMXWriteVMCS32(idxField, (uint32_t)u64Val);
5163
5164 return rc;
5165
5166 default:
5167 AssertMsgFailed(("Unexpected field %x\n", idxField));
5168 return VERR_INVALID_PARAMETER;
5402 case VMX_VMCS_CTRL_TSC_OFFSET_FULL:
5403 case VMX_VMCS_CTRL_IO_BITMAP_A_FULL:
5404 case VMX_VMCS_CTRL_IO_BITMAP_B_FULL:
5405 case VMX_VMCS_CTRL_MSR_BITMAP_FULL:
5406 case VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL:
5407 case VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL:
5408 case VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL:
5409 case VMX_VMCS_CTRL_VAPIC_PAGEADDR_FULL:
5410 case VMX_VMCS_CTRL_APIC_ACCESSADDR_FULL:
5411 case VMX_VMCS_GUEST_LINK_PTR_FULL:
5412 case VMX_VMCS_GUEST_PDPTR0_FULL:
5413 case VMX_VMCS_GUEST_PDPTR1_FULL:
5414 case VMX_VMCS_GUEST_PDPTR2_FULL:
5415 case VMX_VMCS_GUEST_PDPTR3_FULL:
5416 case VMX_VMCS_GUEST_DEBUGCTL_FULL:
5417 case VMX_VMCS_GUEST_EFER_FULL:
5418 case VMX_VMCS_CTRL_EPTP_FULL:
5419 /* These fields consist of two parts, which are both writable in 32 bits mode. */
5420 rc = VMXWriteVMCS32(idxField, u64Val);
5421 rc |= VMXWriteVMCS32(idxField + 1, (uint32_t)(u64Val >> 32ULL));
5422 AssertRC(rc);
5423 return rc;
5424
5425 case VMX_VMCS64_GUEST_LDTR_BASE:
5426 case VMX_VMCS64_GUEST_TR_BASE:
5427 case VMX_VMCS64_GUEST_GDTR_BASE:
5428 case VMX_VMCS64_GUEST_IDTR_BASE:
5429 case VMX_VMCS64_GUEST_SYSENTER_EIP:
5430 case VMX_VMCS64_GUEST_SYSENTER_ESP:
5431 case VMX_VMCS64_GUEST_CR0:
5432 case VMX_VMCS64_GUEST_CR4:
5433 case VMX_VMCS64_GUEST_CR3:
5434 case VMX_VMCS64_GUEST_DR7:
5435 case VMX_VMCS64_GUEST_RIP:
5436 case VMX_VMCS64_GUEST_RSP:
5437 case VMX_VMCS64_GUEST_CS_BASE:
5438 case VMX_VMCS64_GUEST_DS_BASE:
5439 case VMX_VMCS64_GUEST_ES_BASE:
5440 case VMX_VMCS64_GUEST_FS_BASE:
5441 case VMX_VMCS64_GUEST_GS_BASE:
5442 case VMX_VMCS64_GUEST_SS_BASE:
5443 /* Queue a 64 bits value as we can't set it in 32 bits host mode. */
5444 if (u64Val >> 32ULL)
5445 rc = VMXWriteCachedVMCSEx(pVCpu, idxField, u64Val);
5446 else
5447 rc = VMXWriteVMCS32(idxField, (uint32_t)u64Val);
5448
5449 return rc;
5450
5451 default:
5452 AssertMsgFailed(("Unexpected field %x\n", idxField));
5453 return VERR_INVALID_PARAMETER;
5169 5454 }
5170 5455 }
5456
5171 5457
5172 5458 /**
5173 5459 * Cache VMCS writes for performance reasons (Darwin) and for running 64 bits guests on 32 bits hosts.
5174 5460 *
5175 * @param pVCpu The VMCPU to operate on.
5176 * @param idxField VMCS field
5177 * @param u64Val Value
5461 * @param pVCpu Pointer to the VMCPU.
5462 * @param idxField VMCS field index.
5463 * @param u64Val 16, 32 or 64 bits value..
5178 5464 */
5179 5465 VMMR0DECL(int) VMXWriteCachedVMCSEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
… …
5181 5467 PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
5182 5468
5183 AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1, ("entries=%x\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
5469 AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1,
5470 ("entries=%x\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
5184 5471
5185 5472 /* Make sure there are no duplicates. */
5186 for (unsigned i =0;i<pCache->Write.cValidEntries;i++)
5473 for (unsigned i = 0; i < pCache->Write.cValidEntries; i++)
5187 5474 {
5188 5475 if (pCache->Write.aField[i] == idxField)
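The rewritten VMXWriteVMCS64Ex() above keeps the existing strategy for 32-bit hosts: a 64-bit "_FULL" VMCS field occupies two consecutive field encodings, so the value is split into two 32-bit VMWRITEs, the low dword at idxField and the high dword at idxField + 1. The standalone sketch below illustrates only that split; vmwrite32() and the field encoding used in main() are hypothetical stand-ins for illustration, not the VirtualBox API.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for a 32-bit VMWRITE; the changeset uses VMXWriteVMCS32(). */
    static int vmwrite32(uint32_t idxField, uint32_t u32Val)
    {
        printf("VMWRITE field %#" PRIx32 " <- %#" PRIx32 "\n", idxField, u32Val);
        return 0; /* success */
    }

    /* Write a 64-bit "_FULL" field as two halves: low dword at idxField, high dword at idxField + 1. */
    static int vmwrite64_split(uint32_t idxField, uint64_t u64Val)
    {
        int rc = vmwrite32(idxField, (uint32_t)u64Val);
        rc |= vmwrite32(idxField + 1, (uint32_t)(u64Val >> 32));
        return rc;
    }

    int main(void)
    {
        /* 0x2010 is a made-up field encoding, used purely for the demonstration. */
        return vmwrite64_split(0x2010, UINT64_C(0x123456789a));
    }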
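The VMXWriteCachedVMCSEx() hunk above shows the other half of that strategy: values a 32-bit host cannot write directly are queued in the per-VCPU VMCSCache write array (the same cache that VMXR0SwitcherStartVM64() receives at the top of this changeset), and the loop checks for duplicates so a field queued twice is updated in place rather than added again. A minimal sketch of such a write queue, using hypothetical names and a made-up capacity instead of VMCSCACHE_MAX_ENTRY:

    #include <assert.h>
    #include <stdint.h>

    #define WRITE_CACHE_MAX 16 /* made-up capacity for the sketch */

    typedef struct WRITECACHE
    {
        uint32_t cValidEntries;
        uint32_t aField[WRITE_CACHE_MAX];
        uint64_t aFieldVal[WRITE_CACHE_MAX];
    } WRITECACHE;

    /* Queue a (field, value) pair; an already-queued field is overwritten in place. */
    static int cacheQueueWrite(WRITECACHE *pCache, uint32_t idxField, uint64_t u64Val)
    {
        for (uint32_t i = 0; i < pCache->cValidEntries; i++)
        {
            if (pCache->aField[i] == idxField)
            {
                pCache->aFieldVal[i] = u64Val; /* duplicate: just update the queued value */
                return 0;
            }
        }

        assert(pCache->cValidEntries < WRITE_CACHE_MAX); /* mirrors the AssertMsgReturn() bound check */
        pCache->aField[pCache->cValidEntries]    = idxField;
        pCache->aFieldVal[pCache->cValidEntries] = u64Val;
        pCache->cValidEntries++;
        return 0;
    }

Only the last value queued for a given field survives until the cache is applied, which is the point of the duplicate check.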