Changeset 45786 in vbox for trunk/src/VBox/VMM
- Timestamp: Apr 26, 2013 10:35:59 PM
- Location: trunk/src/VBox/VMM
- Files: 1 deleted, 15 edited
trunk/src/VBox/VMM/Makefile.kmk
r45739 r45786 238 238 VMMSwitcher/32BitTo32Bit.asm \ 239 239 VMMSwitcher/32BitToPAE.asm \ 240 VMMSwitcher/32BitToAMD64.asm \241 240 VMMSwitcher/PAETo32Bit.asm \ 242 VMMSwitcher/PAEToAMD64.asm \243 241 VMMSwitcher/PAEToPAE.asm 244 242 VBoxVMM_SOURCES.amd64 = \ … … 249 247 endif 250 248 VBoxVMM_SOURCES.x86 += \ 249 VMMSwitcher/32BitToAMD64.asm \ 250 VMMSwitcher/PAEToAMD64.asm \ 251 251 VMMSwitcher/X86Stub.asm 252 252 VBoxVMM_SOURCES.amd64 += \ … … 411 411 VMMRC/VMMRC.cpp \ 412 412 VMMRC/VMMRCA.asm \ 413 VMMRC/HMRCA.asm \414 413 $(if-expr defined(VBOX_WITH_RAW_MODE), \ 415 414 VMMRC/CSAMRC.cpp \ -
trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp
r45739 r45786 259 259 } 260 260 261 262 #ifdef VBOX_WITH_RAW_MODE_NOT_R0 261 #ifndef PGM_WITHOUT_MAPPINGS 262 263 263 /** 264 264 * Sets all PDEs involved with the mapping in the shadow page table. … … 357 357 if (!pgmPoolIsPageLocked(pPoolPagePd)) 358 358 pgmPoolLockPage(pPool, pPoolPagePd); 359 # ifdef VBOX_STRICT359 # ifdef VBOX_STRICT 360 360 else if (pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING) 361 361 { … … 369 369 ("%RX64 vs %RX64\n", pShwPaePd->a[iPaePde+1].u & X86_PDE_PAE_PG_MASK, pMap->aPTs[i].HCPhysPaePT1)); 370 370 } 371 # endif371 # endif 372 372 373 373 /* … … 542 542 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pCurrentShwPdpt); 543 543 } 544 #endif /* VBOX_WITH_RAW_MODE_NOT_R0 */ 545 544 545 #endif /* PGM_WITHOUT_MAPPINGS */ 546 546 #if defined(VBOX_STRICT) && !defined(IN_RING0) 547 547 548 /** 548 549 * Clears all PDEs involved with the mapping in the shadow page table. … … 653 654 pgmUnlock(pVM); 654 655 } 656 655 657 #endif /* defined(VBOX_STRICT) && !defined(IN_RING0) */ 656 657 #ifdef VBOX_WITH_RAW_MODE_NOT_R0 658 #ifndef PGM_WITHOUT_MAPPINGS 658 659 659 660 /** … … 741 742 if (!pgmMapAreMappingsFloating(pVM)) 742 743 return false; 743 744 Assert(pVM->cCpus == 1); 744 AssertReturn(pgmMapAreMappingsEnabled(pVM), false); 745 745 746 746 /* This only applies to raw mode where we only support 1 VCPU. */ … … 771 771 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatR3DetectedConflicts); 772 772 773 # ifdef IN_RING3773 # ifdef IN_RING3 774 774 Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n" 775 775 " iPDE=%#x iPT=%#x PDE=%RGp.\n", 776 776 (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc, 777 777 iPDE, iPT, pPD->a[iPDE + iPT].au32[0])); 778 # else778 # else 779 779 Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n" 780 780 " iPDE=%#x iPT=%#x PDE=%RGp.\n", 781 781 (iPT + iPDE) << X86_PD_SHIFT, 782 782 iPDE, iPT, pPD->a[iPDE + iPT].au32[0])); 783 # endif783 # endif 784 784 return true; 785 785 } … … 802 802 { 803 803 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatR3DetectedConflicts); 804 # ifdef IN_RING3804 # ifdef IN_RING3 805 805 Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n" 806 806 " PDE=%016RX64.\n", 807 807 GCPtr, pCur->pszDesc, Pde.u)); 808 # else808 # else 809 809 Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping (PAE)\n" 810 810 " PDE=%016RX64.\n", 811 811 GCPtr, Pde.u)); 812 # endif812 # endif 813 813 return true; 814 814 } … … 866 866 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatR3DetectedConflicts); 867 867 868 # ifdef IN_RING3868 # ifdef IN_RING3 869 869 Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n" 870 870 " iPDE=%#x iPT=%#x PDE=%RGp.\n", … … 874 874 AssertRCReturn(rc, rc); 875 875 break; 876 # else876 # else 877 877 Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n" 878 878 " iPDE=%#x iPT=%#x PDE=%RGp.\n", … … 880 880 iPDE, iPT, pPD->a[iPDE + iPT].au32[0])); 881 881 return VINF_PGM_SYNC_CR3; 882 # endif882 # endif 883 883 } 884 884 } … … 931 931 } 932 932 933 #endif /* VBOX_WITH_RAW_MODE_NOT_R0*/934 933 #endif /* PGM_WITHOUT_MAPPINGS */ 934 -
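The conflict-detection path above now guards itself with the mapping-state helpers instead of a bare single-VCPU assertion. A minimal sketch of that early-out ordering, assuming only the helper names visible in the diff (the function name and body are illustrative):

    /* Sketch only: bail out before walking the page directories when the
       hypervisor mappings cannot conflict in the first place. */
    static bool demoMappingsCanConflict(PVM pVM)
    {
        if (!pgmMapAreMappingsFloating(pVM))                 /* fixed or disabled mappings never move */
            return false;
        AssertReturn(pgmMapAreMappingsEnabled(pVM), false);  /* floating mappings imply enabled ones  */
        /* ... only now scan the PD/PDPT entries for PGM_PDFLAGS_MAPPING conflicts ... */
        return true;
    }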
trunk/src/VBox/VMM/VMMAll/SELMAll.cpp
r45725 r45786 867 867 #endif 868 868 869 869 870 #ifdef VBOX_WITH_RAW_MODE_NOT_R0 870 871 871 /** 872 872 * Gets ss:esp for ring1 in main Hypervisor's TSS. … … 954 954 return VINF_SUCCESS; 955 955 } 956 956 #endif /* VBOX_WITH_RAW_MODE_NOT_R0 */ 957 958 959 #if defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)) 957 960 958 961 /** … … 1028 1031 } 1029 1032 1030 #endif /* VBOX_WITH_RAW_MODE_NOT_R0*/1033 #endif /* defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)) */ 1031 1034 1032 1035 /** -
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
r45749 r45786 1585 1585 STAM_COUNTER_INC(&pVCpu->hm.s.StatFpu64SwitchBack); 1586 1586 if (pVM->hm.s.vmx.fSupported) 1587 return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnSaveGuestFPU64, 0, NULL);1588 return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnSaveGuestFPU64, 0, NULL);1587 return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_HMRCSaveGuestFPU64, 0, NULL); 1588 return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_HMRCSaveGuestFPU64, 0, NULL); 1589 1589 } 1590 1590 … … 1602 1602 STAM_COUNTER_INC(&pVCpu->hm.s.StatDebug64SwitchBack); 1603 1603 if (pVM->hm.s.vmx.fSupported) 1604 return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnSaveGuestDebug64, 0, NULL);1605 return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnSaveGuestDebug64, 0, NULL);1604 return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_HMRCSaveGuestDebug64, 0, NULL); 1605 return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_HMRCSaveGuestDebug64, 0, NULL); 1606 1606 } 1607 1607 … … 1622 1622 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z); 1623 1623 if (pVM->hm.s.vmx.fSupported) 1624 rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnTest64, 5, &aParam[0]);1624 rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_HMRCTestSwitcher64, 5, &aParam[0]); 1625 1625 else 1626 rc = SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnTest64, 5, &aParam[0]);1626 rc = SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_HMRCTestSwitcher64, 5, &aParam[0]); 1627 1627 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z); 1628 1628 -
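These call sites no longer fetch an RC address out of pVM->hm.s; they pass an HM64ON32OP operation code which, as the comment in LegacyandAMD64.mac below notes, is defined in hm.h. A minimal sketch of that enum, reconstructed from the %define values and the range asserts in this changeset (the 0 value for INVALID and the exact member list are assumptions):

    /* Sketch of the HM64ON32OP operation codes; the authoritative definition
       lives in hm.h and is not part of this diff. */
    typedef enum HM64ON32OP
    {
        HM64ON32OP_INVALID = 0,                 /* assumed; the asserts require enmOp > INVALID */
        HM64ON32OP_VMXRCStartVM64 = 1,          /* run a 64-bit guest via VT-x                  */
        HM64ON32OP_SVMRCVMRun64 = 2,            /* run a 64-bit guest via AMD-V                 */
        HM64ON32OP_HMRCSaveGuestFPU64 = 3,      /* save the 64-bit guest FPU state              */
        HM64ON32OP_HMRCSaveGuestDebug64 = 4,    /* save the 64-bit guest debug registers        */
        HM64ON32OP_HMRCTestSwitcher64 = 5,      /* switcher self-test (debug builds)            */
        HM64ON32OP_END                          /* first invalid value, used in range asserts   */
    } HM64ON32OP;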
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r45785 r45786 4024 4024 * @param pVCpu Pointer to the VMCPU. 4025 4025 * @param pCtx Pointer to the guest CPU context. 4026 * @param pfnHandler Pointer to the RC handler function.4026 * @param enmOp The operation to perform. 4027 4027 * @param cbParam Number of parameters. 4028 4028 * @param paParam Array of 32-bit parameters. 4029 4029 */ 4030 VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam,4030 VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam, 4031 4031 uint32_t *paParam) 4032 4032 { … … 4037 4037 4038 4038 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER); 4039 Assert( pfnHandler);4039 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END); 4040 4040 Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField)); 4041 4041 Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField)); … … 4069 4069 4070 4070 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu)); 4071 CPUMSetHyperEIP(pVCpu, pfnHandler);4071 CPUMSetHyperEIP(pVCpu, enmOp); 4072 4072 for (int i = (int)cbParam - 1; i >= 0; i--) 4073 4073 CPUMPushHyper(pVCpu, paParam[i]); … … 4117 4117 RTHCPHYS HCPhysCpuPage = 0; 4118 4118 int rc = VERR_INTERNAL_ERROR_5; 4119 AssertReturn(pVM->hm.s.pfnVMXGCStartVM64, VERR_HM_IPE_5); 4119 4120 4120 4121 pCpu = HMR0GetCurrentCpu(); … … 4148 4149 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1; 4149 4150 #endif 4150 rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnVMXGCStartVM64, 6, &aParam[0]);4151 rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_VMXRCStartVM64, 6, &aParam[0]); 4151 4152 4152 4153 #ifdef VBOX_WITH_CRASHDUMP_MAGIC -
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp
r45749 r45786 3122 3122 aParam[3] = (uint32_t)(HCPhysVMCB >> 32); /* Param 2: HCPhysVMCB - Hi. */ 3123 3123 3124 return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnSVMGCVMRun64, 4, &aParam[0]);3124 return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_SVMRCVMRun64, 4, &aParam[0]); 3125 3125 } 3126 3126 … … 3133 3133 * @param pVCpu Pointer to the VMCPU. 3134 3134 * @param pCtx Pointer to the guest CPU context. 3135 * @param pfnHandler Pointer to the RC handler function.3135 * @param enmOp The operation to perform. 3136 3136 * @param cbParam Number of parameters. 3137 3137 * @param paParam Array of 32-bit parameters. 3138 3138 */ 3139 VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam,3139 VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam, 3140 3140 uint32_t *paParam) 3141 3141 { … … 3143 3143 RTHCUINTREG uOldEFlags; 3144 3144 3145 Assert(pfnHandler); 3145 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER); 3146 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END); 3146 3147 3147 3148 /* Disable interrupts. */ … … 3154 3155 3155 3156 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu)); 3156 CPUMSetHyperEIP(pVCpu, pfnHandler);3157 CPUMSetHyperEIP(pVCpu, enmOp); 3157 3158 for (int i = (int)cbParam - 1; i >= 0; i--) 3158 3159 CPUMPushHyper(pVCpu, paParam[i]); -
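The 64-on-32 handlers still take only 32-bit parameters, so 64-bit host-physical addresses are passed as lo/hi halves, as the aParam setup above shows. A sketch of that marshalling in isolation (the function and variable names are partly assumed; the call itself matches the diff):

    /* Illustrative marshalling for SVMR0Execute64BitsHandler: each 64-bit
       host-physical address becomes two 32-bit stack parameters. */
    static int demoSvmRun64On32(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx,
                                RTHCPHYS HCPhysVmcbHost, RTHCPHYS HCPhysVmcb)
    {
        uint32_t aParam[4];
        aParam[0] = (uint32_t)(HCPhysVmcbHost & 0xffffffff); /* Param 1: host VMCB - Lo. */
        aParam[1] = (uint32_t)(HCPhysVmcbHost >> 32);        /* Param 1: host VMCB - Hi. */
        aParam[2] = (uint32_t)(HCPhysVmcb & 0xffffffff);     /* Param 2: guest VMCB - Lo. */
        aParam[3] = (uint32_t)(HCPhysVmcb >> 32);            /* Param 2: guest VMCB - Hi. */
        return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_SVMRCVMRun64, 4, &aParam[0]);
    }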
trunk/src/VBox/VMM/VMMR0/HWSVMR0.h
r43455 r45786 127 127 128 128 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) 129 /** 130 * Prepares for and executes VMRUN (64-bit guests from a 32-bit host). 131 * 132 * @returns VBox status code. 133 * @param pVMCBHostPhys Physical address of host VMCB. 134 * @param pVMCBPhys Physical address of the VMCB. 135 * @param pCtx Pointer to the guest CPU context. 136 * @param pVM Pointer to the VM. 137 * @param pVCpu Pointer to the VMCPU. (not used) 138 */ 139 DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu); 140 141 /** 142 * Executes the specified handler in 64-bit mode. 143 * 144 * @returns VBox status code. 145 * @param pVM Pointer to the VM. 146 * @param pVCpu Pointer to the VMCPU. 147 * @param pCtx Pointer to the guest CPU context. 148 * @param pfnHandler Pointer to the RC handler function. 149 * @param cbParam Number of parameters. 150 * @param paParam Array of 32-bit parameters. 151 */ 152 VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam, 129 DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu); 130 VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam, 153 131 uint32_t *paParam); 154 132 #endif /* HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */ -
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
r45749 r45786 5478 5478 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1; 5479 5479 #endif 5480 rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnVMXGCStartVM64, 6, &aParam[0]);5480 rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_VMXRCStartVM64, 6, &aParam[0]); 5481 5481 5482 5482 #ifdef VBOX_WITH_CRASHDUMP_MAGIC … … 5610 5610 * @param pVCpu Pointer to the VMCPU. 5611 5611 * @param pCtx Pointer to the guest CPU context. 5612 * @param pfnHandler Pointer to the RC handler function.5612 * @param enmOp The operation to perform. 5613 5613 * @param cbParam Number of parameters. 5614 5614 * @param paParam Array of 32-bit parameters. 5615 5615 */ 5616 VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam,5616 VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam, 5617 5617 uint32_t *paParam) 5618 5618 { … … 5623 5623 5624 5624 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER); 5625 Assert( pfnHandler);5625 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END); 5626 5626 Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField)); 5627 5627 Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField)); … … 5655 5655 5656 5656 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu)); 5657 CPUMSetHyperEIP(pVCpu, pfnHandler);5657 CPUMSetHyperEIP(pVCpu, enmOp); 5658 5658 for (int i=(int)cbParam-1;i>=0;i--) 5659 5659 CPUMPushHyper(pVCpu, paParam[i]); -
trunk/src/VBox/VMM/VMMR0/HWVMXR0.h
r45681 r45786 46 46 # if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) 47 47 DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu); 48 VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam,48 VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam, 49 49 uint32_t *paParam); 50 50 # endif -
trunk/src/VBox/VMM/VMMR3/HM.cpp
r45781 r45786 1496 1496 if (HMIsEnabled(pVM)) 1497 1497 { 1498 int rc;1499 1498 switch (PGMGetHostMode(pVM)) 1500 1499 { … … 1512 1511 break; 1513 1512 } 1514 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "VMXGCStartVM64", &pVM->hm.s.pfnVMXGCStartVM64);1515 AssertReleaseMsgRC(rc, ("VMXGCStartVM64 -> rc=%Rrc\n", rc));1516 1517 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "SVMGCVMRun64", &pVM->hm.s.pfnSVMGCVMRun64);1518 AssertReleaseMsgRC(rc, ("SVMGCVMRun64 -> rc=%Rrc\n", rc));1519 1520 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "HMSaveGuestFPU64", &pVM->hm.s.pfnSaveGuestFPU64);1521 AssertReleaseMsgRC(rc, ("HMSetupFPU64 -> rc=%Rrc\n", rc));1522 1523 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "HMSaveGuestDebug64", &pVM->hm.s.pfnSaveGuestDebug64);1524 AssertReleaseMsgRC(rc, ("HMSetupDebug64 -> rc=%Rrc\n", rc));1525 1526 # ifdef DEBUG1527 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "HMTestSwitcher64", &pVM->hm.s.pfnTest64);1528 AssertReleaseMsgRC(rc, ("HMTestSwitcher64 -> rc=%Rrc\n", rc));1529 # endif1530 1513 } 1531 1514 #endif -
trunk/src/VBox/VMM/VMMR3/VM.cpp
r45618 r45786 774 774 { 775 775 int rc; 776 bool fEnabled;777 776 PCFGMNODE pRoot = CFGMR3GetRoot(pVM); 778 777 … … 794 793 Assert(pVM->fRecompileUser == false); /* ASSUMES all zeros at this point */ 795 794 #ifdef VBOX_WITH_RAW_MODE 795 bool fEnabled; 796 796 rc = CFGMR3QueryBoolDef(pRoot, "RawR3Enabled", &fEnabled, false); AssertRCReturn(rc, rc); 797 797 pVM->fRecompileUser = !fEnabled; -
trunk/src/VBox/VMM/VMMR3/VMMSwitcher.cpp
r45750 r45786 145 145 int vmmR3SwitcherInit(PVM pVM) 146 146 { 147 #if ndef VBOX_WITH_RAW_MODE /** @todo 64-bit on 32-bit. */147 #if !defined(VBOX_WITH_RAW_MODE) && (HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)) 148 148 return VINF_SUCCESS; 149 149 #else 150 150 151 /* 151 152 * Calc the size. … … 281 282 void vmmR3SwitcherRelocate(PVM pVM, RTGCINTPTR offDelta) 282 283 { 283 #if def VBOX_WITH_RAW_MODE284 #if defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)) 284 285 /* 285 286 * Relocate all the switchers. … … 316 317 AssertRelease(HMIsEnabled(pVM)); 317 318 318 // AssertFailed();319 319 #else 320 320 NOREF(pVM); … … 324 324 325 325 326 #if def VBOX_WITH_RAW_MODE326 #if defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)) 327 327 328 328 /** … … 675 675 } 676 676 677 # if defined(RT_ARCH_AMD64) || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)677 # if defined(RT_ARCH_AMD64) || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) 678 678 /* 679 679 * 64-bit HC Code Selector (no argument). … … 682 682 { 683 683 Assert(offSrc < pSwitcher->cbCode); 684 # if defined(RT_OS_DARWIN) && defined(VBOX_WITH_HYBRID_32BIT_KERNEL)684 # if defined(RT_OS_DARWIN) && defined(VBOX_WITH_HYBRID_32BIT_KERNEL) 685 685 *uSrc.pu16 = 0x80; /* KERNEL64_CS from i386/seg.h */ 686 # else686 # else 687 687 AssertFatalMsgFailed(("FIX_HC_64BIT_CS not implemented for this host\n")); 688 # endif688 # endif 689 689 break; 690 690 } … … 699 699 break; 700 700 } 701 # endif701 # endif 702 702 /* 703 703 * 64-bit HC pointer fixup to (HC) target within the code (32-bit offset). … … 712 712 } 713 713 714 # ifdef RT_ARCH_X86714 # ifdef RT_ARCH_X86 715 715 case FIX_GC_64_BIT_CPUM_OFF: 716 716 { … … 720 720 break; 721 721 } 722 # endif722 # endif 723 723 724 724 /* … … 761 761 } 762 762 763 # ifdef VBOX_WITH_NMI763 # ifdef VBOX_WITH_NMI 764 764 /* 765 765 * 32-bit address to the APIC base. … … 770 770 break; 771 771 } 772 # endif772 # endif 773 773 774 774 default: … … 778 778 } 779 779 780 # ifdef LOG_ENABLED780 # ifdef LOG_ENABLED 781 781 /* 782 782 * If Log2 is enabled disassemble the switcher code. … … 913 913 } 914 914 } 915 # endif915 # endif 916 916 } 917 917 … … 927 927 if (HMIsRawModeCtxNeeded(pVM)) 928 928 return SELMGetHyperGDT(pVM); 929 # if HC_ARCH_BITS != 32 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)929 # if HC_ARCH_BITS != 32 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) 930 930 AssertFailed(); /* This path is only applicable to some 32-bit hosts. */ 931 # endif931 # endif 932 932 return NIL_RTRCPTR; 933 933 } … … 1062 1062 } 1063 1063 1064 #endif /* VBOX_WITH_RAW_MODE*/1064 #endif /* #defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)) */ 1065 1065 1066 1066 -
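The same preprocessor expression now guards the init, relocate and fixup code in this file: build the switchers when raw mode is enabled, or when a 32-bit host may still need the 64-on-32 switcher. Purely as an illustration of what that condition encodes (this helper macro is not part of the changeset):

    /* Hypothetical convenience macro -- not in the tree -- naming the condition
       under which the world-switcher code is compiled. */
    #if defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL))
    # define DEMO_VMM_WITH_SWITCHERS 1
    #endif

    #ifdef DEMO_VMM_WITH_SWITCHERS
    /* vmmR3SwitcherInit(), vmmR3SwitcherRelocate() and the fixup code live here. */
    #endif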
trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac
r45745 r45786 1 1 ; $Id$ 2 2 ;; @file 3 ; VMM - World Switchers, 32Bit to AMD64 intermediate context. 4 ; 5 ; This is used for running 64-bit guest on 32-bit hosts, not normal raw-mode. 6 ; 7 8 ; 9 ; Copyright (C) 2006-2012 Oracle Corporation 3 ; VMM - World Switchers, 32-bit to AMD64 intermediate context. 4 ; 5 ; This is used for running 64-bit guest on 32-bit hosts, not 6 ; normal raw-mode. All the code involved is contained in this 7 ; file. 8 ; 9 10 ; 11 ; Copyright (C) 2006-2013 Oracle Corporation 10 12 ; 11 13 ; This file is part of VirtualBox Open Source Edition (OSE), as … … 18 20 ; 19 21 20 ;%define DEBUG_STUFF 121 ;%define STRICT_IF 122 22 23 23 ;******************************************************************************* 24 24 ;* Defined Constants And Macros * 25 25 ;******************************************************************************* 26 ;; @note These values are from the HM64ON32OP enum in hm.h. 27 %define HM64ON32OP_VMXRCStartVM64 1 28 %define HM64ON32OP_SVMRCVMRun64 2 29 %define HM64ON32OP_HMRCSaveGuestFPU64 3 30 %define HM64ON32OP_HMRCSaveGuestDebug64 4 31 %define HM64ON32OP_HMRCTestSwitcher64 5 32 33 ;; Stubs for making OS/2 compile (though, not work). 34 %ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely. 35 %macro vmwrite 2, 36 int3 37 %endmacro 38 %define vmlaunch int3 39 %define vmresume int3 40 %define vmsave int3 41 %define vmload int3 42 %define vmrun int3 43 %define clgi int3 44 %define stgi int3 45 %macro invlpga 2, 46 int3 47 %endmacro 48 %endif 49 50 ;; Debug options 51 ;%define DEBUG_STUFF 1 52 ;%define STRICT_IF 1 26 53 27 54 … … 30 57 ;******************************************************************************* 31 58 %include "VBox/asmdefs.mac" 59 %include "iprt/x86.mac" 60 %include "VBox/err.mac" 32 61 %include "VBox/apic.mac" 33 %include "iprt/x86.mac" 62 34 63 %include "VBox/vmm/cpum.mac" 35 64 %include "VBox/vmm/stam.mac" 36 65 %include "VBox/vmm/vm.mac" 66 %include "VBox/vmm/hm_vmx.mac" 37 67 %include "CPUMInternal.mac" 68 %include "HMInternal.mac" 38 69 %include "VMMSwitcher.mac" 39 70 … … 175 206 mov [edx + CPUMCPU.Host.ss], ss 176 207 ; special registers. 208 DEBUG32_S_CHAR('s') 209 DEBUG32_S_CHAR(';') 177 210 sldt [edx + CPUMCPU.Host.ldtr] 178 211 sidt [edx + CPUMCPU.Host.idtr] … … 185 218 186 219 %ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI 220 DEBUG32_S_CHAR('f') 221 DEBUG32_S_CHAR(';') 187 222 CPUM_FROM_CPUMCPU_WITH_OFFSET edx, ebp 188 223 mov ebx, [edx + CPUM.pvApicBase] … … 246 281 mov eax, cr4 247 282 mov [edx + CPUMCPU.Host.cr4], eax 283 DEBUG32_S_CHAR('c') 284 DEBUG32_S_CHAR(';') 248 285 249 286 ; save the host EFER msr … … 254 291 mov [ebx + CPUMCPU.Host.efer + 4], edx 255 292 mov edx, ebx 293 DEBUG32_S_CHAR('e') 294 DEBUG32_S_CHAR(';') 256 295 257 296 %ifdef VBOX_WITH_CRASHDUMP_MAGIC … … 262 301 lgdt [edx + CPUMCPU.Hyper.gdtr] 263 302 303 DEBUG32_S_CHAR('g') 304 DEBUG32_S_CHAR('!') 264 305 %ifdef VBOX_WITH_CRASHDUMP_MAGIC 265 306 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 4 … … 272 313 mov eax, 0ffffffffh 273 314 mov cr3, eax 274 DEBUG _CHAR('?')315 DEBUG32_CHAR('?') 275 316 276 317 ;; … … 284 325 ALIGNCODE(16) 285 326 GLOBALNAME IDEnterTarget 286 DEBUG _CHAR('2')327 DEBUG32_CHAR('1') 287 328 288 329 ; 1. Disable paging. 
… … 290 331 and ebx, ~X86_CR0_PG 291 332 mov cr0, ebx 292 DEBUG _CHAR('2')333 DEBUG32_CHAR('2') 293 334 294 335 %ifdef VBOX_WITH_CRASHDUMP_MAGIC … … 306 347 mov ecx, 0ffffffffh 307 348 mov cr3, ecx 308 DEBUG _CHAR('3')349 DEBUG32_CHAR('3') 309 350 310 351 %ifdef VBOX_WITH_CRASHDUMP_MAGIC … … 322 363 wrmsr 323 364 mov edx, esi 324 DEBUG _CHAR('4')365 DEBUG32_CHAR('4') 325 366 326 367 %ifdef VBOX_WITH_CRASHDUMP_MAGIC … … 334 375 and ebx, ~X86_CR0_WRITE_PROTECT 335 376 mov cr0, ebx 336 DEBUG _CHAR('5')377 DEBUG32_CHAR('5') 337 378 338 379 ; Jump from compatibility mode to 64-bit mode. … … 345 386 ALIGNCODE(16) 346 387 NAME(IDEnter64Mode): 347 DEBUG _CHAR('6')388 DEBUG64_CHAR('6') 348 389 jmp [NAME(pICEnterTarget) wrt rip] 349 390 … … 386 427 387 428 ; Setup stack. 388 DEBUG _CHAR('7')429 DEBUG64_CHAR('7') 389 430 mov rsp, 0 390 431 mov eax, [rdx + CPUMCPU.Hyper.ss.Sel] … … 399 440 ; load the hypervisor function address 400 441 mov r9, [rdx + CPUMCPU.Hyper.eip] 442 DEBUG64_S_CHAR('8') 401 443 402 444 ; Check if we need to restore the guest FPU state … … 447 489 448 490 ; parameter for all helper functions (pCtx) 491 DEBUG64_CHAR('9') 449 492 lea rsi, [rdx + CPUMCPU.Guest.fpu] 450 call r9 493 lea rax, [gth_return wrt rip] 494 push rax ; return address 495 496 cmp r9d, HM64ON32OP_VMXRCStartVM64 497 jz NAME(VMXRCStartVM64) 498 cmp r9d, HM64ON32OP_SVMRCVMRun64 499 jz NAME(SVMRCVMRun64) 500 cmp r9d, HM64ON32OP_HMRCSaveGuestFPU64 501 jz NAME(HMRCSaveGuestFPU64) 502 cmp r9d, HM64ON32OP_HMRCSaveGuestDebug64 503 jz NAME(HMRCSaveGuestDebug64) 504 cmp r9d, HM64ON32OP_HMRCTestSwitcher64 505 jz NAME(HMRCTestSwitcher64) 506 mov eax, VERR_HM_INVALID_HM64ON32OP 507 gth_return: 508 DEBUG64_CHAR('r') 451 509 452 510 ; Load CPUM pointer into rdx … … 465 523 466 524 ENDPROC vmmR0ToRawModeAsm 525 526 527 528 529 ; 530 ; 531 ; HM code (used to be HMRCA.asm at one point). 532 ; HM code (used to be HMRCA.asm at one point). 533 ; HM code (used to be HMRCA.asm at one point). 534 ; 535 ; 536 537 538 539 ; Load the corresponding guest MSR (trashes rdx & rcx) 540 %macro LOADGUESTMSR 2 541 mov rcx, %1 542 mov edx, dword [rsi + %2 + 4] 543 mov eax, dword [rsi + %2] 544 wrmsr 545 %endmacro 546 547 ; Save a guest MSR (trashes rdx & rcx) 548 ; Only really useful for gs kernel base as that one can be changed behind our back (swapgs) 549 %macro SAVEGUESTMSR 2 550 mov rcx, %1 551 rdmsr 552 mov dword [rsi + %2], eax 553 mov dword [rsi + %2 + 4], edx 554 %endmacro 555 556 ;; @def MYPUSHSEGS 557 ; Macro saving all segment registers on the stack. 
558 ; @param 1 full width register name 559 %macro MYPUSHSEGS 1 560 mov %1, es 561 push %1 562 mov %1, ds 563 push %1 564 %endmacro 565 566 ;; @def MYPOPSEGS 567 ; Macro restoring all segment registers on the stack 568 ; @param 1 full width register name 569 %macro MYPOPSEGS 1 570 pop %1 571 mov ds, %1 572 pop %1 573 mov es, %1 574 %endmacro 575 576 577 ;/** 578 ; * Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode) 579 ; * 580 ; * @returns VBox status code 581 ; * @param HCPhysCpuPage VMXON physical address [rsp+8] 582 ; * @param HCPhysVmcs VMCS physical address [rsp+16] 583 ; * @param pCache VMCS cache [rsp+24] 584 ; * @param pCtx Guest context (rsi) 585 ; */ 586 BEGINPROC VMXRCStartVM64 587 push rbp 588 mov rbp, rsp 589 590 ; Make sure VT-x instructions are allowed 591 mov rax, cr4 592 or rax, X86_CR4_VMXE 593 mov cr4, rax 594 595 ;/* Enter VMX Root Mode */ 596 vmxon [rbp + 8 + 8] 597 jnc .vmxon_success 598 mov rax, VERR_VMX_INVALID_VMXON_PTR 599 jmp .vmstart64_vmxon_failed 600 601 .vmxon_success: 602 jnz .vmxon_success2 603 mov rax, VERR_VMX_VMXON_FAILED 604 jmp .vmstart64_vmxon_failed 605 606 .vmxon_success2: 607 ; Activate the VMCS pointer 608 vmptrld [rbp + 16 + 8] 609 jnc .vmptrld_success 610 mov rax, VERR_VMX_INVALID_VMCS_PTR 611 jmp .vmstart64_vmxoff_end 612 613 .vmptrld_success: 614 jnz .vmptrld_success2 615 mov rax, VERR_VMX_VMPTRLD_FAILED 616 jmp .vmstart64_vmxoff_end 617 618 .vmptrld_success2: 619 620 ; Save the VMCS pointer on the stack 621 push qword [rbp + 16 + 8]; 622 623 ;/* Save segment registers */ 624 MYPUSHSEGS rax 625 626 %ifdef VMX_USE_CACHED_VMCS_ACCESSES 627 ; Flush the VMCS write cache first (before any other vmreads/vmwrites!) 628 mov rbx, [rbp + 24 + 8] ; pCache 629 630 %ifdef VBOX_WITH_CRASHDUMP_MAGIC 631 mov qword [rbx + VMCSCACHE.uPos], 2 632 %endif 633 634 %ifdef DEBUG 635 mov rax, [rbp + 8 + 8] ; HCPhysCpuPage 636 mov [rbx + VMCSCACHE.TestIn.HCPhysCpuPage], rax 637 mov rax, [rbp + 16 + 8] ; HCPhysVmcs 638 mov [rbx + VMCSCACHE.TestIn.HCPhysVmcs], rax 639 mov [rbx + VMCSCACHE.TestIn.pCache], rbx 640 mov [rbx + VMCSCACHE.TestIn.pCtx], rsi 641 %endif 642 643 mov ecx, [rbx + VMCSCACHE.Write.cValidEntries] 644 cmp ecx, 0 645 je .no_cached_writes 646 mov rdx, rcx 647 mov rcx, 0 648 jmp .cached_write 649 650 ALIGN(16) 651 .cached_write: 652 mov eax, [rbx + VMCSCACHE.Write.aField + rcx*4] 653 vmwrite rax, qword [rbx + VMCSCACHE.Write.aFieldVal + rcx*8] 654 inc rcx 655 cmp rcx, rdx 656 jl .cached_write 657 658 mov dword [rbx + VMCSCACHE.Write.cValidEntries], 0 659 .no_cached_writes: 660 661 %ifdef VBOX_WITH_CRASHDUMP_MAGIC 662 mov qword [rbx + VMCSCACHE.uPos], 3 663 %endif 664 ; Save the pCache pointer 665 push rbx 666 %endif 667 668 ; Save the host state that's relevant in the temporary 64 bits mode 669 mov rdx, cr0 670 mov eax, VMX_VMCS_HOST_CR0 671 vmwrite rax, rdx 672 673 mov rdx, cr3 674 mov eax, VMX_VMCS_HOST_CR3 675 vmwrite rax, rdx 676 677 mov rdx, cr4 678 mov eax, VMX_VMCS_HOST_CR4 679 vmwrite rax, rdx 680 681 mov rdx, cs 682 mov eax, VMX_VMCS_HOST_FIELD_CS 683 vmwrite rax, rdx 684 685 mov rdx, ss 686 mov eax, VMX_VMCS_HOST_FIELD_SS 687 vmwrite rax, rdx 688 689 sub rsp, 8*2 690 sgdt [rsp] 691 mov eax, VMX_VMCS_HOST_GDTR_BASE 692 vmwrite rax, [rsp+2] 693 add rsp, 8*2 694 695 %ifdef VBOX_WITH_CRASHDUMP_MAGIC 696 mov qword [rbx + VMCSCACHE.uPos], 4 697 %endif 698 699 ; hopefully we can ignore TR (we restore it anyway on the way back to 32 bits mode) 700 701 ;/* First we have to save some final CPU context registers. 
*/ 702 lea rdx, [.vmlaunch64_done wrt rip] 703 mov rax, VMX_VMCS_HOST_RIP ;/* return address (too difficult to continue after VMLAUNCH?) */ 704 vmwrite rax, rdx 705 ;/* Note: assumes success... */ 706 707 ;/* Manual save and restore: 708 ; * - General purpose registers except RIP, RSP 709 ; * 710 ; * Trashed: 711 ; * - CR2 (we don't care) 712 ; * - LDTR (reset to 0) 713 ; * - DRx (presumably not changed at all) 714 ; * - DR7 (reset to 0x400) 715 ; * - EFLAGS (reset to RT_BIT(1); not relevant) 716 ; * 717 ; */ 718 719 %ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE 720 ; Load the guest LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs 721 LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR 722 LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR 723 LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK 724 %endif 725 ; Kernel GS Base is special, we need to manually load/store it, see @bugref{6208}. 726 LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE 727 728 %ifdef VBOX_WITH_CRASHDUMP_MAGIC 729 mov qword [rbx + VMCSCACHE.uPos], 5 730 %endif 731 732 ; Save the pCtx pointer 733 push rsi 734 735 ; Restore CR2 736 mov rbx, qword [rsi + CPUMCTX.cr2] 737 mov rdx, cr2 738 cmp rdx, rbx 739 je .skipcr2write64 740 mov cr2, rbx 741 742 .skipcr2write64: 743 mov eax, VMX_VMCS_HOST_RSP 744 vmwrite rax, rsp 745 ;/* Note: assumes success... */ 746 ;/* Don't mess with ESP anymore!! */ 747 748 ;/* Restore Guest's general purpose registers. */ 749 mov rax, qword [rsi + CPUMCTX.eax] 750 mov rbx, qword [rsi + CPUMCTX.ebx] 751 mov rcx, qword [rsi + CPUMCTX.ecx] 752 mov rdx, qword [rsi + CPUMCTX.edx] 753 mov rbp, qword [rsi + CPUMCTX.ebp] 754 mov r8, qword [rsi + CPUMCTX.r8] 755 mov r9, qword [rsi + CPUMCTX.r9] 756 mov r10, qword [rsi + CPUMCTX.r10] 757 mov r11, qword [rsi + CPUMCTX.r11] 758 mov r12, qword [rsi + CPUMCTX.r12] 759 mov r13, qword [rsi + CPUMCTX.r13] 760 mov r14, qword [rsi + CPUMCTX.r14] 761 mov r15, qword [rsi + CPUMCTX.r15] 762 763 ;/* Restore rdi & rsi. */ 764 mov rdi, qword [rsi + CPUMCTX.edi] 765 mov rsi, qword [rsi + CPUMCTX.esi] 766 767 vmlaunch 768 jmp .vmlaunch64_done; ;/* here if vmlaunch detected a failure. */ 769 770 ALIGNCODE(16) 771 .vmlaunch64_done: 772 jc near .vmstart64_invalid_vmxon_ptr 773 jz near .vmstart64_start_failed 774 775 push rdi 776 mov rdi, [rsp + 8] ; pCtx 777 778 mov qword [rdi + CPUMCTX.eax], rax 779 mov qword [rdi + CPUMCTX.ebx], rbx 780 mov qword [rdi + CPUMCTX.ecx], rcx 781 mov qword [rdi + CPUMCTX.edx], rdx 782 mov qword [rdi + CPUMCTX.esi], rsi 783 mov qword [rdi + CPUMCTX.ebp], rbp 784 mov qword [rdi + CPUMCTX.r8], r8 785 mov qword [rdi + CPUMCTX.r9], r9 786 mov qword [rdi + CPUMCTX.r10], r10 787 mov qword [rdi + CPUMCTX.r11], r11 788 mov qword [rdi + CPUMCTX.r12], r12 789 mov qword [rdi + CPUMCTX.r13], r13 790 mov qword [rdi + CPUMCTX.r14], r14 791 mov qword [rdi + CPUMCTX.r15], r15 792 %ifndef VBOX_WITH_OLD_VTX_CODE 793 mov rax, cr2 794 mov qword [rdi + CPUMCTX.cr2], rax 795 %endif 796 797 pop rax ; the guest edi we pushed above 798 mov qword [rdi + CPUMCTX.edi], rax 799 800 pop rsi ; pCtx (needed in rsi by the macros below) 801 802 %ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE 803 SAVEGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR 804 SAVEGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR 805 SAVEGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK 806 %endif 807 ; Kernel GS Base is special, we need to manually load/store it, see @bugref{6208}. 
808 SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE 809 810 %ifdef VMX_USE_CACHED_VMCS_ACCESSES 811 pop rdi ; saved pCache 812 813 %ifdef VBOX_WITH_CRASHDUMP_MAGIC 814 mov dword [rdi + VMCSCACHE.uPos], 7 815 %endif 816 %ifdef DEBUG 817 mov [rdi + VMCSCACHE.TestOut.pCache], rdi 818 mov [rdi + VMCSCACHE.TestOut.pCtx], rsi 819 mov rax, cr8 820 mov [rdi + VMCSCACHE.TestOut.cr8], rax 821 %endif 822 823 mov ecx, [rdi + VMCSCACHE.Read.cValidEntries] 824 cmp ecx, 0 ; can't happen 825 je .no_cached_reads 826 jmp .cached_read 827 828 ALIGN(16) 829 .cached_read: 830 dec rcx 831 mov eax, [rdi + VMCSCACHE.Read.aField + rcx*4] 832 vmread qword [rdi + VMCSCACHE.Read.aFieldVal + rcx*8], rax 833 cmp rcx, 0 834 jnz .cached_read 835 .no_cached_reads: 836 837 %ifdef VBOX_WITH_OLD_VTX_CODE 838 ; Save CR2 for EPT 839 mov rax, cr2 840 mov [rdi + VMCSCACHE.cr2], rax 841 %endif 842 %ifdef VBOX_WITH_CRASHDUMP_MAGIC 843 mov dword [rdi + VMCSCACHE.uPos], 8 844 %endif 845 %endif 846 847 ; Restore segment registers 848 MYPOPSEGS rax 849 850 mov eax, VINF_SUCCESS 851 852 %ifdef VBOX_WITH_CRASHDUMP_MAGIC 853 mov dword [rdi + VMCSCACHE.uPos], 9 854 %endif 855 .vmstart64_end: 856 857 %ifdef VMX_USE_CACHED_VMCS_ACCESSES 858 %ifdef DEBUG 859 mov rdx, [rsp] ; HCPhysVmcs 860 mov [rdi + VMCSCACHE.TestOut.HCPhysVmcs], rdx 861 %endif 862 %endif 863 864 ; Write back the data and disable the VMCS 865 vmclear qword [rsp] ;Pushed pVMCS 866 add rsp, 8 867 868 .vmstart64_vmxoff_end: 869 ; Disable VMX root mode 870 vmxoff 871 .vmstart64_vmxon_failed: 872 %ifdef VMX_USE_CACHED_VMCS_ACCESSES 873 %ifdef DEBUG 874 cmp eax, VINF_SUCCESS 875 jne .skip_flags_save 876 877 pushf 878 pop rdx 879 mov [rdi + VMCSCACHE.TestOut.eflags], rdx 880 %ifdef VBOX_WITH_CRASHDUMP_MAGIC 881 mov dword [rdi + VMCSCACHE.uPos], 12 882 %endif 883 .skip_flags_save: 884 %endif 885 %endif 886 pop rbp 887 ret 888 889 890 .vmstart64_invalid_vmxon_ptr: 891 pop rsi ; pCtx (needed in rsi by the macros below) 892 893 %ifdef VMX_USE_CACHED_VMCS_ACCESSES 894 pop rdi ; pCache 895 %ifdef VBOX_WITH_CRASHDUMP_MAGIC 896 mov dword [rdi + VMCSCACHE.uPos], 10 897 %endif 898 899 %ifdef DEBUG 900 mov [rdi + VMCSCACHE.TestOut.pCache], rdi 901 mov [rdi + VMCSCACHE.TestOut.pCtx], rsi 902 %endif 903 %endif 904 905 ; Restore segment registers 906 MYPOPSEGS rax 907 908 ; Restore all general purpose host registers. 909 mov eax, VERR_VMX_INVALID_VMXON_PTR 910 jmp .vmstart64_end 911 912 .vmstart64_start_failed: 913 pop rsi ; pCtx (needed in rsi by the macros below) 914 915 %ifdef VMX_USE_CACHED_VMCS_ACCESSES 916 pop rdi ; pCache 917 918 %ifdef DEBUG 919 mov [rdi + VMCSCACHE.TestOut.pCache], rdi 920 mov [rdi + VMCSCACHE.TestOut.pCtx], rsi 921 %endif 922 %ifdef VBOX_WITH_CRASHDUMP_MAGIC 923 mov dword [rdi + VMCSCACHE.uPos], 11 924 %endif 925 %endif 926 927 ; Restore segment registers 928 MYPOPSEGS rax 929 930 ; Restore all general purpose host registers. 
931 mov eax, VERR_VMX_UNABLE_TO_START_VM 932 jmp .vmstart64_end 933 ENDPROC VMXRCStartVM64 934 935 936 ;/** 937 ; * Prepares for and executes VMRUN (64 bits guests) 938 ; * 939 ; * @returns VBox status code 940 ; * @param HCPhysVMCB Physical address of host VMCB (rsp+8) 941 ; * @param HCPhysVMCB Physical address of guest VMCB (rsp+16) 942 ; * @param pCtx Guest context (rsi) 943 ; */ 944 BEGINPROC SVMRCVMRun64 945 push rbp 946 mov rbp, rsp 947 pushf 948 949 ;/* Manual save and restore: 950 ; * - General purpose registers except RIP, RSP, RAX 951 ; * 952 ; * Trashed: 953 ; * - CR2 (we don't care) 954 ; * - LDTR (reset to 0) 955 ; * - DRx (presumably not changed at all) 956 ; * - DR7 (reset to 0x400) 957 ; */ 958 959 ;/* Save the Guest CPU context pointer. */ 960 push rsi ; push for saving the state at the end 961 962 ; save host fs, gs, sysenter msr etc 963 mov rax, [rbp + 8 + 8] ; pVMCBHostPhys (64 bits physical address) 964 push rax ; save for the vmload after vmrun 965 vmsave 966 967 ; setup eax for VMLOAD 968 mov rax, [rbp + 8 + 8 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address) 969 970 ;/* Restore Guest's general purpose registers. */ 971 ;/* RAX is loaded from the VMCB by VMRUN */ 972 mov rbx, qword [rsi + CPUMCTX.ebx] 973 mov rcx, qword [rsi + CPUMCTX.ecx] 974 mov rdx, qword [rsi + CPUMCTX.edx] 975 mov rdi, qword [rsi + CPUMCTX.edi] 976 mov rbp, qword [rsi + CPUMCTX.ebp] 977 mov r8, qword [rsi + CPUMCTX.r8] 978 mov r9, qword [rsi + CPUMCTX.r9] 979 mov r10, qword [rsi + CPUMCTX.r10] 980 mov r11, qword [rsi + CPUMCTX.r11] 981 mov r12, qword [rsi + CPUMCTX.r12] 982 mov r13, qword [rsi + CPUMCTX.r13] 983 mov r14, qword [rsi + CPUMCTX.r14] 984 mov r15, qword [rsi + CPUMCTX.r15] 985 mov rsi, qword [rsi + CPUMCTX.esi] 986 987 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch 988 clgi 989 sti 990 991 ; load guest fs, gs, sysenter msr etc 992 vmload 993 ; run the VM 994 vmrun 995 996 ;/* RAX is in the VMCB already; we can use it here. */ 997 998 ; save guest fs, gs, sysenter msr etc 999 vmsave 1000 1001 ; load host fs, gs, sysenter msr etc 1002 pop rax ; pushed above 1003 vmload 1004 1005 ; Set the global interrupt flag again, but execute cli to make sure IF=0. 
1006 cli 1007 stgi 1008 1009 pop rax ; pCtx 1010 1011 mov qword [rax + CPUMCTX.ebx], rbx 1012 mov qword [rax + CPUMCTX.ecx], rcx 1013 mov qword [rax + CPUMCTX.edx], rdx 1014 mov qword [rax + CPUMCTX.esi], rsi 1015 mov qword [rax + CPUMCTX.edi], rdi 1016 mov qword [rax + CPUMCTX.ebp], rbp 1017 mov qword [rax + CPUMCTX.r8], r8 1018 mov qword [rax + CPUMCTX.r9], r9 1019 mov qword [rax + CPUMCTX.r10], r10 1020 mov qword [rax + CPUMCTX.r11], r11 1021 mov qword [rax + CPUMCTX.r12], r12 1022 mov qword [rax + CPUMCTX.r13], r13 1023 mov qword [rax + CPUMCTX.r14], r14 1024 mov qword [rax + CPUMCTX.r15], r15 1025 1026 mov eax, VINF_SUCCESS 1027 1028 popf 1029 pop rbp 1030 ret 1031 ENDPROC SVMRCVMRun64 1032 1033 ;/** 1034 ; * Saves the guest FPU context 1035 ; * 1036 ; * @returns VBox status code 1037 ; * @param pCtx Guest context [rsi] 1038 ; */ 1039 BEGINPROC HMRCSaveGuestFPU64 1040 mov rax, cr0 1041 mov rcx, rax ; save old CR0 1042 and rax, ~(X86_CR0_TS | X86_CR0_EM) 1043 mov cr0, rax 1044 1045 fxsave [rsi + CPUMCTX.fpu] 1046 1047 mov cr0, rcx ; and restore old CR0 again 1048 1049 mov eax, VINF_SUCCESS 1050 ret 1051 ENDPROC HMRCSaveGuestFPU64 1052 1053 ;/** 1054 ; * Saves the guest debug context (DR0-3, DR6) 1055 ; * 1056 ; * @returns VBox status code 1057 ; * @param pCtx Guest context [rsi] 1058 ; */ 1059 BEGINPROC HMRCSaveGuestDebug64 1060 mov rax, dr0 1061 mov qword [rsi + CPUMCTX.dr + 0*8], rax 1062 mov rax, dr1 1063 mov qword [rsi + CPUMCTX.dr + 1*8], rax 1064 mov rax, dr2 1065 mov qword [rsi + CPUMCTX.dr + 2*8], rax 1066 mov rax, dr3 1067 mov qword [rsi + CPUMCTX.dr + 3*8], rax 1068 mov rax, dr6 1069 mov qword [rsi + CPUMCTX.dr + 6*8], rax 1070 mov eax, VINF_SUCCESS 1071 ret 1072 ENDPROC HMRCSaveGuestDebug64 1073 1074 ;/** 1075 ; * Dummy callback handler 1076 ; * 1077 ; * @returns VBox status code 1078 ; * @param param1 Parameter 1 [rsp+8] 1079 ; * @param param2 Parameter 2 [rsp+12] 1080 ; * @param param3 Parameter 3 [rsp+16] 1081 ; * @param param4 Parameter 4 [rsp+20] 1082 ; * @param param5 Parameter 5 [rsp+24] 1083 ; * @param pCtx Guest context [rsi] 1084 ; */ 1085 BEGINPROC HMRCTestSwitcher64 1086 mov eax, [rsp+8] 1087 ret 1088 ENDPROC HMRCTestSwitcher64 1089 1090 1091 1092 1093 ; 1094 ; 1095 ; Back to switcher code. 1096 ; Back to switcher code. 1097 ; Back to switcher code. 1098 ; 1099 ; 1100 467 1101 468 1102 … … 497 1131 push rsi 498 1132 COM_NEWLINE 499 DEBUG_CHAR('b')500 DEBUG_CHAR('a')501 DEBUG_CHAR('c')502 DEBUG_CHAR('k')503 DEBUG_CHAR('!')1133 COM_CHAR 'b' 1134 COM_CHAR 'a' 1135 COM_CHAR 'c' 1136 COM_CHAR 'k' 1137 COM_CHAR '!' 504 1138 COM_NEWLINE 505 1139 pop rsi … … 540 1174 GLOBALNAME IDExitTarget 541 1175 BITS 32 542 DEBUG _CHAR('1')1176 DEBUG32_CHAR('1') 543 1177 544 1178 ; 1. Deactivate long mode by turning off paging. … … 546 1180 and ebx, ~X86_CR0_PG 547 1181 mov cr0, ebx 548 DEBUG _CHAR('2')1182 DEBUG32_CHAR('2') 549 1183 550 1184 ; 2. Load intermediate page table. … … 552 1186 mov edx, 0ffffffffh 553 1187 mov cr3, edx 554 DEBUG _CHAR('3')1188 DEBUG32_CHAR('3') 555 1189 556 1190 ; 3. Disable long mode. 
557 1191 mov ecx, MSR_K6_EFER 558 1192 rdmsr 559 DEBUG _CHAR('5')1193 DEBUG32_CHAR('5') 560 1194 and eax, ~(MSR_K6_EFER_LME) 561 1195 wrmsr 562 DEBUG _CHAR('6')1196 DEBUG32_CHAR('6') 563 1197 564 1198 %ifndef NEED_PAE_ON_HOST … … 567 1201 and eax, ~X86_CR4_PAE 568 1202 mov cr4, eax 569 DEBUG _CHAR('7')1203 DEBUG32_CHAR('7') 570 1204 %endif 571 1205 … … 575 1209 jmp short just_a_jump 576 1210 just_a_jump: 577 DEBUG _CHAR('8')1211 DEBUG32_CHAR('8') 578 1212 579 1213 ;; … … 590 1224 ALIGNCODE(16) 591 1225 GLOBALNAME ICExitTarget 592 DEBUG _CHAR('8')1226 DEBUG32_CHAR('8') 593 1227 594 1228 ; load the hypervisor data selector into ds & es … … 619 1253 ; activate host gdt and idt 620 1254 lgdt [edx + CPUMCPU.Host.gdtr] 621 DEBUG _CHAR('0')1255 DEBUG32_CHAR('0') 622 1256 lidt [edx + CPUMCPU.Host.idtr] 623 DEBUG _CHAR('1')1257 DEBUG32_CHAR('1') 624 1258 625 1259 ; Restore TSS selector; must mark it as not busy before using ltr (!) … … 632 1266 633 1267 ; activate ldt 634 DEBUG _CHAR('2')1268 DEBUG32_CHAR('2') 635 1269 lldt [edx + CPUMCPU.Host.ldtr] 636 1270 -
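The value that used to be an RC code address (loaded into r9 from CPUMCPU.Hyper.eip) is now one of the HM64ON32OP codes, and the cmp/jz chain before gth_return routes it to the matching 64-bit routine that now lives in this file. In C terms the dispatch is roughly the following sketch (illustrative only; the real thing is the assembly above):

    /* Rough C equivalent of the operation dispatch in vmmR0ToRawModeAsm. */
    static int demoDispatch64On32(uint32_t uOp)    /* uOp comes from CPUMCPU.Hyper.eip */
    {
        switch (uOp)
        {
            case HM64ON32OP_VMXRCStartVM64:        /* -> VMXRCStartVM64        */ break;
            case HM64ON32OP_SVMRCVMRun64:          /* -> SVMRCVMRun64          */ break;
            case HM64ON32OP_HMRCSaveGuestFPU64:    /* -> HMRCSaveGuestFPU64    */ break;
            case HM64ON32OP_HMRCSaveGuestDebug64:  /* -> HMRCSaveGuestDebug64  */ break;
            case HM64ON32OP_HMRCTestSwitcher64:    /* -> HMRCTestSwitcher64    */ break;
            default:
                return VERR_HM_INVALID_HM64ON32OP; /* unknown operation code   */
        }
        return VINF_SUCCESS;                       /* the routine's status comes back in eax */
    }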
trunk/src/VBox/VMM/include/HMInternal.h
r45781 r45786 316 316 /** 32 to 64 bits switcher entrypoint. */ 317 317 R0PTRTYPE(PFNHMSWITCHERHC) pfnHost32ToGuest64R0; 318 319 /* AMD-V 64 bits vmrun handler */ 320 RTRCPTR pfnSVMGCVMRun64; 321 322 /* VT-x 64 bits vmlaunch handler */ 323 RTRCPTR pfnVMXGCStartVM64; 324 325 /* RC handler to setup the 64 bits FPU state. */ 326 RTRCPTR pfnSaveGuestFPU64; 327 328 /* RC handler to setup the 64 bits debug state. */ 329 RTRCPTR pfnSaveGuestDebug64; 330 331 /* Test handler */ 332 RTRCPTR pfnTest64; 333 334 RTRCPTR uAlignment[2]; 335 /*#elif defined(VBOX_WITH_HYBRID_32BIT_KERNEL) 336 uint32_t u32Alignment[1]; */ 318 RTR0PTR uPadding2; 337 319 #endif 338 320 -
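The per-handler RTRCPTR members (and the alignment filler) in the 32-bit-host branch are replaced by a single uPadding2 member now that ring-0 passes HM64ON32OP codes instead of RC addresses. As a general practice when trimming members like this, a compile-time check can pin whatever layout expectation remains; an illustrative sketch assuming IPRT's AssertCompile* macros (the structure is a stand-in, not HMInternal.h):

    #include <iprt/assert.h>

    /* Stand-in structure: one real pointer plus the padding member that takes
       the place of the handler pointers removed by this changeset. */
    typedef struct DEMOHMSWITCHERSTATE
    {
        void   *pfnHost32ToGuest64R0;   /* the 32->64 switcher entry point */
        void   *uPadding2;              /* placeholder for removed members */
    } DEMOHMSWITCHERSTATE;

    AssertCompileSize(DEMOHMSWITCHERSTATE, 2 * sizeof(void *));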
trunk/src/VBox/VMM/include/VMMSwitcher.mac
r45701 r45786 128 128 129 129 %ifdef DEBUG_STUFF 130 %define DEBUG_CHAR(ch) COM_CHAR ch 131 %define DEBUG_S_CHAR(ch) COM_S_CHAR ch 130 %define DEBUG_CHAR(ch) COM_CHAR ch 131 %define DEBUG32_CHAR(ch) COM_CHAR ch 132 %define DEBUG64_CHAR(ch) COM_CHAR ch 133 %define DEBUG_S_CHAR(ch) COM_S_CHAR ch 134 %define DEBUG32_S_CHAR(ch) COM32_S_CHAR ch 135 %define DEBUG64_S_CHAR(ch) COM64_S_CHAR ch 132 136 %else 133 137 %define DEBUG_CHAR(ch) 138 %define DEBUG32_CHAR(ch) 139 %define DEBUG64_CHAR(ch) 134 140 %define DEBUG_S_CHAR(ch) 141 %define DEBUG32_S_CHAR(ch) 142 %define DEBUG64_S_CHAR(ch) 135 143 %endif 136 144