Changeset 914 in vbox
- Timestamp: Feb 14, 2007 11:23:08 PM
- svn:sync-xref-src-repo-rev: 18641
- Location: trunk
- Files: 14 edited
trunk/include/VBox/pgm.h
r861 → r914:

       * @returns VBox status code.
       * @param   pVM         The virtual machine.
    -  * @param   pvAddr      Intermediate context address of the mapping. This must be entriely below 4GB!
    +  * @param   Addr        Intermediate context address of the mapping.
       * @param   HCPhys      Start of the range of physical pages. This must be entriely below 4GB!
       * @param   cbPages     Number of bytes to map.
       *
       * @remark  This API shall not be used to anything but mapping the switcher code.
       */
    - PGMR3DECL(int) PGMR3MapIntermediate(PVM pVM, void *pvAddr, RTHCPHYS HCPhys, unsigned cbPages);
    + PGMR3DECL(int) PGMR3MapIntermediate(PVM pVM, RTUINTPTR Addr, RTHCPHYS HCPhys, unsigned cbPages);
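The switcher code remains the only intended user of this API, but as a minimal caller sketch of the new signature (the address, physical start and size below are made-up placeholder values, not taken from the changeset):

    /* Map two pages of switcher code at an intermediate-context address below 4GB. */
    RTUINTPTR Addr   = 0x00e00000;      /* hypothetical intermediate-context address */
    RTHCPHYS  HCPhys = 0x00100000;      /* hypothetical physical start, below 4GB */
    int rc = PGMR3MapIntermediate(pVM, Addr, HCPhys, 2 * PAGE_SIZE);
    if (VBOX_FAILURE(rc))
        LogRel(("PGMR3MapIntermediate failed, rc=%Vrc\n", rc));

The point of the change is that the intermediate-context address is now a plain integer (RTUINTPTR) rather than a host ring-3 pointer, so no casting through void * is needed.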
trunk/include/VBox/sup.h
r335 → r914:

    Whitespace only: trailing blanks trimmed in the g_SUPGlobalInfoPage inline-assembly block and on a nearby #endif.

       * @returns error code specific to uFunction.
    -  * @param   pVM         Pointer to the Host Context mapping of the VM structure.
    +  * @param   pVMR0       Pointer to the Ring-0 (Host Context) mapping of the VM structure.
       * @param   uOperation  Operation to execute.
       * @param   pvArg       Argument.
       */
    - SUPR3DECL(int) SUPCallVMMR0(PVM pVM, unsigned uOperation, void *pvArg);
    + SUPR3DECL(int) SUPCallVMMR0(PVMR0 pVMR0, unsigned uOperation, void *pvArg);

    The same @param and first-parameter change is applied to SUPCallVMMR0Ex:
    - SUPR3DECL(int) SUPCallVMMR0Ex(PVM pVM, unsigned uOperation, void *pvArg, unsigned cbArg);
    + SUPR3DECL(int) SUPCallVMMR0Ex(PVMR0 pVMR0, unsigned uOperation, void *pvArg, unsigned cbArg);

       * @returns NULL on failure.
       * @param   cb          Number of bytes to allocate.
    -  * @param   ppvR0       Where to store the ring-0 mapping of the allocation. (optional)
    +  * @param   pR0Ptr      Where to store the ring-0 mapping of the allocation. (optional)
       * @param   pHCPhys     Where to store the physical address of the memory block.
    - SUPR3DECL(void *) SUPContAlloc2(unsigned cb, void **ppvR0, PRTHCPHYS pHCPhys);
    + SUPR3DECL(void *) SUPContAlloc2(unsigned cb, PRTR0PTR pR0Ptr, PRTHCPHYS pHCPhys);

    - SUPR0DECL(int) SUPR0ContAlloc(PSUPDRVSESSION pSession, unsigned cb, void **ppvR0, void **ppvR3, PRTHCPHYS pHCPhys);
    + SUPR0DECL(int) SUPR0ContAlloc(PSUPDRVSESSION pSession, unsigned cb, void **ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys);
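A short ring-3 sketch of the reworked SUPContAlloc2 contract (the buffer size is illustrative; the ring-0 address must only be handed to ring-0 code, never dereferenced in ring-3):

    RTHCPHYS HCPhys = NIL_RTHCPHYS;
    RTR0PTR  R0Ptr  = NIL_RTR0PTR;      /* optional out parameter for the ring-0 mapping */
    void *pvR3 = SUPContAlloc2(PAGE_SIZE, &R0Ptr, &HCPhys);
    if (pvR3)
    {
        /* pvR3 is usable here in ring-3; R0Ptr and HCPhys describe the same memory
           for ring-0 and physical consumers respectively. */
    }

The same split shows up in SUPCallVMMR0/SUPCallVMMR0Ex, which now take the ring-0 mapping of the VM structure (PVMR0) instead of the ring-3 PVM pointer.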
trunk/src/VBox/HostDrivers/Support/SUPDRVIOC.h
r912 → r914:

       /** The address of the ring-0 mapping of the allocated memory. */
    -  void           *pvR0;
    +  RTR0PTR         pvR0;
       /** The address of the ring-3 mapping of the allocated memory. */
    -  void           *pvR3;
    +  RTR3PTR         pvR3;
       /** The physical address of the allocation. */
       RTHCPHYS        HCPhys;

       uint32_t        u32SessionCookie;
       /** The VM handle. */
    -  PVM             pVM;
    +  PVMR0           pVMR0;
       /** Which operation to execute. */
       uint32_t        uOperation;
trunk/src/VBox/HostDrivers/Support/SUPDRVShared.c
r679 → r914:

       /*
        * Execute.
        */
    -  pOut->rc = pDevExt->pfnVMMR0Entry(pIn->pVM, pIn->uOperation, pIn->pvArg);
    +  pOut->rc = pDevExt->pfnVMMR0Entry(pIn->pVMR0, pIn->uOperation, pIn->pvArg);
       *pcbReturned = sizeof(*pOut);
       return 0;

    - SUPR0DECL(int) SUPR0ContAlloc(PSUPDRVSESSION pSession, unsigned cb, void **ppvR0, void **ppvR3, PRTHCPHYS pHCPhys)
    + SUPR0DECL(int) SUPR0ContAlloc(PSUPDRVSESSION pSession, unsigned cb, void **ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys)

    Whitespace only: trailing blanks trimmed in the "Call VMMR0Entry" comment of the IDT patch generator.
trunk/src/VBox/HostDrivers/Support/SUPLib.cpp
r874 → r914:

    - typedef DECLCALLBACK(int) FNCALLVMMR0(PVM pVM, unsigned uOperation, void *pvArg);
    + typedef DECLCALLBACK(int) FNCALLVMMR0(PVMR0 pVMR0, unsigned uOperation, void *pvArg);

    - SUPR3DECL(int) SUPCallVMMR0Ex(PVM pVM, unsigned uOperation, void *pvArg, unsigned cbArg)
    + SUPR3DECL(int) SUPCallVMMR0Ex(PVMR0 pVMR0, unsigned uOperation, void *pvArg, unsigned cbArg)
      ...
    -     In.pVM = pVM;
    +     In.pVMR0 = pVMR0;

    - SUPR3DECL(int) SUPCallVMMR0(PVM pVM, unsigned uOperation, void *pvArg)
    + SUPR3DECL(int) SUPCallVMMR0(PVMR0 pVMR0, unsigned uOperation, void *pvArg)
      {
      #ifndef VBOX_WITHOUT_IDT_PATCHING
    -     return g_pfnCallVMMR0(pVM, uOperation, pvArg);
    +     return g_pfnCallVMMR0(pVMR0, uOperation, pvArg);
      ...
    -     return SUPCallVMMR0Ex(pVM, uOperation, pvArg, pvArg ? sizeof(pvArg) : 0);
    +     return SUPCallVMMR0Ex(pVMR0, uOperation, pvArg, pvArg ? sizeof(pvArg) : 0);

    SUPContAlloc now forwards a nil ring-0 pointer, and SUPContAlloc2 takes PRTR0PTR:
    -     return SUPContAlloc2(cb, NULL, pHCPhys);
    +     return SUPContAlloc2(cb, NIL_RTR0PTR, pHCPhys);

    - SUPR3DECL(void *) SUPContAlloc2(unsigned cb, void **ppvR0, PRTHCPHYS pHCPhys)
    + SUPR3DECL(void *) SUPContAlloc2(unsigned cb, PRTR0PTR pR0Ptr, PRTHCPHYS pHCPhys)
      ...
      AssertPtr(pHCPhys);
      *pHCPhys = NIL_RTHCPHYS;
    + AssertPtrNull(pR0Ptr);
    + if (pR0Ptr)
    +     *pR0Ptr = NIL_RTR0PTR;
      ...
          rc = SUPPageAlloc(In.cb >> PAGE_SHIFT, &Out.pvR3);
          Out.HCPhys = (uintptr_t)Out.pvR3 + (PAGE_SHIFT * 1024);
    -     Out.pvR0 = Out.pvR3;
    +     Out.pvR0 = (uintptr_t)Out.pvR3;
      if (VBOX_SUCCESS(rc))
      {
          *pHCPhys = (RTHCPHYS)Out.HCPhys;
    -     if (ppvR0)
    -         *ppvR0 = Out.pvR0;
    +     if (pR0Ptr)
    +         *pR0Ptr = Out.pvR0;
          return Out.pvR3;
      }

    In the IDT-patch register-parameter comment the first argument is renamed:
    -  *          rdi          rcx         pVM
    +  *          rdi          rcx         pVMR0

    Plus small formatting touch-ups to the uOperation comparisons in SUPCallVMMR0.
trunk/src/VBox/HostDrivers/Support/testcase/tstInt.cpp
r913 → r914:

       PVMR0 pVMR0;
       RTHCPHYS HCPhysVM;
    -  PVM pVM = (PVM)SUPContAlloc2(sizeof(*pVM), (void **)&pVMR0, &HCPhysVM);
    +  PVM pVM = (PVM)SUPContAlloc2(sizeof(*pVM), &pVMR0, &HCPhysVM);
       if (pVM)
       {
           pVM->pVMGC = 0;
    -      pVM->pVMHC = pVM;
    +      pVM->pVMR3 = pVM;
    +      pVM->pVMR0 = pVMR0;
           pVM->HCPhysVM = HCPhysVM;
           pVM->pSession = pSession;

    In both the correctness loop and the TSC-timed loop, the cast is dropped:
    -          rc = SUPCallVMMR0((PVM)pVMR0, VMMR0_DO_NOP, NULL);
    +          rc = SUPCallVMMR0(pVMR0, VMMR0_DO_NOP, NULL);
trunk/src/VBox/VMM/HWACCM.cpp
r771 → r914: in both places where hardware-assisted execution is set up:

       pVM->hwaccm.s.fInitialized = true;

    -  int rc = SUPCallVMMR0(pVM, VMMR0_DO_HWACC_SETUP_VM, NULL);
    +  int rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_HWACC_SETUP_VM, NULL);
       AssertRC(rc);
       if (rc == VINF_SUCCESS)
trunk/src/VBox/VMM/PDMDriver.cpp
r23 → r914:

       if (    uOperation >= VMMR0_DO_SRV_START
           &&  uOperation <  VMMR0_DO_SRV_END)
    -      rc = SUPCallVMMR0Ex(pDrvIns->Internal.s.pVM, uOperation, pvArg, cbArg);
    +      rc = SUPCallVMMR0Ex(pDrvIns->Internal.s.pVM->pVMR0, uOperation, pvArg, cbArg);
       else
trunk/src/VBox/VMM/PDMLdr.cpp
r48 → r914: Darwin no longer uses the fake loader mode:

    - #if defined(__DARWIN__) || defined(__OS2__)
    + #if defined(__OS2__)
      # define PDMLDR_FAKE_MODE
      #endif
trunk/src/VBox/VMM/PGMMap.cpp
r838 → r914:

       * @returns VBox status code.
       * @param   pVM         The virtual machine.
    -  * @param   pvAddr      Intermediate context address of the mapping. This must be entriely below 4GB!
    +  * @param   Addr        Intermediate context address of the mapping.
       * @param   HCPhys      Start of the range of physical pages. This must be entriely below 4GB!
       * @param   cbPages     Number of bytes to map.
       *
       * @remark  This API shall not be used to anything but mapping the switcher code.
    -  * @todo    pvAddr must be a RTUINTPTR!
       */
    - PGMR3DECL(int) PGMR3MapIntermediate(PVM pVM, void *pvAddr, RTHCPHYS HCPhys, unsigned cbPages)
    + PGMR3DECL(int) PGMR3MapIntermediate(PVM pVM, RTUINTPTR Addr, RTHCPHYS HCPhys, unsigned cbPages)
      {
    -     LogFlow(("PGMR3MapIntermediate: pvAddr=%p HCPhys=%VHp cbPages=%#x\n", pvAddr, HCPhys, cbPages));
    +     LogFlow(("PGMR3MapIntermediate: Addr=%RTptr HCPhys=%VHp cbPages=%#x\n", Addr, HCPhys, cbPages));
      ...
          cbPages = RT_ALIGN(cbPages, PAGE_SIZE);
          HCPhys &= X86_PTE_PAE_PG_MASK;
    -     pvAddr = (void *)((RTUINTPTR)pvAddr & PAGE_BASE_MASK);
    +     Addr &= PAGE_BASE_MASK;
          /* We only care about the first 4GB, because on AMD64 we'll be repeating them all over the address space. */
    -     uint32_t uAddress = (uint32_t)(uintptr_t)pvAddr;
    +     uint32_t uAddress = (uint32_t)Addr;
      ...
          AssertMsg(pVM->pgm.s.pInterPD, ("Bad init order, paging.\n"));
          AssertMsg(cbPages <= (512 << PAGE_SHIFT), ("The mapping is too big %d bytes\n", cbPages));
    -     AssertMsg(HCPhys < _4G && HCPhys + cbPages < _4G, ("pvAddr=%p HCPhys=%VHp cbPages=%d\n", pvAddr, HCPhys, cbPages));
    +     AssertMsg(HCPhys < _4G && HCPhys + cbPages < _4G, ("Addr=%RTptr HCPhys=%VHp cbPages=%d\n", Addr, HCPhys, cbPages));
      ...
    -         AssertMsgFailed(("pvAddr=%p HCPhys=%VHp cbPages=%d\n", pvAddr, HCPhys, cbPages));
    -         LogRel(("pvAddr=%p HCPhys=%VHp cbPages=%d\n", pvAddr, HCPhys, cbPages));
    +         AssertMsgFailed(("Addr=%RTptr HCPhys=%VHp cbPages=%d\n", Addr, HCPhys, cbPages));
    +         LogRel(("Addr=%RTptr HCPhys=%VHp cbPages=%d\n", Addr, HCPhys, cbPages));
              return VERR_PGM_MAPPINGS_FIX_CONFLICT; /** @todo new error code */
trunk/src/VBox/VMM/VM.cpp
r872 → r914:

       RTHCPHYS HCPhysVM;
    -  PVM pVM = (PVM)SUPContAlloc(RT_ALIGN_Z(sizeof(*pVM), PAGE_SIZE), &HCPhysVM);
    +  PVMR0 pVMR0;
    +  PVM pVM = (PVM)SUPContAlloc2(RT_ALIGN_Z(sizeof(*pVM), PAGE_SIZE), &pVMR0, &HCPhysVM);
       if (pVM)
       {
      ...
           memset(pVM, 0, sizeof(*pVM));
           pVM->pVMHC = pVM;
    +      pVM->pVMR0 = pVMR0;
    +      pVM->pVMR3 = pVM;
           pVM->HCPhysVM = HCPhysVM;
           pVM->pSession = pSession;

    Whitespace only: trailing blanks trimmed in vmR3Save, in the power-off "@todo r=bird" comment, and in vmR3SetRuntimeErrorWorkerDoCall.
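Taken together with the tstInt.cpp and VMM.cpp changes, VM creation now records separate ring-3 and ring-0 self references. A condensed sketch of the pattern (error handling omitted):

    RTHCPHYS HCPhysVM;
    PVMR0    pVMR0;
    PVM pVM = (PVM)SUPContAlloc2(RT_ALIGN_Z(sizeof(*pVM), PAGE_SIZE), &pVMR0, &HCPhysVM);
    if (pVM)
    {
        memset(pVM, 0, sizeof(*pVM));
        pVM->pVMR3    = pVM;        /* ring-3 address of the VM structure */
        pVM->pVMR0    = pVMR0;      /* ring-0 mapping, passed to SUPCallVMMR0 and friends */
        pVM->HCPhysVM = HCPhysVM;   /* physical address of the structure */
    }

Ring-3 code that calls into ring-0 then passes pVM->pVMR0 rather than pVM itself, since the two mappings are no longer assumed to share an address.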
trunk/src/VBox/VMM/VMM.cpp
r873 → r914:

    In the bad-allocation retry array:
    -     void       *pvR0;
    +     RTR0PTR     pvR0;
          void       *pvR3;
          RTHCPHYS    HCPhys;
      } aBadTries[16];

    During VMM init, the switcher flag is now set before the saved-state registration, and the ring-0 VM handle is registered with the session when IDT patching is disabled:
    + /* GC switchers are enabled by default. Turned off by HWACCM. */
    + pVM->vmm.s.fSwitcherDisabled = false;
    +
      /*
       * Register the saved state data unit.
       */
      ...
          return rc;
    - /* GC switchers are enabled by default. Turned off by HWACCM. */
    - pVM->vmm.s.fSwitcherDisabled = false;
    + #ifdef VBOX_WITHOUT_IDT_PATCHING
    + /*
    +  * Register the Ring-0 VM handle with the session for fast ioctl calls.
    +  */
    + rc = SUPSetVMForFastIOCtl(pVM->pVMR0);
    + if (VBOX_FAILURE(rc))
    +     return rc;
    + #endif

    All ring-0 calls now pass the ring-0 VM handle (VMMR0_DO_VMMR0_INIT, VMMR0_DO_CALL_HYPERVISOR, VMMR0_DO_RAW_RUN and VMMR0_DO_HWACC_RUN, including the resume loops, the hypervisor-call trampolines and the VMM test/timing code), for example:
    -     rc = SUPCallVMMR0(pVM, VMMR0_DO_RAW_RUN, NULL);
    +     rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_RAW_RUN, NULL);

    Switcher fixups now resolve against the ring-0 mapping of the VM structure:
          uint32_t offCPUM = *u.pu32++;
          Assert(offCPUM < sizeof(pVM->cpum));
    -     *uSrc.pu32 = (uint32_t)((uintptr_t)&pVM->cpum + offCPUM);
    +     *uSrc.pu32 = (uint32_t)pVM->pVMR0 + RT_OFFSETOF(VM, cpum) + offCPUM;
          break;

      /*
    -  * Make 32-bit HC pointer given VM offset.
    +  * Make 32-bit R0 pointer given VM offset.
       */
      case FIX_HC_VM_OFF:
      {
          uint32_t offVM = *u.pu32++;
          Assert(offVM < sizeof(VM));
    -     *uSrc.pu32 = (uint32_t)(uintptr_t)pVM + offVM;
    +     *uSrc.pu32 = (uint32_t)pVM->pVMR0 + offVM;
          break;
      }
      ...
          Assert(offSrc < pSwitcher->cbCode);
    -     *uSrc.pu64 = (uintptr_t)&pVM->cpum;
    +     *uSrc.pu64 = pVM->pVMR0 + RT_OFFSETOF(VM, cpum);
          break;

    Plus trailing-whitespace cleanup in the VMM test code.
trunk/src/VBox/VMM/VMMInternal.h
r847 → r914:

       /** Pointer to core code ring-3 mapping - contiguous memory.
        * At present this only means the context switcher code. */
    -  RTHCPTR                     pvHCCoreCodeR3;
    +  RTR3PTR                     pvHCCoreCodeR3;
       /** Pointer to core code ring-0 mapping - contiguous memory.
        * At present this only means the context switcher code. */
    -  RTHCPTR                     pvHCCoreCodeR0;
    +  RTR0PTR                     pvHCCoreCodeR0;
       /** Pointer to core code guest context mapping. */
       RTGCPTR                     pvGCCoreCode;

    Whitespace only: trailing blanks trimmed around the 32-bit/64-bit jump-buffer register fields and near CallHostR0JmpBuf.
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r848 → r914: the ring-0 init path now validates the ring-0 VM pointer:

           ||  VBOX_GET_VERSION_MINOR(uVersion) < VBOX_VERSION_MINOR))
           return VERR_VERSION_MISMATCH;
    +  if (    !VALID_PTR(pVM)
    +      ||  pVM->pVMR0 != pVM)
    +      return VERR_INVALID_PARAMETER;

    Whitespace only: trailing blanks trimmed in the IDT-patching comments and the pvRet argument checks.