Changeset 19454 in vbox

Timestamp: May 6, 2009 7:20:18 PM (16 years ago)
Location:  trunk
Files:     20 edited
trunk/include/VBox/gmm.h (r19381 → r19454)

The idCpu parameter of every ring-0 GMM entry point changes from "unsigned" to "VMCPUID":

-GMMR0DECL(int)  GMMR0InitialReservation(PVM pVM, unsigned idCpu, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
+GMMR0DECL(int)  GMMR0InitialReservation(PVM pVM, VMCPUID idCpu, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
                                         GMMOCPOLICY enmPolicy, GMMPRIORITY enmPriority);

The same one-word substitution is applied to the declarations of GMMR0UpdateReservation, GMMR0AllocateHandyPages, GMMR0AllocatePages, GMMR0FreePages, GMMR0BalloonedPages, GMMR0DeflatedBalloon, GMMR0MapUnmapChunk, GMMR0SeedChunk, GMMR0InitialReservationReq, GMMR0UpdateReservationReq, GMMR0AllocatePagesReq, GMMR0FreePagesReq, GMMR0BalloonedPagesReq and GMMR0MapUnmapChunkReq.
trunk/include/VBox/gvmm.h (r19406 → r19454)

Two existing statistics comments (cWakeUpNotHalted, cWakeUpWakeUps) are re-wrapped, and two new GVMMR0Poke counters are added to the scheduler statistics after cWakeUpWakeUps:

+    /** The number of calls to GVMMR0Poke. */
+    uint64_t        cPokeCalls;
+    /** The number of times the EMT thread wasn't actually busy when
+     * GVMMR0Poke was called. */
+    uint64_t        cPokeNotBusy;
trunk/include/VBox/sup.h (r19257 → r19454)

The idCpu parameter becomes a VMCPUID and its doxygen description changes from "VMCPU id." to "The virtual CPU ID.":

-SUPR3DECL(int) SUPCallVMMR0(PVMR0 pVMR0, unsigned idCpu, unsigned uOperation, void *pvArg);
+SUPR3DECL(int) SUPCallVMMR0(PVMR0 pVMR0, VMCPUID idCpu, unsigned uOperation, void *pvArg);

-SUPR3DECL(int) SUPCallVMMR0Fast(PVMR0 pVMR0, unsigned uOperation, unsigned idCpu);
+SUPR3DECL(int) SUPCallVMMR0Fast(PVMR0 pVMR0, unsigned uOperation, VMCPUID idCpu);

-SUPR3DECL(int) SUPCallVMMR0Ex(PVMR0 pVMR0, unsigned idCpu, unsigned uOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr);
+SUPR3DECL(int) SUPCallVMMR0Ex(PVMR0 pVMR0, VMCPUID idCpu, unsigned uOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr);
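A minimal usage sketch (not part of the changeset) of how ring-3 callers are expected to use the revised SUPCallVMMR0Ex signature. The request packets are assumed to be initialised elsewhere, and the GMM operation in the last call is an assumption based on VMMR0OPERATION names used elsewhere in the tree; the first two calls mirror call sites touched later in this changeset.

    #include <VBox/sup.h>
    #include <VBox/vmm.h>
    #include <VBox/err.h>

    static int sketchCallVMMR0Patterns(PVMR0 pVMR0, VMCPUID idCpu,
                                       PSUPVMMR0REQHDR pGlobalReq,
                                       PSUPVMMR0REQHDR pVmReq,
                                       PSUPVMMR0REQHDR pVCpuReq)
    {
        /* Global request with no VM: both the VM handle and the CPU ID are NIL. */
        int rc = SUPCallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_GVMM_CREATE_VM, 0, pGlobalReq);

        /* Per-VM request that is not bound to a particular EMT. */
        if (RT_SUCCESS(rc))
            rc = SUPCallVMMR0Ex(pVMR0, NIL_VMCPUID, VMMR0_DO_GVMM_QUERY_STATISTICS, 0, pVmReq);

        /* Per-VCPU request issued by an EMT, passing its own virtual CPU ID. */
        if (RT_SUCCESS(rc))
            rc = SUPCallVMMR0Ex(pVMR0, idCpu, VMMR0_DO_GMM_ALLOCATE_PAGES, 0, pVCpuReq);
        return rc;
    }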
trunk/include/VBox/vm.h (r19435 → r19454)

A host CPU ID field is added to the VMCPU structure right after hNativeThread, and the alignment filler shrinks by one uint32_t so the overall layout stays unchanged:

     /** The native thread handle. */
     RTNATIVETHREAD          hNativeThread;
+    /** Which host CPU ID is this EMT running on.
+     * Only valid when in RC or HWACCMR0 with scheduling disabled. */
+    RTCPUID volatile        idHostCpu;

-    uint32_t                au32Alignment[HC_ARCH_BITS == 32 ? 8 : 4];
+    uint32_t                au32Alignment[HC_ARCH_BITS == 32 ? 7 : 3];
trunk/include/VBox/vm.mac (r19217 → r19454)

The field definitions in "struc VM" and "struc VMCPU" are re-indented (whitespace-only column alignment), and the new host CPU ID field is mirrored into the assembly layout of VMCPU:

     .hNativeThread          RTR0PTR_RES 1
+    .idHostCpu              resd 1
trunk/include/VBox/vmm.h (r19437 → r19454)

-VMMDECL(PVMCPU)     VMMGetCpuById(PVM pVM, RTCPUID idCpu);
+VMMDECL(PVMCPU)     VMMGetCpuById(PVM pVM, VMCPUID idCpu);

-VMMR0DECL(void)     VMMR0EntryFast(PVM pVM, unsigned idCpu, VMMR0OPERATION enmOperation);
+VMMR0DECL(void)     VMMR0EntryFast(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation);
-VMMR0DECL(int)      VMMR0EntryEx(PVM pVM, unsigned idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION);
+VMMR0DECL(int)      VMMR0EntryEx(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION);
trunk/src/VBox/Devices/Network/testcase/tstIntNet-1.cpp (r19257 → r19454)

Every SUPCallVMMR0Ex call now passes NIL_VMCPUID instead of the dummy "0 /* VPCU 0 */" argument, for example:

-    rc = SUPCallVMMR0Ex(NIL_RTR0PTR, 0 /* VPCU 0 */, VMMR0_DO_INTNET_IF_SEND, 0, &SendReq.Hdr);
+    rc = SUPCallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_INTNET_IF_SEND, 0, &SendReq.Hdr);

The same substitution is made in the VMMR0_DO_INTNET_IF_WAIT, VMMR0_DO_INTNET_OPEN, VMMR0_DO_INTNET_IF_GET_RING3_BUFFER, VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE and VMMR0_DO_INTNET_IF_SET_ACTIVE calls.
trunk/src/VBox/HostDrivers/Support/SUPDrv.c (r19393 → r19454)

-DECLASM(int)    supdrvNtWrapVMMR0EntryEx(PFNRT pfnVMMR0EntryEx, PVM pVM, unsigned idCpu, unsigned uOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession);
+DECLASM(int)    supdrvNtWrapVMMR0EntryEx(PFNRT pfnVMMR0EntryEx, PVM pVM, VMCPUID idCpu, unsigned uOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession);
-DECLASM(int)    supdrvNtWrapVMMR0EntryFast(PFNRT pfnVMMR0EntryFast, PVM pVM, unsigned idCpu, unsigned uOperation);
+DECLASM(int)    supdrvNtWrapVMMR0EntryFast(PFNRT pfnVMMR0EntryFast, PVM pVM, VMCPUID idCpu, unsigned uOperation);

-int VBOXCALL    supdrvIOCtlFast(uintptr_t uIOCtl, unsigned idCpu, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
+int VBOXCALL    supdrvIOCtlFast(uintptr_t uIOCtl, VMCPUID idCpu, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
trunk/src/VBox/HostDrivers/Support/SUPDrvInternal.h (r19281 → r19454)

-    DECLR0CALLBACKMEMBER(void, pfnVMMR0EntryFast, (PVM pVM, unsigned idCpu, unsigned uOperation));
+    DECLR0CALLBACKMEMBER(void, pfnVMMR0EntryFast, (PVM pVM, VMCPUID idCpu, unsigned uOperation));
-    DECLR0CALLBACKMEMBER(int,  pfnVMMR0EntryEx, (PVM pVM, unsigned idCpu, unsigned uOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession));
+    DECLR0CALLBACKMEMBER(int,  pfnVMMR0EntryEx, (PVM pVM, VMCPUID idCpu, unsigned uOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession));

-int  VBOXCALL   supdrvIOCtlFast(uintptr_t uIOCtl, unsigned idCpu, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession);
+int  VBOXCALL   supdrvIOCtlFast(uintptr_t uIOCtl, VMCPUID idCpu, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession);
trunk/src/VBox/HostDrivers/Support/SUPLib.cpp (r19393 → r19454)

-SUPR3DECL(int) SUPCallVMMR0Fast(PVMR0 pVMR0, unsigned uOperation, unsigned idCpu)
+SUPR3DECL(int) SUPCallVMMR0Fast(PVMR0 pVMR0, unsigned uOperation, VMCPUID idCpu)
-SUPR3DECL(int) SUPCallVMMR0Ex(PVMR0 pVMR0, unsigned idCpu, unsigned uOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr)
+SUPR3DECL(int) SUPCallVMMR0Ex(PVMR0 pVMR0, VMCPUID idCpu, unsigned uOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr)
-SUPR3DECL(int) SUPCallVMMR0(PVMR0 pVMR0, unsigned idCpu, unsigned uOperation, void *pvArg)
+SUPR3DECL(int) SUPCallVMMR0(PVMR0 pVMR0, VMCPUID idCpu, unsigned uOperation, void *pvArg)
trunk/src/VBox/NetworkServices/DHCP/VBoxNetDHCP.cpp (r19257 → r19454)

As in tstIntNet-1.cpp, the dummy "0 /* VPCU 0 */" argument becomes NIL_VMCPUID in every SUPCallVMMR0Ex call (VMMR0_DO_INTNET_IF_CLOSE, VMMR0_DO_INTNET_OPEN, VMMR0_DO_INTNET_IF_GET_RING3_BUFFER, VMMR0_DO_INTNET_IF_SET_ACTIVE and VMMR0_DO_INTNET_IF_WAIT), e.g.:

-        int rc = SUPCallVMMR0Ex(NIL_RTR0PTR, 0 /* VPCU 0 */, VMMR0_DO_INTNET_IF_CLOSE, 0, &CloseReq.Hdr);
+        int rc = SUPCallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_INTNET_IF_CLOSE, 0, &CloseReq.Hdr);
trunk/src/VBox/NetworkServices/NetLib/VBoxNetInt.cpp (r17783 → r19454)

-    return SUPCallVMMR0Ex(NIL_RTR0PTR, VMMR0_DO_INTNET_IF_SEND, 0, &SendReq.Hdr);
+    return SUPCallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_INTNET_IF_SEND, 0, &SendReq.Hdr);
trunk/src/VBox/NetworkServices/NetLib/VBoxNetIntIf.cpp (r19257 → r19454)

-    return SUPCallVMMR0Ex(NIL_RTR0PTR, 0 /* VPCU 0 */, VMMR0_DO_INTNET_IF_SEND, 0, &SendReq.Hdr);
+    return SUPCallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_INTNET_IF_SEND, 0, &SendReq.Hdr);
trunk/src/VBox/VMM/PDMDriver.cpp (r19257 → r19454)

-        rc = SUPCallVMMR0Ex(pDrvIns->Internal.s.pVM->pVMR0, 0 /* idCpu not relevant */, uOperation, 0, (PSUPVMMR0REQHDR)pvArg);
+        rc = SUPCallVMMR0Ex(pDrvIns->Internal.s.pVM->pVMR0, NIL_VMCPUID, uOperation, 0, (PSUPVMMR0REQHDR)pvArg);
trunk/src/VBox/VMM/STAM.cpp (r17369 → r19454)

New STAM descriptors are registered for the poke statistics in both the per-VM and the summary trees: /GVMM/VM/PokeCalls ("The number of calls to GVMMR0Poke."), /GVMM/VM/PokeNotBusy ("The number of times the EMT thread wasn't actually busy when GVMMR0Poke was called."), /GVMM/Sum/PokeCalls and /GVMM/Sum/PokeNotBusy.

The GVMM statistics reset and query requests now go through SUPCallVMMR0Ex instead of VMMR3CallR0:

-        rc = VMMR3CallR0(pVM, VMMR0_DO_GVMM_RESET_STATISTICS, 0, &GVMMReq.Hdr);
+        rc = SUPCallVMMR0Ex(pVM->pVMR0, NIL_VMCPUID, VMMR0_DO_GVMM_RESET_STATISTICS, 0, &GVMMReq.Hdr);

-    int rc = VMMR3CallR0(pVM, VMMR0_DO_GVMM_QUERY_STATISTICS, 0, &Req.Hdr);
+    int rc = SUPCallVMMR0Ex(pVM->pVMR0, NIL_VMCPUID, VMMR0_DO_GVMM_QUERY_STATISTICS, 0, &Req.Hdr);
trunk/src/VBox/VMM/VM.cpp (r19441 → r19454)

-    rc = SUPCallVMMR0Ex(NIL_RTR0PTR, 0 /* VCPU 0 */, VMMR0_DO_GVMM_CREATE_VM, 0, &CreateVMReq.Hdr);
+    rc = SUPCallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_GVMM_CREATE_VM, 0, &CreateVMReq.Hdr);
trunk/src/VBox/VMM/VMMR0/GMMR0.cpp (r19381 → r19454)

The idCpu parameter becomes a VMCPUID in GMMR0InitialReservation, GMMR0InitialReservationReq, GMMR0UpdateReservation, GMMR0UpdateReservationReq, GMMR0AllocateHandyPages, GMMR0AllocatePages, GMMR0AllocatePagesReq, GMMR0FreePages, GMMR0FreePagesReq, GMMR0BalloonedPages, GMMR0BalloonedPagesReq, GMMR0DeflatedBalloon, GMMR0MapUnmapChunk, GMMR0MapUnmapChunkReq and GMMR0SeedChunk, for example:

-GMMR0DECL(int) GMMR0InitialReservation(PVM pVM, unsigned idCpu, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
+GMMR0DECL(int) GMMR0InitialReservation(PVM pVM, VMCPUID idCpu, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
                                        GMMOCPOLICY enmPolicy, GMMPRIORITY enmPriority)

In every worker that takes an idCpu (GMMR0InitialReservation, GMMR0UpdateReservation, GMMR0AllocateHandyPages, GMMR0AllocatePages, GMMR0FreePages, GMMR0BalloonedPages, GMMR0DeflatedBalloon, GMMR0MapUnmapChunk and GMMR0SeedChunk) the hand-rolled owner check is replaced by a GVMMR0ByVMAndEMT lookup:

-    PGVM pGVM = GVMMR0ByVM(pVM);
-    if (RT_UNLIKELY(!pGVM))
-        return VERR_INVALID_PARAMETER;
-    if (RT_UNLIKELY(pGVM->aCpus[idCpu].hEMT != RTThreadNativeSelf()))
-        return VERR_NOT_OWNER;
+    PGVM pGVM;
+    int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
+    if (RT_FAILURE(rc))
+        return rc;

Because rc is now declared up front, the later declaration in each of these functions turns into a plain assignment:

-    int rc = RTSemFastMutexRequest(pGMM->Mtx);
+    rc = RTSemFastMutexRequest(pGMM->Mtx);

(in GMMR0SeedChunk the corresponding line is the RTR0MemObjLockUser call).
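Since the same prologue now opens every GMMR0 worker, a consolidated sketch of the new pattern may help; the worker name and the elided middle section are illustrative only, while the individual calls are the ones appearing in the diff above.

    /* Hypothetical GMM worker showing the validation pattern introduced above. */
    GMMR0DECL(int) GMMR0ExampleWorker(PVM pVM, VMCPUID idCpu)
    {
        /* Validate the global GMM instance. */
        PGMM pGMM;
        GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);

        /* Resolve the global VM structure and, in the same call, verify that the
         * caller is the EMT bound to idCpu (fails with VERR_NOT_OWNER otherwise). */
        PGVM pGVM;
        int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
        if (RT_FAILURE(rc))
            return rc;

        /* ... per-operation parameter validation ... */

        /* rc is reused here instead of being re-declared. */
        rc = RTSemFastMutexRequest(pGMM->Mtx);
        AssertRC(rc);

        /* ... do the actual work under the GMM mutex ... */

        RTSemFastMutexRelease(pGMM->Mtx);
        return rc;
    }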
trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp (r19435 → r19454)

+#include <iprt/mp.h>

An idCpu range assertion next to the pCurGVCpu lookup is dropped:

-    Assert(idCpu < pGVM->cCpus);

In gvmmR0SchedWakeUpOne the "int rc;" declaration moves down next to its first use, and gvmmR0SchedPokeOne gets a real implementation in place of the old "@todo":

-    if (pVCpu->enmState != VMCPUSTATE_STARTED_EXEC)
-        return VINF_GVM_NOT_BUSY_IN_GC;
-
-    /** @todo do the actual poking, need to get the current cpu id from HWACC or
-     *        somewhere and then call RTMpPokeCpu(). */
-
-    return VINF_SUCCESS;
+    pGVM->gvmm.s.StatsSched.cPokeCalls++;
+
+    RTCPUID idHostCpu = pVCpu->idHostCpu;
+    if (    idHostCpu == NIL_RTCPUID
+        ||  VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_EXEC)
+    {
+        pGVM->gvmm.s.StatsSched.cPokeNotBusy++;
+        return VINF_GVM_NOT_BUSY_IN_GC;
+    }
+
+    RTMpPokeCpu(idHostCpu);
+    return VINF_SUCCESS;

GVMMR0QueryStatistics now sums cPokeCalls and cPokeNotBusy along with the other scheduler counters, and GVMMR0ResetStatistics gains matching MAYBE_RESET_FIELD(cPokeCalls) / MAYBE_RESET_FIELD(cPokeNotBusy) entries in both of its reset loops.
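Read together with the VMMR0.cpp change below, the new poke path works roughly as follows; this is a sketch assembled from the two diffs rather than a verbatim copy of either file.

    /* EMT side (ring-0 fast path): publish the host CPU while guest code runs. */
    ASMAtomicWriteU32(&pVCpu->idHostCpu, RTMpCpuId());
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
    rc = pVM->vmm.s.pfnHostToGuestR0(pVM);          /* execute guest code */
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
    ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);

    /* Poker side (gvmmR0SchedPokeOne): interrupt that host CPU if the target VCPU
     * is currently executing guest code, otherwise report it as not busy. */
    RTCPUID idHostCpu = pVCpu->idHostCpu;
    if (   idHostCpu == NIL_RTCPUID
        || VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_EXEC)
        return VINF_GVM_NOT_BUSY_IN_GC;
    RTMpPokeCpu(idHostCpu);                         /* poke (IPI) the remote host CPU */
    return VINF_SUCCESS;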
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp (r19434 → r19454)

VMMR0EntryFast now takes a VMCPUID ("The Virtual CPU ID of the calling EMT") and only resolves pVCpu after the bounds check:

-VMMR0DECL(void) VMMR0EntryFast(PVM pVM, unsigned idCpu, VMMR0OPERATION enmOperation)
-{
-    PVMCPU pVCpu = &pVM->aCpus[idCpu];
-
+VMMR0DECL(void) VMMR0EntryFast(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
+{
     if (RT_UNLIKELY(idCpu >= pVM->cCPUs))
     {
         ...
         return;
     }
+    PVMCPU pVCpu = &pVM->aCpus[idCpu];

In the VMMR0_DO_RAW_RUN path the EMT publishes the host CPU it is running on and its execution state around the world switch:

+            ASMAtomicWriteU32(&pVCpu->idHostCpu, RTMpCpuId());
+            VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
+
             TMNotifyStartOfExecution(pVCpu);
             rc = pVM->vmm.s.pfnHostToGuestR0(pVM);
             pVM->vmm.s.iLastGZRc = rc;
             TMNotifyEndOfExecution(pVCpu);
+
+            VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
+            ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);

In the VMMR0_DO_HWACC_RUN path the local pVCpu definition is dropped (pVCpu is now established at the top of the function), and idHostCpu is written with ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID) before the HWACCMR0SuspendPending() check and again after the run loop has stored iLastGZRc.

vmmR0EntryExWorker and VMMR0EntryEx also take a VMCPUID, documented as "Virtual CPU ID argument. Must be NIL_VMCPUID if pVM is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't". The idCpu validation is extended accordingly:

-        if (RT_UNLIKELY(idCpu >= pVM->cCPUs))
-        {
-            SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu (%d vs cCPUs=%d\n", idCpu, pVM->cCPUs);
-            return VERR_INVALID_PARAMETER;
-        }
-    }
+        if (RT_UNLIKELY(idCpu >= pVM->cCPUs && idCpu != NIL_VMCPUID))
+        {
+            SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu (%u vs cCPUs=%u)\n", idCpu, pVM->cCPUs);
+            return VERR_INVALID_PARAMETER;
+        }
+    }
+    else if (RT_UNLIKELY(idCpu != NIL_VMCPUID))
+    {
+        SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
+        return VERR_INVALID_PARAMETER;
+    }

Individual operations then enforce their own requirements: VMMR0_DO_GVMM_CREATE_VM, the VMMR0_DO_GCFGM_* values and all VMMR0_DO_INTNET_* requests reject anything other than NIL_VMCPUID, while VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES and VMMR0_DO_TEST_SWITCHER3264 return VERR_INVALID_CPU_ID when idCpu is NIL_VMCPUID. The VMMR0ENTRYEXARGS structure uses VMCPUID for its idCpu member, a stale "Switch to GC" comment becomes "Switch to RC", and the direct-dispatch condition in VMMR0EntryEx additionally requires idCpu < pVM->cCPUs.
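To make the new calling contract concrete, here is a condensed sketch of the idCpu checks that vmmR0EntryExWorker now performs; the helper name and the reduced switch are illustrative only, and the operation classification follows the cases touched in this diff.

    #include <VBox/vm.h>
    #include <VBox/vmm.h>
    #include <VBox/err.h>

    /* Hypothetical helper mirroring the validation logic added to vmmR0EntryExWorker. */
    static int sketchValidateIdCpu(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
    {
        if (pVM)
        {
            /* With a VM: idCpu must name one of its VCPUs, or be NIL_VMCPUID for
             * operations that are not tied to a particular EMT. */
            if (RT_UNLIKELY(idCpu >= pVM->cCPUs && idCpu != NIL_VMCPUID))
                return VERR_INVALID_PARAMETER;
        }
        else if (RT_UNLIKELY(idCpu != NIL_VMCPUID))
            return VERR_INVALID_PARAMETER;  /* no VM: only NIL_VMCPUID is acceptable */

        switch (enmOperation)
        {
            case VMMR0_DO_GVMM_CREATE_VM:               /* global request */
                return idCpu == NIL_VMCPUID ? VINF_SUCCESS : VERR_INVALID_PARAMETER;
            case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:     /* per-VCPU request */
                return idCpu != NIL_VMCPUID ? VINF_SUCCESS : VERR_INVALID_CPU_ID;
            default:
                return VINF_SUCCESS;                    /* other cases as in the real worker */
        }
    }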
trunk/src/VBox/VMM/testcase/tstGlobalConfig.cpp (r19257 → r19454)

-        rc = SUPCallVMMR0Ex(NIL_RTR0PTR, 0 /* VCPU 0 */, enmOp, 0, &Req.Hdr);
+        rc = SUPCallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, enmOp, 0, &Req.Hdr);