Changeset 19529 in vbox for trunk/src/VBox
- Timestamp:
- May 8, 2009 2:37:30 PM (16 years ago)
- Location:
- trunk/src/VBox/VMM
- Files:
-
- 2 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMM.cpp
r19478 r19529 138 138 else 139 139 AssertMsgRCReturn(rc, ("Configuration error. Failed to query \"YieldEMTInterval\", rc=%Rrc\n", rc), rc); 140 141 /* 142 * Initialize the VMM sync critical section. 143 */ 144 rc = RTCritSectInit(&pVM->vmm.s.CritSectSync); 145 AssertRCReturn(rc, rc); 140 146 141 147 /* GC switchers are enabled by default. Turned off by HWACCM. */ … … 612 618 } 613 619 620 RTCritSectDelete(&pVM->vmm.s.CritSectSync); 621 614 622 #ifdef VBOX_STRICT_VMM_STACK 615 623 /* … … 1148 1156 1149 1157 /** 1150 * OnVCPU worker for VMMSendSipi.1158 * VCPU worker for VMMSendSipi. 1151 1159 * 1152 1160 * @param pVM The VM to operate on. … … 1215 1223 AssertRC(rc); 1216 1224 VMR3ReqFree(pReq); 1225 } 1226 1227 1228 /** 1229 * VCPU worker for VMMR3SynchronizeAllVCpus. 1230 * 1231 * @param pVM The VM to operate on. 1232 * @param idCpu Virtual CPU to perform SIPI on 1233 * @param uVector SIPI vector 1234 */ 1235 DECLCALLBACK(int) vmmR3SyncVCpu(PVM pVM) 1236 { 1237 /* Block until the job in the caller has finished. */ 1238 RTCritSectEnter(&pVM->vmm.s.CritSectSync); 1239 RTCritSectLeave(&pVM->vmm.s.CritSectSync); 1240 return VINF_SUCCESS; 1241 } 1242 1243 1244 /** 1245 * Atomically execute a callback handler 1246 * Note: This is very expensive; avoid using it frequently! 1247 * 1248 * @param pVM The VM to operate on. 1249 * @param pfnHandler Callback handler 1250 * @param pvUser User specified parameter 1251 */ 1252 VMMR3DECL(int) VMMR3AtomicExecuteHandler(PVM pVM, PFNATOMICHANDLER pfnHandler, void *pvUser) 1253 { 1254 int rc; 1255 PVMCPU pVCpu = VMMGetCpu(pVM); 1256 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT); 1257 1258 /* Shortcut for the uniprocessor case. 
*/ 1259 if (pVM->cCPUs == 1) 1260 return pfnHandler(pVM, pvUser); 1261 1262 RTCritSectEnter(&pVM->vmm.s.CritSectSync); 1263 for (VMCPUID idCpu = 0; idCpu < pVM->cCPUs; idCpu++) 1264 { 1265 if (idCpu != pVCpu->idCpu) 1266 { 1267 rc = VMR3ReqCallU(pVM->pUVM, idCpu, NULL, 0, VMREQFLAGS_NO_WAIT, 1268 (PFNRT)vmmR3SyncVCpu, 1, pVM); 1269 AssertRC(rc); 1270 } 1271 } 1272 /* Wait until all other VCPUs are waiting for us. */ 1273 while (RTCritSectGetWaiters(&pVM->vmm.s.CritSectSync) != (pVM->cCPUs - 1)) 1274 RTThreadSleep(1); 1275 1276 rc = pfnHandler(pVM, pvUser); 1277 RTCritSectLeave(&pVM->vmm.s.CritSectSync); 1278 return rc; 1217 1279 } 1218 1280 -
trunk/src/VBox/VMM/VMMInternal.h
r19462 r19529 247 247 /** The timestamp of the previous yield. (nano) */ 248 248 uint64_t u64LastYield; 249 250 /** Critical section. 251 * Used for synchronizing all VCPUs. 252 */ 253 RTCRITSECT CritSectSync; 249 254 250 255 /** Buffer for storing the standard assertion message for a ring-0 assertion.
Note:
See TracChangeset
for help on using the changeset viewer.