Changeset 54395 in vbox
- Timestamp: Feb 23, 2015 5:34:01 PM
- Location: trunk/src/VBox
- Files: 4 edited
trunk/src/VBox/HostDrivers/Support/SUPDrvGip.cpp
r54392 → r54395

@@ … @@
 #define GIP_TSC_DELTA_SYNC_PRESTART_WORKER        5
 /** The TSC-refinement interval in seconds. */
-#define GIP_TSC_REFINE_PREIOD_IN_SECS             5
+#define GIP_TSC_REFINE_PERIOD_IN_SECS             5
 /** The TSC-delta threshold for the SUPGIPUSETSCDELTA_PRACTICALLY_ZERO rating */
 #define GIP_TSC_DELTA_THRESHOLD_PRACTICALLY_ZERO  32
@@ … @@
 static DECLCALLBACK(void) supdrvGipAsyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick);
 static void supdrvGipInitCpu(PSUPGLOBALINFOPAGE pGip, PSUPGIPCPU pCpu, uint64_t u64NanoTS, uint64_t uCpuHz);
+static int  supdrvMeasureInitialTscDeltas(PSUPDRVDEVEXT pDevExt);
+static int  supdrvMeasureTscDeltaOne(PSUPDRVDEVEXT pDevExt, uint32_t idxWorker);
 #ifdef SUPDRV_USE_TSC_DELTA_THREAD
 static int  supdrvTscDeltaThreadInit(PSUPDRVDEVEXT pDevExt);
 static void supdrvTscDeltaTerm(PSUPDRVDEVEXT pDevExt);
-static int  supdrvTscDeltaThreadWaitForOnlineCpus(PSUPDRVDEVEXT pDevExt);
+static void supdrvTscDeltaThreadStartMeasurement(PSUPDRVDEVEXT pDevExt);
 #endif
 
@@ … @@
  * @note   Don't you dare change the delta calculation.  If you really do, make
  *         sure you update all places where it's used (IPRT, SUPLibAll.cpp,
- *         SUPDrv.c, supdrvGipMpEvent, and more).
+ *         SUPDrv.c, supdrvGipMpEvent(), and more).
  */
 DECLINLINE(int) supdrvTscDeltaApply(PSUPGLOBALINFOPAGE pGip, uint64_t *puTsc, uint16_t idApic, bool *pfDeltaApplied)
@@ … @@
 
 /**
- * Used by supdrvInitRefineInvariantTscFreqTimer and supdrvGipInitMeasureTscFreq
- * to update the TSC frequency related GIP variables.
+ * Used by supdrvInitRefineInvariantTscFreqTimer() and
+ * supdrvGipInitMeasureTscFreq() to update the TSC frequency related GIP
+ * variables.
  *
  * @param   pGip    The GIP.
@@ … @@
  *
  * This is started during driver init and fires once
- * GIP_TSC_REFINE_PREIOD_IN_SECS seconds later.
+ * GIP_TSC_REFINE_PERIOD_IN_SECS seconds later.
  *
  * @param   pTimer  The timer.
@@ … @@
  *        an interrupt handler with higher priority than the clock
  *        interrupt, or spinning for ages in timer handlers is frowned
- *        upon, this look must be disabled!
+ *        upon, this code must be disabled!
  *
  *        Darwin, FreeBSD, Linux, Solaris, Windows 8.1+:
@@ … @@
     /*
      * If the above measurement was taken on a different CPU than the one we
-     * started the rprocess on, cTscTicksElapsed will need to be adjusted with
+     * started the process on, cTscTicksElapsed will need to be adjusted with
      * the TSC deltas of both the CPUs.
      *
@@ … @@
      * calculations.
      */
-    else if (cNsElapsed <= GIP_TSC_REFINE_PREIOD_IN_SECS * 5 * RT_NS_1SEC_64)
+    else if (cNsElapsed <= GIP_TSC_REFINE_PERIOD_IN_SECS * 5 * RT_NS_1SEC_64)
     {
         int rc = RTTimerStart(pTimer, RT_NS_1SEC);
@@ … @@
     {
         SUPR0Printf("vboxdrv: Failed to refine invariant TSC frequency because deltas are unavailable after %u (%u) seconds\n",
-                    (uint32_t)(cNsElapsed / RT_NS_1SEC), GIP_TSC_REFINE_PREIOD_IN_SECS);
+                    (uint32_t)(cNsElapsed / RT_NS_1SEC), GIP_TSC_REFINE_PERIOD_IN_SECS);
         SUPR0Printf("vboxdrv: start: %u, %u, %#llx  stop: %u, %u, %#llx\n",
                     iStartCpuSet, iStartGipCpu, iStartTscDelta, iStopCpuSet, iStopGipCpu, iStopTscDelta);
@@ … @@
      * Reschedule the timer if we haven't yet reached the defined refinement period.
      */
-    if (cNsElapsed < GIP_TSC_REFINE_PREIOD_IN_SECS * RT_NS_1SEC_64)
+    if (cNsElapsed < GIP_TSC_REFINE_PERIOD_IN_SECS * RT_NS_1SEC_64)
     {
         int rc = RTTimerStart(pTimer, RT_NS_1SEC);
@@ … @@
      * first VMs before we're done.  On most systems we will be loading the
      * support driver during boot and VMs won't be started for a while yet,
-     * it is really only a problem during development (especiall with
+     * it is really only a problem during development (especially with
      * on-demand driver starting on windows).
      *
-     * To avoid wasting time doing a long supdrvGipInitMeasureTscFreq call
-     * to calculate the frequencey during driver loading, the timer is set
+     * To avoid wasting time doing a long supdrvGipInitMeasureTscFreq() call
+     * to calculate the frequency during driver loading, the timer is set
      * to fire after 200 ms the first time. It will then reschedule itself
-     * to fire every second until GIP_TSC_REFINE_PREIOD_IN_SECS has been
+     * to fire every second until GIP_TSC_REFINE_PERIOD_IN_SECS has been
      * reached or it notices that there is a user land client with GIP
      * mapped (we want a stable frequency for all VMs).
@@ … @@
 
     AssertPtrReturnVoid(pGip);
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     AssertRelease(idCpu == RTMpCpuId());
     Assert(pGip->cPossibleCpus == RTMpGetCount());
@@ … @@
     ASMAtomicWriteU16(&pGip->aiCpuFromCpuSetIdx[iCpuSet], i);
 
+    /* Add this CPU to this set of CPUs we need to calculate the TSC-delta for. */
+    RTCpuSetAddByIndex(&pDevExt->TscDeltaCpuSet, RTMpCpuIdToSetIndex(idCpu));
+
     /* Update the Mp online/offline counter. */
     ASMAtomicIncU32(&pDevExt->cMpOnOffEvents);
 
-    /* Add this CPU to the set of CPUs for which we need to calculate their TSC-deltas. */
-    if (pGip->enmUseTscDelta > SUPGIPUSETSCDELTA_ZERO_CLAIMED)
-    {
-        RTCpuSetAddByIndex(&pDevExt->TscDeltaCpuSet, iCpuSet);
-#ifdef SUPDRV_USE_TSC_DELTA_THREAD
-        RTSpinlockAcquire(pDevExt->hTscDeltaSpinlock);
-        if (   pDevExt->enmTscDeltaThreadState == kTscDeltaThreadState_Listening
-            || pDevExt->enmTscDeltaThreadState == kTscDeltaThreadState_Measuring)
-        {
-            pDevExt->enmTscDeltaThreadState = kTscDeltaThreadState_WaitAndMeasure;
-        }
-        RTSpinlockRelease(pDevExt->hTscDeltaSpinlock);
-#endif
-    }
-
-    /* commit it */
+    /* Commit it. */
     ASMAtomicWriteSize(&pGip->aCPUs[i].enmState, SUPGIPCPUSTATE_ONLINE);
 
     RTSpinlockRelease(pDevExt->hGipSpinlock);
+}
+
+
+/**
+ * RTMpOnSpecific callback wrapper for supdrvGipMpEventOnlineOrInitOnCpu().
+ *
+ * @param   idCpu       The CPU ID we are running on.
+ * @param   pvUser1     Opaque pointer to the device instance data.
+ * @param   pvUser2     Not used.
+ */
+static DECLCALLBACK(void) supdrvGipMpEventOnlineCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
+{
+    PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser1;
+    NOREF(pvUser2);
+    supdrvGipMpEventOnlineOrInitOnCpu(pDevExt, idCpu);
 }
 
@@ … @@
     }
 
-    /* commit it*/
+    /* Commit it. */
     ASMAtomicWriteSize(&pGip->aCPUs[i].enmState, SUPGIPCPUSTATE_OFFLINE);
 
@@ … @@
  * @param   idCpu   The cpu it applies to.
  * @param   pvUser  Pointer to the device extension.
- *
- * @remarks This function -must- fire on the newly online'd CPU for the
- *          RTMPEVENT_ONLINE case and can fire on any CPU for the
- *          RTMPEVENT_OFFLINE case.
  */
 static DECLCALLBACK(void) supdrvGipMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser)
@@ … @@
     PSUPGLOBALINFOPAGE pGip = pDevExt->pGip;
 
-    AssertRelease(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-
-    /*
-     * Update the GIP CPU data.
-     */
     if (pGip)
     {
+        RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
         switch (enmEvent)
         {
             case RTMPEVENT_ONLINE:
-                AssertRelease(idCpu == RTMpCpuId());
-                supdrvGipMpEventOnlineOrInitOnCpu(pDevExt, idCpu);
+            {
+                RTThreadPreemptDisable(&PreemptState);
+                if (idCpu == RTMpCpuId())
+                {
+                    supdrvGipMpEventOnlineOrInitOnCpu(pDevExt, idCpu);
+                    RTThreadPreemptRestore(&PreemptState);
+                }
+                else
+                {
+                    RTThreadPreemptRestore(&PreemptState);
+                    RTMpOnSpecific(idCpu, supdrvGipMpEventOnlineCallback, pDevExt, NULL /* pvUser2 */);
+                }
+
+                /*
+                 * Recompute TSC-delta for the newly online'd CPU.
+                 */
+                if (pGip->enmUseTscDelta > SUPGIPUSETSCDELTA_ZERO_CLAIMED)
+                {
+#ifdef SUPDRV_USE_TSC_DELTA_THREAD
+                    supdrvTscDeltaThreadStartMeasurement(pDevExt);
+#else
+                    uint32_t iCpu = supdrvGipFindOrAllocCpuIndexForCpuId(pGip, idCpu);
+                    supdrvMeasureTscDeltaOne(pDevExt, iCpu);
+#endif
+                }
                 break;
+            }
+
             case RTMPEVENT_OFFLINE:
                 supdrvGipMpEventOffline(pDevExt, idCpu);
@@ … @@
     ASMAtomicWriteU16(&pCpu->idApic, UINT16_MAX);
 
-    /* 
+    /*
      * The first time we're called, we don't have a CPU frequency handy,
      * so pretend it's a 4 GHz CPU.  On CPUs that are online, we'll get
@@ … @@
      * If we're in any of the other two modes, neither which require MP init,
      * notifications or deltas for the job, do the full measurement now so
-     * that supdrvGipInitOnCpu can populate the TSC interval and history
+     * that supdrvGipInitOnCpu() can populate the TSC interval and history
      * array with more reasonable values.
      */
     if (pGip->u32Mode == SUPGIPMODE_INVARIANT_TSC)
     {
-        rc = supdrvGipInitMeasureTscFreq(pDevExt, pGip, true /* fRough*/); /* cannot fail */
+        rc = supdrvGipInitMeasureTscFreq(pDevExt, pGip, true /* fRough */); /* cannot fail */
         supdrvGipInitStartTimerForRefiningInvariantTscFreq(pDevExt, pGip);
     }
     else
-        rc = supdrvGipInitMeasureTscFreq(pDevExt, pGip, false /* fRough*/);
+        rc = supdrvGipInitMeasureTscFreq(pDevExt, pGip, false /* fRough */);
     if (RT_SUCCESS(rc))
     {
@@ … @@
     {
 #ifdef SUPDRV_USE_TSC_DELTA_THREAD
-        if (pDevExt->hTscDeltaThread != NIL_RTTHREAD)
-            RTThreadUserSignal(pDevExt->hTscDeltaThread);
+        supdrvTscDeltaThreadStartMeasurement(pDevExt);
 #else
         uint16_t iCpu;
@@ … @@
 
     ASMSetFlags(uFlags);
-
-#ifdef SUPDRV_USE_TSC_DELTA_THREAD
-    if (   pGip->enmUseTscDelta > SUPGIPUSETSCDELTA_ZERO_CLAIMED
-        && !RTCpuSetIsEmpty(&pDevExt->TscDeltaCpuSet))
-    {
-        RTSpinlockAcquire(pDevExt->hTscDeltaSpinlock);
-        if (   pDevExt->enmTscDeltaThreadState == kTscDeltaThreadState_Listening
-            || pDevExt->enmTscDeltaThreadState == kTscDeltaThreadState_Measuring)
-            pDevExt->enmTscDeltaThreadState = kTscDeltaThreadState_WaitAndMeasure;
-        RTSpinlockRelease(pDevExt->hTscDeltaSpinlock);
-        /** @todo Do the actual poking using -- RTThreadUserSignal() */
-    }
-#endif
 }
 
@@ … @@
 
 /**
- * Argument package/state passed by supdrvMeasureTscDeltaOne to the RTMpOn
+ * Argument package/state passed by supdrvMeasureTscDeltaOne() to the RTMpOn
  * callback worker.
  */
@@ … @@
  * The idea here is that we have the two CPUs execute the exact same code
  * collecting a largish set of TSC samples.  The code has one data dependency on
- * the other CPU which intention it is to synchronize the execution as well as
- * help cross references the two sets of TSC samples (the sequence numbers).
+ * the other CPU with the intention to synchronize the execution as well
+ * as help cross references the two sets of TSC samples (the sequence numbers).
  *
  * The @a fLag parameter is used to modify the execution a tiny bit on one or
@@ … @@
      */
     PSUPDRVGIPTSCDELTARGS pArgs = (PSUPDRVGIPTSCDELTARGS)RTMemAllocZ(sizeof(*pArgs));
-    if (pArgs)
+    if (RT_LIKELY(pArgs))
     {
         pArgs->pWorker = pGipCpuWorker;
@@ … @@
             RTSpinlockRelease(pDevExt->hTscDeltaSpinlock);
             pDevExt->cMsTscDeltaTimeout = 1;
-            RTThreadSleep(10);
+            RTThreadSleep(1);
             /* fall thru */
         }
@@ … @@
 
 /**
- * Waits for TSC-delta measurements to be completed for all online CPUs.
- *
- * @returns VBox status code.
- * @param   pDevExt     Pointer to the device instance data.
- */
-static int supdrvTscDeltaThreadWaitForOnlineCpus(PSUPDRVDEVEXT pDevExt)
-{
-    int cTriesLeft = 5;
-    int cMsTotalWait;
-    int cMsWaited = 0;
-    int cMsWaitGranularity = 1;
-
-    PSUPGLOBALINFOPAGE pGip = pDevExt->pGip;
-    AssertReturn(pGip, VERR_INVALID_POINTER);
-
-    if (RT_UNLIKELY(pDevExt->hTscDeltaThread == NIL_RTTHREAD))
-        return VERR_THREAD_NOT_WAITABLE;
-
-    cMsTotalWait = RT_MIN(pGip->cPresentCpus + 10, 200);
-    while (cTriesLeft-- > 0)
-    {
-        if (RTCpuSetIsEqual(&pDevExt->TscDeltaObtainedCpuSet, &pGip->OnlineCpuSet))
-            return VINF_SUCCESS;
-        RTThreadSleep(cMsWaitGranularity);
-        cMsWaited += cMsWaitGranularity;
-        if (cMsWaited >= cMsTotalWait)
-            break;
-    }
-
-    return VERR_TIMEOUT;
+ * Signals the TSC-delta thread to start measuring TSC-deltas.
+ *
+ * @param   pDevExt     Pointer to the device instance data.
+ */
+static void supdrvTscDeltaThreadStartMeasurement(PSUPDRVDEVEXT pDevExt)
+{
+    if (RT_LIKELY(pDevExt->hTscDeltaThread != NIL_RTTHREAD))
+    {
+        RTSpinlockAcquire(pDevExt->hTscDeltaSpinlock);
+        if (   pDevExt->enmTscDeltaThreadState == kTscDeltaThreadState_Listening
+            || pDevExt->enmTscDeltaThreadState == kTscDeltaThreadState_Measuring)
+        {
+            pDevExt->enmTscDeltaThreadState = kTscDeltaThreadState_WaitAndMeasure;
+        }
+        RTSpinlockRelease(pDevExt->hTscDeltaSpinlock);
+        RTThreadUserSignal(pDevExt->hTscDeltaThread);
+    }
 }
 
@@ … @@
         return VERR_INVALID_FLAGS;
 
+    /*
+     * The request is a noop if the TSC delta isn't being used.
+     */
+    if (pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ZERO_CLAIMED)
+        return VINF_SUCCESS;
+
     if (cTries == 0)
         cTries = 12;
@@ … @@
     else if (cMsWaitRetry > 1000)
         cMsWaitRetry = 1000;
-
-    /*
-     * The request is a noop if the TSC delta isn't being used.
-     */
-    if (pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ZERO_CLAIMED)
-        return VINF_SUCCESS;
 
 #ifdef SUPDRV_USE_TSC_DELTA_THREAD
@@ … @@
             rc = VINF_SUCCESS;
         }
-        else
+        else if (pDevExt->enmTscDeltaThreadState != kTscDeltaThreadState_WaitAndMeasure)
             rc = VERR_THREAD_IS_DEAD;
         RTSpinlockRelease(pDevExt->hTscDeltaSpinlock);
@@ … @@
 
     return SUPR0TscDeltaMeasureBySetIndex(pSession, iCpuSet, fFlags, cMsWaitRetry,
-                                          cTries == 0 ? 5*RT_MS_1SEC : cMsWaitRetry * cTries /*cMsWaitThread*/,
+                                          cTries == 0 ? 5 * RT_MS_1SEC : cMsWaitRetry * cTries /*cMsWaitThread*/,
                                           cTries);
 }
@@ … @@
         rc = supdrvMeasureTscDeltaOne(pDevExt, iGipCpu);
         Assert(pGip->aCPUs[iGipCpu].i64TSCDelta != INT64_MAX || RT_FAILURE_NP(rc));
-        /** @todo should probably delay on failure... dpc watchdogs
+        /** @todo should probably delay on failure... dpc watchdogs */
     }
     else
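The reworked supdrvGipMpEvent() above no longer requires the online notification to arrive on the CPU that just came up: it disables preemption, compares the target against RTMpCpuId(), and otherwise reroutes the work with RTMpOnSpecific(). Below is a minimal sketch of that dispatch pattern; only the IPRT calls are taken from the change, while the function names (exampleRunOnCpu, exampleOnTargetCpu) and the pvCtx context pointer are illustrative stand-ins.

#include <iprt/cdefs.h>
#include <iprt/mp.h>
#include <iprt/thread.h>

/* Illustrative stand-in for supdrvGipMpEventOnlineOrInitOnCpu(): per-CPU work for idCpu. */
static DECLCALLBACK(void) exampleOnTargetCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    NOREF(pvUser2);
    /* ... initialize/update per-CPU state for idCpu using the context in pvUser1 ... */
    NOREF(pvUser1); NOREF(idCpu);
}

/* Run exampleOnTargetCpu() on idCpu, whether or not the caller is already there. */
static void exampleRunOnCpu(RTCPUID idCpu, void *pvCtx)
{
    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    RTThreadPreemptDisable(&PreemptState);      /* stay put while we check which CPU we are on */
    if (idCpu == RTMpCpuId())
    {
        exampleOnTargetCpu(idCpu, pvCtx, NULL); /* already on the right CPU, do it inline */
        RTThreadPreemptRestore(&PreemptState);
    }
    else
    {
        RTThreadPreemptRestore(&PreemptState);  /* restore before cross-calling, mirroring the change */
        RTMpOnSpecific(idCpu, exampleOnTargetCpu, pvCtx, NULL /* pvUser2 */);
    }
}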
trunk/src/VBox/Runtime/r0drv/linux/mpnotification-r0drv-linux.c
r44529 → r54395

@@ … @@
 
 /*
- * Copyright (C) 2008-2011 Oracle Corporation
+ * Copyright (C) 2008-2015 Oracle Corporation
  *
  * This file is part of VirtualBox Open Source Edition (OSE), as
@@ … @@
 #include "internal/iprt.h"
 
-#include <iprt/mp.h>
 #include <iprt/asm-amd64-x86.h>
 #include <iprt/err.h>
@@ … @@
 
 /**
- * Notification wrapper that updates CPU states and invokes our notification
- * callbacks.
- *
- * @param   idCpu    The CPU Id.
- * @param   pvUser1  Pointer to the notifier_block (unused).
- * @param   pvUser2  The notification event.
- * @remarks This can be invoked in interrupt context.
- */
-static DECLCALLBACK(void) rtMpNotificationLinuxOnCurrentCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
-{
-    unsigned long ulNativeEvent = *(unsigned long *)pvUser2;
-    NOREF(pvUser1);
-
-    AssertRelease(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-    AssertReleaseMsg(idCpu == RTMpCpuId(),  /* ASSUMES iCpu == RTCPUID */
-                     ("idCpu=%u RTMpCpuId=%d ApicId=%d\n", idCpu, RTMpCpuId(), ASMGetApicId() ));
-
-    switch (ulNativeEvent)
-    {
-# ifdef CPU_DOWN_FAILED
-        case CPU_DOWN_FAILED:
-# if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_FAILED_FROZEN)
-        case CPU_DOWN_FAILED_FROZEN:
-# endif
-# endif
-        case CPU_ONLINE:
-# if defined(CPU_TASKS_FROZEN) && defined(CPU_ONLINE_FROZEN)
-        case CPU_ONLINE_FROZEN:
-# endif
-            rtMpNotificationDoCallbacks(RTMPEVENT_ONLINE, idCpu);
-            break;
-
-# ifdef CPU_DOWN_PREPARE
-        case CPU_DOWN_PREPARE:
-# if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_PREPARE_FROZEN)
-        case CPU_DOWN_PREPARE_FROZEN:
-# endif
-            rtMpNotificationDoCallbacks(RTMPEVENT_OFFLINE, idCpu);
-            break;
-# endif
-    }
-}
-
-
-/**
  * The native callback.
  *
@@ … @@
  * @param   ulNativeEvent   The native event.
  * @param   pvCpu           The cpu id cast into a pointer value.
+ *
  * @remarks This can fire with preemption enabled and on any CPU.
  */
 static int rtMpNotificationLinuxCallback(struct notifier_block *pNotifierBlock, unsigned long ulNativeEvent, void *pvCpu)
 {
-    int rc;
     bool fProcessEvent = false;
     RTCPUID idCpu      = (uintptr_t)pvCpu;
@@ … @@
         return NOTIFY_DONE;
 
-    /*
-     * Reschedule the callbacks to fire on the specific CPU with preemption disabled.
-     */
-    rc = RTMpOnSpecific(idCpu, rtMpNotificationLinuxOnCurrentCpu, pNotifierBlock, &ulNativeEvent);
-    Assert(RT_SUCCESS(rc)); NOREF(rc);
+    switch (ulNativeEvent)
+    {
+# ifdef CPU_DOWN_FAILED
+        case CPU_DOWN_FAILED:
+# if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_FAILED_FROZEN)
+        case CPU_DOWN_FAILED_FROZEN:
+# endif
+# endif
+        case CPU_ONLINE:
+# if defined(CPU_TASKS_FROZEN) && defined(CPU_ONLINE_FROZEN)
+        case CPU_ONLINE_FROZEN:
+# endif
+            rtMpNotificationDoCallbacks(RTMPEVENT_ONLINE, idCpu);
+            break;
+
+# ifdef CPU_DOWN_PREPARE
+        case CPU_DOWN_PREPARE:
+# if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_PREPARE_FROZEN)
+        case CPU_DOWN_PREPARE_FROZEN:
+# endif
+            rtMpNotificationDoCallbacks(RTMPEVENT_OFFLINE, idCpu);
+            break;
+# endif
+    }
+
     return NOTIFY_DONE;
 }
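Because the Linux backend now runs the IPRT callbacks straight from the kernel notifier, a consumer registered through the public API can no longer assume the callback fires on the affected CPU or with preemption disabled. The sketch below shows such a consumer; the example* names are hypothetical, while RTMpNotificationRegister()/RTMpNotificationDeregister() and the callback signature are the same public IPRT API that SUPDrv uses for supdrvGipMpEvent() above.

#include <iprt/cdefs.h>
#include <iprt/mp.h>

/* MP notification callback: with this change it may arrive on any CPU, preemption enabled. */
static DECLCALLBACK(void) exampleMpNotification(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser)
{
    NOREF(pvUser);
    if (enmEvent == RTMPEVENT_ONLINE)
    {
        /* Do not assume RTMpCpuId() == idCpu here; re-pin with RTMpOnSpecific()
           (see the sketch after the SUPDrvGip.cpp diff) if per-CPU work is required. */
    }
    else /* RTMPEVENT_OFFLINE */
    {
        /* Tear down per-CPU bookkeeping for idCpu; the CPU may already be on its way out. */
    }
}

/* Registration, e.g. from a module/driver init path. */
static int exampleRegister(void *pvCtx)
{
    int rc = RTMpNotificationRegister(exampleMpNotification, pvCtx);
    /* Pair with RTMpNotificationDeregister(exampleMpNotification, pvCtx) on termination. */
    return rc;
}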
trunk/src/VBox/Runtime/r0drv/solaris/mpnotification-r0drv-solaris.c
r54294 → r54395

@@ … @@
 
 /*
- * Copyright (C) 2008-2012 Oracle Corporation
+ * Copyright (C) 2008-2015 Oracle Corporation
  *
  * This file is part of VirtualBox Open Source Edition (OSE), as
@@ … @@
 
 
-/**
- * PFNRTMPWORKER worker for executing Mp events on the target CPU.
- *
- * @param    idCpu          The current CPU Id.
- * @param    pvArg          Opaque pointer to event type (online/offline).
- * @param    pvIgnored1     Ignored.
- */
-static void rtMpNotificationSolOnCurrentCpu(RTCPUID idCpu, void *pvArg, void *pvIgnored1)
-{
-    NOREF(pvIgnored1);
-    NOREF(idCpu);
-
-    PRTMPARGS pArgs = (PRTMPARGS)pvArg;
-    AssertRelease(pArgs && pArgs->idCpu == RTMpCpuId());
-    Assert(pArgs->pvUser1);
-    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-
-    RTMPEVENT enmMpEvent = *(RTMPEVENT *)pArgs->pvUser1;
-    rtMpNotificationDoCallbacks(enmMpEvent, pArgs->idCpu);
-}
-
-
 /**
  * Solaris callback function for Mp event notification.
  *
+ * @returns Solaris error code.
  * @param    CpuState   The current event/state of the CPU.
- * @param    iCpu       Which CPU is this event fore.
+ * @param    iCpu       Which CPU is this event for.
  * @param    pvArg      Ignored.
  *
  * @remarks This function assumes index == RTCPUID.
- * @returns Solaris error code.
+ *          We may -not- be firing on the CPU going online/offline and called
+ *          with preemption enabled.
  */
 static int rtMpNotificationCpuEvent(cpu_setup_t CpuState, int iCpu, void *pvArg)
 {
     RTMPEVENT enmMpEvent;
-
-    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
-    RTThreadPreemptDisable(&PreemptState);
 
     /*
@@ … @@
         return 0;
 
-    /*
-     * Since we don't absolutely need to do CPU bound code in any of the CPU offline
-     * notification hooks, run it on the current CPU. Scheduling a callback to execute
-     * on the CPU going offline at this point is too late and will not work reliably.
-     */
-    bool fRunningOnTargetCpu = iCpu == RTMpCpuId();
-    if (   fRunningOnTargetCpu == true
-        || enmMpEvent == RTMPEVENT_OFFLINE)
-    {
-        rtMpNotificationDoCallbacks(enmMpEvent, iCpu);
-    }
-    else
-    {
-        /** @todo We should probably be using thread_affinity_set() here, see
-         *        cpu_online() code. */
-        /*
-         * We're not on the target CPU, schedule (synchronous) the event notification callback
-         * to run on the target CPU i.e. the CPU that was online'd.
-         */
-        RTMPARGS Args;
-        RT_ZERO(Args);
-        Args.pvUser1 = &enmMpEvent;
-        Args.pvUser2 = NULL;
-        Args.idCpu   = iCpu;
-        RTMpOnSpecific(iCpu, rtMpNotificationSolOnCurrentCpu, &Args, NULL /* pvIgnored1 */);
-    }
-
-    RTThreadPreemptRestore(&PreemptState);
-
+    rtMpNotificationDoCallbacks(enmMpEvent, iCpu);
     NOREF(pvArg);
     return 0;
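After this change the Solaris hook, like the Linux one, simply maps the native CPU state to an RTMPEVENT and forwards it; there is no preemption fiddling and no cross-CPU rescheduling left in the backend. A rough sketch of that shared bridge shape, with made-up native event codes standing in for the cpu_setup_t / CPU_* values and a plain prototype for the internal rtMpNotificationDoCallbacks() dispatcher written out only to keep the sketch self-contained (it matches how the function is called above):

#include <iprt/mp.h>
#include <iprt/types.h>

/* IPRT-internal dispatcher; prototype as called in these backends. */
void rtMpNotificationDoCallbacks(RTMPEVENT enmEvent, RTCPUID idCpu);

/* Hypothetical native event codes for illustration only. */
typedef enum EXAMPLENATIVEEVENT { EXAMPLE_CPU_ON, EXAMPLE_CPU_OFF, EXAMPLE_CPU_OTHER } EXAMPLENATIVEEVENT;

/* Shape shared by the simplified backends: map the native code, forward, return. */
static int exampleNativeCpuHook(EXAMPLENATIVEEVENT enmNative, int iCpu)
{
    RTMPEVENT enmMpEvent;
    if (enmNative == EXAMPLE_CPU_ON)
        enmMpEvent = RTMPEVENT_ONLINE;
    else if (enmNative == EXAMPLE_CPU_OFF)
        enmMpEvent = RTMPEVENT_OFFLINE;
    else
        return 0;                                   /* not an event we forward */
    rtMpNotificationDoCallbacks(enmMpEvent, iCpu);  /* assumes index == RTCPUID, as the file notes */
    return 0;
}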
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
r54332 → r54395

@@ … @@
  *
  * @returns VBox status code.
- * @param   idCpu       The identifier for the CPU the function is called on.
+ * @param   idCpu       The identifier for the CPU this function is called on.
  *
  * @remarks Must be called with preemption disabled.
@@ … @@
     Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo));
     Assert(!pCpu->fConfigured || pCpu->hMemObj != NIL_RTR0MEMOBJ);
+    AssertRelease(idCpu == RTMpCpuId());
 
     if (pCpu->hMemObj == NIL_RTR0MEMOBJ)
@@ … @@
 
     int rc;
-    if (   pCpu->fConfigured
-        && idCpu == RTMpCpuId()) /* We may not be firing on the CPU being disabled/going offline. */
+    if (pCpu->fConfigured)
     {
         void *pvCpuPage = RTR0MemObjAddress(pCpu->hMemObj);
@@ … @@
     AssertReturnVoid(g_HvmR0.fGlobalInit);
     hmR0FirstRcSetStatus(pFirstRc, hmR0DisableCpu(idCpu));
+}
+
+
+/**
+ * Worker function passed to RTMpOnSpecific() that is to be called on the target
+ * CPU.
+ *
+ * @param   idCpu       The identifier for the CPU the function is called on.
+ * @param   pvUser1     Null, not used.
+ * @param   pvUser2     Null, not used.
+ */
+static DECLCALLBACK(void) hmR0DisableCpuOnSpecificCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
+{
+    NOREF(pvUser1);
+    NOREF(pvUser2);
+    hmR0DisableCpu(idCpu);
 }
 
@@ … @@
      * CPU comes online, the initialization is done lazily in HMR0Enter().
      */
-    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     switch (enmEvent)
     {
         case RTMPEVENT_OFFLINE:
         {
-            int rc = hmR0DisableCpu(idCpu);
-            AssertRC(rc);
+            RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+            RTThreadPreemptDisable(&PreemptState);
+            if (idCpu == RTMpCpuId())
+            {
+                int rc = hmR0DisableCpu(idCpu);
+                AssertRC(rc);
+                RTThreadPreemptRestore(&PreemptState);
+            }
+            else
+            {
+                RTThreadPreemptRestore(&PreemptState);
+                RTMpOnSpecific(idCpu, hmR0DisableCpuOnSpecificCallback, NULL /* pvUser1 */, NULL /* pvUser2 */);
+            }
             break;
         }
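hmR0DisableCpuOnSpecificCallback() above is a thin adapter that gives hmR0DisableCpu(), which only needs the CPU id, the three-parameter worker shape RTMpOnSpecific() expects. A short sketch of that adapter idiom with hypothetical example* names; the offline event is then dispatched with the same preemption-aware pattern shown after the SUPDrvGip.cpp diff.

#include <iprt/cdefs.h>
#include <iprt/err.h>
#include <iprt/mp.h>

/* Stand-in for a worker like hmR0DisableCpu() that only needs the CPU id. */
static int exampleDisableCpu(RTCPUID idCpu)
{
    /* ... tear down per-CPU state; must run on idCpu ... */
    NOREF(idCpu);
    return VINF_SUCCESS;
}

/* Adapter matching the RTMpOnSpecific() worker signature; the user arguments are ignored. */
static DECLCALLBACK(void) exampleDisableCpuOnSpecificCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    NOREF(pvUser1);
    NOREF(pvUser2);
    exampleDisableCpu(idCpu);
}

/* Usage: RTMpOnSpecific(idCpu, exampleDisableCpuOnSpecificCallback, NULL, NULL); */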