Changeset 93628 in vbox for trunk/src/VBox
- Timestamp: Feb 6, 2022 11:44:05 PM
- Location: trunk/src/VBox
- Files: 1 deleted, 7 edited
trunk/src/VBox/Devices/Network/DrvNetShaper.cpp
r93115 → r93628

@@ -121 +121 @@
  */
 PDMBOTHCBDECL(int) drvNetShaperUp_AllocBuf(PPDMINETWORKUP pInterface, size_t cbMin,
                                            PCPDMNETWORKGSO pGso, PPPDMSCATTERGATHER ppSgBuf)
 {
     PDRVNETSHAPER pThis = RT_FROM_MEMBER(pInterface, DRVNETSHAPER, CTX_SUFF(INetworkUp));
-    if ( RT_UNLIKELY(!pThis->CTX_SUFF(pIBelowNet)))
-        return VERR_NET_DOWN;
-    //LogFlow(("drvNetShaperUp_AllocBuf: cb=%d\n", cbMin));
-    STAM_REL_COUNTER_ADD(&pThis->StatXmitBytesRequested, cbMin);
-    STAM_REL_COUNTER_INC(&pThis->StatXmitPktsRequested);
-#if defined(IN_RING3) || defined(IN_RING0)
-    if (!PDMDrvHlpNetShaperAllocateBandwidth(pThis->CTX_SUFF(pDrvIns), &pThis->Filter, cbMin))
-    {
-        STAM_REL_COUNTER_ADD(&pThis->StatXmitBytesDenied, cbMin);
-        STAM_REL_COUNTER_INC(&pThis->StatXmitPktsDenied);
-        return VERR_TRY_AGAIN;
-    }
-#endif
-    STAM_REL_COUNTER_ADD(&pThis->StatXmitBytesGranted, cbMin);
-    STAM_REL_COUNTER_INC(&pThis->StatXmitPktsGranted);
-    //LogFlow(("drvNetShaperUp_AllocBuf: got cb=%d\n", cbMin));
-    return pThis->CTX_SUFF(pIBelowNet)->pfnAllocBuf(pThis->CTX_SUFF(pIBelowNet), cbMin, pGso, ppSgBuf);
+    if (pThis->CTX_SUFF(pIBelowNet))
+    {
+        //LogFlow(("drvNetShaperUp_AllocBuf: cb=%d\n", cbMin));
+        STAM_REL_COUNTER_ADD(&pThis->StatXmitBytesRequested, cbMin);
+        STAM_REL_COUNTER_INC(&pThis->StatXmitPktsRequested);
+        if (!PDMDrvHlpNetShaperAllocateBandwidth(pThis->CTX_SUFF(pDrvIns), &pThis->Filter, cbMin))
+        {
+            STAM_REL_COUNTER_ADD(&pThis->StatXmitBytesDenied, cbMin);
+            STAM_REL_COUNTER_INC(&pThis->StatXmitPktsDenied);
+            return VERR_TRY_AGAIN;
+        }
+        STAM_REL_COUNTER_ADD(&pThis->StatXmitBytesGranted, cbMin);
+        STAM_REL_COUNTER_INC(&pThis->StatXmitPktsGranted);
+        //LogFlow(("drvNetShaperUp_AllocBuf: got cb=%d\n", cbMin));
+        return pThis->CTX_SUFF(pIBelowNet)->pfnAllocBuf(pThis->CTX_SUFF(pIBelowNet), cbMin, pGso, ppSgBuf);
+    }
+    return VERR_NET_DOWN;
 }
 
trunk/src/VBox/Main/src-client/ConsoleImpl2.cpp
r93561 → r93628

@@ -1464 +1464 @@
          * Bandwidth groups.
          */
+        ComPtr<IBandwidthControl> bwCtrl;
+        hrc = pMachine->COMGETTER(BandwidthControl)(bwCtrl.asOutParam());              H();
+
+        com::SafeIfaceArray<IBandwidthGroup> bwGroups;
+        hrc = bwCtrl->GetAllBandwidthGroups(ComSafeArrayAsOutParam(bwGroups));         H();
+
         PCFGMNODE pAc;
+        InsertConfigNode(pPDM, "AsyncCompletion", &pAc);
         PCFGMNODE pAcFile;
+        InsertConfigNode(pAc, "File", &pAcFile);
         PCFGMNODE pAcFileBwGroups;
-        ComPtr<IBandwidthControl> bwCtrl;
-        com::SafeIfaceArray<IBandwidthGroup> bwGroups;
-
-        hrc = pMachine->COMGETTER(BandwidthControl)(bwCtrl.asOutParam());              H();
-
-        hrc = bwCtrl->GetAllBandwidthGroups(ComSafeArrayAsOutParam(bwGroups));         H();
-
-        InsertConfigNode(pPDM, "AsyncCompletion", &pAc);
-        InsertConfigNode(pAc, "File", &pAcFile);
         InsertConfigNode(pAcFile, "BwGroups", &pAcFileBwGroups);
 #ifdef VBOX_WITH_NETSHAPER
         PCFGMNODE pNetworkShaper;
+        InsertConfigNode(pPDM, "NetworkShaper", &pNetworkShaper);
         PCFGMNODE pNetworkBwGroups;
-
-        InsertConfigNode(pPDM, "NetworkShaper", &pNetworkShaper);
         InsertConfigNode(pNetworkShaper, "BwGroups", &pNetworkBwGroups);
 #endif /* VBOX_WITH_NETSHAPER */

@@ -1488 +1486 @@
         {
             Bstr strName;
-            LONG64 cMaxBytesPerSec;
-            BandwidthGroupType_T enmType;
-
             hrc = bwGroups[i]->COMGETTER(Name)(strName.asOutParam());                  H();
-            hrc = bwGroups[i]->COMGETTER(Type)(&enmType);                              H();
-            hrc = bwGroups[i]->COMGETTER(MaxBytesPerSec)(&cMaxBytesPerSec);            H();
-
             if (strName.isEmpty())
                 return pVMM->pfnVMR3SetError(pUVM, VERR_CFGM_NO_NODE, RT_SRC_POS, N_("No bandwidth group name specified"));
+            BandwidthGroupType_T enmType = BandwidthGroupType_Null;
+            hrc = bwGroups[i]->COMGETTER(Type)(&enmType);                              H();
+            LONG64 cMaxBytesPerSec = 0;
+            hrc = bwGroups[i]->COMGETTER(MaxBytesPerSec)(&cMaxBytesPerSec);            H();
 
             if (enmType == BandwidthGroupType_Disk)
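For orientation, the CFGM tree these InsertConfigNode calls build (and which the rewritten pdmR3NetShaperInit() later in this changeset reads back via CFGMR3GetChild/CFGMR3QueryU64) looks roughly like this; "Max" is the only per-group value actually queried in the hunks shown here, so treat the rest as a sketch:

    PDM/
     AsyncCompletion/
      File/
       BwGroups/             (disk bandwidth groups)
     NetworkShaper/
      BwGroups/
       <group name>/
        Max                  (uint64_t, bytes per second; 0 disables shaping for the group)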
trunk/src/VBox/Main/src-server/BandwidthControlImpl.cpp
r93115 → r93628

@@ -28 +28 @@
 #include <iprt/cpp/utils.h>
 #include <VBox/com/array.h>
+#include <VBox/param.h>
 #include <algorithm>
 
@@ -392 +393 @@
                                                    LONG64 aMaxBytesPerSec)
 {
+    /*
+     * Validate input.
+     */
     if (aMaxBytesPerSec < 0)
-        return setError(E_INVALIDARG,
-                        tr("Bandwidth group limit cannot be negative"));
-
-    /* the machine needs to be mutable */
+        return setError(E_INVALIDARG, tr("Bandwidth group limit cannot be negative"));
+    switch (aType)
+    {
+        case BandwidthGroupType_Null: /*??*/
+        case BandwidthGroupType_Disk:
+            break;
+        case BandwidthGroupType_Network:
+            if (aName.length() > PDM_NET_SHAPER_MAX_NAME_LEN)
+                return setError(E_INVALIDARG, tr("Bandwidth name is too long: %zu, max %u"),
+                                aName.length(), PDM_NET_SHAPER_MAX_NAME_LEN);
+            break;
+        default:
+            AssertFailedReturn(setError(E_INVALIDARG, tr("Invalid group type: %d"), aType));
+    }
+    if (aName.isEmpty())
+        return setError(E_INVALIDARG, tr("Bandwidth group name must not be empty")); /* ConsoleImpl2.cpp fails then */
+
+    /*
+     * The machine needs to be mutable:
+     */
     AutoMutableOrSavedStateDependency adep(m->pParent);
-    if (FAILED(adep.rc())) return adep.rc();
-
-    AutoWriteLock alock(this COMMA_LOCKVAL_SRC_POS);
-
-    /* try to find one with the name first. */
-    ComObjPtr<BandwidthGroup> group;
-    HRESULT rc = i_getBandwidthGroupByName(aName, group, false /* aSetError */);
-
-    if (SUCCEEDED(rc))
-        return setError(VBOX_E_OBJECT_IN_USE,
-                        tr("Bandwidth group named '%s' already exists"),
-                        aName.c_str());
-
-    group.createObject();
-
-    rc = group->init(this, aName, aType, aMaxBytesPerSec);
-    if (FAILED(rc)) return rc;
-
-    m->pParent->i_setModified(Machine::IsModified_BandwidthControl);
-    m->llBandwidthGroups.backup();
-    m->llBandwidthGroups->push_back(group);
-
-    return S_OK;
+    HRESULT hrc = adep.rc();
+    if (SUCCEEDED(hrc))
+    {
+        AutoWriteLock alock(this COMMA_LOCKVAL_SRC_POS);
+
+        /*
+         * Check that the group doesn't already exist:
+         */
+        ComObjPtr<BandwidthGroup> group;
+        hrc = i_getBandwidthGroupByName(aName, group, false /* aSetError */);
+        if (FAILED(hrc))
+        {
+            /*
+             * There is an upper limit of the number of network groups imposed by PDM.
+             */
+            size_t cNetworkGroups = 0;
+            if (aType == BandwidthGroupType_Network)
+                for (BandwidthGroupList::const_iterator it = m->llBandwidthGroups->begin();
+                     it != m->llBandwidthGroups->end();
+                     ++it)
+                    if ((*it)->i_getType() == BandwidthGroupType_Network)
+                        cNetworkGroups++;
+            if (cNetworkGroups < PDM_NET_SHAPER_MAX_GROUPS)
+            {
+                /*
+                 * Create the new group.
+                 */
+                hrc = group.createObject();
+                if (SUCCEEDED(hrc))
+                {
+                    hrc = group->init(this, aName, aType, aMaxBytesPerSec);
+                    if (SUCCEEDED(hrc))
+                    {
+                        /*
+                         * Add it to the settings.
+                         */
+                        m->pParent->i_setModified(Machine::IsModified_BandwidthControl);
+                        m->llBandwidthGroups.backup();
+                        m->llBandwidthGroups->push_back(group);
+                        hrc = S_OK;
+                    }
+                }
+            }
+            else
+                hrc = setError(E_FAIL, tr("Too many network bandwidth groups (max %u)"), PDM_NET_SHAPER_MAX_GROUPS);
+        }
+        else
+            hrc = setError(VBOX_E_OBJECT_IN_USE, tr("Bandwidth group named '%s' already exists"), aName.c_str());
+    }
+    return hrc;
 }
 
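The validation added here is what a Main API client hits when creating a group. A rough client-side sketch (variable names and the 10 MB/s figure are illustrative only, not taken from this changeset):

    ComPtr<IBandwidthControl> ptrBwCtrl;
    HRESULT hrc = ptrMachine->COMGETTER(BandwidthControl)(ptrBwCtrl.asOutParam());
    if (SUCCEEDED(hrc))
        hrc = ptrBwCtrl->CreateBandwidthGroup(Bstr("net-limit-1").raw(),
                                              BandwidthGroupType_Network,
                                              (LONG64)10 * _1M /* bytes per second */);
    /* Expected failures after this change:
       - E_INVALIDARG for an empty name, a network group name longer than
         PDM_NET_SHAPER_MAX_NAME_LEN, a negative limit or an unknown type.
       - E_FAIL once PDM_NET_SHAPER_MAX_GROUPS network groups already exist.
       - VBOX_E_OBJECT_IN_USE if a group with the same name already exists. */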
trunk/src/VBox/VMM/VMMAll/PDMAllNetShaper.cpp
r93115 → r93628

@@ -21 +21 @@
 *********************************************************************************************************************************/
 #define LOG_GROUP LOG_GROUP_NET_SHAPER
-#include <VBox/vmm/pdm.h>
+#include <VBox/vmm/pdmnetshaper.h>
+#include "PDMInternal.h"
+#include <VBox/vmm/vmcc.h>
+
 #include <VBox/log.h>
 #include <iprt/time.h>
-
-#include <VBox/vmm/pdmnetshaper.h>
-#include "PDMNetShaperInternal.h"
 
 
@@ -40 +40 @@
 {
     AssertPtrReturn(pFilter, true);
-    if (!RT_VALID_PTR(pFilter->CTX_SUFF(pBwGroup)))
-        return true;
 
-    PPDMNSBWGROUP pBwGroup = ASMAtomicReadPtrT(&pFilter->CTX_SUFF(pBwGroup), PPDMNSBWGROUP);
-    int rc = PDMCritSectEnter(pVM, &pBwGroup->Lock, VERR_SEM_BUSY); AssertRC(rc);
-    if (RT_SUCCESS(rc))
-    { /* likely */ }
-    else
+    /*
+     * If we haven't got a valid bandwidth group, we always allow the traffic.
+     */
+    bool     fAllowed = true;
+    uint32_t iGroup   = ASMAtomicUoReadU32(&pFilter->iGroup);
+    if (iGroup != 0)
     {
-        if (rc == VERR_SEM_BUSY)
-            return true;
-        PDM_CRITSECT_RELEASE_ASSERT_RC(pVM, &pBwGroup->Lock, rc);
-        return false;
-    }
+        if (iGroup <= RT_MIN(pVM->pdm.s.cNsGroups, RT_ELEMENTS(pVM->pdm.s.aNsGroups)))
+        {
+            PPDMNSBWGROUP pGroup = &pVM->pdm.s.aNsGroups[iGroup - 1];
+            int rc = PDMCritSectEnter(pVM, &pGroup->Lock, VINF_TRY_AGAIN);
+            if (rc == VINF_SUCCESS)
+            {
+                uint64_t const cbPerSecMax = pGroup->cbPerSecMax;
+                if (cbPerSecMax > 0)
+                {
+                    /*
+                     * Re-fill the bucket first
+                     */
+                    uint64_t const tsNow        = RTTimeSystemNanoTS();
+                    uint64_t const cNsDelta     = tsNow - pGroup->tsUpdatedLast;
+                    /** @todo r=bird: there might be an overflow issue here if the gap
+                     *        between two transfers is too large. */
+                    uint32_t       cTokensAdded = cNsDelta * cbPerSecMax / RT_NS_1SEC;
 
-    bool fAllowed = true;
-    if (pBwGroup->cbPerSecMax)
-    {
-        /* Re-fill the bucket first */
-        uint64_t tsNow = RTTimeSystemNanoTS();
-        uint32_t uTokensAdded = (tsNow - pBwGroup->tsUpdatedLast) * pBwGroup->cbPerSecMax / (1000 * 1000 * 1000);
-        uint32_t uTokens = RT_MIN(pBwGroup->cbBucket, uTokensAdded + pBwGroup->cbTokensLast);
+                    uint32_t const cbBucket     = pGroup->cbBucket;
+                    uint32_t const cbTokensLast = pGroup->cbTokensLast;
+                    uint32_t const cTokens      = RT_MIN(cbBucket, cTokensAdded + cbTokensLast);
 
-        if (cbTransfer > uTokens)
-        {
-            fAllowed = false;
-            ASMAtomicWriteBool(&pFilter->fChoked, true);
+                    /*
+                     * Allowed?
+                     */
+                    if (cbTransfer <= cTokens)
+                    {
+                        pGroup->cbTokensLast  = cTokens - (uint32_t)cbTransfer;
+                        pGroup->tsUpdatedLast = tsNow;
+                        Log2(("pdmNsAllocateBandwidth/%s: allowed - cbTransfer=%#zx cTokens=%#x cTokensAdded=%#x\n",
+                              pGroup->szName, cbTransfer, cTokens, cTokensAdded));
+                    }
+                    else
+                    {
+                        ASMAtomicWriteBool(&pFilter->fChoked, true);
+                        Log2(("pdmNsAllocateBandwidth/%s: refused - cbTransfer=%#zx cTokens=%#x cTokensAdded=%#x\n",
+                              pGroup->szName, cbTransfer, cTokens, cTokensAdded));
+                        fAllowed = false;
+                    }
+                }
+                else
+                    Log2(("pdmNsAllocateBandwidth/%s: disabled\n", pGroup->szName));
+
+                rc = PDMCritSectLeave(pVM, &pGroup->Lock);
+                AssertRCSuccess(rc);
+            }
+            else if (rc == VINF_TRY_AGAIN) /* (accounted for by the critsect stats) */
+                Log2(("pdmNsAllocateBandwidth/%s: allowed - lock contention\n", pGroup->szName));
+            else
+                PDM_CRITSECT_RELEASE_ASSERT_RC(pVM, &pGroup->Lock, rc);
         }
         else
-        {
-            pBwGroup->tsUpdatedLast = tsNow;
-            pBwGroup->cbTokensLast = uTokens - (uint32_t)cbTransfer;
-        }
-        Log2(("pdmNsAllocateBandwidth: BwGroup=%#p{%s} cbTransfer=%u uTokens=%u uTokensAdded=%u fAllowed=%RTbool\n",
-              pBwGroup, R3STRING(pBwGroup->pszNameR3), cbTransfer, uTokens, uTokensAdded, fAllowed));
+            AssertMsgFailed(("Invalid iGroup=%d\n", iGroup));
     }
-    else
-        Log2(("pdmNsAllocateBandwidth: BwGroup=%#p{%s} disabled fAllowed=%RTbool\n",
-              pBwGroup, R3STRING(pBwGroup->pszNameR3), fAllowed));
-
-    rc = PDMCritSectLeave(pVM, &pBwGroup->Lock); AssertRC(rc);
     return fAllowed;
 }
-
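Stripped of the PDM data structures and locking, the token-bucket arithmetic that the allocation routine above performs boils down to the following self-contained sketch (type and function names are made up for illustration):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define NS_PER_SEC UINT64_C(1000000000)

    typedef struct TOKENBUCKET
    {
        uint64_t cbPerSecMax;   /* configured rate limit in bytes/second, 0 = shaping disabled */
        uint32_t cbBucket;      /* bucket capacity = maximum burst in bytes */
        uint32_t cbTokensLast;  /* tokens left after the previous allocation */
        uint64_t tsLastNs;      /* timestamp of the previous allocation, nanoseconds */
    } TOKENBUCKET;

    /* Returns true if cbTransfer bytes may be sent now; false means the caller
       should mark the filter as choked and retry after the bucket refills. */
    static bool tokenBucketAllocate(TOKENBUCKET *pBucket, size_t cbTransfer, uint64_t tsNowNs)
    {
        if (pBucket->cbPerSecMax == 0)      /* shaping disabled: always allow */
            return true;

        /* Refill: tokens accrue at cbPerSecMax bytes/second, capped at the bucket size. */
        uint64_t const cNsDelta     = tsNowNs - pBucket->tsLastNs;
        uint64_t const cTokensAdded = cNsDelta * pBucket->cbPerSecMax / NS_PER_SEC;
        uint64_t       cTokens      = cTokensAdded + pBucket->cbTokensLast;
        if (cTokens > pBucket->cbBucket)
            cTokens = pBucket->cbBucket;

        if ((uint64_t)cbTransfer > cTokens)
            return false;                   /* not enough tokens: deny (choke) */

        pBucket->cbTokensLast = (uint32_t)(cTokens - cbTransfer);
        pBucket->tsLastNs     = tsNowNs;
        return true;
    }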
trunk/src/VBox/VMM/VMMR3/PDMDriver.cpp
r93609 → r93628

@@ -1678 +1678 @@
                 pDrvIns->pReg->szName, pDrvIns->iInstance, pFilter, pszBwGroup, pszBwGroup));
 
-    int rc = PDMR3NsAttach(pDrvIns->Internal.s.pVMR3->pUVM, pDrvIns, pszBwGroup, pFilter);
+    int rc = PDMR3NsAttach(pDrvIns->Internal.s.pVMR3, pDrvIns, pszBwGroup, pFilter);
 
     LogFlow(("pdmR3DrvHlp_NetShaperAttach: caller='%s'/%d: returns %Rrc\n", pDrvIns->pReg->szName,

@@ -1698 +1698 @@
                 pDrvIns->pReg->szName, pDrvIns->iInstance, pFilter));
 
-    int rc = PDMR3NsDetach(pDrvIns->Internal.s.pVMR3->pUVM, pDrvIns, pFilter);
+    int rc = PDMR3NsDetach(pDrvIns->Internal.s.pVMR3, pDrvIns, pFilter);
 
     LogFlow(("pdmR3DrvHlp_NetShaperDetach: caller='%s'/%d: returns %Rrc\n", pDrvIns->pReg->szName,
trunk/src/VBox/VMM/VMMR3/PDMNetShaper.cpp
r93115 → r93628

@@ -21 +21 @@
 *********************************************************************************************************************************/
 #define LOG_GROUP LOG_GROUP_NET_SHAPER
+#include <VBox/vmm/pdm.h>
 #include "PDMInternal.h"
-#include <VBox/vmm/pdm.h>
-#include <VBox/vmm/mm.h>
 #include <VBox/vmm/vm.h>
 #include <VBox/vmm/uvm.h>

@@ -39 +38 @@
 
 
 #include <VBox/vmm/pdmnetshaper.h>
-#include "PDMNetShaperInternal.h"
-
-
-/*********************************************************************************************************************************
-*   Structures and Typedefs                                                                                                      *
-*********************************************************************************************************************************/
-
-/**
- * Network shaper data. One instance per VM.
- */
-typedef struct PDMNETSHAPER
-{
-    /** Pointer to the VM. */
-    PVM                      pVM;
-    /** Critical section protecting all members below. */
-    RTCRITSECT               Lock;
-    /** Pending TX thread. */
-    PPDMTHREAD               pTxThread;
-    /** Pointer to the first bandwidth group. */
-    PPDMNSBWGROUP            pBwGroupsHead;
-} PDMNETSHAPER;
-
-
-/** Takes the shaper lock (asserts but doesn't return or anything on
- * failure). */
-#define LOCK_NETSHAPER(a_pShaper) do { int rcShaper = RTCritSectEnter(&(a_pShaper)->Lock); AssertRC(rcShaper); } while (0)
-
-/** Takes the shaper lock, returns + asserts on failure. */
-#define LOCK_NETSHAPER_RETURN(a_pShaper) \
-    do { int rcShaper = RTCritSectEnter(&(a_pShaper)->Lock); AssertRCReturn(rcShaper, rcShaper); } while (0)
-
-/** Releases the shaper lock (asserts on failure). */
-#define UNLOCK_NETSHAPER(a_pShaper) do { int rcShaper = RTCritSectLeave(&(a_pShaper)->Lock); AssertRC(rcShaper); } while (0)
-
-
-
-
-static PPDMNSBWGROUP pdmNsBwGroupFindById(PPDMNETSHAPER pShaper, const char *pszId)
-{
-    PPDMNSBWGROUP pBwGroup = NULL;
-
-    if (RT_VALID_PTR(pszId))
-    {
-        LOCK_NETSHAPER(pShaper);
-
-        pBwGroup = pShaper->pBwGroupsHead;
-        while (   pBwGroup
-               && RTStrCmp(pBwGroup->pszNameR3, pszId))
-            pBwGroup = pBwGroup->pNextR3;
-
-        UNLOCK_NETSHAPER(pShaper);
-    }
-
-    return pBwGroup;
-}
-
-
-static void pdmNsBwGroupLink(PPDMNSBWGROUP pBwGroup)
-{
-    PPDMNETSHAPER pShaper = pBwGroup->pShaperR3;
-    LOCK_NETSHAPER(pShaper);
-
-    pBwGroup->pNextR3 = pShaper->pBwGroupsHead;
-    pShaper->pBwGroupsHead = pBwGroup;
-
-    UNLOCK_NETSHAPER(pShaper);
-}
-
-
-#if 0
-static void pdmNsBwGroupUnlink(PPDMNSBWGROUP pBwGroup)
-{
-    PPDMNETSHAPER pShaper = pBwGroup->pShaper;
-    LOCK_NETSHAPER(pShaper);
-
-    if (pBwGroup == pShaper->pBwGroupsHead)
-        pShaper->pBwGroupsHead = pBwGroup->pNext;
-    else
-    {
-        PPDMNSBWGROUP pPrev = pShaper->pBwGroupsHead;
-        while (   pPrev
-               && pPrev->pNext != pBwGroup)
-            pPrev = pPrev->pNext;
-
-        AssertPtr(pPrev);
-        pPrev->pNext = pBwGroup->pNext;
-    }
-
-    UNLOCK_NETSHAPER(pShaper);
+
+
+
+
+/**
+ * Looks up a network bandwidth group by it's name.
+ *
+ * @returns Pointer to the group if found, NULL if not.
+ * @param   pVM         The cross context VM structure.
+ * @param   pszName     The name of the group to find.
+ */
+static PPDMNSBWGROUP pdmNsBwGroupFindByName(PVM pVM, const char *pszName)
+{
+    AssertPtrReturn(pszName, NULL);
+    AssertReturn(*pszName != '\0', NULL);
+
+    size_t const cGroups = RT_MIN(pVM->pdm.s.cNsGroups, RT_ELEMENTS(pVM->pdm.s.aNsGroups));
+    for (size_t i = 0; i < cGroups; i++)
+        if (RTStrCmp(pVM->pdm.s.aNsGroups[i].szName, pszName) == 0)
+            return &pVM->pdm.s.aNsGroups[i];
+    return NULL;
+}
+
+
+#ifdef VBOX_STRICT
+/**
+ * Checks if pFilter is attached to the given group by walking the list.
+ */
+DECLINLINE(bool) pdmR3NsIsFilterAttached(PPDMNSBWGROUP pGroup, PPDMNSFILTER pFilter)
+{
+    PPDMNSFILTER pCur;
+    RTListForEach(&pGroup->FilterList, pCur, PDMNSFILTER, ListEntry)
+    {
+        if (pCur == pFilter)
+            return true;
+    }
+    return false;
 }
 #endif
 
 
-
-static void pdmNsBwGroupSetLimit(PPDMNSBWGROUP pBwGroup, uint64_t cbPerSecMax)
-{
-    pBwGroup->cbPerSecMax = cbPerSecMax;
-    pBwGroup->cbBucket    = RT_MAX(PDM_NETSHAPER_MIN_BUCKET_SIZE, cbPerSecMax * PDM_NETSHAPER_MAX_LATENCY / 1000);
-    LogFlow(("pdmNsBwGroupSetLimit: New rate limit is %llu bytes per second, adjusted bucket size to %u bytes\n",
-             pBwGroup->cbPerSecMax, pBwGroup->cbBucket));
-}
-
-
-static int pdmNsBwGroupCreate(PPDMNETSHAPER pShaper, const char *pszBwGroup, uint64_t cbPerSecMax)
-{
-    LogFlow(("pdmNsBwGroupCreate: pShaper=%#p pszBwGroup=%#p{%s} cbPerSecMax=%llu\n", pShaper, pszBwGroup, pszBwGroup, cbPerSecMax));
-
-    AssertPtrReturn(pShaper, VERR_INVALID_POINTER);
-    AssertPtrReturn(pszBwGroup, VERR_INVALID_POINTER);
-    AssertReturn(*pszBwGroup != '\0', VERR_INVALID_PARAMETER);
-
-    int rc;
-    PPDMNSBWGROUP pBwGroup = pdmNsBwGroupFindById(pShaper, pszBwGroup);
-    if (!pBwGroup)
-    {
-        PVM const pVM = pShaper->pVM;
-        rc = MMHyperAlloc(pVM, sizeof(PDMNSBWGROUP), 64, MM_TAG_PDM_NET_SHAPER, (void **)&pBwGroup);
-        if (RT_SUCCESS(rc))
-        {
-            rc = PDMR3CritSectInit(pVM, &pBwGroup->Lock, RT_SRC_POS, "BWGRP-%s", pszBwGroup);
-            if (RT_SUCCESS(rc))
-            {
-                pBwGroup->pszNameR3 = MMR3HeapStrDup(pVM, MM_TAG_PDM_NET_SHAPER, pszBwGroup);
-                if (pBwGroup->pszNameR3)
-                {
-                    pBwGroup->pShaperR3 = pShaper;
-                    pBwGroup->cRefs     = 0;
-
-                    pdmNsBwGroupSetLimit(pBwGroup, cbPerSecMax);
-
-                    pBwGroup->cbTokensLast  = pBwGroup->cbBucket;
-                    pBwGroup->tsUpdatedLast = RTTimeSystemNanoTS();
-
-                    LogFlowFunc(("pszBwGroup={%s} cbBucket=%u\n",
-                                 pszBwGroup, pBwGroup->cbBucket));
-                    pdmNsBwGroupLink(pBwGroup);
-                    return VINF_SUCCESS;
-                }
-                PDMR3CritSectDelete(pVM, &pBwGroup->Lock);
-            }
-            MMHyperFree(pVM, pBwGroup);
+/**
+ * Attaches a network filter driver to the named bandwidth group.
+ *
+ * @returns VBox status code.
+ * @retval  VERR_ALREADY_INITIALIZED if already attached.
+ * @retval  VERR_NOT_FOUND if the bandwidth wasn't found.
+ *
+ * @param   pVM         The cross context VM structure.
+ * @param   pDrvIns     The driver instance.
+ * @param   pszName     Name of the bandwidth group to attach to.
+ * @param   pFilter     Pointer to the filter to attach.
+ */
+VMMR3_INT_DECL(int) PDMR3NsAttach(PVM pVM, PPDMDRVINS pDrvIns, const char *pszName, PPDMNSFILTER pFilter)
+{
+    /*
+     * Validate input.
+     */
+    RT_NOREF(pDrvIns);
+    VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
+    AssertPtrReturn(pFilter, VERR_INVALID_POINTER);
+
+    uint32_t iGroup = pFilter->iGroup;
+    AssertMsgReturn(iGroup == 0, ("iGroup=%d\n", iGroup), VERR_ALREADY_INITIALIZED);
+    Assert(pFilter->ListEntry.pNext == NULL);
+    Assert(pFilter->ListEntry.pPrev == NULL);
+
+    /* Resolve the group. */
+    PPDMNSBWGROUP pGroup = pdmNsBwGroupFindByName(pVM, pszName);
+    AssertMsgReturn(pGroup, ("'%s'\n", pszName), VERR_NOT_FOUND);
+
+    /*
+     * The attach is protected by PDM::NsLock and by updating iGroup atomatically.
+     */
+    int rc = RTCritSectEnter(&pVM->pdm.s.NsLock);
+    if (RT_SUCCESS(rc))
+    {
+        if (ASMAtomicCmpXchgU32(&pFilter->iGroup, (uint32_t)(pGroup - &pVM->pdm.s.aNsGroups[0]) + 1, 0))
+        {
+            Assert(pFilter->ListEntry.pNext == NULL);
+            Assert(pFilter->ListEntry.pPrev == NULL);
+            RTListAppend(&pGroup->FilterList, &pFilter->ListEntry);
+
+            uint32_t cRefs = ASMAtomicIncU32(&pGroup->cRefs);
+            AssertMsg(cRefs > 0 && cRefs < _16K, ("%u\n", cRefs));
+
+            LogFlow(("PDMR3NsAttach: Attached '%s'/%u to %s (cRefs=%u)\n",
+                     pDrvIns->pReg->szName, pDrvIns->iInstance, pGroup->szName, cRefs));
+            rc = VINF_SUCCESS;
         }
         else
-            rc = VERR_NO_MEMORY;
-    }
-    else
-        rc = VERR_ALREADY_EXISTS;
-
-    LogFlowFunc(("returns rc=%Rrc\n", rc));
+        {
+            AssertMsgFailed(("iGroup=%d (attach race)\n", pFilter->iGroup));
+            rc = VERR_ALREADY_INITIALIZED;
+        }
+
+        int rc2 = RTCritSectLeave(&pVM->pdm.s.NsLock);
+        AssertRC(rc2);
+    }
+
     return rc;
 }
 
 
-static void pdmNsBwGroupTerminate(PVM pVM, PPDMNSBWGROUP pBwGroup)
-{
-    Assert(pBwGroup->cRefs == 0);
-    if (PDMCritSectIsInitialized(&pBwGroup->Lock))
-        PDMR3CritSectDelete(pVM, &pBwGroup->Lock);
-}
-
-
-DECLINLINE(void) pdmNsBwGroupRef(PPDMNSBWGROUP pBwGroup)
-{
-    ASMAtomicIncU32(&pBwGroup->cRefs);
-}
-
-
-DECLINLINE(void) pdmNsBwGroupUnref(PPDMNSBWGROUP pBwGroup)
-{
-    Assert(pBwGroup->cRefs > 0);
-    ASMAtomicDecU32(&pBwGroup->cRefs);
-}
-
-
-static void pdmNsBwGroupXmitPending(PPDMNSBWGROUP pBwGroup)
-{
-    /*
-     * We don't need to hold the bandwidth group lock to iterate over the list
-     * of filters since the filters are removed while the shaper lock is being
-     * held.
-     */
-    AssertPtr(pBwGroup);
-    AssertPtr(pBwGroup->pShaperR3);
-    Assert(RTCritSectIsOwner(&pBwGroup->pShaperR3->Lock));
-    //LOCK_NETSHAPER(pShaper);
-
-    /* Check if the group is disabled. */
-    if (pBwGroup->cbPerSecMax == 0)
-        return;
-
-    PPDMNSFILTER pFilter = pBwGroup->pFiltersHeadR3;
-    while (pFilter)
-    {
-        bool fChoked = ASMAtomicXchgBool(&pFilter->fChoked, false);
-        Log3((LOG_FN_FMT ": pFilter=%#p fChoked=%RTbool\n", __PRETTY_FUNCTION__, pFilter, fChoked));
-        if (fChoked && pFilter->pIDrvNetR3)
-        {
-            LogFlowFunc(("Calling pfnXmitPending for pFilter=%#p\n", pFilter));
-            pFilter->pIDrvNetR3->pfnXmitPending(pFilter->pIDrvNetR3);
-        }
-
-        pFilter = pFilter->pNextR3;
-    }
-
-    //UNLOCK_NETSHAPER(pShaper);
-}
-
-
-static void pdmNsFilterLink(PPDMNSFILTER pFilter)
-{
-    PPDMNSBWGROUP pBwGroup = pFilter->pBwGroupR3;
-    PVM const     pVM      = pBwGroup->pShaperR3->pVM;
-    int rc = PDMCritSectEnter(pVM, &pBwGroup->Lock, VERR_SEM_BUSY); AssertRC(rc);
-
-    pFilter->pNextR3 = pBwGroup->pFiltersHeadR3;
-    pBwGroup->pFiltersHeadR3 = pFilter;
-
-    rc = PDMCritSectLeave(pVM, &pBwGroup->Lock); AssertRC(rc);
-}
-
-
-static void pdmNsFilterUnlink(PPDMNSFILTER pFilter)
-{
-    PPDMNSBWGROUP pBwGroup = pFilter->pBwGroupR3;
-    /*
-     * We need to make sure we hold the shaper lock since pdmNsBwGroupXmitPending()
-     * does not hold the bandwidth group lock while iterating over the list
-     * of group's filters.
-     */
-    AssertPtr(pBwGroup);
-    AssertPtr(pBwGroup->pShaperR3);
-    Assert(RTCritSectIsOwner(&pBwGroup->pShaperR3->Lock));
-    PVM const pVM = pBwGroup->pShaperR3->pVM;
-    int rc = PDMCritSectEnter(pVM, &pBwGroup->Lock, VERR_SEM_BUSY); AssertRC(rc);
-
-    if (pFilter == pBwGroup->pFiltersHeadR3)
-        pBwGroup->pFiltersHeadR3 = pFilter->pNextR3;
-    else
-    {
-        PPDMNSFILTER pPrev = pBwGroup->pFiltersHeadR3;
-        while (   pPrev
-               && pPrev->pNextR3 != pFilter)
-            pPrev = pPrev->pNextR3;
-
-        AssertPtr(pPrev);
-        pPrev->pNextR3 = pFilter->pNextR3;
-    }
-
-    rc = PDMCritSectLeave(pVM, &pBwGroup->Lock); AssertRC(rc);
-}
-
-
-/**
- * Attach network filter driver from bandwidth group.
+/**
+ * Detaches a network filter driver from its current bandwidth group (if any).
  *
  * @returns VBox status code.
- * @param   pUVM            The user mode VM structure.
+ * @param   pVM             The cross context VM structure.
  * @param   pDrvIns         The driver instance.
- * @param   pszBwGroup      Name of the bandwidth group to attach to.
- * @param   pFilter         Pointer to the filter we attach.
- */
-VMMR3_INT_DECL(int) PDMR3NsAttach(PUVM pUVM, PPDMDRVINS pDrvIns, const char *pszBwGroup, PPDMNSFILTER pFilter)
-{
-    VM_ASSERT_EMT(pUVM->pVM);
-    AssertPtrReturn(pFilter, VERR_INVALID_POINTER);
-    AssertReturn(pFilter->pBwGroupR3 == NULL, VERR_ALREADY_EXISTS);
-    RT_NOREF_PV(pDrvIns);
-
-    PPDMNETSHAPER pShaper = pUVM->pdm.s.pNetShaper;
-    LOCK_NETSHAPER_RETURN(pShaper);
-
-    int             rc          = VINF_SUCCESS;
-    PPDMNSBWGROUP   pBwGroupNew = NULL;
-    if (pszBwGroup)
-    {
-        pBwGroupNew = pdmNsBwGroupFindById(pShaper, pszBwGroup);
-        if (pBwGroupNew)
-            pdmNsBwGroupRef(pBwGroupNew);
-        else
-            rc = VERR_NOT_FOUND;
-    }
-
-    if (RT_SUCCESS(rc))
-    {
-        PPDMNSBWGROUP pBwGroupOld = ASMAtomicXchgPtrT(&pFilter->pBwGroupR3, pBwGroupNew, PPDMNSBWGROUP);
-        ASMAtomicWritePtr(&pFilter->pBwGroupR0, MMHyperR3ToR0(pUVM->pVM, pBwGroupNew));
-        if (pBwGroupOld)
-            pdmNsBwGroupUnref(pBwGroupOld);
-        pdmNsFilterLink(pFilter);
-    }
-
-    UNLOCK_NETSHAPER(pShaper);
-    return rc;
-}
-
-
-/**
- * Detach network filter driver from bandwidth group.
- *
- * @returns VBox status code.
- * @param   pUVM            The user mode VM handle.
- * @param   pDrvIns         The driver instance.
- * @param   pFilter         Pointer to the filter we detach.
- */
-VMMR3_INT_DECL(int) PDMR3NsDetach(PUVM pUVM, PPDMDRVINS pDrvIns, PPDMNSFILTER pFilter)
-{
-    RT_NOREF_PV(pDrvIns);
-    VM_ASSERT_EMT(pUVM->pVM);
+ * @param   pFilter         Pointer to the filter to detach.
+ */
+VMMR3_INT_DECL(int) PDMR3NsDetach(PVM pVM, PPDMDRVINS pDrvIns, PPDMNSFILTER pFilter)
+{
+    /*
+     * Validate input.
+     */
+    RT_NOREF(pDrvIns);
+    VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
     AssertPtrReturn(pFilter, VERR_INVALID_POINTER);
 
     /* Now, return quietly if the filter isn't attached since driver/device
        destructors are called on constructor failure. */
-    if (!pFilter->pBwGroupR3)
+    uint32_t const iGroup = ASMAtomicUoReadU32(&pFilter->iGroup);
+    if (!iGroup)
         return VINF_SUCCESS;
-    AssertPtrReturn(pFilter->pBwGroupR3, VERR_INVALID_POINTER);
-
-    PPDMNETSHAPER pShaper = pUVM->pdm.s.pNetShaper;
-    LOCK_NETSHAPER_RETURN(pShaper);
-
-    pdmNsFilterUnlink(pFilter);
-    PPDMNSBWGROUP pBwGroup = ASMAtomicXchgPtrT(&pFilter->pBwGroupR3, NULL, PPDMNSBWGROUP);
-    if (pBwGroup)
-        pdmNsBwGroupUnref(pBwGroup);
-
-    UNLOCK_NETSHAPER(pShaper);
-    return VINF_SUCCESS;
+    AssertMsgReturn(iGroup - 1 < RT_MIN(pVM->pdm.s.cNsGroups, RT_ELEMENTS(pVM->pdm.s.aNsGroups)), ("iGroup=%#x\n", iGroup),
+                    VERR_INVALID_HANDLE);
+    PPDMNSBWGROUP const pGroup = &pVM->pdm.s.aNsGroups[iGroup - 1];
+
+    /*
+     * The detaching is protected by PDM::NsLock and by atomically updating iGroup.
+     */
+    int rc = RTCritSectEnter(&pVM->pdm.s.NsLock);
+    if (RT_SUCCESS(rc))
+    {
+        if (ASMAtomicCmpXchgU32(&pFilter->iGroup, 0, iGroup))
+        {
+            Assert(pdmR3NsIsFilterAttached(pGroup, pFilter));
+            RTListNodeRemove(&pFilter->ListEntry);
+            Assert(pFilter->ListEntry.pNext == NULL);
+            Assert(pFilter->ListEntry.pPrev == NULL);
+            ASMAtomicWriteU32(&pFilter->iGroup, 0);
+
+            uint32_t cRefs = ASMAtomicDecU32(&pGroup->cRefs);
+            Assert(cRefs < _16K);
+
+            LogFlow(("PDMR3NsDetach: Detached '%s'/%u from %s (cRefs=%u)\n",
+                     pDrvIns->pReg->szName, pDrvIns->iInstance, pGroup->szName, cRefs));
+            rc = VINF_SUCCESS;
+        }
+        else
+            AssertFailedStmt(rc = VERR_WRONG_ORDER);
+
+        int rc2 = RTCritSectLeave(&pVM->pdm.s.NsLock);
+        AssertRC(rc2);
+    }
+    else
+        AssertRC(rc);
+    return rc;
+}
+
+
+/**
+ * This is used both by pdmR3NsTxThread and PDMR3NsBwGroupSetLimit,
+ * the latter only when setting cbPerSecMax to zero.
+ *
+ * @param   pGroup      The group which filters should be unchoked.
+ * @note    Caller owns the PDM::NsLock critsect.
+ */
+static void pdmR3NsUnchokeGroupFilters(PPDMNSBWGROUP pGroup)
+{
+    PPDMNSFILTER pFilter;
+    RTListForEach(&pGroup->FilterList, pFilter, PDMNSFILTER, ListEntry)
+    {
+        bool fChoked = ASMAtomicXchgBool(&pFilter->fChoked, false);
+        if (fChoked)
+        {
+            PPDMINETWORKDOWN pIDrvNet = pFilter->pIDrvNetR3;
+            if (pIDrvNet && pIDrvNet->pfnXmitPending != NULL)
+            {
+                Log3(("pdmR3NsUnchokeGroupFilters: Unchoked %p in %s, calling %p\n",
+                      pFilter, pGroup->szName, pIDrvNet->pfnXmitPending));
+                pIDrvNet->pfnXmitPending(pIDrvNet);
+            }
+            else
+                Log3(("pdmR3NsUnchokeGroupFilters: Unchoked %p in %s (no callback)\n", pFilter, pGroup->szName));
+        }
+    }
+}
+
+
+/**
+ * Worker for PDMR3NsBwGroupSetLimit and pdmR3NetShaperInit.
+ *
+ * @returns New bucket size.
+ * @param   pGroup          The group to update.
+ * @param   cbPerSecMax     The new max bytes per second.
+ */
+static uint32_t pdmNsBwGroupSetLimit(PPDMNSBWGROUP pGroup, uint64_t cbPerSecMax)
+{
+    uint32_t const cbRet = RT_MAX(PDM_NETSHAPER_MIN_BUCKET_SIZE, cbPerSecMax * PDM_NETSHAPER_MAX_LATENCY / RT_MS_1SEC);
+    pGroup->cbBucket    = cbRet;
+    pGroup->cbPerSecMax = cbPerSecMax;
+    LogFlow(("pdmNsBwGroupSetLimit: New rate limit is %#RX64 bytes per second, adjusted bucket size to %#x bytes\n",
+             cbPerSecMax, cbRet));
+    return cbRet;
 }
 

@@ -371 +251 @@
  *
  * @returns VBox status code.
- * @param   pUVM            The user mode VM handle.
- * @param   pszBwGroup      Name of the bandwidth group to attach to.
- * @param   cbPerSecMax     Maximum number of bytes per second to be transmitted.
- */
-VMMR3DECL(int) PDMR3NsBwGroupSetLimit(PUVM pUVM, const char *pszBwGroup, uint64_t cbPerSecMax)
-{
+ * @param   pUVM            The user mode VM handle.
+ * @param   pszName         Name of the bandwidth group to attach to.
+ * @param   cbPerSecMax     Maximum number of bytes per second to be transmitted.
+ */
+VMMR3DECL(int) PDMR3NsBwGroupSetLimit(PUVM pUVM, const char *pszName, uint64_t cbPerSecMax)
+{
+    /*
+     * Validate input.
+     */
     UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
-    PPDMNETSHAPER pShaper = pUVM->pdm.s.pNetShaper;
-    LOCK_NETSHAPER_RETURN(pShaper);
+    PVM const pVM = pUVM->pVM;
+    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
 
     int rc;
-    PPDMNSBWGROUP pBwGroup = pdmNsBwGroupFindById(pShaper, pszBwGroup);
-    if (pBwGroup)
-    {
-        rc = PDMCritSectEnter(pUVM->pVM, &pBwGroup->Lock, VERR_SEM_BUSY); AssertRC(rc);
+    PPDMNSBWGROUP pGroup = pdmNsBwGroupFindByName(pVM, pszName);
+    if (pGroup)
+    {
+        /*
+         * Lock the group while we effect the changes.
+         */
+        rc = PDMCritSectEnter(pVM, &pGroup->Lock, VERR_IGNORED);
         if (RT_SUCCESS(rc))
         {
-            pdmNsBwGroupSetLimit(pBwGroup, cbPerSecMax);
+            uint32_t const cbBucket = pdmNsBwGroupSetLimit(pGroup, cbPerSecMax);
 
             /* Drop extra tokens */
-            if (pBwGroup->cbTokensLast > pBwGroup->cbBucket)
-                pBwGroup->cbTokensLast = pBwGroup->cbBucket;
-
-            int rc2 = PDMCritSectLeave(pUVM->pVM, &pBwGroup->Lock); AssertRC(rc2);
-        }
+            if (pGroup->cbTokensLast > cbBucket)
+                pGroup->cbTokensLast = cbBucket;
+            Log(("PDMR3NsBwGroupSetLimit/%s: cbBucket=%#x cbPerSecMax=%#RX64\n", pGroup->szName, cbBucket, cbPerSecMax));
+
+            int rc2 = PDMCritSectLeave(pVM, &pGroup->Lock);
+            AssertRC(rc2);
+
+            /*
+             * If we disabled the group, we must make sure to unchoke all filter
+             * as the thread will ignore the group from now on.
+             *
+             * We do this after leaving the group lock to keep the locking simple.
+             * Extra pfnXmitPending calls should be harmless, of course ASSUMING
+             * nobody take offence to being called on this thread.
+             */
+            if (cbPerSecMax == 0)
+            {
+                Log(("PDMR3NsBwGroupSetLimit: cbPerSecMax was set to zero, so unchoking filters...\n"));
+                rc = RTCritSectEnter(&pVM->pdm.s.NsLock);
+                AssertRC(rc);
+
+                pdmR3NsUnchokeGroupFilters(pGroup);
+
+                rc2 = RTCritSectLeave(&pVM->pdm.s.NsLock);
+                AssertRC(rc2);
+            }
+        }
+        else
+            AssertRC(rc);
     }
     else
         rc = VERR_NOT_FOUND;
-
-    UNLOCK_NETSHAPER(pShaper);
     return rc;
 }

@@ -414 +322 @@
 static DECLCALLBACK(int) pdmR3NsTxThread(PVM pVM, PPDMTHREAD pThread)
 {
-    RT_NOREF_PV(pVM);
-
-    PPDMNETSHAPER pShaper = (PPDMNETSHAPER)pThread->pvUser;
-    LogFlow(("pdmR3NsTxThread: pShaper=%p\n", pShaper));
+    LogFlow(("pdmR3NsTxThread: pVM=%p\n", pVM));
     while (pThread->enmState == PDMTHREADSTATE_RUNNING)
     {
+        /** @todo r=bird: This sleep is horribly crude and wasteful! (Michael would go nuts if he knew) */
         RTThreadSleep(PDM_NETSHAPER_MAX_LATENCY);
 
-        /* Go over all bandwidth groups/filters calling pfnXmitPending */
-        LOCK_NETSHAPER(pShaper);
-        PPDMNSBWGROUP pBwGroup = pShaper->pBwGroupsHead;
-        while (pBwGroup)
-        {
-            pdmNsBwGroupXmitPending(pBwGroup);
-            pBwGroup = pBwGroup->pNextR3;
-        }
-        UNLOCK_NETSHAPER(pShaper);
+        /*
+         * Go over all bandwidth groups/filters and unchoke their filters.
+         *
+         * We take the main lock here to prevent any detaching or attaching
+         * from taking place while we're traversing the filter lists.
+         */
+        int rc = RTCritSectEnter(&pVM->pdm.s.NsLock);
+        AssertRC(rc);
+
+        size_t const cGroups = RT_MIN(pVM->pdm.s.cNsGroups, RT_ELEMENTS(pVM->pdm.s.aNsGroups));
+        for (size_t i = 0; i < cGroups; i++)
+        {
+            PPDMNSBWGROUP const pGroup = &pVM->pdm.s.aNsGroups[i];
+            if (   pGroup->cRefs > 0
+                && pGroup->cbPerSecMax > 0)
+                pdmR3NsUnchokeGroupFilters(pGroup);
+        }
+
+        rc = RTCritSectLeave(&pVM->pdm.s.NsLock);
+        AssertRC(rc);
     }
     return VINF_SUCCESS;

@@ -444 +361 @@
     LogFlow(("pdmR3NsTxWakeUp: pShaper=%p\n", pThread->pvUser));
     /* Nothing to do */
+    /** @todo r=bird: use a semaphore, this'll cause a PDM_NETSHAPER_MAX_LATENCY/2
+     *        delay every time we pause the VM! Stupid stupid stupid. */
     return VINF_SUCCESS;
 }

@@ -449 +368 @@
 
 /**
- * Terminate the network shaper.
+ * Terminate the network shaper, groups, lock and everything.
  *
  * @returns VBox error code.
  * @param   pVM  The cross context VM structure.
- *
- * @remarks This method destroys all bandwidth group objects.
- */
-int pdmR3NetShaperTerm(PVM pVM)
-{
-    PUVM pUVM = pVM->pUVM;
-    AssertPtrReturn(pUVM, VERR_INVALID_POINTER);
-    PPDMNETSHAPER pShaper = pUVM->pdm.s.pNetShaper;
-    AssertPtrReturn(pShaper, VERR_INVALID_POINTER);
-
-    /* Destroy the bandwidth managers. */
-    PPDMNSBWGROUP pBwGroup = pShaper->pBwGroupsHead;
-    while (pBwGroup)
-    {
-        PPDMNSBWGROUP pFree = pBwGroup;
-        pBwGroup = pBwGroup->pNextR3;
-        pdmNsBwGroupTerminate(pVM, pFree);
-        MMR3HeapFree(pFree->pszNameR3);
-        MMHyperFree(pVM, pFree);
-    }
-
-    RTCritSectDelete(&pShaper->Lock);
-    MMR3HeapFree(pShaper);
-    pUVM->pdm.s.pNetShaper = NULL;
-    return VINF_SUCCESS;
+ */
+void pdmR3NetShaperTerm(PVM pVM)
+{
+    size_t const cGroups = RT_MIN(pVM->pdm.s.cNsGroups, RT_ELEMENTS(pVM->pdm.s.aNsGroups));
+    for (size_t i = 0; i < cGroups; i++)
+    {
+        PPDMNSBWGROUP const pGroup = &pVM->pdm.s.aNsGroups[i];
+        AssertMsg(pGroup->cRefs == 0, ("cRefs=%s '%s'\n", pGroup->cRefs, pGroup->szName));
+        AssertContinue(PDMCritSectIsInitialized(&pGroup->Lock));
+        PDMR3CritSectDelete(pVM, &pGroup->Lock);
+    }
+
+    RTCritSectDelete(&pVM->pdm.s.NsLock);
 }
 

@@ -491 +398 @@
     LogFlow(("pdmR3NetShaperInit: pVM=%p\n", pVM));
     VM_ASSERT_EMT(pVM);
-    PUVM pUVM = pVM->pUVM;
-    AssertMsgReturn(!pUVM->pdm.s.pNetShaper, ("Network shaper was already initialized\n"), VERR_WRONG_ORDER);
-
-    PPDMNETSHAPER pShaper;
-    int rc = MMR3HeapAllocZEx(pVM, MM_TAG_PDM_NET_SHAPER, sizeof(PDMNETSHAPER), (void **)&pShaper);
+
+    /*
+     * Initialize the critical section protecting attaching, detaching and unchoking.
+     *
+     * This is a non-recursive lock to make sure nobody tries to mess with the groups
+     * from the pfnXmitPending callback.
+     */
+    int rc = RTCritSectInitEx(&pVM->pdm.s.NsLock, RTCRITSECT_FLAGS_NO_NESTING,
+                              NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, "PDMNetShaper");
+    AssertRCReturn(rc, rc);
+
+    /*
+     * Initialize all bandwidth groups.
+     */
+    PCFGMNODE pCfgNetShaper = CFGMR3GetChild(CFGMR3GetChild(CFGMR3GetRoot(pVM), "PDM"), "NetworkShaper");
+    PCFGMNODE pCfgBwGrp     = CFGMR3GetChild(pCfgNetShaper, "BwGroups");
+    if (pCfgBwGrp)
+    {
+        uint32_t iGroup = 0;
+        for (PCFGMNODE pCur = CFGMR3GetFirstChild(pCfgBwGrp); pCur; pCur = CFGMR3GetNextChild(pCur))
+        {
+            /*
+             * Get the config data.
+             */
+            size_t cchName = CFGMR3GetNameLen(pCur);
+            AssertBreakStmt(cchName <= PDM_NET_SHAPER_MAX_NAME_LEN,
+                            rc = VMR3SetError(pVM->pUVM, VERR_INVALID_NAME, RT_SRC_POS,
+                                              N_("Network shaper group name #%u is too long: %zu, max %u"),
+                                              iGroup, cchName, PDM_NET_SHAPER_MAX_NAME_LEN));
+            char szName[PDM_NET_SHAPER_MAX_NAME_LEN + 1];
+            rc = CFGMR3GetName(pCur, szName, sizeof(szName));
+            AssertRCBreak(rc);
+            AssertBreakStmt(szName[0] != '\0',
+                            rc = VMR3SetError(pVM->pUVM, VERR_INVALID_NAME, RT_SRC_POS,
+                                              N_("Empty network shaper group name #%u"), iGroup));
+
+            uint64_t cbMax;
+            rc = CFGMR3QueryU64(pCur, "Max", &cbMax);
+            AssertRCBreakStmt(rc, rc = VMR3SetError(pVM->pUVM, rc, RT_SRC_POS,
+                                                    N_("Failed to read 'Max' value for network shaper group '%s': %Rrc"),
+                                                    szName, rc));
+
+            /*
+             * Initialize the group table entry.
+             */
+            AssertBreakStmt(iGroup < RT_ELEMENTS(pVM->pdm.s.aNsGroups),
+                            rc = VMR3SetError(pVM->pUVM, VERR_TOO_MUCH_DATA, RT_SRC_POS, N_("Too many bandwidth groups (max %zu)"),
+                                              RT_ELEMENTS(pVM->pdm.s.aNsGroups)));
+
+            rc = PDMR3CritSectInit(pVM, &pVM->pdm.s.aNsGroups[iGroup].Lock, RT_SRC_POS, "BWGRP%02u-%s", iGroup, szName);
+            AssertRCBreak(rc);
+
+            RTListInit(&pVM->pdm.s.aNsGroups[iGroup].FilterList);
+            pVM->pdm.s.aNsGroups[iGroup].cRefs = 0;
+            RTStrCopy(pVM->pdm.s.aNsGroups[iGroup].szName, sizeof(pVM->pdm.s.aNsGroups[iGroup].szName), szName);
+            pVM->pdm.s.aNsGroups[iGroup].cbTokensLast  = pdmNsBwGroupSetLimit(&pVM->pdm.s.aNsGroups[iGroup], cbMax);
+            pVM->pdm.s.aNsGroups[iGroup].tsUpdatedLast = RTTimeSystemNanoTS();
+            LogFlowFunc(("PDM NetShaper Group #%u: %s - cbPerSecMax=%#RU64 cbBucket=%#x\n",
+                         iGroup, pVM->pdm.s.aNsGroups[iGroup].szName, pVM->pdm.s.aNsGroups[iGroup].cbPerSecMax,
+                         pVM->pdm.s.aNsGroups[iGroup].cbBucket));
+
+            pVM->pdm.s.cNsGroups = ++iGroup;
+        }
+    }
     if (RT_SUCCESS(rc))
     {
-        PCFGMNODE pCfgNetShaper = CFGMR3GetChild(CFGMR3GetChild(CFGMR3GetRoot(pVM), "PDM"), "NetworkShaper");
-
-        pShaper->pVM = pVM;
-        rc = RTCritSectInit(&pShaper->Lock);
+        /*
+         * Create the transmit thread.
+         */
+        rc = PDMR3ThreadCreate(pVM, &pVM->pdm.s.pNsTxThread, NULL, pdmR3NsTxThread, pdmR3NsTxWakeUp,
+                               0 /*cbStack*/, RTTHREADTYPE_IO, "PDMNsTx");
         if (RT_SUCCESS(rc))
         {
-            /* Create all bandwidth groups. */
-            PCFGMNODE pCfgBwGrp = CFGMR3GetChild(pCfgNetShaper, "BwGroups");
-            if (pCfgBwGrp)
-            {
-                for (PCFGMNODE pCur = CFGMR3GetFirstChild(pCfgBwGrp); pCur; pCur = CFGMR3GetNextChild(pCur))
-                {
-                    size_t cbName = CFGMR3GetNameLen(pCur) + 1;
-                    char *pszBwGrpId = (char *)RTMemAllocZ(cbName);
-                    if (pszBwGrpId)
-                    {
-                        rc = CFGMR3GetName(pCur, pszBwGrpId, cbName);
-                        if (RT_SUCCESS(rc))
-                        {
-                            uint64_t cbMax;
-                            rc = CFGMR3QueryU64(pCur, "Max", &cbMax);
-                            if (RT_SUCCESS(rc))
-                                rc = pdmNsBwGroupCreate(pShaper, pszBwGrpId, cbMax);
-                        }
-                        RTMemFree(pszBwGrpId);
-                    }
-                    else
-                        rc = VERR_NO_MEMORY;
-                    if (RT_FAILURE(rc))
-                        break;
-                }
-            }
-
-            if (RT_SUCCESS(rc))
-            {
-                rc = PDMR3ThreadCreate(pVM, &pShaper->pTxThread, pShaper, pdmR3NsTxThread, pdmR3NsTxWakeUp,
-                                       0 /*cbStack*/, RTTHREADTYPE_IO, "PDMNsTx");
-                if (RT_SUCCESS(rc))
-                {
-                    pUVM->pdm.s.pNetShaper = pShaper;
-                    return VINF_SUCCESS;
-                }
-            }
-
-            RTCritSectDelete(&pShaper->Lock);
-        }
-
-        MMR3HeapFree(pShaper);
-    }
-
-    LogFlow(("pdmR3NetShaperInit: pVM=%p rc=%Rrc\n", pVM, rc));
+            LogFlowFunc(("returns VINF_SUCCESS\n"));
+            return VINF_SUCCESS;
+        }
+    }
+
+    RTCritSectDelete(&pVM->pdm.s.NsLock);
+    LogRel(("pdmR3NetShaperInit: failed rc=%Rrc\n", rc));
     return rc;
 }
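The new attach/detach protocol above keys everything off a single 1-based group index in the filter (iGroup, 0 = not attached) that is flipped with a compare-and-swap while PDM::NsLock is held. Reduced to just the atomic part, the idea is roughly this (a C11 sketch with made-up names; the list manipulation and locking are omitted):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* iGroup == 0 means "not attached"; otherwise it is the 1-based index into
       the VM's fixed bandwidth-group table (sketch of the scheme, not PDM code). */
    typedef struct FILTERSKETCH
    {
        _Atomic uint32_t iGroup;
    } FILTERSKETCH;

    static bool filterAttach(FILTERSKETCH *pFilter, uint32_t iGroupOneBased)
    {
        uint32_t uExpected = 0;
        /* Only succeeds if the filter was not attached yet (iGroup was 0). */
        return atomic_compare_exchange_strong(&pFilter->iGroup, &uExpected, iGroupOneBased);
    }

    static bool filterDetach(FILTERSKETCH *pFilter, uint32_t iGroupOneBased)
    {
        uint32_t uExpected = iGroupOneBased;
        /* Only succeeds if the filter is still attached to that exact group. */
        return atomic_compare_exchange_strong(&pFilter->iGroup, &uExpected, 0);
    }

Because readers such as the allocation code in PDMAllNetShaper.cpp only consume this integer index into the fixed aNsGroups array, there is no ring-0/ring-3 group pointer left to go stale, which is what the removed pBwGroupR3/pBwGroupR0 juggling previously had to guard against.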
trunk/src/VBox/VMM/include/PDMInternal.h
r93609 → r93628

@@ -1227 +1227 @@
 
 
+/** @name PDM Network Shaper
+ * @{ */
+
+/**
+ * Bandwidth group.
+ */
+typedef struct PDMNSBWGROUP
+{
+    /** Critical section protecting all members below. */
+    PDMCRITSECT         Lock;
+    /** List of filters in this group (PDMNSFILTER). */
+    RTLISTANCHORR3      FilterList;
+    /** Reference counter - How many filters are associated with this group. */
+    volatile uint32_t   cRefs;
+    uint32_t            uPadding1;
+    /** The group name. */
+    char                szName[PDM_NET_SHAPER_MAX_NAME_LEN + 1];
+    /** Maximum number of bytes filters are allowed to transfer. */
+    volatile uint64_t   cbPerSecMax;
+    /** Number of bytes we are allowed to transfer in one burst. */
+    volatile uint32_t   cbBucket;
+    /** Number of bytes we were allowed to transfer at the last update. */
+    volatile uint32_t   cbTokensLast;
+    /** Timestamp of the last update */
+    volatile uint64_t   tsUpdatedLast;
+    /** Pad the structure to a multiple of 64 bytes. */
+    uint64_t            au64Padding[2];
+} PDMNSBWGROUP;
+AssertCompileSizeAlignment(PDMNSBWGROUP, 64);
+/** Pointer to a bandwidth group. */
+typedef PDMNSBWGROUP *PPDMNSBWGROUP;
+
+/* @} */
+
+
 /**
  * Queue device helper task operation.

@@ -1507 +1542 @@
     /** @} */
 
+    /** @name Network Shaper
+     * @{ */
+    /** Pending TX thread. */
+    PPDMTHREAD                      pNsTxThread;
+    uint32_t                        au32Padding[1+8];
+    /** Number of network shaper groups.
+     * @note Marked volatile to prevent re-reading after validation. */
+    uint32_t volatile               cNsGroups;
+    /** The network shaper groups. */
+    PDMNSBWGROUP                    aNsGroups[PDM_NET_SHAPER_MAX_GROUPS];
+    /** Critical section protecting attaching, detaching and unchoking.
+     * This helps making sure pNsTxThread can do unchoking w/o needing to lock the
+     * individual groups and cause unnecessary contention. */
+    RTCRITSECT                      NsLock;
+    /** @} */
+
     /** Number of times a critical section leave request needed to be queued for ring-3 execution. */
     STAMCOUNTER                     StatQueuedCritSectLeaves;

@@ -1529 +1580 @@
 AssertCompileMemberAlignment(PDM, CritSect, 8);
 AssertCompileMemberAlignment(PDM, aTaskSets, 64);
+AssertCompileMemberAlignment(PDM, aNsGroups, 8);
+AssertCompileMemberAlignment(PDM, aNsGroups, 16);
+AssertCompileMemberAlignment(PDM, aNsGroups, 32);
+AssertCompileMemberAlignment(PDM, aNsGroups, 64);
 AssertCompileMemberAlignment(PDM, StatQueuedCritSectLeaves, 8);
 AssertCompileMemberAlignment(PDM, GCPhysVMMDevHeap, sizeof(RTGCPHYS));

@@ -1587 +1642 @@
     /** Global block cache data. */
     R3PTRTYPE(PPDMBLKCACHEGLOBAL)   pBlkCacheGlobal;
-#ifdef VBOX_WITH_NETSHAPER
-    /** Pointer to network shaper instance. */
-    R3PTRTYPE(PPDMNETSHAPER)        pNetShaper;
-#endif /* VBOX_WITH_NETSHAPER */
-
 } PDMUSERPERVM;
 /** Pointer to the PDM data kept in the UVM. */

@@ -1734 +1784 @@
 # ifdef VBOX_WITH_NETSHAPER
 int         pdmR3NetShaperInit(PVM pVM);
-int         pdmR3NetShaperTerm(PVM pVM);
+void        pdmR3NetShaperTerm(PVM pVM);
 # endif