Changeset 33218 in vbox for trunk/src/VBox/VMM
- Timestamp: Oct 18, 2010 7:54:10 PM
- svn:sync-xref-src-repo-rev: 66762
- Location: trunk/src/VBox/VMM
- Files: 5 edited
Legend: unchanged context lines are shown with a leading space, added lines are prefixed with '+', removed lines with '-', and '…' marks elided unchanged code.
trunk/src/VBox/VMM/PDMAsyncCompletion.cpp
(r31183 → r33218)

 #include <iprt/tcp.h>
 #include <iprt/path.h>
+#include <iprt/string.h>

 #include <VBox/pdmasynccompletion.h>
…
 } PDMASYNCCOMPLETIONTEMPLATE;

+/**
+ * Bandwidth control manager instance data
+ */
+typedef struct PDMACBWMGR
+{
+    /** Pointer to the next manager in the list. */
+    struct PDMACBWMGR          *pNext;
+    /** Pointer to the shared UVM structure. */
+    PPDMASYNCCOMPLETIONEPCLASS  pEpClass;
+    /** Identifer of the manager. */
+    char                       *pszId;
+    /** Maximum number of bytes the endpoints are allowed to transfer (Max is 4GB/s currently) */
+    volatile uint32_t           cbTransferPerSecMax;
+    /** Number of bytes we start with */
+    volatile uint32_t           cbTransferPerSecStart;
+    /** Step after each update */
+    volatile uint32_t           cbTransferPerSecStep;
+    /** Number of bytes we are allowed to transfer till the next update.
+     * Reset by the refresh timer. */
+    volatile uint32_t           cbTransferAllowed;
+    /** Timestamp of the last update */
+    volatile uint64_t           tsUpdatedLast;
+    /** Reference counter - How many endpoints are associated with this manager. */
+    volatile uint32_t           cRefs;
+} PDMACBWMGR;
+/** Pointer to a bandwidth control manager pointer. */
+typedef PPDMACBWMGR *PPPDMACBWMGR;
+
 static void pdmR3AsyncCompletionPutTask(PPDMASYNCCOMPLETIONENDPOINT pEndpoint, PPDMASYNCCOMPLETIONTASK pTask);

…
 }

+
+static PPDMACBWMGR pdmacBwMgrFindById(PPDMASYNCCOMPLETIONEPCLASS pEpClass, const char *pcszId)
+{
+    PPDMACBWMGR pBwMgr = NULL;
+
+    if (VALID_PTR(pcszId))
+    {
+        int rc;
+        rc = RTCritSectEnter(&pEpClass->CritSect);
+        AssertRC(rc);
+
+        pBwMgr = pEpClass->pBwMgrsHead;
+        while (   pBwMgr
+               && RTStrCmp(pBwMgr->pszId, pcszId))
+            pBwMgr = pBwMgr->pNext;
+
+        rc = RTCritSectLeave(&pEpClass->CritSect);
+        AssertRC(rc);
+    }
+
+    return pBwMgr;
+}
+
+static void pdmacBwMgrLink(PPDMACBWMGR pBwMgr)
+{
+    PPDMASYNCCOMPLETIONEPCLASS pEpClass = pBwMgr->pEpClass;
+    int rc;
+
+    rc = RTCritSectEnter(&pEpClass->CritSect);
+    AssertRC(rc);
+
+    pBwMgr->pNext = pEpClass->pBwMgrsHead;
+    pEpClass->pBwMgrsHead = pBwMgr;
+
+    rc = RTCritSectLeave(&pEpClass->CritSect);
+    AssertRC(rc);
+}
+
+static void pdmacBwMgrUnlink(PPDMACBWMGR pBwMgr)
+{
+    int rc;
+    PPDMASYNCCOMPLETIONEPCLASS pEpClass = pBwMgr->pEpClass;
+
+    rc = RTCritSectEnter(&pEpClass->CritSect);
+    AssertRC(rc);
+
+    if (pBwMgr == pEpClass->pBwMgrsHead)
+        pEpClass->pBwMgrsHead = pBwMgr->pNext;
+    else
+    {
+        PPDMACBWMGR pPrev = pEpClass->pBwMgrsHead;
+        while (   pPrev
+               && pPrev->pNext != pBwMgr)
+            pPrev = pPrev->pNext;
+
+        AssertPtr(pPrev);
+        pPrev->pNext = pBwMgr->pNext;
+    }
+
+    rc = RTCritSectLeave(&pEpClass->CritSect);
+    AssertRC(rc);
+}
+
+static int pdmacAsyncCompletionBwMgrCreate(PPDMASYNCCOMPLETIONEPCLASS pEpClass, const char *pcszBwMgr, uint32_t cbTransferPerSecMax,
+                                           uint32_t cbTransferPerSecStart, uint32_t cbTransferPerSecStep)
+{
+    int rc = VINF_SUCCESS;
+    PPDMACBWMGR pBwMgr;
+
+    LogFlowFunc(("pEpClass=%#p pcszBwMgr=%#p{%s} cbTransferPerSecMax=%u cbTransferPerSecStart=%u cbTransferPerSecStep=%u\n",
+                 pEpClass, pcszBwMgr, cbTransferPerSecMax, cbTransferPerSecStart, cbTransferPerSecStep));
+
+    AssertPtrReturn(pEpClass, VERR_INVALID_POINTER);
+    AssertPtrReturn(pcszBwMgr, VERR_INVALID_POINTER);
+    AssertReturn(*pcszBwMgr != '\0', VERR_INVALID_PARAMETER);
+
+    pBwMgr = pdmacBwMgrFindById(pEpClass, pcszBwMgr);
+    if (!pBwMgr)
+    {
+        rc = MMR3HeapAllocZEx(pEpClass->pVM, MM_TAG_PDM_ASYNC_COMPLETION,
+                              sizeof(PDMACBWMGR),
+                              (void **)&pBwMgr);
+        if (RT_SUCCESS(rc))
+        {
+            pBwMgr->pszId = RTStrDup(pcszBwMgr);
+            if (pBwMgr->pszId)
+            {
+                pBwMgr->pEpClass = pEpClass;
+                pBwMgr->cRefs    = 0;
+
+                /* Init I/O flow control. */
+                pBwMgr->cbTransferPerSecMax   = cbTransferPerSecMax;
+                pBwMgr->cbTransferPerSecStart = cbTransferPerSecStart;
+                pBwMgr->cbTransferPerSecStep  = cbTransferPerSecStep;
+
+                pBwMgr->cbTransferAllowed = pBwMgr->cbTransferPerSecStart;
+                pBwMgr->tsUpdatedLast     = RTTimeSystemNanoTS();
+
+                pdmacBwMgrLink(pBwMgr);
+                rc = VINF_SUCCESS;
+            }
+            else
+            {
+                rc = VERR_NO_MEMORY;
+                MMR3HeapFree(pBwMgr);
+            }
+        }
+    }
+    else
+        rc = VERR_ALREADY_EXISTS;
+
+    LogFlowFunc(("returns rc=%Rc\n", rc));
+    return rc;
+}
+
+DECLINLINE(void) pdmacBwMgrRef(PPDMACBWMGR pBwMgr)
+{
+    ASMAtomicIncU32(&pBwMgr->cRefs);
+}
+
+DECLINLINE(void) pdmacBwMgrUnref(PPDMACBWMGR pBwMgr)
+{
+    Assert(pBwMgr->cRefs > 0);
+    ASMAtomicDecU32(&pBwMgr->cRefs);
+}
+
+bool pdmacEpIsTransferAllowed(PPDMASYNCCOMPLETIONENDPOINT pEndpoint, uint32_t cbTransfer, RTMSINTERVAL *pmsWhenNext)
+{
+    bool fAllowed = true;
+    PPDMACBWMGR pBwMgr = ASMAtomicReadPtrT(&pEndpoint->pBwMgr, PPDMACBWMGR);
+
+    LogFlowFunc(("pEndpoint=%p pBwMgr=%p cbTransfer=%u\n", pEndpoint, pBwMgr, cbTransfer));
+
+    if (pBwMgr)
+    {
+        uint32_t cbOld = ASMAtomicSubU32(&pBwMgr->cbTransferAllowed, cbTransfer);
+        if (RT_LIKELY(cbOld >= cbTransfer))
+            fAllowed = true;
+        else
+        {
+            fAllowed = false;
+
+            /* We are out of ressources Check if we can update again. */
+            uint64_t tsNow         = RTTimeSystemNanoTS();
+            uint64_t tsUpdatedLast = ASMAtomicUoReadU64(&pBwMgr->tsUpdatedLast);
+
+            if (tsNow - tsUpdatedLast >= (1000*1000*1000))
+            {
+                if (ASMAtomicCmpXchgU64(&pBwMgr->tsUpdatedLast, tsNow, tsUpdatedLast))
+                {
+                    if (pBwMgr->cbTransferPerSecStart < pBwMgr->cbTransferPerSecMax)
+                    {
+                        pBwMgr->cbTransferPerSecStart = RT_MIN(pBwMgr->cbTransferPerSecMax, pBwMgr->cbTransferPerSecStart + pBwMgr->cbTransferPerSecStep);
+                        LogFlow(("AIOMgr: Increasing maximum bandwidth to %u bytes/sec\n", pBwMgr->cbTransferPerSecStart));
+                    }
+
+                    /* Update */
+                    ASMAtomicWriteU32(&pBwMgr->cbTransferAllowed, pBwMgr->cbTransferPerSecStart - cbTransfer);
+                    fAllowed = true;
+                    LogFlow(("AIOMgr: Refreshed bandwidth\n"));
+                }
+            }
+            else
+            {
+                ASMAtomicAddU32(&pBwMgr->cbTransferAllowed, cbTransfer);
+                *pmsWhenNext = ((1000*1000*1000) - (tsNow - tsUpdatedLast)) / (1000*1000);
+            }
+        }
+    }
+
+    LogFlowFunc(("fAllowed=%RTbool\n", fAllowed));
+
+    return fAllowed;
+}
+
 void pdmR3AsyncCompletionCompleteTask(PPDMASYNCCOMPLETIONTASK pTask, int rc, bool fCallCompletionHandler)
 {
…
     if (RT_SUCCESS(rc))
     {
-        PUVM pUVM = pVM->pUVM;
-        AssertMsg(!pUVM->pdm.s.apAsyncCompletionEndpointClass[pEpClassOps->enmClassType],
-                  ("Endpoint class was already initialized\n"));
-
-        pUVM->pdm.s.apAsyncCompletionEndpointClass[pEpClassOps->enmClassType] = pEndpointClass;
-        LogFlowFunc((": Initialized endpoint class \"%s\" rc=%Rrc\n", pEpClassOps->pcszName, rc));
-        return VINF_SUCCESS;
+        /* Create all bandwidth groups for resource control. */
+        PCFGMNODE pCfgBwGrp = CFGMR3GetChild(pCfgNodeClass, "BwGroups");
+
+        if (pCfgBwGrp)
+        {
+            for (PCFGMNODE pCur = CFGMR3GetFirstChild(pCfgBwGrp); pCur; pCur = CFGMR3GetNextChild(pCur))
+            {
+                uint32_t cbMax, cbStart, cbStep;
+                size_t cchName = CFGMR3GetNameLen(pCur) + 1;
+                char *pszBwGrpId = (char *)RTMemAllocZ(cchName);
+
+                if (!pszBwGrpId)
+                {
+                    rc = VERR_NO_MEMORY;
+                    break;
+                }
+
+                rc = CFGMR3GetName(pCur, pszBwGrpId, cchName);
+                AssertRC(rc);
+
+                if (RT_SUCCESS(rc))
+                    rc = CFGMR3QueryU32(pCur, "Start", &cbStart);
+                if (RT_SUCCESS(rc))
+                    rc = CFGMR3QueryU32(pCur, "Max", &cbMax);
+                if (RT_SUCCESS(rc))
+                    rc = CFGMR3QueryU32(pCur, "Step", &cbStep);
+                if (RT_SUCCESS(rc))
+                    rc = pdmacAsyncCompletionBwMgrCreate(pEndpointClass, pszBwGrpId, cbMax, cbStart, cbStep);
+
+                RTMemFree(pszBwGrpId);
+
+                if (RT_FAILURE(rc))
+                    break;
+            }
+        }
+
+        if (RT_SUCCESS(rc))
+        {
+            PUVM pUVM = pVM->pUVM;
+            AssertMsg(!pUVM->pdm.s.apAsyncCompletionEndpointClass[pEpClassOps->enmClassType],
+                      ("Endpoint class was already initialized\n"));
+
+            pUVM->pdm.s.apAsyncCompletionEndpointClass[pEpClassOps->enmClassType] = pEndpointClass;
+            LogFlowFunc((": Initialized endpoint class \"%s\" rc=%Rrc\n", pEpClassOps->pcszName, rc));
+            return VINF_SUCCESS;
+        }
     }
     RTMemCacheDestroy(pEndpointClass->hMemCacheTasks);
…
     PDMR3AsyncCompletionEpClose(pEndpointClass->pEndpointsHead);

+    /* Destroy the bandwidth managers. */
+    PPDMACBWMGR pBwMgr = pEndpointClass->pBwMgrsHead;
+    while (pBwMgr)
+    {
+        PPDMACBWMGR pFree = pBwMgr;
+        pBwMgr = pBwMgr->pNext;
+        MMR3HeapFree(pFree);
+    }
+
     /* Call the termination callback of the class. */
     pEndpointClass->pEndpointOps->pfnTerminate(pEndpointClass);
…

     return VINF_SUCCESS;
+}
+
+/**
+ * Resume worker for the async completion manager.
+ *
+ * @returns nothing.
+ * @param pVM Pointer to the shared VM structure.
+ */
+void pdmR3AsyncCompletionResume(PVM pVM)
+{
+    LogFlowFunc((": pVM=%p\n", pVM));
+    PUVM pUVM = pVM->pUVM;
+
+    /* Log the bandwidth groups and all assigned endpoints. */
+    for (size_t i = 0; i < RT_ELEMENTS(pUVM->pdm.s.apAsyncCompletionEndpointClass); i++)
+        if (pUVM->pdm.s.apAsyncCompletionEndpointClass[i])
+        {
+            PPDMASYNCCOMPLETIONEPCLASS pEpClass = pUVM->pdm.s.apAsyncCompletionEndpointClass[i];
+            PPDMASYNCCOMPLETIONENDPOINT pEp;
+            PPDMACBWMGR pBwMgr = pEpClass->pBwMgrsHead;
+
+            if (pBwMgr)
+                LogRel(("AIOMgr: Bandwidth groups for class '%s'\n", i == PDMASYNCCOMPLETIONEPCLASSTYPE_FILE
+                                                                     ? "File" : "<Unknown>"));
+
+            while (pBwMgr)
+            {
+                LogRel(("AIOMgr: Id: %s\n", pBwMgr->pszId));
+                LogRel(("AIOMgr: Max: %u B/s\n", pBwMgr->cbTransferPerSecMax));
+                LogRel(("AIOMgr: Start: %u B/s\n", pBwMgr->cbTransferPerSecStart));
+                LogRel(("AIOMgr: Step: %u B/s\n", pBwMgr->cbTransferPerSecStep));
+                LogRel(("AIOMgr: Endpoints:\n"));
+
+                pEp = pEpClass->pEndpointsHead;
+                while (pEp)
+                {
+                    if (pEp->pBwMgr == pBwMgr)
+                        LogRel(("AIOMgr: %s\n", pEp->pszUri));
+
+                    pEp = pEp->pNext;
+                }
+
+                pBwMgr = pBwMgr->pNext;
+            }
+
+            /* Print all endpoints without assigned bandwidth groups. */
+            pEp = pEpClass->pEndpointsHead;
+            if (pEp)
+                LogRel(("AIOMgr: Endpoints without assigned bandwidth groups:\n"));
+
+            while (pEp)
+            {
+                if (!pEp->pBwMgr)
+                    LogRel(("AIOMgr: %s\n", pEp->pszUri));
+
+                pEp = pEp->pNext;
+            }
+        }
 }

…
     pEndpoint->pszUri = RTStrDup(pszFilename);
     pEndpoint->cUsers = 1;
+    pEndpoint->pBwMgr = NULL;

 #ifdef VBOX_WITH_STATISTICS
…
 }

+VMMR3DECL(int) PDMR3AsyncCompletionEpSetBwMgr(PPDMASYNCCOMPLETIONENDPOINT pEndpoint,
+                                              const char *pcszBwMgr)
+{
+    int rc = VINF_SUCCESS;
+    AssertReturn(VALID_PTR(pEndpoint), VERR_INVALID_POINTER);
+    PPDMACBWMGR pBwMgrOld = NULL;
+    PPDMACBWMGR pBwMgrNew = NULL;
+
+    if (pcszBwMgr)
+    {
+        pBwMgrNew = pdmacBwMgrFindById(pEndpoint->pEpClass, pcszBwMgr);
+
+        if (pBwMgrNew)
+            pdmacBwMgrRef(pBwMgrNew);
+        else
+            rc = VERR_NOT_FOUND;
+    }
+
+    if (RT_SUCCESS(rc))
+    {
+        pBwMgrOld = ASMAtomicXchgPtrT(&pEndpoint->pBwMgr, pBwMgrNew, PPDMACBWMGR);
+
+        if (pBwMgrOld)
+            pdmacBwMgrUnref(pBwMgrOld);
+    }
+
+    return rc;
+}
+
 VMMR3DECL(int) PDMR3AsyncCompletionTaskCancel(PPDMASYNCCOMPLETIONTASK pTask)
 {
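
The new code above reads the per-class bandwidth groups from a CFGM subtree named "BwGroups" (one child node per group, each with "Start", "Max" and "Step" values in bytes per second) and adds PDMR3AsyncCompletionEpSetBwMgr() to attach an endpoint to a group by its id. The sketch below shows how a caller might wire this up; the helper name, the group name "Limit1" and the assumption that the class configuration node is at hand as pCfgFileClass are illustrative only, and the PDMR3AsyncCompletionEpSetBwMgr() declaration is expected to live in VBox/pdmasynccompletion.h (that header change is not part of this diff). Only the key names and the function call itself come from this changeset.

    #include <VBox/cfgm.h>
    #include <VBox/pdmasynccompletion.h>
    #include <VBox/err.h>
    #include <iprt/cdefs.h>

    /* Hypothetical helper: pCfgFileClass is assumed to be the CFGM node that the file
     * endpoint class later receives as pCfgNodeClass (where "BwGroups" is looked up). */
    static int exampleConfigureBwGroup(PCFGMNODE pCfgFileClass, PPDMASYNCCOMPLETIONENDPOINT pEndpoint)
    {
        PCFGMNODE pBwGroups = NULL;
        PCFGMNODE pGroup    = NULL;

        /* BwGroups/Limit1 with Start, Max and Step in bytes per second. */
        int rc = CFGMR3InsertNode(pCfgFileClass, "BwGroups", &pBwGroups);
        if (RT_SUCCESS(rc))
            rc = CFGMR3InsertNode(pBwGroups, "Limit1", &pGroup);
        if (RT_SUCCESS(rc))
            rc = CFGMR3InsertInteger(pGroup, "Start", 5 * _1M);  /* initial budget:      5 MB/s */
        if (RT_SUCCESS(rc))
            rc = CFGMR3InsertInteger(pGroup, "Max",  10 * _1M);  /* upper limit:        10 MB/s */
        if (RT_SUCCESS(rc))
            rc = CFGMR3InsertInteger(pGroup, "Step",      _1M);  /* ramp-up per second:  1 MB/s */

        /* After the endpoint class has created its managers from this configuration,
         * an endpoint can be attached to the group by its id. */
        if (RT_SUCCESS(rc))
            rc = PDMR3AsyncCompletionEpSetBwMgr(pEndpoint, "Limit1");

        return rc;
    }
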
trunk/src/VBox/VMM/PDMAsyncCompletionFile.cpp
(r31182 → r33218)

 }

-static int pdmacFileBwMgrInitialize(PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile,
-                                    PCFGMNODE pCfgNode, PPPDMACFILEBWMGR ppBwMgr)
-{
-    int rc = VINF_SUCCESS;
-    PPDMACFILEBWMGR pBwMgr = NULL;
-
-    rc = MMR3HeapAllocZEx(pEpClassFile->Core.pVM, MM_TAG_PDM_ASYNC_COMPLETION,
-                          sizeof(PDMACFILEBWMGR),
-                          (void **)&pBwMgr);
-    if (RT_SUCCESS(rc))
-    {
-        /* Init I/O flow control. */
-        rc = CFGMR3QueryU32Def(pCfgNode, "VMTransferPerSecMax", &pBwMgr->cbVMTransferPerSecMax, UINT32_MAX);
-        AssertLogRelRCReturn(rc, rc);
-        rc = CFGMR3QueryU32Def(pCfgNode, "VMTransferPerSecStart", &pBwMgr->cbVMTransferPerSecStart, UINT32_MAX /*5 * _1M*/);
-        AssertLogRelRCReturn(rc, rc);
-        rc = CFGMR3QueryU32Def(pCfgNode, "VMTransferPerSecStep", &pBwMgr->cbVMTransferPerSecStep, _1M);
-        AssertLogRelRCReturn(rc, rc);
-
-        pBwMgr->cbVMTransferAllowed = pBwMgr->cbVMTransferPerSecStart;
-        pBwMgr->tsUpdatedLast = RTTimeSystemNanoTS();
-
-        if (pBwMgr->cbVMTransferPerSecMax != UINT32_MAX)
-            LogRel(("AIOMgr: I/O bandwidth limited to %u bytes/sec\n", pBwMgr->cbVMTransferPerSecMax));
-        else
-            LogRel(("AIOMgr: I/O bandwidth not limited\n"));
-
-        *ppBwMgr = pBwMgr;
-    }
-
-    return rc;
-}
-
-static void pdmacFileBwMgrDestroy(PPDMACFILEBWMGR pBwMgr)
-{
-    MMR3HeapFree(pBwMgr);
-}
-
-static void pdmacFileBwRef(PPDMACFILEBWMGR pBwMgr)
-{
-    pBwMgr->cRefs++;
-}
-
-static void pdmacFileBwUnref(PPDMACFILEBWMGR pBwMgr)
-{
-    Assert(pBwMgr->cRefs > 0);
-    pBwMgr->cRefs--;
-}
-
-bool pdmacFileBwMgrIsTransferAllowed(PPDMACFILEBWMGR pBwMgr, uint32_t cbTransfer)
-{
-    bool fAllowed = true;
-
-    LogFlowFunc(("pBwMgr=%p cbTransfer=%u\n", pBwMgr, cbTransfer));
-
-    if (pBwMgr->cbVMTransferPerSecMax != UINT32_MAX) /* No need to check if bandwidth is unlimited. */
-    {
-        uint32_t cbOld = ASMAtomicSubU32(&pBwMgr->cbVMTransferAllowed, cbTransfer);
-        if (RT_LIKELY(cbOld >= cbTransfer))
-            fAllowed = true;
-        else
-        {
-            fAllowed = false;
-
-            /* We are out of ressources Check if we can update again. */
-            uint64_t tsNow = RTTimeSystemNanoTS();
-            uint64_t tsUpdatedLast = ASMAtomicUoReadU64(&pBwMgr->tsUpdatedLast);
-
-            if (tsNow - tsUpdatedLast >= (1000*1000*1000))
-            {
-                if (ASMAtomicCmpXchgU64(&pBwMgr->tsUpdatedLast, tsNow, tsUpdatedLast))
-                {
-                    if (pBwMgr->cbVMTransferPerSecStart < pBwMgr->cbVMTransferPerSecMax)
-                    {
-                        pBwMgr->cbVMTransferPerSecStart = RT_MIN(pBwMgr->cbVMTransferPerSecMax, pBwMgr->cbVMTransferPerSecStart + pBwMgr->cbVMTransferPerSecStep);
-                        LogFlow(("AIOMgr: Increasing maximum bandwidth to %u bytes/sec\n", pBwMgr->cbVMTransferPerSecStart));
-                    }
-
-                    /* Update */
-                    ASMAtomicWriteU32(&pBwMgr->cbVMTransferAllowed, pBwMgr->cbVMTransferPerSecStart - cbTransfer);
-                    fAllowed = true;
-                    LogFlow(("AIOMgr: Refreshed bandwidth\n"));
-                }
-            }
-            else
-                ASMAtomicAddU32(&pBwMgr->cbVMTransferAllowed, cbTransfer);
-        }
-    }
-
-    LogFlowFunc(("fAllowed=%RTbool\n", fAllowed));
-
-    return fAllowed;
-}
-
 static int pdmacFileMgrTypeFromName(const char *pszVal, PPDMACEPFILEMGRTYPE penmMgrType)
 {
…
         else
             LogRel(("AIOMgr: Cache was globally disabled\n"));
-
-        rc = pdmacFileBwMgrInitialize(pEpClassFile, pCfgNode, &pEpClassFile->pBwMgr);
-        if (RT_FAILURE(rc))
-            RTCritSectDelete(&pEpClassFile->CritSect);
     }

…

     RTCritSectDelete(&pEpClassFile->CritSect);
-    pdmacFileBwMgrDestroy(pEpClassFile->pBwMgr);
 }

…
     pEpFile->pTasksFreeTail = pEpFile->pTasksFreeHead;
     pEpFile->cTasksCached = 0;
-    pEpFile->pBwMgr = pEpClassFile->pBwMgr;
     pEpFile->enmBackendType = enmEpBackend;
     /*
…
     pEpFile->fAsyncFlushSupported = false;
 #endif
-    pdmacFileBwRef(pEpFile->pBwMgr);

     if (enmMgrType == PDMACEPFILEMGRTYPE_SIMPLE)
…
         RTMemFree(pEpFile->AioMgr.pTreeRangesLocked);
         MMR3HeapFree(pEpFile->pTasksFreeHead);
-        pdmacFileBwUnref(pEpFile->pBwMgr);
     }
 }
…
         MMR3HeapFree(pTaskFree);
     }
-
-    /* Remove from the bandwidth manager */
-    pdmacFileBwUnref(pEpFile->pBwMgr);

     /* Destroy the locked ranges tree now. */
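
The removed per-VM manager and the new per-group managers share the same throttling scheme: every request atomically subtracts its size from the remaining allowance, and at most once per second the allowance is refilled while the per-second budget ramps up from the Start value by Step until it reaches Max. The following is a standalone model of just that ramp-up arithmetic with example numbers; it is plain C for illustration, not VirtualBox code.

    #include <stdint.h>
    #include <stdio.h>

    /* Model of the refresh: each one-second update raises the per-second budget by
     * 'step' until 'max' is reached, mirroring the RT_MIN(max, cur + step) update. */
    int main(void)
    {
        uint32_t max  = 10u * 1024 * 1024;  /* 10 MiB/s hard limit        */
        uint32_t cur  =  5u * 1024 * 1024;  /*  5 MiB/s starting budget   */
        uint32_t step =  1u * 1024 * 1024;  /*  1 MiB/s added per refresh */

        for (unsigned sec = 1; cur < max; sec++)
        {
            cur = (cur + step < max) ? cur + step : max; /* RT_MIN(max, cur + step) */
            printf("after %u s: %u bytes/sec allowed\n", sec, cur);
        }
        /* With these numbers the budget reaches the maximum after 5 refreshes. */
        return 0;
    }
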
trunk/src/VBox/VMM/PDMAsyncCompletionFileInternal.h
(r32655 → r33218)


 /**
- * Bandwidth control manager instance data
- */
-typedef struct PDMACFILEBWMGR
-{
-    /** Maximum number of bytes the VM is allowed to transfer (Max is 4GB/s) */
-    uint32_t cbVMTransferPerSecMax;
-    /** Number of bytes we start with */
-    uint32_t cbVMTransferPerSecStart;
-    /** Step after each update */
-    uint32_t cbVMTransferPerSecStep;
-    /** Number of bytes we are allowed to transfer till the next update.
-     * Reset by the refresh timer. */
-    volatile uint32_t cbVMTransferAllowed;
-    /** Timestamp of the last update */
-    volatile uint64_t tsUpdatedLast;
-    /** Reference counter - How many endpoints are associated with this manager. */
-    uint32_t cRefs;
-} PDMACFILEBWMGR;
-/** Pointer to a bandwidth control manager */
-typedef PDMACFILEBWMGR *PPDMACFILEBWMGR;
-/** Pointer to a bandwidth control manager pointer */
-typedef PPDMACFILEBWMGR *PPPDMACFILEBWMGR;
-
-/**
  * A file access range lock.
  */
…
     /** Flag whether the out of resources warning was printed already. */
     bool fOutOfResourcesWarningPrinted;
-    /** The global bandwidth control manager */
-    PPDMACFILEBWMGR pBwMgr;
 } PDMASYNCCOMPLETIONEPCLASSFILE;
 /** Pointer to the endpoint class data. */
…
     /** Cache of endpoint data. */
     PDMACFILEENDPOINTCACHE DataCache;
-    /** Pointer to the associated bandwidth control manager */
-    PPDMACFILEBWMGR pBwMgr;

     /** Flag whether a flush request is currently active */
…
 void pdmacFileEpTaskCompleted(PPDMACTASKFILE pTask, void *pvUser, int rc);

-bool pdmacFileBwMgrIsTransferAllowed(PPDMACFILEBWMGR pBwMgr, uint32_t cbTransfer);
-
 int pdmacFileCacheInit(PPDMASYNCCOMPLETIONEPCLASSFILE pClassFile, PCFGMNODE pCfgNode);
 void pdmacFileCacheDestroy(PPDMASYNCCOMPLETIONEPCLASSFILE pClassFile);
trunk/src/VBox/VMM/PDMAsyncCompletionFileNormal.cpp
(r32467 → r33218)

         && RT_SUCCESS(rc))
     {
+        RTMSINTERVAL msWhenNext;
         PPDMACTASKFILE pCurr = pTaskHead;

-        if (!pdmacFileBwMgrIsTransferAllowed(pEndpoint->pBwMgr, (uint32_t)pCurr->DataSeg.cbSeg))
+        if (!pdmacEpIsTransferAllowed(&pEndpoint->Core, (uint32_t)pCurr->DataSeg.cbSeg, &msWhenNext))
         {
             pAioMgr->fBwLimitReached = true;
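
The call site now passes an RTMSINTERVAL out-parameter: when a transfer is rejected, pdmacEpIsTransferAllowed() reports how many milliseconds remain until the allowance is refreshed, so a caller can delay resubmission instead of polling. The sketch below shows that caller contract in isolation; it is illustrative only (the real file I/O manager records fBwLimitReached and re-queues the task rather than sleeping on its worker thread), and the helper name is made up.

    #include <iprt/thread.h>
    #include "PDMAsyncCompletionInternal.h"  /* VMM-internal header declaring pdmacEpIsTransferAllowed() */

    /* Illustrative pattern: retry a throttled submission once the allowance refreshes. */
    static void exampleSubmitThrottled(PPDMASYNCCOMPLETIONENDPOINT pEndpoint, uint32_t cbTransfer)
    {
        RTMSINTERVAL msWhenNext = 0; /* stays 0 if the refusal came from a lost refresh race */

        while (!pdmacEpIsTransferAllowed(pEndpoint, cbTransfer, &msWhenNext))
        {
            /* Out of budget: wait until the bandwidth manager refreshes its allowance. */
            RTThreadSleep(msWhenNext);
            msWhenNext = 0;
        }

        /* ... issue the request for cbTransfer bytes here ... */
    }
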
trunk/src/VBox/VMM/PDMAsyncCompletionInternal.h
(r29496 → r33218)

 #define PDMAC_EPCLASS_OPS_VERSION 0x00000001

+/** Pointer to a bandwidth control manager. */
+typedef struct PDMACBWMGR *PPDMACBWMGR;
+
 /**
  * PDM Async completion endpoint class.
…
     /** Pointer to the shared VM structure. */
     PVM pVM;
-    /** Critical section protecting the endpoint list. */
+    /** Critical section protecting the lists below. */
     RTCRITSECT CritSect;
     /** Number of endpoints in the list. */
…
     /** Head of endpoints with this class. */
     R3PTRTYPE(PPDMASYNCCOMPLETIONENDPOINT) pEndpointsHead;
+    /** Head of the bandwidth managers for this class. */
+    R3PTRTYPE(PPDMACBWMGR) pBwMgrsHead;
     /** Pointer to the callback table. */
     R3PTRTYPE(PCPDMASYNCCOMPLETIONEPCLASSOPS) pEndpointOps;
…
     /** URI describing the endpoint */
     char *pszUri;
+    /** Pointer to the assigned bandwidth manager. */
+    volatile PPDMACBWMGR pBwMgr;
 #ifdef VBOX_WITH_STATISTICS
     STAMCOUNTER StatTaskRunTimesNs[10];
…
 void pdmR3AsyncCompletionCompleteTask(PPDMASYNCCOMPLETIONTASK pTask, int rc, bool fCallCompletionHandler);

+/**
+ * Checks if the endpoint is allowed to transfer the given amount of bytes.
+ *
+ * @returns true if the endpoint is allowed to transfer the data.
+ *          false otherwise
+ * @param   pEndpoint    The endpoint.
+ * @param   cbTransfer   The number of bytes to transfer.
+ * @param   pmsWhenNext  Where to store the number of milliseconds
+ *                       until the bandwidth is refreshed.
+ *                       Only set if false is returned.
+ */
+bool pdmacEpIsTransferAllowed(PPDMASYNCCOMPLETIONENDPOINT pEndpoint, uint32_t cbTransfer, RTMSINTERVAL *pmsWhenNext);
+
 RT_C_DECLS_END