Changeset 43090 in vbox for trunk/src/VBox/Devices
Timestamp: Aug 30, 2012 9:11:57 AM
svn:sync-xref-src-repo-rev: 80450
File: 1 edited
trunk/src/VBox/Devices/Network/DevE1000.cpp
--- trunk/src/VBox/Devices/Network/DevE1000.cpp (r42954)
+++ trunk/src/VBox/Devices/Network/DevE1000.cpp (r43090)
@@ -1107,4 +1107,5 @@
     /** RX: Fetched RX descriptors. */
     E1KRXDESC   aRxDescriptors[E1K_RXD_CACHE_SIZE];
+    //uint64_t    aRxDescAddr[E1K_RXD_CACHE_SIZE];
     /** RX: Actual number of fetched RX descriptors. */
     uint32_t    nRxDFetched;
@@ -1551,4 +1552,5 @@
 #define e1kCsRxEnter(ps, rc) PDMCritSectEnter(&ps->csRx, rc)
 #define e1kCsRxLeave(ps) PDMCritSectLeave(&ps->csRx)
+#define e1kCsRxIsOwner(ps) PDMCritSectIsOwner(&ps->csRx)
 
 #ifndef E1K_WITH_TX_CS
@@ -1617,6 +1619,5 @@
 #endif /* E1K_WITH_TXD_CACHE */
 #ifdef E1K_WITH_RXD_CACHE
-    rc = e1kCsRxEnter(pState, VERR_SEM_BUSY);
-    if (RT_LIKELY(rc == VINF_SUCCESS))
+    if (RT_LIKELY(e1kCsRxEnter(pState, VERR_SEM_BUSY) == VINF_SUCCESS))
     {
         pState->iRxDCurrent = pState->nRxDFetched = 0;
@@ -1913,4 +1914,43 @@
 }
 
+/**
+ * Advance the head pointer of the receive descriptor queue.
+ *
+ * @remarks RDH always points to the next available RX descriptor.
+ *
+ * @param pState The device state structure.
+ */
+DECLINLINE(void) e1kAdvanceRDH(E1KSTATE *pState)
+{
+    Assert(e1kCsRxIsOwner(pState));
+    //e1kCsEnter(pState, RT_SRC_POS);
+    if (++RDH * sizeof(E1KRXDESC) >= RDLEN)
+        RDH = 0;
+    /*
+     * Compute current receive queue length and fire RXDMT0 interrupt
+     * if we are low on receive buffers
+     */
+    uint32_t uRQueueLen = RDH>RDT ? RDLEN/sizeof(E1KRXDESC)-RDH+RDT : RDT-RDH;
+    /*
+     * The minimum threshold is controlled by RDMTS bits of RCTL:
+     * 00 = 1/2 of RDLEN
+     * 01 = 1/4 of RDLEN
+     * 10 = 1/8 of RDLEN
+     * 11 = reserved
+     */
+    uint32_t uMinRQThreshold = RDLEN / sizeof(E1KRXDESC) / (2 << GET_BITS(RCTL, RDMTS));
+    if (uRQueueLen <= uMinRQThreshold)
+    {
+        E1kLogRel(("E1000: low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x\n", RDH, RDT, uRQueueLen, uMinRQThreshold));
+        E1kLog2(("%s Low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x, raise an interrupt\n",
+                 INSTANCE(pState), RDH, RDT, uRQueueLen, uMinRQThreshold));
+        E1K_INC_ISTAT_CNT(pState->uStatIntRXDMT0);
+        e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_RXDMT0);
+    }
+    E1kLog2(("%s e1kAdvanceRDH: at exit RDH=%x RDT=%x len=%x\n",
+             INSTANCE(pState), RDH, RDT, uRQueueLen));
+    //e1kCsLeave(pState);
+}
+
 #ifdef E1K_WITH_RXD_CACHE
 /**
@@ -1976,4 +2016,11 @@
                        ((uint64_t)RDBAH << 32) + RDBAL + nFirstNotLoaded * sizeof(E1KRXDESC),
                        pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KRXDESC));
+    // uint64_t addrBase = ((uint64_t)RDBAH << 32) + RDBAL;
+    // unsigned i, j;
+    // for (i = pState->nRxDFetched; i < pState->nRxDFetched + nDescsInSingleRead; ++i)
+    // {
+    //     pState->aRxDescAddr[i] = addrBase + (nFirstNotLoaded + i - pState->nRxDFetched) * sizeof(E1KRXDESC);
+    //     E1kLog3(("%s aRxDescAddr[%d] = %p\n", INSTANCE(pState), i, pState->aRxDescAddr[i]));
+    // }
     E1kLog3(("%s Fetched %u RX descriptors at %08x%08x(0x%x), RDLEN=%08x, RDH=%08x, RDT=%08x\n",
              INSTANCE(pState), nDescsInSingleRead,
@@ -1986,4 +2033,10 @@
                            pFirstEmptyDesc + nDescsInSingleRead,
                            (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KRXDESC));
+        // Assert(i == pState->nRxDFetched + nDescsInSingleRead);
+        // for (j = 0; i < pState->nRxDFetched + nDescsToFetch; ++i, ++j)
+        // {
+        //     pState->aRxDescAddr[i] = addrBase + j * sizeof(E1KRXDESC);
+        //     E1kLog3(("%s aRxDescAddr[%d] = %p\n", INSTANCE(pState), i, pState->aRxDescAddr[i]));
+        // }
         E1kLog3(("%s Fetched %u RX descriptors at %08x%08x\n",
                  INSTANCE(pState), nDescsToFetch - nDescsInSingleRead,
@@ -1994,57 +2047,73 @@
 }
 
+/**
+ * Obtain the next RX descriptor from RXD cache, fetching descriptors from the
+ * RX ring if the cache is empty.
+ *
+ * Note that we cannot advance the cache pointer (iRxDCurrent) yet as it will
+ * go out of sync with RDH which will cause trouble when EMT checks if the
+ * cache is empty to do pre-fetch @bugref(6217).
+ *
+ * @param pState The device state structure.
+ * @thread RX
+ */
 DECLINLINE(E1KRXDESC*) e1kRxDGet(E1KSTATE* pState)
 {
+    Assert(e1kCsRxIsOwner(pState));
     /* Check the cache first. */
     if (pState->iRxDCurrent < pState->nRxDFetched)
-        return &pState->aRxDescriptors[pState->iRxDCurrent++];
+        return &pState->aRxDescriptors[pState->iRxDCurrent];
     /* Cache is empty, reset it and check if we can fetch more. */
     pState->iRxDCurrent = pState->nRxDFetched = 0;
     if (e1kRxDPrefetch(pState))
-        return &pState->aRxDescriptors[pState->iRxDCurrent++];
+        return &pState->aRxDescriptors[pState->iRxDCurrent];
    /* Out of Rx descriptors. */
     return NULL;
 }
-#endif /* E1K_WITH_RXD_CACHE */
-
-/**
- * Advance the head pointer of the receive descriptor queue.
- *
- * @remarks RDH always points to the next available RX descriptor.
+
+/**
+ * Return the RX descriptor obtained with e1kRxDGet() and advance the cache
+ * pointer. The descriptor gets written back to the RXD ring.
  *
  * @param pState The device state structure.
- */
-DECLINLINE(void) e1kAdvanceRDH(E1KSTATE *pState)
-{
-    //e1kCsEnter(pState, RT_SRC_POS);
-    if (++RDH * sizeof(E1KRXDESC) >= RDLEN)
-        RDH = 0;
-    /*
-     * Compute current receive queue length and fire RXDMT0 interrupt
-     * if we are low on receive buffers
-     */
-    uint32_t uRQueueLen = RDH>RDT ? RDLEN/sizeof(E1KRXDESC)-RDH+RDT : RDT-RDH;
-    /*
-     * The minimum threshold is controlled by RDMTS bits of RCTL:
-     * 00 = 1/2 of RDLEN
-     * 01 = 1/4 of RDLEN
-     * 10 = 1/8 of RDLEN
-     * 11 = reserved
-     */
-    uint32_t uMinRQThreshold = RDLEN / sizeof(E1KRXDESC) / (2 << GET_BITS(RCTL, RDMTS));
-    if (uRQueueLen <= uMinRQThreshold)
-    {
-        E1kLogRel(("E1000: low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x\n", RDH, RDT, uRQueueLen, uMinRQThreshold));
-        E1kLog2(("%s Low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x, raise an interrupt\n",
-                 INSTANCE(pState), RDH, RDT, uRQueueLen, uMinRQThreshold));
-        E1K_INC_ISTAT_CNT(pState->uStatIntRXDMT0);
-        e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_RXDMT0);
-    }
-    E1kLog2(("%s e1kAdvanceRDH: at exit RDH=%x RDT=%x len=%x\n",
-             INSTANCE(pState), RDH, RDT, uRQueueLen));
-    //e1kCsLeave(pState);
-}
-
-#ifndef E1K_WITH_RXD_CACHE
+ * @param pDesc The descriptor being "returned" to the RX ring.
+ * @thread RX
+ */
+DECLINLINE(void) e1kRxDPut(E1KSTATE* pState, E1KRXDESC* pDesc)
+{
+    Assert(e1kCsRxIsOwner(pState));
+    pState->iRxDCurrent++;
+    // Assert(pDesc >= pState->aRxDescriptors);
+    // Assert(pDesc < pState->aRxDescriptors + E1K_RXD_CACHE_SIZE);
+    // uint64_t addr = e1kDescAddr(RDBAH, RDBAL, RDH);
+    // uint32_t rdh = RDH;
+    // Assert(pState->aRxDescAddr[pDesc - pState->aRxDescriptors] == addr);
+    PDMDevHlpPhysWrite(pState->CTX_SUFF(pDevIns),
+                       e1kDescAddr(RDBAH, RDBAL, RDH),
+                       pDesc, sizeof(E1KRXDESC));
+    e1kAdvanceRDH(pState);
+    e1kPrintRDesc(pState, pDesc);
+}
+
+/**
+ * Store a fragment of received packet at the specifed address.
+ *
+ * @param pState The device state structure.
+ * @param pDesc The next available RX descriptor.
+ * @param pvBuf The fragment.
+ * @param cb The size of the fragment.
+ */
+static DECLCALLBACK(void) e1kStoreRxFragment(E1KSTATE *pState, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
+{
+    STAM_PROFILE_ADV_START(&pState->StatReceiveStore, a);
+    E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n",
+             INSTANCE(pState), cb, pDesc->u64BufAddr, pDesc->status.fEOP));
+    PDMDevHlpPhysWrite(pState->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
+    pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
+    STAM_PROFILE_ADV_STOP(&pState->StatReceiveStore, a);
+}
+
+#else /* !E1K_WITH_RXD_CACHE */
+
 /**
  * Store a fragment of received packet that fits into the next available RX
@@ -2095,23 +2164,5 @@
     STAM_PROFILE_ADV_STOP(&pState->StatReceiveStore, a);
 }
-#else /* E1K_WITH_RXD_CACHE */
-/**
- * Store a fragment of received packet at the specifed address.
- *
- * @param pState The device state structure.
- * @param pDesc The next available RX descriptor.
- * @param pvBuf The fragment.
- * @param cb The size of the fragment.
- */
-static DECLCALLBACK(void) e1kStoreRxFragment(E1KSTATE *pState, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
-{
-    STAM_PROFILE_ADV_START(&pState->StatReceiveStore, a);
-    E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n",
-             INSTANCE(pState), cb, pDesc->u64BufAddr, pDesc->status.fEOP));
-    PDMDevHlpPhysWrite(pState->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
-    pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
-    STAM_PROFILE_ADV_STOP(&pState->StatReceiveStore, a);
-}
-#endif /* E1K_WITH_RXD_CACHE */
+#endif /* !E1K_WITH_RXD_CACHE */
 
 /**
@@ -2362,9 +2413,5 @@
         /* Write back the descriptor. */
         pDesc->status.fDD = true;
-        PDMDevHlpPhysWrite(pState->CTX_SUFF(pDevIns),
-                           e1kDescAddr(RDBAH, RDBAL, RDH),
-                           pDesc, sizeof(E1KRXDESC));
-        e1kAdvanceRDH(pState);
-        e1kPrintRDesc(pState, pDesc);
+        e1kRxDPut(pState, pDesc);
 #ifndef E1K_WITH_RXD_CACHE
     }
@@ -7029,4 +7076,10 @@
         e1kRDescInfo(pState, pHlp, e1kDescAddr(RDBAH, RDBAL, i), &desc);
     }
+    pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors in Cache (at %d (RDH %d)/ fetched %d / max %d) --\n",
+                    pState->iRxDCurrent, RDH, pState->nRxDFetched, E1K_RXD_CACHE_SIZE);
+    int rdh = RDH;
+    for (i = pState->iRxDCurrent; i < pState->nRxDFetched; ++i)
+        e1kRDescInfo(pState, pHlp, e1kDescAddr(RDBAH, RDBAL, rdh++ % cDescs), &pState->aRxDescriptors[i]);
+
     cDescs = TDLEN / sizeof(E1KTXDESC);
     pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors (%d total) --\n", cDescs);
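The hunk at new line 2415 is the point of the refactoring: the receive path no longer writes the descriptor back and advances RDH inline, it hands the descriptor to e1kRxDPut() after having obtained it with e1kRxDGet(). The new comment on e1kRxDGet() explains why the two steps are split (@bugref(6217)): the cache index iRxDCurrent may only advance when the descriptor is returned, otherwise it goes out of sync with RDH while EMT checks whether the cache is empty before pre-fetching. Below is a minimal sketch of the resulting per-fragment flow on the RX thread; the helper name and the pvFrag/cbFrag parameters are invented for illustration and are not part of the changeset, and the real receive code additionally handles multi-fragment packets and packet status beyond the DD bit.

    /* Hypothetical illustration only, not code from this changeset. */
    static void e1kSketchStoreOneFragment(E1KSTATE *pState, const void *pvFrag, size_t cbFrag)
    {
        Assert(e1kCsRxIsOwner(pState));               /* both helpers assert RX critsect ownership */
        E1KRXDESC *pDesc = e1kRxDGet(pState);         /* peek at cached descriptor; iRxDCurrent stays put */
        if (!pDesc)
            return;                                   /* out of RX descriptors */
        e1kStoreRxFragment(pState, pDesc, pvFrag, cbFrag); /* DMA the fragment into the guest buffer */
        pDesc->status.fDD = true;                     /* mark the descriptor done */
        e1kRxDPut(pState, pDesc);                     /* advance iRxDCurrent, write back, advance RDH */
    }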