Changeset 40985 in vbox for trunk/src/VBox/Devices
Timestamp: Apr 19, 2012, 9:18:02 AM (13 years ago)
Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/Devices/Network/DevE1000.cpp
r40799 r40985 43 43 //#define E1K_USE_SUPLIB_SEMEVENT 44 44 //#define E1K_WITH_MSI 45 #define E1K_WITH_TXD_CACHE 1 46 47 #ifdef E1K_WITH_TXD_CACHE 48 #define E1K_TXD_CACHE_SIZE 16u 49 #endif /* E1K_WITH_TXD_CACHE */ 45 50 46 51 #include <iprt/crc.h> … … 1018 1023 /** TX: Context used for ordinary packets. */ 1019 1024 E1KTXCTX contextNormal; 1025 #ifdef E1K_WITH_TXD_CACHE 1026 /** EMT/TX: Fetched TX descriptors. */ 1027 E1KTXDESC aTxDescriptors[E1K_TXD_CACHE_SIZE]; 1028 /** EMT/TX: Actual number of fetched TX descriptors. */ 1029 uint8_t nTxDFetched; 1030 /** EMT/TX: Index in cache of TX descriptor being processed. */ 1031 uint8_t iTxDCurrent; 1032 /** EMT/TX: Will this frame be sent as GSO. */ 1033 bool fGSO; 1034 /** EMT/TX: Number of bytes in next packet. */ 1035 uint32_t cbTxAlloc; 1036 1037 #endif /* E1K_WITH_TXD_CACHE */ 1020 1038 /** GSO context. u8Type is set to PDMNETWORKGSOTYPE_INVALID when not 1021 1039 * applicable to the current TSE mode. */ … … 3102 3120 } 3103 3121 3104 /** 3105 * Allocates a xmit buffer. 3106 * 3107 * Presently this will always return a buffer. Later on we'll have a 3108 * out-of-buffer mechanism in place where the driver calls us back when buffers 3109 * becomes available. 3122 #ifndef E1K_WITH_TXD_CACHE 3123 /** 3124 * Allocates an xmit buffer. 3110 3125 * 3111 3126 * @returns See PDMINETWORKUP::pfnAllocBuf. … … 3162 3177 return VINF_SUCCESS; 3163 3178 } 3179 #else /* E1K_WITH_TXD_CACHE */ 3180 /** 3181 * Allocates an xmit buffer. 3182 * 3183 * @returns See PDMINETWORKUP::pfnAllocBuf. 3184 * @param pState The device state structure. 3185 * @param cbMin The minimum frame size. 3186 * @param fExactSize Whether cbMin is exact or if we have to max it 3187 * out to the max MTU size. 3188 * @param fGso Whether this is a GSO frame or not. 3189 */ 3190 DECLINLINE(int) e1kXmitAllocBuf(E1KSTATE *pState, bool fGso) 3191 { 3192 /* Deal with existing buffer (descriptor screw up, reset, etc). 
*/ 3193 if (RT_UNLIKELY(pState->CTX_SUFF(pTxSg))) 3194 e1kXmitFreeBuf(pState); 3195 Assert(pState->CTX_SUFF(pTxSg) == NULL); 3196 3197 /* 3198 * Allocate the buffer. 3199 */ 3200 PPDMSCATTERGATHER pSg; 3201 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR)) 3202 { 3203 Assert(pState->cbTxAlloc != 0); 3204 if (pState->cbTxAlloc == 0) 3205 return VERR_NET_IO_ERROR; 3206 3207 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv); 3208 if (RT_UNLIKELY(!pDrv)) 3209 return VERR_NET_DOWN; 3210 int rc = pDrv->pfnAllocBuf(pDrv, pState->cbTxAlloc, fGso ? &pState->GsoCtx : NULL, &pSg); 3211 if (RT_FAILURE(rc)) 3212 { 3213 /* Suspend TX as we are out of buffers atm */ 3214 STATUS |= STATUS_TXOFF; 3215 return rc; 3216 } 3217 E1kLog3(("%s Allocated buffer for TX packet: cb=%u %s%s\n", 3218 INSTANCE(pState), pState->cbTxAlloc, 3219 pState->fVTag ? "VLAN " : "", 3220 pState->fGSO ? "GSO " : "")); 3221 pState->cbTxAlloc = 0; 3222 } 3223 else 3224 { 3225 /* Create a loopback using the fallback buffer and preallocated SG. */ 3226 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t)); 3227 pSg = &pState->uTxFallback.Sg; 3228 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3; 3229 pSg->cbUsed = 0; 3230 pSg->cbAvailable = 0; 3231 pSg->pvAllocator = pState; 3232 pSg->pvUser = NULL; /* No GSO here. */ 3233 pSg->cSegs = 1; 3234 pSg->aSegs[0].pvSeg = pState->aTxPacketFallback; 3235 pSg->aSegs[0].cbSeg = sizeof(pState->aTxPacketFallback); 3236 } 3237 3238 pState->CTX_SUFF(pTxSg) = pSg; 3239 return VINF_SUCCESS; 3240 } 3241 #endif /* E1K_WITH_TXD_CACHE */ 3164 3242 3165 3243 /** … … 3180 3258 } 3181 3259 3260 #ifndef E1K_WITH_TXD_CACHE 3182 3261 /** 3183 3262 * Load transmit descriptor from guest memory. … … 3192 3271 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC)); 3193 3272 } 3273 #else /* E1K_WITH_TXD_CACHE */ 3274 /** 3275 * Load transmit descriptors from guest memory. 
3276 * 3277 * We need two physical reads in case the tail wrapped around the end of TX 3278 * descriptor ring. 3279 * 3280 * @returns the actual number of descriptors fetched. 3281 * @param pState The device state structure. 3282 * @param pDesc Pointer to descriptor union. 3283 * @param addr Physical address in guest context. 3284 * @thread E1000_TX 3285 */ 3286 DECLINLINE(unsigned) e1kTxDLoadMore(E1KSTATE* pState) 3287 { 3288 unsigned nDescsToFetch = RT_MIN(e1kGetTxLen(pState), E1K_TXD_CACHE_SIZE - pState->nTxDFetched); 3289 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, TDLEN / sizeof(E1KTXDESC) - TDH); 3290 if (nDescsToFetch == 0) 3291 return 0; 3292 E1KTXDESC* pFirstEmptyDesc = &pState->aTxDescriptors[pState->nTxDFetched]; 3293 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), 3294 ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(E1KTXDESC), 3295 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KTXDESC)); 3296 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n", 3297 INSTANCE(pState), nDescsInSingleRead, TDBAH, TDBAL + TDH * sizeof(E1KTXDESC), TDLEN, TDH, TDT)); 3298 if (nDescsToFetch > nDescsInSingleRead) 3299 { 3300 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), 3301 ((uint64_t)TDBAH << 32) + TDBAL, 3302 pFirstEmptyDesc + nDescsInSingleRead, 3303 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KTXDESC)); 3304 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n", 3305 INSTANCE(pState), nDescsToFetch - nDescsInSingleRead, 3306 TDBAH, TDBAL, TDLEN, TDH, TDT)); 3307 } 3308 pState->nTxDFetched += nDescsToFetch; 3309 return nDescsToFetch; 3310 } 3311 3312 /** 3313 * Load transmit descriptors from guest memory only if there are no loaded 3314 * descriptors. 3315 * 3316 * @returns true if there are descriptors in cache. 3317 * @param pState The device state structure. 3318 * @param pDesc Pointer to descriptor union. 3319 * @param addr Physical address in guest context. 
3320 * @thread E1000_TX 3321 */ 3322 DECLINLINE(bool) e1kTxDLazyLoad(E1KSTATE* pState) 3323 { 3324 if (pState->nTxDFetched == 0) 3325 return e1kTxDLoadMore(pState) != 0; 3326 return true; 3327 } 3328 #endif /* E1K_WITH_TXD_CACHE */ 3194 3329 3195 3330 /** … … 3335 3470 static void e1kInsertChecksum(E1KSTATE* pState, uint8_t *pPkt, uint16_t u16PktLen, uint8_t cso, uint8_t css, uint16_t cse) 3336 3471 { 3337 if (cso > u16PktLen) 3338 { 3339 E1kLog2(("%s cso(%X) is greater than packet length(%X), checksum is not inserted\n", 3472 if (css >= u16PktLen) 3473 { 3474 E1kLog2(("%s css(%X) is greater than packet length-1(%X), checksum is not inserted\n", 3475 INSTANCE(pState), cso, u16PktLen)); 3476 return; 3477 } 3478 3479 if (cso >= u16PktLen - 1) 3480 { 3481 E1kLog2(("%s cso(%X) is greater than packet length-2(%X), checksum is not inserted\n", 3340 3482 INSTANCE(pState), cso, u16PktLen)); 3341 3483 return; … … 3364 3506 * @thread E1000_TX 3365 3507 */ 3508 #ifndef E1K_WITH_TXD_CACHE 3366 3509 static void e1kFallbackAddSegment(E1KSTATE* pState, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread) 3367 3510 { … … 3469 3612 } 3470 3613 } 3471 3614 #else /* E1K_WITH_TXD_CACHE */ 3615 static int e1kFallbackAddSegment(E1KSTATE* pState, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread) 3616 { 3617 int rc = VINF_SUCCESS; 3618 /* TCP header being transmitted */ 3619 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *) 3620 (pState->aTxPacketFallback + pState->contextTSE.tu.u8CSS); 3621 /* IP header being transmitted */ 3622 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *) 3623 (pState->aTxPacketFallback + pState->contextTSE.ip.u8CSS); 3624 3625 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n", 3626 INSTANCE(pState), u16Len, pState->u32PayRemain, pState->u16HdrRemain, fSend)); 3627 Assert(pState->u32PayRemain + pState->u16HdrRemain > 0); 3628 3629 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), 
PhysAddr, 3630 pState->aTxPacketFallback + pState->u16TxPktLen, u16Len); 3631 E1kLog3(("%s Dump of the segment:\n" 3632 "%.*Rhxd\n" 3633 "%s --- End of dump ---\n", 3634 INSTANCE(pState), u16Len, pState->aTxPacketFallback + pState->u16TxPktLen, INSTANCE(pState))); 3635 pState->u16TxPktLen += u16Len; 3636 E1kLog3(("%s e1kFallbackAddSegment: pState->u16TxPktLen=%x\n", 3637 INSTANCE(pState), pState->u16TxPktLen)); 3638 if (pState->u16HdrRemain > 0) 3639 { 3640 /* The header was not complete, check if it is now */ 3641 if (u16Len >= pState->u16HdrRemain) 3642 { 3643 /* The rest is payload */ 3644 u16Len -= pState->u16HdrRemain; 3645 pState->u16HdrRemain = 0; 3646 /* Save partial checksum and flags */ 3647 pState->u32SavedCsum = pTcpHdr->chksum; 3648 pState->u16SavedFlags = pTcpHdr->hdrlen_flags; 3649 /* Clear FIN and PSH flags now and set them only in the last segment */ 3650 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH); 3651 } 3652 else 3653 { 3654 /* Still not */ 3655 pState->u16HdrRemain -= u16Len; 3656 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n", 3657 INSTANCE(pState), pState->u16HdrRemain)); 3658 return rc; 3659 } 3660 } 3661 3662 pState->u32PayRemain -= u16Len; 3663 3664 if (fSend) 3665 { 3666 /* Leave ethernet header intact */ 3667 /* IP Total Length = payload + headers - ethernet header */ 3668 pIpHdr->total_len = htons(pState->u16TxPktLen - pState->contextTSE.ip.u8CSS); 3669 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n", 3670 INSTANCE(pState), ntohs(pIpHdr->total_len))); 3671 /* Update IP Checksum */ 3672 pIpHdr->chksum = 0; 3673 e1kInsertChecksum(pState, pState->aTxPacketFallback, pState->u16TxPktLen, 3674 pState->contextTSE.ip.u8CSO, 3675 pState->contextTSE.ip.u8CSS, 3676 pState->contextTSE.ip.u16CSE); 3677 3678 /* Update TCP flags */ 3679 /* Restore original FIN and PSH flags for the last segment */ 3680 if (pState->u32PayRemain == 0) 3681 { 3682 pTcpHdr->hdrlen_flags = 
pState->u16SavedFlags; 3683 E1K_INC_CNT32(TSCTC); 3684 } 3685 /* Add TCP length to partial pseudo header sum */ 3686 uint32_t csum = pState->u32SavedCsum 3687 + htons(pState->u16TxPktLen - pState->contextTSE.tu.u8CSS); 3688 while (csum >> 16) 3689 csum = (csum >> 16) + (csum & 0xFFFF); 3690 pTcpHdr->chksum = csum; 3691 /* Compute final checksum */ 3692 e1kInsertChecksum(pState, pState->aTxPacketFallback, pState->u16TxPktLen, 3693 pState->contextTSE.tu.u8CSO, 3694 pState->contextTSE.tu.u8CSS, 3695 pState->contextTSE.tu.u16CSE); 3696 3697 /* 3698 * Transmit it. 3699 */ 3700 if (pState->CTX_SUFF(pTxSg)) 3701 { 3702 Assert(pState->u16TxPktLen <= pState->CTX_SUFF(pTxSg)->cbAvailable); 3703 Assert(pState->CTX_SUFF(pTxSg)->cSegs == 1); 3704 if (pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pState->aTxPacketFallback) 3705 memcpy(pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->aTxPacketFallback, pState->u16TxPktLen); 3706 pState->CTX_SUFF(pTxSg)->cbUsed = pState->u16TxPktLen; 3707 pState->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pState->u16TxPktLen; 3708 } 3709 e1kTransmitFrame(pState, fOnWorkerThread); 3710 3711 /* Update Sequence Number */ 3712 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pState->u16TxPktLen 3713 - pState->contextTSE.dw3.u8HDRLEN); 3714 /* Increment IP identification */ 3715 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1); 3716 3717 /* Allocate new buffer for the next segment. */ 3718 if (pState->u32PayRemain) 3719 { 3720 pState->cbTxAlloc = RT_MIN(pState->u32PayRemain, 3721 pState->contextTSE.dw3.u16MSS) 3722 + pState->contextTSE.dw3.u8HDRLEN 3723 + (pState->fVTag ? 
4 : 0); 3724 rc = e1kXmitAllocBuf(pState, false /* fGSO */); 3725 } 3726 } 3727 3728 return rc; 3729 } 3730 #endif /* E1K_WITH_TXD_CACHE */ 3731 3732 #ifndef E1K_WITH_TXD_CACHE 3472 3733 /** 3473 3734 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit … … 3534 3795 return false; 3535 3796 } 3797 #else /* E1K_WITH_TXD_CACHE */ 3798 /** 3799 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit 3800 * frame. 3801 * 3802 * We construct the frame in the fallback buffer first and the copy it to the SG 3803 * buffer before passing it down to the network driver code. 3804 * 3805 * @returns error code 3806 * 3807 * @param pState The device state structure. 3808 * @param pDesc Pointer to the descriptor to transmit. 3809 * @param cbFragment Length of descriptor's buffer. 3810 * @param fOnWorkerThread Whether we're on a worker thread or an EMT. 3811 * @thread E1000_TX 3812 */ 3813 static int e1kFallbackAddToFrame(E1KSTATE* pState, E1KTXDESC* pDesc, bool fOnWorkerThread) 3814 { 3815 int rc = VINF_SUCCESS; 3816 PPDMSCATTERGATHER pTxSg = pState->CTX_SUFF(pTxSg); 3817 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA); 3818 Assert(pDesc->data.cmd.fTSE); 3819 Assert(!e1kXmitIsGsoBuf(pTxSg)); 3820 3821 uint16_t u16MaxPktLen = pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw3.u16MSS; 3822 Assert(u16MaxPktLen != 0); 3823 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE); 3824 3825 /* 3826 * Carve out segments. 
3827 */ 3828 do 3829 { 3830 /* Calculate how many bytes we have left in this TCP segment */ 3831 uint32_t cb = u16MaxPktLen - pState->u16TxPktLen; 3832 if (cb > pDesc->data.cmd.u20DTALEN) 3833 { 3834 /* This descriptor fits completely into current segment */ 3835 cb = pDesc->data.cmd.u20DTALEN; 3836 rc = e1kFallbackAddSegment(pState, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread); 3837 } 3838 else 3839 { 3840 rc = e1kFallbackAddSegment(pState, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread); 3841 /* 3842 * Rewind the packet tail pointer to the beginning of payload, 3843 * so we continue writing right beyond the header. 3844 */ 3845 pState->u16TxPktLen = pState->contextTSE.dw3.u8HDRLEN; 3846 } 3847 3848 pDesc->data.u64BufAddr += cb; 3849 pDesc->data.cmd.u20DTALEN -= cb; 3850 } while (pDesc->data.cmd.u20DTALEN > 0 && RT_SUCCESS(rc)); 3851 3852 if (pDesc->data.cmd.fEOP) 3853 { 3854 /* End of packet, next segment will contain header. */ 3855 if (pState->u32PayRemain != 0) 3856 E1K_INC_CNT32(TSCTFC); 3857 pState->u16TxPktLen = 0; 3858 e1kXmitFreeBuf(pState); 3859 } 3860 3861 return false; 3862 } 3863 #endif /* E1K_WITH_TXD_CACHE */ 3536 3864 3537 3865 … … 3657 3985 } 3658 3986 3987 #ifndef E1K_WITH_TXD_CACHE 3659 3988 /** 3660 3989 * Process Transmit Descriptor. … … 3913 4242 return rc; 3914 4243 } 3915 3916 4244 #else /* E1K_WITH_TXD_CACHE */ 4245 /** 4246 * Process Transmit Descriptor. 4247 * 4248 * E1000 supports three types of transmit descriptors: 4249 * - legacy data descriptors of older format (context-less). 4250 * - data the same as legacy but providing new offloading capabilities. 4251 * - context sets up the context for following data descriptors. 4252 * 4253 * @param pState The device state structure. 4254 * @param pDesc Pointer to descriptor union. 4255 * @param addr Physical address of descriptor in guest memory. 4256 * @param fOnWorkerThread Whether we're on a worker thread or an EMT. 
4257 * @param cbPacketSize Size of the packet as previously computed. 4258 * @thread E1000_TX 4259 */ 4260 static int e1kXmitDesc(E1KSTATE* pState, E1KTXDESC* pDesc, RTGCPHYS addr, 4261 bool fOnWorkerThread) 4262 { 4263 int rc = VINF_SUCCESS; 4264 uint32_t cbVTag = 0; 4265 4266 e1kPrintTDesc(pState, pDesc, "vvv"); 4267 4268 #ifdef E1K_USE_TX_TIMERS 4269 e1kCancelTimer(pState, pState->CTX_SUFF(pTIDTimer)); 4270 #endif /* E1K_USE_TX_TIMERS */ 4271 4272 switch (e1kGetDescType(pDesc)) 4273 { 4274 case E1K_DTYP_CONTEXT: 4275 /* The caller have already updated the context */ 4276 E1K_INC_ISTAT_CNT(pState->uStatDescCtx); 4277 e1kDescReport(pState, pDesc, addr); 4278 break; 4279 4280 case E1K_DTYP_DATA: 4281 { 4282 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0) 4283 { 4284 E1kLog2(("% Empty data descriptor, skipped.\n", INSTANCE(pState))); 4285 /** @todo Same as legacy when !TSE. See below. */ 4286 break; 4287 } 4288 STAM_COUNTER_INC(pDesc->data.cmd.fTSE? 4289 &pState->StatTxDescTSEData: 4290 &pState->StatTxDescData); 4291 E1K_INC_ISTAT_CNT(pState->uStatDescDat); 4292 4293 /* 4294 * Add the descriptor data to the frame. If the frame is complete, 4295 * transmit it and reset the u16TxPktLen field. 4296 */ 4297 if (e1kXmitIsGsoBuf(pState->CTX_SUFF(pTxSg))) 4298 { 4299 STAM_COUNTER_INC(&pState->StatTxPathGSO); 4300 bool fRc = e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN); 4301 if (pDesc->data.cmd.fEOP) 4302 { 4303 if ( fRc 4304 && pState->CTX_SUFF(pTxSg) 4305 && pState->CTX_SUFF(pTxSg)->cbUsed == (size_t)pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw2.u20PAYLEN) 4306 { 4307 e1kTransmitFrame(pState, fOnWorkerThread); 4308 E1K_INC_CNT32(TSCTC); 4309 } 4310 else 4311 { 4312 if (fRc) 4313 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , INSTANCE(pState), 4314 pState->CTX_SUFF(pTxSg), pState->CTX_SUFF(pTxSg) ? 
pState->CTX_SUFF(pTxSg)->cbUsed : 0, 4315 pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw2.u20PAYLEN)); 4316 e1kXmitFreeBuf(pState); 4317 E1K_INC_CNT32(TSCTFC); 4318 } 4319 pState->u16TxPktLen = 0; 4320 } 4321 } 4322 else if (!pDesc->data.cmd.fTSE) 4323 { 4324 STAM_COUNTER_INC(&pState->StatTxPathRegular); 4325 bool fRc = e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN); 4326 if (pDesc->data.cmd.fEOP) 4327 { 4328 if (fRc && pState->CTX_SUFF(pTxSg)) 4329 { 4330 Assert(pState->CTX_SUFF(pTxSg)->cSegs == 1); 4331 if (pState->fIPcsum) 4332 e1kInsertChecksum(pState, (uint8_t *)pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->u16TxPktLen, 4333 pState->contextNormal.ip.u8CSO, 4334 pState->contextNormal.ip.u8CSS, 4335 pState->contextNormal.ip.u16CSE); 4336 if (pState->fTCPcsum) 4337 e1kInsertChecksum(pState, (uint8_t *)pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->u16TxPktLen, 4338 pState->contextNormal.tu.u8CSO, 4339 pState->contextNormal.tu.u8CSS, 4340 pState->contextNormal.tu.u16CSE); 4341 e1kTransmitFrame(pState, fOnWorkerThread); 4342 } 4343 else 4344 e1kXmitFreeBuf(pState); 4345 pState->u16TxPktLen = 0; 4346 } 4347 } 4348 else 4349 { 4350 STAM_COUNTER_INC(&pState->StatTxPathFallback); 4351 rc = e1kFallbackAddToFrame(pState, pDesc, fOnWorkerThread); 4352 } 4353 4354 e1kDescReport(pState, pDesc, addr); 4355 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a); 4356 break; 4357 } 4358 4359 case E1K_DTYP_LEGACY: 4360 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0) 4361 { 4362 E1kLog(("%s Empty legacy descriptor, skipped.\n", INSTANCE(pState))); 4363 /** @todo 3.3.3, Length/Buffer Address: RS set -> write DD when processing. */ 4364 break; 4365 } 4366 STAM_COUNTER_INC(&pState->StatTxDescLegacy); 4367 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatTransmit), a); 4368 4369 /* Add fragment to frame. 
*/ 4370 if (e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length)) 4371 { 4372 E1K_INC_ISTAT_CNT(pState->uStatDescLeg); 4373 4374 /* Last fragment: Transmit and reset the packet storage counter. */ 4375 if (pDesc->legacy.cmd.fEOP) 4376 { 4377 if (pDesc->legacy.cmd.fIC) 4378 { 4379 e1kInsertChecksum(pState, 4380 (uint8_t *)pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, 4381 pState->u16TxPktLen, 4382 pDesc->legacy.cmd.u8CSO, 4383 pDesc->legacy.dw3.u8CSS, 4384 0); 4385 } 4386 e1kTransmitFrame(pState, fOnWorkerThread); 4387 pState->u16TxPktLen = 0; 4388 } 4389 } 4390 /* Last fragment + failure: free the buffer and reset the storage counter. */ 4391 else if (pDesc->legacy.cmd.fEOP) 4392 { 4393 e1kXmitFreeBuf(pState); 4394 pState->u16TxPktLen = 0; 4395 } 4396 4397 e1kDescReport(pState, pDesc, addr); 4398 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a); 4399 break; 4400 4401 default: 4402 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n", 4403 INSTANCE(pState), e1kGetDescType(pDesc))); 4404 break; 4405 } 4406 4407 return rc; 4408 } 4409 4410 4411 DECLINLINE(void) e1kUpdateTxContext(E1KSTATE* pState, E1KTXDESC* pDesc) 4412 { 4413 if (pDesc->context.dw2.fTSE) 4414 { 4415 pState->contextTSE = pDesc->context; 4416 pState->u32PayRemain = pDesc->context.dw2.u20PAYLEN; 4417 pState->u16HdrRemain = pDesc->context.dw3.u8HDRLEN; 4418 e1kSetupGsoCtx(&pState->GsoCtx, &pDesc->context); 4419 STAM_COUNTER_INC(&pState->StatTxDescCtxTSE); 4420 } 4421 else 4422 { 4423 pState->contextNormal = pDesc->context; 4424 STAM_COUNTER_INC(&pState->StatTxDescCtxNormal); 4425 } 4426 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X" 4427 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", INSTANCE(pState), 4428 pDesc->context.dw2.fTSE ? 
"TSE" : "Normal", 4429 pDesc->context.ip.u8CSS, 4430 pDesc->context.ip.u8CSO, 4431 pDesc->context.ip.u16CSE, 4432 pDesc->context.tu.u8CSS, 4433 pDesc->context.tu.u8CSO, 4434 pDesc->context.tu.u16CSE)); 4435 } 4436 4437 4438 static bool e1kLocateTxPacket(E1KSTATE *pState) 4439 { 4440 LogFlow(("%s e1kLocateTxPacket: ENTER cbTxAlloc=%d\n", 4441 INSTANCE(pState), pState->cbTxAlloc)); 4442 /* Check if we have located the packet already. */ 4443 if (pState->cbTxAlloc) 4444 { 4445 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n", 4446 INSTANCE(pState), pState->cbTxAlloc)); 4447 return true; 4448 } 4449 4450 bool fTSE = false; 4451 uint32_t cbPacket = 0; 4452 4453 for (int i = pState->iTxDCurrent; i < pState->nTxDFetched; ++i) 4454 { 4455 E1KTXDESC *pDesc = &pState->aTxDescriptors[i]; 4456 switch (e1kGetDescType(pDesc)) 4457 { 4458 case E1K_DTYP_CONTEXT: 4459 e1kUpdateTxContext(pState, pDesc); 4460 continue; 4461 case E1K_DTYP_LEGACY: 4462 cbPacket += pDesc->legacy.cmd.u16Length; 4463 pState->fGSO = false; 4464 break; 4465 case E1K_DTYP_DATA: 4466 if (cbPacket == 0) 4467 { 4468 /* 4469 * The first fragment: save IXSM and TXSM options 4470 * as these are only valid in the first fragment. 4471 */ 4472 pState->fIPcsum = pDesc->data.dw3.fIXSM; 4473 pState->fTCPcsum = pDesc->data.dw3.fTXSM; 4474 fTSE = pDesc->data.cmd.fTSE; 4475 /* 4476 * TSE descriptors have VLE bit properly set in 4477 * the first fragment. 4478 */ 4479 if (fTSE) 4480 { 4481 pState->fVTag = pDesc->data.cmd.fVLE; 4482 pState->u16VTagTCI = pDesc->data.dw3.u16Special; 4483 } 4484 pState->fGSO = e1kCanDoGso(&pState->GsoCtx, &pDesc->data, &pState->contextTSE); 4485 } 4486 cbPacket += pDesc->data.cmd.u20DTALEN; 4487 break; 4488 default: 4489 AssertMsgFailed(("Impossible descriptor type!")); 4490 } 4491 if (pDesc->legacy.cmd.fEOP) 4492 { 4493 /* 4494 * Non-TSE descriptors have VLE bit properly set in 4495 * the last fragment. 
4496 */ 4497 if (!fTSE) 4498 { 4499 pState->fVTag = pDesc->data.cmd.fVLE; 4500 pState->u16VTagTCI = pDesc->data.dw3.u16Special; 4501 } 4502 /* 4503 * Compute the required buffer size. If we cannot do GSO but still 4504 * have to do segmentation we allocate the first segment only. 4505 */ 4506 pState->cbTxAlloc = (!fTSE || pState->fGSO) ? 4507 cbPacket : 4508 RT_MIN(cbPacket, pState->contextTSE.dw3.u16MSS + pState->contextTSE.dw3.u8HDRLEN); 4509 if (pState->fVTag) 4510 pState->cbTxAlloc += 4; 4511 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n", 4512 INSTANCE(pState), pState->cbTxAlloc)); 4513 return true; 4514 } 4515 } 4516 4517 LogFlow(("%s e1kLocateTxPacket: RET false cbTxAlloc=%d\n", 4518 INSTANCE(pState), pState->cbTxAlloc)); 4519 return false; 4520 } 4521 4522 4523 static int e1kXmitPacket(E1KSTATE *pState, bool fOnWorkerThread) 4524 { 4525 int rc = VINF_SUCCESS; 4526 4527 LogFlow(("%s e1kXmitPacket: ENTER current=%d fetched=%d\n", 4528 INSTANCE(pState), pState->iTxDCurrent, pState->nTxDFetched)); 4529 4530 while (pState->iTxDCurrent < pState->nTxDFetched) 4531 { 4532 E1KTXDESC *pDesc = &pState->aTxDescriptors[pState->iTxDCurrent]; 4533 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n", 4534 INSTANCE(pState), TDBAH, TDBAL + TDH * sizeof(E1KTXDESC), TDLEN, TDH, TDT)); 4535 rc = e1kXmitDesc(pState, pDesc, 4536 ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(E1KTXDESC), 4537 fOnWorkerThread); 4538 if (RT_FAILURE(rc)) 4539 break; 4540 if (++TDH * sizeof(E1KTXDESC) >= TDLEN) 4541 TDH = 0; 4542 uint32_t uLowThreshold = GET_BITS(TXDCTL, LWTHRESH)*8; 4543 if (uLowThreshold != 0 && e1kGetTxLen(pState) <= uLowThreshold) 4544 { 4545 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n", 4546 INSTANCE(pState), e1kGetTxLen(pState), GET_BITS(TXDCTL, LWTHRESH)*8)); 4547 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_TXD_LOW); 4548 } 4549 ++pState->iTxDCurrent; 4550 if (e1kGetDescType(pDesc) != 
E1K_DTYP_CONTEXT && pDesc->legacy.cmd.fEOP) 4551 break; 4552 } 4553 4554 LogFlow(("%s e1kXmitPacket: RET %Rrc current=%d fetched=%d\n", 4555 INSTANCE(pState), rc, pState->iTxDCurrent, pState->nTxDFetched)); 4556 return rc; 4557 } 4558 #endif /* E1K_WITH_TXD_CACHE */ 4559 4560 #ifndef E1K_WITH_TXD_CACHE 3917 4561 /** 3918 4562 * Transmit pending descriptors. … … 3980 4624 return rc; 3981 4625 } 4626 #else /* E1K_WITH_TXD_CACHE */ 4627 /** 4628 * Transmit pending descriptors. 4629 * 4630 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy. 4631 * 4632 * @param pState The E1000 state. 4633 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT. 4634 */ 4635 static int e1kXmitPending(E1KSTATE *pState, bool fOnWorkerThread) 4636 { 4637 int rc; 4638 4639 /* 4640 * Grab the xmit lock of the driver as well as the E1K device state. 4641 */ 4642 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv); 4643 if (pDrv) 4644 { 4645 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread); 4646 if (RT_FAILURE(rc)) 4647 return rc; 4648 } 4649 rc = e1kMutexAcquire(pState, VERR_TRY_AGAIN, RT_SRC_POS); 4650 if (RT_SUCCESS(rc)) 4651 { 4652 /*size_t cbPacket = 0; 4653 int nDescInPacket = 0; 4654 E1KTXDESC *pFirstDesc = pState->aTxDescriptors;*/ 4655 /* 4656 * Process all pending descriptors. 4657 * Note! Do not process descriptors in locked state 4658 */ 4659 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatTransmit), a); 4660 while (!pState->fLocked && e1kTxDLazyLoad(pState)) 4661 { 4662 while (e1kLocateTxPacket(pState)) 4663 { 4664 // 1) packet located -- allocate it! 4665 rc = e1kXmitAllocBuf(pState, pState->fGSO); 4666 /* If we're out of bandwidth we'll come back later. */ 4667 if (RT_FAILURE(rc)) 4668 goto out; 4669 /* Copy the packet to allocated buffer and send it. */ 4670 rc = e1kXmitPacket(pState, fOnWorkerThread); 4671 /* If we're out of bandwidth we'll come back later. 
*/ 4672 if (RT_FAILURE(rc)) 4673 goto out; 4674 } 4675 uint8_t u8Remain = pState->nTxDFetched - pState->iTxDCurrent; 4676 if (u8Remain > 0) 4677 { 4678 /* 4679 * A packet was partially fetched. Move incomplete packet to 4680 * the beginning of cache buffer, then load more descriptors. 4681 */ 4682 memmove(pState->aTxDescriptors, 4683 &pState->aTxDescriptors[pState->iTxDCurrent], 4684 u8Remain * sizeof(E1KTXDESC)); 4685 pState->nTxDFetched = u8Remain; 4686 e1kTxDLoadMore(pState); 4687 } 4688 else 4689 pState->nTxDFetched = 0; 4690 pState->iTxDCurrent = 0; 4691 } 4692 if (!pState->fLocked && GET_BITS(TXDCTL, LWTHRESH) == 0) 4693 { 4694 E1kLog2(("%s Out of transmit descriptors, raise ICR.TXD_LOW\n", 4695 INSTANCE(pState))); 4696 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_TXD_LOW); 4697 } 4698 4699 out: 4700 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a); 4701 4702 /// @todo: uncomment: pState->uStatIntTXQE++; 4703 /// @todo: uncomment: e1kRaiseInterrupt(pState, ICR_TXQE); 4704 4705 /* 4706 * Release the locks. 4707 */ 4708 e1kMutexRelease(pState); 4709 } 4710 if (pDrv) 4711 pDrv->pfnEndXmit(pDrv); 4712 return rc; 4713 } 4714 #endif /* E1K_WITH_TXD_CACHE */ 3982 4715 3983 4716 #ifdef IN_RING3 … … 5315 6048 SSMR3PutBool(pSSM, pState->fVTag); 5316 6049 SSMR3PutU16(pSSM, pState->u16VTagTCI); 6050 #ifdef E1K_WITH_TXD_CACHE 6051 SSMR3PutU8(pSSM, pState->nTxDFetched); 6052 SSMR3PutMem(pSSM, pState->aTxDescriptors, 6053 pState->nTxDFetched * sizeof(pState->aTxDescriptors[0])); 6054 #endif /* E1K_WITH_TXD_CACHE */ 5317 6055 /**@todo GSO requires some more state here. 
*/ 5318 6056 E1kLog(("%s State has been saved\n", INSTANCE(pState))); … … 5433 6171 rc = SSMR3GetU16(pSSM, &pState->u16VTagTCI); 5434 6172 AssertRCReturn(rc, rc); 6173 #ifdef E1K_WITH_TXD_CACHE 6174 rc = SSMR3GetU8(pSSM, &pState->nTxDFetched); 6175 AssertRCReturn(rc, rc); 6176 SSMR3GetMem(pSSM, pState->aTxDescriptors, 6177 pState->nTxDFetched * sizeof(pState->aTxDescriptors[0])); 6178 #endif /* E1K_WITH_TXD_CACHE */ 5435 6179 } 5436 6180 else … … 5438 6182 pState->fVTag = false; 5439 6183 pState->u16VTagTCI = 0; 6184 #ifdef E1K_WITH_TXD_CACHE 6185 pState->nTxDFetched = 0; 6186 #endif /* E1K_WITH_TXD_CACHE */ 5440 6187 } 5441 6188 /* derived state */ … … 5624 6371 pState->fLocked = false; 5625 6372 pState->u64AckedAt = 0; 6373 #ifdef E1K_WITH_TXD_CACHE 6374 pState->nTxDFetched = 0; 6375 pState->iTxDCurrent = 0; 6376 pState->fGSO = false; 6377 pState->cbTxAlloc = 0; 6378 #endif /* E1K_WITH_TXD_CACHE */ 5626 6379 e1kHardReset(pState); 5627 6380 }
Note: See TracChangeset for help on using the changeset viewer.