Changeset 14470 in vbox for trunk/src/VBox/Devices
- Timestamp: Nov 21, 2008, 4:04:46 PM
- Location: trunk/src/VBox/Devices/Network/slirp
- Files: 28 edited
trunk/src/VBox/Devices/Network/slirp/cksum.c
r1 -> r14470: whitespace-only cleanup. The BSD license header is realigned, and the body of cksum() — the register declarations, the s_util/l_util unions, the odd-boundary byte-swap handling, the unrolled 32-byte and 8-byte summation loops, the DEBUG "out of data" block, and the final REDUCE/return — is re-indented from tabs to spaces. No functional change is visible in the hunks.
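For reference, the function being re-indented here, cksum(), computes the RFC 1071 one's-complement Internet checksum over an mbuf chain. Below is a minimal flat-buffer sketch of the same algorithm, assuming a contiguous packet; the mbuf walking, loop unrolling, and the byte_swapped/s_util bookkeeping of the real function are intentionally left out, and in_cksum/buf/len are illustrative names:

    #include <stddef.h>
    #include <stdint.h>

    /* RFC 1071 checksum over a contiguous buffer. Sums the data as
     * big-endian 16-bit words, folds the carries back in (the REDUCE
     * step in cksum.c), and returns the one's complement of the sum. */
    static uint16_t in_cksum(const uint8_t *buf, size_t len)
    {
        uint32_t sum = 0;

        while (len > 1) {
            sum += (uint32_t)((buf[0] << 8) | buf[1]);
            buf += 2;
            len -= 2;
        }
        if (len == 1)                    /* odd trailing byte, zero-padded */
            sum += (uint32_t)(buf[0] << 8);

        while (sum >> 16)                /* fold carry bits back in */
            sum = (sum & 0xffff) + (sum >> 16);

        return (uint16_t)~sum;
    }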
trunk/src/VBox/Devices/Network/slirp/debug.c
r14466 -> r14470: re-indentation of the statistics printers — ttystats(), allttystats(), ipstats(), vjstats(), tcpstats(), udpstats(), icmpstats(), mbufstats(), and sockstats(). The lprint() format strings and their fixed-width %6d columns are unchanged.
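A small aside on the formatting idiom in sockstats() above: the first column is padded to 17 characters with a manual sprintf()-then-space-fill loop. A printf field width does the same padding and truncation in one step; a hypothetical equivalent:

    #include <stdio.h>

    /* Equivalent of:  n = sprintf(buff, "tcp[%s]", state);
     *                 while (n < 17) buff[n++] = ' ';
     *                 buff[17] = 0;
     * "%-17.17s" left-justifies to 17 columns and truncates at 17. */
    static void print_proto_column(const char *state, int fd)
    {
        char label[32];
        snprintf(label, sizeof(label), "tcp[%s]", state);
        printf("%-17.17s %3d\r\n", label, fd);
    }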
trunk/src/VBox/Devices/Network/slirp/debug.h
r1076 -> r14470: the PRN_STDERR (1) and PRN_SPRINTF (2) #defines are realigned; as the adjacent comment notes, they are unused anyway since the VBox Log facility is used.
trunk/src/VBox/Devices/Network/slirp/if.c
r14333 -> r14470: re-indentation of ifs_insque()/ifs_remque(), if_init(), if_output(), and if_start(). The logic is untouched: per-session circular packet lists, the fastq/batchq split between interactive and bulk traffic, the heuristic that downgrades a session to the batch queue once it has queued 6 packets with at least 3 already sent, and the dequeue loop that hands each packet to if_encap().
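The ifs_insque()/ifs_remque() pair maintains circular, doubly linked per-session packet lists in which every mbuf starts out as its own one-element list (ifs_init()). A self-contained sketch of those three operations, with an illustrative node type standing in for struct mbuf:

    struct node {
        struct node *next, *prev;
    };

    /* ifs_init: a node alone in a circular list points at itself */
    static void list_init(struct node *n)
    {
        n->next = n->prev = n;
    }

    /* ifs_insque: splice n in immediately after head */
    static void list_insque(struct node *n, struct node *head)
    {
        n->next = head->next;
        head->next = n;
        n->prev = head;
        n->next->prev = n;
    }

    /* ifs_remque: the neighbours bypass n; n itself is left dangling */
    static void list_remque(struct node *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }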
trunk/src/VBox/Devices/Network/slirp/if.h
r1 -> r14470: realignment of the IF_COMPRESS/IF_NOCOMPRESS/IF_AUTOCOMP/IF_NOCIDCOMP flag #defines, the extern declarations (if_mtu, if_mru, if_comp, if_maxlinkhdr, if_queued, if_thresh, if_fastq, if_batchq, next_m), and the field comments in struct slirp_ifstats.
trunk/src/VBox/Devices/Network/slirp/ip.h
r14407 -> r14470: alignment-only cleanup. The BSD license header, the IPVERSION/IP_DF/IP_MF/IP_OFFMASK, IPTOS_*, IPOPT_*, and IPOPT_SECUR_* defines, the implementation parameters (MAXTTL, IPDEFTTL, IPFRAGTTL, IPTTLDEC, IP_MSS, MAX_IPOPTLEN), and the comments in struct ip, struct ip_timestamp, struct ipovly, struct ipq_t, struct ipasfrag, struct ipoption, and struct ipstat_t are re-tabbed to spaces; the declarations themselves do not change.
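The endianness #ifdef maze in struct ip and struct ip_timestamp exists only because C bitfield order is implementation-defined. When portability matters more than the struct overlay, the two nibbles can be read from the raw octet directly; a sketch (function names are illustrative):

    #include <stdint.h>

    /* First octet of an IPv4 header: version in the high nibble,
     * header length (in 32-bit words) in the low nibble — the same
     * fields struct ip models with the ip_v/ip_hl bitfields. */
    static unsigned ipv4_version(const uint8_t *pkt)
    {
        return pkt[0] >> 4;
    }

    static unsigned ipv4_hdrlen_bytes(const uint8_t *pkt)
    {
        return (pkt[0] & 0x0f) << 2;    /* e.g. 0x45 -> 20-byte header */
    }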
trunk/src/VBox/Devices/Network/slirp/ip_icmp.c
r8010 -> r14470: re-indentation of the license header, the icmp_flush[] table, icmp_input() — including the ICMP_ECHO branch that either reflects a ping to the alias address or forwards it via a UDP socket toward the DNS/alias/loopback target — and the RFC 1122 comment block above icmp_error(). The hunks are whitespace only.
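The ICMP_ECHO branch answers a ping aimed at the alias address by rewriting the packet in place and reflecting it. A minimal sketch of that type-flip-and-rechecksum step on a flat buffer, reusing <stdint.h>/<stddef.h> and the in_cksum() sketch from the cksum.c entry above (the real code also restores ip_len and goes through icmp_reflect()):

    /* Rewrite an ICMP echo request (type 8) into an echo reply (type 0)
     * in place; icmp points at the ICMP header, len is the message size. */
    static void echo_to_reply(uint8_t *icmp, size_t len)
    {
        icmp[0] = 0;                         /* type: ICMP_ECHO -> ICMP_ECHOREPLY */
        icmp[2] = icmp[3] = 0;               /* zero the checksum field */
        uint16_t sum = in_cksum(icmp, len);  /* recompute over the message */
        icmp[2] = (uint8_t)(sum >> 8);       /* store in network byte order */
        icmp[3] = (uint8_t)(sum & 0xff);
    }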
trunk/src/VBox/Devices/Network/slirp/ip_input.c
r14390 -> r14470: re-indentation of ip_init(), ip_input() — the header sanity checks, the TTL check that raises ICMP_TIMXCEED, the pre-BSD reassembly path kept under #ifndef VBOX_WITH_BSD_REASS, and the dispatch to tcp_input()/udp_input()/icmp_input() — plus ip_reass(), ip_freef(), ip_enq(), ip_deq(), ip_slowtimo(), and the dead ip_dooptions()/ip_stripoptions() option-processing code. The visible hunks are whitespace only.
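ip_input() runs a fixed gauntlet of checks — capture length, version, header length, header checksum, total length, TTL — before handing the packet to a protocol input routine. The same sequence on a flat buffer, again borrowing the in_cksum() sketch (drops are collapsed into a -1 return; mbuf trimming, reassembly, and the ICMP time-exceeded reply are omitted):

    /* Mirror of ip_input()'s checks; returns 0 if the header is acceptable. */
    static int ipv4_sanity(const uint8_t *pkt, size_t caplen)
    {
        if (caplen < 20)                        /* ips_toosmall */
            return -1;
        if ((pkt[0] >> 4) != 4)                 /* ips_badvers */
            return -1;
        unsigned hlen = (pkt[0] & 0x0f) << 2;
        if (hlen < 20 || hlen > caplen)         /* ips_badhlen */
            return -1;
        if (in_cksum(pkt, hlen) != 0)           /* ips_badsum: must verify to 0 */
            return -1;
        unsigned ip_len = ((unsigned)pkt[2] << 8) | pkt[3];
        if (ip_len < hlen)                      /* ips_badlen */
            return -1;
        if (ip_len > caplen)                    /* ips_tooshort (excess is trimmed) */
            return -1;
        if (pkt[8] == 0 || pkt[8] == 1)         /* TTL check -> ICMP_TIMXCEED */
            return -1;
        return 0;
    }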
trunk/src/VBox/Devices/Network/slirp/ip_output.c
r1076 r14470 1 1 /* 2 2 * Copyright (c) 1982, 1986, 1988, 1990, 1993 3 * 3 * The Regents of the University of California. All rights reserved. 4 4 * 5 5 * Redistribution and use in source and binary forms, with or without … … 13 13 * 3. All advertising materials mentioning features or use of this software 14 14 * must display the following acknowledgement: 15 * 16 * 15 * This product includes software developed by the University of 16 * California, Berkeley and its contributors. 17 17 * 4. Neither the name of the University nor the names of its contributors 18 18 * may be used to endorse or promote products derived from this software … … 31 31 * SUCH DAMAGE. 32 32 * 33 * @(#)ip_output.c8.3 (Berkeley) 1/21/9433 * @(#)ip_output.c 8.3 (Berkeley) 1/21/94 34 34 * ip_output.c,v 1.9 1994/11/16 10:17:10 jkh Exp 35 35 */ … … 55 55 ip_output(PNATState pData, struct socket *so, struct mbuf *m0) 56 56 { 57 58 59 60 61 62 63 64 65 66 67 /* 68 * 69 * 70 * 71 */ 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 /* 88 * 89 * 90 * 91 */ 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 57 register struct ip *ip; 58 register struct mbuf *m = m0; 59 register int hlen = sizeof(struct ip ); 60 int len, off, error = 0; 61 62 DEBUG_CALL("ip_output"); 63 DEBUG_ARG("so = %lx", (long)so); 64 DEBUG_ARG("m0 = %lx", (long)m0); 65 66 /* We do no options */ 67 /* if (opt) { 68 * m = ip_insertoptions(m, opt, &len); 69 * hlen = len; 70 * } 71 */ 72 ip = mtod(m, struct ip *); 73 /* 74 * Fill in IP header. 75 */ 76 ip->ip_v = IPVERSION; 77 ip->ip_off &= IP_DF; 78 ip->ip_id = htons(ip_currid++); 79 ip->ip_hl = hlen >> 2; 80 ipstat.ips_localout++; 81 82 /* 83 * Verify that we have any chance at all of being able to queue 84 * the packet or packet fragments 85 */ 86 /* XXX Hmmm... */ 87 /* if (if_queued > if_thresh && towrite <= 0) { 88 * error = ENOBUFS; 89 * goto bad; 90 * } 91 */ 92 93 /* 94 * If small enough for interface, can just send directly. 95 */ 96 if ((u_int16_t)ip->ip_len <= if_mtu) { 97 ip->ip_len = htons((u_int16_t)ip->ip_len); 98 ip->ip_off = htons((u_int16_t)ip->ip_off); 99 ip->ip_sum = 0; 100 ip->ip_sum = cksum(m, hlen); 101 102 if_output(pData, so, m); 103 goto done; 104 } 105 106 /* 107 * Too large for interface; fragment if possible. 108 * Must be able to put at least 8 bytes per fragment. 109 */ 110 if (ip->ip_off & IP_DF) { 111 error = -1; 112 ipstat.ips_cantfrag++; 113 goto bad; 114 } 115 116 len = (if_mtu - hlen) &~ 7; /* ip databytes per packet */ 117 if (len < 8) { 118 error = -1; 119 goto bad; 120 } 121 121 122 122 { 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 /* 146 * 147 * 148 * 149 */ 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 123 int mhlen, firstlen = len; 124 struct mbuf **mnext = &m->m_nextpkt; 125 126 /* 127 * Loop through length of segment after first fragment, 128 * make new header and copy data of each part and link onto chain. 
129 */ 130 m0 = m; 131 mhlen = sizeof (struct ip); 132 for (off = hlen + len; off < (u_int16_t)ip->ip_len; off += len) { 133 register struct ip *mhip; 134 m = m_get(pData); 135 if (m == 0) { 136 error = -1; 137 ipstat.ips_odropped++; 138 goto sendorfree; 139 } 140 m->m_data += if_maxlinkhdr; 141 mhip = mtod(m, struct ip *); 142 *mhip = *ip; 143 144 /* No options */ 145 /* if (hlen > sizeof (struct ip)) { 146 * mhlen = ip_optcopy(ip, mhip) + sizeof (struct ip); 147 * mhip->ip_hl = mhlen >> 2; 148 * } 149 */ 150 m->m_len = mhlen; 151 mhip->ip_off = ((off - hlen) >> 3) + (ip->ip_off & ~IP_MF); 152 if (ip->ip_off & IP_MF) 153 mhip->ip_off |= IP_MF; 154 if (off + len >= (u_int16_t)ip->ip_len) 155 len = (u_int16_t)ip->ip_len - off; 156 else 157 mhip->ip_off |= IP_MF; 158 mhip->ip_len = htons((u_int16_t)(len + mhlen)); 159 160 if (m_copy(m, m0, off, len) < 0) { 161 error = -1; 162 goto sendorfree; 163 } 164 165 mhip->ip_off = htons((u_int16_t)mhip->ip_off); 166 mhip->ip_sum = 0; 167 mhip->ip_sum = cksum(m, mhlen); 168 *mnext = m; 169 mnext = &m->m_nextpkt; 170 ipstat.ips_ofragments++; 171 } 172 /* 173 * Update first fragment by trimming what's been copied out 174 * and updating header, then send each fragment (in order). 175 */ 176 m = m0; 177 m_adj(m, hlen + firstlen - (u_int16_t)ip->ip_len); 178 ip->ip_len = htons((u_int16_t)m->m_len); 179 ip->ip_off = htons((u_int16_t)(ip->ip_off | IP_MF)); 180 ip->ip_sum = 0; 181 ip->ip_sum = cksum(m, hlen); 182 182 sendorfree: 183 184 185 186 187 188 189 190 191 192 193 183 for (m = m0; m; m = m0) { 184 m0 = m->m_nextpkt; 185 m->m_nextpkt = 0; 186 if (error == 0) 187 if_output(pData, so, m); 188 else 189 m_freem(pData, m); 190 } 191 192 if (error == 0) 193 ipstat.ips_fragmented++; 194 194 } 195 195 196 196 done: 197 197 return (error); 198 198 199 199 bad: 200 201 200 m_freem(pData, m0); 201 goto done; 202 202 } -
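The fragmentation branch above sizes every fragment so its payload is a multiple of 8 bytes (len = (if_mtu - hlen) & ~7), because the IP offset field counts 8-byte units. A standalone sketch of that arithmetic, using illustrative mtu/hlen/total values rather than anything taken from the device code:

    #include <stdio.h>

    /* Print the fragment layout that ip_output()-style arithmetic produces.
     * mtu, hlen and total are made-up example values. */
    int main(void)
    {
        int mtu   = 1500;               /* interface MTU */
        int hlen  = 20;                 /* IP header without options */
        int total = 4000;               /* ip_len: header plus payload */
        int len   = (mtu - hlen) & ~7;  /* data bytes per fragment, 8-aligned */
        int off;

        if (len < 8)
            return 1;                   /* the real code refuses to fragment */

        /* First fragment keeps the original header and carries 'len' bytes. */
        printf("frag: byte-off=%4d len=%4d MF=1\n", 0, len);

        /* Remaining fragments, mirroring the for-loop in ip_output(). */
        for (off = hlen + len; off < total; off += len) {
            int n = (off + len >= total) ? total - off : len;
            /* the header field stores (off - hlen) >> 3; print the byte offset */
            printf("frag: byte-off=%4d len=%4d MF=%d\n",
                   off - hlen, n, off + n < total);
        }
        return 0;
    }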
trunk/src/VBox/Devices/Network/slirp/mbuf.c
r13984 r14470 22 22 m_init(PNATState pData) 23 23 { 24 25 24 m_freelist.m_next = m_freelist.m_prev = &m_freelist; 25 m_usedlist.m_next = m_usedlist.m_prev = &m_usedlist; 26 26 mbuf_alloced = 0; 27 27 msize_init(pData); 28 28 } 29 29 … … 31 31 msize_init(PNATState pData) 32 32 { 33 34 35 36 37 38 33 /* 34 * Find a nice value for msize 35 * XXX if_maxlinkhdr already in mtu 36 */ 37 msize = (if_mtu>if_mru?if_mtu:if_mru) + 38 if_maxlinkhdr + sizeof(struct m_hdr ) + 6; 39 39 } 40 40 … … 50 50 m_get(PNATState pData) 51 51 { 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 52 register struct mbuf *m; 53 int flags = 0; 54 55 DEBUG_CALL("m_get"); 56 57 if (m_freelist.m_next == &m_freelist) { 58 m = (struct mbuf *)malloc(msize); 59 if (m == NULL) goto end_error; 60 mbuf_alloced++; 61 if (mbuf_alloced > mbuf_thresh) 62 flags = M_DOFREE; 63 if (mbuf_alloced > mbuf_max) 64 mbuf_max = mbuf_alloced; 65 } else { 66 m = m_freelist.m_next; 67 remque(pData, m); 68 } 69 70 /* Insert it in the used list */ 71 insque(pData, m,&m_usedlist); 72 m->m_flags = (flags | M_USEDLIST); 73 74 /* Initialise it */ 75 m->m_size = msize - sizeof(struct m_hdr); 76 m->m_data = m->m_dat; 77 m->m_len = 0; 78 m->m_nextpkt = 0; 79 m->m_prevpkt = 0; 80 80 end_error: 81 82 81 DEBUG_ARG("m = %lx", (long )m); 82 return m; 83 83 } 84 84 … … 91 91 92 92 if(m) { 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 93 /* Remove from m_usedlist */ 94 if (m->m_flags & M_USEDLIST) 95 remque(pData, m); 96 97 /* If it's M_EXT, free() it */ 98 if (m->m_flags & M_EXT) 99 free(m->m_ext); 100 101 /* 102 * Either free() it or put it on the free list 103 */ 104 if (m->m_flags & M_DOFREE) { 105 u32ptr_done(pData, ptr_to_u32(pData, m), m); 106 free(m); 107 mbuf_alloced--; 108 } else if ((m->m_flags & M_FREELIST) == 0) { 109 insque(pData, m,&m_freelist); 110 m->m_flags = M_FREELIST; /* Clobber other flags */ 111 } 112 112 } /* if(m) */ 113 113 } … … 121 121 m_cat(PNATState pData, register struct mbuf *m, register struct mbuf *n) 122 122 { 123 124 125 126 127 128 129 130 131 132 123 /* 124 * If there's no room, realloc 125 */ 126 if (M_FREEROOM(m) < n->m_len) 127 m_inc(m,m->m_size+MINCSIZE); 128 129 memcpy(m->m_data+m->m_len, n->m_data, n->m_len); 130 m->m_len += n->m_len; 131 132 m_free(pData, n); 133 133 } 134 134 … … 140 140 int size; 141 141 { 142 143 144 142 int datasize; 143 144 /* some compiles throw up on gotos. This one we can fake. 
*/ 145 145 if(m->m_size>size) return; 146 146 147 147 if (m->m_flags & M_EXT) { 148 149 150 /* 151 * 152 */ 153 148 datasize = m->m_data - m->m_ext; 149 m->m_ext = (char *)realloc(m->m_ext,size); 150 /* if (m->m_ext == NULL) 151 * return (struct mbuf *)NULL; 152 */ 153 m->m_data = m->m_ext + datasize; 154 154 } else { 155 156 157 158 /* 159 * 160 */ 161 162 163 164 165 155 char *dat; 156 datasize = m->m_data - m->m_dat; 157 dat = (char *)malloc(size); 158 /* if (dat == NULL) 159 * return (struct mbuf *)NULL; 160 */ 161 memcpy(dat, m->m_dat, m->m_size); 162 163 m->m_ext = dat; 164 m->m_data = m->m_ext + datasize; 165 m->m_flags |= M_EXT; 166 166 } 167 167 … … 174 174 void 175 175 m_adj(m, len) 176 177 178 { 179 180 181 182 183 184 185 186 187 188 189 176 struct mbuf *m; 177 int len; 178 { 179 if (m == NULL) 180 return; 181 if (len >= 0) { 182 /* Trim from head */ 183 m->m_data += len; 184 m->m_len -= len; 185 } else { 186 /* Trim from tail */ 187 len = -len; 188 m->m_len -= len; 189 } 190 190 } 191 191 … … 196 196 int 197 197 m_copy(n, m, off, len) 198 199 200 { 201 202 203 204 205 206 198 struct mbuf *n, *m; 199 int off, len; 200 { 201 if (len > M_FREEROOM(n)) 202 return -1; 203 204 memcpy((n->m_data + n->m_len), (m->m_data + off), len); 205 n->m_len += len; 206 return 0; 207 207 } 208 208 … … 216 216 dtom(PNATState pData, void *dat) 217 217 { 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 } 238 218 struct mbuf *m; 219 220 DEBUG_CALL("dtom"); 221 DEBUG_ARG("dat = %lx", (long )dat); 222 223 /* bug corrected for M_EXT buffers */ 224 for (m = m_usedlist.m_next; m != &m_usedlist; m = m->m_next) { 225 if (m->m_flags & M_EXT) { 226 if( (char *)dat>=m->m_ext && (char *)dat<(m->m_ext + m->m_size) ) 227 return m; 228 } else { 229 if( (char *)dat >= m->m_dat && (char *)dat<(m->m_dat + m->m_size) ) 230 return m; 231 } 232 } 233 234 DEBUG_ERROR((dfd, "dtom failed")); 235 236 return (struct mbuf *)0; 237 } 238 -
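dtom() above recovers the owning mbuf from an arbitrary interior data pointer by range-checking every buffer on m_usedlist, handling M_EXT storage separately. The idea in isolation, with a plain fixed array standing in for the used-list walk and purely illustrative names:

    #include <stdio.h>

    /* The dtom() idea: given a pointer somewhere inside a buffer, find
     * the owning buffer by range-checking each candidate. */
    struct buf { char data[64]; };

    static struct buf bufs[4];

    static struct buf *dtob(void *dat)
    {
        char *p = (char *)dat;
        int i;

        for (i = 0; i < 4; i++)
            if (p >= bufs[i].data && p < bufs[i].data + sizeof(bufs[i].data))
                return &bufs[i];
        return NULL;        /* dtom() likewise returns (struct mbuf *)0 */
    }

    int main(void)
    {
        char *p = bufs[2].data + 17;    /* some interior pointer */
        printf("owner index = %d\n", (int)(dtob(p) - bufs));
        return 0;
    }

The linear scan is O(n) in the number of live mbufs, which is why the source flags it with an XXX in mbuf.h.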
trunk/src/VBox/Devices/Network/slirp/mbuf.h
r14390 r14470 1 1 /* 2 2 * Copyright (c) 1982, 1986, 1988, 1993 3 * 3 * The Regents of the University of California. All rights reserved. 4 4 * 5 5 * Redistribution and use in source and binary forms, with or without … … 13 13 * 3. All advertising materials mentioning features or use of this software 14 14 * must display the following acknowledgement: 15 * 16 * 15 * This product includes software developed by the University of 16 * California, Berkeley and its contributors. 17 17 * 4. Neither the name of the University nor the names of its contributors 18 18 * may be used to endorse or promote products derived from this software … … 31 31 * SUCH DAMAGE. 32 32 * 33 * @(#)mbuf.h8.3 (Berkeley) 1/21/9433 * @(#)mbuf.h 8.3 (Berkeley) 1/21/94 34 34 * mbuf.h,v 1.9 1994/11/14 13:54:20 bde Exp 35 35 */ … … 41 41 42 42 43 #define MINCSIZE 4096 43 #define MINCSIZE 4096 /* Amount to increase mbuf if too small */ 44 44 45 45 /* 46 46 * Macros for type conversion 47 * mtod(m,t) - 48 * dtom(x) - 47 * mtod(m,t) - convert mbuf pointer to data pointer of correct type 48 * dtom(x) - convert data pointer within mbuf to mbuf pointer (XXX) 49 49 */ 50 #define mtod(m,t) 51 /* #define dtom(x)((struct mbuf *)((int)(x) & ~(M_SIZE-1))) */50 #define mtod(m,t) ((t)(m)->m_data) 51 /* #define dtom(x) ((struct mbuf *)((int)(x) & ~(M_SIZE-1))) */ 52 52 53 53 /* XXX About mbufs for slirp: … … 62 62 /* header at beginning of each mbuf: */ 63 63 struct m_hdr { 64 struct mbuf *mh_next;/* Linked list of mbufs */65 structmbuf *mh_prev;66 struct mbuf *mh_nextpkt;/* Next packet in queue/record */67 structmbuf *mh_prevpkt; /* Flags aren't used in the output queue */68 int mh_flags;/* Misc flags */64 struct mbuf *mh_next; /* Linked list of mbufs */ 65 struct mbuf *mh_prev; 66 struct mbuf *mh_nextpkt; /* Next packet in queue/record */ 67 struct mbuf *mh_prevpkt; /* Flags aren't used in the output queue */ 68 int mh_flags; /* Misc flags */ 69 69 70 int mh_size;/* Size of data */71 structsocket *mh_so;70 int mh_size; /* Size of data */ 71 struct socket *mh_so; 72 72 73 caddr_t mh_data;/* Location of data */74 int mh_len;/* Amount of data in this mbuf */73 caddr_t mh_data; /* Location of data */ 74 int mh_len; /* Amount of data in this mbuf */ 75 75 #ifdef VBOX_WITH_BSD_REASS 76 76 void *header; /*XXX: in real BSD sources this field lays in pkthdr structure*/ … … 82 82 */ 83 83 #define M_ROOM(m) ((m->m_flags & M_EXT)? 
\ 84 85 86 84 (((m)->m_ext + (m)->m_size) - (m)->m_data) \ 85 : \ 86 (((m)->m_dat + (m)->m_size) - (m)->m_data)) 87 87 88 88 /* … … 93 93 94 94 struct mbuf { 95 structm_hdr m_hdr;96 97 charm_dat_[1]; /* ANSI don't like 0 sized arrays */98 char*m_ext_;99 95 struct m_hdr m_hdr; 96 union M_dat { 97 char m_dat_[1]; /* ANSI don't like 0 sized arrays */ 98 char *m_ext_; 99 } M_dat; 100 100 }; 101 101 102 #define m_next 103 #define m_prev 104 #define m_nextpkt 105 #define m_prevpkt 106 #define m_flags 107 #define m_lenm_hdr.mh_len108 #define m_datam_hdr.mh_data109 #define m_size 110 #define m_dat 111 #define m_ext 112 #define m_so 102 #define m_next m_hdr.mh_next 103 #define m_prev m_hdr.mh_prev 104 #define m_nextpkt m_hdr.mh_nextpkt 105 #define m_prevpkt m_hdr.mh_prevpkt 106 #define m_flags m_hdr.mh_flags 107 #define m_len m_hdr.mh_len 108 #define m_data m_hdr.mh_data 109 #define m_size m_hdr.mh_size 110 #define m_dat M_dat.m_dat_ 111 #define m_ext M_dat.m_ext_ 112 #define m_so m_hdr.mh_so 113 113 114 114 #define ifq_prev m_prev … … 118 118 #define ifq_so m_so 119 119 120 #define M_EXT 0x01/* m_ext points to more (malloced) data */121 #define M_FREELIST 0x02/* mbuf is on free list */122 #define M_USEDLIST 0x04/* XXX mbuf is on used list (for dtom()) */123 #define M_DOFREE 0x08/* when m_free is called on the mbuf, free()124 120 #define M_EXT 0x01 /* m_ext points to more (malloced) data */ 121 #define M_FREELIST 0x02 /* mbuf is on free list */ 122 #define M_USEDLIST 0x04 /* XXX mbuf is on used list (for dtom()) */ 123 #define M_DOFREE 0x08 /* when m_free is called on the mbuf, free() 124 * it rather than putting it on the free list */ 125 125 #ifdef VBOX_WITH_BSD_REASS 126 126 #define M_FRAG 0x0800 /* packet is a fragment of a larger packet */ … … 134 134 135 135 struct mbstat { 136 int mbs_alloced;/* Number of mbufs allocated */136 int mbs_alloced; /* Number of mbufs allocated */ 137 137 138 138 }; 139 139 140 extern struct 140 extern struct mbstat mbstat; 141 141 extern int mbuf_alloced; 142 142 extern struct mbuf m_freelist, m_usedlist; -
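Since the accessors above are plain macros, mtod(m, t) is nothing more than a cast of m_data. A minimal sketch, assuming a cut-down two-field stand-in for struct mbuf (the real layout is the m_hdr/M_dat pair defined above):

    #include <stdio.h>

    /* Cut-down stand-in for struct mbuf, for illustration only. */
    struct mini_mbuf {
        char *m_data;       /* current data pointer (mh_data) */
        char  m_dat[64];    /* inline storage */
    };

    #define mtod(m, t) ((t)(m)->m_data)   /* same definition as mbuf.h */

    struct ip_hdr { unsigned char v_hl; unsigned char tos; unsigned short len; };

    int main(void)
    {
        struct mini_mbuf m;
        struct ip_hdr *ip;

        m.m_data = m.m_dat;             /* data starts at the inline buffer */
        ip = mtod(&m, struct ip_hdr *); /* just a cast of m_data */
        ip->v_hl = 0x45;                /* IPv4, 5-word header */
        printf("version = %u\n", ip->v_hl >> 4);
        return 0;
    }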
trunk/src/VBox/Devices/Network/slirp/misc.c
r14331 r14470 12 12 int 13 13 inet_aton(cp, ia) 14 15 16 { 17 18 19 20 21 14 const char *cp; 15 struct in_addr *ia; 16 { 17 u_int32_t addr = inet_addr(cp); 18 if (addr == 0xffffffff) 19 return 0; 20 ia->s_addr = addr; 21 return 1; 22 22 } 23 23 #endif … … 29 29 getouraddr(PNATState pData) 30 30 { 31 32 33 34 31 char buff[256]; 32 struct hostent *he = NULL; 33 34 if (gethostname(buff,256) == 0) 35 35 { 36 36 he = gethostbyname(buff); … … 56 56 57 57 struct quehead_32 { 58 59 58 u_int32_t qh_link; 59 u_int32_t qh_rlink; 60 60 }; 61 61 … … 63 63 insque_32(PNATState pData, void *a, void *b) 64 64 { 65 66 67 68 69 70 65 register struct quehead_32 *element = (struct quehead_32 *) a; 66 register struct quehead_32 *head = (struct quehead_32 *) b; 67 struct quehead_32 *link = u32_to_ptr(pData, head->qh_link, struct quehead_32 *); 68 69 element->qh_link = head->qh_link; 70 element->qh_rlink = ptr_to_u32(pData, head); 71 71 Assert(link->qh_rlink == element->qh_rlink); 72 72 link->qh_rlink = head->qh_link = ptr_to_u32(pData, element); 73 73 } 74 74 … … 76 76 remque_32(PNATState pData, void *a) 77 77 { 78 79 80 81 82 83 84 85 78 register struct quehead_32 *element = (struct quehead_32 *) a; 79 struct quehead_32 *link = u32_to_ptr(pData, element->qh_link, struct quehead_32 *); 80 struct quehead_32 *rlink = u32_to_ptr(pData, element->qh_rlink, struct quehead_32 *); 81 82 u32ptr_done(pData, link->qh_rlink, element); 83 link->qh_rlink = element->qh_rlink; 84 rlink->qh_link = element->qh_link; 85 element->qh_rlink = 0; 86 86 } 87 87 … … 89 89 90 90 struct quehead { 91 92 91 struct quehead *qh_link; 92 struct quehead *qh_rlink; 93 93 }; 94 94 … … 96 96 insque(PNATState pData, void *a, void *b) 97 97 { 98 99 100 101 102 103 104 98 register struct quehead *element = (struct quehead *) a; 99 register struct quehead *head = (struct quehead *) b; 100 element->qh_link = head->qh_link; 101 head->qh_link = (struct quehead *)element; 102 element->qh_rlink = (struct quehead *)head; 103 ((struct quehead *)(element->qh_link))->qh_rlink 104 = (struct quehead *)element; 105 105 } 106 106 … … 120 120 int 121 121 add_exec(ex_ptr, do_pty, exec, addr, port) 122 123 124 125 126 127 { 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 122 struct ex_list **ex_ptr; 123 int do_pty; 124 char *exec; 125 int addr; 126 int port; 127 { 128 struct ex_list *tmp_ptr; 129 130 /* First, check if the port is "bound" */ 131 for (tmp_ptr = *ex_ptr; tmp_ptr; tmp_ptr = tmp_ptr->ex_next) { 132 if (port == tmp_ptr->ex_fport && addr == tmp_ptr->ex_addr) 133 return -1; 134 } 135 136 tmp_ptr = *ex_ptr; 137 *ex_ptr = (struct ex_list *)malloc(sizeof(struct ex_list)); 138 (*ex_ptr)->ex_fport = port; 139 (*ex_ptr)->ex_addr = addr; 140 (*ex_ptr)->ex_pty = do_pty; 141 (*ex_ptr)->ex_exec = strdup(exec); 142 (*ex_ptr)->ex_next = tmp_ptr; 143 return 0; 144 144 } 145 145 … … 155 155 char * 156 156 strerror(error) 157 158 { 159 160 161 162 157 int error; 158 { 159 if (error < sys_nerr) 160 return sys_errlist[error]; 161 else 162 return "Unknown error."; 163 163 } 164 164 … … 169 169 char * 170 170 strdup(str) 171 172 { 173 174 175 176 177 178 171 const char *str; 172 { 173 char *bptr; 174 175 bptr = (char *)malloc(strlen(str)+1); 176 strcpy(bptr, str); 177 178 return bptr; 179 179 } 180 180 #endif … … 191 191 int 192 192 vsprintf_len(string, format, args) 193 194 195 196 { 197 198 193 char *string; 194 const char *format; 195 va_list args; 196 { 197 vsprintf(string, format, args); 198 return strlen(string); 199 199 } 200 200 … … 206 206 #endif 207 
207 { 208 208 va_list args; 209 209 #ifdef __STDC__ 210 211 #else 212 213 214 215 216 217 #endif 218 219 210 va_start(args, format); 211 #else 212 char *string; 213 char *format; 214 va_start(args); 215 string = va_arg(args, char *); 216 format = va_arg(args, char *); 217 #endif 218 vsprintf(string, format, args); 219 return strlen(string); 220 220 } 221 221 … … 224 224 void 225 225 u_sleep(usec) 226 227 { 228 229 230 231 232 233 234 235 236 226 int usec; 227 { 228 struct timeval t; 229 fd_set fdset; 230 231 FD_ZERO(&fdset); 232 233 t.tv_sec = 0; 234 t.tv_usec = usec * 1000; 235 236 select(0, &fdset, &fdset, &fdset, &t); 237 237 } 238 238 … … 243 243 void 244 244 fd_nonblock(fd) 245 245 int fd; 246 246 { 247 247 #ifdef FIONBIO 248 249 250 251 #else 252 253 254 255 256 248 int opt = 1; 249 250 ioctlsocket(fd, FIONBIO, &opt); 251 #else 252 int opt; 253 254 opt = fcntl(fd, F_GETFL, 0); 255 opt |= O_NONBLOCK; 256 fcntl(fd, F_SETFL, opt); 257 257 #endif 258 258 } … … 260 260 void 261 261 fd_block(fd) 262 262 int fd; 263 263 { 264 264 #ifdef FIONBIO 265 266 267 268 #else 269 270 271 272 273 274 #endif 275 } 276 265 int opt = 0; 266 267 ioctlsocket(fd, FIONBIO, &opt); 268 #else 269 int opt; 270 271 opt = fcntl(fd, F_GETFL, 0); 272 opt &= ~O_NONBLOCK; 273 fcntl(fd, F_SETFL, opt); 274 #endif 275 } 276 -
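insque()/remque() above rely on the classic quehead trick: any structure whose first two members are forward and back links can sit on a circular doubly-linked queue, with an empty head pointing at itself. A self-contained sketch (q_insque, q_remque and struct item are illustrative names; the 32-bit-offset variant is omitted):

    #include <stdio.h>

    struct quehead {
        struct quehead *qh_link;
        struct quehead *qh_rlink;
    };

    static void q_insque(struct quehead *element, struct quehead *head)
    {
        element->qh_link = head->qh_link;
        element->qh_rlink = head;
        head->qh_link->qh_rlink = element;  /* old first element points back */
        head->qh_link = element;
    }

    static void q_remque(struct quehead *element)
    {
        element->qh_link->qh_rlink = element->qh_rlink;
        element->qh_rlink->qh_link = element->qh_link;
        element->qh_link = element->qh_rlink = 0;
    }

    struct item { struct quehead q; int value; }; /* links must come first */

    int main(void)
    {
        struct quehead head = { &head, &head };   /* empty queue: self-linked */
        struct item a = { { 0, 0 }, 1 }, b = { { 0, 0 }, 2 };
        struct quehead *p;

        q_insque(&a.q, &head);
        q_insque(&b.q, &head);                    /* inserts at head: b, a */
        for (p = head.qh_link; p != &head; p = p->qh_link)
            printf("%d\n", ((struct item *)p)->value);
        q_remque(&a.q);
        return 0;
    }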
trunk/src/VBox/Devices/Network/slirp/queue.h
r14434 r14470 1 1 /*- 2 2 * Copyright (c) 1991, 1993 3 * 3 * The Regents of the University of California. All rights reserved. 4 4 * 5 5 * Redistribution and use in source and binary forms, with or without … … 27 27 * SUCH DAMAGE. 28 28 * 29 * @(#)queue.h8.5 (Berkeley) 8/20/9429 * @(#)queue.h 8.5 (Berkeley) 8/20/94 30 30 * $FreeBSD: src/sys/sys/queue.h,v 1.68 2006/10/24 11:20:29 ru Exp $ 31 31 */ 32 32 33 33 #ifndef _SYS_QUEUE_H_ 34 #define 34 #define _SYS_QUEUE_H_ 35 35 36 36 #include <iprt/cdefs.h> … … 84 84 * 85 85 * 86 * SLIST LIST STAILQTAILQ87 * _HEAD + + ++88 * _HEAD_INITIALIZER + + ++89 * _ENTRY + + ++90 * _INIT + + ++91 * _EMPTY + + ++92 * _FIRST + + ++93 * _NEXT + + ++94 * _PREV - - -+95 * _LAST - - ++96 * _FOREACH + + ++97 * _FOREACH_SAFE + + ++98 * _FOREACH_REVERSE - - -+99 * _FOREACH_REVERSE_SAFE - - -+100 * _INSERT_HEAD + + ++101 * _INSERT_BEFORE - + -+102 * _INSERT_AFTER + + ++103 * _INSERT_TAIL - - ++104 * _CONCAT - - ++105 * _REMOVE_HEAD + - +-106 * _REMOVE + + ++86 * SLIST LIST STAILQ TAILQ 87 * _HEAD + + + + 88 * _HEAD_INITIALIZER + + + + 89 * _ENTRY + + + + 90 * _INIT + + + + 91 * _EMPTY + + + + 92 * _FIRST + + + + 93 * _NEXT + + + + 94 * _PREV - - - + 95 * _LAST - - + + 96 * _FOREACH + + + + 97 * _FOREACH_SAFE + + + + 98 * _FOREACH_REVERSE - - - + 99 * _FOREACH_REVERSE_SAFE - - - + 100 * _INSERT_HEAD + + + + 101 * _INSERT_BEFORE - + - + 102 * _INSERT_AFTER + + + + 103 * _INSERT_TAIL - - + + 104 * _CONCAT - - + + 105 * _REMOVE_HEAD + - + - 106 * _REMOVE + + + + 107 107 * 108 108 */ … … 110 110 /* Store the last 2 places the queue element or head was altered */ 111 111 struct qm_trace { 112 113 114 115 112 char * lastfile; 113 int lastline; 114 char * prevfile; 115 int prevline; 116 116 }; 117 117 118 #define TRACEBUFstruct qm_trace trace;119 #define TRASHIT(x)do {(x) = (void *)-1;} while (0)120 121 #define QMD_TRACE_HEAD(head) do {\122 (head)->trace.prevline = (head)->trace.lastline;\123 (head)->trace.prevfile = (head)->trace.lastfile;\124 (head)->trace.lastline = __LINE__;\125 (head)->trace.lastfile = __FILE__;\126 } while (0) 127 128 #define QMD_TRACE_ELEM(elem) do {\129 (elem)->trace.prevline = (elem)->trace.lastline;\130 (elem)->trace.prevfile = (elem)->trace.lastfile;\131 (elem)->trace.lastline = __LINE__;\132 (elem)->trace.lastfile = __FILE__;\118 #define TRACEBUF struct qm_trace trace; 119 #define TRASHIT(x) do {(x) = (void *)-1;} while (0) 120 121 #define QMD_TRACE_HEAD(head) do { \ 122 (head)->trace.prevline = (head)->trace.lastline; \ 123 (head)->trace.prevfile = (head)->trace.lastfile; \ 124 (head)->trace.lastline = __LINE__; \ 125 (head)->trace.lastfile = __FILE__; \ 126 } while (0) 127 128 #define QMD_TRACE_ELEM(elem) do { \ 129 (elem)->trace.prevline = (elem)->trace.lastline; \ 130 (elem)->trace.prevfile = (elem)->trace.lastfile; \ 131 (elem)->trace.lastline = __LINE__; \ 132 (elem)->trace.lastfile = __FILE__; \ 133 133 } while (0) 134 134 135 135 #else 136 #define 137 #define 138 #define 139 #define 140 #endif 136 #define QMD_TRACE_ELEM(elem) 137 #define QMD_TRACE_HEAD(head) 138 #define TRACEBUF 139 #define TRASHIT(x) 140 #endif /* QUEUE_MACRO_DEBUG */ 141 141 142 142 /* 143 143 * Singly-linked List declarations. 
144 144 */ 145 #define SLIST_HEAD(name, type)\146 struct name { 147 struct type *slh_first; /* first element */\148 } 149 150 #define SLIST_HEAD_INITIALIZER(head)\151 152 153 #define SLIST_ENTRY(type)\154 struct { 155 struct type *sle_next; /* next element */\145 #define SLIST_HEAD(name, type) \ 146 struct name { \ 147 struct type *slh_first; /* first element */ \ 148 } 149 150 #define SLIST_HEAD_INITIALIZER(head) \ 151 { NULL } 152 153 #define SLIST_ENTRY(type) \ 154 struct { \ 155 struct type *sle_next; /* next element */ \ 156 156 } 157 157 … … 159 159 * Singly-linked List functions. 160 160 */ 161 #define SLIST_EMPTY(head)((head)->slh_first == NULL)162 163 #define SLIST_FIRST(head)((head)->slh_first)164 165 #define SLIST_FOREACH(var, head, field)\166 for ((var) = SLIST_FIRST((head));\167 (var);\168 169 170 #define SLIST_FOREACH_SAFE(var, head, field, tvar)\171 for ((var) = SLIST_FIRST((head));\172 (var) && ((tvar) = SLIST_NEXT((var), field), 1);\173 174 175 #define SLIST_FOREACH_PREVPTR(var, varp, head, field)\176 for ((varp) = &SLIST_FIRST((head));\177 ((var) = *(varp)) != NULL;\178 179 180 #define SLIST_INIT(head) do {\181 SLIST_FIRST((head)) = NULL;\182 } while (0) 183 184 #define SLIST_INSERT_AFTER(slistelm, elm, field) do {\185 SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field);\186 SLIST_NEXT((slistelm), field) = (elm);\187 } while (0) 188 189 #define SLIST_INSERT_HEAD(head, elm, field) do {\190 SLIST_NEXT((elm), field) = SLIST_FIRST((head));\191 SLIST_FIRST((head)) = (elm);\192 } while (0) 193 194 #define SLIST_NEXT(elm, field)((elm)->field.sle_next)195 196 #define SLIST_REMOVE(head, elm, type, field) do {\197 if (SLIST_FIRST((head)) == (elm)) {\198 SLIST_REMOVE_HEAD((head), field);\199 }\200 else {\201 struct type *curelm = SLIST_FIRST((head));\202 while (SLIST_NEXT(curelm, field) != (elm))\203 curelm = SLIST_NEXT(curelm, field);\204 SLIST_NEXT(curelm, field) =\205 SLIST_NEXT(SLIST_NEXT(curelm, field), field);\206 }\207 TRASHIT((elm)->field.sle_next);\208 } while (0) 209 210 #define SLIST_REMOVE_HEAD(head, field) do {\211 SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field);\161 #define SLIST_EMPTY(head) ((head)->slh_first == NULL) 162 163 #define SLIST_FIRST(head) ((head)->slh_first) 164 165 #define SLIST_FOREACH(var, head, field) \ 166 for ((var) = SLIST_FIRST((head)); \ 167 (var); \ 168 (var) = SLIST_NEXT((var), field)) 169 170 #define SLIST_FOREACH_SAFE(var, head, field, tvar) \ 171 for ((var) = SLIST_FIRST((head)); \ 172 (var) && ((tvar) = SLIST_NEXT((var), field), 1); \ 173 (var) = (tvar)) 174 175 #define SLIST_FOREACH_PREVPTR(var, varp, head, field) \ 176 for ((varp) = &SLIST_FIRST((head)); \ 177 ((var) = *(varp)) != NULL; \ 178 (varp) = &SLIST_NEXT((var), field)) 179 180 #define SLIST_INIT(head) do { \ 181 SLIST_FIRST((head)) = NULL; \ 182 } while (0) 183 184 #define SLIST_INSERT_AFTER(slistelm, elm, field) do { \ 185 SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \ 186 SLIST_NEXT((slistelm), field) = (elm); \ 187 } while (0) 188 189 #define SLIST_INSERT_HEAD(head, elm, field) do { \ 190 SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \ 191 SLIST_FIRST((head)) = (elm); \ 192 } while (0) 193 194 #define SLIST_NEXT(elm, field) ((elm)->field.sle_next) 195 196 #define SLIST_REMOVE(head, elm, type, field) do { \ 197 if (SLIST_FIRST((head)) == (elm)) { \ 198 SLIST_REMOVE_HEAD((head), field); \ 199 } \ 200 else { \ 201 struct type *curelm = SLIST_FIRST((head)); \ 202 while (SLIST_NEXT(curelm, field) != (elm)) \ 203 curelm = SLIST_NEXT(curelm, field); \ 
204 SLIST_NEXT(curelm, field) = \ 205 SLIST_NEXT(SLIST_NEXT(curelm, field), field); \ 206 } \ 207 TRASHIT((elm)->field.sle_next); \ 208 } while (0) 209 210 #define SLIST_REMOVE_HEAD(head, field) do { \ 211 SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \ 212 212 } while (0) 213 213 … … 215 215 * Singly-linked Tail queue declarations. 216 216 */ 217 #define STAILQ_HEAD(name, type)\218 struct name { 219 struct type *stqh_first;/* first element */\220 struct type **stqh_last;/* addr of last next element */\221 } 222 223 #define STAILQ_HEAD_INITIALIZER(head)\224 225 226 #define STAILQ_ENTRY(type)\227 struct { 228 struct type *stqe_next; /* next element */\217 #define STAILQ_HEAD(name, type) \ 218 struct name { \ 219 struct type *stqh_first;/* first element */ \ 220 struct type **stqh_last;/* addr of last next element */ \ 221 } 222 223 #define STAILQ_HEAD_INITIALIZER(head) \ 224 { NULL, &(head).stqh_first } 225 226 #define STAILQ_ENTRY(type) \ 227 struct { \ 228 struct type *stqe_next; /* next element */ \ 229 229 } 230 230 … … 232 232 * Singly-linked Tail queue functions. 233 233 */ 234 #define STAILQ_CONCAT(head1, head2) do {\235 if (!STAILQ_EMPTY((head2))) {\236 *(head1)->stqh_last = (head2)->stqh_first;\237 (head1)->stqh_last = (head2)->stqh_last;\238 STAILQ_INIT((head2));\239 }\240 } while (0) 241 242 #define STAILQ_EMPTY(head)((head)->stqh_first == NULL)243 244 #define STAILQ_FIRST(head)((head)->stqh_first)245 246 #define STAILQ_FOREACH(var, head, field)\247 for((var) = STAILQ_FIRST((head));\248 (var);\249 250 251 252 #define STAILQ_FOREACH_SAFE(var, head, field, tvar)\253 for ((var) = STAILQ_FIRST((head));\254 (var) && ((tvar) = STAILQ_NEXT((var), field), 1);\255 256 257 #define STAILQ_INIT(head) do {\258 STAILQ_FIRST((head)) = NULL;\259 (head)->stqh_last = &STAILQ_FIRST((head));\260 } while (0) 261 262 #define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do {\263 264 (head)->stqh_last = &STAILQ_NEXT((elm), field);\265 STAILQ_NEXT((tqelm), field) = (elm);\266 } while (0) 267 268 #define STAILQ_INSERT_HEAD(head, elm, field) do {\269 if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL)\270 (head)->stqh_last = &STAILQ_NEXT((elm), field);\271 STAILQ_FIRST((head)) = (elm);\272 } while (0) 273 274 #define STAILQ_INSERT_TAIL(head, elm, field) do {\275 STAILQ_NEXT((elm), field) = NULL;\276 *(head)->stqh_last = (elm);\277 (head)->stqh_last = &STAILQ_NEXT((elm), field);\278 } while (0) 279 280 #define STAILQ_LAST(head, type, field)\281 (STAILQ_EMPTY((head)) ?\282 NULL :\283 ((struct type *)(void *)\284 285 286 #define STAILQ_NEXT(elm, field)((elm)->field.stqe_next)287 288 #define STAILQ_REMOVE(head, elm, type, field) do {\289 if (STAILQ_FIRST((head)) == (elm)) {\290 STAILQ_REMOVE_HEAD((head), field);\291 }\292 else {\293 struct type *curelm = STAILQ_FIRST((head));\294 while (STAILQ_NEXT(curelm, field) != (elm))\295 curelm = STAILQ_NEXT(curelm, field);\296 if ((STAILQ_NEXT(curelm, field) =\297 298 299 }\300 TRASHIT((elm)->field.stqe_next);\301 } while (0) 302 303 #define STAILQ_REMOVE_HEAD(head, field) do {\304 if ((STAILQ_FIRST((head)) =\305 STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL)\306 (head)->stqh_last = &STAILQ_FIRST((head));\234 #define STAILQ_CONCAT(head1, head2) do { \ 235 if (!STAILQ_EMPTY((head2))) { \ 236 *(head1)->stqh_last = (head2)->stqh_first; \ 237 (head1)->stqh_last = (head2)->stqh_last; \ 238 STAILQ_INIT((head2)); \ 239 } \ 240 } while (0) 241 242 #define STAILQ_EMPTY(head) ((head)->stqh_first == NULL) 243 244 #define STAILQ_FIRST(head) 
((head)->stqh_first) 245 246 #define STAILQ_FOREACH(var, head, field) \ 247 for((var) = STAILQ_FIRST((head)); \ 248 (var); \ 249 (var) = STAILQ_NEXT((var), field)) 250 251 252 #define STAILQ_FOREACH_SAFE(var, head, field, tvar) \ 253 for ((var) = STAILQ_FIRST((head)); \ 254 (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \ 255 (var) = (tvar)) 256 257 #define STAILQ_INIT(head) do { \ 258 STAILQ_FIRST((head)) = NULL; \ 259 (head)->stqh_last = &STAILQ_FIRST((head)); \ 260 } while (0) 261 262 #define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \ 263 if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\ 264 (head)->stqh_last = &STAILQ_NEXT((elm), field); \ 265 STAILQ_NEXT((tqelm), field) = (elm); \ 266 } while (0) 267 268 #define STAILQ_INSERT_HEAD(head, elm, field) do { \ 269 if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \ 270 (head)->stqh_last = &STAILQ_NEXT((elm), field); \ 271 STAILQ_FIRST((head)) = (elm); \ 272 } while (0) 273 274 #define STAILQ_INSERT_TAIL(head, elm, field) do { \ 275 STAILQ_NEXT((elm), field) = NULL; \ 276 *(head)->stqh_last = (elm); \ 277 (head)->stqh_last = &STAILQ_NEXT((elm), field); \ 278 } while (0) 279 280 #define STAILQ_LAST(head, type, field) \ 281 (STAILQ_EMPTY((head)) ? \ 282 NULL : \ 283 ((struct type *)(void *) \ 284 ((char *)((head)->stqh_last) - __offsetof(struct type, field)))) 285 286 #define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next) 287 288 #define STAILQ_REMOVE(head, elm, type, field) do { \ 289 if (STAILQ_FIRST((head)) == (elm)) { \ 290 STAILQ_REMOVE_HEAD((head), field); \ 291 } \ 292 else { \ 293 struct type *curelm = STAILQ_FIRST((head)); \ 294 while (STAILQ_NEXT(curelm, field) != (elm)) \ 295 curelm = STAILQ_NEXT(curelm, field); \ 296 if ((STAILQ_NEXT(curelm, field) = \ 297 STAILQ_NEXT(STAILQ_NEXT(curelm, field), field)) == NULL)\ 298 (head)->stqh_last = &STAILQ_NEXT((curelm), field);\ 299 } \ 300 TRASHIT((elm)->field.stqe_next); \ 301 } while (0) 302 303 #define STAILQ_REMOVE_HEAD(head, field) do { \ 304 if ((STAILQ_FIRST((head)) = \ 305 STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \ 306 (head)->stqh_last = &STAILQ_FIRST((head)); \ 307 307 } while (0) 308 308 … … 310 310 * List declarations. 
311 311 */ 312 #define LIST_HEAD(name, type)\313 struct name { 314 struct type *lh_first; /* first element */\315 } 316 317 #define LIST_HEAD_INITIALIZER(head)\318 319 320 #define LIST_ENTRY(type)\321 struct { 322 struct type *le_next; /* next element */\323 struct type **le_prev; /* address of previous next element */\312 #define LIST_HEAD(name, type) \ 313 struct name { \ 314 struct type *lh_first; /* first element */ \ 315 } 316 317 #define LIST_HEAD_INITIALIZER(head) \ 318 { NULL } 319 320 #define LIST_ENTRY(type) \ 321 struct { \ 322 struct type *le_next; /* next element */ \ 323 struct type **le_prev; /* address of previous next element */ \ 324 324 } 325 325 … … 329 329 330 330 #if (defined(_KERNEL) && defined(INVARIANTS)) 331 #define QMD_LIST_CHECK_HEAD(head, field) do {\332 if (LIST_FIRST((head)) != NULL &&\333 LIST_FIRST((head))->field.le_prev !=\334 &LIST_FIRST((head)))\335 panic("Bad list head %p first->prev != head", (head));\336 } while (0) 337 338 #define QMD_LIST_CHECK_NEXT(elm, field) do {\339 if (LIST_NEXT((elm), field) != NULL &&\340 LIST_NEXT((elm), field)->field.le_prev !=\341 &((elm)->field.le_next))\342 panic("Bad link elm %p next->prev != elm", (elm));\343 } while (0) 344 345 #define QMD_LIST_CHECK_PREV(elm, field) do {\346 if (*(elm)->field.le_prev != (elm))\347 panic("Bad link elm %p prev->next != elm", (elm));\331 #define QMD_LIST_CHECK_HEAD(head, field) do { \ 332 if (LIST_FIRST((head)) != NULL && \ 333 LIST_FIRST((head))->field.le_prev != \ 334 &LIST_FIRST((head))) \ 335 panic("Bad list head %p first->prev != head", (head)); \ 336 } while (0) 337 338 #define QMD_LIST_CHECK_NEXT(elm, field) do { \ 339 if (LIST_NEXT((elm), field) != NULL && \ 340 LIST_NEXT((elm), field)->field.le_prev != \ 341 &((elm)->field.le_next)) \ 342 panic("Bad link elm %p next->prev != elm", (elm)); \ 343 } while (0) 344 345 #define QMD_LIST_CHECK_PREV(elm, field) do { \ 346 if (*(elm)->field.le_prev != (elm)) \ 347 panic("Bad link elm %p prev->next != elm", (elm)); \ 348 348 } while (0) 349 349 #else 350 #define 351 #define 352 #define 350 #define QMD_LIST_CHECK_HEAD(head, field) 351 #define QMD_LIST_CHECK_NEXT(elm, field) 352 #define QMD_LIST_CHECK_PREV(elm, field) 353 353 #endif /* (_KERNEL && INVARIANTS) */ 354 354 355 #define LIST_EMPTY(head)((head)->lh_first == NULL)356 357 #define LIST_FIRST(head)((head)->lh_first)358 359 #define LIST_FOREACH(var, head, field)\360 for ((var) = LIST_FIRST((head));\361 (var);\362 363 364 #define LIST_FOREACH_SAFE(var, head, field, tvar)\365 for ((var) = LIST_FIRST((head));\366 (var) && ((tvar) = LIST_NEXT((var), field), 1);\367 368 369 #define LIST_INIT(head) do {\370 LIST_FIRST((head)) = NULL;\371 } while (0) 372 373 #define LIST_INSERT_AFTER(listelm, elm, field) do {\374 QMD_LIST_CHECK_NEXT(listelm, field);\375 376 LIST_NEXT((listelm), field)->field.le_prev =\377 &LIST_NEXT((elm), field);\378 LIST_NEXT((listelm), field) = (elm);\379 (elm)->field.le_prev = &LIST_NEXT((listelm), field);\380 } while (0) 381 382 #define LIST_INSERT_BEFORE(listelm, elm, field) do {\383 QMD_LIST_CHECK_PREV(listelm, field);\384 (elm)->field.le_prev = (listelm)->field.le_prev;\385 LIST_NEXT((elm), field) = (listelm);\386 *(listelm)->field.le_prev = (elm);\387 (listelm)->field.le_prev = &LIST_NEXT((elm), field);\388 } while (0) 389 390 #define LIST_INSERT_HEAD(head, elm, field) do {\391 QMD_LIST_CHECK_HEAD((head), field);\392 if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL)\393 394 LIST_FIRST((head)) = (elm);\395 (elm)->field.le_prev = &LIST_FIRST((head));\396 } 
while (0) 397 398 #define LIST_NEXT(elm, field)((elm)->field.le_next)399 400 #define LIST_REMOVE(elm, field) do {\401 QMD_LIST_CHECK_NEXT(elm, field);\402 QMD_LIST_CHECK_PREV(elm, field);\403 if (LIST_NEXT((elm), field) != NULL)\404 LIST_NEXT((elm), field)->field.le_prev =\405 (elm)->field.le_prev;\406 *(elm)->field.le_prev = LIST_NEXT((elm), field);\407 TRASHIT((elm)->field.le_next);\408 TRASHIT((elm)->field.le_prev);\355 #define LIST_EMPTY(head) ((head)->lh_first == NULL) 356 357 #define LIST_FIRST(head) ((head)->lh_first) 358 359 #define LIST_FOREACH(var, head, field) \ 360 for ((var) = LIST_FIRST((head)); \ 361 (var); \ 362 (var) = LIST_NEXT((var), field)) 363 364 #define LIST_FOREACH_SAFE(var, head, field, tvar) \ 365 for ((var) = LIST_FIRST((head)); \ 366 (var) && ((tvar) = LIST_NEXT((var), field), 1); \ 367 (var) = (tvar)) 368 369 #define LIST_INIT(head) do { \ 370 LIST_FIRST((head)) = NULL; \ 371 } while (0) 372 373 #define LIST_INSERT_AFTER(listelm, elm, field) do { \ 374 QMD_LIST_CHECK_NEXT(listelm, field); \ 375 if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\ 376 LIST_NEXT((listelm), field)->field.le_prev = \ 377 &LIST_NEXT((elm), field); \ 378 LIST_NEXT((listelm), field) = (elm); \ 379 (elm)->field.le_prev = &LIST_NEXT((listelm), field); \ 380 } while (0) 381 382 #define LIST_INSERT_BEFORE(listelm, elm, field) do { \ 383 QMD_LIST_CHECK_PREV(listelm, field); \ 384 (elm)->field.le_prev = (listelm)->field.le_prev; \ 385 LIST_NEXT((elm), field) = (listelm); \ 386 *(listelm)->field.le_prev = (elm); \ 387 (listelm)->field.le_prev = &LIST_NEXT((elm), field); \ 388 } while (0) 389 390 #define LIST_INSERT_HEAD(head, elm, field) do { \ 391 QMD_LIST_CHECK_HEAD((head), field); \ 392 if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \ 393 LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\ 394 LIST_FIRST((head)) = (elm); \ 395 (elm)->field.le_prev = &LIST_FIRST((head)); \ 396 } while (0) 397 398 #define LIST_NEXT(elm, field) ((elm)->field.le_next) 399 400 #define LIST_REMOVE(elm, field) do { \ 401 QMD_LIST_CHECK_NEXT(elm, field); \ 402 QMD_LIST_CHECK_PREV(elm, field); \ 403 if (LIST_NEXT((elm), field) != NULL) \ 404 LIST_NEXT((elm), field)->field.le_prev = \ 405 (elm)->field.le_prev; \ 406 *(elm)->field.le_prev = LIST_NEXT((elm), field); \ 407 TRASHIT((elm)->field.le_next); \ 408 TRASHIT((elm)->field.le_prev); \ 409 409 } while (0) 410 410 … … 412 412 * Tail queue declarations. 
413 413 */ 414 #define TAILQ_HEAD(name, type)\415 struct name { 416 struct type *tqh_first; /* first element */\417 struct type **tqh_last; /* addr of last next element */\418 TRACEBUF\419 } 420 421 #define TAILQ_HEAD_INITIALIZER(head)\422 423 424 #define TAILQ_ENTRY(type)\425 struct { 426 struct type *tqe_next; /* next element */\427 struct type **tqe_prev; /* address of previous next element */\428 TRACEBUF\414 #define TAILQ_HEAD(name, type) \ 415 struct name { \ 416 struct type *tqh_first; /* first element */ \ 417 struct type **tqh_last; /* addr of last next element */ \ 418 TRACEBUF \ 419 } 420 421 #define TAILQ_HEAD_INITIALIZER(head) \ 422 { NULL, &(head).tqh_first } 423 424 #define TAILQ_ENTRY(type) \ 425 struct { \ 426 struct type *tqe_next; /* next element */ \ 427 struct type **tqe_prev; /* address of previous next element */ \ 428 TRACEBUF \ 429 429 } 430 430 … … 433 433 */ 434 434 #if (defined(_KERNEL) && defined(INVARIANTS)) 435 #define QMD_TAILQ_CHECK_HEAD(head, field) do {\436 if (!TAILQ_EMPTY(head) &&\437 TAILQ_FIRST((head))->field.tqe_prev !=\438 &TAILQ_FIRST((head)))\439 panic("Bad tailq head %p first->prev != head", (head));\440 } while (0) 441 442 #define QMD_TAILQ_CHECK_TAIL(head, field) do {\443 if (*(head)->tqh_last != NULL)\444 panic("Bad tailq NEXT(%p->tqh_last) != NULL", (head));\445 } while (0) 446 447 #define QMD_TAILQ_CHECK_NEXT(elm, field) do {\448 if (TAILQ_NEXT((elm), field) != NULL &&\449 TAILQ_NEXT((elm), field)->field.tqe_prev !=\450 &((elm)->field.tqe_next))\451 panic("Bad link elm %p next->prev != elm", (elm));\452 } while (0) 453 454 #define QMD_TAILQ_CHECK_PREV(elm, field) do {\455 if (*(elm)->field.tqe_prev != (elm))\456 panic("Bad link elm %p prev->next != elm", (elm));\435 #define QMD_TAILQ_CHECK_HEAD(head, field) do { \ 436 if (!TAILQ_EMPTY(head) && \ 437 TAILQ_FIRST((head))->field.tqe_prev != \ 438 &TAILQ_FIRST((head))) \ 439 panic("Bad tailq head %p first->prev != head", (head)); \ 440 } while (0) 441 442 #define QMD_TAILQ_CHECK_TAIL(head, field) do { \ 443 if (*(head)->tqh_last != NULL) \ 444 panic("Bad tailq NEXT(%p->tqh_last) != NULL", (head)); \ 445 } while (0) 446 447 #define QMD_TAILQ_CHECK_NEXT(elm, field) do { \ 448 if (TAILQ_NEXT((elm), field) != NULL && \ 449 TAILQ_NEXT((elm), field)->field.tqe_prev != \ 450 &((elm)->field.tqe_next)) \ 451 panic("Bad link elm %p next->prev != elm", (elm)); \ 452 } while (0) 453 454 #define QMD_TAILQ_CHECK_PREV(elm, field) do { \ 455 if (*(elm)->field.tqe_prev != (elm)) \ 456 panic("Bad link elm %p prev->next != elm", (elm)); \ 457 457 } while (0) 458 458 #else 459 #define 460 #define 461 #define 462 #define 459 #define QMD_TAILQ_CHECK_HEAD(head, field) 460 #define QMD_TAILQ_CHECK_TAIL(head, headname) 461 #define QMD_TAILQ_CHECK_NEXT(elm, field) 462 #define QMD_TAILQ_CHECK_PREV(elm, field) 463 463 #endif /* (_KERNEL && INVARIANTS) */ 464 464 465 #define TAILQ_CONCAT(head1, head2, field) do {\466 if (!TAILQ_EMPTY(head2)) {\467 *(head1)->tqh_last = (head2)->tqh_first;\468 (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last;\469 (head1)->tqh_last = (head2)->tqh_last;\470 TAILQ_INIT((head2));\471 QMD_TRACE_HEAD(head1);\472 QMD_TRACE_HEAD(head2);\473 }\474 } while (0) 475 476 #define TAILQ_EMPTY(head)((head)->tqh_first == NULL)477 478 #define TAILQ_FIRST(head)((head)->tqh_first)479 480 #define TAILQ_FOREACH(var, head, field)\481 for ((var) = TAILQ_FIRST((head));\482 (var);\483 484 485 #define TAILQ_FOREACH_SAFE(var, head, field, tvar)\486 for ((var) = TAILQ_FIRST((head));\487 (var) && ((tvar) = 
TAILQ_NEXT((var), field), 1);\488 489 490 #define TAILQ_FOREACH_REVERSE(var, head, headname, field)\491 for ((var) = TAILQ_LAST((head), headname);\492 (var);\493 494 495 #define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar)\496 for ((var) = TAILQ_LAST((head), headname);\497 (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1);\498 499 500 #define TAILQ_INIT(head) do {\501 TAILQ_FIRST((head)) = NULL;\502 (head)->tqh_last = &TAILQ_FIRST((head));\503 QMD_TRACE_HEAD(head);\504 } while (0) 505 506 #define TAILQ_INSERT_AFTER(head, listelm, elm, field) do {\507 QMD_TAILQ_CHECK_NEXT(listelm, field);\508 509 TAILQ_NEXT((elm), field)->field.tqe_prev =\510 &TAILQ_NEXT((elm), field);\511 else {\512 (head)->tqh_last = &TAILQ_NEXT((elm), field);\513 QMD_TRACE_HEAD(head);\514 }\515 TAILQ_NEXT((listelm), field) = (elm);\516 (elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field);\517 QMD_TRACE_ELEM(&(elm)->field);\518 QMD_TRACE_ELEM(&listelm->field);\519 } while (0) 520 521 #define TAILQ_INSERT_BEFORE(listelm, elm, field) do {\522 QMD_TAILQ_CHECK_PREV(listelm, field);\523 (elm)->field.tqe_prev = (listelm)->field.tqe_prev;\524 TAILQ_NEXT((elm), field) = (listelm);\525 *(listelm)->field.tqe_prev = (elm);\526 (listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field);\527 QMD_TRACE_ELEM(&(elm)->field);\528 QMD_TRACE_ELEM(&listelm->field);\529 } while (0) 530 531 #define TAILQ_INSERT_HEAD(head, elm, field) do {\532 QMD_TAILQ_CHECK_HEAD(head, field);\533 if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL)\534 TAILQ_FIRST((head))->field.tqe_prev =\535 &TAILQ_NEXT((elm), field);\536 else\537 (head)->tqh_last = &TAILQ_NEXT((elm), field);\538 TAILQ_FIRST((head)) = (elm);\539 (elm)->field.tqe_prev = &TAILQ_FIRST((head));\540 QMD_TRACE_HEAD(head);\541 QMD_TRACE_ELEM(&(elm)->field);\542 } while (0) 543 544 #define TAILQ_INSERT_TAIL(head, elm, field) do {\545 QMD_TAILQ_CHECK_TAIL(head, field);\546 TAILQ_NEXT((elm), field) = NULL;\547 (elm)->field.tqe_prev = (head)->tqh_last;\548 *(head)->tqh_last = (elm);\549 (head)->tqh_last = &TAILQ_NEXT((elm), field);\550 QMD_TRACE_HEAD(head);\551 QMD_TRACE_ELEM(&(elm)->field);\552 } while (0) 553 554 #define TAILQ_LAST(head, headname)\555 556 557 #define 558 559 #define TAILQ_PREV(elm, headname, field)\560 561 562 #define TAILQ_REMOVE(head, elm, field) do {\563 QMD_TAILQ_CHECK_NEXT(elm, field);\564 QMD_TAILQ_CHECK_PREV(elm, field);\565 if ((TAILQ_NEXT((elm), field)) != NULL)\566 TAILQ_NEXT((elm), field)->field.tqe_prev =\567 (elm)->field.tqe_prev;\568 else {\569 (head)->tqh_last = (elm)->field.tqe_prev;\570 QMD_TRACE_HEAD(head);\571 }\572 *(elm)->field.tqe_prev = TAILQ_NEXT((elm), field);\573 TRASHIT((elm)->field.tqe_next);\574 TRASHIT((elm)->field.tqe_prev);\575 QMD_TRACE_ELEM(&(elm)->field);\465 #define TAILQ_CONCAT(head1, head2, field) do { \ 466 if (!TAILQ_EMPTY(head2)) { \ 467 *(head1)->tqh_last = (head2)->tqh_first; \ 468 (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \ 469 (head1)->tqh_last = (head2)->tqh_last; \ 470 TAILQ_INIT((head2)); \ 471 QMD_TRACE_HEAD(head1); \ 472 QMD_TRACE_HEAD(head2); \ 473 } \ 474 } while (0) 475 476 #define TAILQ_EMPTY(head) ((head)->tqh_first == NULL) 477 478 #define TAILQ_FIRST(head) ((head)->tqh_first) 479 480 #define TAILQ_FOREACH(var, head, field) \ 481 for ((var) = TAILQ_FIRST((head)); \ 482 (var); \ 483 (var) = TAILQ_NEXT((var), field)) 484 485 #define TAILQ_FOREACH_SAFE(var, head, field, tvar) \ 486 for ((var) = TAILQ_FIRST((head)); \ 487 (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \ 488 (var) = 
(tvar)) 489 490 #define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ 491 for ((var) = TAILQ_LAST((head), headname); \ 492 (var); \ 493 (var) = TAILQ_PREV((var), headname, field)) 494 495 #define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \ 496 for ((var) = TAILQ_LAST((head), headname); \ 497 (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1); \ 498 (var) = (tvar)) 499 500 #define TAILQ_INIT(head) do { \ 501 TAILQ_FIRST((head)) = NULL; \ 502 (head)->tqh_last = &TAILQ_FIRST((head)); \ 503 QMD_TRACE_HEAD(head); \ 504 } while (0) 505 506 #define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ 507 QMD_TAILQ_CHECK_NEXT(listelm, field); \ 508 if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\ 509 TAILQ_NEXT((elm), field)->field.tqe_prev = \ 510 &TAILQ_NEXT((elm), field); \ 511 else { \ 512 (head)->tqh_last = &TAILQ_NEXT((elm), field); \ 513 QMD_TRACE_HEAD(head); \ 514 } \ 515 TAILQ_NEXT((listelm), field) = (elm); \ 516 (elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \ 517 QMD_TRACE_ELEM(&(elm)->field); \ 518 QMD_TRACE_ELEM(&listelm->field); \ 519 } while (0) 520 521 #define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \ 522 QMD_TAILQ_CHECK_PREV(listelm, field); \ 523 (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ 524 TAILQ_NEXT((elm), field) = (listelm); \ 525 *(listelm)->field.tqe_prev = (elm); \ 526 (listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \ 527 QMD_TRACE_ELEM(&(elm)->field); \ 528 QMD_TRACE_ELEM(&listelm->field); \ 529 } while (0) 530 531 #define TAILQ_INSERT_HEAD(head, elm, field) do { \ 532 QMD_TAILQ_CHECK_HEAD(head, field); \ 533 if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \ 534 TAILQ_FIRST((head))->field.tqe_prev = \ 535 &TAILQ_NEXT((elm), field); \ 536 else \ 537 (head)->tqh_last = &TAILQ_NEXT((elm), field); \ 538 TAILQ_FIRST((head)) = (elm); \ 539 (elm)->field.tqe_prev = &TAILQ_FIRST((head)); \ 540 QMD_TRACE_HEAD(head); \ 541 QMD_TRACE_ELEM(&(elm)->field); \ 542 } while (0) 543 544 #define TAILQ_INSERT_TAIL(head, elm, field) do { \ 545 QMD_TAILQ_CHECK_TAIL(head, field); \ 546 TAILQ_NEXT((elm), field) = NULL; \ 547 (elm)->field.tqe_prev = (head)->tqh_last; \ 548 *(head)->tqh_last = (elm); \ 549 (head)->tqh_last = &TAILQ_NEXT((elm), field); \ 550 QMD_TRACE_HEAD(head); \ 551 QMD_TRACE_ELEM(&(elm)->field); \ 552 } while (0) 553 554 #define TAILQ_LAST(head, headname) \ 555 (*(((struct headname *)((head)->tqh_last))->tqh_last)) 556 557 #define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) 558 559 #define TAILQ_PREV(elm, headname, field) \ 560 (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) 561 562 #define TAILQ_REMOVE(head, elm, field) do { \ 563 QMD_TAILQ_CHECK_NEXT(elm, field); \ 564 QMD_TAILQ_CHECK_PREV(elm, field); \ 565 if ((TAILQ_NEXT((elm), field)) != NULL) \ 566 TAILQ_NEXT((elm), field)->field.tqe_prev = \ 567 (elm)->field.tqe_prev; \ 568 else { \ 569 (head)->tqh_last = (elm)->field.tqe_prev; \ 570 QMD_TRACE_HEAD(head); \ 571 } \ 572 *(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \ 573 TRASHIT((elm)->field.tqe_next); \ 574 TRASHIT((elm)->field.tqe_prev); \ 575 QMD_TRACE_ELEM(&(elm)->field); \ 576 576 } while (0) 577 577 … … 585 585 586 586 struct quehead { 587 588 587 struct quehead *qh_link; 588 struct quehead *qh_rlink; 589 589 }; 590 590 … … 594 594 insque(void *a, void *b) 595 595 { 596 597 598 599 600 601 602 596 struct quehead *element = (struct quehead *)a, 597 *head = (struct quehead *)b; 598 599 element->qh_link = head->qh_link; 600 element->qh_rlink = head; 
601 head->qh_link = element; 602 element->qh_link->qh_rlink = element; 603 603 } 604 604 … … 606 606 remque(void *a) 607 607 { 608 609 610 611 612 608 struct quehead *element = (struct quehead *)a; 609 610 element->qh_link->qh_rlink = element->qh_rlink; 611 element->qh_rlink->qh_link = element->qh_link; 612 element->qh_rlink = 0; 613 613 } 614 614 615 615 #else /* !__CC_SUPPORTS___INLINE */ 616 616 617 void 618 void 617 void insque(void *a, void *b); 618 void remque(void *a); 619 619 620 620 #endif /* __CC_SUPPORTS___INLINE */ -
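The macro families above are used exactly like their BSD counterparts. A short TAILQ example for reference, assuming a BSD-style <sys/queue.h> is available (the macros are the same ones defined above):

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/queue.h>

    struct entry {
        int value;
        TAILQ_ENTRY(entry) link;        /* embeds tqe_next / tqe_prev */
    };

    TAILQ_HEAD(entry_list, entry);

    int main(void)
    {
        struct entry_list head = TAILQ_HEAD_INITIALIZER(head);
        struct entry *e;
        int i;

        for (i = 0; i < 3; i++) {       /* build the list 0, 1, 2 */
            e = (struct entry *)malloc(sizeof(*e));
            e->value = i;
            TAILQ_INSERT_TAIL(&head, e, link);
        }

        TAILQ_FOREACH(e, &head, link)   /* walk front to back */
            printf("%d\n", e->value);

        while ((e = TAILQ_FIRST(&head)) != NULL) {  /* drain and free */
            TAILQ_REMOVE(&head, e, link);
            free(e);
        }
        return 0;
    }

Note that removal during a plain TAILQ_FOREACH is unsafe; that is what the _FOREACH_SAFE variants above exist for.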
trunk/src/VBox/Devices/Network/slirp/sbuf.c
r1076 r14470 12 12 * sbspace(struct sockbuff *sb) 13 13 * { 14 * 14 * return SB_DATALEN - sb->sb_cc; 15 15 * } 16 16 */ … … 18 18 void 19 19 sbfree(sb) 20 20 struct sbuf *sb; 21 21 { 22 22 free(sb->sb_data); 23 23 } 24 24 25 25 void 26 26 sbdrop(sb, num) 27 28 27 struct sbuf *sb; 28 int num; 29 29 { 30 31 32 33 34 35 36 37 38 39 30 /* 31 * We can only drop how much we have 32 * This should never succeed 33 */ 34 if(num > sb->sb_cc) 35 num = sb->sb_cc; 36 sb->sb_cc -= num; 37 sb->sb_rptr += num; 38 if(sb->sb_rptr >= sb->sb_data + sb->sb_datalen) 39 sb->sb_rptr -= sb->sb_datalen; 40 40 41 41 } … … 43 43 void 44 44 sbreserve(sb, size) 45 46 45 struct sbuf *sb; 46 int size; 47 47 { 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 48 if (sb->sb_data) { 49 /* Already alloced, realloc if necessary */ 50 if (sb->sb_datalen != size) { 51 sb->sb_wptr = sb->sb_rptr = sb->sb_data = (char *)realloc(sb->sb_data, size); 52 sb->sb_cc = 0; 53 if (sb->sb_wptr) 54 sb->sb_datalen = size; 55 else 56 sb->sb_datalen = 0; 57 } 58 } else { 59 sb->sb_wptr = sb->sb_rptr = sb->sb_data = (char *)malloc(size); 60 sb->sb_cc = 0; 61 if (sb->sb_wptr) 62 sb->sb_datalen = size; 63 else 64 sb->sb_datalen = 0; 65 } 66 66 } 67 67 … … 75 75 sbappend(PNATState pData, struct socket *so, struct mbuf *m) 76 76 { 77 77 int ret = 0; 78 78 79 80 81 82 79 DEBUG_CALL("sbappend"); 80 DEBUG_ARG("so = %lx", (long)so); 81 DEBUG_ARG("m = %lx", (long)m); 82 DEBUG_ARG("m->m_len = %d", m->m_len); 83 83 84 85 86 87 88 84 /* Shouldn't happen, but... e.g. foreign host closes connection */ 85 if (m->m_len <= 0) { 86 m_free(pData, m); 87 return; 88 } 89 89 90 91 92 93 94 95 96 97 98 99 100 90 /* 91 * If there is urgent data, call sosendoob 92 * if not all was sent, sowrite will take care of the rest 93 * (The rest of this function is just an optimisation) 94 */ 95 if (so->so_urgc) { 96 sbappendsb(&so->so_rcv, m); 97 m_free(pData, m); 98 sosendoob(so); 99 return; 100 } 101 101 102 103 104 105 106 107 102 /* 103 * We only write if there's nothing in the buffer, 104 * ottherwise it'll arrive out of order, and hence corrupt 105 */ 106 if (!so->so_rcv.sb_cc) 107 ret = send(so->s, m->m_data, m->m_len, 0); 108 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 109 if (ret <= 0) { 110 /* 111 * Nothing was written 112 * It's possible that the socket has closed, but 113 * we don't need to check because if it has closed, 114 * it will be detected in the normal way by soread() 115 */ 116 sbappendsb(&so->so_rcv, m); 117 } else if (ret != m->m_len) { 118 /* 119 * Something was written, but not everything.. 
120 * sbappendsb the rest 121 */ 122 m->m_len -= ret; 123 m->m_data += ret; 124 sbappendsb(&so->so_rcv, m); 125 } /* else */ 126 /* Whatever happened, we free the mbuf */ 127 m_free(pData, m); 128 128 } 129 129 … … 134 134 void 135 135 sbappendsb(sb, m) 136 137 136 struct sbuf *sb; 137 struct mbuf *m; 138 138 { 139 139 int len, n, nn; 140 140 141 141 len = m->m_len; 142 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 143 if (sb->sb_wptr < sb->sb_rptr) { 144 n = sb->sb_rptr - sb->sb_wptr; 145 if (n > len) n = len; 146 memcpy(sb->sb_wptr, m->m_data, n); 147 } else { 148 /* Do the right edge first */ 149 n = sb->sb_data + sb->sb_datalen - sb->sb_wptr; 150 if (n > len) n = len; 151 memcpy(sb->sb_wptr, m->m_data, n); 152 len -= n; 153 if (len) { 154 /* Now the left edge */ 155 nn = sb->sb_rptr - sb->sb_data; 156 if (nn > len) nn = len; 157 memcpy(sb->sb_data,m->m_data+n,nn); 158 n += nn; 159 } 160 } 161 161 162 163 164 165 162 sb->sb_cc += n; 163 sb->sb_wptr += n; 164 if (sb->sb_wptr >= sb->sb_data + sb->sb_datalen) 165 sb->sb_wptr -= sb->sb_datalen; 166 166 } 167 167 … … 173 173 void 174 174 sbcopy(sb, off, len, to) 175 176 177 178 175 struct sbuf *sb; 176 int off; 177 int len; 178 char *to; 179 179 { 180 180 char *from; 181 181 182 183 184 182 from = sb->sb_rptr + off; 183 if (from >= sb->sb_data + sb->sb_datalen) 184 from -= sb->sb_datalen; 185 185 186 187 188 189 190 191 192 193 194 195 196 197 186 if (from < sb->sb_wptr) { 187 if (len > sb->sb_cc) len = sb->sb_cc; 188 memcpy(to,from,len); 189 } else { 190 /* re-use off */ 191 off = (sb->sb_data + sb->sb_datalen) - from; 192 if (off > len) off = len; 193 memcpy(to,from,off); 194 len -= off; 195 if (len) 196 memcpy(to+off,sb->sb_data,len); 197 } 198 198 } 199 199 -
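sbappendsb() above does a two-part copy: fill up to the right edge of the circular buffer first, then wrap around to the left edge. A simplified standalone model of that copy (RING and the ring_* names are illustrative; like the sbuf code, it assumes the writer never overruns the reader, which the caller guarantees by checking free space first):

    #include <stdio.h>
    #include <string.h>

    #define RING 8

    static char data[RING];
    static char *wptr = data;
    static char *rptr = data;
    static int  cc;                 /* bytes currently buffered, like sb_cc */

    static void ring_append(const char *src, int len)
    {
        int n = (int)(data + RING - wptr);  /* room up to the right edge */
        if (n > len)
            n = len;
        memcpy(wptr, src, n);               /* right-edge part */
        if (len - n > 0)
            memcpy(data, src + n, len - n); /* wrapped left-edge part */
        cc += len;
        wptr += len;
        if (wptr >= data + RING)
            wptr -= RING;                   /* wrap the write pointer */
    }

    int main(void)
    {
        ring_append("abcdef", 6);   /* write pointer ends at offset 6 */
        rptr += 4; cc -= 4;         /* pretend 4 bytes were consumed */
        ring_append("WXYZ", 4);     /* "WX" fits at the edge, "YZ" wraps */
        printf("cc=%d, read off=%d, write off=%d\n",
               cc, (int)(rptr - data), (int)(wptr - data));
        return 0;
    }

sbcopy() above is the mirror image on the read side: it takes the right-edge slice first, then finishes from sb_data.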
trunk/src/VBox/Devices/Network/slirp/sbuf.h
r1076 r14470 13 13 14 14 struct sbuf { 15 u_int sb_cc;/* actual chars in buffer */16 u_int sb_datalen;/* Length of data */17 char *sb_wptr;/* write pointer. points to where the next18 19 char *sb_rptr;/* read pointer. points to where the next20 21 char *sb_data;/* Actual data */15 u_int sb_cc; /* actual chars in buffer */ 16 u_int sb_datalen; /* Length of data */ 17 char *sb_wptr; /* write pointer. points to where the next 18 * bytes should be written in the sbuf */ 19 char *sb_rptr; /* read pointer. points to where the next 20 * byte should be read from the sbuf */ 21 char *sb_data; /* Actual data */ 22 22 }; 23 23 -
trunk/src/VBox/Devices/Network/slirp/slirp.c
r14391 r14470 227 227 } 228 228 #if defined(VBOX_WITH_SIMPLIFIED_SLIRP_SYNC) && defined(RT_OS_WINDOWS) 229 229 pData->phEvents[VBOX_SOCKET_EVENT_INDEX] = CreateEvent(NULL, FALSE, FALSE, NULL); 230 230 #endif 231 231 #endif … … 365 365 static void updtime(PNATState pData) 366 366 { 367 368 369 370 371 372 373 367 gettimeofday(&tt, 0); 368 369 curtime = (u_int)tt.tv_sec * (u_int)1000; 370 curtime += (u_int)tt.tv_usec / (u_int)1000; 371 372 if ((tt.tv_usec % 1000) >= 500) 373 curtime++; 374 374 } 375 375 #endif … … 393 393 394 394 nfds = *pnfds; 395 396 397 398 399 400 401 402 403 395 /* 396 * First, TCP sockets 397 */ 398 do_slowtimo = 0; 399 if (link_up) { 400 /* 401 * *_slowtimo needs calling if there are IP fragments 402 * in the fragment queue, or there are TCP connections active 403 */ 404 404 #ifndef VBOX_WITH_BSD_REASS 405 406 405 do_slowtimo = ((tcb.so_next != &tcb) || 406 ((struct ipasfrag *)&ipq != u32_to_ptr(pData, ipq.next, struct ipasfrag *))); 407 407 #else /* !VBOX_WITH_BSD_REASS */ 408 408 /* XXX: triggering of fragment expiration should be the same but use … … 421 421 STAM_REL_COUNTER_RESET(&pData->StatTCPHot); 422 422 423 424 423 for (so = tcb.so_next; so != &tcb; so = so_next) { 424 so_next = so->so_next; 425 425 426 426 STAM_REL_COUNTER_INC(&pData->StatTCP); 427 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 428 /* 429 * See if we need a tcp_fasttimo 430 */ 431 if (time_fasttimo == 0 && so->so_tcpcb->t_flags & TF_DELACK) 432 time_fasttimo = curtime; /* Flag when we want a fasttimo */ 433 434 /* 435 * NOFDREF can include still connecting to local-host, 436 * newly socreated() sockets etc. Don't want to select these. 437 */ 438 if (so->so_state & SS_NOFDREF || so->s == -1) 439 continue; 440 441 /* 442 * Set for reading sockets which are accepting 443 */ 444 if (so->so_state & SS_FACCEPTCONN) { 445 445 STAM_REL_COUNTER_INC(&pData->StatTCPHot); 446 446 #if !defined(VBOX_WITH_SIMPLIFIED_SLIRP_SYNC) || !defined(RT_OS_WINDOWS) 447 447 FD_SET(so->s, readfds); 448 449 #else 450 448 UPD_NFDS(so->s); 449 #else 450 rc = WSAEventSelect(so->s, VBOX_SOCKET_EVENT, FD_READ|FD_WRITE|FD_ACCEPT|FD_CONNECT|FD_OOB); 451 451 if (rc == SOCKET_ERROR) 452 452 { … … 456 456 } 457 457 #endif 458 459 460 461 462 463 464 458 continue; 459 } 460 461 /* 462 * Set for writing sockets which are connecting 463 */ 464 if (so->so_state & SS_ISFCONNECTING) { 465 465 STAM_REL_COUNTER_INC(&pData->StatTCPHot); 466 466 #if !defined(VBOX_WITH_SIMPLIFIED_SLIRP_SYNC) || !defined(RT_OS_WINDOWS) 467 468 469 #else 470 467 FD_SET(so->s, writefds); 468 UPD_NFDS(so->s); 469 #else 470 rc = WSAEventSelect(so->s, VBOX_SOCKET_EVENT, FD_READ|FD_WRITE|FD_ACCEPT|FD_CONNECT|FD_OOB); 471 471 if (rc == SOCKET_ERROR) 472 472 goto socket_error; 473 473 #endif 474 475 476 477 478 479 480 481 474 continue; 475 } 476 477 /* 478 * Set for writing if we are connected, can send more, and 479 * we have something to send 480 */ 481 if (CONN_CANFSEND(so) && so->so_rcv.sb_cc) { 482 482 STAM_REL_COUNTER_INC(&pData->StatTCPHot); 483 483 #if !defined(VBOX_WITH_SIMPLIFIED_SLIRP_SYNC) || !defined(RT_OS_WINDOWS) 484 485 486 #else 487 484 FD_SET(so->s, writefds); 485 UPD_NFDS(so->s); 486 #else 487 rc = WSAEventSelect(so->s, VBOX_SOCKET_EVENT, FD_READ|FD_WRITE|FD_ACCEPT|FD_CONNECT|FD_OOB); 488 488 if (rc == SOCKET_ERROR) 489 489 goto socket_error; 490 491 #endif 492 493 494 495 496 497 498 490 continue; /*XXX: we're using the widest mask for event*/ 491 #endif 492 } 493 494 /* 495 * Set for reading (and urgent data) if we are 
connected, can 496 * receive more, and we have room for it XXX /2 ? 497 */ 498 if (CONN_CANFRCV(so) && (so->so_snd.sb_cc < (so->so_snd.sb_datalen/2))) { 499 499 STAM_REL_COUNTER_INC(&pData->StatTCPHot); 500 500 #if !defined(VBOX_WITH_SIMPLIFIED_SLIRP_SYNC) || !defined(RT_OS_WINDOWS) 501 502 503 504 #else 505 501 FD_SET(so->s, readfds); 502 FD_SET(so->s, xfds); 503 UPD_NFDS(so->s); 504 #else 505 rc = WSAEventSelect(so->s, VBOX_SOCKET_EVENT, FD_OOB|FD_READ|FD_WRITE|FD_ACCEPT|FD_CONNECT); 506 506 if (rc == SOCKET_ERROR) 507 507 goto socket_error; 508 509 #endif 510 508 continue; /*XXX: we're using the widest mask for event*/ 509 #endif 510 } 511 511 #if defined(VBOX_WITH_SIMPLIFIED_SLIRP_SYNC) && defined(RT_OS_WINDOWS) 512 512 rc = WSAEventSelect(so->s, NULL, 0); 513 513 if (rc == SOCKET_ERROR) 514 514 goto socket_error; 515 515 #endif 516 517 518 519 520 516 } 517 518 /* 519 * UDP sockets 520 */ 521 521 STAM_REL_COUNTER_RESET(&pData->StatUDP); 522 522 STAM_REL_COUNTER_RESET(&pData->StatUDPHot); 523 523 524 525 524 for (so = udb.so_next; so != &udb; so = so_next) { 525 so_next = so->so_next; 526 526 527 527 STAM_REL_COUNTER_INC(&pData->StatUDP); 528 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 529 /* 530 * See if it's timed out 531 */ 532 if (so->so_expire) { 533 if (so->so_expire <= curtime) { 534 udp_detach(pData, so); 535 continue; 536 } else 537 do_slowtimo = 1; /* Let socket expire */ 538 } 539 540 /* 541 * When UDP packets are received from over the 542 * link, they're sendto()'d straight away, so 543 * no need for setting for writing 544 * Limit the number of packets queued by this session 545 * to 4. Note that even though we try and limit this 546 * to 4 packets, the session could have more queued 547 * if the packets needed to be fragmented 548 * (XXX <= 4 ?) 549 */ 550 if ((so->so_state & SS_ISFCONNECTED) && so->so_queued <= 4) { 551 551 STAM_REL_COUNTER_INC(&pData->StatUDPHot); 552 552 #if !defined(VBOX_WITH_SIMPLIFIED_SLIRP_SYNC) || !defined(RT_OS_WINDOWS) 553 554 555 #else 556 553 FD_SET(so->s, readfds); 554 UPD_NFDS(so->s); 555 #else 556 rc = WSAEventSelect(so->s, VBOX_SOCKET_EVENT, FD_READ|FD_WRITE|FD_OOB|FD_ACCEPT); 557 557 if (rc == SOCKET_ERROR) 558 558 goto socket_error; 559 560 #endif 561 559 continue; 560 #endif 561 } 562 562 #if defined(VBOX_WITH_SIMPLIFIED_SLIRP_SYNC) && defined(RT_OS_WINDOWS) 563 563 else … … 568 568 } 569 569 #endif 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 570 } 571 } 572 573 /* 574 * Setup timeout to use minimum CPU usage, especially when idle 575 */ 576 577 /* 578 * First, see the timeout needed by *timo 579 */ 580 timeout.tv_sec = 0; 581 timeout.tv_usec = -1; 582 /* 583 * If a slowtimo is needed, set timeout to 500ms from the last 584 * slow timeout. If a fast timeout is needed, set timeout within 585 * 200ms of when it was requested. 
586 */ 587 if (do_slowtimo) { 588 /* XXX + 10000 because some select()'s aren't that accurate */ 589 timeout.tv_usec = ((500 - (curtime - last_slowtimo)) * 1000) + 10000; 590 if (timeout.tv_usec < 0) 591 timeout.tv_usec = 0; 592 else if (timeout.tv_usec > 510000) 593 timeout.tv_usec = 510000; 594 595 /* Can only fasttimo if we also slowtimo */ 596 if (time_fasttimo) { 597 tmp_time = (200 - (curtime - time_fasttimo)) * 1000; 598 if (tmp_time < 0) 599 tmp_time = 0; 600 601 /* Choose the smallest of the 2 */ 602 if (tmp_time < timeout.tv_usec) 603 timeout.tv_usec = (u_int)tmp_time; 604 } 605 } 606 606 #if !defined(VBOX_WITH_SIMPLIFIED_SLIRP_SYNC) || !defined(RT_OS_WINDOWS) 607 607 *pnfds = nfds; … … 618 618 int ret; 619 619 #if defined(VBOX_WITH_SIMPLIFIED_SLIRP_SYNC) && defined(RT_OS_WINDOWS) 620 621 620 WSANETWORKEVENTS NetworkEvents; 621 int rc; 622 622 int error; 623 623 int timer_update = (readfds == NULL && writefds == NULL && xfds == NULL); 624 624 #endif 625 625 STAM_REL_PROFILE_START(&pData->StatPoll, a); 626 626 627 628 629 630 631 632 633 634 627 /* Update time */ 628 updtime(pData); 629 630 /* 631 * See if anything has timed out 632 */ 633 if (link_up) { 634 if (time_fasttimo && ((curtime - time_fasttimo) >= 2)) { 635 635 STAM_REL_PROFILE_START(&pData->StatFastTimer, a); 636 637 636 tcp_fasttimo(pData); 637 time_fasttimo = 0; 638 638 STAM_REL_PROFILE_STOP(&pData->StatFastTimer, a); 639 640 639 } 640 if (do_slowtimo && ((curtime - last_slowtimo) >= 499)) { 641 641 STAM_REL_PROFILE_START(&pData->StatSlowTimer, a); 642 643 644 642 ip_slowtimo(pData); 643 tcp_slowtimo(pData); 644 last_slowtimo = curtime; 645 645 STAM_REL_PROFILE_STOP(&pData->StatSlowTimer, a); 646 647 646 } 647 } 648 648 #if defined(VBOX_WITH_SIMPLIFIED_SLIRP_SYNC) && defined(RT_OS_WINDOWS) 649 650 #endif 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 649 if (timer_update) return; 650 #endif 651 652 /* 653 * Check sockets 654 */ 655 if (link_up) { 656 /* 657 * Check TCP sockets 658 */ 659 for (so = tcb.so_next; so != &tcb; so = so_next) { 660 so_next = so->so_next; 661 662 /* 663 * FD_ISSET is meaningless on these sockets 664 * (and they can crash the program) 665 */ 666 if (so->so_state & SS_NOFDREF || so->s == -1) 667 continue; 668 668 #if defined(VBOX_WITH_SIMPLIFIED_SLIRP_SYNC) && defined(RT_OS_WINDOWS) 669 669 rc = WSAEnumNetworkEvents(so->s, VBOX_SOCKET_EVENT, &NetworkEvents); 670 670 if (rc == SOCKET_ERROR) 671 671 { … … 676 676 #endif 677 677 678 679 680 681 682 678 /* 679 * Check for URG data 680 * This will soread as well, so no need to 681 * test for readfds below if this succeeds 682 */ 683 683 #if !defined(VBOX_WITH_SIMPLIFIED_SLIRP_SYNC) || !defined(RT_OS_WINDOWS) 684 684 if (FD_ISSET(so->s, xfds)) 685 685 #else 686 686 /* out-of-band data */ 687 688 #endif 689 690 691 692 687 if ((NetworkEvents.lNetworkEvents & FD_OOB) && NetworkEvents.iErrorCode[FD_OOB_BIT] == 0) 688 #endif 689 sorecvoob(pData, so); 690 /* 691 * Check sockets for reading 692 */ 693 693 #if !defined(VBOX_WITH_SIMPLIFIED_SLIRP_SYNC) || !defined(RT_OS_WINDOWS) 694 695 #else 696 697 #endif 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 694 else if (FD_ISSET(so->s, readfds)) { 695 #else 696 else if ((NetworkEvents.lNetworkEvents & FD_READ) && (NetworkEvents.iErrorCode[FD_READ_BIT] == 0)) { 697 #endif 698 /* 699 * Check for incoming connections 700 */ 701 if (so->so_state & SS_FACCEPTCONN) { 702 tcp_connect(pData, so); 703 continue; 704 } /* else */ 705 ret = soread(pData, so); 706 707 /* Output it 
if we read something */ 708 if (ret > 0) 709 tcp_output(pData, sototcpcb(so)); 710 } 711 712 /* 713 * Check sockets for writing 714 */ 715 715 #if !defined(VBOX_WITH_SIMPLIFIED_SLIRP_SYNC) || !defined(RT_OS_WINDOWS) 716 717 #else 718 719 #endif 720 721 722 723 724 725 716 if (FD_ISSET(so->s, writefds)) { 717 #else 718 if ((NetworkEvents.lNetworkEvents & FD_WRITE) && (NetworkEvents.iErrorCode[FD_WRITE_BIT] == 0)) { 719 #endif 720 /* 721 * Check for non-blocking, still-connecting sockets 722 */ 723 if (so->so_state & SS_ISFCONNECTING) { 724 /* Connected */ 725 so->so_state &= ~SS_ISFCONNECTING; 726 726 727 727 /* … … 732 732 */ 733 733 #ifndef RT_OS_OS2 734 735 736 737 738 739 740 741 742 743 744 745 746 #endif 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 734 ret = send(so->s, (const char *)&ret, 0, 0); 735 if (ret < 0) { 736 /* XXXXX Must fix, zero bytes is a NOP */ 737 if (errno == EAGAIN || errno == EWOULDBLOCK || 738 errno == EINPROGRESS || errno == ENOTCONN) { 739 continue; 740 } 741 742 /* else failed */ 743 so->so_state = SS_NOFDREF; 744 } 745 /* else so->so_state &= ~SS_ISFCONNECTING; */ 746 #endif 747 748 /* 749 * Continue tcp_input 750 */ 751 tcp_input(pData, (struct mbuf *)NULL, sizeof(struct ip), so); 752 /* continue; */ 753 } else 754 ret = sowrite(pData, so); 755 /* 756 * XXXXX If we wrote something (a lot), there 757 * could be a need for a window update. 758 * In the worst case, the remote will send 759 * a window probe to get things going again 760 */ 761 } 762 763 /* 764 * Probe a still-connecting, non-blocking socket 765 * to check if it's still alive 766 */ 767 767 #ifdef PROBE_CONN 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 #endif 799 800 801 802 803 804 805 806 807 768 if (so->so_state & SS_ISFCONNECTING) { 769 ret = recv(so->s, (char *)&ret, 0,0); 770 771 if (ret < 0) { 772 /* XXX */ 773 if (errno == EAGAIN || errno == EWOULDBLOCK || 774 errno == EINPROGRESS || errno == ENOTCONN) { 775 continue; /* Still connecting, continue */ 776 } 777 778 /* else failed */ 779 so->so_state = SS_NOFDREF; 780 781 /* tcp_input will take care of it */ 782 } else { 783 ret = send(so->s, &ret, 0,0); 784 if (ret < 0) { 785 /* XXX */ 786 if (errno == EAGAIN || errno == EWOULDBLOCK || 787 errno == EINPROGRESS || errno == ENOTCONN) { 788 continue; 789 } 790 /* else failed */ 791 so->so_state = SS_NOFDREF; 792 } else 793 so->so_state &= ~SS_ISFCONNECTING; 794 795 } 796 tcp_input((struct mbuf *)NULL, sizeof(struct ip),so); 797 } /* SS_ISFCONNECTING */ 798 #endif 799 } 800 801 /* 802 * Now UDP sockets. 803 * Incoming packets are sent straight away, they're not buffered. 804 * Incoming UDP data isn't buffered either. 
805 */ 806 for (so = udb.so_next; so != &udb; so = so_next) { 807 so_next = so->so_next; 808 808 809 809 #if defined(VBOX_WITH_SIMPLIFIED_SLIRP_SYNC) && defined(RT_OS_WINDOWS) 810 810 rc = WSAEnumNetworkEvents(so->s, VBOX_SOCKET_EVENT, &NetworkEvents); 811 811 if (rc == SOCKET_ERROR) 812 812 { … … 817 817 #endif 818 818 #if !defined(VBOX_WITH_SIMPLIFIED_SLIRP_SYNC) || !defined(RT_OS_WINDOWS) 819 820 #else 821 819 if (so->s != -1 && FD_ISSET(so->s, readfds)) { 820 #else 821 if ((NetworkEvents.lNetworkEvents & FD_READ) && (NetworkEvents.iErrorCode[FD_READ_BIT] == 0)) { 822 822 #endif 823 823 sorecvfrom(pData, so); 824 824 } 825 826 827 828 829 830 831 832 825 } 826 } 827 828 /* 829 * See if we can start outputting 830 */ 831 if (if_queued && link_up) 832 if_start(pData); 833 833 834 834 STAM_REL_PROFILE_STOP(&pData->StatPoll, a); … … 838 838 #define ETH_HLEN 14 839 839 840 #define ETH_P_IP 0x0800 /* Internet Protocol packet*/841 #define ETH_P_ARP 0x0806 /* Address Resolution packet*/842 843 #define ARPOP_REQUEST 1 /* ARP request*/844 #define ARPOP_REPLY 2 /* ARP reply*/840 #define ETH_P_IP 0x0800 /* Internet Protocol packet */ 841 #define ETH_P_ARP 0x0806 /* Address Resolution packet */ 842 843 #define ARPOP_REQUEST 1 /* ARP request */ 844 #define ARPOP_REPLY 2 /* ARP reply */ 845 845 846 846 struct ethhdr 847 847 { 848 unsigned char h_dest[ETH_ALEN]; /* destination eth addr*/849 unsigned char h_source[ETH_ALEN]; /* source ether addr*/850 unsigned short h_proto; /* packet type ID field*/848 unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ 849 unsigned char h_source[ETH_ALEN]; /* source ether addr */ 850 unsigned short h_proto; /* packet type ID field */ 851 851 }; 852 852 853 853 struct arphdr 854 854 { 855 unsigned short ar_hrd; /* format of hardware address*/856 unsigned short ar_pro; /* format of protocol address*/857 unsigned char ar_hln; /* length of hardware address*/858 unsigned char ar_pln; /* length of protocol address*/859 unsigned short ar_op; /* ARP opcode (command)*/860 861 862 *Ethernet looks like this : This bit is variable sized however...863 864 unsigned char ar_sha[ETH_ALEN]; /* sender hardware address*/865 unsigned char ar_sip[4]; /* sender IP address*/866 unsigned char ar_tha[ETH_ALEN]; /* target hardware address*/867 unsigned char ar_tip[4]; /* target IP address*/855 unsigned short ar_hrd; /* format of hardware address */ 856 unsigned short ar_pro; /* format of protocol address */ 857 unsigned char ar_hln; /* length of hardware address */ 858 unsigned char ar_pln; /* length of protocol address */ 859 unsigned short ar_op; /* ARP opcode (command) */ 860 861 /* 862 * Ethernet looks like this : This bit is variable sized however... 863 */ 864 unsigned char ar_sha[ETH_ALEN]; /* sender hardware address */ 865 unsigned char ar_sip[4]; /* sender IP address */ 866 unsigned char ar_tha[ETH_ALEN]; /* target hardware address */ 867 unsigned char ar_tip[4]; /* target IP address */ 868 868 }; 869 869 … … 933 933 break; 934 934 case ETH_P_IP: 935 935 /* Update time. Important if the network is very quiet, as otherwise 936 936 * the first outgoing connection gets an incorrect timestamp. */ 937 937 updtime(pData); 938 938 939 939 m = m_get(pData); … … 1005 1005 HANDLE *slirp_get_events(PNATState pData) 1006 1006 { 1007 1007 return pData->phEvents; 1008 1008 } 1009 1009 void slirp_register_external_event(PNATState pData, HANDLE hEvent, int index) 1010 1010 { 1011 1011 pData->phEvents[index] = hEvent; 1012 1012 } 1013 1013 #endif -
trunk/src/VBox/Devices/Network/slirp/socket.c
r14180 r14470 17 17 so_init() 18 18 { 19 19 /* Nothing yet */ 20 20 } 21 21 … … 23 23 struct socket * 24 24 solookup(head, laddr, lport, faddr, fport) 25 26 27 28 29 30 { 31 32 33 34 35 36 37 38 39 40 41 42 43 25 struct socket *head; 26 struct in_addr laddr; 27 u_int lport; 28 struct in_addr faddr; 29 u_int fport; 30 { 31 struct socket *so; 32 33 for (so = head->so_next; so != head; so = so->so_next) { 34 if (so->so_lport == lport && 35 so->so_laddr.s_addr == laddr.s_addr && 36 so->so_faddr.s_addr == faddr.s_addr && 37 so->so_fport == fport) 38 break; 39 } 40 41 if (so == head) 42 return (struct socket *)NULL; 43 return so; 44 44 45 45 } … … 71 71 { 72 72 if (so->so_emu==EMU_RSH && so->extra) { 73 74 73 sofree(pData, so->extra); 74 so->extra=NULL; 75 75 } 76 76 if (so == tcp_last_so) … … 96 96 soread(PNATState pData, struct socket *so) 97 97 { 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 98 int n, nn, lss, total; 99 struct sbuf *sb = &so->so_snd; 100 int len = sb->sb_datalen - sb->sb_cc; 101 struct iovec iov[2]; 102 int mss = so->so_tcpcb->t_maxseg; 103 104 DEBUG_CALL("soread"); 105 DEBUG_ARG("so = %lx", (long )so); 106 107 /* 108 * No need to check if there's enough room to read. 109 * soread wouldn't have been called if there weren't 110 */ 111 112 len = sb->sb_datalen - sb->sb_cc; 113 114 iov[0].iov_base = sb->sb_wptr; 115 115 iov[1].iov_base = 0; 116 116 iov[1].iov_len = 0; 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 117 if (sb->sb_wptr < sb->sb_rptr) { 118 iov[0].iov_len = sb->sb_rptr - sb->sb_wptr; 119 /* Should never succeed, but... */ 120 if (iov[0].iov_len > len) 121 iov[0].iov_len = len; 122 if (iov[0].iov_len > mss) 123 iov[0].iov_len -= iov[0].iov_len%mss; 124 n = 1; 125 } else { 126 iov[0].iov_len = (sb->sb_data + sb->sb_datalen) - sb->sb_wptr; 127 /* Should never succeed, but... */ 128 if (iov[0].iov_len > len) iov[0].iov_len = len; 129 len -= iov[0].iov_len; 130 if (len) { 131 iov[1].iov_base = sb->sb_data; 132 iov[1].iov_len = sb->sb_rptr - sb->sb_data; 133 if(iov[1].iov_len > len) 134 iov[1].iov_len = len; 135 total = iov[0].iov_len + iov[1].iov_len; 136 if (total > mss) { 137 lss = total%mss; 138 if (iov[1].iov_len > lss) { 139 iov[1].iov_len -= lss; 140 n = 2; 141 } else { 142 lss -= iov[1].iov_len; 143 iov[0].iov_len -= lss; 144 n = 1; 145 } 146 } else 147 n = 2; 148 } else { 149 if (iov[0].iov_len > mss) 150 iov[0].iov_len -= iov[0].iov_len%mss; 151 n = 1; 152 } 153 } 154 154 155 155 #ifdef HAVE_READV 156 157 156 nn = readv(so->s, (struct iovec *)iov, n); 157 DEBUG_MISC((dfd, " ... 
read nn = %d bytes\n", nn)); 158 158 #else 159 160 #endif 161 162 163 164 165 166 167 168 169 170 159 nn = recv(so->s, iov[0].iov_base, iov[0].iov_len,0); 160 #endif 161 if (nn <= 0) { 162 if (nn < 0 && (errno == EINTR || errno == EAGAIN)) 163 return 0; 164 else { 165 DEBUG_MISC((dfd, " --- soread() disconnected, nn = %d, errno = %d-%s\n", nn, errno,strerror(errno))); 166 sofcantrcvmore(so); 167 tcp_sockclosed(pData, sototcpcb(so)); 168 return -1; 169 } 170 } 171 171 172 172 #ifndef HAVE_READV 173 174 175 176 177 178 179 180 181 182 173 /* 174 * If there was no error, try and read the second time round 175 * We read again if n = 2 (ie, there's another part of the buffer) 176 * and we read as much as we could in the first read 177 * We don't test for <= 0 this time, because there legitimately 178 * might not be any more data (since the socket is non-blocking), 179 * a close will be detected on next iteration. 180 * A return of -1 wont (shouldn't) happen, since it didn't happen above 181 */ 182 if (n == 2 && nn == iov[0].iov_len) { 183 183 int ret; 184 184 ret = recv(so->s, iov[1].iov_base, iov[1].iov_len,0); … … 187 187 } 188 188 189 190 #endif 191 192 193 194 195 196 197 189 DEBUG_MISC((dfd, " ... read nn = %d bytes\n", nn)); 190 #endif 191 192 /* Update fields */ 193 sb->sb_cc += nn; 194 sb->sb_wptr += nn; 195 if (sb->sb_wptr >= (sb->sb_data + sb->sb_datalen)) 196 sb->sb_wptr -= sb->sb_datalen; 197 return nn; 198 198 } 199 199 … … 208 208 sorecvoob(PNATState pData, struct socket *so) 209 209 { 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 210 struct tcpcb *tp = sototcpcb(so); 211 212 DEBUG_CALL("sorecvoob"); 213 DEBUG_ARG("so = %lx", (long)so); 214 215 /* 216 * We take a guess at how much urgent data has arrived. 217 * In most situations, when urgent data arrives, the next 218 * read() should get all the urgent data. This guess will 219 * be wrong however if more data arrives just after the 220 * urgent data, or the read() doesn't return all the 221 * urgent data. 
222 */ 223 soread(pData, so); 224 tp->snd_up = tp->snd_una + so->so_snd.sb_cc; 225 tp->t_force = 1; 226 tcp_output(pData, tp); 227 tp->t_force = 0; 228 228 } 229 229 … … 234 234 int 235 235 sosendoob(so) 236 237 { 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 236 struct socket *so; 237 { 238 struct sbuf *sb = &so->so_rcv; 239 char buff[2048]; /* XXX Shouldn't be sending more oob data than this */ 240 241 int n, len; 242 243 DEBUG_CALL("sosendoob"); 244 DEBUG_ARG("so = %lx", (long)so); 245 DEBUG_ARG("sb->sb_cc = %d", sb->sb_cc); 246 247 if (so->so_urgc > 2048) 248 so->so_urgc = 2048; /* XXXX */ 249 250 if (sb->sb_rptr < sb->sb_wptr) { 251 /* We can send it directly */ 252 n = send(so->s, sb->sb_rptr, so->so_urgc, (MSG_OOB)); /* |MSG_DONTWAIT)); */ 253 so->so_urgc -= n; 254 255 DEBUG_MISC((dfd, " --- sent %d bytes urgent data, %d urgent bytes left\n", n, so->so_urgc)); 256 } else { 257 /* 258 * Since there's no sendv or sendtov like writev, 259 * we must copy all data to a linear buffer then 260 * send it all 261 */ 262 len = (sb->sb_data + sb->sb_datalen) - sb->sb_rptr; 263 if (len > so->so_urgc) len = so->so_urgc; 264 memcpy(buff, sb->sb_rptr, len); 265 so->so_urgc -= len; 266 if (so->so_urgc) { 267 n = sb->sb_wptr - sb->sb_data; 268 if (n > so->so_urgc) n = so->so_urgc; 269 memcpy((buff + len), sb->sb_data, n); 270 so->so_urgc -= n; 271 len += n; 272 } 273 n = send(so->s, buff, len, (MSG_OOB)); /* |MSG_DONTWAIT)); */ 274 274 #ifdef DEBUG 275 276 277 #endif 278 279 280 281 282 283 284 285 286 275 if (n != len) 276 DEBUG_ERROR((dfd, "Didn't send all data urgently XXXXX\n")); 277 #endif 278 DEBUG_MISC((dfd, " ---2 sent %d bytes urgent data, %d urgent bytes left\n", n, so->so_urgc)); 279 } 280 281 sb->sb_cc -= n; 282 sb->sb_rptr += n; 283 if (sb->sb_rptr >= (sb->sb_data + sb->sb_datalen)) 284 sb->sb_rptr -= sb->sb_datalen; 285 286 return n; 287 287 } 288 288 … … 294 294 sowrite(PNATState pData, struct socket *so) 295 295 { 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 296 int n,nn; 297 struct sbuf *sb = &so->so_rcv; 298 int len = sb->sb_cc; 299 struct iovec iov[2]; 300 301 DEBUG_CALL("sowrite"); 302 DEBUG_ARG("so = %lx", (long)so); 303 304 if (so->so_urgc) { 305 sosendoob(so); 306 if (sb->sb_cc == 0) 307 return 0; 308 } 309 310 /* 311 * No need to check if there's something to write, 312 * sowrite wouldn't have been called otherwise 313 */ 314 314 315 315 len = sb->sb_cc; 316 316 317 317 iov[0].iov_base = sb->sb_rptr; 318 318 iov[1].iov_base = 0; 319 319 iov[1].iov_len = 0; 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 320 if (sb->sb_rptr < sb->sb_wptr) { 321 iov[0].iov_len = sb->sb_wptr - sb->sb_rptr; 322 /* Should never succeed, but... */ 323 if (iov[0].iov_len > len) iov[0].iov_len = len; 324 n = 1; 325 } else { 326 iov[0].iov_len = (sb->sb_data + sb->sb_datalen) - sb->sb_rptr; 327 if (iov[0].iov_len > len) iov[0].iov_len = len; 328 len -= iov[0].iov_len; 329 if (len) { 330 iov[1].iov_base = sb->sb_data; 331 iov[1].iov_len = sb->sb_wptr - sb->sb_data; 332 if (iov[1].iov_len > len) iov[1].iov_len = len; 333 n = 2; 334 } else 335 n = 1; 336 } 337 /* Check if there's urgent data to send, and if so, send it */ 338 338 339 339 #ifdef HAVE_READV 340 341 342 340 nn = writev(so->s, (const struct iovec *)iov, n); 341 342 DEBUG_MISC((dfd, " ... 
wrote nn = %d bytes\n", nn)); 343 343 #else 344 345 #endif 346 347 348 349 350 351 352 353 354 355 356 344 nn = send(so->s, iov[0].iov_base, iov[0].iov_len,0); 345 #endif 346 /* This should never happen, but people tell me it does *shrug* */ 347 if (nn < 0 && (errno == EAGAIN || errno == EINTR)) 348 return 0; 349 350 if (nn <= 0) { 351 DEBUG_MISC((dfd, " --- sowrite disconnected, so->so_state = %x, errno = %d\n", 352 so->so_state, errno)); 353 sofcantsendmore(so); 354 tcp_sockclosed(pData, sototcpcb(so)); 355 return -1; 356 } 357 357 358 358 #ifndef HAVE_READV 359 359 if (n == 2 && nn == iov[0].iov_len) { 360 360 int ret; 361 361 ret = send(so->s, iov[1].iov_base, iov[1].iov_len,0); … … 366 366 #endif 367 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 368 /* Update sbuf */ 369 sb->sb_cc -= nn; 370 sb->sb_rptr += nn; 371 if (sb->sb_rptr >= (sb->sb_data + sb->sb_datalen)) 372 sb->sb_rptr -= sb->sb_datalen; 373 374 /* 375 * If in DRAIN mode, and there's no more data, set 376 * it CANTSENDMORE 377 */ 378 if ((so->so_state & SS_FWDRAIN) && sb->sb_cc == 0) 379 sofcantsendmore(so); 380 381 return nn; 382 382 } 383 383 … … 388 388 sorecvfrom(PNATState pData, struct socket *so) 389 389 { 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 } else {/* A "normal" UDP packet */420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 /*if (m->m_len == len) {469 *m_inc(m, MINCSIZE);470 *m->m_len = 0;471 *}472 473 474 475 476 477 478 479 480 390 struct sockaddr_in addr; 391 socklen_t addrlen = sizeof(struct sockaddr_in); 392 393 DEBUG_CALL("sorecvfrom"); 394 DEBUG_ARG("so = %lx", (long)so); 395 396 if (so->so_type == IPPROTO_ICMP) { /* This is a "ping" reply */ 397 char buff[256]; 398 int len; 399 400 len = recvfrom(so->s, buff, 256, 0, 401 (struct sockaddr *)&addr, &addrlen); 402 /* XXX Check if reply is "correct"? */ 403 404 if(len == -1 || len == 0) { 405 u_char code=ICMP_UNREACH_PORT; 406 407 if(errno == EHOSTUNREACH) code=ICMP_UNREACH_HOST; 408 else if(errno == ENETUNREACH) code=ICMP_UNREACH_NET; 409 410 DEBUG_MISC((dfd," udp icmp rx errno = %d-%s\n", 411 errno,strerror(errno))); 412 icmp_error(pData, so->so_m, ICMP_UNREACH,code, 0,strerror(errno)); 413 } else { 414 icmp_reflect(pData, so->so_m); 415 so->so_m = 0; /* Don't m_free() it again! 
*/ 416 } 417 /* No need for this socket anymore, udp_detach it */ 418 udp_detach(pData, so); 419 } else { /* A "normal" UDP packet */ 420 struct mbuf *m; 421 int len, n; 422 423 if (!(m = m_get(pData))) return; 424 m->m_data += if_maxlinkhdr; 425 426 /* 427 * XXX Shouldn't FIONREAD packets destined for port 53, 428 * but I don't know the max packet size for DNS lookups 429 */ 430 len = M_FREEROOM(m); 431 /* if (so->so_fport != htons(53)) { */ 432 ioctlsocket(so->s, FIONREAD, &n); 433 434 if (n > len) { 435 n = (m->m_data - m->m_dat) + m->m_len + n + 1; 436 m_inc(m, n); 437 len = M_FREEROOM(m); 438 } 439 /* } */ 440 441 m->m_len = recvfrom(so->s, m->m_data, len, 0, 442 (struct sockaddr *)&addr, &addrlen); 443 DEBUG_MISC((dfd, " did recvfrom %d, errno = %d-%s\n", 444 m->m_len, errno,strerror(errno))); 445 if(m->m_len<0) { 446 u_char code=ICMP_UNREACH_PORT; 447 448 if(errno == EHOSTUNREACH) code=ICMP_UNREACH_HOST; 449 else if(errno == ENETUNREACH) code=ICMP_UNREACH_NET; 450 451 DEBUG_MISC((dfd," rx error, tx icmp ICMP_UNREACH:%i\n", code)); 452 icmp_error(pData, so->so_m, ICMP_UNREACH,code, 0,strerror(errno)); 453 m_free(pData, m); 454 } else { 455 /* 456 * Hack: domain name lookup will be used the most for UDP, 457 * and since they'll only be used once there's no need 458 * for the 4 minute (or whatever) timeout... So we time them 459 * out much quicker (10 seconds for now...) 460 */ 461 if (so->so_expire) { 462 if (so->so_fport == htons(53)) 463 so->so_expire = curtime + SO_EXPIREFAST; 464 else 465 so->so_expire = curtime + SO_EXPIRE; 466 } 467 468 /* if (m->m_len == len) { 469 * m_inc(m, MINCSIZE); 470 * m->m_len = 0; 471 * } 472 */ 473 474 /* 475 * If this packet was destined for CTL_ADDR, 476 * make it look like that's where it came from, done by udp_output 477 */ 478 udp_output(pData, so, m, &addr); 479 } /* rx error */ 480 } /* if ping packet */ 481 481 } 482 482 … … 487 487 sosendto(PNATState pData, struct socket *so, struct mbuf *m) 488 488 { 489 490 489 int ret; 490 struct sockaddr_in addr; 491 491 #if 0 492 492 struct sockaddr_in host_addr; 493 493 #endif 494 494 495 496 497 495 DEBUG_CALL("sosendto"); 496 DEBUG_ARG("so = %lx", (long)so); 497 DEBUG_ARG("m = %lx", (long)m); 498 498 499 499 addr.sin_family = AF_INET; 500 501 500 if ((so->so_faddr.s_addr & htonl(pData->netmask)) == special_addr.s_addr) { 501 /* It's an alias */ 502 502 uint32_t last_byte = ntohl(so->so_faddr.s_addr) & ~pData->netmask; 503 503 switch(last_byte) { 504 504 #if 0 505 505 /* handle this case at 'default:' */ … … 520 520 break; 521 521 #endif 522 522 case CTL_DNS: 523 523 if (!get_dns_addr(pData, &dns_addr)) 524 524 addr.sin_addr = dns_addr; 525 525 else 526 526 addr.sin_addr = loopback_addr; 527 528 529 527 break; 528 case CTL_ALIAS: 529 default: 530 530 if (last_byte == ~pData->netmask) 531 531 addr.sin_addr.s_addr = INADDR_BROADCAST; 532 532 else 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 533 addr.sin_addr = loopback_addr; 534 break; 535 } 536 } else 537 addr.sin_addr = so->so_faddr; 538 addr.sin_port = so->so_fport; 539 540 DEBUG_MISC((dfd, " sendto()ing, addr.sin_port=%d, addr.sin_addr.s_addr=%.16s\n", ntohs(addr.sin_port), inet_ntoa(addr.sin_addr))); 541 542 /* Don't care what port we get */ 543 ret = sendto(so->s, m->m_data, m->m_len, 0, 544 (struct sockaddr *)&addr, sizeof (struct sockaddr)); 545 if (ret < 0) 546 return -1; 547 548 /* 549 * Kill the socket if there's no reply in 4 minutes, 550 * but only if it's an expirable socket 551 */ 552 if 
(so->so_expire) 553 so->so_expire = curtime + SO_EXPIRE; 554 so->so_state = SS_ISFCONNECTED; /* So that it gets select()ed */ 555 return 0; 556 556 } 557 557 … … 562 562 solisten(PNATState pData, u_int port, u_int32_t laddr, u_int lport, int flags) 563 563 { 564 565 564 struct sockaddr_in addr; 565 struct socket *so; 566 566 socklen_t addrlen = sizeof(addr); 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 567 int s, opt = 1; 568 569 DEBUG_CALL("solisten"); 570 DEBUG_ARG("port = %d", port); 571 DEBUG_ARG("laddr = %x", laddr); 572 DEBUG_ARG("lport = %d", lport); 573 DEBUG_ARG("flags = %x", flags); 574 575 if ((so = socreate()) == NULL) { 576 /* free(so); Not sofree() ??? free(NULL) == NOP */ 577 return NULL; 578 } 579 580 /* Don't tcp_attach... we don't need so_snd nor so_rcv */ 581 if ((so->so_tcpcb = tcp_newtcpcb(pData, so)) == NULL) { 582 free(so); 583 return NULL; 584 } 585 insque(pData, so,&tcb); 586 587 /* 588 * SS_FACCEPTONCE sockets must time out. 589 */ 590 if (flags & SS_FACCEPTONCE) 591 so->so_tcpcb->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT*2; 592 593 so->so_state = (SS_FACCEPTCONN|flags); 594 so->so_lport = lport; /* Kept in network format */ 595 so->so_laddr.s_addr = laddr; /* Ditto */ 596 597 addr.sin_family = AF_INET; 598 addr.sin_addr.s_addr = INADDR_ANY; 599 addr.sin_port = port; 600 601 if (((s = socket(AF_INET,SOCK_STREAM,0)) < 0) || 602 (setsockopt(s,SOL_SOCKET,SO_REUSEADDR,(char *)&opt,sizeof(int)) < 0) || 603 (bind(s,(struct sockaddr *)&addr, sizeof(addr)) < 0) || 604 (listen(s,1) < 0)) { 605 605 #ifdef RT_OS_WINDOWS 606 607 608 609 610 606 int tmperrno = WSAGetLastError(); /* Don't clobber the real reason we failed */ 607 closesocket(s); 608 sofree(pData, so); 609 /* Restore the real errno */ 610 WSASetLastError(tmperrno); 611 611 #else 612 612 int tmperrno = errno; /* Don't clobber the real reason we failed */ 613 614 615 616 617 #endif 618 619 620 621 622 623 624 625 626 627 628 629 630 613 close(s); 614 sofree(pData, so); 615 /* Restore the real errno */ 616 errno = tmperrno; 617 #endif 618 return NULL; 619 } 620 setsockopt(s,SOL_SOCKET,SO_OOBINLINE,(char *)&opt,sizeof(int)); 621 622 getsockname(s,(struct sockaddr *)&addr,&addrlen); 623 so->so_fport = addr.sin_port; 624 if (addr.sin_addr.s_addr == 0 || addr.sin_addr.s_addr == loopback_addr.s_addr) 625 so->so_faddr = alias_addr; 626 else 627 so->so_faddr = addr.sin_addr; 628 629 so->s = s; 630 return so; 631 631 } 632 632 … … 638 638 void 639 639 sorwakeup(so) 640 641 { 642 /* 643 /* 640 struct socket *so; 641 { 642 /* sowrite(so); */ 643 /* FD_CLR(so->s,&writefds); */ 644 644 } 645 645 … … 651 651 void 652 652 sowwakeup(so) 653 654 { 655 653 struct socket *so; 654 { 655 /* Nothing, yet */ 656 656 } 657 657 … … 664 664 void 665 665 soisfconnecting(so) 666 667 { 668 669 670 666 register struct socket *so; 667 { 668 so->so_state &= ~(SS_NOFDREF|SS_ISFCONNECTED|SS_FCANTRCVMORE| 669 SS_FCANTSENDMORE|SS_FWDRAIN); 670 so->so_state |= SS_ISFCONNECTING; /* Clobber other states */ 671 671 } 672 672 … … 675 675 register struct socket *so; 676 676 { 677 678 677 so->so_state &= ~(SS_ISFCONNECTING|SS_FWDRAIN|SS_NOFDREF); 678 so->so_state |= SS_ISFCONNECTED; /* Clobber other states */ 679 679 } 680 680 681 681 void 682 682 sofcantrcvmore(so) 683 684 { 685 686 687 688 689 690 691 692 683 struct socket *so; 684 { 685 if ((so->so_state & SS_NOFDREF) == 0) { 686 shutdown(so->s,0); 687 } 688 so->so_state &= ~(SS_ISFCONNECTING); 689 if 
(so->so_state & SS_FCANTSENDMORE) 690 so->so_state = SS_NOFDREF; /* Don't select it */ /* XXX close() here as well? */ 691 else 692 so->so_state |= SS_FCANTRCVMORE; 693 693 } 694 694 695 695 void 696 696 sofcantsendmore(so) 697 698 { 699 697 struct socket *so; 698 { 699 if ((so->so_state & SS_NOFDREF) == 0) { 700 700 shutdown(so->s,1); /* send FIN to fhost */ 701 702 703 704 705 706 701 } 702 so->so_state &= ~(SS_ISFCONNECTING); 703 if (so->so_state & SS_FCANTRCVMORE) 704 so->so_state = SS_NOFDREF; /* as above */ 705 else 706 so->so_state |= SS_FCANTSENDMORE; 707 707 } 708 708 709 709 void 710 710 soisfdisconnected(so) 711 712 { 713 /* 714 /* 715 /* 716 717 718 711 struct socket *so; 712 { 713 /* so->so_state &= ~(SS_ISFCONNECTING|SS_ISFCONNECTED); */ 714 /* close(so->s); */ 715 /* so->so_state = SS_ISFDISCONNECTED; */ 716 /* 717 * XXX Do nothing ... ? 718 */ 719 719 } 720 720 … … 725 725 void 726 726 sofwdrain(so) 727 728 { 729 730 731 732 733 } 734 727 struct socket *so; 728 { 729 if (so->so_rcv.sb_cc) 730 so->so_state |= SS_FWDRAIN; 731 else 732 sofcantsendmore(so); 733 } 734 -
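soread() and sowrite() above share one pattern worth isolating: the sbuf is a ring buffer, so the readable (or writable) region may wrap past the end of the backing store and has to be described as one or two iovecs before calling readv()/writev(). A simplified sketch of that split, assuming a ring with the same data/datalen layout as sbuf (struct ring and ring_to_iov are hypothetical names, not from the source):

    #include <stddef.h>
    #include <sys/uio.h>

    struct ring {
        char  *data;      /* backing store */
        size_t datalen;   /* size of backing store */
        char  *rptr;      /* next byte to consume */
        size_t cc;        /* bytes currently stored */
    };

    /* Describe the readable region of a ring as one or two iovecs, the way
     * sowrite() does before writev(). Returns the iovec count: 1 when the
     * region is contiguous, 2 when it wraps around the end of the store. */
    static int ring_to_iov(const struct ring *rb, struct iovec iov[2])
    {
        size_t tail = (size_t)((rb->data + rb->datalen) - rb->rptr);

        iov[0].iov_base = rb->rptr;
        if (rb->cc <= tail) {
            iov[0].iov_len = rb->cc;        /* no wrap: single segment */
            return 1;
        }
        iov[0].iov_len  = tail;             /* segment up to end of store */
        iov[1].iov_base = rb->data;         /* remainder from the start */
        iov[1].iov_len  = rb->cc - tail;
        return 2;
    }

The mss trimming in soread() (rounding the total read size down to a multiple of t_maxseg) layers on top of exactly this split, and the #ifndef HAVE_READV fallbacks simply issue one recv()/send() per iovec instead.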
trunk/src/VBox/Devices/Network/slirp/socket.h
r14329 r14470 23 23 int s; /* The actual socket */ 24 24 25 26 struct mbuf *so_m; 27 28 29 struct tcpiphdr *so_ti; 30 25 /* XXX union these with not-yet-used sbuf params */ 26 struct mbuf *so_m; /* Pointer to the original SYN packet, 27 * for non-blocking connect()'s, and 28 * PING reply's */ 29 struct tcpiphdr *so_ti; /* Pointer to the original ti within 30 * so_mconn, for non-blocking connections */ 31 31 int so_urgc; 32 struct in_addr so_faddr; 33 struct in_addr so_laddr; 34 u_int16_t so_fport; 35 u_int16_t so_lport; 32 struct in_addr so_faddr; /* foreign host table entry */ 33 struct in_addr so_laddr; /* local host table entry */ 34 u_int16_t so_fport; /* foreign port */ 35 u_int16_t so_lport; /* local port */ 36 36 37 u_int8_t so_iptos;/* Type of service */38 u_int8_t so_emu;/* Is the socket emulated? */37 u_int8_t so_iptos; /* Type of service */ 38 u_int8_t so_emu; /* Is the socket emulated? */ 39 39 40 u_char so_type;/* Type of socket, UDP or TCP */41 int so_state;/* internal state flags SS_*, below */40 u_char so_type; /* Type of socket, UDP or TCP */ 41 int so_state; /* internal state flags SS_*, below */ 42 42 43 struct tcpcb *so_tcpcb;/* pointer to TCP protocol control block */44 u_int so_expire;/* When the socket will expire */43 struct tcpcb *so_tcpcb; /* pointer to TCP protocol control block */ 44 u_int so_expire; /* When the socket will expire */ 45 45 46 int so_queued;/* Number of packets queued from this socket */47 int so_nqueued;/* Number of packets queued in a row48 49 46 int so_queued; /* Number of packets queued from this socket */ 47 int so_nqueued; /* Number of packets queued in a row 48 * Used to determine when to "downgrade" a session 49 * from fastq to batchq */ 50 50 51 struct sbuf so_rcv; 52 struct sbuf so_snd; 53 void * extra; 51 struct sbuf so_rcv; /* Receive buffer */ 52 struct sbuf so_snd; /* Send buffer */ 53 void * extra; /* Extra pointer */ 54 54 }; 55 55 … … 59 59 * local host means the host on the other end of the modem) 60 60 */ 61 #define SS_NOFDREF 0x001/* No fd reference */61 #define SS_NOFDREF 0x001 /* No fd reference */ 62 62 63 #define SS_ISFCONNECTING 0x002/* Socket is connecting to peer (non-blocking connect()'s) */64 #define SS_ISFCONNECTED 0x004/* Socket is connected to peer */65 #define SS_FCANTRCVMORE 0x008/* Socket can't receive more from peer (for half-closes) */66 #define SS_FCANTSENDMORE 0x010/* Socket can't send more to peer (for half-closes) */67 /* #define SS_ISFDISCONNECTED 0x020*//* Socket has disconnected from peer, in 2MSL state */68 #define SS_FWDRAIN 0x040/* We received a FIN, drain data and set SS_FCANTSENDMORE */63 #define SS_ISFCONNECTING 0x002 /* Socket is connecting to peer (non-blocking connect()'s) */ 64 #define SS_ISFCONNECTED 0x004 /* Socket is connected to peer */ 65 #define SS_FCANTRCVMORE 0x008 /* Socket can't receive more from peer (for half-closes) */ 66 #define SS_FCANTSENDMORE 0x010 /* Socket can't send more to peer (for half-closes) */ 67 /* #define SS_ISFDISCONNECTED 0x020*/ /* Socket has disconnected from peer, in 2MSL state */ 68 #define SS_FWDRAIN 0x040 /* We received a FIN, drain data and set SS_FCANTSENDMORE */ 69 69 70 /* #define SS_CTL 71 #define SS_FACCEPTCONN 0x100/* Socket is accepting connections from a host on the internet */72 #define SS_FACCEPTONCE 0x200/* If set, the SS_FACCEPTCONN socket will die after one accept */70 /* #define SS_CTL 0x080 */ 71 #define SS_FACCEPTCONN 0x100 /* Socket is accepting connections from a host on the internet */ 72 #define SS_FACCEPTONCE 0x200 /* If set, the 
SS_FACCEPTCONN socket will die after one accept */ 73 73 74 74 extern struct socket tcb; … … 77 77 #if defined(DECLARE_IOVEC) && !defined(HAVE_READV) 78 78 struct iovec { 79 80 79 char *iov_base; 80 size_t iov_len; 81 81 }; 82 82 #endif -
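The SS_FCANTRCVMORE/SS_FCANTSENDMORE pair above is what implements half-closes: sofcantrcvmore() and sofcantsendmore() in socket.c each set one direction's flag and collapse the state to SS_NOFDREF once both directions are shut. A condensed sketch of that transition, reusing the flag values from this header (half_close and its sending parameter are illustrative names; the shutdown() call on the real fd is elided here):

    /* Flag values as defined in socket.h above. */
    #define SS_NOFDREF        0x001
    #define SS_ISFCONNECTING  0x002
    #define SS_FCANTRCVMORE   0x008
    #define SS_FCANTSENDMORE  0x010

    /* One direction of a half-close: clear the connecting bit, mark this
     * direction dead, and drop the fd reference once both directions are. */
    static void half_close(int *state, int sending)
    {
        int this_dir  = sending ? SS_FCANTSENDMORE : SS_FCANTRCVMORE;
        int other_dir = sending ? SS_FCANTRCVMORE  : SS_FCANTSENDMORE;

        *state &= ~SS_ISFCONNECTING;
        if (*state & other_dir)
            *state = SS_NOFDREF;   /* both halves closed: stop select()ing it */
        else
            *state |= this_dir;
    }

This matches the comment in the header: a socket in SS_NOFDREF is skipped by the select()/poll loops in slirp.c, since FD_ISSET on it would be meaningless.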
trunk/src/VBox/Devices/Network/slirp/tcp_input.c
r14390 r14470 1 1 /* 2 2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994 3 * 3 * The Regents of the University of California. All rights reserved. 4 4 * 5 5 * Redistribution and use in source and binary forms, with or without … … 13 13 * 3. All advertising materials mentioning features or use of this software 14 14 * must display the following acknowledgement: 15 * 16 * 15 * This product includes software developed by the University of 16 * California, Berkeley and its contributors. 17 17 * 4. Neither the name of the University nor the names of its contributors 18 18 * may be used to endorse or promote products derived from this software … … 31 31 * SUCH DAMAGE. 32 32 * 33 * @(#)tcp_input.c8.5 (Berkeley) 4/10/9433 * @(#)tcp_input.c 8.5 (Berkeley) 4/10/94 34 34 * tcp_input.c,v 1.10 1994/10/13 18:36:32 wollman Exp 35 35 */ … … 47 47 48 48 49 #define TCP_PAWS_IDLE 49 #define TCP_PAWS_IDLE (24 * 24 * 60 * 60 * PR_SLOWHZ) 50 50 51 51 /* for modulo comparisons of timestamps */ 52 #define TSTMP_LT(a,b) 53 #define TSTMP_GEQ(a,b) 52 #define TSTMP_LT(a,b) ((int)((a)-(b)) < 0) 53 #define TSTMP_GEQ(a,b) ((int)((a)-(b)) >= 0) 54 54 55 55 #ifndef VBOX_WITH_BSD_TCP_REASS … … 78 78 tcpstat.tcps_rcvbyte += (ti)->ti_len;\ 79 79 if (so->so_emu) { \ 80 81 82 80 if (tcp_emu((pData), (so),(m))) sbappend((pData), (so), (m)); \ 81 } else \ 82 sbappend((pData), (so), (m)); \ 83 83 /* sorwakeup(so); */ \ 84 84 } else {\ 85 85 (flags) = tcp_reass((pData), (tp), (ti), (m)); \ 86 86 tp->t_flags |= TF_ACKNOW; \ … … 88 88 } 89 89 #else 90 #define 91 92 93 94 95 96 97 98 99 100 101 102 103 /* 104 105 106 107 90 #define TCP_REASS(pData, tp, ti, m, so, flags) { \ 91 if ((ti)->ti_seq == (tp)->rcv_nxt && \ 92 u32_to_ptr((pData), (tp)->seg_next, struct tcpcb *) == (tp) && \ 93 (tp)->t_state == TCPS_ESTABLISHED) { \ 94 tp->t_flags |= TF_DELACK; \ 95 (tp)->rcv_nxt += (ti)->ti_len; \ 96 flags = (ti)->ti_flags & TH_FIN; \ 97 tcpstat.tcps_rcvpack++;\ 98 tcpstat.tcps_rcvbyte += (ti)->ti_len;\ 99 if (so->so_emu) { \ 100 if (tcp_emu((pData), (so),(m))) sbappend((pData), (so), (m)); \ 101 } else \ 102 sbappend((pData), (so), (m)); \ 103 /* sorwakeup(so); */ \ 104 } else { \ 105 (flags) = tcp_reass((pData), (tp), (ti), (m)); \ 106 tp->t_flags |= TF_ACKNOW; \ 107 } \ 108 108 } 109 109 #endif … … 112 112 tcp_reass(PNATState pData, register struct tcpcb *tp, register struct tcpiphdr *ti, struct mbuf *m) 113 113 { 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 114 register struct tcpiphdr *q; 115 struct socket *so = tp->t_socket; 116 int flags; 117 118 /* 119 * Call with ti==0 after become established to 120 * force pre-ESTABLISHED data up to user socket. 121 */ 122 if (ti == 0) 123 goto present; 124 125 /* 126 * Find a segment which begins after this one does. 127 */ 128 for (q = u32_to_ptr(pData, tp->seg_next, struct tcpiphdr *); q != (struct tcpiphdr *)tp; 129 q = u32_to_ptr(pData, q->ti_next, struct tcpiphdr *)) 130 if (SEQ_GT(q->ti_seq, ti->ti_seq)) 131 break; 132 133 /* 134 * If there is a preceding segment, it may provide some of 135 * our data already. If so, drop the data from the incoming 136 * segment. If it provides all of our data, drop us. 
137 */ 138 if (u32_to_ptr(pData, q->ti_prev, struct tcpiphdr *) != (struct tcpiphdr *)tp) { 139 register int i; 140 q = u32_to_ptr(pData, q->ti_prev, struct tcpiphdr *); 141 /* conversion to int (in i) handles seq wraparound */ 142 i = q->ti_seq + q->ti_len - ti->ti_seq; 143 if (i > 0) { 144 if (i >= ti->ti_len) { 145 tcpstat.tcps_rcvduppack++; 146 tcpstat.tcps_rcvdupbyte += ti->ti_len; 147 m_freem(pData, m); 148 /* 149 * Try to present any queued data 150 * at the left window edge to the user. 151 * This is needed after the 3-WHS 152 * completes. 153 */ 154 goto present; /* ??? */ 155 } 156 m_adj(m, i); 157 ti->ti_len -= i; 158 ti->ti_seq += i; 159 } 160 q = u32_to_ptr(pData, q->ti_next, struct tcpiphdr *); 161 } 162 tcpstat.tcps_rcvoopack++; 163 tcpstat.tcps_rcvoobyte += ti->ti_len; 164 REASS_MBUF_SET(ti, m); /* XXX */ 165 166 /* 167 * While we overlap succeeding segments trim them or, 168 * if they are completely covered, dequeue them. 169 */ 170 while (q != (struct tcpiphdr *)tp) { 171 register int i = (ti->ti_seq + ti->ti_len) - q->ti_seq; 172 if (i <= 0) 173 break; 174 if (i < q->ti_len) { 175 q->ti_seq += i; 176 q->ti_len -= i; 177 m_adj(REASS_MBUF_GET(q), i); 178 break; 179 } 180 q = u32_to_ptr(pData, q->ti_next, struct tcpiphdr *); 181 m = REASS_MBUF_GET(u32_to_ptr(pData, q->ti_prev, struct tcpiphdr *)); 182 remque_32(pData, u32_to_ptr(pData, q->ti_prev, struct tcpiphdr *)); 183 m_freem(pData, m); 184 } 185 186 /* 187 * Stick new segment in its place. 188 */ 189 insque_32(pData, ti, u32_to_ptr(pData, q->ti_prev, struct tcpiphdr *)); 190 190 191 191 present: 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 /* 210 192 /* 193 * Present data to user, advancing rcv_nxt through 194 * completed sequence space. 195 */ 196 if (!TCPS_HAVEESTABLISHED(tp->t_state)) 197 return (0); 198 ti = u32_to_ptr(pData, tp->seg_next, struct tcpiphdr *); 199 if (ti == (struct tcpiphdr *)tp || ti->ti_seq != tp->rcv_nxt) 200 return (0); 201 if (tp->t_state == TCPS_SYN_RECEIVED && ti->ti_len) 202 return (0); 203 do { 204 tp->rcv_nxt += ti->ti_len; 205 flags = ti->ti_flags & TH_FIN; 206 remque_32(pData, ti); 207 m = REASS_MBUF_GET(ti); /* XXX */ 208 ti = u32_to_ptr(pData, ti->ti_next, struct tcpiphdr *); 209 /* if (so->so_state & SS_FCANTRCVMORE) */ 210 if (so->so_state & SS_FCANTSENDMORE) 211 211 m_freem(pData, m); 212 213 214 215 216 217 218 219 /* 220 212 else { 213 if (so->so_emu) { 214 if (tcp_emu(pData, so,m)) sbappend(pData, so, m); 215 } else 216 sbappend(pData, so, m); 217 } 218 } while (ti != (struct tcpiphdr *)tp && ti->ti_seq == tp->rcv_nxt); 219 /* sorwakeup(so); */ 220 return (flags); 221 221 } 222 222 … … 231 231 #else /* !TCP_ACK_HACK */ 232 232 #define DELAY_ACK(tp, ign) \ 233 233 tp->t_flags |= TF_DELACK; 234 234 #endif /* TCP_ACK_HACK */ 235 235 … … 243 243 tcp_reass(PNATState pData, struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m) 244 244 { 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 goto present;/* ??? 
*/329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 245 struct tseg_qent *q; 246 struct tseg_qent *p = NULL; 247 struct tseg_qent *nq; 248 struct tseg_qent *te = NULL; 249 struct socket *so = tp->t_socket; 250 int flags; 251 252 /* 253 * XXX: tcp_reass() is rather inefficient with its data structures 254 * and should be rewritten (see NetBSD for optimizations). While 255 * doing that it should move to its own file tcp_reass.c. 256 */ 257 258 /* 259 * Call with th==NULL after become established to 260 * force pre-ESTABLISHED data up to user socket. 261 */ 262 if (th == NULL) 263 goto present; 264 265 /* 266 * Limit the number of segments in the reassembly queue to prevent 267 * holding on to too many segments (and thus running out of mbufs). 268 * Make sure to let the missing segment through which caused this 269 * queue. Always keep one global queue entry spare to be able to 270 * process the missing segment. 271 */ 272 if (th->th_seq != tp->rcv_nxt && 273 (tcp_reass_qsize + 1 >= tcp_reass_maxseg || 274 tp->t_segqlen >= tcp_reass_maxqlen)) { 275 tcp_reass_overflows++; 276 tcpstat.tcps_rcvmemdrop++; 277 m_freem(pData, m); 278 *tlenp = 0; 279 return (0); 280 } 281 282 /* 283 * Allocate a new queue entry. If we can't, or hit the zone limit 284 * just drop the pkt. 285 */ 286 te = malloc(sizeof(struct tseg_qent)); 287 if (te == NULL) { 288 tcpstat.tcps_rcvmemdrop++; 289 m_freem(pData, m); 290 *tlenp = 0; 291 return (0); 292 } 293 tp->t_segqlen++; 294 tcp_reass_qsize++; 295 296 /* 297 * Find a segment which begins after this one does. 298 */ 299 LIST_FOREACH(q, &tp->t_segq, tqe_q) { 300 if (SEQ_GT(q->tqe_th->th_seq, th->th_seq)) 301 break; 302 p = q; 303 } 304 305 /* 306 * If there is a preceding segment, it may provide some of 307 * our data already. If so, drop the data from the incoming 308 * segment. If it provides all of our data, drop us. 309 */ 310 if (p != NULL) { 311 int i; 312 /* conversion to int (in i) handles seq wraparound */ 313 i = p->tqe_th->th_seq + p->tqe_len - th->th_seq; 314 if (i > 0) { 315 if (i >= *tlenp) { 316 tcpstat.tcps_rcvduppack++; 317 tcpstat.tcps_rcvdupbyte += *tlenp; 318 m_freem(pData, m); 319 free(te); 320 tp->t_segqlen--; 321 tcp_reass_qsize--; 322 /* 323 * Try to present any queued data 324 * at the left window edge to the user. 325 * This is needed after the 3-WHS 326 * completes. 327 */ 328 goto present; /* ??? */ 329 } 330 m_adj(m, i); 331 *tlenp -= i; 332 th->th_seq += i; 333 } 334 } 335 tcpstat.tcps_rcvoopack++; 336 tcpstat.tcps_rcvoobyte += *tlenp; 337 338 /* 339 * While we overlap succeeding segments trim them or, 340 * if they are completely covered, dequeue them. 341 */ 342 while (q) { 343 int i = (th->th_seq + *tlenp) - q->tqe_th->th_seq; 344 if (i <= 0) 345 break; 346 if (i < q->tqe_len) { 347 q->tqe_th->th_seq += i; 348 q->tqe_len -= i; 349 m_adj(q->tqe_m, i); 350 break; 351 } 352 353 nq = LIST_NEXT(q, tqe_q); 354 LIST_REMOVE(q, tqe_q); 355 m_freem(pData, q->tqe_m); 356 free(q); 357 tp->t_segqlen--; 358 tcp_reass_qsize--; 359 q = nq; 360 } 361 362 /* Insert the new segment queue entry into place. 
*/ 363 te->tqe_m = m; 364 te->tqe_th = th; 365 te->tqe_len = *tlenp; 366 367 if (p == NULL) { 368 LIST_INSERT_HEAD(&tp->t_segq, te, tqe_q); 369 } else { 370 LIST_INSERT_AFTER(p, te, tqe_q); 371 } 372 372 373 373 present: 374 375 376 377 378 379 380 381 382 383 384 385 386 387 374 /* 375 * Present data to user, advancing rcv_nxt through 376 * completed sequence space. 377 */ 378 if (!TCPS_HAVEESTABLISHED(tp->t_state)) 379 return (0); 380 q = LIST_FIRST(&tp->t_segq); 381 if (!q || q->tqe_th->th_seq != tp->rcv_nxt) 382 return (0); 383 do { 384 tp->rcv_nxt += q->tqe_len; 385 flags = q->tqe_th->th_flags & TH_FIN; 386 nq = LIST_NEXT(q, tqe_q); 387 LIST_REMOVE(q, tqe_q); 388 388 /* XXX: This place should be checked for the same code in 389 389 * original BSD code for Slirp and current BSD used SS_FCANTRCVMORE 390 390 */ 391 392 393 394 395 396 397 398 399 400 391 if (so->so_state & SS_FCANTSENDMORE) 392 m_freem(pData, q->tqe_m); 393 else 394 sbappend(pData, so, q->tqe_m); 395 free(q); 396 tp->t_segqlen--; 397 tcp_reass_qsize--; 398 q = nq; 399 } while (q && q->tqe_th->th_seq == tp->rcv_nxt); 400 return (flags); 401 401 } 402 402 #endif /* VBOX_WITH_BSD_TCP_REASS */ … … 409 409 tcp_input(PNATState pData, register struct mbuf *m, int iphlen, struct socket *inso) 410 410 { 411 412 413 414 415 416 417 418 419 420 /* 421 422 423 424 /* 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 * pull out TCP options and adjust length.XXX489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 /* 509 * 510 * 511 * 512 * 513 * 514 * 515 * 516 * 517 * 518 */ 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 411 struct ip save_ip, *ip; 412 register struct tcpiphdr *ti; 413 caddr_t optp = NULL; 414 int optlen = 0; 415 int len, tlen, off; 416 register struct tcpcb *tp = 0; 417 register int tiflags; 418 struct socket *so = 0; 419 int todrop, acked, ourfinisacked, needoutput = 0; 420 /* int dropsocket = 0; */ 421 int iss = 0; 422 u_long tiwin; 423 int ret; 424 /* int ts_present = 0; */ 425 int mbuf_freed = 0; 426 427 DEBUG_CALL("tcp_input"); 428 DEBUG_ARGS((dfd," m = %8lx iphlen = %2d inso = %lx\n", 429 (long )m, iphlen, (long )inso )); 430 431 /* 432 * If called with m == 0, then we're continuing the connect 433 */ 434 if (m == NULL) { 435 so = inso; 436 437 /* Re-set a few variables */ 438 tp = sototcpcb(so); 439 m = so->so_m; 440 so->so_m = 0; 441 ti = so->so_ti; 442 tiwin = ti->ti_win; 443 tiflags = ti->ti_flags; 444 445 goto cont_conn; 446 } 447 448 449 tcpstat.tcps_rcvtotal++; 450 /* 451 * Get IP and TCP header together in first mbuf. 452 * Note: IP leaves IP header in first mbuf. 453 */ 454 ti = mtod(m, struct tcpiphdr *); 455 if (iphlen > sizeof(struct ip )) { 456 ip_stripoptions(m, (struct mbuf *)0); 457 iphlen=sizeof(struct ip ); 458 } 459 /* XXX Check if too short */ 460 461 462 /* 463 * Save a copy of the IP header in case we want restore it 464 * for sending an ICMP error message in response. 465 */ 466 ip=mtod(m, struct ip *); 467 save_ip = *ip; 468 save_ip.ip_len+= iphlen; 469 470 /* 471 * Checksum extended TCP header and data. 
472 */ 473 tlen = ((struct ip *)ti)->ip_len; 474 ti->ti_next = ti->ti_prev = 0; 475 ti->ti_x1 = 0; 476 ti->ti_len = htons((u_int16_t)tlen); 477 len = sizeof(struct ip ) + tlen; 478 /* keep checksum for ICMP reply 479 * ti->ti_sum = cksum(m, len); 480 * if (ti->ti_sum) { */ 481 if(cksum(m, len)) { 482 tcpstat.tcps_rcvbadsum++; 483 goto drop; 484 } 485 486 /* 487 * Check that TCP offset makes sense, 488 * pull out TCP options and adjust length. XXX 489 */ 490 off = ti->ti_off << 2; 491 if (off < sizeof (struct tcphdr) || off > tlen) { 492 tcpstat.tcps_rcvbadoff++; 493 goto drop; 494 } 495 tlen -= off; 496 ti->ti_len = tlen; 497 if (off > sizeof (struct tcphdr)) { 498 optlen = off - sizeof (struct tcphdr); 499 optp = mtod(m, caddr_t) + sizeof (struct tcpiphdr); 500 501 /* 502 * Do quick retrieval of timestamp options ("options 503 * prediction?"). If timestamp is the only option and it's 504 * formatted as recommended in RFC 1323 appendix A, we 505 * quickly get the values now and not bother calling 506 * tcp_dooptions(), etc. 507 */ 508 /* if ((optlen == TCPOLEN_TSTAMP_APPA || 509 * (optlen > TCPOLEN_TSTAMP_APPA && 510 * optp[TCPOLEN_TSTAMP_APPA] == TCPOPT_EOL)) && 511 * *(u_int32_t *)optp == htonl(TCPOPT_TSTAMP_HDR) && 512 * (ti->ti_flags & TH_SYN) == 0) { 513 * ts_present = 1; 514 * ts_val = ntohl(*(u_int32_t *)(optp + 4)); 515 * ts_ecr = ntohl(*(u_int32_t *)(optp + 8)); 516 * optp = NULL; / * we've parsed the options * / 517 * } 518 */ 519 } 520 tiflags = ti->ti_flags; 521 522 /* 523 * Convert TCP protocol specific fields to host format. 524 */ 525 NTOHL(ti->ti_seq); 526 NTOHL(ti->ti_ack); 527 NTOHS(ti->ti_win); 528 NTOHS(ti->ti_urp); 529 530 /* 531 * Drop TCP, IP headers and TCP options. 532 */ 533 m->m_data += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr); 534 m->m_len -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr); 535 536 /* 537 * Locate pcb for segment. 538 */ 539 539 findso: 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 /*tcp_last_so = so; */ /* XXX ? */580 /*tp = sototcpcb(so); */581 582 583 584 585 586 587 588 589 590 591 592 540 so = tcp_last_so; 541 if (so->so_fport != ti->ti_dport || 542 so->so_lport != ti->ti_sport || 543 so->so_laddr.s_addr != ti->ti_src.s_addr || 544 so->so_faddr.s_addr != ti->ti_dst.s_addr) { 545 so = solookup(&tcb, ti->ti_src, ti->ti_sport, 546 ti->ti_dst, ti->ti_dport); 547 if (so) 548 tcp_last_so = so; 549 ++tcpstat.tcps_socachemiss; 550 } 551 552 /* 553 * If the state is CLOSED (i.e., TCB does not exist) then 554 * all data in the incoming segment is discarded. 555 * If the TCB exists but is in CLOSED state, it is embryonic, 556 * but should either do a listen or a connect soon. 557 * 558 * state == CLOSED means we've done socreate() but haven't 559 * attached it to a protocol yet... 560 * 561 * XXX If a TCB does not exist, and the TH_SYN flag is 562 * the only flag set, then create a session, mark it 563 * as if it was LISTENING, and continue... 564 */ 565 if (so == 0) { 566 if ((tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) != TH_SYN) 567 goto dropwithreset; 568 569 if ((so = socreate()) == NULL) 570 goto dropwithreset; 571 if (tcp_attach(pData, so) < 0) { 572 free(so); /* Not sofree (if it failed, it's not insqued) */ 573 goto dropwithreset; 574 } 575 576 sbreserve(&so->so_snd, tcp_sndspace); 577 sbreserve(&so->so_rcv, tcp_rcvspace); 578 579 /* tcp_last_so = so; */ /* XXX ? 
*/ 580 /* tp = sototcpcb(so); */ 581 582 so->so_laddr = ti->ti_src; 583 so->so_lport = ti->ti_sport; 584 so->so_faddr = ti->ti_dst; 585 so->so_fport = ti->ti_dport; 586 587 if ((so->so_iptos = tcp_tos(so)) == 0) 588 so->so_iptos = ((struct ip *)ti)->ip_tos; 589 590 tp = sototcpcb(so); 591 tp->t_state = TCPS_LISTEN; 592 } 593 593 594 594 /* 595 595 * If this is a still-connecting socket, this probably 596 596 * a retransmit of the SYN. Whether it's a retransmit SYN 597 597 * or something else, we nuke it. 598 598 */ 599 599 if (so->so_state & SS_ISFCONNECTING) 600 600 goto drop; 601 601 602 603 604 605 606 607 608 609 610 611 /* 612 * 613 * 614 */ 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 602 tp = sototcpcb(so); 603 604 /* XXX Should never fail */ 605 if (tp == 0) 606 goto dropwithreset; 607 if (tp->t_state == TCPS_CLOSED) 608 goto drop; 609 610 /* Unscale the window into a 32-bit value. */ 611 /* if ((tiflags & TH_SYN) == 0) 612 * tiwin = ti->ti_win << tp->snd_scale; 613 * else 614 */ 615 tiwin = ti->ti_win; 616 617 /* 618 * Segment received on connection. 619 * Reset idle time and keep-alive timer. 620 */ 621 tp->t_idle = 0; 622 if (so_options) 623 tp->t_timer[TCPT_KEEP] = tcp_keepintvl; 624 else 625 tp->t_timer[TCPT_KEEP] = tcp_keepidle; 626 627 /* 628 * Process options if not in LISTEN state, 629 * else do it below (after getting remote address). 630 */ 631 if (optp && tp->t_state != TCPS_LISTEN) 632 tcp_dooptions(pData, tp, (u_char *)optp, optlen, ti); 633 633 /* , */ 634 /* 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 /* 657 658 659 660 661 662 663 664 /* 665 * 666 * 667 * 668 * 669 */ 670 671 672 673 674 675 676 677 678 /* 679 * 680 * 681 */ 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 /* 710 * 711 */ 712 713 714 715 716 717 718 719 720 721 722 634 /* &ts_present, &ts_val, &ts_ecr); */ 635 636 /* 637 * Header prediction: check for the two common cases 638 * of a uni-directional data xfer. If the packet has 639 * no control flags, is in-sequence, the window didn't 640 * change and we're not retransmitting, it's a 641 * candidate. If the length is zero and the ack moved 642 * forward, we're the sender side of the xfer. Just 643 * free the data acked & wake any higher level process 644 * that was blocked waiting for space. If the length 645 * is non-zero and the ack didn't move, we're the 646 * receiver side. If we're getting packets in-order 647 * (the reassembly queue is empty), add the data to 648 * the socket buffer and note that we need a delayed ack. 649 * 650 * XXX Some of these tests are not needed 651 * eg: the tiwin == tp->snd_wnd prevents many more 652 * predictions.. with no *real* advantage.. 653 */ 654 if (tp->t_state == TCPS_ESTABLISHED && 655 (tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK && 656 /* (!ts_present || TSTMP_GEQ(ts_val, tp->ts_recent)) && */ 657 ti->ti_seq == tp->rcv_nxt && 658 tiwin && tiwin == tp->snd_wnd && 659 tp->snd_nxt == tp->snd_max) { 660 /* 661 * If last ACK falls within this segment's sequence numbers, 662 * record the timestamp. 
663 */ 664 /* if (ts_present && SEQ_LEQ(ti->ti_seq, tp->last_ack_sent) && 665 * SEQ_LT(tp->last_ack_sent, ti->ti_seq + ti->ti_len)) { 666 * tp->ts_recent_age = tcp_now; 667 * tp->ts_recent = ts_val; 668 * } 669 */ 670 if (ti->ti_len == 0) { 671 if (SEQ_GT(ti->ti_ack, tp->snd_una) && 672 SEQ_LEQ(ti->ti_ack, tp->snd_max) && 673 tp->snd_cwnd >= tp->snd_wnd) { 674 /* 675 * this is a pure ack for outstanding data. 676 */ 677 ++tcpstat.tcps_predack; 678 /* if (ts_present) 679 * tcp_xmit_timer(tp, tcp_now-ts_ecr+1); 680 * else 681 */ if (tp->t_rtt && 682 SEQ_GT(ti->ti_ack, tp->t_rtseq)) 683 tcp_xmit_timer(pData, tp, tp->t_rtt); 684 acked = ti->ti_ack - tp->snd_una; 685 tcpstat.tcps_rcvackpack++; 686 tcpstat.tcps_rcvackbyte += acked; 687 sbdrop(&so->so_snd, acked); 688 tp->snd_una = ti->ti_ack; 689 m_freem(pData, m); 690 691 /* 692 * If all outstanding data are acked, stop 693 * retransmit timer, otherwise restart timer 694 * using current (possibly backed-off) value. 695 * If process is waiting for space, 696 * wakeup/selwakeup/signal. If data 697 * are ready to send, let tcp_output 698 * decide between more output or persist. 699 */ 700 if (tp->snd_una == tp->snd_max) 701 tp->t_timer[TCPT_REXMT] = 0; 702 else if (tp->t_timer[TCPT_PERSIST] == 0) 703 tp->t_timer[TCPT_REXMT] = tp->t_rxtcur; 704 705 /* 706 * There's room in so_snd, sowwakup will read() 707 * from the socket if we can 708 */ 709 /* if (so->so_snd.sb_flags & SB_NOTIFY) 710 * sowwakeup(so); 711 */ 712 /* 713 * This is called because sowwakeup might have 714 * put data into so_snd. Since we don't so sowwakeup, 715 * we don't need this.. XXX??? 716 */ 717 if (so->so_snd.sb_cc) 718 (void) tcp_output(pData, tp); 719 720 return; 721 } 722 } else if (ti->ti_ack == tp->snd_una && 723 723 #ifndef VBOX_WITH_BSD_TCP_REASS 724 724 u32_to_ptr(pData, tp->seg_next, struct tcpcb *) == tp && 725 725 #else /* VBOX_WITH_BSD_TCP_REASS */ 726 726 LIST_FIRST(&tp->t_segq) && 727 727 #endif /* VBOX_WITH_BSD_TCP_REASS */ 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 /* 752 753 754 755 *congestion avoidance sender won't send more until756 *he gets an ACK.757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 728 ti->ti_len <= sbspace(&so->so_rcv)) { 729 /* 730 * this is a pure, in-sequence data packet 731 * with nothing on the reassembly queue and 732 * we have enough buffer space to take it. 733 */ 734 ++tcpstat.tcps_preddat; 735 tp->rcv_nxt += ti->ti_len; 736 tcpstat.tcps_rcvpack++; 737 tcpstat.tcps_rcvbyte += ti->ti_len; 738 /* 739 * Add data to socket buffer. 740 */ 741 if (so->so_emu) { 742 if (tcp_emu(pData, so,m)) sbappend(pData, so, m); 743 } else 744 sbappend(pData, so, m); 745 746 /* 747 * XXX This is called when data arrives. Later, check 748 * if we can actually write() to the socket 749 * XXX Need to check? It's be NON_BLOCKING 750 */ 751 /* sorwakeup(so); */ 752 753 /* 754 * If this is a short packet, then ACK now - with Nagel 755 * congestion avoidance sender won't send more until 756 * he gets an ACK. 757 * 758 * It is better to not delay acks at all to maximize 759 * TCP throughput. See RFC 2581. 760 */ 761 tp->t_flags |= TF_ACKNOW; 762 tcp_output(pData, tp); 763 return; 764 } 765 } /* header prediction */ 766 /* 767 * Calculate amount of space in receive window, 768 * and then do TCP input processing. 769 * Receive window is amount of space in rcv queue, 770 * but not less than advertised window. 
771 */ 772 { int win; 773 773 win = sbspace(&so->so_rcv); 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 /*&ts_present, &ts_val, &ts_ecr); */863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 *if seg contains an ACK, but not for our SYN, drop the input.882 *if seg contains a RST, then drop the connection.883 *if seg does not contain SYN, then drop it.884 885 *initialize tp->rcv_nxt and tp->irs886 *if seg contains ack then advance tp->snd_una887 *if SYN has been acked change to ESTABLISHED else SYN_RCVD state888 *arrange for segment to be acked (eventually)889 *continue processing rest of data/controls, beginning with URG890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 /* 922 * 923 * 924 * 925 * 774 if (win < 0) 775 win = 0; 776 tp->rcv_wnd = max(win, (int)(tp->rcv_adv - tp->rcv_nxt)); 777 } 778 779 switch (tp->t_state) { 780 781 /* 782 * If the state is LISTEN then ignore segment if it contains an RST. 783 * If the segment contains an ACK then it is bad and send a RST. 784 * If it does not contain a SYN then it is not interesting; drop it. 785 * Don't bother responding if the destination was a broadcast. 786 * Otherwise initialize tp->rcv_nxt, and tp->irs, select an initial 787 * tp->iss, and send a segment: 788 * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK> 789 * Also initialize tp->snd_nxt to tp->iss+1 and tp->snd_una to tp->iss. 790 * Fill in remote peer address fields if not previously specified. 791 * Enter SYN_RECEIVED state, and process any other fields of this 792 * segment in this state. 793 */ 794 case TCPS_LISTEN: { 795 796 if (tiflags & TH_RST) 797 goto drop; 798 if (tiflags & TH_ACK) 799 goto dropwithreset; 800 if ((tiflags & TH_SYN) == 0) 801 goto drop; 802 803 /* 804 * This has way too many gotos... 805 * But a bit of spaghetti code never hurt anybody :) 806 */ 807 808 if (so->so_emu & EMU_NOCONNECT) { 809 so->so_emu &= ~EMU_NOCONNECT; 810 goto cont_input; 811 } 812 813 if((tcp_fconnect(pData, so) == -1) && (errno != EINPROGRESS) && (errno != EWOULDBLOCK)) { 814 u_char code=ICMP_UNREACH_NET; 815 DEBUG_MISC((dfd," tcp fconnect errno = %d-%s\n", 816 errno,strerror(errno))); 817 if(errno == ECONNREFUSED) { 818 /* ACK the SYN, send RST to refuse the connection */ 819 tcp_respond(pData, tp, ti, m, ti->ti_seq+1, (tcp_seq)0, 820 TH_RST|TH_ACK); 821 } else { 822 if(errno == EHOSTUNREACH) code=ICMP_UNREACH_HOST; 823 HTONL(ti->ti_seq); /* restore tcp header */ 824 HTONL(ti->ti_ack); 825 HTONS(ti->ti_win); 826 HTONS(ti->ti_urp); 827 m->m_data -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr); 828 m->m_len += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr); 829 *ip=save_ip; 830 icmp_error(pData, m, ICMP_UNREACH,code, 0,strerror(errno)); 831 } 832 tp = tcp_close(pData, tp); 833 m_free(pData, m); 834 } else { 835 /* 836 * Haven't connected yet, save the current mbuf 837 * and ti, and return 838 * XXX Some OS's don't tell us whether the connect() 839 * succeeded or not. So we must time it out. 
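When the host-side connect() fails outright in the LISTEN path above, the code picks between a TCP reset and an ICMP error based on errno. A compact restatement of that decision, as a sketch with hypothetical enum names rather than slirp's API:

    #include <errno.h>

    enum syn_action { SYN_HOLD, SYN_RST_ACK, SYN_ICMP_HOST, SYN_ICMP_NET };

    /* Classify the result of the non-blocking connect() attempt. */
    static enum syn_action classify_fconnect(int rc, int err)
    {
        if (rc != -1 || err == EINPROGRESS || err == EWOULDBLOCK)
            return SYN_HOLD;        /* pending: park in SYN_RCVD and time it out */
        if (err == ECONNREFUSED)
            return SYN_RST_ACK;     /* answer the SYN with RST|ACK */
        if (err == EHOSTUNREACH)
            return SYN_ICMP_HOST;   /* ICMP_UNREACH_HOST */
        return SYN_ICMP_NET;        /* everything else: ICMP_UNREACH_NET */
    }

Note that before the ICMP error can be generated, the code must byte-swap the TCP header fields back and restore the saved IP header, since icmp_error() expects the offending datagram as it originally arrived.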
840 */ 841 so->so_m = m; 842 so->so_ti = ti; 843 tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT; 844 tp->t_state = TCPS_SYN_RECEIVED; 845 } 846 return; 847 848 cont_conn: 849 /* m==NULL 850 * Check if the connect succeeded 851 */ 852 if (so->so_state & SS_NOFDREF) { 853 tp = tcp_close(pData, tp); 854 goto dropwithreset; 855 } 856 cont_input: 857 tcp_template(tp); 858 859 if (optp) 860 tcp_dooptions(pData, tp, (u_char *)optp, optlen, ti); 861 /* , */ 862 /* &ts_present, &ts_val, &ts_ecr); */ 863 864 if (iss) 865 tp->iss = iss; 866 else 867 tp->iss = tcp_iss; 868 tcp_iss += TCP_ISSINCR/2; 869 tp->irs = ti->ti_seq; 870 tcp_sendseqinit(tp); 871 tcp_rcvseqinit(tp); 872 tp->t_flags |= TF_ACKNOW; 873 tp->t_state = TCPS_SYN_RECEIVED; 874 tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT; 875 tcpstat.tcps_accepts++; 876 goto trimthenstep6; 877 } /* case TCPS_LISTEN */ 878 879 /* 880 * If the state is SYN_SENT: 881 * if seg contains an ACK, but not for our SYN, drop the input. 882 * if seg contains a RST, then drop the connection. 883 * if seg does not contain SYN, then drop it. 884 * Otherwise this is an acceptable SYN segment 885 * initialize tp->rcv_nxt and tp->irs 886 * if seg contains ack then advance tp->snd_una 887 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state 888 * arrange for segment to be acked (eventually) 889 * continue processing rest of data/controls, beginning with URG 890 */ 891 case TCPS_SYN_SENT: 892 if ((tiflags & TH_ACK) && 893 (SEQ_LEQ(ti->ti_ack, tp->iss) || 894 SEQ_GT(ti->ti_ack, tp->snd_max))) 895 goto dropwithreset; 896 897 if (tiflags & TH_RST) { 898 if (tiflags & TH_ACK) 899 tp = tcp_drop(pData, tp,0); /* XXX Check t_softerror! */ 900 goto drop; 901 } 902 903 if ((tiflags & TH_SYN) == 0) 904 goto drop; 905 if (tiflags & TH_ACK) { 906 tp->snd_una = ti->ti_ack; 907 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 908 tp->snd_nxt = tp->snd_una; 909 } 910 911 tp->t_timer[TCPT_REXMT] = 0; 912 tp->irs = ti->ti_seq; 913 tcp_rcvseqinit(tp); 914 tp->t_flags |= TF_ACKNOW; 915 if (tiflags & TH_ACK && SEQ_GT(tp->snd_una, tp->iss)) { 916 tcpstat.tcps_connects++; 917 soisfconnected(so); 918 tp->t_state = TCPS_ESTABLISHED; 919 920 /* Do window scaling on this connection? */ 921 /* if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 922 * (TF_RCVD_SCALE|TF_REQ_SCALE)) { 923 * tp->snd_scale = tp->requested_s_scale; 924 * tp->rcv_scale = tp->request_r_scale; 925 * } 926 926 */ 927 927 #ifndef VBOX_WITH_BSD_TCP_REASS 928 929 928 (void) tcp_reass(pData, tp, (struct tcpiphdr *)0, 929 (struct mbuf *)0); 930 930 #else /* VBOX_WITH_BSD_TCP_REASS */ 931 931 (void) tcp_reass(pData, tp, (struct tcphdr *)0, NULL, (struct mbuf *)0); 932 932 #endif /* VBOX_WITH_BSD_TCP_REASS */ 933 934 935 936 937 938 939 940 933 /* 934 * if we didn't have to retransmit the SYN, 935 * use its rtt as our initial srtt & rtt var. 936 */ 937 if (tp->t_rtt) 938 tcp_xmit_timer(pData, tp, tp->t_rtt); 939 } else 940 tp->t_state = TCPS_SYN_RECEIVED; 941 941 942 942 trimthenstep6: 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 /* 972 * 943 /* 944 * Advance ti->ti_seq to correspond to first data byte. 945 * If data, trim to stay within window, 946 * dropping FIN if necessary. 
947 */ 948 ti->ti_seq++; 949 if (ti->ti_len > tp->rcv_wnd) { 950 todrop = ti->ti_len - tp->rcv_wnd; 951 m_adj(m, -todrop); 952 ti->ti_len = tp->rcv_wnd; 953 tiflags &= ~TH_FIN; 954 tcpstat.tcps_rcvpackafterwin++; 955 tcpstat.tcps_rcvbyteafterwin += todrop; 956 } 957 tp->snd_wl1 = ti->ti_seq - 1; 958 tp->rcv_up = ti->ti_seq; 959 goto step6; 960 } /* switch tp->t_state */ 961 /* 962 * States other than LISTEN or SYN_SENT. 963 * First check timestamp, if present. 964 * Then check that at least some bytes of segment are within 965 * receive window. If segment begins before rcv_nxt, 966 * drop leading data (and SYN); if nothing left, just ack. 967 * 968 * RFC 1323 PAWS: If we have a timestamp reply on this segment 969 * and it's less than ts_recent, drop it. 970 */ 971 /* if (ts_present && (tiflags & TH_RST) == 0 && tp->ts_recent && 972 * TSTMP_LT(ts_val, tp->ts_recent)) { 973 973 * 974 */ 975 /* 976 */ 977 * 978 * 979 * 980 * 981 * 982 * 983 * 984 * 985 * 986 * 987 /* 988 * 989 * 990 * 991 * 992 * 993 * 994 * 995 */ 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 /* 1099 * 1100 * 1101 * 1102 * 1103 * 1104 */ 1105 1106 1107 1108 1109 *If passive open, return to LISTEN state.1110 *If active open, inform user that connection was refused.1111 1112 *Inform user that connection was reset, and close tcb.1113 1114 *Close the tcb.1115 1116 1117 1118 1119 /* 1120 1121 1122 1123 1124 1125 1126 /* 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 974 */ /* Check to see if ts_recent is over 24 days old. */ 975 /* if ((int)(tcp_now - tp->ts_recent_age) > TCP_PAWS_IDLE) { 976 */ /* 977 * * Invalidate ts_recent. If this segment updates 978 * * ts_recent, the age will be reset later and ts_recent 979 * * will get a valid value. If it does not, setting 980 * * ts_recent to zero will at least satisfy the 981 * * requirement that zero be placed in the timestamp 982 * * echo reply when ts_recent isn't valid. The 983 * * age isn't reset until we get a valid ts_recent 984 * * because we don't want out-of-order segments to be 985 * * dropped when ts_recent is old. 986 * */ 987 /* tp->ts_recent = 0; 988 * } else { 989 * tcpstat.tcps_rcvduppack++; 990 * tcpstat.tcps_rcvdupbyte += ti->ti_len; 991 * tcpstat.tcps_pawsdrop++; 992 * goto dropafterack; 993 * } 994 * } 995 */ 996 997 todrop = tp->rcv_nxt - ti->ti_seq; 998 if (todrop > 0) { 999 if (tiflags & TH_SYN) { 1000 tiflags &= ~TH_SYN; 1001 ti->ti_seq++; 1002 if (ti->ti_urp > 1) 1003 ti->ti_urp--; 1004 else 1005 tiflags &= ~TH_URG; 1006 todrop--; 1007 } 1008 /* 1009 * Following if statement from Stevens, vol. 2, p. 960. 1010 */ 1011 if (todrop > ti->ti_len 1012 || (todrop == ti->ti_len && (tiflags & TH_FIN) == 0)) { 1013 /* 1014 * Any valid FIN must be to the left of the window. 1015 * At this point the FIN must be a duplicate or out 1016 * of sequence; drop it. 
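The "following if statement from Stevens" above decides whether the leading overlap covers the entire segment. A sketch of just that arithmetic (the SYN and urgent-pointer adjustments are omitted here; entire_dup mirrors the duplicate-versus-partial accounting):

    #include <stdint.h>

    typedef uint32_t tcp_seq;

    /* Bytes to trim from the front of a segment that starts before rcv_nxt.
     * *entire_dup is set when every byte is old: todrop exceeds the length,
     * or equals it with no FIN riding on the segment. */
    static int leading_trim(tcp_seq rcv_nxt, tcp_seq seq, int len, int fin,
                            int *entire_dup)
    {
        int todrop = (int)(rcv_nxt - seq);
        if (todrop <= 0) {
            *entire_dup = 0;
            return 0;                      /* nothing already seen */
        }
        *entire_dup = todrop > len || (todrop == len && !fin);
        return *entire_dup ? len : todrop; /* drop all of it, or the overlap */
    }

In the full-duplicate case the code still forces TF_ACKNOW so the peer can resynchronize, rather than dropping the segment silently.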
1017 */ 1018 tiflags &= ~TH_FIN; 1019 1020 /* 1021 * Send an ACK to resynchronize and drop any data. 1022 * But keep on processing for RST or ACK. 1023 */ 1024 tp->t_flags |= TF_ACKNOW; 1025 todrop = ti->ti_len; 1026 tcpstat.tcps_rcvduppack++; 1027 tcpstat.tcps_rcvdupbyte += todrop; 1028 } else { 1029 tcpstat.tcps_rcvpartduppack++; 1030 tcpstat.tcps_rcvpartdupbyte += todrop; 1031 } 1032 m_adj(m, todrop); 1033 ti->ti_seq += todrop; 1034 ti->ti_len -= todrop; 1035 if (ti->ti_urp > todrop) 1036 ti->ti_urp -= todrop; 1037 else { 1038 tiflags &= ~TH_URG; 1039 ti->ti_urp = 0; 1040 } 1041 } 1042 /* 1043 * If new data are received on a connection after the 1044 * user processes are gone, then RST the other end. 1045 */ 1046 if ((so->so_state & SS_NOFDREF) && 1047 tp->t_state > TCPS_CLOSE_WAIT && ti->ti_len) { 1048 tp = tcp_close(pData, tp); 1049 tcpstat.tcps_rcvafterclose++; 1050 goto dropwithreset; 1051 } 1052 1053 /* 1054 * If segment ends after window, drop trailing data 1055 * (and PUSH and FIN); if nothing left, just ACK. 1056 */ 1057 todrop = (ti->ti_seq+ti->ti_len) - (tp->rcv_nxt+tp->rcv_wnd); 1058 if (todrop > 0) { 1059 tcpstat.tcps_rcvpackafterwin++; 1060 if (todrop >= ti->ti_len) { 1061 tcpstat.tcps_rcvbyteafterwin += ti->ti_len; 1062 /* 1063 * If a new connection request is received 1064 * while in TIME_WAIT, drop the old connection 1065 * and start over if the sequence numbers 1066 * are above the previous ones. 1067 */ 1068 if (tiflags & TH_SYN && 1069 tp->t_state == TCPS_TIME_WAIT && 1070 SEQ_GT(ti->ti_seq, tp->rcv_nxt)) { 1071 iss = tp->rcv_nxt + TCP_ISSINCR; 1072 tp = tcp_close(pData, tp); 1073 goto findso; 1074 } 1075 /* 1076 * If window is closed can only take segments at 1077 * window edge, and have to drop data and PUSH from 1078 * incoming segments. Continue processing, but 1079 * remember to ack. Otherwise, drop segment 1080 * and ack. 1081 */ 1082 if (tp->rcv_wnd == 0 && ti->ti_seq == tp->rcv_nxt) { 1083 tp->t_flags |= TF_ACKNOW; 1084 tcpstat.tcps_rcvwinprobe++; 1085 } else 1086 goto dropafterack; 1087 } else 1088 tcpstat.tcps_rcvbyteafterwin += todrop; 1089 m_adj(m, -todrop); 1090 ti->ti_len -= todrop; 1091 tiflags &= ~(TH_PUSH|TH_FIN); 1092 } 1093 1094 /* 1095 * If last ACK falls within this segment's sequence numbers, 1096 * record its timestamp. 1097 */ 1098 /* if (ts_present && SEQ_LEQ(ti->ti_seq, tp->last_ack_sent) && 1099 * SEQ_LT(tp->last_ack_sent, ti->ti_seq + ti->ti_len + 1100 * ((tiflags & (TH_SYN|TH_FIN)) != 0))) { 1101 * tp->ts_recent_age = tcp_now; 1102 * tp->ts_recent = ts_val; 1103 * } 1104 */ 1105 1106 /* 1107 * If the RST bit is set examine the state: 1108 * SYN_RECEIVED STATE: 1109 * If passive open, return to LISTEN state. 1110 * If active open, inform user that connection was refused. 1111 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT2, CLOSE_WAIT STATES: 1112 * Inform user that connection was reset, and close tcb. 1113 * CLOSING, LAST_ACK, TIME_WAIT STATES 1114 * Close the tcb. 
1115 */ 1116 if (tiflags&TH_RST) switch (tp->t_state) { 1117 1118 case TCPS_SYN_RECEIVED: 1119 /* so->so_error = ECONNREFUSED; */ 1120 goto close; 1121 1122 case TCPS_ESTABLISHED: 1123 case TCPS_FIN_WAIT_1: 1124 case TCPS_FIN_WAIT_2: 1125 case TCPS_CLOSE_WAIT: 1126 /* so->so_error = ECONNRESET; */ 1127 close: 1128 tp->t_state = TCPS_CLOSED; 1129 tcpstat.tcps_drops++; 1130 tp = tcp_close(pData, tp); 1131 goto drop; 1132 1133 case TCPS_CLOSING: 1134 case TCPS_LAST_ACK: 1135 case TCPS_TIME_WAIT: 1136 tp = tcp_close(pData, tp); 1137 goto drop; 1138 } 1139 1140 /* 1141 * If a SYN is in the window, then this is an 1142 * error and we send an RST and drop the connection. 1143 */ 1144 if (tiflags & TH_SYN) { 1145 tp = tcp_drop(pData, tp,0); 1146 goto dropwithreset; 1147 } 1148 1149 /* 1150 * If the ACK bit is off we drop the segment and return. 1151 */ 1152 if ((tiflags & TH_ACK) == 0) goto drop; 1153 1154 /* 1155 * Ack processing. 1156 */ 1157 switch (tp->t_state) { 1158 /* 1159 * In SYN_RECEIVED state if the ack ACKs our SYN then enter 1160 * ESTABLISHED state and continue processing, otherwise 1161 * send an RST. una<=ack<=max 1162 */ 1163 case TCPS_SYN_RECEIVED: 1164 1165 if (SEQ_GT(tp->snd_una, ti->ti_ack) || 1166 SEQ_GT(ti->ti_ack, tp->snd_max)) 1167 goto dropwithreset; 1168 tcpstat.tcps_connects++; 1169 tp->t_state = TCPS_ESTABLISHED; 1170 /* 1171 * The sent SYN is ack'ed with our sequence number +1 1172 * The first data byte already in the buffer will get 1173 * lost if no correction is made. This is only needed for 1174 * SS_CTL since the buffer is empty otherwise. 1175 * tp->snd_una++; or: 1176 */ 1177 tp->snd_una=ti->ti_ack; 1178 1178 soisfconnected(so); 1179 1179 1180 1181 /* 1182 * 1183 * 1184 * 1185 * 1180 /* Do window scaling? */ 1181 /* if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 1182 * (TF_RCVD_SCALE|TF_REQ_SCALE)) { 1183 * tp->snd_scale = tp->requested_s_scale; 1184 * tp->rcv_scale = tp->request_r_scale; 1185 * } 1186 1186 */ 1187 1187 #ifndef VBOX_WITH_BSD_TCP_REASS 1188 1188 (void) tcp_reass(pData, tp, (struct tcpiphdr *)0, (struct mbuf *)0); 1189 1189 #else /* VBOX_WITH_BSD_TCP_REASS */ 1190 1190 (void) tcp_reass(pData, tp, (struct tcphdr *)0, (int *)0, (struct mbuf *)0); 1191 1191 #endif /*VBOX_WITH_BSD_TCP_REASS*/ 1192 1193 1194 1195 1196 1197 1198 1199 1200 *tp->snd_una < ti->ti_ack <= tp->snd_max1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 /* 1300 * 1301 * 1302 */ 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 /* 1346 * 1347 */ 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1192 tp->snd_wl1 = ti->ti_seq - 1; 1193 /* Avoid ack processing; snd_una==ti_ack => dup ack */ 1194 goto 
synrx_to_est; 1195 /* fall into ... */ 1196 1197 /* 1198 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range 1199 * ACKs. If the ack is in the range 1200 * tp->snd_una < ti->ti_ack <= tp->snd_max 1201 * then advance tp->snd_una to ti->ti_ack and drop 1202 * data from the retransmission queue. If this ACK reflects 1203 * more up to date window information we update our window information. 1204 */ 1205 case TCPS_ESTABLISHED: 1206 case TCPS_FIN_WAIT_1: 1207 case TCPS_FIN_WAIT_2: 1208 case TCPS_CLOSE_WAIT: 1209 case TCPS_CLOSING: 1210 case TCPS_LAST_ACK: 1211 case TCPS_TIME_WAIT: 1212 1213 if (SEQ_LEQ(ti->ti_ack, tp->snd_una)) { 1214 if (ti->ti_len == 0 && tiwin == tp->snd_wnd) { 1215 tcpstat.tcps_rcvdupack++; 1216 DEBUG_MISC((dfd," dup ack m = %lx so = %lx \n", 1217 (long )m, (long )so)); 1218 /* 1219 * If we have outstanding data (other than 1220 * a window probe), this is a completely 1221 * duplicate ack (ie, window info didn't 1222 * change), the ack is the biggest we've 1223 * seen and we've seen exactly our rexmt 1224 * threshold of them, assume a packet 1225 * has been dropped and retransmit it. 1226 * Kludge snd_nxt & the congestion 1227 * window so we send only this one 1228 * packet. 1229 * 1230 * We know we're losing at the current 1231 * window size so do congestion avoidance 1232 * (set ssthresh to half the current window 1233 * and pull our congestion window back to 1234 * the new ssthresh). 1235 * 1236 * Dup acks mean that packets have left the 1237 * network (they're now cached at the receiver) 1238 * so bump cwnd by the amount in the receiver 1239 * to keep a constant cwnd packets in the 1240 * network. 1241 */ 1242 if (tp->t_timer[TCPT_REXMT] == 0 || 1243 ti->ti_ack != tp->snd_una) 1244 tp->t_dupacks = 0; 1245 else if (++tp->t_dupacks == tcprexmtthresh) { 1246 tcp_seq onxt = tp->snd_nxt; 1247 u_int win = 1248 min(tp->snd_wnd, tp->snd_cwnd) / 2 / 1249 tp->t_maxseg; 1250 1251 if (win < 2) 1252 win = 2; 1253 tp->snd_ssthresh = win * tp->t_maxseg; 1254 tp->t_timer[TCPT_REXMT] = 0; 1255 tp->t_rtt = 0; 1256 tp->snd_nxt = ti->ti_ack; 1257 tp->snd_cwnd = tp->t_maxseg; 1258 (void) tcp_output(pData, tp); 1259 tp->snd_cwnd = tp->snd_ssthresh + 1260 tp->t_maxseg * tp->t_dupacks; 1261 if (SEQ_GT(onxt, tp->snd_nxt)) 1262 tp->snd_nxt = onxt; 1263 goto drop; 1264 } else if (tp->t_dupacks > tcprexmtthresh) { 1265 tp->snd_cwnd += tp->t_maxseg; 1266 (void) tcp_output(pData, tp); 1267 goto drop; 1268 } 1269 } else 1270 tp->t_dupacks = 0; 1271 break; 1272 } 1273 synrx_to_est: 1274 /* 1275 * If the congestion window was inflated to account 1276 * for the other side's cached packets, retract it. 1277 */ 1278 if (tp->t_dupacks > tcprexmtthresh && 1279 tp->snd_cwnd > tp->snd_ssthresh) 1280 tp->snd_cwnd = tp->snd_ssthresh; 1281 tp->t_dupacks = 0; 1282 if (SEQ_GT(ti->ti_ack, tp->snd_max)) { 1283 tcpstat.tcps_rcvacktoomuch++; 1284 goto dropafterack; 1285 } 1286 acked = ti->ti_ack - tp->snd_una; 1287 tcpstat.tcps_rcvackpack++; 1288 tcpstat.tcps_rcvackbyte += acked; 1289 1290 /* 1291 * If we have a timestamp reply, update smoothed 1292 * round trip time. If no timestamp is present but 1293 * transmit timer is running and timed sequence 1294 * number was acked, update smoothed round trip time. 1295 * Since we now have an rtt measurement, cancel the 1296 * timer backoff (cf., Phil Karn's retransmit alg.). 1297 * Recompute the initial retransmit timer. 
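The third duplicate ACK above triggers fast retransmit: ssthresh becomes half the effective window (in whole segments, floor of two) and cwnd collapses to one segment so tcp_output() resends only the presumed-lost packet. The arithmetic, as a sketch:

    #include <stdint.h>

    /* Fast-retransmit entry, per the tcprexmtthresh branch above. */
    static void enter_fast_retransmit(uint32_t snd_wnd, uint32_t *snd_cwnd,
                                      uint32_t *snd_ssthresh, uint32_t maxseg)
    {
        uint32_t win = (snd_wnd < *snd_cwnd ? snd_wnd : *snd_cwnd) / 2 / maxseg;
        if (win < 2)
            win = 2;
        *snd_ssthresh = win * maxseg;   /* half the window, segment-aligned */
        *snd_cwnd = maxseg;             /* send exactly one segment now */
    }

Afterwards cwnd is reinflated to ssthresh + dupacks * maxseg, because each further duplicate ACK means another segment has left the network and is cached at the receiver.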
1298 */ 1299 /* if (ts_present) 1300 * tcp_xmit_timer(tp, tcp_now-ts_ecr+1); 1301 * else 1302 */ 1303 if (tp->t_rtt && SEQ_GT(ti->ti_ack, tp->t_rtseq)) 1304 tcp_xmit_timer(pData, tp,tp->t_rtt); 1305 1306 /* 1307 * If all outstanding data is acked, stop retransmit 1308 * timer and remember to restart (more output or persist). 1309 * If there is more data to be acked, restart retransmit 1310 * timer, using current (possibly backed-off) value. 1311 */ 1312 if (ti->ti_ack == tp->snd_max) { 1313 tp->t_timer[TCPT_REXMT] = 0; 1314 needoutput = 1; 1315 } else if (tp->t_timer[TCPT_PERSIST] == 0) 1316 tp->t_timer[TCPT_REXMT] = tp->t_rxtcur; 1317 /* 1318 * When new data is acked, open the congestion window. 1319 * If the window gives us less than ssthresh packets 1320 * in flight, open exponentially (maxseg per packet). 1321 * Otherwise open linearly: maxseg per window 1322 * (maxseg^2 / cwnd per packet). 1323 */ 1324 { 1325 register u_int cw = tp->snd_cwnd; 1326 register u_int incr = tp->t_maxseg; 1327 1328 if (cw > tp->snd_ssthresh) 1329 incr = incr * incr / cw; 1330 tp->snd_cwnd = min(cw + incr, TCP_MAXWIN<<tp->snd_scale); 1331 } 1332 if (acked > so->so_snd.sb_cc) { 1333 tp->snd_wnd -= so->so_snd.sb_cc; 1334 sbdrop(&so->so_snd, (int )so->so_snd.sb_cc); 1335 ourfinisacked = 1; 1336 } else { 1337 sbdrop(&so->so_snd, acked); 1338 tp->snd_wnd -= acked; 1339 ourfinisacked = 0; 1340 } 1341 /* 1342 * XXX sowwakup is called when data is acked and there's room for 1343 * for more data... it should read() the socket 1344 */ 1345 /* if (so->so_snd.sb_flags & SB_NOTIFY) 1346 * sowwakeup(so); 1347 */ 1348 tp->snd_una = ti->ti_ack; 1349 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 1350 tp->snd_nxt = tp->snd_una; 1351 1352 switch (tp->t_state) { 1353 1354 /* 1355 * In FIN_WAIT_1 STATE in addition to the processing 1356 * for the ESTABLISHED state if our FIN is now acknowledged 1357 * then enter FIN_WAIT_2. 1358 */ 1359 case TCPS_FIN_WAIT_1: 1360 if (ourfinisacked) { 1361 /* 1362 * If we can't receive any more 1363 * data, then closing user can proceed. 1364 * Starting the timer is contrary to the 1365 * specification, but if we don't get a FIN 1366 * we'll hang forever. 1367 */ 1368 if (so->so_state & SS_FCANTRCVMORE) { 1369 soisfdisconnected(so); 1370 tp->t_timer[TCPT_2MSL] = tcp_maxidle; 1371 } 1372 tp->t_state = TCPS_FIN_WAIT_2; 1373 } 1374 break; 1375 1376 /* 1377 * In CLOSING STATE in addition to the processing for 1378 * the ESTABLISHED state if the ACK acknowledges our FIN 1379 * then enter the TIME-WAIT state, otherwise ignore 1380 * the segment. 1381 */ 1382 case TCPS_CLOSING: 1383 if (ourfinisacked) { 1384 tp->t_state = TCPS_TIME_WAIT; 1385 tcp_canceltimers(tp); 1386 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL; 1387 soisfdisconnected(so); 1388 } 1389 break; 1390 1391 /* 1392 * In LAST_ACK, we may still be waiting for data to drain 1393 * and/or to be acked, as well as for the ack of our FIN. 1394 * If our FIN is now acknowledged, delete the TCB, 1395 * enter the closed state and return. 1396 */ 1397 case TCPS_LAST_ACK: 1398 if (ourfinisacked) { 1399 tp = tcp_close(pData, tp); 1400 goto drop; 1401 } 1402 break; 1403 1404 /* 1405 * In TIME_WAIT state the only thing that should arrive 1406 * is a retransmission of the remote FIN. Acknowledge 1407 * it and restart the finack timer. 
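Once an ACK covers new data, the congestion window opens per the cw/incr block above: exponentially below ssthresh (slow start) and roughly one segment per round trip above it. A sketch, with TCP_MAXWIN assumed to be the usual 65535:

    #include <stdint.h>

    /* Grow cwnd on each ACK of new data, capped at TCP_MAXWIN << scale. */
    static uint32_t open_cwnd(uint32_t cwnd, uint32_t ssthresh,
                              uint32_t maxseg, unsigned scale)
    {
        uint32_t incr = maxseg;                /* slow start: +maxseg per ACK */
        uint32_t cap = 65535u << scale;
        if (cwnd > ssthresh)
            incr = incr * incr / cwnd;         /* avoidance: ~maxseg per RTT */
        return cwnd + incr < cap ? cwnd + incr : cap;
    }

The maxseg^2/cwnd increment per ACK sums to about one maxseg per window's worth of ACKs, which is the linear-growth regime.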
1408 */ 1409 case TCPS_TIME_WAIT: 1410 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL; 1411 goto dropafterack; 1412 } 1413 } /* switch(tp->t_state) */ 1414 1414 1415 1415 step6: 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1416 /* 1417 * Update window information. 1418 * Don't look at window if no ACK: TAC's send garbage on first SYN. 1419 */ 1420 if ((tiflags & TH_ACK) && 1421 (SEQ_LT(tp->snd_wl1, ti->ti_seq) || 1422 (tp->snd_wl1 == ti->ti_seq && (SEQ_LT(tp->snd_wl2, ti->ti_ack) || 1423 (tp->snd_wl2 == ti->ti_ack && tiwin > tp->snd_wnd))))) { 1424 /* keep track of pure window updates */ 1425 if (ti->ti_len == 0 && 1426 tp->snd_wl2 == ti->ti_ack && tiwin > tp->snd_wnd) 1427 tcpstat.tcps_rcvwinupd++; 1428 tp->snd_wnd = tiwin; 1429 tp->snd_wl1 = ti->ti_seq; 1430 tp->snd_wl2 = ti->ti_ack; 1431 if (tp->snd_wnd > tp->max_sndwnd) 1432 tp->max_sndwnd = tp->snd_wnd; 1433 needoutput = 1; 1434 } 1435 1436 /* 1437 * Process segments with URG. 1438 */ 1439 if ((tiflags & TH_URG) && ti->ti_urp && 1440 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 1441 /* 1442 * This is a kludge, but if we receive and accept 1443 * random urgent pointers, we'll crash in 1444 * soreceive. It's hard to imagine someone 1445 * actually wanting to send this much urgent data. 1446 */ 1447 if (ti->ti_urp + so->so_rcv.sb_cc > so->so_rcv.sb_datalen) { 1448 ti->ti_urp = 0; 1449 tiflags &= ~TH_URG; 1450 goto dodata; 1451 } 1452 /* 1453 * If this segment advances the known urgent pointer, 1454 * then mark the data stream. This should not happen 1455 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since 1456 * a FIN has been received from the remote side. 1457 * In these states we ignore the URG. 1458 * 1459 * According to RFC961 (Assigned Protocols), 1460 * the urgent pointer points to the last octet 1461 * of urgent data. We continue, however, 1462 * to consider it to indicate the first octet 1463 * of data past the urgent section as the original 1464 * spec states (in one of two places). 1465 */ 1466 if (SEQ_GT(ti->ti_seq+ti->ti_urp, tp->rcv_up)) { 1467 tp->rcv_up = ti->ti_seq + ti->ti_urp; 1468 so->so_urgc = so->so_rcv.sb_cc + 1469 (tp->rcv_up - tp->rcv_nxt); /* -1; */ 1470 tp->rcv_up = ti->ti_seq + ti->ti_urp; 1471 1472 } 1473 } else 1474 /* 1475 * If no out of band data is expected, 1476 * pull receive urgent pointer along 1477 * with the receive window. 1478 */ 1479 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) 1480 tp->rcv_up = tp->rcv_nxt; 1481 1481 dodata: 1482 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1483 /* 1484 * Process the segment text, merging it into the TCP sequencing queue, 1485 * and arranging for acknowledgment of receipt if necessary. 1486 * This process logically involves adjusting tp->rcv_wnd as data 1487 * is presented to the user (this happens in tcp_usrreq.c, 1488 * case PRU_RCVD). If a FIN has already been received on this 1489 * connection then we just ignore the text. 
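The snd_wl1/snd_wl2 bookkeeping in step6 above is what keeps stale segments from rolling the send window backwards: an advertisement is believed only if it arrives in a strictly newer segment, or the same segment with a newer ACK, or the same segment and ACK with a larger window. As a standalone test:

    #include <stdint.h>

    typedef uint32_t tcp_seq;
    #define SEQ_LT(a,b) ((int32_t)((a)-(b)) < 0)

    /* Accept tiwin as the new send window? (the step6 test above) */
    static int window_update_ok(tcp_seq snd_wl1, tcp_seq snd_wl2,
                                tcp_seq seq, tcp_seq ack,
                                uint32_t tiwin, uint32_t snd_wnd)
    {
        return SEQ_LT(snd_wl1, seq)
            || (snd_wl1 == seq
                && (SEQ_LT(snd_wl2, ack)
                    || (snd_wl2 == ack && tiwin > snd_wnd)));
    }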
1490 */ 1491 if ((ti->ti_len || (tiflags&TH_FIN)) && 1492 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 1493 1493 #ifndef VBOX_WITH_BSD_TCP_REASS 1494 1494 TCP_REASS(pData, tp, ti, m, so, tiflags); 1495 1495 #else /* VBOX_WITH_BSD_TCP_REASS */ 1496 1496 if (ti->ti_seq == tp->rcv_nxt 1497 1497 && LIST_EMPTY(&tp->t_segq) 1498 1498 && tp->t_state == TCPS_ESTABLISHED) { 1499 1500 1501 1502 1503 1504 1505 1506 1507 1499 DELAY_ACK(tp, ti); /* little bit different from BSD declaration see netinet/tcp_input.c */ 1500 tp->rcv_nxt += tlen; 1501 tiflags = ti->ti_t.th_flags & TH_FIN; 1502 tcpstat.tcps_rcvpack++; 1503 tcpstat.tcps_rcvbyte += tlen; 1504 if (so->so_state & SS_FCANTRCVMORE) 1505 m_freem(pData, m); 1506 else 1507 sbappend(pData, so, m); 1508 1508 } 1509 1509 else { … … 1512 1512 } 1513 1513 #endif /* VBOX_WITH_BSD_TCP_REASS */ 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1514 /* 1515 * Note the amount of data that peer has sent into 1516 * our window, in order to estimate the sender's 1517 * buffer size. 1518 */ 1519 len = so->so_rcv.sb_datalen - (tp->rcv_adv - tp->rcv_nxt); 1520 } else { 1521 mbuf_freed = 1; /* The mbuf must be freed, but only when its content is not needed anymore. */ 1522 tiflags &= ~TH_FIN; 1523 } 1524 1525 /* 1526 * If FIN is received ACK the FIN and let the user know 1527 * that the connection is closing. 1528 */ 1529 if (tiflags & TH_FIN) { 1530 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 1531 /* 1532 * If we receive a FIN we can't send more data, 1533 * set it SS_FDRAIN 1534 1534 * Shutdown the socket if there is no rx data in the 1535 1536 1537 1538 1539 1540 /* 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 /* 1597 */ 1598 /* 1599 * 1600 * 1601 * 1602 */ 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1535 * buffer. 1536 * soread() is called on completion of shutdown() and 1537 * will got to TCPS_LAST_ACK, and use tcp_output() 1538 * to send the FIN. 1539 */ 1540 /* sofcantrcvmore(so); */ 1541 sofwdrain(so); 1542 1543 tp->t_flags |= TF_ACKNOW; 1544 tp->rcv_nxt++; 1545 } 1546 switch (tp->t_state) { 1547 1548 /* 1549 * In SYN_RECEIVED and ESTABLISHED STATES 1550 * enter the CLOSE_WAIT state. 1551 */ 1552 case TCPS_SYN_RECEIVED: 1553 case TCPS_ESTABLISHED: 1554 if(so->so_emu == EMU_CTL) /* no shutdown on socket */ 1555 tp->t_state = TCPS_LAST_ACK; 1556 else 1557 tp->t_state = TCPS_CLOSE_WAIT; 1558 break; 1559 1560 /* 1561 * If still in FIN_WAIT_1 STATE FIN has not been acked so 1562 * enter the CLOSING state. 1563 */ 1564 case TCPS_FIN_WAIT_1: 1565 tp->t_state = TCPS_CLOSING; 1566 break; 1567 1568 /* 1569 * In FIN_WAIT_2 state enter the TIME_WAIT state, 1570 * starting the time-wait timer, turning off the other 1571 * standard timers. 1572 */ 1573 case TCPS_FIN_WAIT_2: 1574 tp->t_state = TCPS_TIME_WAIT; 1575 tcp_canceltimers(tp); 1576 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL; 1577 soisfdisconnected(so); 1578 break; 1579 1580 /* 1581 * In TIME_WAIT state restart the 2 MSL time_wait timer. 1582 */ 1583 case TCPS_TIME_WAIT: 1584 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL; 1585 break; 1586 } 1587 } 1588 1589 /* 1590 * If this is a small packet, then ACK now - with Nagel 1591 * congestion avoidance sender won't send more until 1592 * he gets an ACK. 1593 * 1594 * See above. 
1595 */ 1596 /* if (ti->ti_len && (unsigned)ti->ti_len < tp->t_maxseg) { 1597 */ 1598 /* if ((ti->ti_len && (unsigned)ti->ti_len < tp->t_maxseg && 1599 * (so->so_iptos & IPTOS_LOWDELAY) == 0) || 1600 * ((so->so_iptos & IPTOS_LOWDELAY) && 1601 * ((struct tcpiphdr_2 *)ti)->first_char == (char)27)) { 1602 */ 1603 if (ti->ti_len && (unsigned)ti->ti_len <= 5 && 1604 ((struct tcpiphdr_2 *)ti)->first_char == (char)27) { 1605 tp->t_flags |= TF_ACKNOW; 1606 } 1607 1608 if (mbuf_freed) { 1609 m_free(pData, m); 1610 } 1611 /* 1612 * Return any desired output. 1613 */ 1614 if (needoutput || (tp->t_flags & TF_ACKNOW)) { 1615 (void) tcp_output(pData, tp); 1616 } 1617 return; 1618 1618 1619 1619 dropafterack: 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1620 /* 1621 * Generate an ACK dropping incoming segment if it occupies 1622 * sequence space, where the ACK reflects our state. 1623 */ 1624 if (tiflags & TH_RST) 1625 goto drop; 1626 m_freem(pData, m); 1627 tp->t_flags |= TF_ACKNOW; 1628 (void) tcp_output(pData, tp); 1629 return; 1630 1630 1631 1631 dropwithreset: 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1632 /* reuses m if m!=NULL, m_free() unnecessary */ 1633 if (tiflags & TH_ACK) 1634 tcp_respond(pData, tp, ti, m, (tcp_seq)0, ti->ti_ack, TH_RST); 1635 else { 1636 if (tiflags & TH_SYN) ti->ti_len++; 1637 tcp_respond(pData, tp, ti, m, ti->ti_seq+ti->ti_len, (tcp_seq)0, 1638 TH_RST|TH_ACK); 1639 } 1640 1641 return; 1642 1642 1643 1643 drop: 1644 1645 1646 1647 1648 1649 1644 /* 1645 * Drop space held by incoming segment and return. 1646 */ 1647 m_free(pData, m); 1648 1649 return; 1650 1650 } 1651 1651 1652 1652 /* , ts_present, ts_val, ts_ecr) */ 1653 /* 1654 * 1653 /* int *ts_present; 1654 * u_int32_t *ts_val, *ts_ecr; 1655 1655 */ 1656 1656 void 1657 1657 tcp_dooptions(PNATState pData, struct tcpcb *tp, u_char *cp, int cnt, struct tcpiphdr *ti) 1658 1658 { 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 (void) tcp_mss(pData, tp, mss);/* sets t_maxseg */1689 1690 1691 /* 1692 * 1693 * 1694 * 1695 * 1696 * 1697 * 1698 * 1699 */ 1700 /* 1701 * 1702 * 1703 * 1704 * 1705 * 1706 * 1707 * 1659 u_int16_t mss; 1660 int opt, optlen; 1661 1662 DEBUG_CALL("tcp_dooptions"); 1663 DEBUG_ARGS((dfd," tp = %lx cnt=%i \n", (long )tp, cnt)); 1664 1665 for (; cnt > 0; cnt -= optlen, cp += optlen) { 1666 opt = cp[0]; 1667 if (opt == TCPOPT_EOL) 1668 break; 1669 if (opt == TCPOPT_NOP) 1670 optlen = 1; 1671 else { 1672 optlen = cp[1]; 1673 if (optlen <= 0) 1674 break; 1675 } 1676 switch (opt) { 1677 1678 default: 1679 continue; 1680 1681 case TCPOPT_MAXSEG: 1682 if (optlen != TCPOLEN_MAXSEG) 1683 continue; 1684 if (!(ti->ti_flags & TH_SYN)) 1685 continue; 1686 memcpy((char *) &mss, (char *) cp + 2, sizeof(mss)); 1687 NTOHS(mss); 1688 (void) tcp_mss(pData, tp, mss); /* sets t_maxseg */ 1689 break; 1690 1691 /* case TCPOPT_WINDOW: 1692 * if (optlen != TCPOLEN_WINDOW) 1693 * continue; 1694 * if (!(ti->ti_flags & TH_SYN)) 1695 * continue; 1696 * tp->t_flags |= TF_RCVD_SCALE; 1697 * tp->requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT); 1698 * break; 1699 */ 1700 /* case TCPOPT_TIMESTAMP: 1701 * if (optlen != TCPOLEN_TIMESTAMP) 1702 * continue; 1703 * *ts_present = 1; 1704 * memcpy((char *) ts_val, (char *)cp + 2, sizeof(*ts_val)); 1705 * NTOHL(*ts_val); 1706 * memcpy((char *) ts_ecr, (char *)cp + 6, sizeof(*ts_ecr)); 1707 * NTOHL(*ts_ecr); 1708 1708 * 1709 */ 1710 * 1711 * 1712 * 1713 /* 1714 * 1715 * 1716 * 1717 * 
1718 */ 1719 1720 1709 */ /* 1710 * * A timestamp received in a SYN makes 1711 * * it ok to send timestamp requests and replies. 1712 * */ 1713 /* if (ti->ti_flags & TH_SYN) { 1714 * tp->t_flags |= TF_RCVD_TSTMP; 1715 * tp->ts_recent = *ts_val; 1716 * tp->ts_recent_age = tcp_now; 1717 * } 1718 */ break; 1719 } 1720 } 1721 1721 } 1722 1722 … … 1733 1733 void 1734 1734 tcp_pulloutofband(so, ti, m) 1735 1736 1737 1735 struct socket *so; 1736 struct tcpiphdr *ti; 1737 register struct mbuf *m; 1738 1738 { 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1739 int cnt = ti->ti_urp - 1; 1740 1741 while (cnt >= 0) { 1742 if (m->m_len > cnt) { 1743 char *cp = mtod(m, caddr_t) + cnt; 1744 struct tcpcb *tp = sototcpcb(so); 1745 1746 tp->t_iobc = *cp; 1747 tp->t_oobflags |= TCPOOB_HAVEDATA; 1748 memcpy(sp, cp+1, (unsigned)(m->m_len - cnt - 1)); 1749 m->m_len--; 1750 return; 1751 } 1752 cnt -= m->m_len; 1753 m = m->m_next; /* XXX WRONG! Fix it! */ 1754 if (m == 0) 1755 break; 1756 } 1757 panic("tcp_pulloutofband"); 1758 1758 } 1759 1759 … … 1768 1768 tcp_xmit_timer(PNATState pData, register struct tcpcb *tp, int rtt) 1769 1769 { 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1770 register short delta; 1771 1772 DEBUG_CALL("tcp_xmit_timer"); 1773 DEBUG_ARG("tp = %lx", (long)tp); 1774 DEBUG_ARG("rtt = %d", rtt); 1775 1776 tcpstat.tcps_rttupdated++; 1777 if (tp->t_srtt != 0) { 1778 /* 1779 * srtt is stored as fixed point with 3 bits after the 1780 * binary point (i.e., scaled by 8). The following magic 1781 * is equivalent to the smoothing algorithm in rfc793 with 1782 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed 1783 * point). Adjust rtt to origin 0. 1784 */ 1785 delta = rtt - 1 - (tp->t_srtt >> TCP_RTT_SHIFT); 1786 if ((tp->t_srtt += delta) <= 0) 1787 tp->t_srtt = 1; 1788 /* 1789 * We accumulate a smoothed rtt variance (actually, a 1790 * smoothed mean difference), then set the retransmit 1791 * timer to smoothed rtt + 4 times the smoothed variance. 1792 * rttvar is stored as fixed point with 2 bits after the 1793 * binary point (scaled by 4). The following is 1794 * equivalent to rfc793 smoothing with an alpha of .75 1795 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces 1796 * rfc793's wired-in beta. 1797 */ 1798 if (delta < 0) 1799 delta = -delta; 1800 delta -= (tp->t_rttvar >> TCP_RTTVAR_SHIFT); 1801 if ((tp->t_rttvar += delta) <= 0) 1802 tp->t_rttvar = 1; 1803 } else { 1804 /* 1805 * No rtt measurement yet - use the unsmoothed rtt. 1806 * Set the variance to half the rtt (so our first 1807 * retransmit happens at 3*rtt). 1808 */ 1809 tp->t_srtt = rtt << TCP_RTT_SHIFT; 1810 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); 1811 } 1812 tp->t_rtt = 0; 1813 tp->t_rxtshift = 0; 1814 1815 /* 1816 * the retransmit should happen at rtt + 4 * rttvar. 1817 * Because of the way we do the smoothing, srtt and rttvar 1818 * will each average +1/2 tick of bias. When we compute 1819 * the retransmit timer, we want 1/2 tick of rounding and 1820 * 1 extra tick because of +-1/2 tick uncertainty in the 1821 * firing of the timer. The bias will give us exactly the 1822 * 1.5 tick we need. 
But, because the bias is 1823 * statistical, we have to test that we don't drop below 1824 * the minimum feasible timer (which is 2 ticks). 1825 */ 1826 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), 1827 (short)tp->t_rttmin, TCPTV_REXMTMAX); /* XXX */ 1828 1829 /* 1830 * We received an ack for a packet that wasn't retransmitted; 1831 * it is probably safe to discard any error indications we've 1832 * received recently. This isn't quite right, but close enough 1833 * for now (a route might have failed after we sent a segment, 1834 * and the return path might not be symmetrical). 1835 */ 1836 tp->t_softerror = 0; 1837 1837 } 1838 1838 … … 1856 1856 tcp_mss(PNATState pData, register struct tcpcb *tp, u_int offer) 1857 1857 { 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1858 struct socket *so = tp->t_socket; 1859 int mss; 1860 1861 DEBUG_CALL("tcp_mss"); 1862 DEBUG_ARG("tp = %lx", (long)tp); 1863 DEBUG_ARG("offer = %d", offer); 1864 1865 mss = min(if_mtu, if_mru) - sizeof(struct tcpiphdr); 1866 if (offer) 1867 mss = min(mss, offer); 1868 mss = max(mss, 32); 1869 if (mss < tp->t_maxseg || offer != 0) 1870 tp->t_maxseg = mss; 1871 1872 tp->snd_cwnd = mss; 1873 1874 sbreserve(&so->so_snd, tcp_sndspace+((tcp_sndspace%mss)?(mss-(tcp_sndspace%mss)):0)); 1875 sbreserve(&so->so_rcv, tcp_rcvspace+((tcp_rcvspace%mss)?(mss-(tcp_rcvspace%mss)):0)); 1876 1877 DEBUG_MISC((dfd, " returning mss = %d\n", mss)); 1878 1879 return mss; 1880 1880 } -
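tcp_xmit_timer() above is the classic Van Jacobson fixed-point estimator: srtt keeps 3 fraction bits (scaled by 8), rttvar keeps 2 (scaled by 4), giving alpha = 7/8 and beta = 3/4 using only shifts. A sketch with the BSD TCP_RTT_SHIFT/TCP_RTTVAR_SHIFT constants written out as literals:

    /* One RTT sample, in slow-timer ticks, folded into srtt/rttvar. */
    static void xmit_timer_sketch(short *srtt, short *rttvar, int rtt)
    {
        if (*srtt != 0) {
            short delta = rtt - 1 - (*srtt >> 3);  /* srtt += (rtt - srtt)/8 */
            if ((*srtt += delta) <= 0)
                *srtt = 1;
            if (delta < 0)
                delta = -delta;
            delta -= *rttvar >> 2;                 /* rttvar += (|err| - rttvar)/4 */
            if ((*rttvar += delta) <= 0)
                *rttvar = 1;
        } else {
            *srtt = rtt << 3;      /* first sample taken as-is */
            *rttvar = rtt << 1;    /* variance = rtt/2 (scaled by 4) */
        }
    }

tcp_mss() then derives the segment size from the link MTU/MRU and pads both socket buffers up to a whole number of segments; the rounding expression reads more clearly unrolled:

    /* mss = min(mtu, mru) - header, clamped to the peer's offer, floor 32. */
    static int mss_sketch(int mtu, int mru, int offer, int hdrsiz)
    {
        int mss = (mtu < mru ? mtu : mru) - hdrsiz;  /* sizeof(struct tcpiphdr) */
        if (offer && offer < mss)
            mss = offer;
        return mss < 32 ? 32 : mss;
    }

    /* Round a buffer size up to a multiple of mss (the sbreserve() math). */
    static unsigned round_to_mss(unsigned space, unsigned mss)
    {
        return space % mss ? space + (mss - space % mss) : space;
    }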
trunk/src/VBox/Devices/Network/slirp/tcp_output.c
r1076 r14470 1 1 /* 2 2 * Copyright (c) 1982, 1986, 1988, 1990, 1993 3 * 3 * The Regents of the University of California. All rights reserved. 4 4 * 5 5 * Redistribution and use in source and binary forms, with or without … … 13 13 * 3. All advertising materials mentioning features or use of this software 14 14 * must display the following acknowledgement: 15 * 16 * 15 * This product includes software developed by the University of 16 * California, Berkeley and its contributors. 17 17 * 4. Neither the name of the University nor the names of its contributors 18 18 * may be used to endorse or promote products derived from this software … … 31 31 * SUCH DAMAGE. 32 32 * 33 * @(#)tcp_output.c8.3 (Berkeley) 12/30/9333 * @(#)tcp_output.c 8.3 (Berkeley) 12/30/93 34 34 * tcp_output.c,v 1.3 1994/09/15 10:36:55 davidg Exp 35 35 */ … … 50 50 */ 51 51 const char * const tcpstates[] = { 52 /* 53 "REDIRECT", "LISTEN","SYN_SENT", "SYN_RCVD",54 55 52 /* "CLOSED", "LISTEN", "SYN_SENT", "SYN_RCVD", */ 53 "REDIRECT", "LISTEN", "SYN_SENT", "SYN_RCVD", 54 "ESTABLISHED", "CLOSE_WAIT", "FIN_WAIT_1", "CLOSING", 55 "LAST_ACK", "FIN_WAIT_2", "TIME_WAIT", 56 56 }; 57 57 58 58 static const u_char tcp_outflags[TCP_NSTATES] = { 59 60 61 59 TH_RST|TH_ACK, 0, TH_SYN, TH_SYN|TH_ACK, 60 TH_ACK, TH_ACK, TH_FIN|TH_ACK, TH_FIN|TH_ACK, 61 TH_FIN|TH_ACK, TH_ACK, TH_ACK, 62 62 }; 63 63 64 64 65 #define MAX_TCPOPTLEN 32/* max # bytes that go in options */65 #define MAX_TCPOPTLEN 32 /* max # bytes that go in options */ 66 66 67 67 /* … … 71 71 tcp_output(PNATState pData, register struct tcpcb *tp) 72 72 { 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 73 register struct socket *so = tp->t_socket; 74 register long len, win; 75 int off, flags, error; 76 register struct mbuf *m; 77 register struct tcpiphdr *ti; 78 u_char opt[MAX_TCPOPTLEN]; 79 unsigned optlen, hdrlen; 80 int idle, sendalot; 81 82 DEBUG_CALL("tcp_output"); 83 DEBUG_ARG("tp = %lx", (long )tp); 84 85 /* 86 * Determine length of data that should be transmitted, 87 * and flags that will be used. 88 * If there is some data or critical controls (SYN, RST) 89 * to send, then transmit; otherwise, investigate further. 90 */ 91 idle = (tp->snd_max == tp->snd_una); 92 if (idle && tp->t_idle >= tp->t_rxtcur) 93 /* 94 * We have been idle for "a while" and no acks are 95 * expected to clock out any data we send -- 96 * slow start to get ack "clock" running again. 
97 */ 98 tp->snd_cwnd = tp->t_maxseg; 99 99 again: 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 * idlenot doing retransmits or persists239 * persistingto move a small or zero window240 * (re)transmittingand thereby not persisting241 242 243 *is set when we are in persist state.244 245 *is set when we are called to send a persist packet.246 247 *is set when we are retransmitting248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 100 sendalot = 0; 101 off = tp->snd_nxt - tp->snd_una; 102 win = min(tp->snd_wnd, tp->snd_cwnd); 103 104 flags = tcp_outflags[tp->t_state]; 105 106 DEBUG_MISC((dfd, " --- tcp_output flags = 0x%x\n",flags)); 107 108 /* 109 * If in persist timeout with window of 0, send 1 byte. 110 * Otherwise, if window is small but nonzero 111 * and timer expired, we will send what we can 112 * and go to transmit state. 113 */ 114 if (tp->t_force) { 115 if (win == 0) { 116 /* 117 * If we still have some data to send, then 118 * clear the FIN bit. Usually this would 119 * happen below when it realizes that we 120 * aren't sending all the data. However, 121 * if we have exactly 1 byte of unset data, 122 * then it won't clear the FIN bit below, 123 * and if we are in persist state, we wind 124 * up sending the packet without recording 125 * that we sent the FIN bit. 126 * 127 * We can't just blindly clear the FIN bit, 128 * because if we don't have any more data 129 * to send then the probe will be the FIN 130 * itself. 131 */ 132 if (off < so->so_snd.sb_cc) 133 flags &= ~TH_FIN; 134 win = 1; 135 } else { 136 tp->t_timer[TCPT_PERSIST] = 0; 137 tp->t_rxtshift = 0; 138 } 139 } 140 141 len = min(so->so_snd.sb_cc, win) - off; 142 143 if (len < 0) { 144 /* 145 * If FIN has been sent but not acked, 146 * but we haven't been called to retransmit, 147 * len will be -1. Otherwise, window shrank 148 * after we sent into it. If window shrank to 0, 149 * cancel pending retransmit and pull snd_nxt 150 * back to (closed) window. We will enter persist 151 * state below. If the window didn't close completely, 152 * just wait for an ACK. 153 */ 154 len = 0; 155 if (win == 0) { 156 tp->t_timer[TCPT_REXMT] = 0; 157 tp->snd_nxt = tp->snd_una; 158 } 159 } 160 161 if (len > tp->t_maxseg) { 162 len = tp->t_maxseg; 163 sendalot = 1; 164 } 165 if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.sb_cc)) 166 flags &= ~TH_FIN; 167 168 win = sbspace(&so->so_rcv); 169 170 /* 171 * Sender silly window avoidance. If connection is idle 172 * and can send all data, a maximum segment, 173 * at least a maximum default-size segment do it, 174 * or are forced, do it; otherwise don't bother. 175 * If peer's buffer is tiny, then send 176 * when window is at least half open. 177 * If retransmitting (possibly after persist timer forced us 178 * to send into a small window), then must resend. 
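The comment above enumerates when a data-bearing segment is worth sending; the if (len) ladder that follows implements it. A sketch of that same ladder (note the literal "1 ||" in this code effectively disables the Nagle/idle test, so any write that flushes the buffer sends immediately):

    /* Should tcp_output() transmit now, given len bytes ready at offset off? */
    static int should_send_data(long len, long off, long sb_cc, long maxseg,
                                long max_sndwnd, int force, int rexmt)
    {
        if (len == 0)
            return 0;
        if (len == maxseg)
            return 1;              /* a full-sized segment */
        if (len + off >= sb_cc)
            return 1;              /* flushes the send buffer (Nagle bypassed) */
        if (force)
            return 1;              /* window-probe byte */
        if (max_sndwnd > 0 && len >= max_sndwnd / 2)
            return 1;              /* peer's buffer is tiny: half is enough */
        return rexmt;              /* retransmitting: SEQ_LT(snd_nxt, snd_max) */
    }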
179 */ 180 if (len) { 181 if (len == tp->t_maxseg) 182 goto send; 183 if ((1 || idle || tp->t_flags & TF_NODELAY) && 184 len + off >= so->so_snd.sb_cc) 185 goto send; 186 if (tp->t_force) 187 goto send; 188 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) 189 goto send; 190 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 191 goto send; 192 } 193 194 /* 195 * Compare available window to amount of window 196 * known to peer (as advertised window less 197 * next expected input). If the difference is at least two 198 * max size segments, or at least 50% of the maximum possible 199 * window, then want to send a window update to peer. 200 */ 201 if (win > 0) { 202 /* 203 * "adv" is the amount we can increase the window, 204 * taking into account that we are limited by 205 * TCP_MAXWIN << tp->rcv_scale. 206 */ 207 long adv = min(win, (long)TCP_MAXWIN << tp->rcv_scale) - 208 (tp->rcv_adv - tp->rcv_nxt); 209 210 if (adv >= (long) (2 * tp->t_maxseg)) 211 goto send; 212 if (2 * adv >= (long) so->so_rcv.sb_datalen) 213 goto send; 214 } 215 216 /* 217 * Send if we owe peer an ACK. 218 */ 219 if (tp->t_flags & TF_ACKNOW) 220 goto send; 221 if (flags & (TH_SYN|TH_RST)) 222 goto send; 223 if (SEQ_GT(tp->snd_up, tp->snd_una)) 224 goto send; 225 /* 226 * If our state indicates that FIN should be sent 227 * and we have not yet done so, or we're retransmitting the FIN, 228 * then we need to send. 229 */ 230 if (flags & TH_FIN && 231 ((tp->t_flags & TF_SENTFIN) == 0 || tp->snd_nxt == tp->snd_una)) 232 goto send; 233 234 /* 235 * TCP window updates are not reliable, rather a polling protocol 236 * using ``persist'' packets is used to insure receipt of window 237 * updates. The three ``states'' for the output side are: 238 * idle not doing retransmits or persists 239 * persisting to move a small or zero window 240 * (re)transmitting and thereby not persisting 241 * 242 * tp->t_timer[TCPT_PERSIST] 243 * is set when we are in persist state. 244 * tp->t_force 245 * is set when we are called to send a persist packet. 246 * tp->t_timer[TCPT_REXMT] 247 * is set when we are retransmitting 248 * The output side is idle when both timers are zero. 249 * 250 * If send window is too small, there is data to transmit, and no 251 * retransmit or persist is pending, then go to persist state. 252 * If nothing happens soon, send when timer expires: 253 * if window is nonzero, transmit what we can, 254 * otherwise force out a byte. 255 */ 256 if (so->so_snd.sb_cc && tp->t_timer[TCPT_REXMT] == 0 && 257 tp->t_timer[TCPT_PERSIST] == 0) { 258 tp->t_rxtshift = 0; 259 tcp_setpersist(tp); 260 } 261 262 /* 263 * No reason to send a segment, just return. 264 */ 265 tcpstat.tcps_didnuttin++; 266 267 return (0); 268 268 269 269 send: 270 271 272 273 274 275 276 *max_linkhdr + sizeof (struct tcpiphdr) + optlen <= MHLEN277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 /* 292 * 293 * 294 * 295 * 296 * 297 * 298 * 299 * 300 * 301 */ 302 303 304 305 306 307 308 309 310 /* 311 * 312 * 313 * 314 * 270 /* 271 * Before ESTABLISHED, force sending of initial options 272 * unless TCP set not to do any options. 273 * NOTE: we assume that the IP/TCP header plus TCP options 274 * always fit in a single mbuf, leaving room for a maximum 275 * link header, i.e. 
276 * max_linkhdr + sizeof (struct tcpiphdr) + optlen <= MHLEN 277 */ 278 optlen = 0; 279 hdrlen = sizeof (struct tcpiphdr); 280 if (flags & TH_SYN) { 281 tp->snd_nxt = tp->iss; 282 if ((tp->t_flags & TF_NOOPT) == 0) { 283 u_int16_t mss; 284 285 opt[0] = TCPOPT_MAXSEG; 286 opt[1] = 4; 287 mss = htons((u_int16_t) tcp_mss(pData, tp, 0)); 288 memcpy((caddr_t)(opt + 2), (caddr_t)&mss, sizeof(mss)); 289 optlen = 4; 290 291 /* if ((tp->t_flags & TF_REQ_SCALE) && 292 * ((flags & TH_ACK) == 0 || 293 * (tp->t_flags & TF_RCVD_SCALE))) { 294 * *((u_int32_t *) (opt + optlen)) = htonl( 295 * TCPOPT_NOP << 24 | 296 * TCPOPT_WINDOW << 16 | 297 * TCPOLEN_WINDOW << 8 | 298 * tp->request_r_scale); 299 * optlen += 4; 300 * } 301 */ 302 } 303 } 304 305 /* 306 * Send a timestamp and echo-reply if this is a SYN and our side 307 * wants to use timestamps (TF_REQ_TSTMP is set) or both our side 308 * and our peer have sent timestamps in our SYN's. 309 */ 310 /* if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP && 311 * (flags & TH_RST) == 0 && 312 * ((flags & (TH_SYN|TH_ACK)) == TH_SYN || 313 * (tp->t_flags & TF_RCVD_TSTMP))) { 314 * u_int32_t *lp = (u_int32_t *)(opt + optlen); 315 315 * 316 * 317 * 318 * 319 * 320 * 321 * 322 */ 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 /* 353 354 355 356 357 358 359 360 361 362 363 /* 364 365 366 367 368 /* 369 * 370 * 371 * 372 * 373 */ 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 /* 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 316 * / * Form timestamp option as shown in appendix A of RFC 1323. * / 317 * *lp++ = htonl(TCPOPT_TSTAMP_HDR); 318 * *lp++ = htonl(tcp_now); 319 * *lp = htonl(tp->ts_recent); 320 * optlen += TCPOLEN_TSTAMP_APPA; 321 * } 322 */ 323 hdrlen += optlen; 324 325 /* 326 * Adjust data length if insertion of options will 327 * bump the packet length beyond the t_maxseg length. 328 */ 329 if (len > tp->t_maxseg - optlen) { 330 len = tp->t_maxseg - optlen; 331 sendalot = 1; 332 } 333 334 /* 335 * Grab a header mbuf, attaching a copy of data to 336 * be transmitted, and initialize the header from 337 * the template for sends on this connection. 338 */ 339 if (len) { 340 if (tp->t_force && len == 1) 341 tcpstat.tcps_sndprobe++; 342 else if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { 343 tcpstat.tcps_sndrexmitpack++; 344 tcpstat.tcps_sndrexmitbyte += len; 345 } else { 346 tcpstat.tcps_sndpack++; 347 tcpstat.tcps_sndbyte += len; 348 } 349 350 m = m_get(pData); 351 if (m == NULL) { 352 /* error = ENOBUFS; */ 353 error = 1; 354 goto out; 355 } 356 m->m_data += if_maxlinkhdr; 357 m->m_len = hdrlen; 358 359 /* 360 * This will always succeed, since we make sure our mbufs 361 * are big enough to hold one MSS packet + header + ... etc. 362 */ 363 /* if (len <= MHLEN - hdrlen - max_linkhdr) { */ 364 365 sbcopy(&so->so_snd, off, (int) len, mtod(m, caddr_t) + hdrlen); 366 m->m_len += len; 367 368 /* } else { 369 * m->m_next = m_copy(so->so_snd.sb_mb, off, (int) len); 370 * if (m->m_next == 0) 371 * len = 0; 372 * } 373 */ 374 /* 375 * If we're sending everything we've got, set PUSH. 376 * (This will keep happy those implementations which only 377 * give data to the user when a buffer fills or 378 * a PUSH comes in.) 
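The SYN branch above emits exactly one TCP option, maximum segment size: kind 2, length 4, value in network byte order. Building it is four bytes of bookkeeping:

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    /* Write a TCPOPT_MAXSEG option into opt[]; returns bytes used (4). */
    static unsigned put_mss_option(unsigned char *opt, uint16_t mss_host)
    {
        uint16_t mss = htons(mss_host);
        opt[0] = 2;                      /* TCPOPT_MAXSEG */
        opt[1] = 4;                      /* TCPOLEN_MAXSEG */
        memcpy(opt + 2, &mss, sizeof mss);
        return 4;
    }

Using memcpy rather than a 16-bit store mirrors the original and avoids assuming any alignment of the option buffer.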
379 */ 380 if (off + len == so->so_snd.sb_cc) 381 flags |= TH_PUSH; 382 } else { 383 if (tp->t_flags & TF_ACKNOW) 384 tcpstat.tcps_sndacks++; 385 else if (flags & (TH_SYN|TH_FIN|TH_RST)) 386 tcpstat.tcps_sndctrl++; 387 else if (SEQ_GT(tp->snd_up, tp->snd_una)) 388 tcpstat.tcps_sndurg++; 389 else 390 tcpstat.tcps_sndwinup++; 391 392 m = m_get(pData); 393 if (m == NULL) { 394 /* error = ENOBUFS; */ 395 error = 1; 396 goto out; 397 } 398 m->m_data += if_maxlinkhdr; 399 m->m_len = hdrlen; 400 } 401 402 ti = mtod(m, struct tcpiphdr *); 403 404 memcpy((caddr_t)ti, &tp->t_template, sizeof (struct tcpiphdr)); 405 406 /* 407 * Fill in fields, remembering maximum advertised 408 * window for use in delaying messages about window sizes. 409 * If resending a FIN, be sure not to use a new sequence number. 410 */ 411 if (flags & TH_FIN && tp->t_flags & TF_SENTFIN && 412 tp->snd_nxt == tp->snd_max) 413 tp->snd_nxt--; 414 /* 415 * If we are doing retransmissions, then snd_nxt will 416 * not reflect the first unsent octet. For ACK only 417 * packets, we do not want the sequence number of the 418 * retransmitted packet, we want the sequence number 419 * of the next unsent octet. So, if there is no data 420 * (and no SYN or FIN), use snd_max instead of snd_nxt 421 * when filling in ti_seq. But if we are in persist 422 * state, snd_max might reflect one byte beyond the 423 * right edge of the window, so use snd_nxt in that 424 * case, since we know we aren't doing a retransmission. 425 * (retransmit and persist are mutually exclusive...) 426 */ 427 if (len || (flags & (TH_SYN|TH_FIN)) || tp->t_timer[TCPT_PERSIST]) 428 ti->ti_seq = htonl(tp->snd_nxt); 429 else 430 ti->ti_seq = htonl(tp->snd_max); 431 ti->ti_ack = htonl(tp->rcv_nxt); 432 if (optlen) { 433 memcpy((caddr_t)(ti + 1), (caddr_t)opt, optlen); 434 ti->ti_off = (sizeof (struct tcphdr) + optlen) >> 2; 435 } 436 ti->ti_flags = flags; 437 /* 438 * Calculate receive window. Don't shrink window, 439 * but avoid silly window syndrome. 440 */ 441 if (win < (long)(so->so_rcv.sb_datalen / 4) && win < (long)tp->t_maxseg) 442 win = 0; 443 if (win > (long)TCP_MAXWIN << tp->rcv_scale) 444 win = (long)TCP_MAXWIN << tp->rcv_scale; 445 if (win < (long)(tp->rcv_adv - tp->rcv_nxt)) 446 win = (long)(tp->rcv_adv - tp->rcv_nxt); 447 ti->ti_win = htons((u_int16_t) (win>>tp->rcv_scale)); 448 449 if (SEQ_GT(tp->snd_up, tp->snd_una)) { 450 ti->ti_urp = htons((u_int16_t)(tp->snd_up - ntohl(ti->ti_seq))); 451 451 #ifdef notdef 452 453 452 if (SEQ_GT(tp->snd_up, tp->snd_nxt)) { 453 ti->ti_urp = htons((u_int16_t)(tp->snd_up - tp->snd_nxt)); 454 454 #endif 455 456 457 458 459 460 461 462 463 tp->snd_up = tp->snd_una;/* drag it along */464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 455 ti->ti_flags |= TH_URG; 456 } else 457 /* 458 * If no urgent pointer to send, then we pull 459 * the urgent pointer to the left edge of the send window 460 * so that it doesn't drift into the send window on sequence 461 * number wraparound. 462 */ 463 tp->snd_up = tp->snd_una; /* drag it along */ 464 465 /* 466 * Put TCP length in extended header, and then 467 * checksum extended header and data. 
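Receiver-side silly window avoidance, as computed just above for ti_win: refuse to advertise a dribble (less than a quarter of the buffer and less than one segment), cap at the largest representable window, and never retract an advertisement already made. A sketch:

    #include <stdint.h>

    typedef uint32_t tcp_seq;

    /* The window value that ends up in ti_win (before the >> rcv_scale). */
    static long advertise_win(long win, long sb_datalen, long maxseg,
                              tcp_seq rcv_adv, tcp_seq rcv_nxt, unsigned scale)
    {
        long cap = 65535L << scale;            /* TCP_MAXWIN << rcv_scale */
        if (win < sb_datalen / 4 && win < maxseg)
            win = 0;                           /* too small to be useful */
        if (win > cap)
            win = cap;
        if (win < (long)(rcv_adv - rcv_nxt))
            win = (long)(rcv_adv - rcv_nxt);   /* never shrink the offer */
        return win;
    }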
468 */ 469 if (len + optlen) 470 ti->ti_len = htons((u_int16_t)(sizeof (struct tcphdr) + 471 optlen + len)); 472 ti->ti_sum = cksum(m, (int)(hdrlen + len)); 473 474 /* 475 * In transmit state, time the transmission and arrange for 476 * the retransmit. In persist state, just set snd_max. 477 */ 478 if (tp->t_force == 0 || tp->t_timer[TCPT_PERSIST] == 0) { 479 tcp_seq startseq = tp->snd_nxt; 480 481 /* 482 * Advance snd_nxt over sequence space of this segment. 483 */ 484 if (flags & (TH_SYN|TH_FIN)) { 485 if (flags & TH_SYN) 486 tp->snd_nxt++; 487 if (flags & TH_FIN) { 488 tp->snd_nxt++; 489 tp->t_flags |= TF_SENTFIN; 490 } 491 } 492 tp->snd_nxt += len; 493 if (SEQ_GT(tp->snd_nxt, tp->snd_max)) { 494 tp->snd_max = tp->snd_nxt; 495 /* 496 * Time this transmission if not a retransmission and 497 * not currently timing anything. 498 */ 499 if (tp->t_rtt == 0) { 500 tp->t_rtt = 1; 501 tp->t_rtseq = startseq; 502 tcpstat.tcps_segstimed++; 503 } 504 } 505 506 /* 507 * Set retransmit timer if not currently set, 508 * and not doing an ack or a keep-alive probe. 509 * Initial value for retransmit timer is smoothed 510 * round-trip time + 2 * round-trip time variance. 511 * Initialize shift counter which is used for backoff 512 * of retransmit time. 513 */ 514 if (tp->t_timer[TCPT_REXMT] == 0 && 515 tp->snd_nxt != tp->snd_una) { 516 tp->t_timer[TCPT_REXMT] = tp->t_rxtcur; 517 if (tp->t_timer[TCPT_PERSIST]) { 518 tp->t_timer[TCPT_PERSIST] = 0; 519 tp->t_rxtshift = 0; 520 } 521 } 522 } else 523 if (SEQ_GT(tp->snd_nxt + len, tp->snd_max)) 524 tp->snd_max = tp->snd_nxt + len; 525 526 /* 527 * Fill in IP length and desired time to live and 528 * send to IP level. There should be a better way 529 * to handle ttl and tos; we could keep them in 530 * the template, but need a way to checksum without them. 531 */ 532 m->m_len = hdrlen + len; /* XXX Needed? m_len should be correct */ 533 533 534 534 { 535 535 536 537 538 539 536 ((struct ip *)ti)->ip_len = m->m_len; 537 538 ((struct ip *)ti)->ip_ttl = ip_defttl; 539 ((struct ip *)ti)->ip_tos = so->so_iptos; 540 540 541 541 /* #if BSD >= 43 */ 542 543 /* 544 * 545 */ 546 542 /* Don't do IP options... */ 543 /* error = ip_output(m, tp->t_inpcb->inp_options, &tp->t_inpcb->inp_route, 544 * so->so_options & SO_DONTROUTE, 0); 545 */ 546 error = ip_output(pData, so, m); 547 547 548 548 /* #else 549 * 550 * 549 * error = ip_output(m, (struct mbuf *)0, &tp->t_inpcb->inp_route, 550 * so->so_options & SO_DONTROUTE); 551 551 * #endif 552 552 */ 553 553 } 554 554 if (error) { 555 555 out: 556 /* 557 * 558 * 559 * 560 */ 561 /* 562 * 563 * 564 * 565 * 566 */ 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 556 /* if (error == ENOBUFS) { 557 * tcp_quench(tp->t_inpcb, 0); 558 * return (0); 559 * } 560 */ 561 /* if ((error == EHOSTUNREACH || error == ENETDOWN) 562 * && TCPS_HAVERCVDSYN(tp->t_state)) { 563 * tp->t_softerror = error; 564 * return (0); 565 * } 566 */ 567 return (error); 568 } 569 tcpstat.tcps_sndtotal++; 570 571 /* 572 * Data sent (as far as we can tell). 573 * If this advertises a larger window than any other segment, 574 * then remember the size of the advertised window. 575 * Any pending ACK has now been sent. 
576 */ 577 if (win > 0 && SEQ_GT(tp->rcv_nxt+win, tp->rcv_adv)) 578 tp->rcv_adv = tp->rcv_nxt + win; 579 tp->last_ack_sent = tp->rcv_nxt; 580 tp->t_flags &= ~(TF_ACKNOW|TF_DELACK); 581 if (sendalot) 582 goto again; 583 584 return (0); 585 585 } 586 586 587 587 void 588 588 tcp_setpersist(tp) 589 589 register struct tcpcb *tp; 590 590 { 591 591 int t = ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1; 592 592 593 /* 594 * 595 */ 596 597 598 599 600 601 602 603 593 /* if (tp->t_timer[TCPT_REXMT]) 594 * panic("tcp_output REXMT"); 595 */ 596 /* 597 * Start/restart persistence timer. 598 */ 599 TCPT_RANGESET(tp->t_timer[TCPT_PERSIST], 600 t * tcp_backoff[tp->t_rxtshift], 601 TCPTV_PERSMIN, TCPTV_PERSMAX); 602 if (tp->t_rxtshift < TCP_MAXRXTSHIFT) 603 tp->t_rxtshift++; 604 604 } -
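Note: the tcp_setpersist() arithmetic above is easy to check in isolation. Below is a minimal standalone sketch of the same computation, assuming PR_SLOWHZ is 2 (500 ms slow ticks) as in slirp; the helper names (rangeset, persist_ticks) and the sample srtt/rttvar inputs are illustrative and not part of the sources.

/*
 * Standalone sketch of the persist-timer arithmetic used by
 * tcp_setpersist() above.  PR_SLOWHZ is assumed to be 2 (500 ms
 * ticks); rangeset() is the TCPT_RANGESET macro as a function.
 */
#include <stdio.h>

#define PR_SLOWHZ       2
#define TCPTV_PERSMIN   ( 5*PR_SLOWHZ)
#define TCPTV_PERSMAX   (60*PR_SLOWHZ)
#define TCP_MAXRXTSHIFT 12

static const int backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };

static int rangeset(int value, int tvmin, int tvmax)
{
    if (value < tvmin) return tvmin;
    if (value > tvmax) return tvmax;
    return value;
}

/* Persist interval in slow ticks: same base term as tcp_setpersist(). */
static int persist_ticks(int t_srtt, int t_rttvar, int t_rxtshift)
{
    int t = ((t_srtt >> 2) + t_rttvar) >> 1;
    return rangeset(t * backoff[t_rxtshift], TCPTV_PERSMIN, TCPTV_PERSMAX);
}

int main(void)
{
    int shift;
    for (shift = 0; shift <= TCP_MAXRXTSHIFT; shift++)  /* sample srtt/rttvar */
        printf("shift %2d -> %3d ticks\n", shift, persist_ticks(32, 12, shift));
    return 0;
}

With these sample inputs the interval starts at a 10-tick (5 s) base and doubles per backoff step until it saturates at TCPTV_PERSMAX (120 ticks, i.e. 60 s).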
trunk/src/VBox/Devices/Network/slirp/tcp_subr.c
r14331 r14470 1 1 /* 2 2 * Copyright (c) 1982, 1986, 1988, 1990, 1993 3 * 3 * The Regents of the University of California. All rights reserved. 4 4 * 5 5 * Redistribution and use in source and binary forms, with or without … … 13 13 * 3. All advertising materials mentioning features or use of this software 14 14 * must display the following acknowledgement: 15 * 16 * 15 * This product includes software developed by the University of 16 * California, Berkeley and its contributors. 17 17 * 4. Neither the name of the University nor the names of its contributors 18 18 * may be used to endorse or promote products derived from this software … … 31 31 * SUCH DAMAGE. 32 32 * 33 * @(#)tcp_subr.c8.1 (Berkeley) 6/10/9333 * @(#)tcp_subr.c 8.1 (Berkeley) 6/10/93 34 34 * tcp_subr.c,v 1.5 1994/10/08 22:39:58 phk Exp 35 35 */ … … 53 53 tcp_init(PNATState pData) 54 54 { 55 tcp_iss = 1;/* wrong */56 55 tcp_iss = 1; /* wrong */ 56 tcb.so_next = tcb.so_prev = &tcb; 57 57 tcp_last_so = &tcb; 58 58 #ifdef VBOX_WITH_BSD_TCP_REASS … … 71 71 void 72 72 tcp_template(tp) 73 74 { 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 73 struct tcpcb *tp; 74 { 75 struct socket *so = tp->t_socket; 76 register struct tcpiphdr *n = &tp->t_template; 77 78 n->ti_next = n->ti_prev = 0; 79 n->ti_x1 = 0; 80 n->ti_pr = IPPROTO_TCP; 81 n->ti_len = htons(sizeof (struct tcpiphdr) - sizeof (struct ip)); 82 n->ti_src = so->so_faddr; 83 n->ti_dst = so->so_laddr; 84 n->ti_sport = so->so_fport; 85 n->ti_dport = so->so_lport; 86 87 n->ti_seq = 0; 88 n->ti_ack = 0; 89 n->ti_x2 = 0; 90 n->ti_off = 5; 91 n->ti_flags = 0; 92 n->ti_win = 0; 93 n->ti_sum = 0; 94 n->ti_urp = 0; 95 95 } 96 96 … … 111 111 tcp_respond(PNATState pData, struct tcpcb *tp, struct tcpiphdr *ti, struct mbuf *m, tcp_seq ack, tcp_seq seq, int flags) 112 112 { 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 113 register int tlen; 114 int win = 0; 115 116 DEBUG_CALL("tcp_respond"); 117 DEBUG_ARG("tp = %lx", (long)tp); 118 DEBUG_ARG("ti = %lx", (long)ti); 119 DEBUG_ARG("m = %lx", (long)m); 120 DEBUG_ARG("ack = %u", ack); 121 DEBUG_ARG("seq = %u", seq); 122 DEBUG_ARG("flags = %x", flags); 123 124 if (tp) 125 win = sbspace(&tp->t_socket->so_rcv); 126 if (m == 0) { 127 if ((m = m_get(pData)) == NULL) 128 return; 129 129 #ifdef TCP_COMPAT_42 130 130 tlen = 1; 131 131 #else 132 132 tlen = 0; 133 133 #endif 134 135 136 137 138 139 140 141 142 143 144 145 146 134 m->m_data += if_maxlinkhdr; 135 *mtod(m, struct tcpiphdr *) = *ti; 136 ti = mtod(m, struct tcpiphdr *); 137 flags = TH_ACK; 138 } else { 139 /* 140 * ti points into m so the next line is just making 141 * the mbuf point to ti 142 */ 143 m->m_data = (caddr_t)ti; 144 145 m->m_len = sizeof (struct tcpiphdr); 146 tlen = 0; 147 147 #define xchg(a,b,type) { type t; t=a; a=b; b=t; } 148 149 148 xchg(ti->ti_dst.s_addr, ti->ti_src.s_addr, u_int32_t); 149 xchg(ti->ti_dport, ti->ti_sport, u_int16_t); 150 150 #undef xchg 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 151 } 152 ti->ti_len = htons((u_short)(sizeof (struct tcphdr) + tlen)); 153 tlen += sizeof (struct tcpiphdr); 154 m->m_len = tlen; 155 156 ti->ti_next = ti->ti_prev = 0; 157 ti->ti_x1 = 0; 158 ti->ti_seq = htonl(seq); 159 ti->ti_ack = htonl(ack); 160 ti->ti_x2 = 0; 161 ti->ti_off = sizeof (struct tcphdr) >> 2; 162 ti->ti_flags = flags; 163 if (tp) 164 ti->ti_win = htons((u_int16_t) (win >> tp->rcv_scale)); 165 else 166 ti->ti_win = htons((u_int16_t)win); 167 ti->ti_urp = 0; 168 ti->ti_sum 
= 0; 169 ti->ti_sum = cksum(m, tlen); 170 ((struct ip *)ti)->ip_len = tlen; 171 172 if(flags & TH_RST) 173 ((struct ip *)ti)->ip_ttl = MAXTTL; 174 else 175 ((struct ip *)ti)->ip_ttl = ip_defttl; 176 177 (void) ip_output(pData, (struct socket *)0, m); 178 178 } 179 179 … … 186 186 tcp_newtcpcb(PNATState pData, struct socket *so) 187 187 { 188 189 190 191 192 193 194 188 register struct tcpcb *tp; 189 190 tp = (struct tcpcb *)malloc(sizeof(*tp)); 191 if (tp == NULL) 192 return ((struct tcpcb *)0); 193 194 memset((char *) tp, 0, sizeof(struct tcpcb)); 195 195 #ifndef VBOX_WITH_BSD_TCP_REASS 196 196 tp->seg_next = tp->seg_prev = ptr_to_u32(pData, (struct tcpiphdr *)tp); 197 197 #else /* VBOX_WITH_BSD_TCP_REASS */ 198 198 LIST_INSERT_HEAD(&pData->tcpcbhead, tp, t_list); 199 199 #endif /* VBOX_WITH_BSD_TCP_REASS */ 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 200 tp->t_maxseg = tcp_mssdflt; 201 202 tp->t_flags = tcp_do_rfc1323 ? (TF_REQ_SCALE|TF_REQ_TSTMP) : 0; 203 tp->t_socket = so; 204 205 /* 206 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no 207 * rtt estimate. Set rttvar so that srtt + 2 * rttvar gives 208 * reasonable initial retransmit time. 209 */ 210 tp->t_srtt = TCPTV_SRTTBASE; 211 tp->t_rttvar = tcp_rttdflt * PR_SLOWHZ << 2; 212 tp->t_rttmin = TCPTV_MIN; 213 214 TCPT_RANGESET(tp->t_rxtcur, 215 ((TCPTV_SRTTBASE >> 2) + (TCPTV_SRTTDFLT << 2)) >> 1, 216 TCPTV_MIN, TCPTV_REXMTMAX); 217 218 tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT; 219 tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT; 220 tp->t_state = TCPS_CLOSED; 221 222 so->so_tcpcb = tp; 223 224 return (tp); 225 225 } 226 226 … … 233 233 { 234 234 /* tcp_drop(tp, errno) 235 236 235 register struct tcpcb *tp; 236 int errno; 237 237 { 238 238 */ 239 239 240 241 242 243 244 245 246 247 248 249 250 /* 251 * 252 */ 253 /* 254 240 DEBUG_CALL("tcp_drop"); 241 DEBUG_ARG("tp = %lx", (long)tp); 242 DEBUG_ARG("errno = %d", errno); 243 244 if (TCPS_HAVERCVDSYN(tp->t_state)) { 245 tp->t_state = TCPS_CLOSED; 246 (void) tcp_output(pData, tp); 247 tcpstat.tcps_drops++; 248 } else 249 tcpstat.tcps_conndrops++; 250 /* if (errno == ETIMEDOUT && tp->t_softerror) 251 * errno = tp->t_softerror; 252 */ 253 /* so->so_error = errno; */ 254 return (tcp_close(pData, tp)); 255 255 } 256 256 257 257 /* 258 258 * Close a TCP control block: 259 * 260 * 261 * 259 * discard all space held by the tcp 260 * discard internet protocol block 261 * wake up any sleepers 262 262 */ 263 263 struct tcpcb * 264 264 tcp_close(PNATState pData, register struct tcpcb *tp) 265 265 { 266 267 268 266 register struct tcpiphdr *t; 267 struct socket *so = tp->t_socket; 268 register struct mbuf *m; 269 269 270 270 #ifndef VBOX_WITH_BSD_TCP_REASS 271 272 273 274 275 276 277 278 279 280 281 282 283 /* 284 * 285 */ 286 /* 287 271 DEBUG_CALL("tcp_close"); 272 DEBUG_ARG("tp = %lx", (long )tp); 273 274 /* free the reassembly queue, if any */ 275 t = u32_to_ptr(pData, tp->seg_next, struct tcpiphdr *); 276 while (t != (struct tcpiphdr *)tp) { 277 t = u32_to_ptr(pData, t->ti_next, struct tcpiphdr *); 278 m = REASS_MBUF_GET(u32_to_ptr(pData, t->ti_prev, struct tcpiphdr *)); 279 remque_32(pData, u32_to_ptr(pData, t->ti_prev, struct tcpiphdr *)); 280 m_freem(pData, m); 281 } 282 /* It's static */ 283 /* if (tp->t_template) 284 * (void) m_free(dtom(tp->t_template)); 285 */ 286 /* free(tp, M_PCB); */ 287 u32ptr_done(pData, ptr_to_u32(pData, tp), tp); 288 288 #else /* VBOX_WITH_BSD_TCP_REASS */ 289 289 struct tseg_qent *te; 
290 291 290 DEBUG_CALL("tcp_close"); 291 DEBUG_ARG("tp = %lx", (long )tp); 292 292 /*XXX: freeing the reassembly queue */ 293 293 LIST_FOREACH(te, &tp->t_segq, tqe_q) { … … 298 298 } 299 299 #endif /* VBOX_WITH_BSD_TCP_REASS */ 300 301 302 303 304 305 306 307 308 309 310 311 300 free(tp); 301 so->so_tcpcb = 0; 302 soisfdisconnected(so); 303 /* clobber input socket cache if we're closing the cached connection */ 304 if (so == tcp_last_so) 305 tcp_last_so = &tcb; 306 closesocket(so->s); 307 sbfree(&so->so_rcv); 308 sbfree(&so->so_snd); 309 sofree(pData, so); 310 tcpstat.tcps_closed++; 311 return ((struct tcpcb *)0); 312 312 } 313 313 … … 315 315 tcp_drain() 316 316 { 317 317 /* XXX */ 318 318 } 319 319 … … 328 328 tcp_quench(i, errno) 329 329 330 331 { 332 333 334 335 330 int errno; 331 { 332 struct tcpcb *tp = intotcpcb(inp); 333 334 if (tp) 335 tp->snd_cwnd = tp->t_maxseg; 336 336 } 337 337 … … 356 356 { 357 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 /* 380 381 382 383 358 DEBUG_CALL("tcp_sockclosed"); 359 DEBUG_ARG("tp = %lx", (long)tp); 360 361 switch (tp->t_state) { 362 363 case TCPS_CLOSED: 364 case TCPS_LISTEN: 365 case TCPS_SYN_SENT: 366 tp->t_state = TCPS_CLOSED; 367 tp = tcp_close(pData, tp); 368 break; 369 370 case TCPS_SYN_RECEIVED: 371 case TCPS_ESTABLISHED: 372 tp->t_state = TCPS_FIN_WAIT_1; 373 break; 374 375 case TCPS_CLOSE_WAIT: 376 tp->t_state = TCPS_LAST_ACK; 377 break; 378 } 379 /* soisfdisconnecting(tp->t_socket); */ 380 if (tp && tp->t_state >= TCPS_FIN_WAIT_2) 381 soisfdisconnected(tp->t_socket); 382 if (tp) 383 tcp_output(pData, tp); 384 384 } 385 385 … … 417 417 case CTL_DNS: 418 418 if (!get_dns_addr(pData, &dns_addr)) 419 419 addr.sin_addr = dns_addr; 420 420 else 421 421 addr.sin_addr = loopback_addr; 422 422 break; 423 423 case CTL_ALIAS: 424 424 default: 425 426 425 addr.sin_addr = loopback_addr; 426 break; 427 427 } 428 428 } else … … 431 431 432 432 DEBUG_MISC((dfd, " connect()ing, addr.sin_port=%d, " 433 434 433 "addr.sin_addr.s_addr=%.16s\n", 434 ntohs(addr.sin_port), inet_ntoa(addr.sin_addr))); 435 435 /* We don't care what port we get */ 436 436 ret = connect(s,(struct sockaddr *)&addr,sizeof (addr)); … … 461 461 tcp_connect(PNATState pData, struct socket *inso) 462 462 { 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 463 struct socket *so; 464 struct sockaddr_in addr; 465 socklen_t addrlen = sizeof(struct sockaddr_in); 466 struct tcpcb *tp; 467 int s, opt; 468 469 DEBUG_CALL("tcp_connect"); 470 DEBUG_ARG("inso = %lx", (long)inso); 471 472 /* 473 * If it's an SS_ACCEPTONCE socket, no need to socreate() 474 * another socket, just use the accept() socket. 
475 */ 476 if (inso->so_state & SS_FACCEPTONCE) { 477 /* FACCEPTONCE already have a tcpcb */ 478 so = inso; 479 } else { 480 if ((so = socreate()) == NULL) { 481 /* If it failed, get rid of the pending connection */ 482 closesocket(accept(inso->s,(struct sockaddr *)&addr,&addrlen)); 483 return; 484 } 485 if (tcp_attach(pData, so) < 0) { 486 free(so); /* NOT sofree */ 487 return; 488 } 489 so->so_laddr = inso->so_laddr; 490 so->so_lport = inso->so_lport; 491 } 492 493 (void) tcp_mss(pData, sototcpcb(so), 0); 494 495 if ((s = accept(inso->s,(struct sockaddr *)&addr,&addrlen)) < 0) { 496 tcp_close(pData, sototcpcb(so)); /* This will sofree() as well */ 497 return; 498 } 499 fd_nonblock(s); 500 opt = 1; 501 setsockopt(s,SOL_SOCKET,SO_REUSEADDR,(char *)&opt,sizeof(int)); 502 opt = 1; 503 setsockopt(s,SOL_SOCKET,SO_OOBINLINE,(char *)&opt,sizeof(int)); 504 504 opt = 1; 505 505 setsockopt(s,IPPROTO_TCP,TCP_NODELAY,(char *)&opt,sizeof(int)); 506 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 /* 528 * 529 * 530 */ 531 532 /* 533 534 535 536 537 538 539 540 507 so->so_fport = addr.sin_port; 508 so->so_faddr = addr.sin_addr; 509 /* Translate connections from localhost to the real hostname */ 510 if (so->so_faddr.s_addr == 0 || so->so_faddr.s_addr == loopback_addr.s_addr) 511 so->so_faddr = alias_addr; 512 513 /* Close the accept() socket, set right state */ 514 if (inso->so_state & SS_FACCEPTONCE) { 515 closesocket(so->s); /* If we only accept once, close the accept() socket */ 516 so->so_state = SS_NOFDREF; /* Don't select it yet, even though we have an FD */ 517 /* if it's not FACCEPTONCE, it's already NOFDREF */ 518 } 519 so->s = s; 520 521 so->so_iptos = tcp_tos(so); 522 tp = sototcpcb(so); 523 524 tcp_template(tp); 525 526 /* Compute window scaling to request. 
*/ 527 /* while (tp->request_r_scale < TCP_MAX_WINSHIFT && 528 * (TCP_MAXWIN << tp->request_r_scale) < so->so_rcv.sb_hiwat) 529 * tp->request_r_scale++; 530 */ 531 532 /* soisconnecting(so); */ /* NOFDREF used instead */ 533 tcpstat.tcps_connattempt++; 534 535 tp->t_state = TCPS_SYN_SENT; 536 tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT; 537 tp->iss = tcp_iss; 538 tcp_iss += TCP_ISSINCR/2; 539 tcp_sendseqinit(tp); 540 tcp_output(pData, tp); 541 541 } 542 542 … … 547 547 tcp_attach(PNATState pData, struct socket *so) 548 548 { 549 550 551 552 553 554 549 if ((so->so_tcpcb = tcp_newtcpcb(pData, so)) == NULL) 550 return -1; 551 552 insque(pData, so, &tcb); 553 554 return 0; 555 555 } 556 556 … … 559 559 */ 560 560 static const struct tos_t tcptos[] = { 561 {0, 20, IPTOS_THROUGHPUT, 0},/* ftp data */562 {21, 21, IPTOS_LOWDELAY, EMU_FTP},/* ftp control */563 {0, 23, IPTOS_LOWDELAY, 0},/* telnet */564 {0, 80, IPTOS_THROUGHPUT, 0},/* WWW */565 {0, 513, IPTOS_LOWDELAY, EMU_RLOGIN|EMU_NOCONNECT},/* rlogin */566 {0, 514, IPTOS_LOWDELAY, EMU_RSH|EMU_NOCONNECT},/* shell */567 {0, 544, IPTOS_LOWDELAY, EMU_KSH},/* kshell */568 {0, 543, IPTOS_LOWDELAY, 0},/* klogin */569 {0, 6667, IPTOS_THROUGHPUT, EMU_IRC},/* IRC */570 {0, 6668, IPTOS_THROUGHPUT, EMU_IRC},/* IRC undernet */571 572 573 561 {0, 20, IPTOS_THROUGHPUT, 0}, /* ftp data */ 562 {21, 21, IPTOS_LOWDELAY, EMU_FTP}, /* ftp control */ 563 {0, 23, IPTOS_LOWDELAY, 0}, /* telnet */ 564 {0, 80, IPTOS_THROUGHPUT, 0}, /* WWW */ 565 {0, 513, IPTOS_LOWDELAY, EMU_RLOGIN|EMU_NOCONNECT}, /* rlogin */ 566 {0, 514, IPTOS_LOWDELAY, EMU_RSH|EMU_NOCONNECT}, /* shell */ 567 {0, 544, IPTOS_LOWDELAY, EMU_KSH}, /* kshell */ 568 {0, 543, IPTOS_LOWDELAY, 0}, /* klogin */ 569 {0, 6667, IPTOS_THROUGHPUT, EMU_IRC}, /* IRC */ 570 {0, 6668, IPTOS_THROUGHPUT, EMU_IRC}, /* IRC undernet */ 571 {0, 7070, IPTOS_LOWDELAY, EMU_REALAUDIO }, /* RealAudio control */ 572 {0, 113, IPTOS_LOWDELAY, EMU_IDENT }, /* identd protocol */ 573 {0, 0, 0, 0} 574 574 }; 575 575 … … 579 579 u_int8_t 580 580 tcp_tos(so) 581 582 { 583 584 585 586 587 588 589 590 591 592 593 594 581 struct socket *so; 582 { 583 int i = 0; 584 585 while(tcptos[i].tos) { 586 if ((tcptos[i].fport && (ntohs(so->so_fport) == tcptos[i].fport)) || 587 (tcptos[i].lport && (ntohs(so->so_lport) == tcptos[i].lport))) { 588 so->so_emu = tcptos[i].emu; 589 return tcptos[i].tos; 590 } 591 i++; 592 } 593 594 return 0; 595 595 } 596 596 … … 622 622 tcp_emu(PNATState pData, struct socket *so, struct mbuf *m) 623 623 { 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 624 u_int n1, n2, n3, n4, n5, n6; 625 char buff[256]; 626 u_int32_t laddr; 627 u_int lport; 628 char *bptr; 629 630 DEBUG_CALL("tcp_emu"); 631 DEBUG_ARG("so = %lx", (long)so); 632 DEBUG_ARG("m = %lx", (long)m); 633 634 switch(so->so_emu) { 635 int x, i; 636 637 case EMU_IDENT: 638 /* 639 * Identification protocol as per rfc-1413 640 */ 641 642 { 643 struct socket *tmpso; 644 struct sockaddr_in addr; 645 socklen_t addrlen = sizeof(struct sockaddr_in); 646 struct sbuf *so_rcv = &so->so_rcv; 647 648 memcpy(so_rcv->sb_wptr, m->m_data, m->m_len); 649 so_rcv->sb_wptr += m->m_len; 650 so_rcv->sb_rptr += m->m_len; 651 m->m_data[m->m_len] = 0; /* NULL terminate */ 652 if (strchr(m->m_data, '\r') || strchr(m->m_data, '\n')) { 653 if (sscanf(so_rcv->sb_data, "%u%*[ ,]%u", &n1, &n2) == 2) { 654 HTONS(n1); 655 HTONS(n2); 656 /* n2 is 
the one on our host */ 657 for (tmpso = tcb.so_next; tmpso != &tcb; tmpso = tmpso->so_next) { 658 if (tmpso->so_laddr.s_addr == so->so_laddr.s_addr && 659 tmpso->so_lport == n2 && 660 tmpso->so_faddr.s_addr == so->so_faddr.s_addr && 661 tmpso->so_fport == n1) { 662 if (getsockname(tmpso->s, 663 (struct sockaddr *)&addr, &addrlen) == 0) 664 n2 = ntohs(addr.sin_port); 665 break; 666 } 667 } 668 } 669 so_rcv->sb_cc = sprintf(so_rcv->sb_data, "%d,%d\r\n", n1, n2); 670 so_rcv->sb_rptr = so_rcv->sb_data; 671 so_rcv->sb_wptr = so_rcv->sb_data + so_rcv->sb_cc; 672 } 673 m_free(pData, m); 674 return 0; 675 } 676 676 677 677 case EMU_FTP: /* ftp */ 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 678 *(m->m_data+m->m_len) = 0; /* NULL terminate for strstr */ 679 if ((bptr = (char *)strstr(m->m_data, "ORT")) != NULL) { 680 /* 681 * Need to emulate the PORT command 682 */ 683 x = sscanf(bptr, "ORT %u,%u,%u,%u,%u,%u\r\n%256[^\177]", 684 &n1, &n2, &n3, &n4, &n5, &n6, buff); 685 if (x < 6) 686 return 1; 687 688 laddr = htonl((n1 << 24) | (n2 << 16) | (n3 << 8) | (n4)); 689 lport = htons((n5 << 8) | (n6)); 690 691 if ((so = solisten(pData, 0, laddr, lport, SS_FACCEPTONCE)) == NULL) 692 return 1; 693 694 n6 = ntohs(so->so_fport); 695 696 n5 = (n6 >> 8) & 0xff; 697 n6 &= 0xff; 698 699 laddr = ntohl(so->so_faddr.s_addr); 700 701 n1 = ((laddr >> 24) & 0xff); 702 n2 = ((laddr >> 16) & 0xff); 703 n3 = ((laddr >> 8) & 0xff); 704 n4 = (laddr & 0xff); 705 706 m->m_len = bptr - m->m_data; /* Adjust length */ 707 m->m_len += sprintf(bptr,"ORT %d,%d,%d,%d,%d,%d\r\n%s", 708 n1, n2, n3, n4, n5, n6, x==7?buff:""); 709 return 1; 710 } else if ((bptr = (char *)strstr(m->m_data, "27 Entering")) != NULL) { 711 /* 712 * Need to emulate the PASV response 713 */ 714 x = sscanf(bptr, "27 Entering Passive Mode (%u,%u,%u,%u,%u,%u)\r\n%256[^\177]", 715 &n1, &n2, &n3, &n4, &n5, &n6, buff); 716 if (x < 6) 717 return 1; 718 719 laddr = htonl((n1 << 24) | (n2 << 16) | (n3 << 8) | (n4)); 720 lport = htons((n5 << 8) | (n6)); 721 722 if ((so = solisten(pData, 0, laddr, lport, SS_FACCEPTONCE)) == NULL) 723 return 1; 724 725 n6 = ntohs(so->so_fport); 726 727 n5 = (n6 >> 8) & 0xff; 728 n6 &= 0xff; 729 730 laddr = ntohl(so->so_faddr.s_addr); 731 732 n1 = ((laddr >> 24) & 0xff); 733 n2 = ((laddr >> 16) & 0xff); 734 n3 = ((laddr >> 8) & 0xff); 735 n4 = (laddr & 0xff); 736 737 m->m_len = bptr - m->m_data; /* Adjust length */ 738 m->m_len += sprintf(bptr,"27 Entering Passive Mode (%d,%d,%d,%d,%d,%d)\r\n%s", 739 n1, n2, n3, n4, n5, n6, x==7?buff:""); 740 741 return 1; 742 } 743 744 return 1; 745 746 case EMU_KSH: 747 /* 748 * The kshell (Kerberos rsh) and shell services both pass 749 * a local port port number to carry signals to the server 750 * and stderr to the client. It is passed at the beginning 751 * of the connection as a NUL-terminated decimal ASCII string. 
752 */ 753 so->so_emu = 0; 754 for (lport = 0, i = 0; i < m->m_len-1; ++i) { 755 if (m->m_data[i] < '0' || m->m_data[i] > '9') 756 return 1; /* invalid number */ 757 lport *= 10; 758 lport += m->m_data[i] - '0'; 759 } 760 if (m->m_data[m->m_len-1] == '\0' && lport != 0 && 761 (so = solisten(pData, 0, so->so_laddr.s_addr, htons(lport), SS_FACCEPTONCE)) != NULL) 762 m->m_len = sprintf(m->m_data, "%d", ntohs(so->so_fport))+1; 763 return 1; 764 765 case EMU_IRC: 766 /* 767 * Need to emulate DCC CHAT, DCC SEND and DCC MOVE 768 */ 769 *(m->m_data+m->m_len) = 0; /* NULL terminate the string for strstr */ 770 if ((bptr = (char *)strstr(m->m_data, "DCC")) == NULL) 771 return 1; 772 773 /* The %256s is for the broken mIRC */ 774 if (sscanf(bptr, "DCC CHAT %256s %u %u", buff, &laddr, &lport) == 3) { 775 if ((so = solisten(pData, 0, htonl(laddr), htons(lport), SS_FACCEPTONCE)) == NULL) 776 return 1; 777 778 m->m_len = bptr - m->m_data; /* Adjust length */ 779 m->m_len += sprintf(bptr, "DCC CHAT chat %lu %u%c\n", 780 (unsigned long)ntohl(so->so_faddr.s_addr), 781 ntohs(so->so_fport), 1); 782 } else if (sscanf(bptr, "DCC SEND %256s %u %u %u", buff, &laddr, &lport, &n1) == 4) { 783 if ((so = solisten(pData, 0, htonl(laddr), htons(lport), SS_FACCEPTONCE)) == NULL) 784 return 1; 785 786 m->m_len = bptr - m->m_data; /* Adjust length */ 787 m->m_len += sprintf(bptr, "DCC SEND %s %lu %u %u%c\n", 788 buff, (unsigned long)ntohl(so->so_faddr.s_addr), 789 ntohs(so->so_fport), n1, 1); 790 } else if (sscanf(bptr, "DCC MOVE %256s %u %u %u", buff, &laddr, &lport, &n1) == 4) { 791 if ((so = solisten(pData, 0, htonl(laddr), htons(lport), SS_FACCEPTONCE)) == NULL) 792 return 1; 793 794 m->m_len = bptr - m->m_data; /* Adjust length */ 795 m->m_len += sprintf(bptr, "DCC MOVE %s %lu %u %u%c\n", 796 buff, (unsigned long)ntohl(so->so_faddr.s_addr), 797 ntohs(so->so_fport), n1, 1); 798 } 799 return 1; 800 800 801 801 #ifdef VBOX … … 803 803 * This is not legal when more than one slirp instance is active. */ 804 804 #else /* !VBOX */ 805 805 case EMU_REALAUDIO: 806 806 /* 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 807 * RealAudio emulation - JP. We must try to parse the incoming 808 * data and try to find the two characters that contain the 809 * port number. Then we redirect an udp port and replace the 810 * number with the real port we got. 811 * 812 * The 1.0 beta versions of the player are not supported 813 * any more. 814 * 815 * A typical packet for player version 1.0 (release version): 816 * 817 * 0000:50 4E 41 00 05 818 * 0000:00 01 00 02 1B D7 00 00 67 E6 6C DC 63 00 12 50 .....×..gælÜc..P 819 * 0010:4E 43 4C 49 45 4E 54 20 31 30 31 20 41 4C 50 48 NCLIENT 101 ALPH 820 * 0020:41 6C 00 00 52 00 17 72 61 66 69 6C 65 73 2F 76 Al..R..rafiles/v 821 * 0030:6F 61 2F 65 6E 67 6C 69 73 68 5F 2E 72 61 79 42 oa/english_.rayB 822 * 823 * Now the port number 0x1BD7 is found at offset 0x04 of the 824 * Now the port number 0x1BD7 is found at offset 0x04 of the 825 * second packet. This time we received five bytes first and 826 * then the rest. You never know how many bytes you get. 
827 * 828 * A typical packet for player version 2.0 (beta): 829 * 830 * 0000:50 4E 41 00 06 00 02 00 00 00 01 00 02 1B C1 00 PNA...........Á. 831 * 0010:00 67 75 78 F5 63 00 0A 57 69 6E 32 2E 30 2E 30 .guxõc..Win2.0.0 832 * 0020:2E 35 6C 00 00 52 00 1C 72 61 66 69 6C 65 73 2F .5l..R..rafiles/ 833 * 0030:77 65 62 73 69 74 65 2F 32 30 72 65 6C 65 61 73 website/20releas 834 * 0040:65 2E 72 61 79 53 00 00 06 36 42 e.rayS...6B 835 * 836 * Port number 0x1BC1 is found at offset 0x0d. 837 * 838 * This is just a horrible switch statement. Variable ra tells 839 * us where we're going. 840 */ 841 842 bptr = m->m_data; 843 while (bptr < m->m_data + m->m_len) { 844 u_short p; 845 static int ra = 0; 846 char ra_tbl[4]; 847 848 ra_tbl[0] = 0x50; 849 ra_tbl[1] = 0x4e; 850 ra_tbl[2] = 0x41; 851 ra_tbl[3] = 0; 852 853 switch (ra) { 854 case 0: 855 case 2: 856 case 3: 857 if (*bptr++ != ra_tbl[ra]) { 858 ra = 0; 859 continue; 860 } 861 break; 862 863 case 1: 864 /* 865 * We may get 0x50 several times, ignore them 866 */ 867 if (*bptr == 0x50) { 868 ra = 1; 869 bptr++; 870 continue; 871 } else if (*bptr++ != ra_tbl[ra]) { 872 ra = 0; 873 continue; 874 } 875 break; 876 877 case 4: 878 /* 879 * skip version number 880 */ 881 bptr++; 882 break; 883 884 case 5: 885 /* 886 * The difference between versions 1.0 and 887 * 2.0 is here. For future versions of 888 * the player this may need to be modified. 889 */ 890 if (*(bptr + 1) == 0x02) 891 bptr += 8; 892 else 893 bptr += 4; 894 break; 895 896 case 6: 897 /* This is the field containing the port 898 * number that RA-player is listening to. 899 */ 900 lport = (((u_char*)bptr)[0] << 8) 901 + ((u_char *)bptr)[1]; 902 if (lport < 6970) 903 lport += 256; /* don't know why */ 904 if (lport < 6970 || lport > 7170) 905 return 1; /* failed */ 906 907 /* try to get udp port between 6970 - 7170 */ 908 for (p = 6970; p < 7071; p++) { 909 if (udp_listen( htons(p), 910 so->so_laddr.s_addr, 911 htons(lport), 912 SS_FACCEPTONCE)) { 913 break; 914 } 915 } 916 if (p == 7071) 917 p = 0; 918 *(u_char *)bptr++ = (p >> 8) & 0xff; 919 *(u_char *)bptr++ = p & 0xff; 920 ra = 0; 921 return 1; /* port redirected, we're done */ 922 break; 923 924 default: 925 ra = 0; 926 } 927 ra++; 928 } 929 return 1; 930 930 #endif /* !VBOX */ 931 931 932 933 934 935 936 932 default: 933 /* Ooops, not emulated, won't call tcp_emu again */ 934 so->so_emu = 0; 935 return 1; 936 } 937 937 } 938 938 -
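Note: the tcptos[] scan in tcp_tos() above is a simple first-match table walk keyed on either endpoint's port. A self-contained sketch of that lookup follows; the IPTOS_*/EMU_* values and the trimmed-down table are stand-ins, and ports are taken in host byte order for brevity (the real code applies ntohs() first).

/*
 * Sketch of the tcptos[] lookup done by tcp_tos() above.  The
 * IPTOS_* and EMU_* values and the table contents are illustrative
 * stand-ins; ports are in host byte order for brevity.
 */
#include <stdio.h>

#define IPTOS_LOWDELAY   0x10
#define IPTOS_THROUGHPUT 0x08
#define EMU_NONE 0
#define EMU_FTP  1
#define EMU_IRC  2

struct tos_entry {
    unsigned short fport;  /* foreign port to match, 0 = wildcard */
    unsigned short lport;  /* local port to match,   0 = wildcard */
    unsigned char  tos;    /* IP TOS for the session */
    unsigned char  emu;    /* emulation hook, if any */
};

static const struct tos_entry tab[] = {
    {  0,   20, IPTOS_THROUGHPUT, EMU_NONE },  /* ftp data */
    { 21,   21, IPTOS_LOWDELAY,   EMU_FTP  },  /* ftp control */
    {  0, 6667, IPTOS_THROUGHPUT, EMU_IRC  },  /* IRC */
    {  0,    0, 0,                EMU_NONE }   /* terminator (tos == 0) */
};

/* First entry whose fport or lport matches wins, as in tcp_tos(). */
static unsigned lookup_tos(unsigned short fport, unsigned short lport,
                           unsigned *emu)
{
    int i;
    for (i = 0; tab[i].tos; i++) {
        if ((tab[i].fport && fport == tab[i].fport) ||
            (tab[i].lport && lport == tab[i].lport)) {
            *emu = tab[i].emu;
            return tab[i].tos;
        }
    }
    *emu = EMU_NONE;
    return 0;
}

int main(void)
{
    unsigned emu;
    unsigned tos = lookup_tos(21, 1234, &emu);
    printf("tos=%#x emu=%u\n", tos, emu);
    return 0;
}

The matched entry also selects the protocol-emulation hook, which is why tcp_tos() stores tcptos[i].emu into so->so_emu before returning the TOS byte.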
trunk/src/VBox/Devices/Network/slirp/tcp_timer.c
r13984 r14470 1 1 /* 2 2 * Copyright (c) 1982, 1986, 1988, 1990, 1993 3 * 3 * The Regents of the University of California. All rights reserved. 4 4 * 5 5 * Redistribution and use in source and binary forms, with or without … … 13 13 * 3. All advertising materials mentioning features or use of this software 14 14 * must display the following acknowledgement: 15 * 16 * 15 * This product includes software developed by the University of 16 * California, Berkeley and its contributors. 17 17 * 4. Neither the name of the University nor the names of its contributors 18 18 * may be used to endorse or promote products derived from this software … … 31 31 * SUCH DAMAGE. 32 32 * 33 * @(#)tcp_timer.c8.1 (Berkeley) 6/10/9333 * @(#)tcp_timer.c 8.1 (Berkeley) 6/10/93 34 34 * tcp_timer.c,v 1.2 1994/08/02 07:49:10 davidg Exp 35 35 */ … … 44 44 tcp_fasttimo(PNATState pData) 45 45 { 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 46 register struct socket *so; 47 register struct tcpcb *tp; 48 49 DEBUG_CALL("tcp_fasttimo"); 50 51 so = tcb.so_next; 52 if (so) 53 for (; so != &tcb; so = so->so_next) 54 if ((tp = (struct tcpcb *)so->so_tcpcb) && 55 (tp->t_flags & TF_DELACK)) { 56 tp->t_flags &= ~TF_DELACK; 57 tp->t_flags |= TF_ACKNOW; 58 tcpstat.tcps_delack++; 59 (void) tcp_output(pData, tp); 60 } 61 61 } 62 62 … … 69 69 tcp_slowtimo(PNATState pData) 70 70 { 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 71 register struct socket *ip, *ipnxt; 72 register struct tcpcb *tp; 73 register int i; 74 75 DEBUG_CALL("tcp_slowtimo"); 76 77 /* 78 * Search through tcb's and update active timers. 79 */ 80 ip = tcb.so_next; 81 if (ip == 0) 82 return; 83 for (; ip != &tcb; ip = ipnxt) { 84 ipnxt = ip->so_next; 85 tp = sototcpcb(ip); 86 if (tp == 0) 87 continue; 88 for (i = 0; i < TCPT_NTIMERS; i++) { 89 if (tp->t_timer[i] && --tp->t_timer[i] == 0) { 90 tcp_timers(pData, tp,i); 91 if (ipnxt->so_prev != ip) 92 goto tpgone; 93 } 94 } 95 tp->t_idle++; 96 if (tp->t_rtt) 97 tp->t_rtt++; 98 98 tpgone: 99 100 101 tcp_iss += TCP_ISSINCR/PR_SLOWHZ;/* increment iss */99 ; 100 } 101 tcp_iss += TCP_ISSINCR/PR_SLOWHZ; /* increment iss */ 102 102 #ifdef TCP_COMPAT_42 103 104 tcp_iss = 0;/* XXX */103 if ((int)tcp_iss < 0) 104 tcp_iss = 0; /* XXX */ 105 105 #endif 106 tcp_now++;/* for timestamps */106 tcp_now++; /* for timestamps */ 107 107 } 108 108 … … 112 112 void 113 113 tcp_canceltimers(tp) 114 114 struct tcpcb *tp; 115 115 { 116 117 118 119 116 register int i; 117 118 for (i = 0; i < TCPT_NTIMERS; i++) 119 tp->t_timer[i] = 0; 120 120 } 121 121 122 const int 122 const int tcp_backoff[TCP_MAXRXTSHIFT + 1] = 123 123 { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 }; 124 124 … … 129 129 tcp_timers(PNATState pData, register struct tcpcb *tp, int timer) 130 130 { 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 /* 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 /* 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 131 register int rexmt; 132 133 DEBUG_CALL("tcp_timers"); 134 135 switch (timer) { 136 137 /* 138 * 2 MSL 
timeout in shutdown went off. If we're closed but 139 * still waiting for peer to close and connection has been idle 140 * too long, or if 2MSL time is up from TIME_WAIT, delete connection 141 * control block. Otherwise, check again in a bit. 142 */ 143 case TCPT_2MSL: 144 if (tp->t_state != TCPS_TIME_WAIT && 145 tp->t_idle <= tcp_maxidle) 146 tp->t_timer[TCPT_2MSL] = tcp_keepintvl; 147 else 148 tp = tcp_close(pData, tp); 149 break; 150 151 /* 152 * Retransmission timer went off. Message has not 153 * been acked within retransmit interval. Back off 154 * to a longer retransmit interval and retransmit one segment. 155 */ 156 case TCPT_REXMT: 157 158 /* 159 * XXXXX If a packet has timed out, then remove all the queued 160 * packets for that session. 161 */ 162 163 if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) { 164 /* 165 * This is a hack to suit our terminal server here at the uni of canberra 166 * since they have trouble with zeroes... It usually lets them through 167 * unharmed, but under some conditions, it'll eat the zeros. If we 168 * keep retransmitting it, it'll keep eating the zeroes, so we keep 169 * retransmitting, and eventually the connection dies... 170 * (this only happens on incoming data) 171 * 172 * So, if we were gonna drop the connection from too many retransmits, 173 * don't... instead halve the t_maxseg, which might break up the NULLs and 174 * let them through 175 * 176 * *sigh* 177 */ 178 179 tp->t_maxseg >>= 1; 180 if (tp->t_maxseg < 32) { 181 /* 182 * We tried our best, now the connection must die! 183 */ 184 tp->t_rxtshift = TCP_MAXRXTSHIFT; 185 tcpstat.tcps_timeoutdrop++; 186 tp = tcp_drop(pData, tp, tp->t_softerror); 187 /* tp->t_softerror : ETIMEDOUT); */ /* XXX */ 188 return (tp); /* XXX */ 189 } 190 191 /* 192 * Set rxtshift to 6, which is still at the maximum 193 * backoff time 194 */ 195 tp->t_rxtshift = 6; 196 } 197 tcpstat.tcps_rexmttimeo++; 198 rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift]; 199 TCPT_RANGESET(tp->t_rxtcur, rexmt, 200 (short)tp->t_rttmin, TCPTV_REXMTMAX); /* XXX */ 201 tp->t_timer[TCPT_REXMT] = tp->t_rxtcur; 202 /* 203 * If losing, let the lower level know and try for 204 * a better route. Also, if we backed off this far, 205 * our srtt estimate is probably bogus. Clobber it 206 * so we'll take the next rtt measurement as our srtt; 207 * move the current srtt into rttvar to keep the current 208 * retransmit times until then. 209 */ 210 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { 211 /* in_losing(tp->t_inpcb); */ 212 tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT); 213 tp->t_srtt = 0; 214 } 215 tp->snd_nxt = tp->snd_una; 216 /* 217 * If timing a segment in this window, stop the timer. 218 */ 219 tp->t_rtt = 0; 220 /* 221 * Close the congestion window down to one segment 222 * (we'll open it by one segment for each ack we get). 223 * Since we probably have a window's worth of unacked 224 * data accumulated, this "slow start" keeps us from 225 * dumping all that data as back-to-back packets (which 226 * might overwhelm an intermediate gateway). 227 * 228 * There are two phases to the opening: Initially we 229 * open by one mss on each ack. This makes the window 230 * size increase exponentially with time. If the 231 * window is larger than the path can handle, this 232 * exponential growth results in dropped packet(s) 233 * almost immediately. To get more time between 234 * drops but still "push" the network to take advantage 235 * of improving conditions, we switch from exponential 236 * to linear window opening at some threshold size. 
237 * For a threshold, we use half the current window 238 * size, truncated to a multiple of the mss. 239 * 240 * (the minimum cwnd that will give us exponential 241 * growth is 2 mss. We don't allow the threshold 242 * to go below this.) 243 */ 244 { 245 u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg; 246 if (win < 2) 247 win = 2; 248 tp->snd_cwnd = tp->t_maxseg; 249 tp->snd_ssthresh = win * tp->t_maxseg; 250 tp->t_dupacks = 0; 251 } 252 (void) tcp_output(pData, tp); 253 break; 254 255 /* 256 * Persistence timer into zero window. 257 * Force a byte to be output, if possible. 258 */ 259 case TCPT_PERSIST: 260 tcpstat.tcps_persisttimeo++; 261 tcp_setpersist(tp); 262 tp->t_force = 1; 263 (void) tcp_output(pData, tp); 264 tp->t_force = 0; 265 break; 266 267 /* 268 * Keep-alive timer went off; send something 269 * or drop connection if idle for too long. 270 */ 271 case TCPT_KEEP: 272 tcpstat.tcps_keeptimeo++; 273 if (tp->t_state < TCPS_ESTABLISHED) 274 goto dropit; 275 276 /* if (tp->t_socket->so_options & SO_KEEPALIVE && */ 277 if ((so_options) && tp->t_state <= TCPS_CLOSE_WAIT) { 278 if (tp->t_idle >= tcp_keepidle + tcp_maxidle) 279 goto dropit; 280 /* 281 * Send a packet designed to force a response 282 * if the peer is up and reachable: 283 * either an ACK if the connection is still alive, 284 * or an RST if the peer has closed the connection 285 * due to timeout or reboot. 286 * Using sequence number tp->snd_una-1 287 * causes the transmitted zero-length segment 288 * to lie outside the receive window; 289 * by the protocol spec, this requires the 290 * correspondent TCP to respond. 291 */ 292 tcpstat.tcps_keepprobe++; 293 293 #ifdef TCP_COMPAT_42 294 295 296 297 298 299 294 /* 295 * The keepalive packet must have nonzero length 296 * to get a 4.2 host to respond. 297 */ 298 tcp_respond(tp, &tp->t_template, (struct mbuf *)NULL, 299 tp->rcv_nxt - 1, tp->snd_una - 1, 0); 300 300 #else 301 302 301 tcp_respond(pData, tp, &tp->t_template, (struct mbuf *)NULL, 302 tp->rcv_nxt, tp->snd_una - 1, 0); 303 303 #endif 304 305 306 307 308 309 310 311 312 313 314 315 304 tp->t_timer[TCPT_KEEP] = tcp_keepintvl; 305 } else 306 tp->t_timer[TCPT_KEEP] = tcp_keepidle; 307 break; 308 309 dropit: 310 tcpstat.tcps_keepdrops++; 311 tp = tcp_drop(pData, tp, 0); /* ETIMEDOUT); */ 312 break; 313 } 314 315 return (tp); 316 316 } -
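Note: the TCPT_REXMT arm above never gives up at TCP_MAXRXTSHIFT outright: it halves t_maxseg and pins the shift back at 6, and only drops the connection once the segment size would fall below 32 bytes. A sketch of just that state machine, with illustrative names and a 1460-byte starting MSS:

/*
 * Sketch of the "halve t_maxseg instead of dropping" logic in the
 * TCPT_REXMT case above.  Names are illustrative; the 32-byte
 * floor and the shift reset to 6 mirror tcp_timers().
 */
#include <stdio.h>

#define TCP_MAXRXTSHIFT 12

/* One retransmit timeout: returns 0 when the connection must drop. */
static int rexmt_tick(int *rxtshift, unsigned *maxseg)
{
    if (++*rxtshift > TCP_MAXRXTSHIFT) {
        *maxseg >>= 1;         /* try smaller segments first */
        if (*maxseg < 32)
            return 0;          /* gave up: connection dies */
        *rxtshift = 6;         /* stay at the maximum backoff time */
    }
    return 1;
}

int main(void)
{
    int shift = 0, n = 0;
    unsigned maxseg = 1460;    /* typical Ethernet MSS */
    while (rexmt_tick(&shift, &maxseg))
        printf("timeout %2d: shift=%2d maxseg=%u\n", ++n, shift, maxseg);
    return 0;
}

Running this shows the MSS stepping 1460, 730, 365, ... every time the shift counter overruns, until the 32-byte floor finally forces the drop.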
trunk/src/VBox/Devices/Network/slirp/tcp_timer.h
r1076 r14470 1 1 /* 2 2 * Copyright (c) 1982, 1986, 1993 3 * 3 * The Regents of the University of California. All rights reserved. 4 4 * 5 5 * Redistribution and use in source and binary forms, with or without … … 13 13 * 3. All advertising materials mentioning features or use of this software 14 14 * must display the following acknowledgement: 15 * 16 * 15 * This product includes software developed by the University of 16 * California, Berkeley and its contributors. 17 17 * 4. Neither the name of the University nor the names of its contributors 18 18 * may be used to endorse or promote products derived from this software … … 31 31 * SUCH DAMAGE. 32 32 * 33 * @(#)tcp_timer.h8.1 (Berkeley) 6/10/9333 * @(#)tcp_timer.h 8.1 (Berkeley) 6/10/93 34 34 * tcp_timer.h,v 1.4 1994/08/21 05:27:38 paul Exp 35 35 */ … … 42 42 * down PR_SLOWHZ times a second. 43 43 */ 44 #define TCPT_NTIMERS444 #define TCPT_NTIMERS 4 45 45 46 #define TCPT_REXMT 0/* retransmit */47 #define TCPT_PERSIST 1/* retransmit persistence */48 #define TCPT_KEEP 2/* keep alive */49 #define TCPT_2MSL 3/* 2*msl quiet time timer */46 #define TCPT_REXMT 0 /* retransmit */ 47 #define TCPT_PERSIST 1 /* retransmit persistence */ 48 #define TCPT_KEEP 2 /* keep alive */ 49 #define TCPT_2MSL 3 /* 2*msl quiet time timer */ 50 50 51 51 /* … … 78 78 * (and keepalives have been enabled on the socket), we begin to probe 79 79 * the connection. We force the peer to send us a segment by sending: 80 * 80 * <SEQ=SND.UNA-1><ACK=RCV.NXT><CTL=ACK> 81 81 * This segment is (deliberately) outside the window, and should elicit 82 82 * an ack segment in response from the peer. If, despite the TCPT_KEEP … … 90 90 #define TCPTV_MSL ( 5*PR_SLOWHZ) /* max seg lifetime (hah!) */ 91 91 92 #define TCPTV_SRTTBASE 0/* base roundtrip time;93 94 #define TCPTV_SRTTDFLT ( 3*PR_SLOWHZ)/* assumed RTT if no info */92 #define TCPTV_SRTTBASE 0 /* base roundtrip time; 93 if 0, no idea yet */ 94 #define TCPTV_SRTTDFLT ( 3*PR_SLOWHZ) /* assumed RTT if no info */ 95 95 96 #define TCPTV_PERSMIN ( 5*PR_SLOWHZ)/* retransmit persistence */97 #define TCPTV_PERSMAX ( 60*PR_SLOWHZ)/* maximum persist interval */96 #define TCPTV_PERSMIN ( 5*PR_SLOWHZ) /* retransmit persistence */ 97 #define TCPTV_PERSMAX ( 60*PR_SLOWHZ) /* maximum persist interval */ 98 98 99 #define TCPTV_KEEP_INIT ( 75*PR_SLOWHZ)/* initial connect keep alive */100 #define TCPTV_KEEP_IDLE (120*60*PR_SLOWHZ)/* dflt time before probing */101 #define TCPTV_KEEPINTVL ( 75*PR_SLOWHZ)/* default probe interval */102 #define TCPTV_KEEPCNT 8/* max probes before drop */99 #define TCPTV_KEEP_INIT ( 75*PR_SLOWHZ) /* initial connect keep alive */ 100 #define TCPTV_KEEP_IDLE (120*60*PR_SLOWHZ) /* dflt time before probing */ 101 #define TCPTV_KEEPINTVL ( 75*PR_SLOWHZ) /* default probe interval */ 102 #define TCPTV_KEEPCNT 8 /* max probes before drop */ 103 103 104 #define TCPTV_MIN ( 1*PR_SLOWHZ)/* minimum allowable value */105 /* #define TCPTV_REXMTMAX ( 64*PR_SLOWHZ) *//* max allowable REXMT value */106 #define TCPTV_REXMTMAX ( 12*PR_SLOWHZ) 104 #define TCPTV_MIN ( 1*PR_SLOWHZ) /* minimum allowable value */ 105 /* #define TCPTV_REXMTMAX ( 64*PR_SLOWHZ) */ /* max allowable REXMT value */ 106 #define TCPTV_REXMTMAX ( 12*PR_SLOWHZ) /* max allowable REXMT value */ 107 107 108 #define TCP_LINGERTIME 120/* linger at most 2 minutes */108 #define TCP_LINGERTIME 120 /* linger at most 2 minutes */ 109 109 110 110 #define TCP_MAXRXTSHIFT 12 /* maximum retransmits */ 111 111 112 112 113 #ifdef 113 #ifdef TCPTIMERS 114 114 char *tcptimers[] = 115 
115 { "REXMT", "PERSIST", "KEEP", "2MSL" }; … … 119 119 * Force a time value to be in a certain range. 120 120 */ 121 #define 122 123 124 125 126 121 #define TCPT_RANGESET(tv, value, tvmin, tvmax) { \ 122 (tv) = (value); \ 123 if ((tv) < (tvmin)) \ 124 (tv) = (tvmin); \ 125 else if ((tv) > (tvmax)) \ 126 (tv) = (tvmax); \ 127 127 } 128 128 -
trunk/src/VBox/Devices/Network/slirp/tcp_var.h
r14407 r14470 1 1 /* 2 2 * Copyright (c) 1982, 1986, 1993, 1994 3 * 3 * The Regents of the University of California. All rights reserved. 4 4 * 5 5 * Redistribution and use in source and binary forms, with or without … … 13 13 * 3. All advertising materials mentioning features or use of this software 14 14 * must display the following acknowledgement: 15 * 16 * 15 * This product includes software developed by the University of 16 * California, Berkeley and its contributors. 17 17 * 4. Neither the name of the University nor the names of its contributors 18 18 * may be used to endorse or promote products derived from this software … … 31 31 * SUCH DAMAGE. 32 32 * 33 * @(#)tcp_var.h8.3 (Berkeley) 4/10/9433 * @(#)tcp_var.h 8.3 (Berkeley) 4/10/94 34 34 * tcp_var.h,v 1.3 1994/08/21 05:27:39 paul Exp 35 35 */ … … 70 70 /* TCP segment queue entry */ 71 71 struct tseg_qent { 72 73 int tqe_len;/* TCP segment data length */74 struct tcphdr *tqe_th;/* a pointer to tcp header */75 struct mbuf *tqe_m;/* mbuf contains packet */72 LIST_ENTRY(tseg_qent) tqe_q; 73 int tqe_len; /* TCP segment data length */ 74 struct tcphdr *tqe_th; /* a pointer to tcp header */ 75 struct mbuf *tqe_m; /* mbuf contains packet */ 76 76 }; 77 77 LIST_HEAD(tsegqe_head, tseg_qent); … … 83 83 struct tcpcb { 84 84 #ifndef VBOX_WITH_BSD_TCP_REASS 85 tcpiphdrp_32 seg_next;/* sequencing queue */86 85 tcpiphdrp_32 seg_next; /* sequencing queue */ 86 tcpiphdrp_32 seg_prev; 87 87 #else /* VBOX_WITH_BSD_TCP_REASS */ 88 89 struct tsegqe_head t_segq;/* segment reassembly queue */90 int t_segqlen;/* segment reassembly queue length */88 LIST_ENTRY(tcpcb) t_list; 89 struct tsegqe_head t_segq; /* segment reassembly queue */ 90 int t_segqlen; /* segment reassembly queue length */ 91 91 #endif /* VBOX_WITH_BSD_TCP_REASS */ 92 short t_state;/* state of this connection */93 short t_timer[TCPT_NTIMERS];/* tcp timers */94 short t_rxtshift;/* log(2) of rexmt exp. backoff */95 short t_rxtcur;/* current retransmit value */96 short t_dupacks;/* consecutive dup acks recd */97 u_short t_maxseg;/* maximum segment size */98 char t_force;/* 1 if forcing out a byte */99 u_shortt_flags;100 #define TF_ACKNOW 0x0001/* ack peer immediately */101 #define TF_DELACK 0x0002/* ack, but try to delay it */102 #define TF_NODELAY 0x0004/* don't delay packets to coalesce */103 #define TF_NOOPT 0x0008/* don't use tcp options */104 #define TF_SENTFIN 0x0010/* have sent FIN */105 #define TF_REQ_SCALE 0x0020/* have/will request window scaling */106 #define TF_RCVD_SCALE 0x0040/* other side has requested scaling */107 #define TF_REQ_TSTMP 0x0080/* have/will request timestamps */108 #define TF_RCVD_TSTMP 0x0100/* a timestamp was received in SYN */109 #define TF_SACK_PERMIT 0x0200/* other side said I could SACK */110 111 112 /* struct tcpiphdr *t_template;/ * skeletal packet for transmit */113 structtcpiphdr t_template;114 115 struct socket *t_socket;/* back pointer to socket */92 short t_state; /* state of this connection */ 93 short t_timer[TCPT_NTIMERS]; /* tcp timers */ 94 short t_rxtshift; /* log(2) of rexmt exp. 
backoff */ 95 short t_rxtcur; /* current retransmit value */ 96 short t_dupacks; /* consecutive dup acks recd */ 97 u_short t_maxseg; /* maximum segment size */ 98 char t_force; /* 1 if forcing out a byte */ 99 u_short t_flags; 100 #define TF_ACKNOW 0x0001 /* ack peer immediately */ 101 #define TF_DELACK 0x0002 /* ack, but try to delay it */ 102 #define TF_NODELAY 0x0004 /* don't delay packets to coalesce */ 103 #define TF_NOOPT 0x0008 /* don't use tcp options */ 104 #define TF_SENTFIN 0x0010 /* have sent FIN */ 105 #define TF_REQ_SCALE 0x0020 /* have/will request window scaling */ 106 #define TF_RCVD_SCALE 0x0040 /* other side has requested scaling */ 107 #define TF_REQ_TSTMP 0x0080 /* have/will request timestamps */ 108 #define TF_RCVD_TSTMP 0x0100 /* a timestamp was received in SYN */ 109 #define TF_SACK_PERMIT 0x0200 /* other side said I could SACK */ 110 111 /* Make it static for now */ 112 /* struct tcpiphdr *t_template; / * skeletal packet for transmit */ 113 struct tcpiphdr t_template; 114 115 struct socket *t_socket; /* back pointer to socket */ 116 116 /* 117 117 * The following fields are used as in the protocol specification. … … 119 119 */ 120 120 /* send sequence variables */ 121 tcp_seq snd_una;/* send unacknowledged */122 tcp_seq snd_nxt;/* send next */123 tcp_seq snd_up;/* send urgent pointer */124 tcp_seq snd_wl1;/* window update seg seq number */125 tcp_seq snd_wl2;/* window update seg ack number */126 tcp_seq iss;/* initial send sequence number */127 u_int32_t snd_wnd;/* send window */121 tcp_seq snd_una; /* send unacknowledged */ 122 tcp_seq snd_nxt; /* send next */ 123 tcp_seq snd_up; /* send urgent pointer */ 124 tcp_seq snd_wl1; /* window update seg seq number */ 125 tcp_seq snd_wl2; /* window update seg ack number */ 126 tcp_seq iss; /* initial send sequence number */ 127 u_int32_t snd_wnd; /* send window */ 128 128 /* receive sequence variables */ 129 u_int32_t rcv_wnd;/* receive window */130 tcp_seq rcv_nxt;/* receive next */131 tcp_seq rcv_up;/* receive urgent pointer */132 tcp_seq irs;/* initial receive sequence number */129 u_int32_t rcv_wnd; /* receive window */ 130 tcp_seq rcv_nxt; /* receive next */ 131 tcp_seq rcv_up; /* receive urgent pointer */ 132 tcp_seq irs; /* initial receive sequence number */ 133 133 /* 134 134 * Additional variables for this implementation. 135 135 */ 136 136 /* receive variables */ 137 tcp_seq rcv_adv;/* advertised window */137 tcp_seq rcv_adv; /* advertised window */ 138 138 /* retransmit variables */ 139 tcp_seq snd_max;/* highest sequence number sent;140 141 139 tcp_seq snd_max; /* highest sequence number sent; 140 * used to recognize retransmits 141 */ 142 142 /* congestion control (for slow start, source quench, retransmit after loss) */ 143 u_int32_t snd_cwnd;/* congestion-controlled window */144 u_int32_t snd_ssthresh;/* snd_cwnd size threshold for145 146 147 143 u_int32_t snd_cwnd; /* congestion-controlled window */ 144 u_int32_t snd_ssthresh; /* snd_cwnd size threshold for 145 * for slow start exponential to 146 * linear switch 147 */ 148 148 /* 149 149 * transmit timing stuff. See below for scale of srtt and rttvar. 150 150 * "Variance" is actually smoothed difference. 
151 151 */ 152 short t_idle;/* inactivity time */153 short t_rtt;/* round trip time */154 tcp_seq t_rtseq;/* sequence number being timed */155 short t_srtt;/* smoothed round-trip time */156 short t_rttvar;/* variance in round-trip time */157 u_short t_rttmin;/* minimum rtt allowed */158 u_int32_t max_sndwnd;/* largest window peer has offered */152 short t_idle; /* inactivity time */ 153 short t_rtt; /* round trip time */ 154 tcp_seq t_rtseq; /* sequence number being timed */ 155 short t_srtt; /* smoothed round-trip time */ 156 short t_rttvar; /* variance in round-trip time */ 157 u_short t_rttmin; /* minimum rtt allowed */ 158 u_int32_t max_sndwnd; /* largest window peer has offered */ 159 159 160 160 /* out-of-band data */ 161 char t_oobflags;/* have some */162 char t_iobc;/* input character */163 #define TCPOOB_HAVEDATA0x01164 #define TCPOOB_HADDATA0x02165 short t_softerror;/* possible error not yet reported */161 char t_oobflags; /* have some */ 162 char t_iobc; /* input character */ 163 #define TCPOOB_HAVEDATA 0x01 164 #define TCPOOB_HADDATA 0x02 165 short t_softerror; /* possible error not yet reported */ 166 166 167 167 /* RFC 1323 variables */ 168 u_char snd_scale;/* window scaling for send window */169 u_char rcv_scale;/* window scaling for recv window */170 u_char request_r_scale;/* pending window scaling */171 u_charrequested_s_scale;172 u_int32_t ts_recent;/* timestamp echo data */173 u_int32_t ts_recent_age;/* when last updated */174 tcp_seqlast_ack_sent;168 u_char snd_scale; /* window scaling for send window */ 169 u_char rcv_scale; /* window scaling for recv window */ 170 u_char request_r_scale; /* pending window scaling */ 171 u_char requested_s_scale; 172 u_int32_t ts_recent; /* timestamp echo data */ 173 u_int32_t ts_recent_age; /* when last updated */ 174 tcp_seq last_ack_sent; 175 175 176 176 }; … … 180 180 #endif /*VBOX_WITH_BSD_TCP_REASS*/ 181 181 182 #define sototcpcb(so)((so)->so_tcpcb)182 #define sototcpcb(so) ((so)->so_tcpcb) 183 183 184 184 /* … … 191 191 * binary point, and is smoothed with an ALPHA of 0.75. 192 192 */ 193 #define TCP_RTT_SCALE 8/* multiplier for srtt; 3 bits frac. */194 #define TCP_RTT_SHIFT 3/* shift for srtt; 3 bits frac. */195 #define TCP_RTTVAR_SCALE 4/* multiplier for rttvar; 2 bits */196 #define TCP_RTTVAR_SHIFT 2/* multiplier for rttvar; 2 bits */193 #define TCP_RTT_SCALE 8 /* multiplier for srtt; 3 bits frac. */ 194 #define TCP_RTT_SHIFT 3 /* shift for srtt; 3 bits frac. */ 195 #define TCP_RTTVAR_SCALE 4 /* multiplier for rttvar; 2 bits */ 196 #define TCP_RTTVAR_SHIFT 2 /* multiplier for rttvar; 2 bits */ 197 197 198 198 /* … … 209 209 * is the same as the multiplier for rttvar. 210 210 */ 211 #define 212 211 #define TCP_REXMTVAL(tp) \ 212 (((tp)->t_srtt >> TCP_RTT_SHIFT) + (tp)->t_rttvar) 213 213 214 214 /* XXX … … 239 239 */ 240 240 struct tcpstat_t { 241 u_long tcps_connattempt;/* connections initiated */242 u_long tcps_accepts;/* connections accepted */243 u_long tcps_connects;/* connections established */244 u_long tcps_drops;/* connections dropped */245 u_long tcps_conndrops;/* embryonic connections dropped */246 u_long tcps_closed;/* conn. closed (includes drops) */247 u_long tcps_segstimed;/* segs where we tried to get rtt */248 u_long tcps_rttupdated;/* times we succeeded */249 u_long tcps_delack;/* delayed acks sent */250 u_long tcps_timeoutdrop;/* conn. 
dropped in rxmt timeout */251 u_long tcps_rexmttimeo;/* retransmit timeouts */252 u_long tcps_persisttimeo;/* persist timeouts */253 u_long tcps_keeptimeo;/* keepalive timeouts */254 u_long tcps_keepprobe;/* keepalive probes sent */255 u_long tcps_keepdrops;/* connections dropped in keepalive */256 257 u_long tcps_sndtotal;/* total packets sent */258 u_long tcps_sndpack;/* data packets sent */259 u_long tcps_sndbyte;/* data bytes sent */260 u_long tcps_sndrexmitpack;/* data packets retransmitted */261 u_long tcps_sndrexmitbyte;/* data bytes retransmitted */262 u_long tcps_sndacks;/* ack-only packets sent */263 u_long tcps_sndprobe;/* window probes sent */264 u_long tcps_sndurg;/* packets sent with URG only */265 u_long tcps_sndwinup;/* window update-only packets sent */266 u_long tcps_sndctrl;/* control (SYN|FIN|RST) packets sent */267 268 u_long tcps_rcvtotal;/* total packets received */269 u_long tcps_rcvpack;/* packets received in sequence */270 u_long tcps_rcvbyte;/* bytes received in sequence */271 u_long tcps_rcvbadsum;/* packets received with ccksum errs */272 u_long tcps_rcvbadoff;/* packets received with bad offset */273 /* u_long tcps_rcvshort; *//* packets received too short */274 u_long tcps_rcvduppack;/* duplicate-only packets received */275 u_long tcps_rcvdupbyte;/* duplicate-only bytes received */276 u_long tcps_rcvpartduppack;/* packets with some duplicate data */277 u_long tcps_rcvpartdupbyte;/* dup. bytes in part-dup. packets */278 u_long tcps_rcvoopack;/* out-of-order packets received */279 u_long tcps_rcvoobyte;/* out-of-order bytes received */280 u_long tcps_rcvpackafterwin;/* packets with data after window */281 u_long tcps_rcvbyteafterwin;/* bytes rcvd after window */282 u_long tcps_rcvafterclose;/* packets rcvd after "close" */283 u_long tcps_rcvwinprobe;/* rcvd window probe packets */284 u_long tcps_rcvdupack;/* rcvd duplicate acks */285 u_long tcps_rcvacktoomuch;/* rcvd acks for unsent data */286 u_long tcps_rcvackpack;/* rcvd ack packets */287 u_long tcps_rcvackbyte;/* bytes acked by rcvd acks */288 u_long tcps_rcvwinupd;/* rcvd window update packets */289 /* u_long tcps_pawsdrop; *//* segments dropped due to PAWS */290 u_long tcps_predack;/* times hdr predict ok for acks */291 u_long tcps_preddat;/* times hdr predict ok for data pkts */292 u_long tcps_socachemiss;/* tcp_last_so misses */293 u_long tcps_didnuttin;/* Times tcp_output didn't do anything XXX */241 u_long tcps_connattempt; /* connections initiated */ 242 u_long tcps_accepts; /* connections accepted */ 243 u_long tcps_connects; /* connections established */ 244 u_long tcps_drops; /* connections dropped */ 245 u_long tcps_conndrops; /* embryonic connections dropped */ 246 u_long tcps_closed; /* conn. closed (includes drops) */ 247 u_long tcps_segstimed; /* segs where we tried to get rtt */ 248 u_long tcps_rttupdated; /* times we succeeded */ 249 u_long tcps_delack; /* delayed acks sent */ 250 u_long tcps_timeoutdrop; /* conn. 
dropped in rxmt timeout */ 251 u_long tcps_rexmttimeo; /* retransmit timeouts */ 252 u_long tcps_persisttimeo; /* persist timeouts */ 253 u_long tcps_keeptimeo; /* keepalive timeouts */ 254 u_long tcps_keepprobe; /* keepalive probes sent */ 255 u_long tcps_keepdrops; /* connections dropped in keepalive */ 256 257 u_long tcps_sndtotal; /* total packets sent */ 258 u_long tcps_sndpack; /* data packets sent */ 259 u_long tcps_sndbyte; /* data bytes sent */ 260 u_long tcps_sndrexmitpack; /* data packets retransmitted */ 261 u_long tcps_sndrexmitbyte; /* data bytes retransmitted */ 262 u_long tcps_sndacks; /* ack-only packets sent */ 263 u_long tcps_sndprobe; /* window probes sent */ 264 u_long tcps_sndurg; /* packets sent with URG only */ 265 u_long tcps_sndwinup; /* window update-only packets sent */ 266 u_long tcps_sndctrl; /* control (SYN|FIN|RST) packets sent */ 267 268 u_long tcps_rcvtotal; /* total packets received */ 269 u_long tcps_rcvpack; /* packets received in sequence */ 270 u_long tcps_rcvbyte; /* bytes received in sequence */ 271 u_long tcps_rcvbadsum; /* packets received with ccksum errs */ 272 u_long tcps_rcvbadoff; /* packets received with bad offset */ 273 /* u_long tcps_rcvshort; */ /* packets received too short */ 274 u_long tcps_rcvduppack; /* duplicate-only packets received */ 275 u_long tcps_rcvdupbyte; /* duplicate-only bytes received */ 276 u_long tcps_rcvpartduppack; /* packets with some duplicate data */ 277 u_long tcps_rcvpartdupbyte; /* dup. bytes in part-dup. packets */ 278 u_long tcps_rcvoopack; /* out-of-order packets received */ 279 u_long tcps_rcvoobyte; /* out-of-order bytes received */ 280 u_long tcps_rcvpackafterwin; /* packets with data after window */ 281 u_long tcps_rcvbyteafterwin; /* bytes rcvd after window */ 282 u_long tcps_rcvafterclose; /* packets rcvd after "close" */ 283 u_long tcps_rcvwinprobe; /* rcvd window probe packets */ 284 u_long tcps_rcvdupack; /* rcvd duplicate acks */ 285 u_long tcps_rcvacktoomuch; /* rcvd acks for unsent data */ 286 u_long tcps_rcvackpack; /* rcvd ack packets */ 287 u_long tcps_rcvackbyte; /* bytes acked by rcvd acks */ 288 u_long tcps_rcvwinupd; /* rcvd window update packets */ 289 /* u_long tcps_pawsdrop; */ /* segments dropped due to PAWS */ 290 u_long tcps_predack; /* times hdr predict ok for acks */ 291 u_long tcps_preddat; /* times hdr predict ok for data pkts */ 292 u_long tcps_socachemiss; /* tcp_last_so misses */ 293 u_long tcps_didnuttin; /* Times tcp_output didn't do anything XXX */ 294 294 #ifdef VBOX_WITH_BSD_TCP_REASS 295 295 u_long tcps_rcvmemdrop; -
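Note: the TCP_RTT_SHIFT/TCP_RTTVAR_SHIFT comments above describe fixed-point estimators: srtt carries 3 fraction bits (gain 1/8), rttvar carries 2 (gain 1/4). The sketch below is a simplified version of the classic BSD update feeding TCP_REXMTVAL(); it omits the special case for an unset srtt, and all names and sample values are illustrative.

/*
 * Simplified fixed-point RTT estimator matching the scaling that
 * TCP_RTT_SHIFT / TCP_RTTVAR_SHIFT describe: srtt keeps 3 fraction
 * bits (gain 1/8), rttvar keeps 2 (gain 1/4).
 */
#include <stdio.h>

#define TCP_RTT_SHIFT    3
#define TCP_RTTVAR_SHIFT 2

struct rtt_state { short srtt, rttvar; };

/* Fold one measurement (in ticks) into the smoothed estimates. */
static void rtt_update(struct rtt_state *s, short m)
{
    short delta = m - (s->srtt >> TCP_RTT_SHIFT);
    s->srtt += delta;                    /* srtt = 7/8 srtt + 1/8 m */
    if (s->srtt <= 0)
        s->srtt = 1;
    if (delta < 0)
        delta = -delta;
    delta -= s->rttvar >> TCP_RTTVAR_SHIFT;
    s->rttvar += delta;                  /* rttvar = 3/4 var + 1/4 |d| */
    if (s->rttvar <= 0)
        s->rttvar = 1;
}

/* TCP_REXMTVAL(): smoothed rtt plus the x4-scaled variance term. */
static short rexmtval(const struct rtt_state *s)
{
    return (s->srtt >> TCP_RTT_SHIFT) + s->rttvar;
}

int main(void)
{
    struct rtt_state s = { 8 << TCP_RTT_SHIFT, 24 };  /* arbitrary start */
    short samples[] = { 4, 6, 5, 9, 4 };
    int i;
    for (i = 0; i < 5; i++) {
        rtt_update(&s, samples[i]);
        printf("m=%d srtt=%d rttvar=%d rexmt=%d\n",
               samples[i], s.srtt, s.rttvar, rexmtval(&s));
    }
    return 0;
}

Keeping srtt and rttvar pre-scaled is the design point: the estimator needs no division, only shifts and adds, which is why the "multiplier" comments above insist on the exact bit counts.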
trunk/src/VBox/Devices/Network/slirp/tftp.c
r5401 r14470 75 75 if (spt->in_use) { 76 76 if (!memcmp(&spt->client_ip, &tp->ip.ip_src, sizeof(spt->client_ip))) { 77 78 79 77 if (spt->client_port == tp->udp.uh_sport) { 78 return k; 79 } 80 80 } 81 81 } … … 86 86 87 87 static int tftp_read_data(PNATState pData, struct tftp_session *spt, u_int16_t block_nr, 88 88 u_int8_t *buf, int len) 89 89 { 90 90 int fd; … … 94 94 95 95 n = RTStrPrintf(buffer, sizeof(buffer), "%s/%s", 96 96 tftp_prefix, spt->filename); 97 97 if (n >= sizeof(buffer)) 98 98 return -1; … … 128 128 129 129 if (!m) 130 130 return -1; 131 131 132 132 memset(m->m_data, 0, m->m_size); … … 157 157 static int tftp_send_error(PNATState pData, 158 158 struct tftp_session *spt, 159 160 159 u_int16_t errorcode, const char *msg, 160 struct tftp_t *recv_tp) 161 161 { 162 162 struct sockaddr_in saddr, daddr; … … 201 201 static int tftp_send_data(PNATState pData, 202 202 struct tftp_session *spt, 203 204 203 u_int16_t block_nr, 204 struct tftp_t *recv_tp) 205 205 { 206 206 struct sockaddr_in saddr, daddr; … … 347 347 348 348 if (k >= n) { 349 350 349 tftp_send_error(pData, spt, 2, "Access violation", tp); 350 return; 351 351 } 352 352 … … 355 355 356 356 if (strcmp(key, "tsize") == 0) { 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 357 int tsize = atoi(value); 358 struct stat stat_p; 359 360 if (tsize == 0 && tftp_prefix) { 361 char buffer[1024]; 362 int len; 363 364 len = RTStrPrintf(buffer, sizeof(buffer), "%s/%s", 365 tftp_prefix, spt->filename); 366 367 if (stat(buffer, &stat_p) == 0) 368 tsize = stat_p.st_size; 369 else { 370 tftp_send_error(pData, spt, 1, "File not found", tp); 371 return; 372 } 373 } 374 375 tftp_send_oack(pData, spt, "tsize", tsize, tp); 376 376 } 377 377 } … … 391 391 392 392 if (tftp_send_data(pData, &tftp_sessions[s], 393 394 393 ntohs(tp->x.tp_data.tp_block_nr) + 1, 394 tp) < 0) { 395 395 return; 396 396 } -
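Note: tftp_read_data() above builds "<tftp_prefix>/<filename>" (rejecting over-long names, as the RTStrPrintf length check shows) and serves the file block by block. A sketch of that block addressing follows, using stdio instead of raw descriptors and assuming the conventional 512-byte TFTP block size; the path and names in main() are placeholders.

/*
 * Sketch of tftp_read_data()-style block addressing: TFTP block N
 * is assumed to live at byte offset N * 512 of the file (512 being
 * the classic TFTP block size).  stdio replaces open/lseek/read.
 */
#include <stdio.h>

#define TFTP_BLOCK 512

/* Read one block; returns bytes read (short read = last block) or -1. */
static int read_block(const char *prefix, const char *name,
                      unsigned block_nr, unsigned char *buf)
{
    char path[1024];
    FILE *f;
    int n;

    if (snprintf(path, sizeof(path), "%s/%s", prefix, name)
            >= (int)sizeof(path))
        return -1;                       /* name too long, as in slirp */
    if ((f = fopen(path, "rb")) == NULL)
        return -1;
    if (fseek(f, (long)block_nr * TFTP_BLOCK, SEEK_SET) != 0) {
        fclose(f);
        return -1;
    }
    n = (int)fread(buf, 1, TFTP_BLOCK, f);
    fclose(f);
    return n;
}

int main(void)
{
    unsigned char buf[TFTP_BLOCK];
    int n = read_block("/tmp", "test.bin", 0, buf);
    printf("read %d bytes\n", n);
    return n < 0;
}

A return value shorter than one full block is what tells the server to mark the DATA packet as the final one of the transfer.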
trunk/src/VBox/Devices/Network/slirp/tftp.h
r1076 r14470 3 3 #define TFTP_SESSIONS_MAX 3 4 4 5 #define TFTP_SERVER 69 5 #define TFTP_SERVER 69 6 6 7 7 #define TFTP_RRQ 1 -
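Note: TFTP_RRQ above is the RFC 1350 read-request opcode: two bytes of opcode followed by the filename and transfer mode as NUL-terminated strings. A minimal, self-contained builder for such a packet; build_rrq() and the sample arguments are illustrative, not part of the slirp sources.

/*
 * RFC 1350 read request: 2-byte opcode (TFTP_RRQ above), then
 * NUL-terminated filename and mode strings.  Sketch only.
 */
#include <stdio.h>
#include <string.h>

#define TFTP_RRQ 1

/* Fill buf with an RRQ; returns the packet length or -1 if too small. */
static int build_rrq(unsigned char *buf, size_t buflen,
                     const char *file, const char *mode)
{
    size_t need = 2 + strlen(file) + 1 + strlen(mode) + 1;
    if (need > buflen)
        return -1;
    buf[0] = 0;                          /* opcode, network byte order */
    buf[1] = TFTP_RRQ;
    strcpy((char *)buf + 2, file);
    strcpy((char *)buf + 2 + strlen(file) + 1, mode);
    return (int)need;
}

int main(void)
{
    unsigned char pkt[64];
    int len = build_rrq(pkt, sizeof(pkt), "boot.img", "octet");
    printf("RRQ length = %d\n", len);
    return len < 0;
}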
trunk/src/VBox/Devices/Network/slirp/udp.c
r14390 r14470
1 1 /*
2 2 * Copyright (c) 1982, 1986, 1988, 1990, 1993
3 *
3 * The Regents of the University of California. All rights reserved.
4 4 *
5 5 * Redistribution and use in source and binary forms, with or without
… …
13 13 * 3. All advertising materials mentioning features or use of this software
14 14 * must display the following acknowledgement:
15 *
16 *
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 17 * 4. Neither the name of the University nor the names of its contributors
18 18 * may be used to endorse or promote products derived from this software
… …
31 31 * SUCH DAMAGE.
32 32 *
33 * @(#)udp_usrreq.c 8.4 (Berkeley) 1/21/94
33 * @(#)udp_usrreq.c 8.4 (Berkeley) 1/21/94
34 34 * udp_usrreq.c,v 1.4 1994/10/02 17:48:45 phk Exp
35 35 */
… …
57 57 {
58 58 udp_last_so = &udb;
59 59 udb.so_next = udb.so_prev = &udb;
60 60 }
61 61
… …
67 67 udp_input(PNATState pData, register struct mbuf *m, int iphlen)
68 68 {
69-70
71 /*
72-137
69 register struct ip *ip;
70 register struct udphdr *uh;
71 /* struct mbuf *opts = 0;*/
72 int len;
73 struct ip save_ip;
74 struct socket *so;
75
76 DEBUG_CALL("udp_input");
77 DEBUG_ARG("m = %lx", (long)m);
78 DEBUG_ARG("iphlen = %d", iphlen);
79
80 udpstat.udps_ipackets++;
81
82 /*
83 * Strip IP options, if any; should skip this,
84 * make available to user, and use on returned packets,
85 * but we don't yet have a way to check the checksum
86 * with options still present.
87 */
88 if(iphlen > sizeof(struct ip)) {
89 ip_stripoptions(m, (struct mbuf *)0);
90 iphlen = sizeof(struct ip);
91 }
92
93 /*
94 * Get IP and UDP header together in first mbuf.
95 */
96 ip = mtod(m, struct ip *);
97 uh = (struct udphdr *)((caddr_t)ip + iphlen);
98
99 /*
100 * Make mbuf data length reflect UDP length.
101 * If not enough data to reflect UDP length, drop.
102 */
103 len = ntohs((u_int16_t)uh->uh_ulen);
104
105 if (ip->ip_len != len) {
106 if (len > ip->ip_len) {
107 udpstat.udps_badlen++;
108 goto bad;
109 }
110 m_adj(m, len - ip->ip_len);
111 ip->ip_len = len;
112 }
113
114 /*
115 * Save a copy of the IP header in case we want restore it
116 * for sending an ICMP error message in response.
117 */
118 save_ip = *ip;
119 save_ip.ip_len+= iphlen; /* tcp_input subtracts this */
120
121 /*
122 * Checksum extended UDP header and data.
123 */
124 if (udpcksum && uh->uh_sum) {
125 ((struct ipovly *)ip)->ih_next = 0;
126 ((struct ipovly *)ip)->ih_prev = 0;
127 ((struct ipovly *)ip)->ih_x1 = 0;
128 ((struct ipovly *)ip)->ih_len = uh->uh_ulen;
129 /* keep uh_sum for ICMP reply
130 * uh->uh_sum = cksum(m, len + sizeof (struct ip));
131 * if (uh->uh_sum) {
132 */
133 if(cksum(m, len + sizeof(struct ip))) {
134 udpstat.udps_badsum++;
135 goto bad;
136 }
137 }
138 138
139 139 /*
… …
153 153 }
154 154
155-186
155 /*
156 * Locate pcb for datagram.
157 */
158 so = udp_last_so;
159 if (so->so_lport != uh->uh_sport ||
160 so->so_laddr.s_addr != ip->ip_src.s_addr) {
161 struct socket *tmp;
162
163 for (tmp = udb.so_next; tmp != &udb; tmp = tmp->so_next) {
164 if (tmp->so_lport == uh->uh_sport &&
165 tmp->so_laddr.s_addr == ip->ip_src.s_addr) {
166 so = tmp;
167 break;
168 }
169 }
170 if (tmp == &udb) {
171 so = NULL;
172 } else {
173 udpstat.udpps_pcbcachemiss++;
174 udp_last_so = so;
175 }
176 }
177
178 if (so == NULL) {
179 /*
180 * If there's no socket for this packet,
181 * create one
182 */
183 if ((so = socreate()) == NULL) goto bad;
184 if(udp_attach(pData, so) == -1) {
185 DEBUG_MISC((dfd," udp_attach errno = %d-%s\n",
186 errno,strerror(errno)));
187 187 sofree(pData, so);
188-205
188 goto bad;
189 }
190
191 /*
192 * Setup fields
193 */
194 /* udp_last_so = so; */
195 so->so_laddr = ip->ip_src;
196 so->so_lport = uh->uh_sport;
197
198 if ((so->so_iptos = udp_tos(so)) == 0)
199 so->so_iptos = ip->ip_tos;
200
201 /*
202 * XXXXX Here, check if it's in udpexec_list,
203 * and if it is, do the fork_exec() etc.
204 */
205 }
206 206
207 207 so->so_faddr = ip->ip_dst; /* XXX */
208 208 so->so_fport = uh->uh_dport; /* XXX */
209 209
210-236
210 iphlen += sizeof(struct udphdr);
211 m->m_len -= iphlen;
212 m->m_data += iphlen;
213
214 /*
215 * Now we sendto() the packet.
216 */
217 if (so->so_emu)
218 udp_emu(pData, so, m);
219
220 if(sosendto(pData, so,m) == -1) {
221 m->m_len += iphlen;
222 m->m_data -= iphlen;
223 *ip=save_ip;
224 DEBUG_MISC((dfd,"udp tx errno = %d-%s\n",errno,strerror(errno)));
225 icmp_error(pData, m, ICMP_UNREACH,ICMP_UNREACH_NET, 0,strerror(errno));
226 }
227
228 m_free(pData, so->so_m); /* used for ICMP if error on sorecvfrom */
229
230 /* restore the orig mbuf packet */
231 m->m_len += iphlen;
232 m->m_data -= iphlen;
233 *ip=save_ip;
234 so->so_m=m; /* ICMP backup */
235
236 return;
237 237 bad:
238-240
238 m_freem(pData, m);
239 /* if (opts) m_freem(opts); */
240 return;
241 241 }
242 242
… …
245 245 int iptos)
246 246 {
247-271
247 register struct udpiphdr *ui;
248 int error = 0;
249
250 DEBUG_CALL("udp_output");
251 DEBUG_ARG("so = %lx", (long)so);
252 DEBUG_ARG("m = %lx", (long)m);
253 DEBUG_ARG("saddr = %lx", (long)saddr->sin_addr.s_addr);
254 DEBUG_ARG("daddr = %lx", (long)daddr->sin_addr.s_addr);
255
256 /*
257 * Adjust for header
258 */
259 m->m_data -= sizeof(struct udpiphdr);
260 m->m_len += sizeof(struct udpiphdr);
261
262 /*
263 * Fill in mbuf with extended UDP header
264 * and addresses and length put into network format.
265 */
266 ui = mtod(m, struct udpiphdr *);
267 ui->ui_next = ui->ui_prev = 0;
268 ui->ui_x1 = 0;
269 ui->ui_pr = IPPROTO_UDP;
270 ui->ui_len = htons(m->m_len - sizeof(struct ip)); /* + sizeof (struct udphdr)); */
271 /* XXXXX Check for from-one-location sockets, or from-any-location sockets */
272 272 ui->ui_src = saddr->sin_addr;
273-295
273 ui->ui_dst = daddr->sin_addr;
274 ui->ui_sport = saddr->sin_port;
275 ui->ui_dport = daddr->sin_port;
276 ui->ui_ulen = ui->ui_len;
277
278 /*
279 * Stuff checksum and output datagram.
280 */
281 ui->ui_sum = 0;
282 if (udpcksum) {
283 if ((ui->ui_sum = cksum(m, /* sizeof (struct udpiphdr) + */ m->m_len)) == 0)
284 ui->ui_sum = 0xffff;
285 }
286 ((struct ip *)ui)->ip_len = m->m_len;
287
288 ((struct ip *)ui)->ip_ttl = ip_defttl;
289 ((struct ip *)ui)->ip_tos = iptos;
290
291 udpstat.udps_opackets++;
292
293 error = ip_output(pData, so, m);
294
295 return (error);
296 296 }
297 297
… …
357 357 udp_detach(PNATState pData, struct socket *so)
358 358 {
359-364
359 /* Correctly update list if detaching last socket in list. */
360 if (so == udp_last_so) udp_last_so = &udb;
361 closesocket(so->s);
362 /* if (so->so_m) m_free(so->so_m); done by sofree */
363
364 sofree(pData, so);
365 365 }
366 366
367 367 static const struct tos_t udptos[] = {
368 {0, 53, IPTOS_LOWDELAY, 0},/* DNS */
369 {517, 517, IPTOS_LOWDELAY, EMU_TALK},/* talk */
370 {518, 518, IPTOS_LOWDELAY, EMU_NTALK},/* ntalk */
371 {0, 7648, IPTOS_LOWDELAY, EMU_CUSEEME},/* Cu-Seeme */
372
368 {0, 53, IPTOS_LOWDELAY, 0}, /* DNS */
369 {517, 517, IPTOS_LOWDELAY, EMU_TALK}, /* talk */
370 {518, 518, IPTOS_LOWDELAY, EMU_NTALK}, /* ntalk */
371 {0, 7648, IPTOS_LOWDELAY, EMU_CUSEEME}, /* Cu-Seeme */
372 {0, 0, 0, 0}
373 373 };
374 374
375 375 u_int8_t
376 376 udp_tos(so)
377-378
377 struct socket *so;
378 {
379-390
379 int i = 0;
380
381 while(udptos[i].tos) {
382 if ((udptos[i].fport && ntohs(so->so_fport) == udptos[i].fport) ||
383 (udptos[i].lport && ntohs(so->so_lport) == udptos[i].lport)) {
384 so->so_emu = udptos[i].emu;
385 return udptos[i].tos;
386 }
387 i++;
388 }
389
390 return 0;
391 391 }
392 392
… …
401 401 udp_emu(PNATState pData, struct socket *so, struct mbuf *m)
402 402 {
403 403 struct sockaddr_in addr;
404 404 socklen_t addrlen = sizeof(addr);
405 405 #ifdef EMULATE_TALK
406-409
406 CTL_MSG_OLD *omsg;
407 CTL_MSG *nmsg;
408 char buff[sizeof(CTL_MSG)];
409 u_char type;
410 410
411 411 struct talk_request {
412-414
412 struct talk_request *next;
413 struct socket *udp_so;
414 struct socket *tcp_so;
415 415 } *req;
416 416
417 417 static struct talk_request *req_tbl = 0;
418 418
419 419 #endif
420 420
421 421 struct cu_header {
422 uint16_t d_family;/* destination family */
423 uint16_t d_port;/* destination port */
424 uint32_t d_addr;/* destination address */
425 uint16_t s_family;/* source family */
426 uint16_t s_port;/* source port */
427 uint32_t so_addr;/* source address */
428 uint32_t seqn;/* sequence number */
429 uint16_t message;/* message */
430 uint16_t data_type;/* data type */
431 uint16_t pkt_len;/* packet length */
422 uint16_t d_family; /* destination family */
423 uint16_t d_port; /* destination port */
424 uint32_t d_addr; /* destination address */
425 uint16_t s_family; /* source family */
426 uint16_t s_port; /* source port */
427 uint32_t so_addr; /* source address */
428 uint32_t seqn; /* sequence number */
429 uint16_t message; /* message */
430 uint16_t data_type; /* data type */
431 uint16_t pkt_len; /* packet length */
432 432 } *cu_head;
433 433
434 434 switch(so->so_emu) {
435 435
436 436 #ifdef EMULATE_TALK
437-447
448 #define IS_OLD(so->so_emu == EMU_TALK)
437 case EMU_TALK:
438 case EMU_NTALK:
439 /*
440 * Talk emulation. We always change the ctl_addr to get
441 * some answers from the daemon. When an ANNOUNCE comes,
442 * we send LEAVE_INVITE to the local daemons. Also when a
443 * DELETE comes, we send copies to the local daemons.
444 */
445 if (getsockname(so->s, (struct sockaddr *)&addr, &addrlen) < 0)
446 return;
447
448 #define IS_OLD (so->so_emu == EMU_TALK)
449 449
450 450 #define COPY_MSG(dest, src) { dest->type = src->type; \
451-457
451 dest->id_num = src->id_num; \
452 dest->pid = src->pid; \
453 dest->addr = src->addr; \
454 dest->ctl_addr = src->ctl_addr; \
455 memcpy(&dest->l_name, &src->l_name, NAME_SIZE_OLD); \
456 memcpy(&dest->r_name, &src->r_name, NAME_SIZE_OLD); \
457 memcpy(&dest->r_tty, &src->r_tty, TTY_SIZE); }
458 458
459 459 #define OTOSIN(ptr, field) ((struct sockaddr_in *)&ptr->field)
… …
461 461
462 462
463 if (IS_OLD) {/* old talk */
464-469
470 } else {/* new talk */
471-479
480 return;/* for LOOK_UP this is enough */
481
482 if (IS_OLD) {/* make a copy of the message */
483-507
508 break;/* found it */
509
510 if (!req) {/* no entry for so, create new */
511-603
463 if (IS_OLD) { /* old talk */
464 omsg = mtod(m, CTL_MSG_OLD*);
465 nmsg = (CTL_MSG *) buff;
466 type = omsg->type;
467 OTOSIN(omsg, ctl_addr)->sin_port = addr.sin_port;
468 OTOSIN(omsg, ctl_addr)->sin_addr = our_addr;
469 strncpy(omsg->l_name, getlogin(), NAME_SIZE_OLD);
470 } else { /* new talk */
471 omsg = (CTL_MSG_OLD *) buff;
472 nmsg = mtod(m, CTL_MSG *);
473 type = nmsg->type;
474 OTOSIN(nmsg, ctl_addr)->sin_port = addr.sin_port;
475 OTOSIN(nmsg, ctl_addr)->sin_addr = our_addr;
476 strncpy(nmsg->l_name, getlogin(), NAME_SIZE_OLD);
477 }
478
479 if (type == LOOK_UP)
480 return; /* for LOOK_UP this is enough */
481
482 if (IS_OLD) { /* make a copy of the message */
483 COPY_MSG(nmsg, omsg);
484 nmsg->vers = 1;
485 nmsg->answer = 0;
486 } else
487 COPY_MSG(omsg, nmsg);
488
489 /*
490 * If if is an ANNOUNCE message, we go through the
491 * request table to see if a tcp port has already
492 * been redirected for this socket. If not, we solisten()
493 * a new socket and add this entry to the table.
494 * The port number of the tcp socket and our IP
495 * are put to the addr field of the message structures.
496 * Then a LEAVE_INVITE is sent to both local daemon
497 * ports, 517 and 518. This is why we have two copies
498 * of the message, one in old talk and one in new talk
499 * format.
500 */
501
502 if (type == ANNOUNCE) {
503 int s;
504 u_short temp_port;
505
506 for(req = req_tbl; req; req = req->next)
507 if (so == req->udp_so)
508 break; /* found it */
509
510 if (!req) { /* no entry for so, create new */
511 req = (struct talk_request *)
512 malloc(sizeof(struct talk_request));
513 req->udp_so = so;
514 req->tcp_so = solisten(0,
515 OTOSIN(omsg, addr)->sin_addr.s_addr,
516 OTOSIN(omsg, addr)->sin_port,
517 SS_FACCEPTONCE);
518 req->next = req_tbl;
519 req_tbl = req;
520 }
521
522 /* replace port number in addr field */
523 addrlen = sizeof(addr);
524 getsockname(req->tcp_so->s,
525 (struct sockaddr *) &addr,
526 &addrlen);
527 OTOSIN(omsg, addr)->sin_port = addr.sin_port;
528 OTOSIN(omsg, addr)->sin_addr = our_addr;
529 OTOSIN(nmsg, addr)->sin_port = addr.sin_port;
530 OTOSIN(nmsg, addr)->sin_addr = our_addr;
531
532 /* send LEAVE_INVITEs */
533 temp_port = OTOSIN(omsg, ctl_addr)->sin_port;
534 OTOSIN(omsg, ctl_addr)->sin_port = 0;
535 OTOSIN(nmsg, ctl_addr)->sin_port = 0;
536 omsg->type = nmsg->type = LEAVE_INVITE;
537
538 s = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
539 addr.sin_addr = our_addr;
540 addr.sin_family = AF_INET;
541 addr.sin_port = htons(517);
542 sendto(s, (char *)omsg, sizeof(*omsg), 0,
543 (struct sockaddr *)&addr, sizeof(addr));
544 addr.sin_port = htons(518);
545 sendto(s, (char *)nmsg, sizeof(*nmsg), 0,
546 (struct sockaddr *) &addr, sizeof(addr));
547 closesocket(s) ;
548
549 omsg->type = nmsg->type = ANNOUNCE;
550 OTOSIN(omsg, ctl_addr)->sin_port = temp_port;
551 OTOSIN(nmsg, ctl_addr)->sin_port = temp_port;
552 }
553
554 /*
555 * If it is a DELETE message, we send a copy to the
556 * local daemons. Then we delete the entry corresponding
557 * to our socket from the request table.
558 */
559
560 if (type == DELETE) {
561 struct talk_request *temp_req, *req_next;
562 int s;
563 u_short temp_port;
564
565 temp_port = OTOSIN(omsg, ctl_addr)->sin_port;
566 OTOSIN(omsg, ctl_addr)->sin_port = 0;
567 OTOSIN(nmsg, ctl_addr)->sin_port = 0;
568
569 s = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
570 addr.sin_addr = our_addr;
571 addr.sin_family = AF_INET;
572 addr.sin_port = htons(517);
573 sendto(s, (char *)omsg, sizeof(*omsg), 0,
574 (struct sockaddr *)&addr, sizeof(addr));
575 addr.sin_port = htons(518);
576 sendto(s, (char *)nmsg, sizeof(*nmsg), 0,
577 (struct sockaddr *)&addr, sizeof(addr));
578 closesocket(s);
579
580 OTOSIN(omsg, ctl_addr)->sin_port = temp_port;
581 OTOSIN(nmsg, ctl_addr)->sin_port = temp_port;
582
583 /* delete table entry */
584 if (so == req_tbl->udp_so) {
585 temp_req = req_tbl;
586 req_tbl = req_tbl->next;
587 free(temp_req);
588 } else {
589 temp_req = req_tbl;
590 for(req = req_tbl->next; req; req = req_next) {
591 req_next = req->next;
592 if (so == req->udp_so) {
593 temp_req->next = req_next;
594 free(req);
595 break;
596 } else {
597 temp_req = req;
598 }
599 }
600 }
601 }
602
603 return;
604 604 #endif
605 605
606-623
606 case EMU_CUSEEME:
607
608 /*
609 * Cu-SeeMe emulation.
610 * Hopefully the packet is more that 16 bytes long. We don't
611 * do any other tests, just replace the address and port
612 * fields.
613 */
614 if (m->m_len >= sizeof (*cu_head)) {
615 if (getsockname(so->s, (struct sockaddr *)&addr, &addrlen) < 0)
616 return;
617 cu_head = mtod(m, struct cu_header *);
618 cu_head->s_port = addr.sin_port;
619 cu_head->so_addr = our_addr.s_addr;
620 }
621
622 return;
623 }
624 624 }
625 625
… …
627 627 udp_listen(PNATState pData, u_int port, u_int32_t laddr, u_int lport, int flags)
628 628 {
629-631
629 struct sockaddr_in addr;
630 struct socket *so;
631 socklen_t addrlen = sizeof(struct sockaddr_in);
632 632 int opt = 1;
633 633
634-650
651 /*
652-654
634 if ((so = socreate()) == NULL) {
635 free(so);
636 return NULL;
637 }
638 so->s = socket(AF_INET,SOCK_DGRAM,0);
639 so->so_expire = curtime + SO_EXPIRE;
640 insque(pData, so,&udb);
641
642 addr.sin_family = AF_INET;
643 addr.sin_addr.s_addr = INADDR_ANY;
644 addr.sin_port = port;
645
646 if (bind(so->s,(struct sockaddr *)&addr, addrlen) < 0) {
647 udp_detach(pData, so);
648 return NULL;
649 }
650 setsockopt(so->s,SOL_SOCKET,SO_REUSEADDR,(char *)&opt,sizeof(int));
651 /* setsockopt(so->s,SOL_SOCKET,SO_OOBINLINE,(char *)&opt,sizeof(int)); */
652
653 getsockname(so->s,(struct sockaddr *)&addr,&addrlen);
654 so->so_fport = addr.sin_port;
655 655 /* The original check was completely broken, as the commented out
656 656 * if statement was always true (INADDR_ANY=0). */
657 657 /* if (addr.sin_addr.s_addr == 0 || addr.sin_addr.s_addr == loopback_addr.s_addr) */
658 658 if (1 == 0) /* always use the else part */
659-670
671 }
659 so->so_faddr = alias_addr;
660 else
661 so->so_faddr = addr.sin_addr;
662
663 so->so_lport = lport;
664 so->so_laddr.s_addr = laddr;
665 if (flags != SS_FACCEPTONCE)
666 so->so_expire = 0;
667
668 so->so_state = SS_ISFCONNECTED;
669
670 return so;
671 }
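Two details in udp.c above are worth a note. First, udp_input keeps a one-entry PCB cache: udp_last_so remembers the socket that matched the previous datagram, and the circular udb list is walked only on a miss (counted in udpps_pcbcachemiss). A trimmed sketch of that lookup, with the socket structure reduced to the fields the search touches; udp_find_so is an illustrative name, not from the source:

    #include <sys/types.h>
    #include <netinet/in.h>

    struct socket {
        struct socket  *so_next;    /* circular-list linkage */
        u_int16_t       so_lport;   /* guest port, network byte order */
        struct in_addr  so_laddr;   /* guest address */
    };

    static struct socket  udb = { &udb };       /* empty list points at itself */
    static struct socket *udp_last_so = &udb;   /* last socket that matched */

    static struct socket *
    udp_find_so(u_int16_t sport, struct in_addr src)
    {
        struct socket *so = udp_last_so;

        /* Fast path: UDP traffic tends to arrive in bursts from one flow,
         * so the previous match usually matches again and no walk is needed. */
        if (so->so_lport == sport && so->so_laddr.s_addr == src.s_addr)
            return so;

        for (so = udb.so_next; so != &udb; so = so->so_next) {
            if (so->so_lport == sport && so->so_laddr.s_addr == src.s_addr) {
                udp_last_so = so;   /* cache miss: remember for next time */
                return so;
            }
        }
        return NULL;                /* caller creates and attaches a new socket */
    }

Second, the udp_listen hunk retains the fix its comment describes: the old alias_addr test compared against INADDR_ANY, which is 0, so the condition was always true; the replacement `if (1 == 0)` deliberately forces the else branch so the bound address is used instead.
-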
trunk/src/VBox/Devices/Network/slirp/udp.h
r1076 r14470
1 1 /*
2 2 * Copyright (c) 1982, 1986, 1993
3 *
3 * The Regents of the University of California. All rights reserved.
4 4 *
5 5 * Redistribution and use in source and binary forms, with or without
… …
13 13 * 3. All advertising materials mentioning features or use of this software
14 14 * must display the following acknowledgement:
15 *
16 *
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 17 * 4. Neither the name of the University nor the names of its contributors
18 18 * may be used to endorse or promote products derived from this software
… …
31 31 * SUCH DAMAGE.
32 32 *
33 * @(#)udp.h 8.1 (Berkeley) 6/10/93
33 * @(#)udp.h 8.1 (Berkeley) 6/10/93
34 34 * udp.h,v 1.3 1994/08/21 05:27:41 paul Exp
35 35 */
… …
48 48 */
49 49 struct udphdr {
50 u_int16_t uh_sport;/* source port */
51 u_int16_t uh_dport;/* destination port */
52 int16_t uh_ulen;/* udp length */
53 u_int16_t uh_sum;/* udp checksum */
50 u_int16_t uh_sport; /* source port */
51 u_int16_t uh_dport; /* destination port */
52 int16_t uh_ulen; /* udp length */
53 u_int16_t uh_sum; /* udp checksum */
54 54 };
55 55
… …
58 58 */
59 59 struct udpiphdr {
60-61
60 struct ipovly ui_i; /* overlaid ip structure */
61 struct udphdr ui_u; /* udp header */
62 62 };
63 63 #define ui_next ui_i.ih_next
… …
74 74
75 75 struct udpstat_t {
76-86
76 /* input statistics: */
77 u_long udps_ipackets; /* total input packets */
78 u_long udps_hdrops; /* packet shorter than header */
79 u_long udps_badsum; /* checksum error */
80 u_long udps_badlen; /* data length larger than packet */
81 u_long udps_noport; /* no socket on port */
82 u_long udps_noportbcast; /* of above, arrived as broadcast */
83 u_long udps_fullsock; /* not delivered, input socket full */
84 u_long udpps_pcbcachemiss; /* input packets missing pcb cache */
85 /* output statistics: */
86 u_long udps_opackets; /* total output packets */
87 87 };
88 88
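The ui_* macros above exist so that udp_output() (in udp.c earlier in this changeset) can overlay a UDP pseudo-header on the IP header in place before checksumming: the overlaid pointer fields are zeroed, the protocol and UDP length are filled in, and one checksum pass then covers pseudo-header, UDP header, and payload. A self-contained sketch of the same arithmetic on a flat buffer; the types and names here (pseudo_udp, cksum_buf, udp_checksum) are simplified stand-ins, and the real cksum() walks an mbuf chain:

    #include <stdint.h>
    #include <stddef.h>

    /* Flat-buffer stand-in for slirp's ipovly/udpiphdr overlay: 20 bytes,
     * the same size as the IP header it replaces during the checksum pass. */
    struct pseudo_udp {
        uint32_t ih_next, ih_prev;  /* overlaid pointers: zeroed for the sum */
        uint8_t  ih_x1;             /* padding: 0 */
        uint8_t  ih_pr;             /* protocol, IPPROTO_UDP = 17 */
        uint16_t ih_len;            /* UDP length (header + data), network order */
        uint32_t ih_src, ih_dst;    /* source and destination addresses */
    };

    /* One's-complement checksum of a flat buffer (simplified cksum()). */
    static uint16_t cksum_buf(const void *data, size_t len)
    {
        const uint16_t *w = data;
        uint32_t sum = 0;

        while (len > 1) { sum += *w++; len -= 2; }
        if (len)                              /* trailing odd byte */
            sum += *(const uint8_t *)w;
        while (sum >> 16)                     /* fold carries back in */
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }

    /* dgram = pseudo-header + UDP header + payload, laid out as above.
     * As in udp_output: a computed sum of 0 goes on the wire as 0xffff,
     * because a zero checksum field means "checksum disabled" in UDP. */
    static uint16_t udp_checksum(const void *dgram, size_t len)
    {
        uint16_t sum = cksum_buf(dgram, len);
        return sum == 0 ? 0xffff : sum;
    }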