VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/tcp_input.c@ 35923

Last change on this file since 35923 was 35923, checked in by vboxsync, 14 years ago

NAT: 'icmp_error' frees source mbuf.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 62.2 KB
1/* $Id: tcp_input.c 35923 2011-02-10 03:48:27Z vboxsync $ */
2/** @file
3 * NAT - TCP input.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*
19 * This code is based on:
20 *
21 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994
22 * The Regents of the University of California. All rights reserved.
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 * 1. Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * 2. Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in the
31 * documentation and/or other materials provided with the distribution.
32 * 3. All advertising materials mentioning features or use of this software
33 * must display the following acknowledgement:
34 * This product includes software developed by the University of
35 * California, Berkeley and its contributors.
36 * 4. Neither the name of the University nor the names of its contributors
37 * may be used to endorse or promote products derived from this software
38 * without specific prior written permission.
39 *
40 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50 * SUCH DAMAGE.
51 *
52 * @(#)tcp_input.c 8.5 (Berkeley) 4/10/94
53 * tcp_input.c,v 1.10 1994/10/13 18:36:32 wollman Exp
54 */
55
56/*
57 * Changes and additions relating to SLiRP
58 * Copyright (c) 1995 Danny Gasparovski.
59 *
60 * Please read the file COPYRIGHT for the
61 * terms and conditions of the copyright.
62 */
63
64#include <slirp.h>
65#include "ip_icmp.h"
66
67
68#define TCP_PAWS_IDLE (24 * 24 * 60 * 60 * PR_SLOWHZ)
69
70/* for modulo comparisons of timestamps */
71#define TSTMP_LT(a, b) ((int)((a)-(b)) < 0)
72#define TSTMP_GEQ(a, b) ((int)((a)-(b)) >= 0)
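/*
 * Editor's note - illustrative sketch, not part of the original source.
 * The cast to int above is what makes these comparisons safe across 32-bit
 * timestamp wraparound: the unsigned difference of two values that straddle
 * the wrap point is huge, but reinterpreted as a signed int it becomes a
 * small negative or positive number, so ordering is preserved as long as
 * the two timestamps are less than 2^31 apart. A minimal standalone example
 * (values are hypothetical):
 */
#if 0 /* example only */
static void tstmp_wrap_demo(void)
{
    u_int32_t older = 0xFFFFFFF0U;  /* taken just before the counter wraps */
    u_int32_t newer = 5;            /* taken just after the wrap           */

    /* A plain unsigned comparison would order these the wrong way round,
     * but the modulo comparisons get it right:                            */
    Assert((int)(older - newer) < 0);   /* TSTMP_LT(older, newer)          */
    Assert((int)(newer - older) >= 0);  /* TSTMP_GEQ(newer, older)         */
}
#endif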
73
74#ifndef TCP_ACK_HACK
75#define DELAY_ACK(tp, ti) \
76 if (ti->ti_flags & TH_PUSH) \
77 tp->t_flags |= TF_ACKNOW; \
78 else \
79 tp->t_flags |= TF_DELACK;
80#else /* !TCP_ACK_HACK */
81#define DELAY_ACK(tp, ign) \
82 tp->t_flags |= TF_DELACK;
83#endif /* TCP_ACK_HACK */
84
85
86/*
87 * deps: netinet/tcp_reass.c
88 * tcp_reass_maxqlen = 48 (default)
89 * tcp_reass_maxseg = nmbclusters/16 (nmbclusters = 1024 + maxusers * 64, from kern/kern_mbuf.c; let's say 256)
90 */
91int
92tcp_reass(PNATState pData, struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m)
93{
94 struct tseg_qent *q;
95 struct tseg_qent *p = NULL;
96 struct tseg_qent *nq;
97 struct tseg_qent *te = NULL;
98 struct socket *so = tp->t_socket;
99 int flags;
100 STAM_PROFILE_START(&pData->StatTCP_reassamble, tcp_reassamble);
101
102 /*
103 * XXX: tcp_reass() is rather inefficient with its data structures
104 * and should be rewritten (see NetBSD for optimizations). While
105 * doing that it should move to its own file tcp_reass.c.
106 */
107
108 /*
109 * Call with th==NULL after becoming established to
110 * force pre-ESTABLISHED data up to the user socket.
111 */
112 if (th == NULL)
113 goto present;
114
115 /*
116 * Limit the number of segments in the reassembly queue to prevent
117 * holding on to too many segments (and thus running out of mbufs).
118 * Make sure to let through the missing segment that caused this
119 * queue to form. Always keep one global queue entry spare to be able
120 * to process the missing segment.
121 */
122 if ( th->th_seq != tp->rcv_nxt
123 && ( tcp_reass_qsize + 1 >= tcp_reass_maxseg
124 || tp->t_segqlen >= tcp_reass_maxqlen))
125 {
126 tcp_reass_overflows++;
127 tcpstat.tcps_rcvmemdrop++;
128 m_freem(pData, m);
129 *tlenp = 0;
130 STAM_PROFILE_STOP(&pData->StatTCP_reassamble, tcp_reassamble);
131 return (0);
132 }
133
134 /*
135 * Allocate a new queue entry. If we can't, or hit the zone limit
136 * just drop the pkt.
137 */
138 te = RTMemAlloc(sizeof(struct tseg_qent));
139 if (te == NULL)
140 {
141 tcpstat.tcps_rcvmemdrop++;
142 m_freem(pData, m);
143 *tlenp = 0;
144 STAM_PROFILE_STOP(&pData->StatTCP_reassamble, tcp_reassamble);
145 return (0);
146 }
147 tp->t_segqlen++;
148 tcp_reass_qsize++;
149
150 /*
151 * Find a segment which begins after this one does.
152 */
153 LIST_FOREACH(q, &tp->t_segq, tqe_q)
154 {
155 if (SEQ_GT(q->tqe_th->th_seq, th->th_seq))
156 break;
157 p = q;
158 }
159
160 /*
161 * If there is a preceding segment, it may provide some of
162 * our data already. If so, drop the data from the incoming
163 * segment. If it provides all of our data, drop us.
164 */
165 if (p != NULL)
166 {
167 int i;
168 /* conversion to int (in i) handles seq wraparound */
169 i = p->tqe_th->th_seq + p->tqe_len - th->th_seq;
170 if (i > 0)
171 {
172 if (i >= *tlenp)
173 {
174 tcpstat.tcps_rcvduppack++;
175 tcpstat.tcps_rcvdupbyte += *tlenp;
176 m_freem(pData, m);
177 RTMemFree(te);
178 tp->t_segqlen--;
179 tcp_reass_qsize--;
180 /*
181 * Try to present any queued data
182 * at the left window edge to the user.
183 * This is needed after the 3-WHS
184 * completes.
185 */
186 goto present; /* ??? */
187 }
188 m_adj(m, i);
189 *tlenp -= i;
190 th->th_seq += i;
191 }
192 }
193 tcpstat.tcps_rcvoopack++;
194 tcpstat.tcps_rcvoobyte += *tlenp;
195
196 /*
197 * While we overlap succeeding segments trim them or,
198 * if they are completely covered, dequeue them.
199 */
200 while (q)
201 {
202 int i = (th->th_seq + *tlenp) - q->tqe_th->th_seq;
203 if (i <= 0)
204 break;
205 if (i < q->tqe_len)
206 {
207 q->tqe_th->th_seq += i;
208 q->tqe_len -= i;
209 m_adj(q->tqe_m, i);
210 break;
211 }
212
213 nq = LIST_NEXT(q, tqe_q);
214 LIST_REMOVE(q, tqe_q);
215 m_freem(pData, q->tqe_m);
216 RTMemFree(q);
217 tp->t_segqlen--;
218 tcp_reass_qsize--;
219 q = nq;
220 }
221
222 /* Insert the new segment queue entry into place. */
223 te->tqe_m = m;
224 te->tqe_th = th;
225 te->tqe_len = *tlenp;
226
227 if (p == NULL)
228 {
229 LIST_INSERT_HEAD(&tp->t_segq, te, tqe_q);
230 }
231 else
232 {
233 LIST_INSERT_AFTER(p, te, tqe_q);
234 }
235
236present:
237 /*
238 * Present data to user, advancing rcv_nxt through
239 * completed sequence space.
240 */
241 if (!TCPS_HAVEESTABLISHED(tp->t_state))
242 {
243 STAM_PROFILE_STOP(&pData->StatTCP_reassamble, tcp_reassamble);
244 return (0);
245 }
246 q = LIST_FIRST(&tp->t_segq);
247 if (!q || q->tqe_th->th_seq != tp->rcv_nxt)
248 {
249 STAM_PROFILE_STOP(&pData->StatTCP_reassamble, tcp_reassamble);
250 return (0);
251 }
252 do
253 {
254 tp->rcv_nxt += q->tqe_len;
255 flags = q->tqe_th->th_flags & TH_FIN;
256 nq = LIST_NEXT(q, tqe_q);
257 LIST_REMOVE(q, tqe_q);
258 /* XXX: This should be checked against the original BSD code: Slirp
259 * checks SS_FCANTSENDMORE here, while current BSD uses SS_FCANTRCVMORE.
260 */
261 if (so->so_state & SS_FCANTSENDMORE)
262 m_freem(pData, q->tqe_m);
263 else
264 sbappend(pData, so, q->tqe_m);
265 RTMemFree(q);
266 tp->t_segqlen--;
267 tcp_reass_qsize--;
268 q = nq;
269 }
270 while (q && q->tqe_th->th_seq == tp->rcv_nxt);
271
272 STAM_PROFILE_STOP(&pData->StatTCP_reassamble, tcp_reassamble);
273 return flags;
274}
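/*
 * Editor's note - illustrative sketch, not part of the original source.
 * The duplicate/overlap trimming in tcp_reass() above is plain interval
 * arithmetic on sequence numbers. With hypothetical values:
 */
#if 0 /* example only */
static void tcp_reass_overlap_demo(void)
{
    int prev_seq = 100, prev_len = 50;  /* queued segment covers [100, 150) */
    int new_seq  = 120, new_len  = 80;  /* new segment covers    [120, 200) */

    int i = prev_seq + prev_len - new_seq;  /* 30 bytes are already queued  */
    if (i >= new_len)
    {
        /* Complete duplicate: the new segment would simply be dropped.     */
    }
    else if (i > 0)
    {
        /* Trim the duplicate prefix - this is what m_adj(m, i) does above. */
        new_seq += i;   /* -> 150                                           */
        new_len -= i;   /* -> 50 fresh bytes remain to be queued            */
    }
}
#endif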
275
276/*
277 * TCP input routine, follows pages 65-76 of the
278 * protocol specification dated September, 1981 very closely.
279 */
280void
281tcp_input(PNATState pData, register struct mbuf *m, int iphlen, struct socket *inso)
282{
283 struct ip save_ip, *ip;
284 register struct tcpiphdr *ti;
285 caddr_t optp = NULL;
286 int optlen = 0;
287 int len, tlen, off;
288 register struct tcpcb *tp = 0;
289 register int tiflags;
290 struct socket *so = 0;
291 int todrop, acked, ourfinisacked, needoutput = 0;
292/* int dropsocket = 0; */
293 int iss = 0;
294 u_long tiwin;
295/* int ts_present = 0; */
296 STAM_PROFILE_START(&pData->StatTCP_input, counter_input);
297
298 LogFlow(("tcp_input: m = %8lx, iphlen = %2d, inso = %lx\n",
299 (long)m, iphlen, (long)inso));
300
301 if (inso != NULL)
302 {
303 QSOCKET_LOCK(tcb);
304 SOCKET_LOCK(inso);
305 QSOCKET_UNLOCK(tcb);
306 }
307 /*
308 * If called with m == 0, then we're continuing the connect
309 */
310 if (m == NULL)
311 {
312 so = inso;
313 Log4(("NAT: tcp_input: %R[natsock]\n", so));
314 /* Re-set a few variables */
315 tp = sototcpcb(so);
316 m = so->so_m;
317
318 so->so_m = 0;
319 ti = so->so_ti;
320
321 /** @todo (vvl) clarify why it might happen */
322 if (ti == NULL)
323 {
324 LogRel(("NAT: ti is null. can't do any resetting connection actions\n"));
325 /* mbuf should be cleared in sofree called from tcp_close */
326 tcp_close(pData, tp);
327 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
328 return;
329 }
330
331 tiwin = ti->ti_win;
332 tiflags = ti->ti_flags;
333
334 goto cont_conn;
335 }
336
337 tcpstat.tcps_rcvtotal++;
338 /*
339 * Get IP and TCP header together in first mbuf.
340 * Note: IP leaves IP header in first mbuf.
341 */
342 ti = mtod(m, struct tcpiphdr *);
343 if (iphlen > sizeof(struct ip))
344 {
345 ip_stripoptions(m, (struct mbuf *)0);
346 iphlen = sizeof(struct ip);
347 }
348 /* XXX Check if too short */
349
350
351 /*
352 * Save a copy of the IP header in case we want to restore it
353 * for sending an ICMP error message in response.
354 */
355 ip = mtod(m, struct ip *);
356 /*
357 * (vvl) ip_input subtracts the IP header length from the ip->ip_len value.
358 * Here we do the same test as the UDP protocol's input method.
359 */
360 Assert((ip->ip_len + iphlen == m_length(m, NULL)));
361 save_ip = *ip;
362 save_ip.ip_len+= iphlen;
363
364 /*
365 * Checksum extended TCP header and data.
366 */
367 tlen = ((struct ip *)ti)->ip_len;
368 memset(ti->ti_x1, 0, 9);
369 ti->ti_len = RT_H2N_U16((u_int16_t)tlen);
370 len = sizeof(struct ip) + tlen;
371 /* keep checksum for ICMP reply
372 * ti->ti_sum = cksum(m, len);
373 * if (ti->ti_sum) { */
374 if (cksum(m, len))
375 {
376 tcpstat.tcps_rcvbadsum++;
377 goto drop;
378 }
379
380 /*
381 * Check that TCP offset makes sense,
382 * pull out TCP options and adjust length. XXX
383 */
384 off = ti->ti_off << 2;
385 if ( off < sizeof (struct tcphdr)
386 || off > tlen)
387 {
388 tcpstat.tcps_rcvbadoff++;
389 goto drop;
390 }
391 tlen -= off;
392 ti->ti_len = tlen;
393 if (off > sizeof (struct tcphdr))
394 {
395 optlen = off - sizeof (struct tcphdr);
396 optp = mtod(m, caddr_t) + sizeof (struct tcpiphdr);
397
398 /*
399 * Do quick retrieval of timestamp options ("options
400 * prediction?"). If timestamp is the only option and it's
401 * formatted as recommended in RFC 1323 appendix A, we
402 * quickly get the values now and not bother calling
403 * tcp_dooptions(), etc.
404 */
405#if 0
406 if (( optlen == TCPOLEN_TSTAMP_APPA
407 || ( optlen > TCPOLEN_TSTAMP_APPA
408 && optp[TCPOLEN_TSTAMP_APPA] == TCPOPT_EOL)) &&
409 *(u_int32_t *)optp == RT_H2N_U32_C(TCPOPT_TSTAMP_HDR) &&
410 (ti->ti_flags & TH_SYN) == 0)
411 {
412 ts_present = 1;
413 ts_val = RT_N2H_U32(*(u_int32_t *)(optp + 4));
414 ts_ecr = RT_N2H_U32(*(u_int32_t *)(optp + 8));
415 optp = NULL; / * we have parsed the options * /
416 }
417#endif
418 }
419 tiflags = ti->ti_flags;
420
421 /*
422 * Convert TCP protocol specific fields to host format.
423 */
424 NTOHL(ti->ti_seq);
425 NTOHL(ti->ti_ack);
426 NTOHS(ti->ti_win);
427 NTOHS(ti->ti_urp);
428
429 /*
430 * Drop TCP, IP headers and TCP options.
431 */
432 m->m_data += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
433 m->m_len -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
434
435 /*
436 * Locate pcb for segment.
437 */
438findso:
439 if (so != NULL && so != &tcb)
440 SOCKET_UNLOCK(so);
441 QSOCKET_LOCK(tcb);
442 so = tcp_last_so;
443 if ( so->so_fport != ti->ti_dport
444 || so->so_lport != ti->ti_sport
445 || so->so_laddr.s_addr != ti->ti_src.s_addr
446 || so->so_faddr.s_addr != ti->ti_dst.s_addr)
447 {
448#ifdef VBOX_WITH_SLIRP_MT
449 struct socket *sonxt;
450#endif
451 QSOCKET_UNLOCK(tcb);
452 /* @todo fix the SOLOOKUP macro definition to be usable here */
453#ifndef VBOX_WITH_SLIRP_MT
454 so = solookup(&tcb, ti->ti_src, ti->ti_sport,
455 ti->ti_dst, ti->ti_dport);
456#else
457 so = NULL;
458 QSOCKET_FOREACH(so, sonxt, tcp)
459 /* { */
460 if ( so->so_lport == ti->ti_sport
461 && so->so_laddr.s_addr == ti->ti_src.s_addr
462 && so->so_faddr.s_addr == ti->ti_dst.s_addr
463 && so->so_fport == ti->ti_dport
464 && so->so_deleted != 1)
465 {
466 break; /* so is locked here */
467 }
468 LOOP_LABEL(tcp, so, sonxt);
469 }
470 if (so == &tcb) {
471 so = NULL;
472 }
473#endif
474 if (so)
475 {
476 tcp_last_so = so;
477 }
478 ++tcpstat.tcps_socachemiss;
479 }
480 else
481 {
482 SOCKET_LOCK(so);
483 QSOCKET_UNLOCK(tcb);
484 }
485
486 /*
487 * If the state is CLOSED (i.e., TCB does not exist) then
488 * all data in the incoming segment is discarded.
489 * If the TCB exists but is in CLOSED state, it is embryonic,
490 * but should either do a listen or a connect soon.
491 *
492 * state == CLOSED means we've done socreate() but haven't
493 * attached it to a protocol yet...
494 *
495 * XXX If a TCB does not exist, and the TH_SYN flag is
496 * the only flag set, then create a session, mark it
497 * as if it was LISTENING, and continue...
498 */
499 if (so == 0)
500 {
501 if ((tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) != TH_SYN)
502 goto dropwithreset;
503
504 if ((so = socreate()) == NULL)
505 goto dropwithreset;
506 if (tcp_attach(pData, so) < 0)
507 {
508 RTMemFree(so); /* Not sofree (if it failed, it's not insqued) */
509 goto dropwithreset;
510 }
511 SOCKET_LOCK(so);
512#ifndef VBOX_WITH_SLIRP_BSD_SBUF
513 sbreserve(pData, &so->so_snd, tcp_sndspace);
514 sbreserve(pData, &so->so_rcv, tcp_rcvspace);
515#else
516 sbuf_new(&so->so_snd, NULL, tcp_sndspace, SBUF_AUTOEXTEND);
517 sbuf_new(&so->so_rcv, NULL, tcp_rcvspace, SBUF_AUTOEXTEND);
518#endif
519
520/* tcp_last_so = so; */ /* XXX ? */
521/* tp = sototcpcb(so); */
522
523 so->so_laddr = ti->ti_src;
524 so->so_lport = ti->ti_sport;
525 so->so_faddr = ti->ti_dst;
526 so->so_fport = ti->ti_dport;
527
528 so->so_iptos = ((struct ip *)ti)->ip_tos;
529
530 tp = sototcpcb(so);
531 tp->t_state = TCPS_LISTEN;
532 }
533
534 /*
535 * If this is a still-connecting socket, this is probably
536 * a retransmit of the SYN. Whether it's a retransmitted SYN
537 * or something else, we nuke it.
538 */
539 if (so->so_state & SS_ISFCONNECTING)
540 {
541 goto drop;
542 }
543
544 tp = sototcpcb(so);
545
546 /* XXX Should never fail */
547 if (tp == 0)
548 goto dropwithreset;
549 if (tp->t_state == TCPS_CLOSED)
550 {
551 goto drop;
552 }
553
554 /* Unscale the window into a 32-bit value. */
555/* if ((tiflags & TH_SYN) == 0)
556 * tiwin = ti->ti_win << tp->snd_scale;
557 * else
558 */
559 tiwin = ti->ti_win;
560
561 /*
562 * Segment received on connection.
563 * Reset idle time and keep-alive timer.
564 */
565 tp->t_idle = 0;
566 if (so_options)
567 tp->t_timer[TCPT_KEEP] = tcp_keepintvl;
568 else
569 tp->t_timer[TCPT_KEEP] = tcp_keepidle;
570
571 /*
572 * Process options if not in LISTEN state,
573 * else do it below (after getting remote address).
574 */
575 if (optp && tp->t_state != TCPS_LISTEN)
576 tcp_dooptions(pData, tp, (u_char *)optp, optlen, ti);
577/* , */
578/* &ts_present, &ts_val, &ts_ecr); */
579
580 /*
581 * Header prediction: check for the two common cases
582 * of a uni-directional data xfer. If the packet has
583 * no control flags, is in-sequence, the window didn't
584 * change and we're not retransmitting, it's a
585 * candidate. If the length is zero and the ack moved
586 * forward, we're the sender side of the xfer. Just
587 * free the data acked & wake any higher level process
588 * that was blocked waiting for space. If the length
589 * is non-zero and the ack didn't move, we're the
590 * receiver side. If we're getting packets in-order
591 * (the reassembly queue is empty), add the data to
592 * the socket buffer and note that we need a delayed ack.
593 *
594 * XXX Some of these tests are not needed
595 * eg: the tiwin == tp->snd_wnd prevents many more
596 * predictions.. with no *real* advantage..
597 */
598 if ( tp->t_state == TCPS_ESTABLISHED
599 && (tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK
600/* && (!ts_present || TSTMP_GEQ(ts_val, tp->ts_recent)) */
601 && ti->ti_seq == tp->rcv_nxt
602 && tiwin && tiwin == tp->snd_wnd
603 && tp->snd_nxt == tp->snd_max)
604 {
605 /*
606 * If last ACK falls within this segment's sequence numbers,
607 * record the timestamp.
608 */
609#if 0
610 if (ts_present && SEQ_LEQ(ti->ti_seq, tp->last_ack_sent) &&
611 SEQ_LT(tp->last_ack_sent, ti->ti_seq + ti->ti_len))
612 {
613 tp->ts_recent_age = tcp_now;
614 tp->ts_recent = ts_val;
615 }
616#endif
617
618 if (ti->ti_len == 0)
619 {
620 if ( SEQ_GT(ti->ti_ack, tp->snd_una)
621 && SEQ_LEQ(ti->ti_ack, tp->snd_max)
622 && tp->snd_cwnd >= tp->snd_wnd)
623 {
624 /*
625 * this is a pure ack for outstanding data.
626 */
627 ++tcpstat.tcps_predack;
628#if 0
629 if (ts_present)
630 tcp_xmit_timer(tp, tcp_now-ts_ecr+1);
631 else
632#endif
633 if ( tp->t_rtt
634 && SEQ_GT(ti->ti_ack, tp->t_rtseq))
635 tcp_xmit_timer(pData, tp, tp->t_rtt);
636 acked = ti->ti_ack - tp->snd_una;
637 tcpstat.tcps_rcvackpack++;
638 tcpstat.tcps_rcvackbyte += acked;
639#ifndef VBOX_WITH_SLIRP_BSD_SBUF
640 sbdrop(&so->so_snd, acked);
641#else
642 if (sbuf_len(&so->so_snd) < acked)
643 /* drop everything the sbuf holds */
644 sbuf_setpos(&so->so_snd, 0);
645 else
646 sbuf_setpos(&so->so_snd, sbuf_len(&so->so_snd) - acked);
647#endif
648 tp->snd_una = ti->ti_ack;
649 m_freem(pData, m);
650
651 /*
652 * If all outstanding data are acked, stop
653 * retransmit timer, otherwise restart timer
654 * using current (possibly backed-off) value.
655 * If process is waiting for space,
656 * wakeup/selwakeup/signal. If data
657 * are ready to send, let tcp_output
658 * decide between more output or persist.
659 */
660 if (tp->snd_una == tp->snd_max)
661 tp->t_timer[TCPT_REXMT] = 0;
662 else if (tp->t_timer[TCPT_PERSIST] == 0)
663 tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
664
665 /*
666 * There's room in so_snd, sowwakup will read()
667 * from the socket if we can
668 */
669#if 0
670 if (so->so_snd.sb_flags & SB_NOTIFY)
671 sowwakeup(so);
672#endif
673 /*
674 * This is called because sowwakeup might have
675 * put data into so_snd. Since we don't do sowwakeup,
676 * we don't need this... XXX???
677 */
678 if (SBUF_LEN(&so->so_snd))
679 (void) tcp_output(pData, tp);
680
681 SOCKET_UNLOCK(so);
682 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
683 return;
684 }
685 }
686 else if ( ti->ti_ack == tp->snd_una
687 && LIST_FIRST(&tp->t_segq)
688 && ti->ti_len <= sbspace(&so->so_rcv))
689 {
690 /*
691 * this is a pure, in-sequence data packet
692 * with nothing on the reassembly queue and
693 * we have enough buffer space to take it.
694 */
695 ++tcpstat.tcps_preddat;
696 tp->rcv_nxt += ti->ti_len;
697 tcpstat.tcps_rcvpack++;
698 tcpstat.tcps_rcvbyte += ti->ti_len;
699 /*
700 * Add data to socket buffer.
701 */
702 sbappend(pData, so, m);
703
704 /*
705 * XXX This is called when data arrives. Later, check
706 * if we can actually write() to the socket
707 * XXX Need to check? It'll be NON_BLOCKING
708 */
709/* sorwakeup(so); */
710
711 /*
712 * If this is a short packet, then ACK now - with Nagle
713 * congestion avoidance the sender won't send more until
714 * it gets an ACK.
715 *
716 * It is better to not delay acks at all to maximize
717 * TCP throughput. See RFC 2581.
718 */
719 tp->t_flags |= TF_ACKNOW;
720 tcp_output(pData, tp);
721 SOCKET_UNLOCK(so);
722 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
723 return;
724 }
725 } /* header prediction */
726 /*
727 * Calculate amount of space in receive window,
728 * and then do TCP input processing.
729 * Receive window is amount of space in rcv queue,
730 * but not less than advertised window.
731 */
732 {
733 int win;
734 win = sbspace(&so->so_rcv);
735 if (win < 0)
736 win = 0;
737 tp->rcv_wnd = max(win, (int)(tp->rcv_adv - tp->rcv_nxt));
738 }
739
740 switch (tp->t_state)
741 {
742 /*
743 * If the state is LISTEN then ignore segment if it contains an RST.
744 * If the segment contains an ACK then it is bad and send a RST.
745 * If it does not contain a SYN then it is not interesting; drop it.
746 * Don't bother responding if the destination was a broadcast.
747 * Otherwise initialize tp->rcv_nxt, and tp->irs, select an initial
748 * tp->iss, and send a segment:
749 * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
750 * Also initialize tp->snd_nxt to tp->iss+1 and tp->snd_una to tp->iss.
751 * Fill in remote peer address fields if not previously specified.
752 * Enter SYN_RECEIVED state, and process any other fields of this
753 * segment in this state.
754 */
755 case TCPS_LISTEN:
756 {
757 if (tiflags & TH_RST)
758 goto drop;
759 if (tiflags & TH_ACK)
760 goto dropwithreset;
761 if ((tiflags & TH_SYN) == 0)
762 goto drop;
763
764 /*
765 * This has way too many gotos...
766 * But a bit of spaghetti code never hurt anybody :)
767 */
768 if ( (tcp_fconnect(pData, so) == -1)
769 && errno != EINPROGRESS
770 && errno != EWOULDBLOCK)
771 {
772 u_char code = ICMP_UNREACH_NET;
773 Log2((" tcp fconnect errno = %d (%s)\n", errno, strerror(errno)));
774 if (errno == ECONNREFUSED)
775 {
776 /* ACK the SYN, send RST to refuse the connection */
777 tcp_respond(pData, tp, ti, m, ti->ti_seq+1, (tcp_seq)0,
778 TH_RST|TH_ACK);
779 }
780 else
781 {
782 if (errno == EHOSTUNREACH)
783 code = ICMP_UNREACH_HOST;
784 HTONL(ti->ti_seq); /* restore tcp header */
785 HTONL(ti->ti_ack);
786 HTONS(ti->ti_win);
787 HTONS(ti->ti_urp);
788 m->m_data -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
789 m->m_len += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
790 *ip = save_ip;
791 icmp_error(pData, m, ICMP_UNREACH, code, 0, strerror(errno));
792 tp->t_socket->so_m = NULL;
793 }
794 tp = tcp_close(pData, tp);
795 }
796 else
797 {
798 /*
799 * Haven't connected yet, save the current mbuf
800 * and ti, and return
801 * XXX Some OS's don't tell us whether the connect()
802 * succeeded or not. So we must time it out.
803 */
804 so->so_m = m;
805 so->so_ti = ti;
806 tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT;
807 tp->t_state = TCPS_SYN_RECEIVED;
808 }
809 SOCKET_UNLOCK(so);
810 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
811 return;
812
813cont_conn:
814 /* m==NULL
815 * Check if the connect succeeded
816 */
817 if (so->so_state & SS_NOFDREF)
818 {
819 tp = tcp_close(pData, tp);
820 goto dropwithreset;
821 }
822cont_input:
823 tcp_template(tp);
824
825 if (optp)
826 tcp_dooptions(pData, tp, (u_char *)optp, optlen, ti);
827
828 if (iss)
829 tp->iss = iss;
830 else
831 tp->iss = tcp_iss;
832 tcp_iss += TCP_ISSINCR/2;
833 tp->irs = ti->ti_seq;
834 tcp_sendseqinit(tp);
835 tcp_rcvseqinit(tp);
836 tp->t_flags |= TF_ACKNOW;
837 tp->t_state = TCPS_SYN_RECEIVED;
838 tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT;
839 tcpstat.tcps_accepts++;
840 goto trimthenstep6;
841 } /* case TCPS_LISTEN */
842
843 /*
844 * If the state is SYN_SENT:
845 * if seg contains an ACK, but not for our SYN, drop the input.
846 * if seg contains a RST, then drop the connection.
847 * if seg does not contain SYN, then drop it.
848 * Otherwise this is an acceptable SYN segment
849 * initialize tp->rcv_nxt and tp->irs
850 * if seg contains ack then advance tp->snd_una
851 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
852 * arrange for segment to be acked (eventually)
853 * continue processing rest of data/controls, beginning with URG
854 */
855 case TCPS_SYN_SENT:
856 if ( (tiflags & TH_ACK)
857 && ( SEQ_LEQ(ti->ti_ack, tp->iss)
858 || SEQ_GT(ti->ti_ack, tp->snd_max)))
859 goto dropwithreset;
860
861 if (tiflags & TH_RST)
862 {
863 if (tiflags & TH_ACK)
864 tp = tcp_drop(pData, tp, 0); /* XXX Check t_softerror! */
865 goto drop;
866 }
867
868 if ((tiflags & TH_SYN) == 0)
869 {
870 goto drop;
871 }
872 if (tiflags & TH_ACK)
873 {
874 tp->snd_una = ti->ti_ack;
875 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
876 tp->snd_nxt = tp->snd_una;
877 }
878
879 tp->t_timer[TCPT_REXMT] = 0;
880 tp->irs = ti->ti_seq;
881 tcp_rcvseqinit(tp);
882 tp->t_flags |= TF_ACKNOW;
883 if (tiflags & TH_ACK && SEQ_GT(tp->snd_una, tp->iss))
884 {
885 tcpstat.tcps_connects++;
886 soisfconnected(so);
887 tp->t_state = TCPS_ESTABLISHED;
888
889 /* Do window scaling on this connection? */
890#if 0
891 if (( tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE))
892 == (TF_RCVD_SCALE|TF_REQ_SCALE))
893 {
894 tp->snd_scale = tp->requested_s_scale;
895 tp->rcv_scale = tp->request_r_scale;
896 }
897#endif
898 (void) tcp_reass(pData, tp, (struct tcphdr *)0, NULL, (struct mbuf *)0);
899 /*
900 * if we didn't have to retransmit the SYN,
901 * use its rtt as our initial srtt & rtt var.
902 */
903 if (tp->t_rtt)
904 tcp_xmit_timer(pData, tp, tp->t_rtt);
905 }
906 else
907 tp->t_state = TCPS_SYN_RECEIVED;
908
909trimthenstep6:
910 /*
911 * Advance ti->ti_seq to correspond to first data byte.
912 * If data, trim to stay within window,
913 * dropping FIN if necessary.
914 */
915 ti->ti_seq++;
916 if (ti->ti_len > tp->rcv_wnd)
917 {
918 todrop = ti->ti_len - tp->rcv_wnd;
919 m_adj(m, -todrop);
920 ti->ti_len = tp->rcv_wnd;
921 tiflags &= ~TH_FIN;
922 tcpstat.tcps_rcvpackafterwin++;
923 tcpstat.tcps_rcvbyteafterwin += todrop;
924 }
925 tp->snd_wl1 = ti->ti_seq - 1;
926 tp->rcv_up = ti->ti_seq;
927 Log2(("hit6\n"));
928 goto step6;
929 } /* switch tp->t_state */
930 /*
931 * States other than LISTEN or SYN_SENT.
932 * First check timestamp, if present.
933 * Then check that at least some bytes of segment are within
934 * receive window. If segment begins before rcv_nxt,
935 * drop leading data (and SYN); if nothing left, just ack.
936 *
937 * RFC 1323 PAWS: If we have a timestamp reply on this segment
938 * and it's less than ts_recent, drop it.
939 */
940#if 0
941 if ( ts_present
942 && (tiflags & TH_RST) == 0
943 && tp->ts_recent
944 && TSTMP_LT(ts_val, tp->ts_recent))
945 {
946 /* Check to see if ts_recent is over 24 days old. */
947 if ((int)(tcp_now - tp->ts_recent_age) > TCP_PAWS_IDLE)
948 {
949 /*
950 * Invalidate ts_recent. If this segment updates
951 * ts_recent, the age will be reset later and ts_recent
952 * will get a valid value. If it does not, setting
953 * ts_recent to zero will at least satisfy the
954 * requirement that zero be placed in the timestamp
955 * echo reply when ts_recent isn't valid. The
956 * age isn't reset until we get a valid ts_recent
957 * because we don't want out-of-order segments to be
958 * dropped when ts_recent is old.
959 */
960 tp->ts_recent = 0;
961 }
962 else
963 {
964 tcpstat.tcps_rcvduppack++;
965 tcpstat.tcps_rcvdupbyte += ti->ti_len;
966 tcpstat.tcps_pawsdrop++;
967 goto dropafterack;
968 }
969 }
970#endif
971
972 todrop = tp->rcv_nxt - ti->ti_seq;
973 if (todrop > 0)
974 {
975 if (tiflags & TH_SYN)
976 {
977 tiflags &= ~TH_SYN;
978 ti->ti_seq++;
979 if (ti->ti_urp > 1)
980 ti->ti_urp--;
981 else
982 tiflags &= ~TH_URG;
983 todrop--;
984 }
985 /*
986 * Following if statement from Stevens, vol. 2, p. 960.
987 */
988 if ( todrop > ti->ti_len
989 || ( todrop == ti->ti_len
990 && (tiflags & TH_FIN) == 0))
991 {
992 /*
993 * Any valid FIN must be to the left of the window.
994 * At this point the FIN must be a duplicate or out
995 * of sequence; drop it.
996 */
997 tiflags &= ~TH_FIN;
998
999 /*
1000 * Send an ACK to resynchronize and drop any data.
1001 * But keep on processing for RST or ACK.
1002 */
1003 tp->t_flags |= TF_ACKNOW;
1004 todrop = ti->ti_len;
1005 tcpstat.tcps_rcvduppack++;
1006 tcpstat.tcps_rcvdupbyte += todrop;
1007 }
1008 else
1009 {
1010 tcpstat.tcps_rcvpartduppack++;
1011 tcpstat.tcps_rcvpartdupbyte += todrop;
1012 }
1013 m_adj(m, todrop);
1014 ti->ti_seq += todrop;
1015 ti->ti_len -= todrop;
1016 if (ti->ti_urp > todrop)
1017 ti->ti_urp -= todrop;
1018 else
1019 {
1020 tiflags &= ~TH_URG;
1021 ti->ti_urp = 0;
1022 }
1023 }
1024 /*
1025 * If new data are received on a connection after the
1026 * user processes are gone, then RST the other end.
1027 */
1028 if ( (so->so_state & SS_NOFDREF)
1029 && tp->t_state > TCPS_CLOSE_WAIT && ti->ti_len)
1030 {
1031 tp = tcp_close(pData, tp);
1032 tcpstat.tcps_rcvafterclose++;
1033 goto dropwithreset;
1034 }
1035
1036 /*
1037 * If segment ends after window, drop trailing data
1038 * (and PUSH and FIN); if nothing left, just ACK.
1039 */
1040 todrop = (ti->ti_seq+ti->ti_len) - (tp->rcv_nxt+tp->rcv_wnd);
1041 if (todrop > 0)
1042 {
1043 tcpstat.tcps_rcvpackafterwin++;
1044 if (todrop >= ti->ti_len)
1045 {
1046 tcpstat.tcps_rcvbyteafterwin += ti->ti_len;
1047 /*
1048 * If a new connection request is received
1049 * while in TIME_WAIT, drop the old connection
1050 * and start over if the sequence numbers
1051 * are above the previous ones.
1052 */
1053 if ( tiflags & TH_SYN
1054 && tp->t_state == TCPS_TIME_WAIT
1055 && SEQ_GT(ti->ti_seq, tp->rcv_nxt))
1056 {
1057 iss = tp->rcv_nxt + TCP_ISSINCR;
1058 tp = tcp_close(pData, tp);
1059 SOCKET_UNLOCK(tp->t_socket);
1060 goto findso;
1061 }
1062 /*
1063 * If window is closed can only take segments at
1064 * window edge, and have to drop data and PUSH from
1065 * incoming segments. Continue processing, but
1066 * remember to ack. Otherwise, drop segment
1067 * and ack.
1068 */
1069 if (tp->rcv_wnd == 0 && ti->ti_seq == tp->rcv_nxt)
1070 {
1071 tp->t_flags |= TF_ACKNOW;
1072 tcpstat.tcps_rcvwinprobe++;
1073 }
1074 else
1075 goto dropafterack;
1076 }
1077 else
1078 tcpstat.tcps_rcvbyteafterwin += todrop;
1079 m_adj(m, -todrop);
1080 ti->ti_len -= todrop;
1081 tiflags &= ~(TH_PUSH|TH_FIN);
1082 }
1083
1084 /*
1085 * If last ACK falls within this segment's sequence numbers,
1086 * record its timestamp.
1087 */
1088#if 0
1089 if ( ts_present
1090 && SEQ_LEQ(ti->ti_seq, tp->last_ack_sent)
1091 && SEQ_LT(tp->last_ack_sent, ti->ti_seq + ti->ti_len + ((tiflags & (TH_SYN|TH_FIN)) != 0)))
1092 {
1093 tp->ts_recent_age = tcp_now;
1094 tp->ts_recent = ts_val;
1095 }
1096#endif
1097
1098 /*
1099 * If the RST bit is set examine the state:
1100 * SYN_RECEIVED STATE:
1101 * If passive open, return to LISTEN state.
1102 * If active open, inform user that connection was refused.
1103 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT2, CLOSE_WAIT STATES:
1104 * Inform user that connection was reset, and close tcb.
1105 * CLOSING, LAST_ACK, TIME_WAIT STATES
1106 * Close the tcb.
1107 */
1108 if (tiflags&TH_RST)
1109 switch (tp->t_state)
1110 {
1111 case TCPS_SYN_RECEIVED:
1112/* so->so_error = ECONNREFUSED; */
1113 goto close;
1114
1115 case TCPS_ESTABLISHED:
1116 case TCPS_FIN_WAIT_1:
1117 case TCPS_FIN_WAIT_2:
1118 case TCPS_CLOSE_WAIT:
1119/* so->so_error = ECONNRESET; */
1120close:
1121 tp->t_state = TCPS_CLOSED;
1122 tcpstat.tcps_drops++;
1123 tp = tcp_close(pData, tp);
1124 goto drop;
1125
1126 case TCPS_CLOSING:
1127 case TCPS_LAST_ACK:
1128 case TCPS_TIME_WAIT:
1129 tp = tcp_close(pData, tp);
1130 goto drop;
1131 }
1132
1133 /*
1134 * If a SYN is in the window, then this is an
1135 * error and we send an RST and drop the connection.
1136 */
1137 if (tiflags & TH_SYN)
1138 {
1139 tp = tcp_drop(pData, tp, 0);
1140 goto dropwithreset;
1141 }
1142
1143 /*
1144 * If the ACK bit is off we drop the segment and return.
1145 */
1146 if ((tiflags & TH_ACK) == 0)
1147 {
1148 goto drop;
1149 }
1150
1151 /*
1152 * Ack processing.
1153 */
1154 switch (tp->t_state)
1155 {
1156 /*
1157 * In SYN_RECEIVED state if the ack ACKs our SYN then enter
1158 * ESTABLISHED state and continue processing, otherwise
1159 * send an RST. una<=ack<=max
1160 */
1161 case TCPS_SYN_RECEIVED:
1162 if ( SEQ_GT(tp->snd_una, ti->ti_ack)
1163 || SEQ_GT(ti->ti_ack, tp->snd_max))
1164 goto dropwithreset;
1165 tcpstat.tcps_connects++;
1166 tp->t_state = TCPS_ESTABLISHED;
1167 /*
1168 * The sent SYN is ack'ed with our sequence number + 1.
1169 * The first data byte already in the buffer will get
1170 * lost if no correction is made. This is only needed for
1171 * SS_CTL since the buffer is empty otherwise.
1172 * tp->snd_una++; or:
1173 */
1174 tp->snd_una = ti->ti_ack;
1175 soisfconnected(so);
1176
1177 /* Do window scaling? */
1178#if 0
1179 if ( (tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE))
1180 == (TF_RCVD_SCALE|TF_REQ_SCALE))
1181 {
1182 tp->snd_scale = tp->requested_s_scale;
1183 tp->rcv_scale = tp->request_r_scale;
1184 }
1185#endif
1186 (void) tcp_reass(pData, tp, (struct tcphdr *)0, (int *)0, (struct mbuf *)0);
1187 tp->snd_wl1 = ti->ti_seq - 1;
1188 /* Avoid ack processing; snd_una==ti_ack => dup ack */
1189 goto synrx_to_est;
1190 /* fall into ... */
1191
1192 /*
1193 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
1194 * ACKs. If the ack is in the range
1195 * tp->snd_una < ti->ti_ack <= tp->snd_max
1196 * then advance tp->snd_una to ti->ti_ack and drop
1197 * data from the retransmission queue. If this ACK reflects
1198 * more up to date window information we update our window information.
1199 */
1200 case TCPS_ESTABLISHED:
1201 case TCPS_FIN_WAIT_1:
1202 case TCPS_FIN_WAIT_2:
1203 case TCPS_CLOSE_WAIT:
1204 case TCPS_CLOSING:
1205 case TCPS_LAST_ACK:
1206 case TCPS_TIME_WAIT:
1207 if (SEQ_LEQ(ti->ti_ack, tp->snd_una))
1208 {
1209 if (ti->ti_len == 0 && tiwin == tp->snd_wnd)
1210 {
1211 tcpstat.tcps_rcvdupack++;
1212 Log2((" dup ack m = %lx, so = %lx\n", (long)m, (long)so));
1213 /*
1214 * If we have outstanding data (other than
1215 * a window probe), this is a completely
1216 * duplicate ack (ie, window info didn't
1217 * change), the ack is the biggest we've
1218 * seen and we've seen exactly our rexmt
1219 * threshold of them, assume a packet
1220 * has been dropped and retransmit it.
1221 * Kludge snd_nxt & the congestion
1222 * window so we send only this one
1223 * packet.
1224 *
1225 * We know we're losing at the current
1226 * window size so do congestion avoidance
1227 * (set ssthresh to half the current window
1228 * and pull our congestion window back to
1229 * the new ssthresh).
1230 *
1231 * Dup acks mean that packets have left the
1232 * network (they're now cached at the receiver)
1233 * so bump cwnd by the amount in the receiver
1234 * to keep a constant cwnd packets in the
1235 * network.
1236 */
1237 if ( tp->t_timer[TCPT_REXMT] == 0
1238 || ti->ti_ack != tp->snd_una)
1239 tp->t_dupacks = 0;
1240 else if (++tp->t_dupacks == tcprexmtthresh)
1241 {
1242 tcp_seq onxt = tp->snd_nxt;
1243 u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
1244 if (win < 2)
1245 win = 2;
1246 tp->snd_ssthresh = win * tp->t_maxseg;
1247 tp->t_timer[TCPT_REXMT] = 0;
1248 tp->t_rtt = 0;
1249 tp->snd_nxt = ti->ti_ack;
1250 tp->snd_cwnd = tp->t_maxseg;
1251 (void) tcp_output(pData, tp);
1252 tp->snd_cwnd = tp->snd_ssthresh +
1253 tp->t_maxseg * tp->t_dupacks;
1254 if (SEQ_GT(onxt, tp->snd_nxt))
1255 tp->snd_nxt = onxt;
1256 goto drop;
1257 }
1258 else if (tp->t_dupacks > tcprexmtthresh)
1259 {
1260 tp->snd_cwnd += tp->t_maxseg;
1261 (void) tcp_output(pData, tp);
1262 goto drop;
1263 }
1264 }
1265 else
1266 tp->t_dupacks = 0;
1267 break;
1268 }
1269synrx_to_est:
1270 /*
1271 * If the congestion window was inflated to account
1272 * for the other side's cached packets, retract it.
1273 */
1274 if ( tp->t_dupacks > tcprexmtthresh
1275 && tp->snd_cwnd > tp->snd_ssthresh)
1276 tp->snd_cwnd = tp->snd_ssthresh;
1277 tp->t_dupacks = 0;
1278 if (SEQ_GT(ti->ti_ack, tp->snd_max))
1279 {
1280 tcpstat.tcps_rcvacktoomuch++;
1281 goto dropafterack;
1282 }
1283 acked = ti->ti_ack - tp->snd_una;
1284 tcpstat.tcps_rcvackpack++;
1285 tcpstat.tcps_rcvackbyte += acked;
1286
1287 /*
1288 * If we have a timestamp reply, update smoothed
1289 * round trip time. If no timestamp is present but
1290 * transmit timer is running and timed sequence
1291 * number was acked, update smoothed round trip time.
1292 * Since we now have an rtt measurement, cancel the
1293 * timer backoff (cf., Phil Karn's retransmit alg.).
1294 * Recompute the initial retransmit timer.
1295 */
1296#if 0
1297 if (ts_present)
1298 tcp_xmit_timer(tp, tcp_now-ts_ecr+1);
1299 else
1300#endif
1301 if (tp->t_rtt && SEQ_GT(ti->ti_ack, tp->t_rtseq))
1302 tcp_xmit_timer(pData, tp, tp->t_rtt);
1303
1304 /*
1305 * If all outstanding data is acked, stop retransmit
1306 * timer and remember to restart (more output or persist).
1307 * If there is more data to be acked, restart retransmit
1308 * timer, using current (possibly backed-off) value.
1309 */
1310 if (ti->ti_ack == tp->snd_max)
1311 {
1312 tp->t_timer[TCPT_REXMT] = 0;
1313 needoutput = 1;
1314 }
1315 else if (tp->t_timer[TCPT_PERSIST] == 0)
1316 tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
1317 /*
1318 * When new data is acked, open the congestion window.
1319 * If the window gives us less than ssthresh packets
1320 * in flight, open exponentially (maxseg per packet).
1321 * Otherwise open linearly: maxseg per window
1322 * (maxseg^2 / cwnd per packet).
1323 */
1324 {
1325 register u_int cw = tp->snd_cwnd;
1326 register u_int incr = tp->t_maxseg;
1327
1328 if (cw > tp->snd_ssthresh)
1329 incr = incr * incr / cw;
1330 tp->snd_cwnd = min(cw + incr, TCP_MAXWIN<<tp->snd_scale);
1331 }
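 /*
  * Editor's note - worked example, not part of the original source.
  * With a hypothetical t_maxseg of 1460 and snd_ssthresh of 8760:
  * an ack arriving while snd_cwnd = 5840 (< ssthresh) grows the window by
  * a whole segment, 5840 + 1460 = 7300 (slow start); once snd_cwnd = 14600
  * (> ssthresh) the increment becomes 1460 * 1460 / 14600 = 146 bytes per
  * ack, i.e. roughly one segment per window of acks (congestion avoidance).
  */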
1332 if (acked > SBUF_LEN(&so->so_snd))
1333 {
1334 tp->snd_wnd -= SBUF_LEN(&so->so_snd);
1335#ifndef VBOX_WITH_SLIRP_BSD_SBUF
1336 sbdrop(&so->so_snd, (int)so->so_snd.sb_cc);
1337#else
1338 sbuf_clear(&so->so_snd);
1339#endif
1340 ourfinisacked = 1;
1341 }
1342 else
1343 {
1344#ifndef VBOX_WITH_SLIRP_BSD_SBUF
1345 sbdrop(&so->so_snd, acked);
1346#else
1347 sbuf_setpos(&so->so_snd, sbuf_len(&so->so_snd) - acked);
1348#endif
1349 tp->snd_wnd -= acked;
1350 ourfinisacked = 0;
1351 }
1352 /*
1353 * XXX sowwakeup is called when data is acked and there's room
1354 * for more data... it should read() the socket
1355 */
1356#if 0
1357 if (so->so_snd.sb_flags & SB_NOTIFY)
1358 sowwakeup(so);
1359#endif
1360 tp->snd_una = ti->ti_ack;
1361 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
1362 tp->snd_nxt = tp->snd_una;
1363
1364 switch (tp->t_state)
1365 {
1366 /*
1367 * In FIN_WAIT_1 STATE in addition to the processing
1368 * for the ESTABLISHED state if our FIN is now acknowledged
1369 * then enter FIN_WAIT_2.
1370 */
1371 case TCPS_FIN_WAIT_1:
1372 if (ourfinisacked)
1373 {
1374 /*
1375 * If we can't receive any more
1376 * data, then closing user can proceed.
1377 * Starting the timer is contrary to the
1378 * specification, but if we don't get a FIN
1379 * we'll hang forever.
1380 */
1381 if (so->so_state & SS_FCANTRCVMORE)
1382 {
1383 soisfdisconnected(so);
1384 tp->t_timer[TCPT_2MSL] = tcp_maxidle;
1385 }
1386 tp->t_state = TCPS_FIN_WAIT_2;
1387 }
1388 break;
1389
1390 /*
1391 * In CLOSING STATE in addition to the processing for
1392 * the ESTABLISHED state if the ACK acknowledges our FIN
1393 * then enter the TIME-WAIT state, otherwise ignore
1394 * the segment.
1395 */
1396 case TCPS_CLOSING:
1397 if (ourfinisacked)
1398 {
1399 tp->t_state = TCPS_TIME_WAIT;
1400 tcp_canceltimers(tp);
1401 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1402 soisfdisconnected(so);
1403 }
1404 break;
1405
1406 /*
1407 * In LAST_ACK, we may still be waiting for data to drain
1408 * and/or to be acked, as well as for the ack of our FIN.
1409 * If our FIN is now acknowledged, delete the TCB,
1410 * enter the closed state and return.
1411 */
1412 case TCPS_LAST_ACK:
1413 if (ourfinisacked)
1414 {
1415 tp = tcp_close(pData, tp);
1416 goto drop;
1417 }
1418 break;
1419
1420 /*
1421 * In TIME_WAIT state the only thing that should arrive
1422 * is a retransmission of the remote FIN. Acknowledge
1423 * it and restart the finack timer.
1424 */
1425 case TCPS_TIME_WAIT:
1426 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1427 goto dropafterack;
1428 }
1429 } /* switch(tp->t_state) */
1430
1431step6:
1432 /*
1433 * Update window information.
1434 * Don't look at window if no ACK: TAC's send garbage on first SYN.
1435 */
1436 if ( (tiflags & TH_ACK)
1437 && ( SEQ_LT(tp->snd_wl1, ti->ti_seq)
1438 || ( tp->snd_wl1 == ti->ti_seq
1439 && ( SEQ_LT(tp->snd_wl2, ti->ti_ack)
1440 || ( tp->snd_wl2 == ti->ti_ack
1441 && tiwin > tp->snd_wnd)))))
1442 {
1443 /* keep track of pure window updates */
1444 if ( ti->ti_len == 0
1445 && tp->snd_wl2 == ti->ti_ack
1446 && tiwin > tp->snd_wnd)
1447 tcpstat.tcps_rcvwinupd++;
1448 tp->snd_wnd = tiwin;
1449 tp->snd_wl1 = ti->ti_seq;
1450 tp->snd_wl2 = ti->ti_ack;
1451 if (tp->snd_wnd > tp->max_sndwnd)
1452 tp->max_sndwnd = tp->snd_wnd;
1453 needoutput = 1;
1454 }
1455
1456 /*
1457 * Process segments with URG.
1458 */
1459 if ((tiflags & TH_URG) && ti->ti_urp &&
1460 TCPS_HAVERCVDFIN(tp->t_state) == 0)
1461 {
1462 /* BSD's sbufs are auto-extending so we shouldn't worry here */
1463#ifndef VBOX_WITH_SLIRP_BSD_SBUF
1464 /*
1465 * This is a kludge, but if we receive and accept
1466 * random urgent pointers, we'll crash in
1467 * soreceive. It's hard to imagine someone
1468 * actually wanting to send this much urgent data.
1469 */
1470 if (ti->ti_urp + so->so_rcv.sb_cc > so->so_rcv.sb_datalen)
1471 {
1472 ti->ti_urp = 0;
1473 tiflags &= ~TH_URG;
1474 goto dodata;
1475 }
1476#endif
1477 /*
1478 * If this segment advances the known urgent pointer,
1479 * then mark the data stream. This should not happen
1480 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
1481 * a FIN has been received from the remote side.
1482 * In these states we ignore the URG.
1483 *
1484 * According to RFC961 (Assigned Protocols),
1485 * the urgent pointer points to the last octet
1486 * of urgent data. We continue, however,
1487 * to consider it to indicate the first octet
1488 * of data past the urgent section as the original
1489 * spec states (in one of two places).
1490 */
1491 if (SEQ_GT(ti->ti_seq+ti->ti_urp, tp->rcv_up))
1492 {
1493 tp->rcv_up = ti->ti_seq + ti->ti_urp;
1494 so->so_urgc = SBUF_LEN(&so->so_rcv) +
1495 (tp->rcv_up - tp->rcv_nxt); /* -1; */
1496 tp->rcv_up = ti->ti_seq + ti->ti_urp;
1497 }
1498 }
1499 else
1500 /*
1501 * If no out of band data is expected,
1502 * pull receive urgent pointer along
1503 * with the receive window.
1504 */
1505 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
1506 tp->rcv_up = tp->rcv_nxt;
1507dodata:
1508
1509 /*
1510 * If this is a small packet, then ACK now - with Nagle
1511 * congestion avoidance the sender won't send more until
1512 * it gets an ACK.
1513 *
1514 * See above.
1515 */
1516 if ( ti->ti_len
1517 && (unsigned)ti->ti_len <= 5
1518 && ((struct tcpiphdr_2 *)ti)->first_char == (char)27)
1519 {
1520 tp->t_flags |= TF_ACKNOW;
1521 }
1522
1523 /*
1524 * Process the segment text, merging it into the TCP sequencing queue,
1525 * and arranging for acknowledgment of receipt if necessary.
1526 * This process logically involves adjusting tp->rcv_wnd as data
1527 * is presented to the user (this happens in tcp_usrreq.c,
1528 * case PRU_RCVD). If a FIN has already been received on this
1529 * connection then we just ignore the text.
1530 */
1531 if ( (ti->ti_len || (tiflags&TH_FIN))
1532 && TCPS_HAVERCVDFIN(tp->t_state) == 0)
1533 {
1534 if ( ti->ti_seq == tp->rcv_nxt
1535 && LIST_EMPTY(&tp->t_segq)
1536 && tp->t_state == TCPS_ESTABLISHED)
1537 {
1538 DELAY_ACK(tp, ti); /* a little different from the BSD declaration, see netinet/tcp_input.c */
1539 tp->rcv_nxt += tlen;
1540 tiflags = ti->ti_t.th_flags & TH_FIN;
1541 tcpstat.tcps_rcvpack++;
1542 tcpstat.tcps_rcvbyte += tlen;
1543 if (so->so_state & SS_FCANTRCVMORE)
1544 m_freem(pData, m);
1545 else
1546 sbappend(pData, so, m);
1547 }
1548 else
1549 {
1550 tiflags = tcp_reass(pData, tp, &ti->ti_t, &tlen, m);
1551 tiflags |= TF_ACKNOW;
1552 }
1553 /*
1554 * Note the amount of data that peer has sent into
1555 * our window, in order to estimate the sender's
1556 * buffer size.
1557 */
1558 len = SBUF_SIZE(&so->so_rcv) - (tp->rcv_adv - tp->rcv_nxt);
1559 }
1560 else
1561 {
1562 m_freem(pData, m);
1563 tiflags &= ~TH_FIN;
1564 }
1565
1566 /*
1567 * If FIN is received ACK the FIN and let the user know
1568 * that the connection is closing.
1569 */
1570 if (tiflags & TH_FIN)
1571 {
1572 if (TCPS_HAVERCVDFIN(tp->t_state) == 0)
1573 {
1574 /*
1575 * If we receive a FIN we can't send more data,
1576 * so set SS_FDRAIN.
1577 * Shut down the socket if there is no rx data in the
1578 * buffer.
1579 * soread() is called on completion of shutdown() and
1580 * will go to TCPS_LAST_ACK, and use tcp_output()
1581 * to send the FIN.
1582 */
1583/* sofcantrcvmore(so); */
1584 sofwdrain(so);
1585
1586 tp->t_flags |= TF_ACKNOW;
1587 tp->rcv_nxt++;
1588 }
1589 switch (tp->t_state)
1590 {
1591 /*
1592 * In SYN_RECEIVED and ESTABLISHED STATES
1593 * enter the CLOSE_WAIT state.
1594 */
1595 case TCPS_SYN_RECEIVED:
1596 case TCPS_ESTABLISHED:
1597 tp->t_state = TCPS_CLOSE_WAIT;
1598 break;
1599
1600 /*
1601 * If still in FIN_WAIT_1 STATE FIN has not been acked so
1602 * enter the CLOSING state.
1603 */
1604 case TCPS_FIN_WAIT_1:
1605 tp->t_state = TCPS_CLOSING;
1606 break;
1607
1608 /*
1609 * In FIN_WAIT_2 state enter the TIME_WAIT state,
1610 * starting the time-wait timer, turning off the other
1611 * standard timers.
1612 */
1613 case TCPS_FIN_WAIT_2:
1614 tp->t_state = TCPS_TIME_WAIT;
1615 tcp_canceltimers(tp);
1616 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1617 soisfdisconnected(so);
1618 break;
1619
1620 /*
1621 * In TIME_WAIT state restart the 2 MSL time_wait timer.
1622 */
1623 case TCPS_TIME_WAIT:
1624 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1625 break;
1626 }
1627 }
1628
1629 /*
1630 * Return any desired output.
1631 */
1632 if (needoutput || (tp->t_flags & TF_ACKNOW))
1633 tcp_output(pData, tp);
1634
1635 SOCKET_UNLOCK(so);
1636 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
1637 return;
1638
1639dropafterack:
1640 Log2(("drop after ack\n"));
1641 /*
1642 * Generate an ACK dropping incoming segment if it occupies
1643 * sequence space, where the ACK reflects our state.
1644 */
1645 if (tiflags & TH_RST)
1646 goto drop;
1647 m_freem(pData, m);
1648 tp->t_flags |= TF_ACKNOW;
1649 (void) tcp_output(pData, tp);
1650 SOCKET_UNLOCK(so);
1651 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
1652 return;
1653
1654dropwithreset:
1655 /* reuses m if m!=NULL, m_free() unnecessary */
1656 if (tiflags & TH_ACK)
1657 tcp_respond(pData, tp, ti, m, (tcp_seq)0, ti->ti_ack, TH_RST);
1658 else
1659 {
1660 if (tiflags & TH_SYN)
1661 ti->ti_len++;
1662 tcp_respond(pData, tp, ti, m, ti->ti_seq+ti->ti_len, (tcp_seq)0,
1663 TH_RST|TH_ACK);
1664 }
1665
1666 if (so != &tcb)
1667 SOCKET_UNLOCK(so);
1668 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
1669 return;
1670
1671drop:
1672 /*
1673 * Drop space held by incoming segment and return.
1674 */
1675 m_freem(pData, m);
1676
1677#ifdef VBOX_WITH_SLIRP_MT
1678 if (RTCritSectIsOwned(&so->so_mutex))
1679 {
1680 SOCKET_UNLOCK(so);
1681 }
1682#endif
1683
1684 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
1685 return;
1686}
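/*
 * Editor's note - illustrative sketch, not part of the original source.
 * tcp_input() above is entered in two ways; the exact call sites shown here
 * are assumptions, only the calling convention is taken from the code above.
 */
#if 0 /* example only */
/* 1) Normal path: the IP layer hands over a freshly received TCP segment.   */
tcp_input(pData, m, iphlen, /* inso */ NULL);

/* 2) Deferred-connect path: tcp_fconnect() started a non-blocking connect()
 *    in the LISTEN case above; when the host socket later becomes ready,
 *    processing is resumed with m == NULL and the socket passed in inso,
 *    which jumps to the cont_conn label.                                     */
tcp_input(pData, NULL, 0, so);
#endif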
1687
1688void
1689tcp_dooptions(PNATState pData, struct tcpcb *tp, u_char *cp, int cnt, struct tcpiphdr *ti)
1690{
1691 u_int16_t mss;
1692 int opt, optlen;
1693
1694 LogFlow(("tcp_dooptions: tp = %lx, cnt=%i\n", (long)tp, cnt));
1695
1696 for (; cnt > 0; cnt -= optlen, cp += optlen)
1697 {
1698 opt = cp[0];
1699 if (opt == TCPOPT_EOL)
1700 break;
1701 if (opt == TCPOPT_NOP)
1702 optlen = 1;
1703 else
1704 {
1705 optlen = cp[1];
1706 if (optlen <= 0)
1707 break;
1708 }
1709 switch (opt)
1710 {
1711 default:
1712 continue;
1713
1714 case TCPOPT_MAXSEG:
1715 if (optlen != TCPOLEN_MAXSEG)
1716 continue;
1717 if (!(ti->ti_flags & TH_SYN))
1718 continue;
1719 memcpy((char *) &mss, (char *) cp + 2, sizeof(mss));
1720 NTOHS(mss);
1721 (void) tcp_mss(pData, tp, mss); /* sets t_maxseg */
1722 break;
1723
1724#if 0
1725 case TCPOPT_WINDOW:
1726 if (optlen != TCPOLEN_WINDOW)
1727 continue;
1728 if (!(ti->ti_flags & TH_SYN))
1729 continue;
1730 tp->t_flags |= TF_RCVD_SCALE;
1731 tp->requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT);
1732 break;
1733
1734 case TCPOPT_TIMESTAMP:
1735 if (optlen != TCPOLEN_TIMESTAMP)
1736 continue;
1737 *ts_present = 1;
1738 memcpy((char *) ts_val, (char *)cp + 2, sizeof(*ts_val));
1739 NTOHL(*ts_val);
1740 memcpy((char *) ts_ecr, (char *)cp + 6, sizeof(*ts_ecr));
1741 NTOHL(*ts_ecr);
1742
1743 /*
1744 * A timestamp received in a SYN makes
1745 * it ok to send timestamp requests and replies.
1746 */
1747 if (ti->ti_flags & TH_SYN)
1748 {
1749 tp->t_flags |= TF_RCVD_TSTMP;
1750 tp->ts_recent = *ts_val;
1751 tp->ts_recent_age = tcp_now;
1752 }
1753 break;
1754#endif
1755 }
1756 }
1757}
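/*
 * Editor's note - illustrative sketch, not part of the original source.
 * The loop above steps through the options by cp[1] bytes per option (one
 * byte for a NOP). For a typical SYN that carries only an MSS option the
 * raw bytes look like this (values hypothetical):
 */
#if 0 /* example only */
static void tcp_mss_option_demo(void)
{
    /* kind=2 (TCPOPT_MAXSEG), len=4, value=1460 (0x05B4), then NOP and EOL. */
    u_char opts[] = { 0x02, 0x04, 0x05, 0xB4, 0x01, 0x00 };
    u_int16_t mss;

    memcpy(&mss, &opts[2], sizeof(mss));    /* still in network byte order   */
    NTOHS(mss);                             /* mss == 1460                   */
    /* tcp_dooptions() would now call tcp_mss() with this value on a SYN.    */
}
#endif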
1758
1759
1760/*
1761 * Pull out of band byte out of a segment so
1762 * it doesn't appear in the user's data queue.
1763 * It is still reflected in the segment length for
1764 * sequencing purposes.
1765 */
1766
1767#if 0
1768void
1769tcp_pulloutofband(struct socket *so, struct tcpiphdr *ti, struct mbuf *m)
1770{
1771 int cnt = ti->ti_urp - 1;
1772
1773 while (cnt >= 0)
1774 {
1775 if (m->m_len > cnt)
1776 {
1777 char *cp = mtod(m, caddr_t) + cnt;
1778 struct tcpcb *tp = sototcpcb(so);
1779
1780 tp->t_iobc = *cp;
1781 tp->t_oobflags |= TCPOOB_HAVEDATA;
1782 memcpy(sp, cp+1, (unsigned)(m->m_len - cnt - 1));
1783 m->m_len--;
1784 return;
1785 }
1786 cnt -= m->m_len;
1787 m = m->m_next; /* XXX WRONG! Fix it! */
1788 if (m == 0)
1789 break;
1790 }
1791 panic("tcp_pulloutofband");
1792}
1793#endif
1794
1795/*
1796 * Collect new round-trip time estimate
1797 * and update averages and current timeout.
1798 */
1799
1800void
1801tcp_xmit_timer(PNATState pData, register struct tcpcb *tp, int rtt)
1802{
1803 register short delta;
1804
1805 LogFlow(("tcp_xmit_timer: tp = %lx rtt = %d\n", (long)tp, rtt));
1806
1807 tcpstat.tcps_rttupdated++;
1808 if (tp->t_srtt != 0)
1809 {
1810 /*
1811 * srtt is stored as fixed point with 3 bits after the
1812 * binary point (i.e., scaled by 8). The following magic
1813 * is equivalent to the smoothing algorithm in rfc793 with
1814 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
1815 * point). Adjust rtt to origin 0.
1816 */
1817 delta = rtt - 1 - (tp->t_srtt >> TCP_RTT_SHIFT);
1818 if ((tp->t_srtt += delta) <= 0)
1819 tp->t_srtt = 1;
1820 /*
1821 * We accumulate a smoothed rtt variance (actually, a
1822 * smoothed mean difference), then set the retransmit
1823 * timer to smoothed rtt + 4 times the smoothed variance.
1824 * rttvar is stored as fixed point with 2 bits after the
1825 * binary point (scaled by 4). The following is
1826 * equivalent to rfc793 smoothing with an alpha of .75
1827 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
1828 * rfc793's wired-in beta.
1829 */
1830 if (delta < 0)
1831 delta = -delta;
1832 delta -= (tp->t_rttvar >> TCP_RTTVAR_SHIFT);
1833 if ((tp->t_rttvar += delta) <= 0)
1834 tp->t_rttvar = 1;
1835 }
1836 else
1837 {
1838 /*
1839 * No rtt measurement yet - use the unsmoothed rtt.
1840 * Set the variance to half the rtt (so our first
1841 * retransmit happens at 3*rtt).
1842 */
1843 tp->t_srtt = rtt << TCP_RTT_SHIFT;
1844 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
1845 }
1846 tp->t_rtt = 0;
1847 tp->t_rxtshift = 0;
1848
1849 /*
1850 * the retransmit should happen at rtt + 4 * rttvar.
1851 * Because of the way we do the smoothing, srtt and rttvar
1852 * will each average +1/2 tick of bias. When we compute
1853 * the retransmit timer, we want 1/2 tick of rounding and
1854 * 1 extra tick because of +-1/2 tick uncertainty in the
1855 * firing of the timer. The bias will give us exactly the
1856 * 1.5 tick we need. But, because the bias is
1857 * statistical, we have to test that we don't drop below
1858 * the minimum feasible timer (which is 2 ticks).
1859 */
1860 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
1861 (short)tp->t_rttmin, TCPTV_REXMTMAX); /* XXX */
1862
1863 /*
1864 * We received an ack for a packet that wasn't retransmitted;
1865 * it is probably safe to discard any error indications we've
1866 * received recently. This isn't quite right, but close enough
1867 * for now (a route might have failed after we sent a segment,
1868 * and the return path might not be symmetrical).
1869 */
1870 tp->t_softerror = 0;
1871}
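/*
 * Editor's note - worked example, not part of the original source.
 * t_srtt is kept scaled by 8 (TCP_RTT_SHIFT) and t_rttvar scaled by 4
 * (TCP_RTTVAR_SHIFT). Assume t_srtt = 64 (8 ticks), t_rttvar = 12 (3 ticks)
 * and a new measurement rtt = 16 ticks arrives:
 *
 *   delta    = 16 - 1 - (64 >> 3) = 7
 *   t_srtt   = 64 + 7 = 71   ->  71 / 8 = 8.875 ticks  (7/8 old + 1/8 new)
 *   delta    = |7| - (12 >> 2) = 4
 *   t_rttvar = 12 + 4 = 16   ->  16 / 4 = 4 ticks      (3/4 old + 1/4 |err|)
 *
 * Because t_rttvar carries an implicit factor of four, adding it to the
 * unscaled srtt is what yields the "rtt + 4 * rttvar" retransmit value
 * described in the comment above.
 */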
1872
1873/*
1874 * Determine a reasonable value for maxseg size.
1875 * If the route is known, check route for mtu.
1876 * If none, use an mss that can be handled on the outgoing
1877 * interface without forcing IP to fragment; if bigger than
1878 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
1879 * to utilize large mbufs. If no route is found, route has no mtu,
1880 * or the destination isn't local, use a default, hopefully conservative
1881 * size (usually 512 or the default IP max size, but no more than the mtu
1882 * of the interface), as we can't discover anything about intervening
1883 * gateways or networks. We also initialize the congestion/slow start
1884 * window to be a single segment if the destination isn't local.
1885 * While looking at the routing entry, we also initialize other path-dependent
1886 * parameters from pre-set or cached values in the routing entry.
1887 */
1888
1889int
1890tcp_mss(PNATState pData, register struct tcpcb *tp, u_int offer)
1891{
1892 struct socket *so = tp->t_socket;
1893 int mss;
1894
1895 LogFlow(("tcp_mss: tp = %lx, offer = %d\n", (long)tp, offer));
1896
1897 mss = min(if_mtu, if_mru) - sizeof(struct tcpiphdr);
1898 if (offer)
1899 mss = min(mss, offer);
1900 mss = max(mss, 32);
1901 if (mss < tp->t_maxseg || offer != 0)
1902 tp->t_maxseg = mss;
1903
1904 tp->snd_cwnd = mss;
1905
1906#ifndef VBOX_WITH_SLIRP_BSD_SBUF
1907 sbreserve(pData, &so->so_snd, tcp_sndspace+((tcp_sndspace%mss)?(mss-(tcp_sndspace%mss)):0));
1908 sbreserve(pData, &so->so_rcv, tcp_rcvspace+((tcp_rcvspace%mss)?(mss-(tcp_rcvspace%mss)):0));
1909#else
1910 sbuf_new(&so->so_snd, NULL, tcp_sndspace+((tcp_sndspace%mss)?(mss-(tcp_sndspace%mss)):0), SBUF_AUTOEXTEND);
1911 sbuf_new(&so->so_rcv, NULL, tcp_rcvspace+((tcp_rcvspace%mss)?(mss-(tcp_rcvspace%mss)):0), SBUF_AUTOEXTEND);
1912#endif
1913
1914 Log2((" returning mss = %d\n", mss));
1915
1916 return mss;
1917}
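/*
 * Editor's note - worked example, not part of the original source.
 * The sbreserve()/sbuf_new() sizes above are just tcp_sndspace and
 * tcp_rcvspace rounded up to the next multiple of the negotiated mss.
 * For a hypothetical tcp_sndspace of 8192 and mss = 1460:
 *
 *   8192 % 1460 = 892   ->   8192 + (1460 - 892) = 8760 = 6 * 1460
 *
 * so each buffer holds a whole number of maximum-sized segments.
 */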