VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/tcp_input.c@ 16292

Last change on this file since 16292 was 16291, checked in by vboxsync, 16 years ago

NAT: multi-threading.
Introduces a set of macros for locking/unlocking/creating/destroying mutexes.
Also, every socket enqueueing is accompanied by lock creation.
(It doesn't work yet; it compiles on Windows, not sure about Linux.)

  • Property svn:eol-style set to native
File size: 59.8 KB
1/*
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)tcp_input.c 8.5 (Berkeley) 4/10/94
34 * tcp_input.c,v 1.10 1994/10/13 18:36:32 wollman Exp
35 */
36
37/*
38 * Changes and additions relating to SLiRP
39 * Copyright (c) 1995 Danny Gasparovski.
40 *
41 * Please read the file COPYRIGHT for the
42 * terms and conditions of the copyright.
43 */
44
45#include <slirp.h>
46#include "ip_icmp.h"
47
48
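/*
 * 24 days of idle time, expressed in PR_SLOWHZ ticks: a cached RFC 1323
 * timestamp older than this is considered too stale to use for PAWS checks.
 */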
49#define TCP_PAWS_IDLE (24 * 24 * 60 * 60 * PR_SLOWHZ)
50
51/* for modulo comparisons of timestamps */
52#define TSTMP_LT(a,b) ((int)((a)-(b)) < 0)
53#define TSTMP_GEQ(a,b) ((int)((a)-(b)) >= 0)
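/*
 * The cast to int keeps these comparisons valid across 32-bit wraparound:
 * for instance TSTMP_LT(0xfffffff0, 0x10) evaluates (int)0xffffffe0 < 0,
 * i.e. true, even though 0xfffffff0 is larger than 0x10 as an unsigned value.
 */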
54
55#ifndef TCP_ACK_HACK
56#define DELAY_ACK(tp, ti) \
57 if (ti->ti_flags & TH_PUSH) \
58 tp->t_flags |= TF_ACKNOW; \
59 else \
60 tp->t_flags |= TF_DELACK;
61#else /* !TCP_ACK_HACK */
62#define DELAY_ACK(tp, ign) \
63 tp->t_flags |= TF_DELACK;
64#endif /* TCP_ACK_HACK */
65
66
67/*
68 * deps: netinet/tcp_reass.c
69 * tcp_reass_maxqlen = 48 (default)
70 * tcp_reass_maxseg = nmbclusters/16 (nmbclusters = 1024 + maxusers * 64 in kern/kern_mbuf.c; let's say 256)
71 */
72int
73tcp_reass(PNATState pData, struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m)
74{
75 struct tseg_qent *q;
76 struct tseg_qent *p = NULL;
77 struct tseg_qent *nq;
78 struct tseg_qent *te = NULL;
79 struct socket *so = tp->t_socket;
80 int flags;
81
82 /*
83 * XXX: tcp_reass() is rather inefficient with its data structures
84 * and should be rewritten (see NetBSD for optimizations). While
85 * doing that it should move to its own file tcp_reass.c.
86 */
87
88 /*
89 * Call with th==NULL after the connection becomes established to
90 * force pre-ESTABLISHED data up to the user socket.
91 */
92 if (th == NULL)
93 goto present;
94
95 /*
96 * Limit the number of segments in the reassembly queue to prevent
97 * holding on to too many segments (and thus running out of mbufs).
98 * Make sure to let the missing segment through which caused this
99 * queue. Always keep one global queue entry spare to be able to
100 * process the missing segment.
101 */
102 if ( th->th_seq != tp->rcv_nxt
103 && ( tcp_reass_qsize + 1 >= tcp_reass_maxseg
104 || tp->t_segqlen >= tcp_reass_maxqlen))
105 {
106 tcp_reass_overflows++;
107 tcpstat.tcps_rcvmemdrop++;
108 m_freem(pData, m);
109 *tlenp = 0;
110 return (0);
111 }
112
113 /*
114 * Allocate a new queue entry. If we can't, or hit the zone limit
115 * just drop the pkt.
116 */
117 te = RTMemAlloc(sizeof(struct tseg_qent));
118 if (te == NULL)
119 {
120 tcpstat.tcps_rcvmemdrop++;
121 m_freem(pData, m);
122 *tlenp = 0;
123 return (0);
124 }
125 tp->t_segqlen++;
126 tcp_reass_qsize++;
127
128 /*
129 * Find a segment which begins after this one does.
130 */
131 LIST_FOREACH(q, &tp->t_segq, tqe_q)
132 {
133 if (SEQ_GT(q->tqe_th->th_seq, th->th_seq))
134 break;
135 p = q;
136 }
137
138 /*
139 * If there is a preceding segment, it may provide some of
140 * our data already. If so, drop the data from the incoming
141 * segment. If it provides all of our data, drop us.
142 */
143 if (p != NULL)
144 {
145 int i;
146 /* conversion to int (in i) handles seq wraparound */
147 i = p->tqe_th->th_seq + p->tqe_len - th->th_seq;
148 if (i > 0)
149 {
150 if (i >= *tlenp)
151 {
152 tcpstat.tcps_rcvduppack++;
153 tcpstat.tcps_rcvdupbyte += *tlenp;
154 m_freem(pData, m);
155 RTMemFree(te);
156 tp->t_segqlen--;
157 tcp_reass_qsize--;
158 /*
159 * Try to present any queued data
160 * at the left window edge to the user.
161 * This is needed after the 3-WHS
162 * completes.
163 */
164 goto present; /* ??? */
165 }
166 m_adj(m, i);
167 *tlenp -= i;
168 th->th_seq += i;
169 }
170 }
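    /*
     * Worked example of the overlap check above: if the preceding entry
     * covers sequence numbers 1000..1499 (tqe_len 500) and the new segment
     * starts at 1300, then i = 1000 + 500 - 1300 = 200, so the first 200
     * duplicate bytes of the new segment are trimmed with m_adj() before
     * it is inserted.
     */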
171 tcpstat.tcps_rcvoopack++;
172 tcpstat.tcps_rcvoobyte += *tlenp;
173
174 /*
175 * While we overlap succeeding segments trim them or,
176 * if they are completely covered, dequeue them.
177 */
178 while (q)
179 {
180 int i = (th->th_seq + *tlenp) - q->tqe_th->th_seq;
181 if (i <= 0)
182 break;
183 if (i < q->tqe_len)
184 {
185 q->tqe_th->th_seq += i;
186 q->tqe_len -= i;
187 m_adj(q->tqe_m, i);
188 break;
189 }
190
191 nq = LIST_NEXT(q, tqe_q);
192 LIST_REMOVE(q, tqe_q);
193 m_freem(pData, q->tqe_m);
194 RTMemFree(q);
195 tp->t_segqlen--;
196 tcp_reass_qsize--;
197 q = nq;
198 }
199
200 /* Insert the new segment queue entry into place. */
201 te->tqe_m = m;
202 te->tqe_th = th;
203 te->tqe_len = *tlenp;
204
205 if (p == NULL)
206 {
207 LIST_INSERT_HEAD(&tp->t_segq, te, tqe_q);
208 }
209 else
210 {
211 LIST_INSERT_AFTER(p, te, tqe_q);
212 }
213
214present:
215 /*
216 * Present data to user, advancing rcv_nxt through
217 * completed sequence space.
218 */
219 if (!TCPS_HAVEESTABLISHED(tp->t_state))
220 return (0);
221 q = LIST_FIRST(&tp->t_segq);
222 if (!q || q->tqe_th->th_seq != tp->rcv_nxt)
223 return (0);
224 do
225 {
226 tp->rcv_nxt += q->tqe_len;
227 flags = q->tqe_th->th_flags & TH_FIN;
228 nq = LIST_NEXT(q, tqe_q);
229 LIST_REMOVE(q, tqe_q);
230 /* XXX: This place should be checked against the same code in the
231 * original BSD-derived Slirp; current BSD uses SS_FCANTRCVMORE here
232 */
233 if (so->so_state & SS_FCANTSENDMORE)
234 m_freem(pData, q->tqe_m);
235 else
236 {
237 if (so->so_emu)
238 {
239 if (tcp_emu(pData, so, q->tqe_m))
240 sbappend(pData, so, q->tqe_m);
241 }
242 else
243 sbappend(pData, so, q->tqe_m);
244 }
245 RTMemFree(q);
246 tp->t_segqlen--;
247 tcp_reass_qsize--;
248 q = nq;
249 }
250 while (q && q->tqe_th->th_seq == tp->rcv_nxt);
251
252 return flags;
253}
254
255/*
256 * TCP input routine, follows pages 65-76 of the
257 * protocol specification dated September, 1981 very closely.
258 */
259void
260tcp_input(PNATState pData, register struct mbuf *m, int iphlen, struct socket *inso)
261{
262 struct ip save_ip, *ip;
263 register struct tcpiphdr *ti;
264 caddr_t optp = NULL;
265 int optlen = 0;
266 int len, tlen, off;
267 register struct tcpcb *tp = 0;
268 register int tiflags;
269 struct socket *so = 0;
270 int todrop, acked, ourfinisacked, needoutput = 0;
271/* int dropsocket = 0; */
272 int iss = 0;
273 u_long tiwin;
274/* int ts_present = 0; */
275
276 DEBUG_CALL("tcp_input");
277 DEBUG_ARGS((dfd," m = %8lx iphlen = %2d inso = %lx\n",
278 (long )m, iphlen, (long )inso ));
279
280 /*
281 * If called with m == 0, then we're continuing the connect
282 */
283 if (m == NULL)
284 {
285 so = inso;
286
287 /* Re-set a few variables */
288 tp = sototcpcb(so);
289 m = so->so_m;
290 so->so_m = 0;
291 ti = so->so_ti;
292 tiwin = ti->ti_win;
293 tiflags = ti->ti_flags;
294
295 goto cont_conn;
296 }
297
298 tcpstat.tcps_rcvtotal++;
299 /*
300 * Get IP and TCP header together in first mbuf.
301 * Note: IP leaves IP header in first mbuf.
302 */
303 ti = mtod(m, struct tcpiphdr *);
304 if (iphlen > sizeof(struct ip ))
305 {
306 ip_stripoptions(m, (struct mbuf *)0);
307 iphlen = sizeof(struct ip );
308 }
309 /* XXX Check if too short */
310
311
312 /*
313 * Save a copy of the IP header in case we want to restore it
314 * for sending an ICMP error message in response.
315 */
316 ip = mtod(m, struct ip *);
317 save_ip = *ip;
318 save_ip.ip_len+= iphlen;
319
320 /*
321 * Checksum extended TCP header and data.
322 */
323 tlen = ((struct ip *)ti)->ip_len;
324 memset(ti->ti_x1, 0, 9);
325 ti->ti_len = htons((u_int16_t)tlen);
326 len = sizeof(struct ip ) + tlen;
327 /* keep checksum for ICMP reply
328 * ti->ti_sum = cksum(m, len);
329 * if (ti->ti_sum) { */
330 if (cksum(m, len))
331 {
332 tcpstat.tcps_rcvbadsum++;
333 Log2(("checksum is invalid => drop\n"));
334 goto drop;
335 }
336
337 /*
338 * Check that TCP offset makes sense,
339 * pull out TCP options and adjust length. XXX
340 */
341 off = ti->ti_off << 2;
342 if ( off < sizeof (struct tcphdr)
343 || off > tlen)
344 {
345 tcpstat.tcps_rcvbadoff++;
346 Log2(("ti_off(tlen(%d)<%d<(tcphdr(%d))) is invalid =>drop\n", tlen, off, sizeof(struct tcphdr)));
347 goto drop;
348 }
349 tlen -= off;
350 ti->ti_len = tlen;
351 if (off > sizeof (struct tcphdr))
352 {
353 optlen = off - sizeof (struct tcphdr);
354 optp = mtod(m, caddr_t) + sizeof (struct tcpiphdr);
355
356 /*
357 * Do quick retrieval of timestamp options ("options
358 * prediction?"). If timestamp is the only option and it's
359 * formatted as recommended in RFC 1323 appendix A, we
360 * quickly get the values now and not bother calling
361 * tcp_dooptions(), etc.
362 */
363#if 0
364 if (( optlen == TCPOLEN_TSTAMP_APPA
365 || ( optlen > TCPOLEN_TSTAMP_APPA
366 && optp[TCPOLEN_TSTAMP_APPA] == TCPOPT_EOL)) &&
367 *(u_int32_t *)optp == htonl(TCPOPT_TSTAMP_HDR) &&
368 (ti->ti_flags & TH_SYN) == 0)
369 {
370 ts_present = 1;
371 ts_val = ntohl(*(u_int32_t *)(optp + 4));
372 ts_ecr = ntohl(*(u_int32_t *)(optp + 8));
373 optp = NULL; /* we have parsed the options */
374 }
375#endif
376 }
377 tiflags = ti->ti_flags;
378
379 /*
380 * Convert TCP protocol specific fields to host format.
381 */
382 NTOHL(ti->ti_seq);
383 NTOHL(ti->ti_ack);
384 NTOHS(ti->ti_win);
385 NTOHS(ti->ti_urp);
386
387 /*
388 * Drop TCP, IP headers and TCP options.
389 */
390 m->m_data += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
391 m->m_len -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
392
393 /*
394 * Locate pcb for segment.
395 */
396findso:
397 so = tcp_last_so;
398 if ( so->so_fport != ti->ti_dport
399 || so->so_lport != ti->ti_sport
400 || so->so_laddr.s_addr != ti->ti_src.s_addr
401 || so->so_faddr.s_addr != ti->ti_dst.s_addr)
402 {
403 so = solookup(&tcb, ti->ti_src, ti->ti_sport,
404 ti->ti_dst, ti->ti_dport);
405 if (so)
406 tcp_last_so = so;
407 ++tcpstat.tcps_socachemiss;
408 }
409
410 /*
411 * If the state is CLOSED (i.e., TCB does not exist) then
412 * all data in the incoming segment is discarded.
413 * If the TCB exists but is in CLOSED state, it is embryonic,
414 * but should either do a listen or a connect soon.
415 *
416 * state == CLOSED means we've done socreate() but haven't
417 * attached it to a protocol yet...
418 *
419 * XXX If a TCB does not exist, and the TH_SYN flag is
420 * the only flag set, then create a session, mark it
421 * as if it was LISTENING, and continue...
422 */
423 if (so == 0)
424 {
425 if ((tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) != TH_SYN)
426 goto dropwithreset;
427
428 if ((so = socreate()) == NULL)
429 goto dropwithreset;
430 if (tcp_attach(pData, so) < 0)
431 {
432 RTMemFree(so); /* Not sofree (if it failed, it's not insqued) */
433 goto dropwithreset;
434 }
435
436 SOCKET_LOCK(so);
437 sbreserve(&so->so_snd, tcp_sndspace);
438 sbreserve(&so->so_rcv, tcp_rcvspace);
439
440/* tcp_last_so = so; */ /* XXX ? */
441/* tp = sototcpcb(so); */
442
443 so->so_laddr = ti->ti_src;
444 so->so_lport = ti->ti_sport;
445 so->so_faddr = ti->ti_dst;
446 so->so_fport = ti->ti_dport;
447
448 if ((so->so_iptos = tcp_tos(so)) == 0)
449 so->so_iptos = ((struct ip *)ti)->ip_tos;
450
451 tp = sototcpcb(so);
452 tp->t_state = TCPS_LISTEN;
453 }
454
455 /*
456 * If this is a still-connecting socket, this is probably
457 * a retransmit of the SYN. Whether it's a retransmitted SYN
458 * or something else, we nuke it.
459 */
460 if (so->so_state & SS_ISFCONNECTING)
461 {
462 Log2(("so_state(%x) of %R[natsock] is still connecting =>drop\n", so->so_state, so));
463 goto drop;
464 }
465
466 tp = sototcpcb(so);
467
468 /* XXX Should never fail */
469 if (tp == 0)
470 goto dropwithreset;
471 if (tp->t_state == TCPS_CLOSED)
472 {
473 Log2(("t_state(%x) is closed =>drop\n", tp->t_state));
474 goto drop;
475 }
476
477 /* Unscale the window into a 32-bit value. */
478/* if ((tiflags & TH_SYN) == 0)
479 * tiwin = ti->ti_win << tp->snd_scale;
480 * else
481 */
482 tiwin = ti->ti_win;
483
484 /*
485 * Segment received on connection.
486 * Reset idle time and keep-alive timer.
487 */
488 tp->t_idle = 0;
489 if (so_options)
490 tp->t_timer[TCPT_KEEP] = tcp_keepintvl;
491 else
492 tp->t_timer[TCPT_KEEP] = tcp_keepidle;
493
494 /*
495 * Process options if not in LISTEN state,
496 * else do it below (after getting remote address).
497 */
498 if (optp && tp->t_state != TCPS_LISTEN)
499 tcp_dooptions(pData, tp, (u_char *)optp, optlen, ti);
500/* , */
501/* &ts_present, &ts_val, &ts_ecr); */
502
503 /*
504 * Header prediction: check for the two common cases
505 * of a uni-directional data xfer. If the packet has
506 * no control flags, is in-sequence, the window didn't
507 * change and we're not retransmitting, it's a
508 * candidate. If the length is zero and the ack moved
509 * forward, we're the sender side of the xfer. Just
510 * free the data acked & wake any higher level process
511 * that was blocked waiting for space. If the length
512 * is non-zero and the ack didn't move, we're the
513 * receiver side. If we're getting packets in-order
514 * (the reassembly queue is empty), add the data to
515 * the socket buffer and note that we need a delayed ack.
516 *
517 * XXX Some of these tests are not needed
518 * eg: the tiwin == tp->snd_wnd prevents many more
519 * predictions.. with no *real* advantage..
520 */
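 /*
  * In practice the two cases below correspond to a pure ACK that just
  * advances snd_una during a bulk send, and to an in-order data segment
  * arriving while the reassembly queue is empty during a bulk receive;
  * everything else falls through to the full state machine.
  */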
521 if ( tp->t_state == TCPS_ESTABLISHED
522 && (tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK
523/* && (!ts_present || TSTMP_GEQ(ts_val, tp->ts_recent)) */
524 && ti->ti_seq == tp->rcv_nxt
525 && tiwin && tiwin == tp->snd_wnd
526 && tp->snd_nxt == tp->snd_max)
527 {
528 /*
529 * If last ACK falls within this segment's sequence numbers,
530 * record the timestamp.
531 */
532#if 0
533 if (ts_present && SEQ_LEQ(ti->ti_seq, tp->last_ack_sent) &&
534 SEQ_LT(tp->last_ack_sent, ti->ti_seq + ti->ti_len))
535 {
536 tp->ts_recent_age = tcp_now;
537 tp->ts_recent = ts_val;
538 }
539#endif
540
541 if (ti->ti_len == 0)
542 {
543 if ( SEQ_GT(ti->ti_ack, tp->snd_una)
544 && SEQ_LEQ(ti->ti_ack, tp->snd_max)
545 && tp->snd_cwnd >= tp->snd_wnd)
546 {
547 /*
548 * this is a pure ack for outstanding data.
549 */
550 ++tcpstat.tcps_predack;
551#if 0
552 if (ts_present)
553 tcp_xmit_timer(tp, tcp_now-ts_ecr+1);
554 else
555#endif
556 if ( tp->t_rtt
557 && SEQ_GT(ti->ti_ack, tp->t_rtseq))
558 tcp_xmit_timer(pData, tp, tp->t_rtt);
559 acked = ti->ti_ack - tp->snd_una;
560 tcpstat.tcps_rcvackpack++;
561 tcpstat.tcps_rcvackbyte += acked;
562 sbdrop(&so->so_snd, acked);
563 tp->snd_una = ti->ti_ack;
564 m_freem(pData, m);
565
566 /*
567 * If all outstanding data are acked, stop
568 * retransmit timer, otherwise restart timer
569 * using current (possibly backed-off) value.
570 * If process is waiting for space,
571 * wakeup/selwakeup/signal. If data
572 * are ready to send, let tcp_output
573 * decide between more output or persist.
574 */
575 if (tp->snd_una == tp->snd_max)
576 tp->t_timer[TCPT_REXMT] = 0;
577 else if (tp->t_timer[TCPT_PERSIST] == 0)
578 tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
579
580 /*
581 * There's room in so_snd; sowwakeup will read()
582 * from the socket if we can
583 */
584#if 0
585 if (so->so_snd.sb_flags & SB_NOTIFY)
586 sowwakeup(so);
587#endif
588 /*
589 * This is called because sowwakeup might have
590 * put data into so_snd. Since we don't do sowwakeup,
591 * we don't need this... XXX???
592 */
593 if (so->so_snd.sb_cc)
594 (void) tcp_output(pData, tp);
595
596 return;
597 }
598 }
599 else if ( ti->ti_ack == tp->snd_una
600 && LIST_FIRST(&tp->t_segq)
601 && ti->ti_len <= sbspace(&so->so_rcv))
602 {
603 /*
604 * this is a pure, in-sequence data packet
605 * with nothing on the reassembly queue and
606 * we have enough buffer space to take it.
607 */
608 ++tcpstat.tcps_preddat;
609 tp->rcv_nxt += ti->ti_len;
610 tcpstat.tcps_rcvpack++;
611 tcpstat.tcps_rcvbyte += ti->ti_len;
612 /*
613 * Add data to socket buffer.
614 */
615 if (so->so_emu)
616 {
617 if (tcp_emu(pData, so,m)) sbappend(pData, so, m);
618 }
619 else
620 sbappend(pData, so, m);
621
622 /*
623 * XXX This is called when data arrives. Later, check
624 * if we can actually write() to the socket
625 * XXX Need to check? It'll be NON_BLOCKING
626 */
627/* sorwakeup(so); */
628
629 /*
630 * If this is a short packet, then ACK now - with the Nagle
631 * algorithm the sender won't send more until
632 * it gets an ACK.
633 *
634 * It is better to not delay acks at all to maximize
635 * TCP throughput. See RFC 2581.
636 */
637 tp->t_flags |= TF_ACKNOW;
638 tcp_output(pData, tp);
639 return;
640 }
641 } /* header prediction */
642 /*
643 * Calculate amount of space in receive window,
644 * and then do TCP input processing.
645 * Receive window is amount of space in rcv queue,
646 * but not less than advertised window.
647 */
648 {
649 int win;
650 win = sbspace(&so->so_rcv);
651 if (win < 0)
652 win = 0;
653 tp->rcv_wnd = max(win, (int)(tp->rcv_adv - tp->rcv_nxt));
654 }
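    /*
     * For example, with 8192 bytes free in so_rcv and rcv_adv only 4096
     * bytes ahead of rcv_nxt, rcv_wnd becomes max(8192, 4096) = 8192;
     * the max() keeps us from advertising a window smaller than the one
     * we have already offered the peer.
     */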
655
656 switch (tp->t_state)
657 {
658 /*
659 * If the state is LISTEN then ignore segment if it contains an RST.
660 * If the segment contains an ACK then it is bad and send a RST.
661 * If it does not contain a SYN then it is not interesting; drop it.
662 * Don't bother responding if the destination was a broadcast.
663 * Otherwise initialize tp->rcv_nxt, and tp->irs, select an initial
664 * tp->iss, and send a segment:
665 * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
666 * Also initialize tp->snd_nxt to tp->iss+1 and tp->snd_una to tp->iss.
667 * Fill in remote peer address fields if not previously specified.
668 * Enter SYN_RECEIVED state, and process any other fields of this
669 * segment in this state.
670 */
671 case TCPS_LISTEN:
672 {
673 if (tiflags & TH_RST) {
674 Log2(("RST(%x) is on listen =>drop\n", tiflags));
675 goto drop;
676 }
677 if (tiflags & TH_ACK)
678 goto dropwithreset;
679 if ((tiflags & TH_SYN) == 0)
680 {
681 Log2(("SYN(%x) is off on listen =>drop\n", tiflags));
682 goto drop;
683 }
684
685 /*
686 * This has way too many gotos...
687 * But a bit of spaghetti code never hurt anybody :)
688 */
689
690 if (so->so_emu & EMU_NOCONNECT)
691 {
692 so->so_emu &= ~EMU_NOCONNECT;
693 goto cont_input;
694 }
695
696 if ( (tcp_fconnect(pData, so) == -1)
697 && errno != EINPROGRESS
698 && errno != EWOULDBLOCK)
699 {
700 u_char code = ICMP_UNREACH_NET;
701 DEBUG_MISC((dfd," tcp fconnect errno = %d-%s\n",
702 errno,strerror(errno)));
703 if (errno == ECONNREFUSED)
704 {
705 /* ACK the SYN, send RST to refuse the connection */
706 tcp_respond(pData, tp, ti, m, ti->ti_seq+1, (tcp_seq)0,
707 TH_RST|TH_ACK);
708 }
709 else
710 {
711 if (errno == EHOSTUNREACH)
712 code = ICMP_UNREACH_HOST;
713 HTONL(ti->ti_seq); /* restore tcp header */
714 HTONL(ti->ti_ack);
715 HTONS(ti->ti_win);
716 HTONS(ti->ti_urp);
717 m->m_data -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
718 m->m_len += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
719 *ip = save_ip;
720 icmp_error(pData, m, ICMP_UNREACH,code, 0,strerror(errno));
721 }
722 tp = tcp_close(pData, tp);
723 m_free(pData, m);
724 }
725 else
726 {
727 /*
728 * Haven't connected yet, save the current mbuf
729 * and ti, and return
730 * XXX Some OS's don't tell us whether the connect()
731 * succeeded or not. So we must time it out.
732 */
733 so->so_m = m;
734 so->so_ti = ti;
735 tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT;
736 tp->t_state = TCPS_SYN_RECEIVED;
737 }
738 return;
739
740cont_conn:
741 /* m==NULL
742 * Check if the connect succeeded
743 */
744 if (so->so_state & SS_NOFDREF)
745 {
746 tp = tcp_close(pData, tp);
747 goto dropwithreset;
748 }
749cont_input:
750 tcp_template(tp);
751
752 if (optp)
753 tcp_dooptions(pData, tp, (u_char *)optp, optlen, ti);
754
755 if (iss)
756 tp->iss = iss;
757 else
758 tp->iss = tcp_iss;
759 tcp_iss += TCP_ISSINCR/2;
760 tp->irs = ti->ti_seq;
761 tcp_sendseqinit(tp);
762 tcp_rcvseqinit(tp);
763 tp->t_flags |= TF_ACKNOW;
764 tp->t_state = TCPS_SYN_RECEIVED;
765 tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT;
766 tcpstat.tcps_accepts++;
767 goto trimthenstep6;
768 } /* case TCPS_LISTEN */
769
770 /*
771 * If the state is SYN_SENT:
772 * if seg contains an ACK, but not for our SYN, drop the input.
773 * if seg contains a RST, then drop the connection.
774 * if seg does not contain SYN, then drop it.
775 * Otherwise this is an acceptable SYN segment
776 * initialize tp->rcv_nxt and tp->irs
777 * if seg contains ack then advance tp->snd_una
778 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
779 * arrange for segment to be acked (eventually)
780 * continue processing rest of data/controls, beginning with URG
781 */
782 case TCPS_SYN_SENT:
783 if ( (tiflags & TH_ACK)
784 && ( SEQ_LEQ(ti->ti_ack, tp->iss)
785 || SEQ_GT(ti->ti_ack, tp->snd_max)))
786 goto dropwithreset;
787
788 if (tiflags & TH_RST)
789 {
790 if (tiflags & TH_ACK)
791 tp = tcp_drop(pData, tp,0); /* XXX Check t_softerror! */
792 Log2(("RST(%x) is on SYN_SENT =>drop\n", tiflags));
793 goto drop;
794 }
795
796 if ((tiflags & TH_SYN) == 0)
797 {
798 Log2(("SYN(%x) bit is off on SYN_SENT =>drop\n", tiflags));
799 goto drop;
800 }
801 if (tiflags & TH_ACK)
802 {
803 tp->snd_una = ti->ti_ack;
804 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
805 tp->snd_nxt = tp->snd_una;
806 }
807
808 tp->t_timer[TCPT_REXMT] = 0;
809 tp->irs = ti->ti_seq;
810 tcp_rcvseqinit(tp);
811 tp->t_flags |= TF_ACKNOW;
812 if (tiflags & TH_ACK && SEQ_GT(tp->snd_una, tp->iss))
813 {
814 tcpstat.tcps_connects++;
815 soisfconnected(so);
816 tp->t_state = TCPS_ESTABLISHED;
817
818 /* Do window scaling on this connection? */
819#if 0
820 if (( tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE))
821 == (TF_RCVD_SCALE|TF_REQ_SCALE))
822 {
823 tp->snd_scale = tp->requested_s_scale;
824 tp->rcv_scale = tp->request_r_scale;
825 }
826#endif
827 (void) tcp_reass(pData, tp, (struct tcphdr *)0, NULL, (struct mbuf *)0);
828 /*
829 * if we didn't have to retransmit the SYN,
830 * use its rtt as our initial srtt & rtt var.
831 */
832 if (tp->t_rtt)
833 tcp_xmit_timer(pData, tp, tp->t_rtt);
834 }
835 else
836 tp->t_state = TCPS_SYN_RECEIVED;
837
838trimthenstep6:
839 /*
840 * Advance ti->ti_seq to correspond to first data byte.
841 * If data, trim to stay within window,
842 * dropping FIN if necessary.
843 */
844 ti->ti_seq++;
845 if (ti->ti_len > tp->rcv_wnd)
846 {
847 todrop = ti->ti_len - tp->rcv_wnd;
848 m_adj(m, -todrop);
849 ti->ti_len = tp->rcv_wnd;
850 tiflags &= ~TH_FIN;
851 tcpstat.tcps_rcvpackafterwin++;
852 tcpstat.tcps_rcvbyteafterwin += todrop;
853 }
854 tp->snd_wl1 = ti->ti_seq - 1;
855 tp->rcv_up = ti->ti_seq;
856 goto step6;
857 } /* switch tp->t_state */
858 /*
859 * States other than LISTEN or SYN_SENT.
860 * First check timestamp, if present.
861 * Then check that at least some bytes of segment are within
862 * receive window. If segment begins before rcv_nxt,
863 * drop leading data (and SYN); if nothing left, just ack.
864 *
865 * RFC 1323 PAWS: If we have a timestamp reply on this segment
866 * and it's less than ts_recent, drop it.
867 */
868#if 0
869 if ( ts_present
870 && (tiflags & TH_RST) == 0
871 && tp->ts_recent
872 && TSTMP_LT(ts_val, tp->ts_recent))
873 {
874 /* Check to see if ts_recent is over 24 days old. */
875 if ((int)(tcp_now - tp->ts_recent_age) > TCP_PAWS_IDLE)
876 {
877 /*
878 * Invalidate ts_recent. If this segment updates
879 * ts_recent, the age will be reset later and ts_recent
880 * will get a valid value. If it does not, setting
881 * ts_recent to zero will at least satisfy the
882 * requirement that zero be placed in the timestamp
883 * echo reply when ts_recent isn't valid. The
884 * age isn't reset until we get a valid ts_recent
885 * because we don't want out-of-order segments to be
886 * dropped when ts_recent is old.
887 */
888 tp->ts_recent = 0;
889 }
890 else
891 {
892 tcpstat.tcps_rcvduppack++;
893 tcpstat.tcps_rcvdupbyte += ti->ti_len;
894 tcpstat.tcps_pawsdrop++;
895 goto dropafterack;
896 }
897 }
898#endif
899
900 todrop = tp->rcv_nxt - ti->ti_seq;
901 if (todrop > 0)
902 {
903 if (tiflags & TH_SYN)
904 {
905 tiflags &= ~TH_SYN;
906 ti->ti_seq++;
907 if (ti->ti_urp > 1)
908 ti->ti_urp--;
909 else
910 tiflags &= ~TH_URG;
911 todrop--;
912 }
913 /*
914 * Following if statement from Stevens, vol. 2, p. 960.
915 */
916 if ( todrop > ti->ti_len
917 || ( todrop == ti->ti_len
918 && (tiflags & TH_FIN) == 0))
919 {
920 /*
921 * Any valid FIN must be to the left of the window.
922 * At this point the FIN must be a duplicate or out
923 * of sequence; drop it.
924 */
925 tiflags &= ~TH_FIN;
926
927 /*
928 * Send an ACK to resynchronize and drop any data.
929 * But keep on processing for RST or ACK.
930 */
931 tp->t_flags |= TF_ACKNOW;
932 todrop = ti->ti_len;
933 tcpstat.tcps_rcvduppack++;
934 tcpstat.tcps_rcvdupbyte += todrop;
935 }
936 else
937 {
938 tcpstat.tcps_rcvpartduppack++;
939 tcpstat.tcps_rcvpartdupbyte += todrop;
940 }
941 m_adj(m, todrop);
942 ti->ti_seq += todrop;
943 ti->ti_len -= todrop;
944 if (ti->ti_urp > todrop)
945 ti->ti_urp -= todrop;
946 else
947 {
948 tiflags &= ~TH_URG;
949 ti->ti_urp = 0;
950 }
951 }
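    /*
     * Example of the trimming above: with rcv_nxt = 2000 and a segment
     * starting at sequence 1500 carrying 800 bytes, todrop = 500; the
     * first 500 already-received bytes are cut with m_adj(), leaving a
     * 300-byte segment that now starts exactly at rcv_nxt.
     */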
952 /*
953 * If new data are received on a connection after the
954 * user processes are gone, then RST the other end.
955 */
956 if ( (so->so_state & SS_NOFDREF)
957 && tp->t_state > TCPS_CLOSE_WAIT && ti->ti_len)
958 {
959 tp = tcp_close(pData, tp);
960 tcpstat.tcps_rcvafterclose++;
961 goto dropwithreset;
962 }
963
964 /*
965 * If segment ends after window, drop trailing data
966 * (and PUSH and FIN); if nothing left, just ACK.
967 */
968 todrop = (ti->ti_seq+ti->ti_len) - (tp->rcv_nxt+tp->rcv_wnd);
969 if (todrop > 0)
970 {
971 tcpstat.tcps_rcvpackafterwin++;
972 if (todrop >= ti->ti_len)
973 {
974 tcpstat.tcps_rcvbyteafterwin += ti->ti_len;
975 /*
976 * If a new connection request is received
977 * while in TIME_WAIT, drop the old connection
978 * and start over if the sequence numbers
979 * are above the previous ones.
980 */
981 if ( tiflags & TH_SYN
982 && tp->t_state == TCPS_TIME_WAIT
983 && SEQ_GT(ti->ti_seq, tp->rcv_nxt))
984 {
985 iss = tp->rcv_nxt + TCP_ISSINCR;
986 tp = tcp_close(pData, tp);
987 goto findso;
988 }
989 /*
990 * If window is closed can only take segments at
991 * window edge, and have to drop data and PUSH from
992 * incoming segments. Continue processing, but
993 * remember to ack. Otherwise, drop segment
994 * and ack.
995 */
996 if (tp->rcv_wnd == 0 && ti->ti_seq == tp->rcv_nxt)
997 {
998 tp->t_flags |= TF_ACKNOW;
999 tcpstat.tcps_rcvwinprobe++;
1000 }
1001 else
1002 goto dropafterack;
1003 }
1004 else
1005 tcpstat.tcps_rcvbyteafterwin += todrop;
1006 m_adj(m, -todrop);
1007 ti->ti_len -= todrop;
1008 tiflags &= ~(TH_PUSH|TH_FIN);
1009 }
1010
1011 /*
1012 * If last ACK falls within this segment's sequence numbers,
1013 * record its timestamp.
1014 */
1015#if 0
1016 if ( ts_present
1017 && SEQ_LEQ(ti->ti_seq, tp->last_ack_sent)
1018 && SEQ_LT(tp->last_ack_sent, ti->ti_seq + ti->ti_len + ((tiflags & (TH_SYN|TH_FIN)) != 0)))
1019 {
1020 tp->ts_recent_age = tcp_now;
1021 tp->ts_recent = ts_val;
1022 }
1023#endif
1024
1025 /*
1026 * If the RST bit is set examine the state:
1027 * SYN_RECEIVED STATE:
1028 * If passive open, return to LISTEN state.
1029 * If active open, inform user that connection was refused.
1030 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT2, CLOSE_WAIT STATES:
1031 * Inform user that connection was reset, and close tcb.
1032 * CLOSING, LAST_ACK, TIME_WAIT STATES
1033 * Close the tcb.
1034 */
1035 if (tiflags&TH_RST)
1036 switch (tp->t_state)
1037 {
1038 case TCPS_SYN_RECEIVED:
1039/* so->so_error = ECONNREFUSED; */
1040 goto close;
1041
1042 case TCPS_ESTABLISHED:
1043 case TCPS_FIN_WAIT_1:
1044 case TCPS_FIN_WAIT_2:
1045 case TCPS_CLOSE_WAIT:
1046/* so->so_error = ECONNRESET; */
1047close:
1048 Log2(("closing...=>drop\n", tp->t_state));
1049 tp->t_state = TCPS_CLOSED;
1050 tcpstat.tcps_drops++;
1051 tp = tcp_close(pData, tp);
1052 goto drop;
1053
1054 case TCPS_CLOSING:
1055 case TCPS_LAST_ACK:
1056 case TCPS_TIME_WAIT:
1057 Log2(("t_state is (%x) sort of close =>drop\n", tp->t_state));
1058 tp = tcp_close(pData, tp);
1059 goto drop;
1060 }
1061
1062 /*
1063 * If a SYN is in the window, then this is an
1064 * error and we send an RST and drop the connection.
1065 */
1066 if (tiflags & TH_SYN)
1067 {
1068 tp = tcp_drop(pData, tp,0);
1069 goto dropwithreset;
1070 }
1071
1072 /*
1073 * If the ACK bit is off we drop the segment and return.
1074 */
1075 if ((tiflags & TH_ACK) == 0)
1076 {
1077 Log2(("ACK(%x) bit is off =>drop\n", tiflags));
1078 goto drop;
1079 }
1080
1081 /*
1082 * Ack processing.
1083 */
1084 switch (tp->t_state)
1085 {
1086 /*
1087 * In SYN_RECEIVED state if the ack ACKs our SYN then enter
1088 * ESTABLISHED state and continue processing, otherwise
1089 * send an RST. una<=ack<=max
1090 */
1091 case TCPS_SYN_RECEIVED:
1092 if ( SEQ_GT(tp->snd_una, ti->ti_ack)
1093 || SEQ_GT(ti->ti_ack, tp->snd_max))
1094 goto dropwithreset;
1095 tcpstat.tcps_connects++;
1096 tp->t_state = TCPS_ESTABLISHED;
1097 /*
1098 * The sent SYN is ack'ed with our sequence number +1
1099 * The first data byte already in the buffer will get
1100 * lost if no correction is made. This is only needed for
1101 * SS_CTL since the buffer is empty otherwise.
1102 * tp->snd_una++; or:
1103 */
1104 tp->snd_una = ti->ti_ack;
1105 soisfconnected(so);
1106
1107 /* Do window scaling? */
1108#if 0
1109 if ( (tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE))
1110 == (TF_RCVD_SCALE|TF_REQ_SCALE))
1111 {
1112 tp->snd_scale = tp->requested_s_scale;
1113 tp->rcv_scale = tp->request_r_scale;
1114 }
1115#endif
1116 (void) tcp_reass(pData, tp, (struct tcphdr *)0, (int *)0, (struct mbuf *)0);
1117 tp->snd_wl1 = ti->ti_seq - 1;
1118 /* Avoid ack processing; snd_una==ti_ack => dup ack */
1119 goto synrx_to_est;
1120 /* fall into ... */
1121
1122 /*
1123 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
1124 * ACKs. If the ack is in the range
1125 * tp->snd_una < ti->ti_ack <= tp->snd_max
1126 * then advance tp->snd_una to ti->ti_ack and drop
1127 * data from the retransmission queue. If this ACK reflects
1128 * more up to date window information we update our window information.
1129 */
1130 case TCPS_ESTABLISHED:
1131 case TCPS_FIN_WAIT_1:
1132 case TCPS_FIN_WAIT_2:
1133 case TCPS_CLOSE_WAIT:
1134 case TCPS_CLOSING:
1135 case TCPS_LAST_ACK:
1136 case TCPS_TIME_WAIT:
1137 if (SEQ_LEQ(ti->ti_ack, tp->snd_una))
1138 {
1139 if (ti->ti_len == 0 && tiwin == tp->snd_wnd)
1140 {
1141 tcpstat.tcps_rcvdupack++;
1142 DEBUG_MISC((dfd," dup ack m = %lx so = %lx \n",
1143 (long )m, (long )so));
1144 /*
1145 * If we have outstanding data (other than
1146 * a window probe), this is a completely
1147 * duplicate ack (ie, window info didn't
1148 * change), the ack is the biggest we've
1149 * seen and we've seen exactly our rexmt
1150 * threshold of them, assume a packet
1151 * has been dropped and retransmit it.
1152 * Kludge snd_nxt & the congestion
1153 * window so we send only this one
1154 * packet.
1155 *
1156 * We know we're losing at the current
1157 * window size so do congestion avoidance
1158 * (set ssthresh to half the current window
1159 * and pull our congestion window back to
1160 * the new ssthresh).
1161 *
1162 * Dup acks mean that packets have left the
1163 * network (they're now cached at the receiver)
1164 * so bump cwnd by the amount in the receiver
1165 * to keep a constant cwnd packets in the
1166 * network.
1167 */
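                /*
                 * For example, assuming the usual threshold of three
                 * duplicate ACKs (tcprexmtthresh): with snd_wnd = snd_cwnd
                 * = 16 segments, the third dup ACK sets snd_ssthresh to
                 * 8 * t_maxseg, retransmits the missing segment with cwnd
                 * temporarily clamped to a single segment, and then sets
                 * cwnd to ssthresh + 3 * t_maxseg for fast recovery.
                 */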
1168 if ( tp->t_timer[TCPT_REXMT] == 0
1169 || ti->ti_ack != tp->snd_una)
1170 tp->t_dupacks = 0;
1171 else if (++tp->t_dupacks == tcprexmtthresh)
1172 {
1173 tcp_seq onxt = tp->snd_nxt;
1174 u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
1175 if (win < 2)
1176 win = 2;
1177 tp->snd_ssthresh = win * tp->t_maxseg;
1178 tp->t_timer[TCPT_REXMT] = 0;
1179 tp->t_rtt = 0;
1180 tp->snd_nxt = ti->ti_ack;
1181 tp->snd_cwnd = tp->t_maxseg;
1182 (void) tcp_output(pData, tp);
1183 tp->snd_cwnd = tp->snd_ssthresh +
1184 tp->t_maxseg * tp->t_dupacks;
1185 if (SEQ_GT(onxt, tp->snd_nxt))
1186 tp->snd_nxt = onxt;
1187 Log2(("t_dupacks(%d) == tcprexmtthresh(%d)=>drop\n", tp->t_dupacks, tcprexmtthresh));
1188 goto drop;
1189 }
1190 else if (tp->t_dupacks > tcprexmtthresh)
1191 {
1192 tp->snd_cwnd += tp->t_maxseg;
1193 (void) tcp_output(pData, tp);
1194 Log2(("t_dupacks(%d) > tcprexmtthresh(%d)=>drop\n", tp->t_dupacks, tcprexmtthresh));
1195 goto drop;
1196 }
1197 }
1198 else
1199 tp->t_dupacks = 0;
1200 break;
1201 }
1202synrx_to_est:
1203 /*
1204 * If the congestion window was inflated to account
1205 * for the other side's cached packets, retract it.
1206 */
1207 if ( tp->t_dupacks > tcprexmtthresh
1208 && tp->snd_cwnd > tp->snd_ssthresh)
1209 tp->snd_cwnd = tp->snd_ssthresh;
1210 tp->t_dupacks = 0;
1211 if (SEQ_GT(ti->ti_ack, tp->snd_max))
1212 {
1213 tcpstat.tcps_rcvacktoomuch++;
1214 goto dropafterack;
1215 }
1216 acked = ti->ti_ack - tp->snd_una;
1217 tcpstat.tcps_rcvackpack++;
1218 tcpstat.tcps_rcvackbyte += acked;
1219
1220 /*
1221 * If we have a timestamp reply, update smoothed
1222 * round trip time. If no timestamp is present but
1223 * transmit timer is running and timed sequence
1224 * number was acked, update smoothed round trip time.
1225 * Since we now have an rtt measurement, cancel the
1226 * timer backoff (cf., Phil Karn's retransmit alg.).
1227 * Recompute the initial retransmit timer.
1228 */
1229#if 0
1230 if (ts_present)
1231 tcp_xmit_timer(tp, tcp_now-ts_ecr+1);
1232 else
1233#endif
1234 if (tp->t_rtt && SEQ_GT(ti->ti_ack, tp->t_rtseq))
1235 tcp_xmit_timer(pData, tp,tp->t_rtt);
1236
1237 /*
1238 * If all outstanding data is acked, stop retransmit
1239 * timer and remember to restart (more output or persist).
1240 * If there is more data to be acked, restart retransmit
1241 * timer, using current (possibly backed-off) value.
1242 */
1243 if (ti->ti_ack == tp->snd_max)
1244 {
1245 tp->t_timer[TCPT_REXMT] = 0;
1246 needoutput = 1;
1247 }
1248 else if (tp->t_timer[TCPT_PERSIST] == 0)
1249 tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
1250 /*
1251 * When new data is acked, open the congestion window.
1252 * If the window gives us less than ssthresh packets
1253 * in flight, open exponentially (maxseg per packet).
1254 * Otherwise open linearly: maxseg per window
1255 * (maxseg^2 / cwnd per packet).
1256 */
1257 {
1258 register u_int cw = tp->snd_cwnd;
1259 register u_int incr = tp->t_maxseg;
1260
1261 if (cw > tp->snd_ssthresh)
1262 incr = incr * incr / cw;
1263 tp->snd_cwnd = min(cw + incr, TCP_MAXWIN<<tp->snd_scale);
1264 }
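            /*
             * For instance, with t_maxseg = 1460: below ssthresh each ACK
             * grows cwnd by a full 1460 bytes (slow start); above ssthresh
             * the increment shrinks to 1460*1460/cwnd, i.e. roughly one
             * segment per window of data acked (congestion avoidance).
             */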
1265 if (acked > so->so_snd.sb_cc)
1266 {
1267 tp->snd_wnd -= so->so_snd.sb_cc;
1268 sbdrop(&so->so_snd, (int )so->so_snd.sb_cc);
1269 ourfinisacked = 1;
1270 }
1271 else
1272 {
1273 sbdrop(&so->so_snd, acked);
1274 tp->snd_wnd -= acked;
1275 ourfinisacked = 0;
1276 }
1277 /*
1278 * XXX sowwakeup is called when data is acked and there's room
1279 * for more data... it should read() the socket
1280 */
1281#if 0
1282 if (so->so_snd.sb_flags & SB_NOTIFY)
1283 sowwakeup(so);
1284#endif
1285 tp->snd_una = ti->ti_ack;
1286 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
1287 tp->snd_nxt = tp->snd_una;
1288
1289 switch (tp->t_state)
1290 {
1291 /*
1292 * In FIN_WAIT_1 STATE in addition to the processing
1293 * for the ESTABLISHED state if our FIN is now acknowledged
1294 * then enter FIN_WAIT_2.
1295 */
1296 case TCPS_FIN_WAIT_1:
1297 if (ourfinisacked)
1298 {
1299 /*
1300 * If we can't receive any more
1301 * data, then closing user can proceed.
1302 * Starting the timer is contrary to the
1303 * specification, but if we don't get a FIN
1304 * we'll hang forever.
1305 */
1306 if (so->so_state & SS_FCANTRCVMORE)
1307 {
1308 soisfdisconnected(so);
1309 tp->t_timer[TCPT_2MSL] = tcp_maxidle;
1310 }
1311 tp->t_state = TCPS_FIN_WAIT_2;
1312 }
1313 break;
1314
1315 /*
1316 * In CLOSING STATE in addition to the processing for
1317 * the ESTABLISHED state if the ACK acknowledges our FIN
1318 * then enter the TIME-WAIT state, otherwise ignore
1319 * the segment.
1320 */
1321 case TCPS_CLOSING:
1322 if (ourfinisacked)
1323 {
1324 tp->t_state = TCPS_TIME_WAIT;
1325 tcp_canceltimers(tp);
1326 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1327 soisfdisconnected(so);
1328 }
1329 break;
1330
1331 /*
1332 * In LAST_ACK, we may still be waiting for data to drain
1333 * and/or to be acked, as well as for the ack of our FIN.
1334 * If our FIN is now acknowledged, delete the TCB,
1335 * enter the closed state and return.
1336 */
1337 case TCPS_LAST_ACK:
1338 if (ourfinisacked)
1339 {
1340 Log2(("ourfinisacked=>drop\n"));
1341 tp = tcp_close(pData, tp);
1342 goto drop;
1343 }
1344 break;
1345
1346 /*
1347 * In TIME_WAIT state the only thing that should arrive
1348 * is a retransmission of the remote FIN. Acknowledge
1349 * it and restart the finack timer.
1350 */
1351 case TCPS_TIME_WAIT:
1352 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1353 goto dropafterack;
1354 }
1355 } /* switch(tp->t_state) */
1356
1357step6:
1358 /*
1359 * Update window information.
1360 * Don't look at window if no ACK: TAC's send garbage on first SYN.
1361 */
1362 if ( (tiflags & TH_ACK)
1363 && ( SEQ_LT(tp->snd_wl1, ti->ti_seq)
1364 || ( tp->snd_wl1 == ti->ti_seq
1365 && ( SEQ_LT(tp->snd_wl2, ti->ti_ack)
1366 || ( tp->snd_wl2 == ti->ti_ack
1367 && tiwin > tp->snd_wnd)))))
1368 {
1369 /* keep track of pure window updates */
1370 if ( ti->ti_len == 0
1371 && tp->snd_wl2 == ti->ti_ack
1372 && tiwin > tp->snd_wnd)
1373 tcpstat.tcps_rcvwinupd++;
1374 tp->snd_wnd = tiwin;
1375 tp->snd_wl1 = ti->ti_seq;
1376 tp->snd_wl2 = ti->ti_ack;
1377 if (tp->snd_wnd > tp->max_sndwnd)
1378 tp->max_sndwnd = tp->snd_wnd;
1379 needoutput = 1;
1380 }
1381
1382 /*
1383 * Process segments with URG.
1384 */
1385 if ((tiflags & TH_URG) && ti->ti_urp &&
1386 TCPS_HAVERCVDFIN(tp->t_state) == 0)
1387 {
1388 /*
1389 * This is a kludge, but if we receive and accept
1390 * random urgent pointers, we'll crash in
1391 * soreceive. It's hard to imagine someone
1392 * actually wanting to send this much urgent data.
1393 */
1394 if (ti->ti_urp + so->so_rcv.sb_cc > so->so_rcv.sb_datalen)
1395 {
1396 ti->ti_urp = 0;
1397 tiflags &= ~TH_URG;
1398 goto dodata;
1399 }
1400 /*
1401 * If this segment advances the known urgent pointer,
1402 * then mark the data stream. This should not happen
1403 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
1404 * a FIN has been received from the remote side.
1405 * In these states we ignore the URG.
1406 *
1407 * According to RFC961 (Assigned Protocols),
1408 * the urgent pointer points to the last octet
1409 * of urgent data. We continue, however,
1410 * to consider it to indicate the first octet
1411 * of data past the urgent section as the original
1412 * spec states (in one of two places).
1413 */
1414 if (SEQ_GT(ti->ti_seq+ti->ti_urp, tp->rcv_up))
1415 {
1416 tp->rcv_up = ti->ti_seq + ti->ti_urp;
1417 so->so_urgc = so->so_rcv.sb_cc +
1418 (tp->rcv_up - tp->rcv_nxt); /* -1; */
1419 tp->rcv_up = ti->ti_seq + ti->ti_urp;
1420 }
1421 }
1422 else
1423 /*
1424 * If no out of band data is expected,
1425 * pull receive urgent pointer along
1426 * with the receive window.
1427 */
1428 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
1429 tp->rcv_up = tp->rcv_nxt;
1430dodata:
1431
1432 /*
1433 * If this is a small packet, then ACK now - with the Nagle
1434 * algorithm the sender won't send more until
1435 * it gets an ACK.
1436 *
1437 * See above.
1438 */
1439 if ( ti->ti_len
1440 && (unsigned)ti->ti_len <= 5
1441 && ((struct tcpiphdr_2 *)ti)->first_char == (char)27)
1442 {
1443 tp->t_flags |= TF_ACKNOW;
1444 }
1445
1446 /*
1447 * Process the segment text, merging it into the TCP sequencing queue,
1448 * and arranging for acknowledgment of receipt if necessary.
1449 * This process logically involves adjusting tp->rcv_wnd as data
1450 * is presented to the user (this happens in tcp_usrreq.c,
1451 * case PRU_RCVD). If a FIN has already been received on this
1452 * connection then we just ignore the text.
1453 */
1454 if ( (ti->ti_len || (tiflags&TH_FIN))
1455 && TCPS_HAVERCVDFIN(tp->t_state) == 0)
1456 {
1457 if ( ti->ti_seq == tp->rcv_nxt
1458 && LIST_EMPTY(&tp->t_segq)
1459 && tp->t_state == TCPS_ESTABLISHED)
1460 {
1461 DELAY_ACK(tp, ti); /* a little different from the BSD declaration; see netinet/tcp_input.c */
1462 tp->rcv_nxt += tlen;
1463 tiflags = ti->ti_t.th_flags & TH_FIN;
1464 tcpstat.tcps_rcvpack++;
1465 tcpstat.tcps_rcvbyte += tlen;
1466 if (so->so_state & SS_FCANTRCVMORE)
1467 m_freem(pData, m);
1468 else
1469 {
1470 if (so->so_emu)
1471 {
1472 if (tcp_emu(pData, so,m))
1473 sbappend(pData, so, m);
1474 }
1475 else
1476 sbappend(pData, so, m);
1477 }
1478 }
1479 else
1480 {
1481 tiflags = tcp_reass(pData, tp, &ti->ti_t, &tlen, m);
1482 tiflags |= TF_ACKNOW;
1483 }
1484 /*
1485 * Note the amount of data that peer has sent into
1486 * our window, in order to estimate the sender's
1487 * buffer size.
1488 */
1489 len = so->so_rcv.sb_datalen - (tp->rcv_adv - tp->rcv_nxt);
1490 }
1491 else
1492 {
1493 m_free(pData, m);
1494 tiflags &= ~TH_FIN;
1495 }
1496
1497 /*
1498 * If FIN is received ACK the FIN and let the user know
1499 * that the connection is closing.
1500 */
1501 if (tiflags & TH_FIN)
1502 {
1503 if (TCPS_HAVERCVDFIN(tp->t_state) == 0)
1504 {
1505 /*
1506 * If we receive a FIN we can't send more data;
1507 * set SS_FDRAIN.
1508 * Shut down the socket if there is no rx data in the
1509 * buffer.
1510 * soread() is called on completion of shutdown() and
1511 * will go to TCPS_LAST_ACK, and use tcp_output()
1512 * to send the FIN.
1513 */
1514/* sofcantrcvmore(so); */
1515 sofwdrain(so);
1516
1517 tp->t_flags |= TF_ACKNOW;
1518 tp->rcv_nxt++;
1519 }
1520 switch (tp->t_state)
1521 {
1522 /*
1523 * In SYN_RECEIVED and ESTABLISHED STATES
1524 * enter the CLOSE_WAIT state.
1525 */
1526 case TCPS_SYN_RECEIVED:
1527 case TCPS_ESTABLISHED:
1528 if(so->so_emu == EMU_CTL) /* no shutdown on socket */
1529 tp->t_state = TCPS_LAST_ACK;
1530 else
1531 tp->t_state = TCPS_CLOSE_WAIT;
1532 break;
1533
1534 /*
1535 * If still in FIN_WAIT_1 STATE FIN has not been acked so
1536 * enter the CLOSING state.
1537 */
1538 case TCPS_FIN_WAIT_1:
1539 tp->t_state = TCPS_CLOSING;
1540 break;
1541
1542 /*
1543 * In FIN_WAIT_2 state enter the TIME_WAIT state,
1544 * starting the time-wait timer, turning off the other
1545 * standard timers.
1546 */
1547 case TCPS_FIN_WAIT_2:
1548 tp->t_state = TCPS_TIME_WAIT;
1549 tcp_canceltimers(tp);
1550 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1551 soisfdisconnected(so);
1552 break;
1553
1554 /*
1555 * In TIME_WAIT state restart the 2 MSL time_wait timer.
1556 */
1557 case TCPS_TIME_WAIT:
1558 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1559 break;
1560 }
1561 }
1562
1563 /*
1564 * Return any desired output.
1565 */
1566 if (needoutput || (tp->t_flags & TF_ACKNOW))
1567 tcp_output(pData, tp);
1568
1569 return;
1570
1571dropafterack:
1572 Log2(("drop after ack\n"));
1573 /*
1574 * Generate an ACK dropping incoming segment if it occupies
1575 * sequence space, where the ACK reflects our state.
1576 */
1577 if (tiflags & TH_RST)
1578 goto drop;
1579 m_freem(pData, m);
1580 tp->t_flags |= TF_ACKNOW;
1581 (void) tcp_output(pData, tp);
1582 return;
1583
1584dropwithreset:
1585 /* reuses m if m!=NULL, m_free() unnecessary */
1586 Log2(("drop with reset\n"));
1587 if (tiflags & TH_ACK)
1588 tcp_respond(pData, tp, ti, m, (tcp_seq)0, ti->ti_ack, TH_RST);
1589 else
1590 {
1591 if (tiflags & TH_SYN) ti->ti_len++;
1592 tcp_respond(pData, tp, ti, m, ti->ti_seq+ti->ti_len, (tcp_seq)0,
1593 TH_RST|TH_ACK);
1594 }
1595
1596 return;
1597
1598drop:
1599 /*
1600 * Drop space held by incoming segment and return.
1601 */
1602 Log2(("drop\n"));
1603 m_free(pData, m);
1604
1605 return;
1606}
1607
1608void
1609tcp_dooptions(PNATState pData, struct tcpcb *tp, u_char *cp, int cnt, struct tcpiphdr *ti)
1610{
1611 u_int16_t mss;
1612 int opt, optlen;
1613
1614 DEBUG_CALL("tcp_dooptions");
1615 DEBUG_ARGS((dfd," tp = %lx cnt=%i \n", (long )tp, cnt));
1616
1617 for (; cnt > 0; cnt -= optlen, cp += optlen)
1618 {
1619 opt = cp[0];
1620 if (opt == TCPOPT_EOL)
1621 break;
1622 if (opt == TCPOPT_NOP)
1623 optlen = 1;
1624 else
1625 {
1626 optlen = cp[1];
1627 if (optlen <= 0)
1628 break;
1629 }
1630 switch (opt)
1631 {
1632 default:
1633 continue;
1634
1635 case TCPOPT_MAXSEG:
1636 if (optlen != TCPOLEN_MAXSEG)
1637 continue;
1638 if (!(ti->ti_flags & TH_SYN))
1639 continue;
1640 memcpy((char *) &mss, (char *) cp + 2, sizeof(mss));
1641 NTOHS(mss);
1642 (void) tcp_mss(pData, tp, mss); /* sets t_maxseg */
1643 break;
1644
1645#if 0
1646 case TCPOPT_WINDOW:
1647 if (optlen != TCPOLEN_WINDOW)
1648 continue;
1649 if (!(ti->ti_flags & TH_SYN))
1650 continue;
1651 tp->t_flags |= TF_RCVD_SCALE;
1652 tp->requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT);
1653 break;
1654
1655 case TCPOPT_TIMESTAMP:
1656 if (optlen != TCPOLEN_TIMESTAMP)
1657 continue;
1658 *ts_present = 1;
1659 memcpy((char *) ts_val, (char *)cp + 2, sizeof(*ts_val));
1660 NTOHL(*ts_val);
1661 memcpy((char *) ts_ecr, (char *)cp + 6, sizeof(*ts_ecr));
1662 NTOHL(*ts_ecr);
1663
1664 /*
1665 * A timestamp received in a SYN makes
1666 * it ok to send timestamp requests and replies.
1667 */
1668 if (ti->ti_flags & TH_SYN)
1669 {
1670 tp->t_flags |= TF_RCVD_TSTMP;
1671 tp->ts_recent = *ts_val;
1672 tp->ts_recent_age = tcp_now;
1673 }
1674 break;
1675#endif
1676 }
1677 }
1678}
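/*
 * For reference, the MSS option handled above is laid out on the wire as
 *   kind = 2 (TCPOPT_MAXSEG), length = 4 (TCPOLEN_MAXSEG), 16-bit MSS
 * so an option of 02 04 05 b4 advertises an MSS of 0x05b4 = 1460 bytes.
 */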
1679
1680
1681/*
1682 * Pull out of band byte out of a segment so
1683 * it doesn't appear in the user's data queue.
1684 * It is still reflected in the segment length for
1685 * sequencing purposes.
1686 */
1687
1688#if 0
1689void
1690tcp_pulloutofband(struct socket *so, struct tcpiphdr *ti, struct mbuf *m)
1691{
1692 int cnt = ti->ti_urp - 1;
1693
1694 while (cnt >= 0)
1695 {
1696 if (m->m_len > cnt)
1697 {
1698 char *cp = mtod(m, caddr_t) + cnt;
1699 struct tcpcb *tp = sototcpcb(so);
1700
1701 tp->t_iobc = *cp;
1702 tp->t_oobflags |= TCPOOB_HAVEDATA;
1703 memmove(cp, cp+1, (unsigned)(m->m_len - cnt - 1)); /* slide the rest of the data over the OOB byte */
1704 m->m_len--;
1705 return;
1706 }
1707 cnt -= m->m_len;
1708 m = m->m_next; /* XXX WRONG! Fix it! */
1709 if (m == 0)
1710 break;
1711 }
1712 panic("tcp_pulloutofband");
1713}
1714#endif
1715
1716/*
1717 * Collect new round-trip time estimate
1718 * and update averages and current timeout.
1719 */
1720
1721void
1722tcp_xmit_timer(PNATState pData, register struct tcpcb *tp, int rtt)
1723{
1724 register short delta;
1725
1726 DEBUG_CALL("tcp_xmit_timer");
1727 DEBUG_ARG("tp = %lx", (long)tp);
1728 DEBUG_ARG("rtt = %d", rtt);
1729
1730 tcpstat.tcps_rttupdated++;
1731 if (tp->t_srtt != 0)
1732 {
1733 /*
1734 * srtt is stored as fixed point with 3 bits after the
1735 * binary point (i.e., scaled by 8). The following magic
1736 * is equivalent to the smoothing algorithm in rfc793 with
1737 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
1738 * point). Adjust rtt to origin 0.
1739 */
1740 delta = rtt - 1 - (tp->t_srtt >> TCP_RTT_SHIFT);
1741 if ((tp->t_srtt += delta) <= 0)
1742 tp->t_srtt = 1;
1743 /*
1744 * We accumulate a smoothed rtt variance (actually, a
1745 * smoothed mean difference), then set the retransmit
1746 * timer to smoothed rtt + 4 times the smoothed variance.
1747 * rttvar is stored as fixed point with 2 bits after the
1748 * binary point (scaled by 4). The following is
1749 * equivalent to rfc793 smoothing with an alpha of .75
1750 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
1751 * rfc793's wired-in beta.
1752 */
1753 if (delta < 0)
1754 delta = -delta;
1755 delta -= (tp->t_rttvar >> TCP_RTTVAR_SHIFT);
1756 if ((tp->t_rttvar += delta) <= 0)
1757 tp->t_rttvar = 1;
1758 }
1759 else
1760 {
1761 /*
1762 * No rtt measurement yet - use the unsmoothed rtt.
1763 * Set the variance to half the rtt (so our first
1764 * retransmit happens at 3*rtt).
1765 */
1766 tp->t_srtt = rtt << TCP_RTT_SHIFT;
1767 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
1768 }
1769 tp->t_rtt = 0;
1770 tp->t_rxtshift = 0;
1771
1772 /*
1773 * the retransmit should happen at rtt + 4 * rttvar.
1774 * Because of the way we do the smoothing, srtt and rttvar
1775 * will each average +1/2 tick of bias. When we compute
1776 * the retransmit timer, we want 1/2 tick of rounding and
1777 * 1 extra tick because of +-1/2 tick uncertainty in the
1778 * firing of the timer. The bias will give us exactly the
1779 * 1.5 tick we need. But, because the bias is
1780 * statistical, we have to test that we don't drop below
1781 * the minimum feasible timer (which is 2 ticks).
1782 */
1783 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
1784 (short)tp->t_rttmin, TCPTV_REXMTMAX); /* XXX */
1785
1786 /*
1787 * We received an ack for a packet that wasn't retransmitted;
1788 * it is probably safe to discard any error indications we've
1789 * received recently. This isn't quite right, but close enough
1790 * for now (a route might have failed after we sent a segment,
1791 * and the return path might not be symmetrical).
1792 */
1793 tp->t_softerror = 0;
1794}
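/*
 * Worked example of the fixed-point smoothing above, assuming the usual
 * BSD shift values TCP_RTT_SHIFT = 3 and TCP_RTTVAR_SHIFT = 2: with
 * t_srtt = 16 (a smoothed rtt of 2 ticks) and a new rtt sample of 4 ticks,
 * delta = 4 - 1 - (16 >> 3) = 1, so t_srtt becomes 17; each measurement
 * moves the estimate toward the new sample by about 1/8 of the difference.
 */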
1795
1796/*
1797 * Determine a reasonable value for maxseg size.
1798 * If the route is known, check route for mtu.
1799 * If none, use an mss that can be handled on the outgoing
1800 * interface without forcing IP to fragment; if bigger than
1801 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
1802 * to utilize large mbufs. If no route is found, route has no mtu,
1803 * or the destination isn't local, use a default, hopefully conservative
1804 * size (usually 512 or the default IP max size, but no more than the mtu
1805 * of the interface), as we can't discover anything about intervening
1806 * gateways or networks. We also initialize the congestion/slow start
1807 * window to be a single segment if the destination isn't local.
1808 * While looking at the routing entry, we also initialize other path-dependent
1809 * parameters from pre-set or cached values in the routing entry.
1810 */
1811
1812int
1813tcp_mss(PNATState pData, register struct tcpcb *tp, u_int offer)
1814{
1815 struct socket *so = tp->t_socket;
1816 int mss;
1817
1818 DEBUG_CALL("tcp_mss");
1819 DEBUG_ARG("tp = %lx", (long)tp);
1820 DEBUG_ARG("offer = %d", offer);
1821
1822 mss = min(if_mtu, if_mru) - sizeof(struct tcpiphdr);
1823 if (offer)
1824 mss = min(mss, offer);
1825 mss = max(mss, 32);
1826 if (mss < tp->t_maxseg || offer != 0)
1827 tp->t_maxseg = mss;
1828
1829 tp->snd_cwnd = mss;
1830
1831 sbreserve(&so->so_snd, tcp_sndspace+((tcp_sndspace%mss)?(mss-(tcp_sndspace%mss)):0));
1832 sbreserve(&so->so_rcv, tcp_rcvspace+((tcp_rcvspace%mss)?(mss-(tcp_rcvspace%mss)):0));
1833
1834 DEBUG_MISC((dfd, " returning mss = %d\n", mss));
1835
1836 return mss;
1837}