VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/tcp_input.c@ 20383

Last change on this file since 20383 was 20377, checked in by vboxsync, 16 years ago

NAT: sbuf performance counters

1/*
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)tcp_input.c 8.5 (Berkeley) 4/10/94
34 * tcp_input.c,v 1.10 1994/10/13 18:36:32 wollman Exp
35 */
36
37/*
38 * Changes and additions relating to SLiRP
39 * Copyright (c) 1995 Danny Gasparovski.
40 *
41 * Please read the file COPYRIGHT for the
42 * terms and conditions of the copyright.
43 */
44
45#include <slirp.h>
46#include "ip_icmp.h"
47
48
49#define TCP_PAWS_IDLE (24 * 24 * 60 * 60 * PR_SLOWHZ)
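/*
 * This is simply "24 days" expressed in slow-timeout ticks: 24 days of
 * 24 h * 60 min * 60 s, multiplied by PR_SLOWHZ ticks per second (with the
 * usual PR_SLOWHZ of 2 that works out to 4147200 ticks).  It bounds how old
 * a PAWS ts_recent value may be before it is considered stale.
 */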
50
51/* for modulo comparisons of timestamps */
52#define TSTMP_LT(a, b) ((int)((a)-(b)) < 0)
53#define TSTMP_GEQ(a, b) ((int)((a)-(b)) >= 0)
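/*
 * Example of why the cast makes these comparisons wrap-safe: for
 * a = 0xfffffff0 and b = 0x00000010 the difference a - b is 0xffffffe0,
 * which is negative as an int, so TSTMP_LT(a, b) holds - a timestamp taken
 * just before the 32-bit counter wraps still compares as older than one
 * taken just after the wrap.
 */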
54
55#ifndef TCP_ACK_HACK
56#define DELAY_ACK(tp, ti) \
57 if (ti->ti_flags & TH_PUSH) \
58 tp->t_flags |= TF_ACKNOW; \
59 else \
60 tp->t_flags |= TF_DELACK;
61#else /* !TCP_ACK_HACK */
62#define DELAY_ACK(tp, ign) \
63 tp->t_flags |= TF_DELACK;
64#endif /* TCP_ACK_HACK */
65
66
67/*
68 * deps: netinet/tcp_reass.c
69 * tcp_reass_maxqlen = 48 (default)
70 * tcp_reass_maxseg = nmbclusters/16 (nmbclusters = 1024 + maxusers * 64 in kern/kern_mbuf.c; let's say 256)
71 */
72int
73tcp_reass(PNATState pData, struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m)
74{
75 struct tseg_qent *q;
76 struct tseg_qent *p = NULL;
77 struct tseg_qent *nq;
78 struct tseg_qent *te = NULL;
79 struct socket *so = tp->t_socket;
80 int flags;
81
82 /*
83 * XXX: tcp_reass() is rather inefficient with its data structures
84 * and should be rewritten (see NetBSD for optimizations). While
85 * doing that it should move to its own file tcp_reass.c.
86 */
87
88 /*
89 * Call with th==NULL after becoming established to
90 * force pre-ESTABLISHED data up to user socket.
91 */
92 if (th == NULL)
93 goto present;
94
95 /*
96 * Limit the number of segments in the reassembly queue to prevent
97 * holding on to too many segments (and thus running out of mbufs).
98 * Make sure to let the missing segment through which caused this
99 * queue. Always keep one global queue entry spare to be able to
100 * process the missing segment.
101 */
102 if ( th->th_seq != tp->rcv_nxt
103 && ( tcp_reass_qsize + 1 >= tcp_reass_maxseg
104 || tp->t_segqlen >= tcp_reass_maxqlen))
105 {
106 tcp_reass_overflows++;
107 tcpstat.tcps_rcvmemdrop++;
108 m_freem(pData, m);
109 *tlenp = 0;
110 return (0);
111 }
112
113 /*
114 * Allocate a new queue entry. If we can't, or hit the zone limit
115 * just drop the pkt.
116 */
117 te = RTMemAlloc(sizeof(struct tseg_qent));
118 if (te == NULL)
119 {
120 tcpstat.tcps_rcvmemdrop++;
121 m_freem(pData, m);
122 *tlenp = 0;
123 return (0);
124 }
125 tp->t_segqlen++;
126 tcp_reass_qsize++;
127
128 /*
129 * Find a segment which begins after this one does.
130 */
131 LIST_FOREACH(q, &tp->t_segq, tqe_q)
132 {
133 if (SEQ_GT(q->tqe_th->th_seq, th->th_seq))
134 break;
135 p = q;
136 }
137
138 /*
139 * If there is a preceding segment, it may provide some of
140 * our data already. If so, drop the data from the incoming
141 * segment. If it provides all of our data, drop us.
142 */
143 if (p != NULL)
144 {
145 int i;
146 /* conversion to int (in i) handles seq wraparound */
147 i = p->tqe_th->th_seq + p->tqe_len - th->th_seq;
148 if (i > 0)
149 {
150 if (i >= *tlenp)
151 {
152 tcpstat.tcps_rcvduppack++;
153 tcpstat.tcps_rcvdupbyte += *tlenp;
154 m_freem(pData, m);
155 RTMemFree(te);
156 tp->t_segqlen--;
157 tcp_reass_qsize--;
158 /*
159 * Try to present any queued data
160 * at the left window edge to the user.
161 * This is needed after the 3-WHS
162 * completes.
163 */
164 goto present; /* ??? */
165 }
166 m_adj(m, i);
167 *tlenp -= i;
168 th->th_seq += i;
169 }
170 }
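    /*
     * Worked example (illustrative numbers): if the preceding queued segment
     * covers sequence numbers 100..149 (seq 100, len 50) and the incoming
     * segment starts at 130 with 40 bytes, then i = 100 + 50 - 130 = 20, so
     * the first 20 bytes of the incoming segment are trimmed above and it
     * now starts at 150.  Had the incoming segment been seq 110, len 30,
     * i (40) would be >= its length and it would have been dropped as a
     * complete duplicate.
     */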
171 tcpstat.tcps_rcvoopack++;
172 tcpstat.tcps_rcvoobyte += *tlenp;
173
174 /*
175 * While we overlap succeeding segments trim them or,
176 * if they are completely covered, dequeue them.
177 */
178 while (q)
179 {
180 int i = (th->th_seq + *tlenp) - q->tqe_th->th_seq;
181 if (i <= 0)
182 break;
183 if (i < q->tqe_len)
184 {
185 q->tqe_th->th_seq += i;
186 q->tqe_len -= i;
187 m_adj(q->tqe_m, i);
188 break;
189 }
190
191 nq = LIST_NEXT(q, tqe_q);
192 LIST_REMOVE(q, tqe_q);
193 m_freem(pData, q->tqe_m);
194 RTMemFree(q);
195 tp->t_segqlen--;
196 tcp_reass_qsize--;
197 q = nq;
198 }
199
200 /* Insert the new segment queue entry into place. */
201 te->tqe_m = m;
202 te->tqe_th = th;
203 te->tqe_len = *tlenp;
204
205 if (p == NULL)
206 {
207 LIST_INSERT_HEAD(&tp->t_segq, te, tqe_q);
208 }
209 else
210 {
211 LIST_INSERT_AFTER(p, te, tqe_q);
212 }
213
214present:
215 /*
216 * Present data to user, advancing rcv_nxt through
217 * completed sequence space.
218 */
219 if (!TCPS_HAVEESTABLISHED(tp->t_state))
220 return (0);
221 q = LIST_FIRST(&tp->t_segq);
222 if (!q || q->tqe_th->th_seq != tp->rcv_nxt)
223 return (0);
224 do
225 {
226 tp->rcv_nxt += q->tqe_len;
227 flags = q->tqe_th->th_flags & TH_FIN;
228 nq = LIST_NEXT(q, tqe_q);
229 LIST_REMOVE(q, tqe_q);
230 /* XXX: Check this place against the original BSD code: both the BSD
231 * code Slirp derives from and current BSD use SS_FCANTRCVMORE here.
232 */
233 if (so->so_state & SS_FCANTSENDMORE)
234 m_freem(pData, q->tqe_m);
235 else
236 {
237 if (so->so_emu)
238 {
239 if (tcp_emu(pData, so, q->tqe_m))
240 sbappend(pData, so, q->tqe_m);
241 }
242 else
243 sbappend(pData, so, q->tqe_m);
244 }
245 RTMemFree(q);
246 tp->t_segqlen--;
247 tcp_reass_qsize--;
248 q = nq;
249 }
250 while (q && q->tqe_th->th_seq == tp->rcv_nxt);
251
252 return flags;
253}
254
255/*
256 * TCP input routine, follows pages 65-76 of the
257 * protocol specification dated September, 1981 very closely.
258 */
259void
260tcp_input(PNATState pData, register struct mbuf *m, int iphlen, struct socket *inso)
261{
262 struct ip save_ip, *ip;
263 register struct tcpiphdr *ti;
264 caddr_t optp = NULL;
265 int optlen = 0;
266 int len, tlen, off;
267 register struct tcpcb *tp = 0;
268 register int tiflags;
269 struct socket *so = 0;
270 int todrop, acked, ourfinisacked, needoutput = 0;
271/* int dropsocket = 0; */
272 int iss = 0;
273 u_long tiwin;
274/* int ts_present = 0; */
275
276 DEBUG_CALL("tcp_input");
277 DEBUG_ARGS((dfd," m = %8lx iphlen = %2d inso = %lx\n",
278 (long )m, iphlen, (long )inso ));
279
280 if (inso != NULL)
281 {
282 QSOCKET_LOCK(tcb);
283 SOCKET_LOCK(inso);
284 QSOCKET_UNLOCK(tcb);
285 }
286 /*
287 * If called with m == 0, then we're continuing the connect
288 */
289 if (m == NULL)
290 {
291 so = inso;
292 Log4(("NAT: tcp_input: %R[natsock]\n", so));
293 /* Re-set a few variables */
294 tp = sototcpcb(so);
295 m = so->so_m;
296
297 so->so_m = 0;
298 ti = so->so_ti;
299 /* @todo (r -vvl) clarify why it might happen */
300 if (ti == NULL)
301 {
302 LogRel(("NAT: ti is null. can't do any connection-resetting actions\n"));
303 /* mbuf should be cleared in sofree called from tcp_close */
304 tcp_close(pData, tp);
305 return;
306 }
307 tiwin = ti->ti_win;
308 tiflags = ti->ti_flags;
309
310 goto cont_conn;
311 }
312
313 tcpstat.tcps_rcvtotal++;
314 /*
315 * Get IP and TCP header together in first mbuf.
316 * Note: IP leaves IP header in first mbuf.
317 */
318 ti = mtod(m, struct tcpiphdr *);
319 if (iphlen > sizeof(struct ip ))
320 {
321 ip_stripoptions(m, (struct mbuf *)0);
322 iphlen = sizeof(struct ip );
323 }
324 /* XXX Check if too short */
325
326
327 /*
328 * Save a copy of the IP header in case we want to restore it
329 * for sending an ICMP error message in response.
330 */
331 ip = mtod(m, struct ip *);
332 save_ip = *ip;
333 save_ip.ip_len+= iphlen;
334
335 /*
336 * Checksum extended TCP header and data.
337 */
338 tlen = ((struct ip *)ti)->ip_len;
339 memset(ti->ti_x1, 0, 9);
340 ti->ti_len = htons((u_int16_t)tlen);
341 len = sizeof(struct ip ) + tlen;
342 /* keep checksum for ICMP reply
343 * ti->ti_sum = cksum(m, len);
344 * if (ti->ti_sum) { */
345 if (cksum(m, len))
346 {
347 tcpstat.tcps_rcvbadsum++;
348 goto drop;
349 }
350
351 /*
352 * Check that TCP offset makes sense,
353 * pull out TCP options and adjust length. XXX
354 */
355 off = ti->ti_off << 2;
356 if ( off < sizeof (struct tcphdr)
357 || off > tlen)
358 {
359 tcpstat.tcps_rcvbadoff++;
360 goto drop;
361 }
362 tlen -= off;
363 ti->ti_len = tlen;
364 if (off > sizeof (struct tcphdr))
365 {
366 optlen = off - sizeof (struct tcphdr);
367 optp = mtod(m, caddr_t) + sizeof (struct tcpiphdr);
368
369 /*
370 * Do quick retrieval of timestamp options ("options
371 * prediction?"). If timestamp is the only option and it's
372 * formatted as recommended in RFC 1323 appendix A, we
373 * quickly get the values now and not bother calling
374 * tcp_dooptions(), etc.
375 */
376#if 0
377 if (( optlen == TCPOLEN_TSTAMP_APPA
378 || ( optlen > TCPOLEN_TSTAMP_APPA
379 && optp[TCPOLEN_TSTAMP_APPA] == TCPOPT_EOL)) &&
380 *(u_int32_t *)optp == htonl(TCPOPT_TSTAMP_HDR) &&
381 (ti->ti_flags & TH_SYN) == 0)
382 {
383 ts_present = 1;
384 ts_val = ntohl(*(u_int32_t *)(optp + 4));
385 ts_ecr = ntohl(*(u_int32_t *)(optp + 8));
386 optp = NULL; /* we have parsed the options */
387 }
388#endif
389 }
390 tiflags = ti->ti_flags;
391
392 /*
393 * Convert TCP protocol specific fields to host format.
394 */
395 NTOHL(ti->ti_seq);
396 NTOHL(ti->ti_ack);
397 NTOHS(ti->ti_win);
398 NTOHS(ti->ti_urp);
399
400 /*
401 * Drop TCP, IP headers and TCP options.
402 */
403 m->m_data += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
404 m->m_len -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
405
406 /*
407 * Locate pcb for segment.
408 */
409findso:
410 if (so != NULL && so != &tcb)
411 SOCKET_UNLOCK(so);
412 QSOCKET_LOCK(tcb);
413 so = tcp_last_so;
414 if ( so->so_fport != ti->ti_dport
415 || so->so_lport != ti->ti_sport
416 || so->so_laddr.s_addr != ti->ti_src.s_addr
417 || so->so_faddr.s_addr != ti->ti_dst.s_addr)
418 {
419 struct socket *sonxt;
420 QSOCKET_UNLOCK(tcb);
421 /* @todo fix the SOLOOKUP macro definition to be usable here */
422#ifndef VBOX_WITH_SLIRP_MT
423 so = solookup(&tcb, ti->ti_src, ti->ti_sport,
424 ti->ti_dst, ti->ti_dport);
425#else
426 so = NULL;
427 QSOCKET_FOREACH(so, sonxt, tcp)
428 /* { */
429 if ( so->so_lport == ti->ti_sport
430 && so->so_laddr.s_addr == ti->ti_src.s_addr
431 && so->so_faddr.s_addr == ti->ti_dst.s_addr
432 && so->so_fport == ti->ti_dport
433 && so->so_deleted != 1)
434 {
435 break; /* so is locked here */
436 }
437 LOOP_LABEL(tcp, so, sonxt);
438 }
439 if (so == &tcb) {
440 so = NULL;
441 }
442#endif
443 if (so)
444 {
445 tcp_last_so = so;
446 }
447 ++tcpstat.tcps_socachemiss;
448 }
449 else
450 {
451 SOCKET_LOCK(so);
452 QSOCKET_UNLOCK(tcb);
453 }
454
455 /*
456 * If the state is CLOSED (i.e., TCB does not exist) then
457 * all data in the incoming segment is discarded.
458 * If the TCB exists but is in CLOSED state, it is embryonic,
459 * but should either do a listen or a connect soon.
460 *
461 * state == CLOSED means we've done socreate() but haven't
462 * attached it to a protocol yet...
463 *
464 * XXX If a TCB does not exist, and the TH_SYN flag is
465 * the only flag set, then create a session, mark it
466 * as if it was LISTENING, and continue...
467 */
468 if (so == 0)
469 {
470 if ((tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) != TH_SYN)
471 goto dropwithreset;
472
473 if ((so = socreate()) == NULL)
474 goto dropwithreset;
475 if (tcp_attach(pData, so) < 0)
476 {
477 RTMemFree(so); /* Not sofree (if it failed, it's not insqued) */
478 goto dropwithreset;
479 }
480 SOCKET_LOCK(so);
481 sbreserve(pData, &so->so_snd, tcp_sndspace);
482 sbreserve(pData, &so->so_rcv, tcp_rcvspace);
483
484/* tcp_last_so = so; */ /* XXX ? */
485/* tp = sototcpcb(so); */
486
487 so->so_laddr = ti->ti_src;
488 so->so_lport = ti->ti_sport;
489 so->so_faddr = ti->ti_dst;
490 so->so_fport = ti->ti_dport;
491
492 if ((so->so_iptos = tcp_tos(so)) == 0)
493 so->so_iptos = ((struct ip *)ti)->ip_tos;
494
495 tp = sototcpcb(so);
496 tp->t_state = TCPS_LISTEN;
497 }
498
499 /*
500 * If this is a still-connecting socket, this is probably
501 * a retransmit of the SYN. Whether it's a retransmit SYN
502 * or something else, we nuke it.
503 */
504 if (so->so_state & SS_ISFCONNECTING)
505 {
506 goto drop;
507 }
508
509 tp = sototcpcb(so);
510
511 /* XXX Should never fail */
512 if (tp == 0)
513 goto dropwithreset;
514 if (tp->t_state == TCPS_CLOSED)
515 {
516 goto drop;
517 }
518
519 /* Unscale the window into a 32-bit value. */
520/* if ((tiflags & TH_SYN) == 0)
521 * tiwin = ti->ti_win << tp->snd_scale;
522 * else
523 */
524 tiwin = ti->ti_win;
525
526 /*
527 * Segment received on connection.
528 * Reset idle time and keep-alive timer.
529 */
530 tp->t_idle = 0;
531 if (so_options)
532 tp->t_timer[TCPT_KEEP] = tcp_keepintvl;
533 else
534 tp->t_timer[TCPT_KEEP] = tcp_keepidle;
535
536 /*
537 * Process options if not in LISTEN state,
538 * else do it below (after getting remote address).
539 */
540 if (optp && tp->t_state != TCPS_LISTEN)
541 tcp_dooptions(pData, tp, (u_char *)optp, optlen, ti);
542/* , */
543/* &ts_present, &ts_val, &ts_ecr); */
544
545 /*
546 * Header prediction: check for the two common cases
547 * of a uni-directional data xfer. If the packet has
548 * no control flags, is in-sequence, the window didn't
549 * change and we're not retransmitting, it's a
550 * candidate. If the length is zero and the ack moved
551 * forward, we're the sender side of the xfer. Just
552 * free the data acked & wake any higher level process
553 * that was blocked waiting for space. If the length
554 * is non-zero and the ack didn't move, we're the
555 * receiver side. If we're getting packets in-order
556 * (the reassembly queue is empty), add the data to
557 * the socket buffer and note that we need a delayed ack.
558 *
559 * XXX Some of these tests are not needed
560 * eg: the tiwin == tp->snd_wnd prevents many more
561 * predictions.. with no *real* advantage..
562 */
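    /*
     * Concretely (illustrative numbers): with snd_una = 1000, a segment
     * carrying only an ACK of 1500 (ti_len == 0) takes the first fast path
     * below - 500 bytes are dropped from so_snd and the mbuf is freed
     * without ever touching the reassembly code.  An in-order data segment
     * with ti_seq == rcv_nxt, ti_ack == snd_una and room in so_rcv takes the
     * second fast path - rcv_nxt is advanced by ti_len and the data is
     * appended straight to the socket buffer.
     */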
563 if ( tp->t_state == TCPS_ESTABLISHED
564 && (tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK
565/* && (!ts_present || TSTMP_GEQ(ts_val, tp->ts_recent)) */
566 && ti->ti_seq == tp->rcv_nxt
567 && tiwin && tiwin == tp->snd_wnd
568 && tp->snd_nxt == tp->snd_max)
569 {
570 /*
571 * If last ACK falls within this segment's sequence numbers,
572 * record the timestamp.
573 */
574#if 0
575 if (ts_present && SEQ_LEQ(ti->ti_seq, tp->last_ack_sent) &&
576 SEQ_LT(tp->last_ack_sent, ti->ti_seq + ti->ti_len))
577 {
578 tp->ts_recent_age = tcp_now;
579 tp->ts_recent = ts_val;
580 }
581#endif
582
583 if (ti->ti_len == 0)
584 {
585 if ( SEQ_GT(ti->ti_ack, tp->snd_una)
586 && SEQ_LEQ(ti->ti_ack, tp->snd_max)
587 && tp->snd_cwnd >= tp->snd_wnd)
588 {
589 /*
590 * this is a pure ack for outstanding data.
591 */
592 ++tcpstat.tcps_predack;
593#if 0
594 if (ts_present)
595 tcp_xmit_timer(tp, tcp_now-ts_ecr+1);
596 else
597#endif
598 if ( tp->t_rtt
599 && SEQ_GT(ti->ti_ack, tp->t_rtseq))
600 tcp_xmit_timer(pData, tp, tp->t_rtt);
601 acked = ti->ti_ack - tp->snd_una;
602 tcpstat.tcps_rcvackpack++;
603 tcpstat.tcps_rcvackbyte += acked;
604 sbdrop(&so->so_snd, acked);
605 tp->snd_una = ti->ti_ack;
606 m_freem(pData, m);
607
608 /*
609 * If all outstanding data are acked, stop
610 * retransmit timer, otherwise restart timer
611 * using current (possibly backed-off) value.
612 * If process is waiting for space,
613 * wakeup/selwakeup/signal. If data
614 * are ready to send, let tcp_output
615 * decide between more output or persist.
616 */
617 if (tp->snd_una == tp->snd_max)
618 tp->t_timer[TCPT_REXMT] = 0;
619 else if (tp->t_timer[TCPT_PERSIST] == 0)
620 tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
621
622 /*
623 * There's room in so_snd, sowwakeup will read()
624 * from the socket if we can
625 */
626#if 0
627 if (so->so_snd.sb_flags & SB_NOTIFY)
628 sowwakeup(so);
629#endif
630 /*
631 * This is called because sowwakeup might have
632 * put data into so_snd. Since we don't do sowwakeup,
633 * we don't need this... XXX???
634 */
635 if (so->so_snd.sb_cc)
636 (void) tcp_output(pData, tp);
637
638 SOCKET_UNLOCK(so);
639 return;
640 }
641 }
642 else if ( ti->ti_ack == tp->snd_una
643 && LIST_EMPTY(&tp->t_segq)
644 && ti->ti_len <= sbspace(&so->so_rcv))
645 {
646 /*
647 * this is a pure, in-sequence data packet
648 * with nothing on the reassembly queue and
649 * we have enough buffer space to take it.
650 */
651 ++tcpstat.tcps_preddat;
652 tp->rcv_nxt += ti->ti_len;
653 tcpstat.tcps_rcvpack++;
654 tcpstat.tcps_rcvbyte += ti->ti_len;
655 /*
656 * Add data to socket buffer.
657 */
658 if (so->so_emu)
659 {
660 if (tcp_emu(pData, so, m))
661 sbappend(pData, so, m);
662 }
663 else
664 sbappend(pData, so, m);
665
666 /*
667 * XXX This is called when data arrives. Later, check
668 * if we can actually write() to the socket
669 * XXX Need to check? It'll be NON_BLOCKING
670 */
671/* sorwakeup(so); */
672
673 /*
674 * If this is a short packet, then ACK now - with Nagle
675 * congestion avoidance the sender won't send more until
676 * he gets an ACK.
677 *
678 * It is better to not delay acks at all to maximize
679 * TCP throughput. See RFC 2581.
680 */
681 tp->t_flags |= TF_ACKNOW;
682 tcp_output(pData, tp);
683 SOCKET_UNLOCK(so);
684 return;
685 }
686 } /* header prediction */
687 /*
688 * Calculate amount of space in receive window,
689 * and then do TCP input processing.
690 * Receive window is amount of space in rcv queue,
691 * but not less than advertised window.
692 */
693 {
694 int win;
695 win = sbspace(&so->so_rcv);
696 if (win < 0)
697 win = 0;
698 tp->rcv_wnd = max(win, (int)(tp->rcv_adv - tp->rcv_nxt));
699 }
700
701 switch (tp->t_state)
702 {
703 /*
704 * If the state is LISTEN then ignore segment if it contains an RST.
705 * If the segment contains an ACK then it is bad and send a RST.
706 * If it does not contain a SYN then it is not interesting; drop it.
707 * Don't bother responding if the destination was a broadcast.
708 * Otherwise initialize tp->rcv_nxt, and tp->irs, select an initial
709 * tp->iss, and send a segment:
710 * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
711 * Also initialize tp->snd_nxt to tp->iss+1 and tp->snd_una to tp->iss.
712 * Fill in remote peer address fields if not previously specified.
713 * Enter SYN_RECEIVED state, and process any other fields of this
714 * segment in this state.
715 */
716 case TCPS_LISTEN:
717 {
718 if (tiflags & TH_RST) {
719 goto drop;
720 }
721 if (tiflags & TH_ACK)
722 goto dropwithreset;
723 if ((tiflags & TH_SYN) == 0)
724 {
725 goto drop;
726 }
727
728 /*
729 * This has way too many gotos...
730 * But a bit of spaghetti code never hurt anybody :)
731 */
732
733 if (so->so_emu & EMU_NOCONNECT)
734 {
735 so->so_emu &= ~EMU_NOCONNECT;
736 goto cont_input;
737 }
738
739 if ( (tcp_fconnect(pData, so) == -1)
740 && errno != EINPROGRESS
741 && errno != EWOULDBLOCK)
742 {
743 u_char code = ICMP_UNREACH_NET;
744 DEBUG_MISC((dfd," tcp fconnect errno = %d-%s\n",
745 errno, strerror(errno)));
746 if (errno == ECONNREFUSED)
747 {
748 /* ACK the SYN, send RST to refuse the connection */
749 tcp_respond(pData, tp, ti, m, ti->ti_seq+1, (tcp_seq)0,
750 TH_RST|TH_ACK);
751 }
752 else
753 {
754 if (errno == EHOSTUNREACH)
755 code = ICMP_UNREACH_HOST;
756 HTONL(ti->ti_seq); /* restore tcp header */
757 HTONL(ti->ti_ack);
758 HTONS(ti->ti_win);
759 HTONS(ti->ti_urp);
760 m->m_data -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
761 m->m_len += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
762 *ip = save_ip;
763 icmp_error(pData, m, ICMP_UNREACH, code, 0, strerror(errno));
764 tp->t_socket->so_m = NULL;
765 }
766 tp = tcp_close(pData, tp);
767 m_free(pData, m);
768 }
769 else
770 {
771 /*
772 * Haven't connected yet, save the current mbuf
773 * and ti, and return
774 * XXX Some OS's don't tell us whether the connect()
775 * succeeded or not. So we must time it out.
776 */
777 so->so_m = m;
778 so->so_ti = ti;
779 tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT;
780 tp->t_state = TCPS_SYN_RECEIVED;
781 }
782 SOCKET_UNLOCK(so);
783 return;
784
785cont_conn:
786 /* m==NULL
787 * Check if the connect succeeded
788 */
789 if (so->so_state & SS_NOFDREF)
790 {
791 tp = tcp_close(pData, tp);
792 goto dropwithreset;
793 }
794cont_input:
795 tcp_template(tp);
796
797 if (optp)
798 tcp_dooptions(pData, tp, (u_char *)optp, optlen, ti);
799
800 if (iss)
801 tp->iss = iss;
802 else
803 tp->iss = tcp_iss;
804 tcp_iss += TCP_ISSINCR/2;
805 tp->irs = ti->ti_seq;
806 tcp_sendseqinit(tp);
807 tcp_rcvseqinit(tp);
808 tp->t_flags |= TF_ACKNOW;
809 tp->t_state = TCPS_SYN_RECEIVED;
810 tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT;
811 tcpstat.tcps_accepts++;
812 goto trimthenstep6;
813 } /* case TCPS_LISTEN */
814
815 /*
816 * If the state is SYN_SENT:
817 * if seg contains an ACK, but not for our SYN, drop the input.
818 * if seg contains a RST, then drop the connection.
819 * if seg does not contain SYN, then drop it.
820 * Otherwise this is an acceptable SYN segment
821 * initialize tp->rcv_nxt and tp->irs
822 * if seg contains ack then advance tp->snd_una
823 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
824 * arrange for segment to be acked (eventually)
825 * continue processing rest of data/controls, beginning with URG
826 */
827 case TCPS_SYN_SENT:
828 if ( (tiflags & TH_ACK)
829 && ( SEQ_LEQ(ti->ti_ack, tp->iss)
830 || SEQ_GT(ti->ti_ack, tp->snd_max)))
831 goto dropwithreset;
832
833 if (tiflags & TH_RST)
834 {
835 if (tiflags & TH_ACK)
836 tp = tcp_drop(pData, tp, 0); /* XXX Check t_softerror! */
837 goto drop;
838 }
839
840 if ((tiflags & TH_SYN) == 0)
841 {
842 goto drop;
843 }
844 if (tiflags & TH_ACK)
845 {
846 tp->snd_una = ti->ti_ack;
847 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
848 tp->snd_nxt = tp->snd_una;
849 }
850
851 tp->t_timer[TCPT_REXMT] = 0;
852 tp->irs = ti->ti_seq;
853 tcp_rcvseqinit(tp);
854 tp->t_flags |= TF_ACKNOW;
855 if (tiflags & TH_ACK && SEQ_GT(tp->snd_una, tp->iss))
856 {
857 tcpstat.tcps_connects++;
858 soisfconnected(so);
859 tp->t_state = TCPS_ESTABLISHED;
860
861 /* Do window scaling on this connection? */
862#if 0
863 if (( tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE))
864 == (TF_RCVD_SCALE|TF_REQ_SCALE))
865 {
866 tp->snd_scale = tp->requested_s_scale;
867 tp->rcv_scale = tp->request_r_scale;
868 }
869#endif
870 (void) tcp_reass(pData, tp, (struct tcphdr *)0, NULL, (struct mbuf *)0);
871 /*
872 * if we didn't have to retransmit the SYN,
873 * use its rtt as our initial srtt & rtt var.
874 */
875 if (tp->t_rtt)
876 tcp_xmit_timer(pData, tp, tp->t_rtt);
877 }
878 else
879 tp->t_state = TCPS_SYN_RECEIVED;
880
881trimthenstep6:
882 /*
883 * Advance ti->ti_seq to correspond to first data byte.
884 * If data, trim to stay within window,
885 * dropping FIN if necessary.
886 */
887 ti->ti_seq++;
888 if (ti->ti_len > tp->rcv_wnd)
889 {
890 todrop = ti->ti_len - tp->rcv_wnd;
891 m_adj(m, -todrop);
892 ti->ti_len = tp->rcv_wnd;
893 tiflags &= ~TH_FIN;
894 tcpstat.tcps_rcvpackafterwin++;
895 tcpstat.tcps_rcvbyteafterwin += todrop;
896 }
897 tp->snd_wl1 = ti->ti_seq - 1;
898 tp->rcv_up = ti->ti_seq;
899 Log2(("hit6"));
900 goto step6;
901 } /* switch tp->t_state */
902 /*
903 * States other than LISTEN or SYN_SENT.
904 * First check timestamp, if present.
905 * Then check that at least some bytes of segment are within
906 * receive window. If segment begins before rcv_nxt,
907 * drop leading data (and SYN); if nothing left, just ack.
908 *
909 * RFC 1323 PAWS: If we have a timestamp reply on this segment
910 * and it's less than ts_recent, drop it.
911 */
912#if 0
913 if ( ts_present
914 && (tiflags & TH_RST) == 0
915 && tp->ts_recent
916 && TSTMP_LT(ts_val, tp->ts_recent))
917 {
918 /* Check to see if ts_recent is over 24 days old. */
919 if ((int)(tcp_now - tp->ts_recent_age) > TCP_PAWS_IDLE)
920 {
921 /*
922 * Invalidate ts_recent. If this segment updates
923 * ts_recent, the age will be reset later and ts_recent
924 * will get a valid value. If it does not, setting
925 * ts_recent to zero will at least satisfy the
926 * requirement that zero be placed in the timestamp
927 * echo reply when ts_recent isn't valid. The
928 * age isn't reset until we get a valid ts_recent
929 * because we don't want out-of-order segments to be
930 * dropped when ts_recent is old.
931 */
932 tp->ts_recent = 0;
933 }
934 else
935 {
936 tcpstat.tcps_rcvduppack++;
937 tcpstat.tcps_rcvdupbyte += ti->ti_len;
938 tcpstat.tcps_pawsdrop++;
939 goto dropafterack;
940 }
941 }
942#endif
943
944 todrop = tp->rcv_nxt - ti->ti_seq;
945 if (todrop > 0)
946 {
947 if (tiflags & TH_SYN)
948 {
949 tiflags &= ~TH_SYN;
950 ti->ti_seq++;
951 if (ti->ti_urp > 1)
952 ti->ti_urp--;
953 else
954 tiflags &= ~TH_URG;
955 todrop--;
956 }
957 /*
958 * Following if statement from Stevens, vol. 2, p. 960.
959 */
960 if ( todrop > ti->ti_len
961 || ( todrop == ti->ti_len
962 && (tiflags & TH_FIN) == 0))
963 {
964 /*
965 * Any valid FIN must be to the left of the window.
966 * At this point the FIN must be a duplicate or out
967 * of sequence; drop it.
968 */
969 tiflags &= ~TH_FIN;
970
971 /*
972 * Send an ACK to resynchronize and drop any data.
973 * But keep on processing for RST or ACK.
974 */
975 tp->t_flags |= TF_ACKNOW;
976 todrop = ti->ti_len;
977 tcpstat.tcps_rcvduppack++;
978 tcpstat.tcps_rcvdupbyte += todrop;
979 }
980 else
981 {
982 tcpstat.tcps_rcvpartduppack++;
983 tcpstat.tcps_rcvpartdupbyte += todrop;
984 }
985 m_adj(m, todrop);
986 ti->ti_seq += todrop;
987 ti->ti_len -= todrop;
988 if (ti->ti_urp > todrop)
989 ti->ti_urp -= todrop;
990 else
991 {
992 tiflags &= ~TH_URG;
993 ti->ti_urp = 0;
994 }
995 }
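    /*
     * Worked example (illustrative numbers): with rcv_nxt = 2000, an
     * incoming segment with seq 1500 and 700 bytes has todrop = 500; those
     * 500 already-delivered bytes were just trimmed from the front, leaving
     * a 200-byte segment that now starts exactly at rcv_nxt.  Had todrop
     * covered the whole segment it would have been counted as a duplicate
     * and an immediate ACK scheduled instead.
     */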
996 /*
997 * If new data are received on a connection after the
998 * user processes are gone, then RST the other end.
999 */
1000 if ( (so->so_state & SS_NOFDREF)
1001 && tp->t_state > TCPS_CLOSE_WAIT && ti->ti_len)
1002 {
1003 tp = tcp_close(pData, tp);
1004 tcpstat.tcps_rcvafterclose++;
1005 goto dropwithreset;
1006 }
1007
1008 /*
1009 * If segment ends after window, drop trailing data
1010 * (and PUSH and FIN); if nothing left, just ACK.
1011 */
1012 todrop = (ti->ti_seq+ti->ti_len) - (tp->rcv_nxt+tp->rcv_wnd);
1013 if (todrop > 0)
1014 {
1015 tcpstat.tcps_rcvpackafterwin++;
1016 if (todrop >= ti->ti_len)
1017 {
1018 tcpstat.tcps_rcvbyteafterwin += ti->ti_len;
1019 /*
1020 * If a new connection request is received
1021 * while in TIME_WAIT, drop the old connection
1022 * and start over if the sequence numbers
1023 * are above the previous ones.
1024 */
1025 if ( tiflags & TH_SYN
1026 && tp->t_state == TCPS_TIME_WAIT
1027 && SEQ_GT(ti->ti_seq, tp->rcv_nxt))
1028 {
1029 iss = tp->rcv_nxt + TCP_ISSINCR;
1030 tp = tcp_close(pData, tp);
1031 SOCKET_UNLOCK(tp->t_socket);
1032 goto findso;
1033 }
1034 /*
1035 * If window is closed can only take segments at
1036 * window edge, and have to drop data and PUSH from
1037 * incoming segments. Continue processing, but
1038 * remember to ack. Otherwise, drop segment
1039 * and ack.
1040 */
1041 if (tp->rcv_wnd == 0 && ti->ti_seq == tp->rcv_nxt)
1042 {
1043 tp->t_flags |= TF_ACKNOW;
1044 tcpstat.tcps_rcvwinprobe++;
1045 }
1046 else
1047 goto dropafterack;
1048 }
1049 else
1050 tcpstat.tcps_rcvbyteafterwin += todrop;
1051 m_adj(m, -todrop);
1052 ti->ti_len -= todrop;
1053 tiflags &= ~(TH_PUSH|TH_FIN);
1054 }
1055
1056 /*
1057 * If last ACK falls within this segment's sequence numbers,
1058 * record its timestamp.
1059 */
1060#if 0
1061 if ( ts_present
1062 && SEQ_LEQ(ti->ti_seq, tp->last_ack_sent)
1063 && SEQ_LT(tp->last_ack_sent, ti->ti_seq + ti->ti_len + ((tiflags & (TH_SYN|TH_FIN)) != 0)))
1064 {
1065 tp->ts_recent_age = tcp_now;
1066 tp->ts_recent = ts_val;
1067 }
1068#endif
1069
1070 /*
1071 * If the RST bit is set examine the state:
1072 * SYN_RECEIVED STATE:
1073 * If passive open, return to LISTEN state.
1074 * If active open, inform user that connection was refused.
1075 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT2, CLOSE_WAIT STATES:
1076 * Inform user that connection was reset, and close tcb.
1077 * CLOSING, LAST_ACK, TIME_WAIT STATES
1078 * Close the tcb.
1079 */
1080 if (tiflags&TH_RST)
1081 switch (tp->t_state)
1082 {
1083 case TCPS_SYN_RECEIVED:
1084/* so->so_error = ECONNREFUSED; */
1085 goto close;
1086
1087 case TCPS_ESTABLISHED:
1088 case TCPS_FIN_WAIT_1:
1089 case TCPS_FIN_WAIT_2:
1090 case TCPS_CLOSE_WAIT:
1091/* so->so_error = ECONNRESET; */
1092close:
1093 tp->t_state = TCPS_CLOSED;
1094 tcpstat.tcps_drops++;
1095 tp = tcp_close(pData, tp);
1096 goto drop;
1097
1098 case TCPS_CLOSING:
1099 case TCPS_LAST_ACK:
1100 case TCPS_TIME_WAIT:
1101 tp = tcp_close(pData, tp);
1102 goto drop;
1103 }
1104
1105 /*
1106 * If a SYN is in the window, then this is an
1107 * error and we send an RST and drop the connection.
1108 */
1109 if (tiflags & TH_SYN)
1110 {
1111 tp = tcp_drop(pData, tp, 0);
1112 goto dropwithreset;
1113 }
1114
1115 /*
1116 * If the ACK bit is off we drop the segment and return.
1117 */
1118 if ((tiflags & TH_ACK) == 0)
1119 {
1120 goto drop;
1121 }
1122
1123 /*
1124 * Ack processing.
1125 */
1126 switch (tp->t_state)
1127 {
1128 /*
1129 * In SYN_RECEIVED state if the ack ACKs our SYN then enter
1130 * ESTABLISHED state and continue processing, otherwise
1131 * send an RST. una<=ack<=max
1132 */
1133 case TCPS_SYN_RECEIVED:
1134 if ( SEQ_GT(tp->snd_una, ti->ti_ack)
1135 || SEQ_GT(ti->ti_ack, tp->snd_max))
1136 goto dropwithreset;
1137 tcpstat.tcps_connects++;
1138 tp->t_state = TCPS_ESTABLISHED;
1139 /*
1140 * The sent SYN is ack'ed with our sequence number +1
1141 * The first data byte already in the buffer will get
1142 * lost if no correction is made. This is only needed for
1143 * SS_CTL since the buffer is empty otherwise.
1144 * tp->snd_una++; or:
1145 */
1146 tp->snd_una = ti->ti_ack;
1147 soisfconnected(so);
1148
1149 /* Do window scaling? */
1150#if 0
1151 if ( (tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE))
1152 == (TF_RCVD_SCALE|TF_REQ_SCALE))
1153 {
1154 tp->snd_scale = tp->requested_s_scale;
1155 tp->rcv_scale = tp->request_r_scale;
1156 }
1157#endif
1158 (void) tcp_reass(pData, tp, (struct tcphdr *)0, (int *)0, (struct mbuf *)0);
1159 tp->snd_wl1 = ti->ti_seq - 1;
1160 /* Avoid ack processing; snd_una==ti_ack => dup ack */
1161 goto synrx_to_est;
1162 /* fall into ... */
1163
1164 /*
1165 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
1166 * ACKs. If the ack is in the range
1167 * tp->snd_una < ti->ti_ack <= tp->snd_max
1168 * then advance tp->snd_una to ti->ti_ack and drop
1169 * data from the retransmission queue. If this ACK reflects
1170 * more up to date window information we update our window information.
1171 */
1172 case TCPS_ESTABLISHED:
1173 case TCPS_FIN_WAIT_1:
1174 case TCPS_FIN_WAIT_2:
1175 case TCPS_CLOSE_WAIT:
1176 case TCPS_CLOSING:
1177 case TCPS_LAST_ACK:
1178 case TCPS_TIME_WAIT:
1179 if (SEQ_LEQ(ti->ti_ack, tp->snd_una))
1180 {
1181 if (ti->ti_len == 0 && tiwin == tp->snd_wnd)
1182 {
1183 tcpstat.tcps_rcvdupack++;
1184 DEBUG_MISC((dfd," dup ack m = %lx so = %lx \n",
1185 (long )m, (long )so));
1186 /*
1187 * If we have outstanding data (other than
1188 * a window probe), this is a completely
1189 * duplicate ack (ie, window info didn't
1190 * change), the ack is the biggest we've
1191 * seen and we've seen exactly our rexmt
1192 * threshold of them, assume a packet
1193 * has been dropped and retransmit it.
1194 * Kludge snd_nxt & the congestion
1195 * window so we send only this one
1196 * packet.
1197 *
1198 * We know we're losing at the current
1199 * window size so do congestion avoidance
1200 * (set ssthresh to half the current window
1201 * and pull our congestion window back to
1202 * the new ssthresh).
1203 *
1204 * Dup acks mean that packets have left the
1205 * network (they're now cached at the receiver)
1206 * so bump cwnd by the amount in the receiver
1207 * to keep a constant cwnd packets in the
1208 * network.
1209 */
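                /*
                 * Worked example (illustrative numbers): with t_maxseg = 1460
                 * and snd_wnd = snd_cwnd = 29200 (20 segments), reaching the
                 * duplicate-ACK threshold sets ssthresh to
                 * 29200 / 2 / 1460 = 10 segments (14600 bytes), shrinks cwnd
                 * to one segment for the single retransmission, and then
                 * re-inflates it to ssthresh plus one segment per duplicate
                 * ACK (14600 + 3 * 1460 = 18980 with the usual threshold of
                 * three) to account for the segments that have left the
                 * network.
                 */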
1210 if ( tp->t_timer[TCPT_REXMT] == 0
1211 || ti->ti_ack != tp->snd_una)
1212 tp->t_dupacks = 0;
1213 else if (++tp->t_dupacks == tcprexmtthresh)
1214 {
1215 tcp_seq onxt = tp->snd_nxt;
1216 u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
1217 if (win < 2)
1218 win = 2;
1219 tp->snd_ssthresh = win * tp->t_maxseg;
1220 tp->t_timer[TCPT_REXMT] = 0;
1221 tp->t_rtt = 0;
1222 tp->snd_nxt = ti->ti_ack;
1223 tp->snd_cwnd = tp->t_maxseg;
1224 (void) tcp_output(pData, tp);
1225 tp->snd_cwnd = tp->snd_ssthresh +
1226 tp->t_maxseg * tp->t_dupacks;
1227 if (SEQ_GT(onxt, tp->snd_nxt))
1228 tp->snd_nxt = onxt;
1229 goto drop;
1230 }
1231 else if (tp->t_dupacks > tcprexmtthresh)
1232 {
1233 tp->snd_cwnd += tp->t_maxseg;
1234 (void) tcp_output(pData, tp);
1235 goto drop;
1236 }
1237 }
1238 else
1239 tp->t_dupacks = 0;
1240 break;
1241 }
1242synrx_to_est:
1243 /*
1244 * If the congestion window was inflated to account
1245 * for the other side's cached packets, retract it.
1246 */
1247 if ( tp->t_dupacks > tcprexmtthresh
1248 && tp->snd_cwnd > tp->snd_ssthresh)
1249 tp->snd_cwnd = tp->snd_ssthresh;
1250 tp->t_dupacks = 0;
1251 if (SEQ_GT(ti->ti_ack, tp->snd_max))
1252 {
1253 tcpstat.tcps_rcvacktoomuch++;
1254 goto dropafterack;
1255 }
1256 acked = ti->ti_ack - tp->snd_una;
1257 tcpstat.tcps_rcvackpack++;
1258 tcpstat.tcps_rcvackbyte += acked;
1259
1260 /*
1261 * If we have a timestamp reply, update smoothed
1262 * round trip time. If no timestamp is present but
1263 * transmit timer is running and timed sequence
1264 * number was acked, update smoothed round trip time.
1265 * Since we now have an rtt measurement, cancel the
1266 * timer backoff (cf., Phil Karn's retransmit alg.).
1267 * Recompute the initial retransmit timer.
1268 */
1269#if 0
1270 if (ts_present)
1271 tcp_xmit_timer(tp, tcp_now-ts_ecr+1);
1272 else
1273#endif
1274 if (tp->t_rtt && SEQ_GT(ti->ti_ack, tp->t_rtseq))
1275 tcp_xmit_timer(pData, tp, tp->t_rtt);
1276
1277 /*
1278 * If all outstanding data is acked, stop retransmit
1279 * timer and remember to restart (more output or persist).
1280 * If there is more data to be acked, restart retransmit
1281 * timer, using current (possibly backed-off) value.
1282 */
1283 if (ti->ti_ack == tp->snd_max)
1284 {
1285 tp->t_timer[TCPT_REXMT] = 0;
1286 needoutput = 1;
1287 }
1288 else if (tp->t_timer[TCPT_PERSIST] == 0)
1289 tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
1290 /*
1291 * When new data is acked, open the congestion window.
1292 * If the window gives us less than ssthresh packets
1293 * in flight, open exponentially (maxseg per packet).
1294 * Otherwise open linearly: maxseg per window
1295 * (maxseg^2 / cwnd per packet).
1296 */
1297 {
1298 register u_int cw = tp->snd_cwnd;
1299 register u_int incr = tp->t_maxseg;
1300
1301 if (cw > tp->snd_ssthresh)
1302 incr = incr * incr / cw;
1303 tp->snd_cwnd = min(cw + incr, TCP_MAXWIN<<tp->snd_scale);
1304 }
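            /*
             * Example (illustrative numbers): in slow start (cw <= ssthresh)
             * each ACK grows cwnd by a full t_maxseg.  Above ssthresh the
             * increment is maxseg*maxseg/cw, e.g. with maxseg = 1460 and
             * cw = 14600 each ACK adds only 146 bytes, i.e. roughly one
             * segment per window's worth of ACKs (linear growth).
             */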
1305 if (acked > so->so_snd.sb_cc)
1306 {
1307 tp->snd_wnd -= so->so_snd.sb_cc;
1308 sbdrop(&so->so_snd, (int )so->so_snd.sb_cc);
1309 ourfinisacked = 1;
1310 }
1311 else
1312 {
1313 sbdrop(&so->so_snd, acked);
1314 tp->snd_wnd -= acked;
1315 ourfinisacked = 0;
1316 }
1317 /*
1318 * XXX sowwakeup is called when data is acked and there's room
1319 * for more data... it should read() the socket
1320 */
1321#if 0
1322 if (so->so_snd.sb_flags & SB_NOTIFY)
1323 sowwakeup(so);
1324#endif
1325 tp->snd_una = ti->ti_ack;
1326 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
1327 tp->snd_nxt = tp->snd_una;
1328
1329 switch (tp->t_state)
1330 {
1331 /*
1332 * In FIN_WAIT_1 STATE in addition to the processing
1333 * for the ESTABLISHED state if our FIN is now acknowledged
1334 * then enter FIN_WAIT_2.
1335 */
1336 case TCPS_FIN_WAIT_1:
1337 if (ourfinisacked)
1338 {
1339 /*
1340 * If we can't receive any more
1341 * data, then closing user can proceed.
1342 * Starting the timer is contrary to the
1343 * specification, but if we don't get a FIN
1344 * we'll hang forever.
1345 */
1346 if (so->so_state & SS_FCANTRCVMORE)
1347 {
1348 soisfdisconnected(so);
1349 tp->t_timer[TCPT_2MSL] = tcp_maxidle;
1350 }
1351 tp->t_state = TCPS_FIN_WAIT_2;
1352 }
1353 break;
1354
1355 /*
1356 * In CLOSING STATE in addition to the processing for
1357 * the ESTABLISHED state if the ACK acknowledges our FIN
1358 * then enter the TIME-WAIT state, otherwise ignore
1359 * the segment.
1360 */
1361 case TCPS_CLOSING:
1362 if (ourfinisacked)
1363 {
1364 tp->t_state = TCPS_TIME_WAIT;
1365 tcp_canceltimers(tp);
1366 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1367 soisfdisconnected(so);
1368 }
1369 break;
1370
1371 /*
1372 * In LAST_ACK, we may still be waiting for data to drain
1373 * and/or to be acked, as well as for the ack of our FIN.
1374 * If our FIN is now acknowledged, delete the TCB,
1375 * enter the closed state and return.
1376 */
1377 case TCPS_LAST_ACK:
1378 if (ourfinisacked)
1379 {
1380 tp = tcp_close(pData, tp);
1381 goto drop;
1382 }
1383 break;
1384
1385 /*
1386 * In TIME_WAIT state the only thing that should arrive
1387 * is a retransmission of the remote FIN. Acknowledge
1388 * it and restart the finack timer.
1389 */
1390 case TCPS_TIME_WAIT:
1391 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1392 goto dropafterack;
1393 }
1394 } /* switch(tp->t_state) */
1395
1396step6:
1397 /*
1398 * Update window information.
1399 * Don't look at window if no ACK: TAC's send garbage on first SYN.
1400 */
1401 if ( (tiflags & TH_ACK)
1402 && ( SEQ_LT(tp->snd_wl1, ti->ti_seq)
1403 || ( tp->snd_wl1 == ti->ti_seq
1404 && ( SEQ_LT(tp->snd_wl2, ti->ti_ack)
1405 || ( tp->snd_wl2 == ti->ti_ack
1406 && tiwin > tp->snd_wnd)))))
1407 {
1408 /* keep track of pure window updates */
1409 if ( ti->ti_len == 0
1410 && tp->snd_wl2 == ti->ti_ack
1411 && tiwin > tp->snd_wnd)
1412 tcpstat.tcps_rcvwinupd++;
1413 tp->snd_wnd = tiwin;
1414 tp->snd_wl1 = ti->ti_seq;
1415 tp->snd_wl2 = ti->ti_ack;
1416 if (tp->snd_wnd > tp->max_sndwnd)
1417 tp->max_sndwnd = tp->snd_wnd;
1418 needoutput = 1;
1419 }
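    /*
     * snd_wl1 and snd_wl2 remember the sequence and ack numbers of the
     * segment that last updated snd_wnd; the three-part test above only
     * accepts a window advertisement from a segment at least as recent as
     * that one, so an old, reordered segment cannot roll our idea of the
     * peer's window back.
     */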
1420
1421 /*
1422 * Process segments with URG.
1423 */
1424 if ((tiflags & TH_URG) && ti->ti_urp &&
1425 TCPS_HAVERCVDFIN(tp->t_state) == 0)
1426 {
1427 /*
1428 * This is a kludge, but if we receive and accept
1429 * random urgent pointers, we'll crash in
1430 * soreceive. It's hard to imagine someone
1431 * actually wanting to send this much urgent data.
1432 */
1433 if (ti->ti_urp + so->so_rcv.sb_cc > so->so_rcv.sb_datalen)
1434 {
1435 ti->ti_urp = 0;
1436 tiflags &= ~TH_URG;
1437 goto dodata;
1438 }
1439 /*
1440 * If this segment advances the known urgent pointer,
1441 * then mark the data stream. This should not happen
1442 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
1443 * a FIN has been received from the remote side.
1444 * In these states we ignore the URG.
1445 *
1446 * According to RFC961 (Assigned Protocols),
1447 * the urgent pointer points to the last octet
1448 * of urgent data. We continue, however,
1449 * to consider it to indicate the first octet
1450 * of data past the urgent section as the original
1451 * spec states (in one of two places).
1452 */
1453 if (SEQ_GT(ti->ti_seq+ti->ti_urp, tp->rcv_up))
1454 {
1455 tp->rcv_up = ti->ti_seq + ti->ti_urp;
1456 so->so_urgc = so->so_rcv.sb_cc +
1457 (tp->rcv_up - tp->rcv_nxt); /* -1; */
1458 tp->rcv_up = ti->ti_seq + ti->ti_urp;
1459 }
1460 }
1461 else
1462 /*
1463 * If no out of band data is expected,
1464 * pull receive urgent pointer along
1465 * with the receive window.
1466 */
1467 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
1468 tp->rcv_up = tp->rcv_nxt;
1469dodata:
1470
1471 /*
1472 * If this is a small packet, then ACK now - with Nagle
1473 * congestion avoidance the sender won't send more until
1474 * he gets an ACK.
1475 *
1476 * See above.
1477 */
1478 if ( ti->ti_len
1479 && (unsigned)ti->ti_len <= 5
1480 && ((struct tcpiphdr_2 *)ti)->first_char == (char)27)
1481 {
1482 tp->t_flags |= TF_ACKNOW;
1483 }
1484
1485 /*
1486 * Process the segment text, merging it into the TCP sequencing queue,
1487 * and arranging for acknowledgment of receipt if necessary.
1488 * This process logically involves adjusting tp->rcv_wnd as data
1489 * is presented to the user (this happens in tcp_usrreq.c,
1490 * case PRU_RCVD). If a FIN has already been received on this
1491 * connection then we just ignore the text.
1492 */
1493 if ( (ti->ti_len || (tiflags&TH_FIN))
1494 && TCPS_HAVERCVDFIN(tp->t_state) == 0)
1495 {
1496 if ( ti->ti_seq == tp->rcv_nxt
1497 && LIST_EMPTY(&tp->t_segq)
1498 && tp->t_state == TCPS_ESTABLISHED)
1499 {
1500 DELAY_ACK(tp, ti); /* slightly different from the BSD declaration; see netinet/tcp_input.c */
1501 tp->rcv_nxt += tlen;
1502 tiflags = ti->ti_t.th_flags & TH_FIN;
1503 tcpstat.tcps_rcvpack++;
1504 tcpstat.tcps_rcvbyte += tlen;
1505 if (so->so_state & SS_FCANTRCVMORE)
1506 m_freem(pData, m);
1507 else
1508 {
1509 if (so->so_emu)
1510 {
1511 if (tcp_emu(pData, so, m))
1512 sbappend(pData, so, m);
1513 }
1514 else
1515 sbappend(pData, so, m);
1516 }
1517 }
1518 else
1519 {
1520 tiflags = tcp_reass(pData, tp, &ti->ti_t, &tlen, m);
1521 tiflags |= TF_ACKNOW;
1522 }
1523 /*
1524 * Note the amount of data that peer has sent into
1525 * our window, in order to estimate the sender's
1526 * buffer size.
1527 */
1528 len = so->so_rcv.sb_datalen - (tp->rcv_adv - tp->rcv_nxt);
1529 }
1530 else
1531 {
1532 m_free(pData, m);
1533 tiflags &= ~TH_FIN;
1534 }
1535
1536 /*
1537 * If FIN is received ACK the FIN and let the user know
1538 * that the connection is closing.
1539 */
1540 if (tiflags & TH_FIN)
1541 {
1542 if (TCPS_HAVERCVDFIN(tp->t_state) == 0)
1543 {
1544 /*
1545 * If we receive a FIN we can't send more data,
1546 * so set SS_FDRAIN.
1547 * Shutdown the socket if there is no rx data in the
1548 * buffer.
1549 * soread() is called on completion of shutdown() and
1550 * will go to TCPS_LAST_ACK, and use tcp_output()
1551 * to send the FIN.
1552 */
1553/* sofcantrcvmore(so); */
1554 sofwdrain(so);
1555
1556 tp->t_flags |= TF_ACKNOW;
1557 tp->rcv_nxt++;
1558 }
1559 switch (tp->t_state)
1560 {
1561 /*
1562 * In SYN_RECEIVED and ESTABLISHED STATES
1563 * enter the CLOSE_WAIT state.
1564 */
1565 case TCPS_SYN_RECEIVED:
1566 case TCPS_ESTABLISHED:
1567 if(so->so_emu == EMU_CTL) /* no shutdown on socket */
1568 tp->t_state = TCPS_LAST_ACK;
1569 else
1570 tp->t_state = TCPS_CLOSE_WAIT;
1571 break;
1572
1573 /*
1574 * If still in FIN_WAIT_1 STATE FIN has not been acked so
1575 * enter the CLOSING state.
1576 */
1577 case TCPS_FIN_WAIT_1:
1578 tp->t_state = TCPS_CLOSING;
1579 break;
1580
1581 /*
1582 * In FIN_WAIT_2 state enter the TIME_WAIT state,
1583 * starting the time-wait timer, turning off the other
1584 * standard timers.
1585 */
1586 case TCPS_FIN_WAIT_2:
1587 tp->t_state = TCPS_TIME_WAIT;
1588 tcp_canceltimers(tp);
1589 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1590 soisfdisconnected(so);
1591 break;
1592
1593 /*
1594 * In TIME_WAIT state restart the 2 MSL time_wait timer.
1595 */
1596 case TCPS_TIME_WAIT:
1597 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1598 break;
1599 }
1600 }
1601
1602 /*
1603 * Return any desired output.
1604 */
1605 if (needoutput || (tp->t_flags & TF_ACKNOW))
1606 tcp_output(pData, tp);
1607
1608 SOCKET_UNLOCK(so);
1609 return;
1610
1611dropafterack:
1612 Log2(("drop after ack\n"));
1613 /*
1614 * Generate an ACK dropping incoming segment if it occupies
1615 * sequence space, where the ACK reflects our state.
1616 */
1617 if (tiflags & TH_RST)
1618 goto drop;
1619 m_freem(pData, m);
1620 tp->t_flags |= TF_ACKNOW;
1621 (void) tcp_output(pData, tp);
1622 SOCKET_UNLOCK(so);
1623 return;
1624
1625dropwithreset:
1626 /* reuses m if m!=NULL, m_free() unnecessary */
1627 if (tiflags & TH_ACK)
1628 tcp_respond(pData, tp, ti, m, (tcp_seq)0, ti->ti_ack, TH_RST);
1629 else
1630 {
1631 if (tiflags & TH_SYN) ti->ti_len++;
1632 tcp_respond(pData, tp, ti, m, ti->ti_seq+ti->ti_len, (tcp_seq)0,
1633 TH_RST|TH_ACK);
1634 }
1635
1636 if (so != &tcb)
1637 SOCKET_UNLOCK(so);
1638 return;
1639
1640drop:
1641 /*
1642 * Drop space held by incoming segment and return.
1643 */
1644 m_free(pData, m);
1645
1646#ifdef VBOX_WITH_SLIRP_MT
1647 if (RTCritSectIsOwned(&so->so_mutex))
1648 {
1649 SOCKET_UNLOCK(so);
1650 }
1651#endif
1652
1653 return;
1654}
1655
1656void
1657tcp_dooptions(PNATState pData, struct tcpcb *tp, u_char *cp, int cnt, struct tcpiphdr *ti)
1658{
1659 u_int16_t mss;
1660 int opt, optlen;
1661
1662 DEBUG_CALL("tcp_dooptions");
1663 DEBUG_ARGS((dfd," tp = %lx cnt=%i \n", (long )tp, cnt));
1664
1665 for (; cnt > 0; cnt -= optlen, cp += optlen)
1666 {
1667 opt = cp[0];
1668 if (opt == TCPOPT_EOL)
1669 break;
1670 if (opt == TCPOPT_NOP)
1671 optlen = 1;
1672 else
1673 {
1674 optlen = cp[1];
1675 if (optlen <= 0)
1676 break;
1677 }
1678 switch (opt)
1679 {
1680 default:
1681 continue;
1682
1683 case TCPOPT_MAXSEG:
1684 if (optlen != TCPOLEN_MAXSEG)
1685 continue;
1686 if (!(ti->ti_flags & TH_SYN))
1687 continue;
1688 memcpy((char *) &mss, (char *) cp + 2, sizeof(mss));
1689 NTOHS(mss);
1690 (void) tcp_mss(pData, tp, mss); /* sets t_maxseg */
1691 break;
1692
1693#if 0
1694 case TCPOPT_WINDOW:
1695 if (optlen != TCPOLEN_WINDOW)
1696 continue;
1697 if (!(ti->ti_flags & TH_SYN))
1698 continue;
1699 tp->t_flags |= TF_RCVD_SCALE;
1700 tp->requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT);
1701 break;
1702
1703 case TCPOPT_TIMESTAMP:
1704 if (optlen != TCPOLEN_TIMESTAMP)
1705 continue;
1706 *ts_present = 1;
1707 memcpy((char *) ts_val, (char *)cp + 2, sizeof(*ts_val));
1708 NTOHL(*ts_val);
1709 memcpy((char *) ts_ecr, (char *)cp + 6, sizeof(*ts_ecr));
1710 NTOHL(*ts_ecr);
1711
1712 /*
1713 * A timestamp received in a SYN makes
1714 * it ok to send timestamp requests and replies.
1715 */
1716 if (ti->ti_flags & TH_SYN)
1717 {
1718 tp->t_flags |= TF_RCVD_TSTMP;
1719 tp->ts_recent = *ts_val;
1720 tp->ts_recent_age = tcp_now;
1721 }
1722 break;
1723#endif
1724 }
1725 }
1726}
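
/*
 * Illustrative sketch (not part of the original file; the bytes below are
 * made-up example data): what a raw MSS option looks like on the wire and
 * what the parsing loop above extracts from it.
 */
#if 0
static void
tcp_dooptions_example(void)
{
    /* kind = TCPOPT_MAXSEG (2), length = 4, value = 0x05b4 (1460) in
     * network byte order. */
    u_char opt[4] = { 0x02, 0x04, 0x05, 0xb4 };
    u_int16_t mss;

    memcpy((char *)&mss, (char *)opt + 2, sizeof(mss));
    NTOHS(mss);     /* mss is now 1460 in host byte order */
}
#endif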
1727
1728
1729/*
1730 * Pull out of band byte out of a segment so
1731 * it doesn't appear in the user's data queue.
1732 * It is still reflected in the segment length for
1733 * sequencing purposes.
1734 */
1735
1736#if 0
1737void
1738tcp_pulloutofband(struct socket *so, struct tcpiphdr *ti, struct mbuf *m)
1739{
1740 int cnt = ti->ti_urp - 1;
1741
1742 while (cnt >= 0)
1743 {
1744 if (m->m_len > cnt)
1745 {
1746 char *cp = mtod(m, caddr_t) + cnt;
1747 struct tcpcb *tp = sototcpcb(so);
1748
1749 tp->t_iobc = *cp;
1750 tp->t_oobflags |= TCPOOB_HAVEDATA;
1751 memcpy(cp, cp+1, (unsigned)(m->m_len - cnt - 1));
1752 m->m_len--;
1753 return;
1754 }
1755 cnt -= m->m_len;
1756 m = m->m_next; /* XXX WRONG! Fix it! */
1757 if (m == 0)
1758 break;
1759 }
1760 panic("tcp_pulloutofband");
1761}
1762#endif
1763
1764/*
1765 * Collect new round-trip time estimate
1766 * and update averages and current timeout.
1767 */
1768
1769void
1770tcp_xmit_timer(PNATState pData, register struct tcpcb *tp, int rtt)
1771{
1772 register short delta;
1773
1774 DEBUG_CALL("tcp_xmit_timer");
1775 DEBUG_ARG("tp = %lx", (long)tp);
1776 DEBUG_ARG("rtt = %d", rtt);
1777
1778 tcpstat.tcps_rttupdated++;
1779 if (tp->t_srtt != 0)
1780 {
1781 /*
1782 * srtt is stored as fixed point with 3 bits after the
1783 * binary point (i.e., scaled by 8). The following magic
1784 * is equivalent to the smoothing algorithm in rfc793 with
1785 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
1786 * point). Adjust rtt to origin 0.
1787 */
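        /*
         * Example (illustrative numbers): with t_srtt = 16 (2 ticks scaled
         * by 8) and a new sample rtt = 4 ticks, delta = 4 - 1 - (16 >> 3) = 1
         * and srtt becomes 17, i.e. the estimate moves one eighth of the way
         * toward the new sample.
         */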
1788 delta = rtt - 1 - (tp->t_srtt >> TCP_RTT_SHIFT);
1789 if ((tp->t_srtt += delta) <= 0)
1790 tp->t_srtt = 1;
1791 /*
1792 * We accumulate a smoothed rtt variance (actually, a
1793 * smoothed mean difference), then set the retransmit
1794 * timer to smoothed rtt + 4 times the smoothed variance.
1795 * rttvar is stored as fixed point with 2 bits after the
1796 * binary point (scaled by 4). The following is
1797 * equivalent to rfc793 smoothing with an alpha of .75
1798 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
1799 * rfc793's wired-in beta.
1800 */
1801 if (delta < 0)
1802 delta = -delta;
1803 delta -= (tp->t_rttvar >> TCP_RTTVAR_SHIFT);
1804 if ((tp->t_rttvar += delta) <= 0)
1805 tp->t_rttvar = 1;
1806 }
1807 else
1808 {
1809 /*
1810 * No rtt measurement yet - use the unsmoothed rtt.
1811 * Set the variance to half the rtt (so our first
1812 * retransmit happens at 3*rtt).
1813 */
1814 tp->t_srtt = rtt << TCP_RTT_SHIFT;
1815 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
1816 }
1817 tp->t_rtt = 0;
1818 tp->t_rxtshift = 0;
1819
1820 /*
1821 * the retransmit should happen at rtt + 4 * rttvar.
1822 * Because of the way we do the smoothing, srtt and rttvar
1823 * will each average +1/2 tick of bias. When we compute
1824 * the retransmit timer, we want 1/2 tick of rounding and
1825 * 1 extra tick because of +-1/2 tick uncertainty in the
1826 * firing of the timer. The bias will give us exactly the
1827 * 1.5 tick we need. But, because the bias is
1828 * statistical, we have to test that we don't drop below
1829 * the minimum feasible timer (which is 2 ticks).
1830 */
1831 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
1832 (short)tp->t_rttmin, TCPTV_REXMTMAX); /* XXX */
1833
1834 /*
1835 * We received an ack for a packet that wasn't retransmitted;
1836 * it is probably safe to discard any error indications we've
1837 * received recently. This isn't quite right, but close enough
1838 * for now (a route might have failed after we sent a segment,
1839 * and the return path might not be symmetrical).
1840 */
1841 tp->t_softerror = 0;
1842}
1843
1844/*
1845 * Determine a reasonable value for maxseg size.
1846 * If the route is known, check route for mtu.
1847 * If none, use an mss that can be handled on the outgoing
1848 * interface without forcing IP to fragment; if bigger than
1849 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
1850 * to utilize large mbufs. If no route is found, route has no mtu,
1851 * or the destination isn't local, use a default, hopefully conservative
1852 * size (usually 512 or the default IP max size, but no more than the mtu
1853 * of the interface), as we can't discover anything about intervening
1854 * gateways or networks. We also initialize the congestion/slow start
1855 * window to be a single segment if the destination isn't local.
1856 * While looking at the routing entry, we also initialize other path-dependent
1857 * parameters from pre-set or cached values in the routing entry.
1858 */
1859
1860int
1861tcp_mss(PNATState pData, register struct tcpcb *tp, u_int offer)
1862{
1863 struct socket *so = tp->t_socket;
1864 int mss;
1865
1866 DEBUG_CALL("tcp_mss");
1867 DEBUG_ARG("tp = %lx", (long)tp);
1868 DEBUG_ARG("offer = %d", offer);
1869
1870 mss = min(if_mtu, if_mru) - sizeof(struct tcpiphdr);
1871 if (offer)
1872 mss = min(mss, offer);
1873 mss = max(mss, 32);
1874 if (mss < tp->t_maxseg || offer != 0)
1875 tp->t_maxseg = mss;
1876
1877 tp->snd_cwnd = mss;
1878
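    /*
     * The two sbreserve() calls below round the configured buffer sizes up
     * to a whole number of segments.  For example, if tcp_sndspace were 8192
     * and mss 1460, the reserve would be 8192 + (1460 - 8192 % 1460) = 8760,
     * i.e. exactly six segments.
     */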
1879 sbreserve(pData, &so->so_snd, tcp_sndspace+((tcp_sndspace%mss)?(mss-(tcp_sndspace%mss)):0));
1880 sbreserve(pData, &so->so_rcv, tcp_rcvspace+((tcp_rcvspace%mss)?(mss-(tcp_rcvspace%mss)):0));
1881
1882 DEBUG_MISC((dfd, " returning mss = %d\n", mss));
1883
1884 return mss;
1885}