VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/tcp_input.c@53814

Last change on this file since 53814 was 52798, checked in by vboxsync, 11 years ago

NAT: when an outgoing connect(2) fails, decide what to do based on the
errno. The code that calls icmp_error() is a bit icky because
tcp_input() vivisects the incoming datagram, so the original mbuf can't
be used.
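
In brief, tcp_fconnect_failed() at the bottom of the file maps the errno to
an action roughly as follows (condensed from the code below; the mbuf rebuild
from the saved header copy and the error paths are omitted):

    if (sockerr == ECONNREFUSED || sockerr == ECONNRESET)
        TCP_INPUT(pData, NULL, 0, so);  /* hand back to tcp_input() to answer the guest's SYN with RST */
    else if (sockerr == ENETUNREACH || sockerr == ENETDOWN)
        code = ICMP_UNREACH_NET;        /* icmp_error() with the saved IP header, then tcp_close() */
    else if (sockerr == EHOSTUNREACH || sockerr == EHOSTDOWN)
        code = ICMP_UNREACH_HOST;
    else
        code = -1;                      /* no ICMP mapping: just tcp_close() */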

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 67.0 KB
1/* $Id: tcp_input.c 52798 2014-09-21 21:19:38Z vboxsync $ */
2/** @file
3 * NAT - TCP input.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*
19 * This code is based on:
20 *
21 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994
22 * The Regents of the University of California. All rights reserved.
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 * 1. Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * 2. Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in the
31 * documentation and/or other materials provided with the distribution.
32 * 3. All advertising materials mentioning features or use of this software
33 * must display the following acknowledgement:
34 * This product includes software developed by the University of
35 * California, Berkeley and its contributors.
36 * 4. Neither the name of the University nor the names of its contributors
37 * may be used to endorse or promote products derived from this software
38 * without specific prior written permission.
39 *
40 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50 * SUCH DAMAGE.
51 *
52 * @(#)tcp_input.c 8.5 (Berkeley) 4/10/94
53 * tcp_input.c,v 1.10 1994/10/13 18:36:32 wollman Exp
54 */
55
56/*
57 * Changes and additions relating to SLiRP
58 * Copyright (c) 1995 Danny Gasparovski.
59 *
60 * Please read the file COPYRIGHT for the
61 * terms and conditions of the copyright.
62 */
63
64#include <slirp.h>
65#include "ip_icmp.h"
66
67
68#if 0 /* code using these macros is commented out */
69# define TCP_PAWS_IDLE (24 * 24 * 60 * 60 * PR_SLOWHZ)
70
71/* for modulo comparisons of timestamps */
72# define TSTMP_LT(a, b) ((int)((a)-(b)) < 0)
73# define TSTMP_GEQ(a, b) ((int)((a)-(b)) >= 0)
74#endif
75
76#ifndef TCP_ACK_HACK
77#define DELAY_ACK(tp, ti) \
78 if (ti->ti_flags & TH_PUSH) \
79 tp->t_flags |= TF_ACKNOW; \
80 else \
81 tp->t_flags |= TF_DELACK;
82#else /* !TCP_ACK_HACK */
83#define DELAY_ACK(tp, ign) \
84 tp->t_flags |= TF_DELACK;
85#endif /* TCP_ACK_HACK */
86
87
88/*
89 * deps: netinet/tcp_reass.c
90 * tcp_reass_maxqlen = 48 (default)
91 * tcp_reass_maxseg = nmbclusters/16 (nmbclusters = 1024 + maxusers * 64 in kern/kern_mbuf.c; let's say 256)
92 */
93int
94tcp_reass(PNATState pData, struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m)
95{
96 struct tseg_qent *q;
97 struct tseg_qent *p = NULL;
98 struct tseg_qent *nq;
99 struct tseg_qent *te = NULL;
100 struct socket *so = tp->t_socket;
101 int flags;
102 STAM_PROFILE_START(&pData->StatTCP_reassamble, tcp_reassamble);
103 LogFlowFunc(("ENTER: pData:%p, tp:%R[tcpcb793], th:%p, tlenp:%p, m:%p\n", pData, tp, th, tlenp, m));
104
105 /*
106 * XXX: tcp_reass() is rather inefficient with its data structures
107 * and should be rewritten (see NetBSD for optimizations). While
108 * doing that it should move to its own file tcp_reass.c.
109 */
110
111 /*
112 * Call with th==NULL after becoming established to
113 * force pre-ESTABLISHED data up to user socket.
114 */
115 if (th == NULL)
116 {
117 LogFlowFunc(("%d -> present\n", __LINE__));
118 goto present;
119 }
120
121 /*
122 * Limit the number of segments in the reassembly queue to prevent
123 * holding on to too many segments (and thus running out of mbufs).
124 * Make sure to let through the missing segment that caused this
125 * queue to build up. Always keep one global queue entry spare to
126 * be able to process the missing segment.
127 */
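    /*
     * Illustration, using the defaults noted in the comment above
     * tcp_reass(): with tcp_reass_maxqlen = 48, the 49th out-of-order
     * segment on a connection is dropped here, but a segment whose
     * th_seq equals rcv_nxt -- the very hole the queue is waiting
     * for -- is always admitted, so the queue can drain via the
     * "present:" path below.
     */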
128 if ( th->th_seq != tp->rcv_nxt
129 && ( tcp_reass_qsize + 1 >= tcp_reass_maxseg
130 || tp->t_segqlen >= tcp_reass_maxqlen))
131 {
132 tcp_reass_overflows++;
133 tcpstat.tcps_rcvmemdrop++;
134 m_freem(pData, m);
135 *tlenp = 0;
136 STAM_PROFILE_STOP(&pData->StatTCP_reassamble, tcp_reassamble);
137 LogFlowFuncLeave();
138 return (0);
139 }
140
141 /*
142 * Allocate a new queue entry. If we can't, or hit the zone limit
143 * just drop the pkt.
144 */
145 te = RTMemAlloc(sizeof(struct tseg_qent));
146 if (te == NULL)
147 {
148 tcpstat.tcps_rcvmemdrop++;
149 m_freem(pData, m);
150 *tlenp = 0;
151 STAM_PROFILE_STOP(&pData->StatTCP_reassamble, tcp_reassamble);
152 LogFlowFuncLeave();
153 return (0);
154 }
155 tp->t_segqlen++;
156 tcp_reass_qsize++;
157
158 /*
159 * Find a segment which begins after this one does.
160 */
161 LIST_FOREACH(q, &tp->t_segq, tqe_q)
162 {
163 if (SEQ_GT(q->tqe_th->th_seq, th->th_seq))
164 break;
165 p = q;
166 }
167
168 /*
169 * If there is a preceding segment, it may provide some of
170 * our data already. If so, drop the data from the incoming
171 * segment. If it provides all of our data, drop us.
172 */
173 if (p != NULL)
174 {
175 int i;
176 /* conversion to int (in i) handles seq wraparound */
177 i = p->tqe_th->th_seq + p->tqe_len - th->th_seq;
178 if (i > 0)
179 {
180 if (i >= *tlenp)
181 {
182 tcpstat.tcps_rcvduppack++;
183 tcpstat.tcps_rcvdupbyte += *tlenp;
184 m_freem(pData, m);
185 RTMemFree(te);
186 tp->t_segqlen--;
187 tcp_reass_qsize--;
188 /*
189 * Try to present any queued data
190 * at the left window edge to the user.
191 * This is needed after the 3-WHS
192 * completes.
193 */
194 LogFlowFunc(("%d -> present\n", __LINE__));
195 goto present; /* ??? */
196 }
197 m_adj(m, i);
198 *tlenp -= i;
199 th->th_seq += i;
200 }
201 }
202 tcpstat.tcps_rcvoopack++;
203 tcpstat.tcps_rcvoobyte += *tlenp;
204
205 /*
206 * While we overlap succeeding segments trim them or,
207 * if they are completely covered, dequeue them.
208 */
209 while (q)
210 {
211 int i = (th->th_seq + *tlenp) - q->tqe_th->th_seq;
212 if (i <= 0)
213 break;
214 if (i < q->tqe_len)
215 {
216 q->tqe_th->th_seq += i;
217 q->tqe_len -= i;
218 m_adj(q->tqe_m, i);
219 break;
220 }
221
222 nq = LIST_NEXT(q, tqe_q);
223 LIST_REMOVE(q, tqe_q);
224 m_freem(pData, q->tqe_m);
225 RTMemFree(q);
226 tp->t_segqlen--;
227 tcp_reass_qsize--;
228 q = nq;
229 }
230
231 /* Insert the new segment queue entry into place. */
232 te->tqe_m = m;
233 te->tqe_th = th;
234 te->tqe_len = *tlenp;
235
236 if (p == NULL)
237 {
238 LIST_INSERT_HEAD(&tp->t_segq, te, tqe_q);
239 }
240 else
241 {
242 LIST_INSERT_AFTER(p, te, tqe_q);
243 }
244
245present:
246 /*
247 * Present data to user, advancing rcv_nxt through
248 * completed sequence space.
249 */
250 if (!TCPS_HAVEESTABLISHED(tp->t_state))
251 {
252 STAM_PROFILE_STOP(&pData->StatTCP_reassamble, tcp_reassamble);
253 return (0);
254 }
255 q = LIST_FIRST(&tp->t_segq);
256 if (!q || q->tqe_th->th_seq != tp->rcv_nxt)
257 {
258 STAM_PROFILE_STOP(&pData->StatTCP_reassamble, tcp_reassamble);
259 return (0);
260 }
261 do
262 {
263 tp->rcv_nxt += q->tqe_len;
264 flags = q->tqe_th->th_flags & TH_FIN;
265 nq = LIST_NEXT(q, tqe_q);
266 LIST_REMOVE(q, tqe_q);
267 /* XXX: Double-check this against the original BSD code: Slirp
268 * checks SS_FCANTSENDMORE here while current BSD uses SS_FCANTRCVMORE.
269 */
270 if (so->so_state & SS_FCANTSENDMORE)
271 m_freem(pData, q->tqe_m);
272 else
273 sbappend(pData, so, q->tqe_m);
274 RTMemFree(q);
275 tp->t_segqlen--;
276 tcp_reass_qsize--;
277 q = nq;
278 }
279 while (q && q->tqe_th->th_seq == tp->rcv_nxt);
280
281 STAM_PROFILE_STOP(&pData->StatTCP_reassamble, tcp_reassamble);
282 return flags;
283}
284
285/*
286 * TCP input routine, follows pages 65-76 of the
287 * protocol specification dated September, 1981 very closely.
288 */
289void
290tcp_input(PNATState pData, register struct mbuf *m, int iphlen, struct socket *inso)
291{
292 struct ip *ip, *save_ip;
293 register struct tcpiphdr *ti;
294 caddr_t optp = NULL;
295 int optlen = 0;
296 int len, tlen, off;
297 register struct tcpcb *tp = 0;
298 register int tiflags;
299 struct socket *so = 0;
300 int todrop, acked, ourfinisacked, needoutput = 0;
301/* int dropsocket = 0; */
302 int iss = 0;
303 u_long tiwin;
304/* int ts_present = 0; */
305 size_t ohdrlen;
306 uint8_t ohdr[60 + 8]; /* max IP header plus 8 bytes of payload for icmp */
307
308 STAM_PROFILE_START(&pData->StatTCP_input, counter_input);
309
310 LogFlow(("tcp_input: m = %8lx, iphlen = %2d, inso = %R[natsock]\n",
311 (long)m, iphlen, inso));
312
313 if (inso != NULL)
314 {
315 QSOCKET_LOCK(tcb);
316 SOCKET_LOCK(inso);
317 QSOCKET_UNLOCK(tcb);
318 }
319 /*
320 * If called with m == 0, then we're continuing the connect
321 */
322 if (m == NULL)
323 {
324 so = inso;
325 Log4(("NAT: tcp_input: %R[natsock]\n", so));
326 /* Re-set a few variables */
327 tp = sototcpcb(so);
328 m = so->so_m;
329 so->so_m = 0;
330
331 if (RT_LIKELY(so->so_ohdr != NULL))
332 {
333 RTMemFree(so->so_ohdr);
334 so->so_ohdr = NULL;
335 }
336
337 ti = so->so_ti;
338
339 /** @todo (vvl) clarify why this might happen */
340 if (ti == NULL)
341 {
342 LogRel(("NAT: ti is null. can't do any connection resetting actions\n"));
343 /* mbuf should be cleared in sofree called from tcp_close */
344 tcp_close(pData, tp);
345 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
346 LogFlowFuncLeave();
347 return;
348 }
349
350 tiwin = ti->ti_win;
351 tiflags = ti->ti_flags;
352
353 LogFlowFunc(("%d -> cont_conn\n", __LINE__));
354 goto cont_conn;
355 }
356
357 tcpstat.tcps_rcvtotal++;
358
359 ip = mtod(m, struct ip *);
360
361 /* ip_input() subtracts iphlen from ip::ip_len */
362 AssertStmt((ip->ip_len + iphlen == m_length(m, NULL)), goto drop);
363 if (RT_UNLIKELY(ip->ip_len < sizeof(struct tcphdr)))
364 {
365 /* tcps_rcvshort++; */
366 goto drop;
367 }
368
369 /*
370 * Save a copy of the IP header in case we want to restore it for
371 * sending an ICMP error message in response.
372 *
373 * XXX: This function should really be fixed to not strip IP
374 * options, to not overwrite IP header and to use "tlen" local
375 * variable (instead of ti->ti_len), then "m" could be passed to
376 * icmp_error() directly.
377 */
378 ohdrlen = iphlen + 8;
379 m_copydata(m, 0, ohdrlen, (caddr_t)ohdr);
380 save_ip = (struct ip *)ohdr;
381 save_ip->ip_len += iphlen; /* undo change by ip_input() */
382
383
384 /*
385 * Get IP and TCP header together in first mbuf.
386 * Note: IP leaves IP header in first mbuf.
387 */
388 ti = mtod(m, struct tcpiphdr *);
389 if (iphlen > sizeof(struct ip))
390 {
391 ip_stripoptions(m, (struct mbuf *)0);
392 iphlen = sizeof(struct ip);
393 }
394
395 /*
396 * Checksum extended TCP header and data.
397 */
398 tlen = ((struct ip *)ti)->ip_len;
399 memset(ti->ti_x1, 0, 9);
400 ti->ti_len = RT_H2N_U16((u_int16_t)tlen);
401 len = sizeof(struct ip) + tlen;
402 /* keep checksum for ICMP reply
403 * ti->ti_sum = cksum(m, len);
404 * if (ti->ti_sum) { */
405 if (cksum(m, len))
406 {
407 tcpstat.tcps_rcvbadsum++;
408 LogFlowFunc(("%d -> drop\n", __LINE__));
409 goto drop;
410 }
411
412 /*
413 * Check that TCP offset makes sense,
414 * pull out TCP options and adjust length. XXX
415 */
416 off = ti->ti_off << 2;
417 if ( off < sizeof (struct tcphdr)
418 || off > tlen)
419 {
420 tcpstat.tcps_rcvbadoff++;
421 LogFlowFunc(("%d -> drop\n", __LINE__));
422 goto drop;
423 }
424 tlen -= off;
425 ti->ti_len = tlen;
426 if (off > sizeof (struct tcphdr))
427 {
428 optlen = off - sizeof (struct tcphdr);
429 optp = mtod(m, caddr_t) + sizeof (struct tcpiphdr);
430
431 /*
432 * Do quick retrieval of timestamp options ("options
433 * prediction?"). If timestamp is the only option and it's
434 * formatted as recommended in RFC 1323 appendix A, we
435 * quickly get the values now and not bother calling
436 * tcp_dooptions(), etc.
437 */
438#if 0
439 if (( optlen == TCPOLEN_TSTAMP_APPA
440 || ( optlen > TCPOLEN_TSTAMP_APPA
441 && optp[TCPOLEN_TSTAMP_APPA] == TCPOPT_EOL)) &&
442 *(u_int32_t *)optp == RT_H2N_U32_C(TCPOPT_TSTAMP_HDR) &&
443 (ti->ti_flags & TH_SYN) == 0)
444 {
445 ts_present = 1;
446 ts_val = RT_N2H_U32(*(u_int32_t *)(optp + 4));
447 ts_ecr = RT_N2H_U32(*(u_int32_t *)(optp + 8));
448 optp = NULL; /* we have parsed the options */
449 }
450#endif
451 }
452 tiflags = ti->ti_flags;
453
454 /*
455 * Convert TCP protocol specific fields to host format.
456 */
457 NTOHL(ti->ti_seq);
458 NTOHL(ti->ti_ack);
459 NTOHS(ti->ti_win);
460 NTOHS(ti->ti_urp);
461
462 /*
463 * Drop TCP, IP headers and TCP options.
464 */
465 m->m_data += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
466 m->m_len -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
467
468 /*
469 * Locate pcb for segment.
470 */
471findso:
472 LogFlowFunc(("(enter) findso: %R[natsock]\n", so));
473 if (so != NULL && so != &tcb)
474 SOCKET_UNLOCK(so);
475 QSOCKET_LOCK(tcb);
476 so = tcp_last_so;
477 if ( so->so_fport != ti->ti_dport
478 || so->so_lport != ti->ti_sport
479 || so->so_laddr.s_addr != ti->ti_src.s_addr
480 || so->so_faddr.s_addr != ti->ti_dst.s_addr)
481 {
482 QSOCKET_UNLOCK(tcb);
483 /* @todo fix SOLOOKUP macro definition to be usable here */
484 so = solookup(&tcb, ti->ti_src, ti->ti_sport,
485 ti->ti_dst, ti->ti_dport);
486 if (so)
487 {
488 tcp_last_so = so;
489 }
490 ++tcpstat.tcps_socachemiss;
491 }
492 else
493 {
494 SOCKET_LOCK(so);
495 QSOCKET_UNLOCK(tcb);
496 }
497 LogFlowFunc(("(leave) findso: %R[natsock]\n", so));
498
499 /*
500 * If the state is CLOSED (i.e., TCB does not exist) then
501 * all data in the incoming segment is discarded.
502 * If the TCB exists but is in CLOSED state, it is embryonic,
503 * but should either do a listen or a connect soon.
504 *
505 * state == CLOSED means we've done socreate() but haven't
506 * attached it to a protocol yet...
507 *
508 * XXX If a TCB does not exist, and the TH_SYN flag is
509 * the only flag set, then create a session, mark it
510 * as if it was LISTENING, and continue...
511 */
512 if (so == 0)
513 {
514 if ((tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) != TH_SYN)
515 {
516 LogFlowFunc(("%d -> dropwithreset\n", __LINE__));
517 goto dropwithreset;
518 }
519
520 if ((so = socreate()) == NULL)
521 {
522 LogFlowFunc(("%d -> dropwithreset\n", __LINE__));
523 goto dropwithreset;
524 }
525 if (tcp_attach(pData, so) < 0)
526 {
527 RTMemFree(so); /* Not sofree (if it failed, it's not insqued) */
528 LogFlowFunc(("%d -> dropwithreset\n", __LINE__));
529 goto dropwithreset;
530 }
531 SOCKET_LOCK(so);
532#ifndef VBOX_WITH_SLIRP_BSD_SBUF
533 sbreserve(pData, &so->so_snd, tcp_sndspace);
534 sbreserve(pData, &so->so_rcv, tcp_rcvspace);
535#else
536 sbuf_new(&so->so_snd, NULL, tcp_sndspace, SBUF_AUTOEXTEND);
537 sbuf_new(&so->so_rcv, NULL, tcp_rcvspace, SBUF_AUTOEXTEND);
538#endif
539
540/* tcp_last_so = so; */ /* XXX ? */
541/* tp = sototcpcb(so); */
542
543 so->so_laddr = ti->ti_src;
544 so->so_lport = ti->ti_sport;
545 so->so_faddr = ti->ti_dst;
546 so->so_fport = ti->ti_dport;
547
548 so->so_iptos = ((struct ip *)ti)->ip_tos;
549
550 tp = sototcpcb(so);
551 TCP_STATE_SWITCH_TO(tp, TCPS_LISTEN);
552 }
553
554 /*
555 * If this is a still-connecting socket, this is probably
556 * a retransmit of the SYN. Whether it's a retransmit SYN
557 * or something else, we nuke it.
558 */
559 if (so->so_state & SS_ISFCONNECTING)
560 {
561 LogFlowFunc(("%d -> drop\n", __LINE__));
562 goto drop;
563 }
564
565 tp = sototcpcb(so);
566
567 /* XXX Should never fail */
568 if (tp == 0)
569 {
570 LogFlowFunc(("%d -> dropwithreset\n", __LINE__));
571 goto dropwithreset;
572 }
573 if (tp->t_state == TCPS_CLOSED)
574 {
575 LogFlowFunc(("%d -> drop\n", __LINE__));
576 goto drop;
577 }
578
579 /* Unscale the window into a 32-bit value. */
580/* if ((tiflags & TH_SYN) == 0)
581 * tiwin = ti->ti_win << tp->snd_scale;
582 * else
583 */
584 tiwin = ti->ti_win;
585
586 /*
587 * Segment received on connection.
588 * Reset idle time and keep-alive timer.
589 */
590 tp->t_idle = 0;
591 if (so_options)
592 tp->t_timer[TCPT_KEEP] = tcp_keepintvl;
593 else
594 tp->t_timer[TCPT_KEEP] = tcp_keepidle;
595
596 /*
597 * Process options if not in LISTEN state,
598 * else do it below (after getting remote address).
599 */
600 if (optp && tp->t_state != TCPS_LISTEN)
601 tcp_dooptions(pData, tp, (u_char *)optp, optlen, ti);
602/* , */
603/* &ts_present, &ts_val, &ts_ecr); */
604
605 /*
606 * Header prediction: check for the two common cases
607 * of a uni-directional data xfer. If the packet has
608 * no control flags, is in-sequence, the window didn't
609 * change and we're not retransmitting, it's a
610 * candidate. If the length is zero and the ack moved
611 * forward, we're the sender side of the xfer. Just
612 * free the data acked & wake any higher level process
613 * that was blocked waiting for space. If the length
614 * is non-zero and the ack didn't move, we're the
615 * receiver side. If we're getting packets in-order
616 * (the reassembly queue is empty), add the data to
617 * the socket buffer and note that we need a delayed ack.
618 *
619 * XXX Some of these tests are not needed
620 * eg: the tiwin == tp->snd_wnd prevents many more
621 * predictions.. with no *real* advantage..
622 */
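    /*
     * Concretely: a segment with ACK set and none of SYN/FIN/RST/URG,
     * whose sequence number equals rcv_nxt, with an unchanged non-zero
     * window and nothing being retransmitted (snd_nxt == snd_max), takes
     * one of the two shortcuts below -- either a pure ACK for data we
     * sent (drop the acked bytes from so_snd and return) or an in-order
     * data segment that fits in so_rcv (append, ACK and return) --
     * without running the full state machine.
     */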
623 if ( tp->t_state == TCPS_ESTABLISHED
624 && (tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK
625/* && (!ts_present || TSTMP_GEQ(ts_val, tp->ts_recent)) */
626 && ti->ti_seq == tp->rcv_nxt
627 && tiwin && tiwin == tp->snd_wnd
628 && tp->snd_nxt == tp->snd_max)
629 {
630 /*
631 * If last ACK falls within this segment's sequence numbers,
632 * record the timestamp.
633 */
634#if 0
635 if (ts_present && SEQ_LEQ(ti->ti_seq, tp->last_ack_sent) &&
636 SEQ_LT(tp->last_ack_sent, ti->ti_seq + ti->ti_len))
637 {
638 tp->ts_recent_age = tcp_now;
639 tp->ts_recent = ts_val;
640 }
641#endif
642
643 if (ti->ti_len == 0)
644 {
645 if ( SEQ_GT(ti->ti_ack, tp->snd_una)
646 && SEQ_LEQ(ti->ti_ack, tp->snd_max)
647 && tp->snd_cwnd >= tp->snd_wnd)
648 {
649 /*
650 * this is a pure ack for outstanding data.
651 */
652 ++tcpstat.tcps_predack;
653#if 0
654 if (ts_present)
655 tcp_xmit_timer(tp, tcp_now-ts_ecr+1);
656 else
657#endif
658 if ( tp->t_rtt
659 && SEQ_GT(ti->ti_ack, tp->t_rtseq))
660 tcp_xmit_timer(pData, tp, tp->t_rtt);
661 acked = ti->ti_ack - tp->snd_una;
662 tcpstat.tcps_rcvackpack++;
663 tcpstat.tcps_rcvackbyte += acked;
664#ifndef VBOX_WITH_SLIRP_BSD_SBUF
665 sbdrop(&so->so_snd, acked);
666#else
667 if (sbuf_len(&so->so_snd) < acked)
668 /* drop everything the sbuf has */
669 sbuf_setpos(&so->so_snd, 0);
670 else
671 sbuf_setpos(&so->so_snd, sbuf_len(&so->so_snd) - acked);
672#endif
673 tp->snd_una = ti->ti_ack;
674 m_freem(pData, m);
675
676 /*
677 * If all outstanding data are acked, stop
678 * retransmit timer, otherwise restart timer
679 * using current (possibly backed-off) value.
680 * If process is waiting for space,
681 * wakeup/selwakeup/signal. If data
682 * are ready to send, let tcp_output
683 * decide between more output or persist.
684 */
685 if (tp->snd_una == tp->snd_max)
686 tp->t_timer[TCPT_REXMT] = 0;
687 else if (tp->t_timer[TCPT_PERSIST] == 0)
688 tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
689
690 /*
691 * There's room in so_snd, sowwakeup will read()
692 * from the socket if we can
693 */
694#if 0
695 if (so->so_snd.sb_flags & SB_NOTIFY)
696 sowwakeup(so);
697#endif
698 /*
699 * This is called because sowwakeup might have
700 * put data into so_snd. Since we don't do sowwakeup,
701 * we don't need this.. XXX???
702 */
703 if (SBUF_LEN(&so->so_snd))
704 (void) tcp_output(pData, tp);
705
706 SOCKET_UNLOCK(so);
707 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
708 return;
709 }
710 }
711 else if ( ti->ti_ack == tp->snd_una
712 && LIST_FIRST(&tp->t_segq)
713 && ti->ti_len <= sbspace(&so->so_rcv))
714 {
715 /*
716 * this is a pure, in-sequence data packet
717 * with nothing on the reassembly queue and
718 * we have enough buffer space to take it.
719 */
720 ++tcpstat.tcps_preddat;
721 tp->rcv_nxt += ti->ti_len;
722 tcpstat.tcps_rcvpack++;
723 tcpstat.tcps_rcvbyte += ti->ti_len;
724 /*
725 * Add data to socket buffer.
726 */
727 sbappend(pData, so, m);
728
729 /*
730 * XXX This is called when data arrives. Later, check
731 * if we can actually write() to the socket
732 * XXX Need to check? It'll be NON_BLOCKING
733 */
734/* sorwakeup(so); */
735
736 /*
737 * If this is a short packet, then ACK now - with Nagle
738 * congestion avoidance the sender won't send more until
739 * it gets an ACK.
740 *
741 * It is better to not delay acks at all to maximize
742 * TCP throughput. See RFC 2581.
743 */
744 tp->t_flags |= TF_ACKNOW;
745 tcp_output(pData, tp);
746 SOCKET_UNLOCK(so);
747 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
748 return;
749 }
750 } /* header prediction */
751 /*
752 * Calculate amount of space in receive window,
753 * and then do TCP input processing.
754 * Receive window is amount of space in rcv queue,
755 * but not less than advertised window.
756 */
757 {
758 int win;
759 win = sbspace(&so->so_rcv);
760 if (win < 0)
761 win = 0;
762 tp->rcv_wnd = max(win, (int)(tp->rcv_adv - tp->rcv_nxt));
763 }
764
765 switch (tp->t_state)
766 {
767 /*
768 * If the state is LISTEN then ignore segment if it contains an RST.
769 * If the segment contains an ACK then it is bad and send a RST.
770 * If it does not contain a SYN then it is not interesting; drop it.
771 * Don't bother responding if the destination was a broadcast.
772 * Otherwise initialize tp->rcv_nxt, and tp->irs, select an initial
773 * tp->iss, and send a segment:
774 * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
775 * Also initialize tp->snd_nxt to tp->iss+1 and tp->snd_una to tp->iss.
776 * Fill in remote peer address fields if not previously specified.
777 * Enter SYN_RECEIVED state, and process any other fields of this
778 * segment in this state.
779 */
780 case TCPS_LISTEN:
781 {
782 if (tiflags & TH_RST)
783 {
784 LogFlowFunc(("%d -> drop\n", __LINE__));
785 goto drop;
786 }
787 if (tiflags & TH_ACK)
788 {
789 LogFlowFunc(("%d -> dropwithreset\n", __LINE__));
790 goto dropwithreset;
791 }
792 if ((tiflags & TH_SYN) == 0)
793 {
794 LogFlowFunc(("%d -> drop\n", __LINE__));
795 goto drop;
796 }
797
798 /*
799 * This has way too many gotos...
800 * But a bit of spaghetti code never hurt anybody :)
801 */
802 if ( (tcp_fconnect(pData, so) == -1)
803 && errno != EINPROGRESS
804 && errno != EWOULDBLOCK)
805 {
806 u_char code = ICMP_UNREACH_NET;
807 Log2((" tcp fconnect errno = %d (%s)\n", errno, strerror(errno)));
808 if (errno == ECONNREFUSED)
809 {
810 /* ACK the SYN, send RST to refuse the connection */
811 tcp_respond(pData, tp, ti, m, ti->ti_seq+1, (tcp_seq)0,
812 TH_RST|TH_ACK);
813 }
814 else
815 {
816 if (errno == EHOSTUNREACH)
817 code = ICMP_UNREACH_HOST;
818 HTONL(ti->ti_seq); /* restore tcp header */
819 HTONL(ti->ti_ack);
820 HTONS(ti->ti_win);
821 HTONS(ti->ti_urp);
822 m->m_data -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
823 m->m_len += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
824 *ip = *save_ip;
825 icmp_error(pData, m, ICMP_UNREACH, code, 0, strerror(errno));
826 tp->t_socket->so_m = NULL;
827 }
828 tp = tcp_close(pData, tp);
829 }
830 else
831 {
832 /*
833 * Haven't connected yet, save the current mbuf
834 * and ti, and return
835 * XXX Some OS's don't tell us whether the connect()
836 * succeeded or not. So we must time it out.
837 */
838 so->so_m = m;
839 so->so_ti = ti;
840 so->so_ohdr = RTMemDup(ohdr, ohdrlen);
841 tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT;
842 TCP_STATE_SWITCH_TO(tp, TCPS_SYN_RECEIVED);
843 }
844 SOCKET_UNLOCK(so);
845 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
846 LogFlowFuncLeave();
847 return;
848
849cont_conn:
850 /* m==NULL
851 * Check if the connect succeeded
852 */
853 LogFlowFunc(("cont_conn:\n"));
854 if (so->so_state & SS_NOFDREF)
855 {
856 tp = tcp_close(pData, tp);
857 LogFlowFunc(("%d -> dropwithreset\n", __LINE__));
858 goto dropwithreset;
859 }
860
861 tcp_template(tp);
862
863 if (optp)
864 tcp_dooptions(pData, tp, (u_char *)optp, optlen, ti);
865
866 if (iss)
867 tp->iss = iss;
868 else
869 tp->iss = tcp_iss;
870 tcp_iss += TCP_ISSINCR/2;
871 tp->irs = ti->ti_seq;
872 tcp_sendseqinit(tp);
873 tcp_rcvseqinit(tp);
874 tp->t_flags |= TF_ACKNOW;
875 TCP_STATE_SWITCH_TO(tp, TCPS_SYN_RECEIVED);
876 tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT;
877 tcpstat.tcps_accepts++;
878 LogFlowFunc(("%d -> trimthenstep6\n", __LINE__));
879 goto trimthenstep6;
880 } /* case TCPS_LISTEN */
881
882 /*
883 * If the state is SYN_SENT:
884 * if seg contains an ACK, but not for our SYN, drop the input.
885 * if seg contains a RST, then drop the connection.
886 * if seg does not contain SYN, then drop it.
887 * Otherwise this is an acceptable SYN segment
888 * initialize tp->rcv_nxt and tp->irs
889 * if seg contains ack then advance tp->snd_una
890 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
891 * arrange for segment to be acked (eventually)
892 * continue processing rest of data/controls, beginning with URG
893 */
894 case TCPS_SYN_SENT:
895 if ( (tiflags & TH_ACK)
896 && ( SEQ_LEQ(ti->ti_ack, tp->iss)
897 || SEQ_GT(ti->ti_ack, tp->snd_max)))
898 {
899 LogFlowFunc(("%d -> dropwithreset\n", __LINE__));
900 goto dropwithreset;
901 }
902
903 if (tiflags & TH_RST)
904 {
905 if (tiflags & TH_ACK)
906 tp = tcp_drop(pData, tp, 0); /* XXX Check t_softerror! */
907 LogFlowFunc(("%d -> drop\n", __LINE__));
908 goto drop;
909 }
910
911 if ((tiflags & TH_SYN) == 0)
912 {
913 LogFlowFunc(("%d -> drop\n", __LINE__));
914 goto drop;
915 }
916 if (tiflags & TH_ACK)
917 {
918 tp->snd_una = ti->ti_ack;
919 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
920 tp->snd_nxt = tp->snd_una;
921 }
922
923 tp->t_timer[TCPT_REXMT] = 0;
924 tp->irs = ti->ti_seq;
925 tcp_rcvseqinit(tp);
926 tp->t_flags |= TF_ACKNOW;
927 if (tiflags & TH_ACK && SEQ_GT(tp->snd_una, tp->iss))
928 {
929 tcpstat.tcps_connects++;
930 soisfconnected(so);
931 TCP_STATE_SWITCH_TO(tp, TCPS_ESTABLISHED);
932
933 /* Do window scaling on this connection? */
934#if 0
935 if (( tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE))
936 == (TF_RCVD_SCALE|TF_REQ_SCALE))
937 {
938 tp->snd_scale = tp->requested_s_scale;
939 tp->rcv_scale = tp->request_r_scale;
940 }
941#endif
942 (void) tcp_reass(pData, tp, (struct tcphdr *)0, NULL, (struct mbuf *)0);
943 /*
944 * if we didn't have to retransmit the SYN,
945 * use its rtt as our initial srtt & rtt var.
946 */
947 if (tp->t_rtt)
948 tcp_xmit_timer(pData, tp, tp->t_rtt);
949 }
950 else
951 TCP_STATE_SWITCH_TO(tp, TCPS_SYN_RECEIVED);
952
953trimthenstep6:
954 LogFlowFunc(("trimthenstep6:\n"));
955 /*
956 * Advance ti->ti_seq to correspond to first data byte.
957 * If data, trim to stay within window,
958 * dropping FIN if necessary.
959 */
960 ti->ti_seq++;
961 if (ti->ti_len > tp->rcv_wnd)
962 {
963 todrop = ti->ti_len - tp->rcv_wnd;
964 m_adj(m, -todrop);
965 ti->ti_len = tp->rcv_wnd;
966 tiflags &= ~TH_FIN;
967 tcpstat.tcps_rcvpackafterwin++;
968 tcpstat.tcps_rcvbyteafterwin += todrop;
969 }
970 tp->snd_wl1 = ti->ti_seq - 1;
971 tp->rcv_up = ti->ti_seq;
972 LogFlowFunc(("%d -> step6\n", __LINE__));
973 goto step6;
974 } /* switch tp->t_state */
975 /*
976 * States other than LISTEN or SYN_SENT.
977 * First check timestamp, if present.
978 * Then check that at least some bytes of segment are within
979 * receive window. If segment begins before rcv_nxt,
980 * drop leading data (and SYN); if nothing left, just ack.
981 *
982 * RFC 1323 PAWS: If we have a timestamp reply on this segment
983 * and it's less than ts_recent, drop it.
984 */
985#if 0
986 if ( ts_present
987 && (tiflags & TH_RST) == 0
988 && tp->ts_recent
989 && TSTMP_LT(ts_val, tp->ts_recent))
990 {
991 /* Check to see if ts_recent is over 24 days old. */
992 if ((int)(tcp_now - tp->ts_recent_age) > TCP_PAWS_IDLE)
993 {
994 /*
995 * Invalidate ts_recent. If this segment updates
996 * ts_recent, the age will be reset later and ts_recent
997 * will get a valid value. If it does not, setting
998 * ts_recent to zero will at least satisfy the
999 * requirement that zero be placed in the timestamp
1000 * echo reply when ts_recent isn't valid. The
1001 * age isn't reset until we get a valid ts_recent
1002 * because we don't want out-of-order segments to be
1003 * dropped when ts_recent is old.
1004 */
1005 tp->ts_recent = 0;
1006 }
1007 else
1008 {
1009 tcpstat.tcps_rcvduppack++;
1010 tcpstat.tcps_rcvdupbyte += ti->ti_len;
1011 tcpstat.tcps_pawsdrop++;
1012 goto dropafterack;
1013 }
1014 }
1015#endif
1016
1017 todrop = tp->rcv_nxt - ti->ti_seq;
1018 if (todrop > 0)
1019 {
1020 if (tiflags & TH_SYN)
1021 {
1022 tiflags &= ~TH_SYN;
1023 ti->ti_seq++;
1024 if (ti->ti_urp > 1)
1025 ti->ti_urp--;
1026 else
1027 tiflags &= ~TH_URG;
1028 todrop--;
1029 }
1030 /*
1031 * Following if statement from Stevens, vol. 2, p. 960.
1032 */
1033 if ( todrop > ti->ti_len
1034 || ( todrop == ti->ti_len
1035 && (tiflags & TH_FIN) == 0))
1036 {
1037 /*
1038 * Any valid FIN must be to the left of the window.
1039 * At this point the FIN must be a duplicate or out
1040 * of sequence; drop it.
1041 */
1042 tiflags &= ~TH_FIN;
1043
1044 /*
1045 * Send an ACK to resynchronize and drop any data.
1046 * But keep on processing for RST or ACK.
1047 */
1048 tp->t_flags |= TF_ACKNOW;
1049 todrop = ti->ti_len;
1050 tcpstat.tcps_rcvduppack++;
1051 tcpstat.tcps_rcvdupbyte += todrop;
1052 }
1053 else
1054 {
1055 tcpstat.tcps_rcvpartduppack++;
1056 tcpstat.tcps_rcvpartdupbyte += todrop;
1057 }
1058 m_adj(m, todrop);
1059 ti->ti_seq += todrop;
1060 ti->ti_len -= todrop;
1061 if (ti->ti_urp > todrop)
1062 ti->ti_urp -= todrop;
1063 else
1064 {
1065 tiflags &= ~TH_URG;
1066 ti->ti_urp = 0;
1067 }
1068 }
1069 /*
1070 * If new data are received on a connection after the
1071 * user processes are gone, then RST the other end.
1072 */
1073 if ( (so->so_state & SS_NOFDREF)
1074 && tp->t_state > TCPS_CLOSE_WAIT && ti->ti_len)
1075 {
1076 tp = tcp_close(pData, tp);
1077 tcpstat.tcps_rcvafterclose++;
1078 LogFlowFunc(("%d -> dropwithreset\n", __LINE__));
1079 goto dropwithreset;
1080 }
1081
1082 /*
1083 * If segment ends after window, drop trailing data
1084 * (and PUSH and FIN); if nothing left, just ACK.
1085 */
1086 todrop = (ti->ti_seq+ti->ti_len) - (tp->rcv_nxt+tp->rcv_wnd);
1087 if (todrop > 0)
1088 {
1089 tcpstat.tcps_rcvpackafterwin++;
1090 if (todrop >= ti->ti_len)
1091 {
1092 tcpstat.tcps_rcvbyteafterwin += ti->ti_len;
1093 /*
1094 * If a new connection request is received
1095 * while in TIME_WAIT, drop the old connection
1096 * and start over if the sequence numbers
1097 * are above the previous ones.
1098 */
1099 if ( tiflags & TH_SYN
1100 && tp->t_state == TCPS_TIME_WAIT
1101 && SEQ_GT(ti->ti_seq, tp->rcv_nxt))
1102 {
1103 iss = tp->rcv_nxt + TCP_ISSINCR;
1104 tp = tcp_close(pData, tp);
1105 SOCKET_UNLOCK(tp->t_socket);
1106 LogFlowFunc(("%d -> findso\n", __LINE__));
1107 goto findso;
1108 }
1109 /*
1110 * If window is closed can only take segments at
1111 * window edge, and have to drop data and PUSH from
1112 * incoming segments. Continue processing, but
1113 * remember to ack. Otherwise, drop segment
1114 * and ack.
1115 */
1116 if (tp->rcv_wnd == 0 && ti->ti_seq == tp->rcv_nxt)
1117 {
1118 tp->t_flags |= TF_ACKNOW;
1119 tcpstat.tcps_rcvwinprobe++;
1120 }
1121 else
1122 {
1123 LogFlowFunc(("%d -> dropafterack\n", __LINE__));
1124 goto dropafterack;
1125 }
1126 }
1127 else
1128 tcpstat.tcps_rcvbyteafterwin += todrop;
1129 m_adj(m, -todrop);
1130 ti->ti_len -= todrop;
1131 tiflags &= ~(TH_PUSH|TH_FIN);
1132 }
1133
1134 /*
1135 * If last ACK falls within this segment's sequence numbers,
1136 * record its timestamp.
1137 */
1138#if 0
1139 if ( ts_present
1140 && SEQ_LEQ(ti->ti_seq, tp->last_ack_sent)
1141 && SEQ_LT(tp->last_ack_sent, ti->ti_seq + ti->ti_len + ((tiflags & (TH_SYN|TH_FIN)) != 0)))
1142 {
1143 tp->ts_recent_age = tcp_now;
1144 tp->ts_recent = ts_val;
1145 }
1146#endif
1147
1148 /*
1149 * If the RST bit is set examine the state:
1150 * SYN_RECEIVED STATE:
1151 * If passive open, return to LISTEN state.
1152 * If active open, inform user that connection was refused.
1153 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT2, CLOSE_WAIT STATES:
1154 * Inform user that connection was reset, and close tcb.
1155 * CLOSING, LAST_ACK, TIME_WAIT STATES
1156 * Close the tcb.
1157 */
1158 if (tiflags&TH_RST)
1159 switch (tp->t_state)
1160 {
1161 case TCPS_SYN_RECEIVED:
1162/* so->so_error = ECONNREFUSED; */
1163 LogFlowFunc(("%d -> close\n", __LINE__));
1164 goto close;
1165
1166 case TCPS_ESTABLISHED:
1167 case TCPS_FIN_WAIT_1:
1168 case TCPS_FIN_WAIT_2:
1169 case TCPS_CLOSE_WAIT:
1170/* so->so_error = ECONNRESET; */
1171close:
1172 LogFlowFunc(("close:\n"));
1173 TCP_STATE_SWITCH_TO(tp, TCPS_CLOSED);
1174 tcpstat.tcps_drops++;
1175 tp = tcp_close(pData, tp);
1176 LogFlowFunc(("%d -> drop\n", __LINE__));
1177 goto drop;
1178
1179 case TCPS_CLOSING:
1180 case TCPS_LAST_ACK:
1181 case TCPS_TIME_WAIT:
1182 tp = tcp_close(pData, tp);
1183 LogFlowFunc(("%d -> drop\n", __LINE__));
1184 goto drop;
1185 }
1186
1187 /*
1188 * If a SYN is in the window, then this is an
1189 * error and we send an RST and drop the connection.
1190 */
1191 if (tiflags & TH_SYN)
1192 {
1193 tp = tcp_drop(pData, tp, 0);
1194 LogFlowFunc(("%d -> dropwithreset\n", __LINE__));
1195 goto dropwithreset;
1196 }
1197
1198 /*
1199 * If the ACK bit is off we drop the segment and return.
1200 */
1201 if ((tiflags & TH_ACK) == 0)
1202 {
1203 LogFlowFunc(("%d -> drop\n", __LINE__));
1204 goto drop;
1205 }
1206
1207 /*
1208 * Ack processing.
1209 */
1210 switch (tp->t_state)
1211 {
1212 /*
1213 * In SYN_RECEIVED state if the ack ACKs our SYN then enter
1214 * ESTABLISHED state and continue processing, otherwise
1215 * send an RST. una<=ack<=max
1216 */
1217 case TCPS_SYN_RECEIVED:
1218 LogFlowFunc(("%d -> TCPS_SYN_RECEIVED\n", __LINE__));
1219 if ( SEQ_GT(tp->snd_una, ti->ti_ack)
1220 || SEQ_GT(ti->ti_ack, tp->snd_max))
1221 goto dropwithreset;
1222 tcpstat.tcps_connects++;
1223 TCP_STATE_SWITCH_TO(tp, TCPS_ESTABLISHED);
1224 /*
1225 * The sent SYN is ack'ed with our sequence number +1.
1226 * The first data byte already in the buffer will get
1227 * lost if no correction is made. This is only needed for
1228 * SS_CTL since the buffer is empty otherwise.
1229 * tp->snd_una++; or:
1230 */
1231 tp->snd_una = ti->ti_ack;
1232 soisfconnected(so);
1233
1234 /* Do window scaling? */
1235#if 0
1236 if ( (tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE))
1237 == (TF_RCVD_SCALE|TF_REQ_SCALE))
1238 {
1239 tp->snd_scale = tp->requested_s_scale;
1240 tp->rcv_scale = tp->request_r_scale;
1241 }
1242#endif
1243 (void) tcp_reass(pData, tp, (struct tcphdr *)0, (int *)0, (struct mbuf *)0);
1244 tp->snd_wl1 = ti->ti_seq - 1;
1245 /* Avoid ack processing; snd_una==ti_ack => dup ack */
1246 LogFlowFunc(("%d -> synrx_to_est\n", __LINE__));
1247 goto synrx_to_est;
1248 /* fall into ... */
1249
1250 /*
1251 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
1252 * ACKs. If the ack is in the range
1253 * tp->snd_una < ti->ti_ack <= tp->snd_max
1254 * then advance tp->snd_una to ti->ti_ack and drop
1255 * data from the retransmission queue. If this ACK reflects
1256 * more up to date window information we update our window information.
1257 */
1258 case TCPS_ESTABLISHED:
1259 case TCPS_FIN_WAIT_1:
1260 case TCPS_FIN_WAIT_2:
1261 case TCPS_CLOSE_WAIT:
1262 case TCPS_CLOSING:
1263 case TCPS_LAST_ACK:
1264 case TCPS_TIME_WAIT:
1265 LogFlowFunc(("%d -> TCPS_ESTABLISHED|TCPS_FIN_WAIT_1|TCPS_FIN_WAIT_2|TCPS_CLOSE_WAIT|"
1266 "TCPS_CLOSING|TCPS_LAST_ACK|TCPS_TIME_WAIT\n", __LINE__));
1267 if (SEQ_LEQ(ti->ti_ack, tp->snd_una))
1268 {
1269 if (ti->ti_len == 0 && tiwin == tp->snd_wnd)
1270 {
1271 tcpstat.tcps_rcvdupack++;
1272 Log2((" dup ack m = %lx, so = %lx\n", (long)m, (long)so));
1273 /*
1274 * If we have outstanding data (other than
1275 * a window probe), this is a completely
1276 * duplicate ack (ie, window info didn't
1277 * change), the ack is the biggest we've
1278 * seen and we've seen exactly our rexmt
1279 * threshold of them, assume a packet
1280 * has been dropped and retransmit it.
1281 * Kludge snd_nxt & the congestion
1282 * window so we send only this one
1283 * packet.
1284 *
1285 * We know we're losing at the current
1286 * window size so do congestion avoidance
1287 * (set ssthresh to half the current window
1288 * and pull our congestion window back to
1289 * the new ssthresh).
1290 *
1291 * Dup acks mean that packets have left the
1292 * network (they're now cached at the receiver)
1293 * so bump cwnd by the amount in the receiver
1294 * to keep a constant cwnd packets in the
1295 * network.
1296 */
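                /*
                 * Worked example (illustrative, assuming tcprexmtthresh == 3
                 * and t_maxseg == 1460): with snd_cwnd == snd_wnd == 16 * 1460,
                 * the third duplicate ACK sets snd_ssthresh to (16 / 2) * 1460,
                 * retransmits exactly one segment with snd_cwnd temporarily
                 * forced down to a single maxseg, then re-inflates snd_cwnd to
                 * ssthresh + 3 * 1460; each further duplicate ACK below adds
                 * another maxseg so new data can keep flowing.
                 */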
1297 if ( tp->t_timer[TCPT_REXMT] == 0
1298 || ti->ti_ack != tp->snd_una)
1299 tp->t_dupacks = 0;
1300 else if (++tp->t_dupacks == tcprexmtthresh)
1301 {
1302 tcp_seq onxt = tp->snd_nxt;
1303 u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
1304 if (win < 2)
1305 win = 2;
1306 tp->snd_ssthresh = win * tp->t_maxseg;
1307 tp->t_timer[TCPT_REXMT] = 0;
1308 tp->t_rtt = 0;
1309 tp->snd_nxt = ti->ti_ack;
1310 tp->snd_cwnd = tp->t_maxseg;
1311 (void) tcp_output(pData, tp);
1312 tp->snd_cwnd = tp->snd_ssthresh +
1313 tp->t_maxseg * tp->t_dupacks;
1314 if (SEQ_GT(onxt, tp->snd_nxt))
1315 tp->snd_nxt = onxt;
1316 LogFlowFunc(("%d -> drop\n", __LINE__));
1317 goto drop;
1318 }
1319 else if (tp->t_dupacks > tcprexmtthresh)
1320 {
1321 tp->snd_cwnd += tp->t_maxseg;
1322 (void) tcp_output(pData, tp);
1323 LogFlowFunc(("%d -> drop\n", __LINE__));
1324 goto drop;
1325 }
1326 }
1327 else
1328 tp->t_dupacks = 0;
1329 break;
1330 }
1331synrx_to_est:
1332 LogFlowFunc(("synrx_to_est:\n"));
1333 /*
1334 * If the congestion window was inflated to account
1335 * for the other side's cached packets, retract it.
1336 */
1337 if ( tp->t_dupacks > tcprexmtthresh
1338 && tp->snd_cwnd > tp->snd_ssthresh)
1339 tp->snd_cwnd = tp->snd_ssthresh;
1340 tp->t_dupacks = 0;
1341 if (SEQ_GT(ti->ti_ack, tp->snd_max))
1342 {
1343 tcpstat.tcps_rcvacktoomuch++;
1344 LogFlowFunc(("%d -> dropafterack\n", __LINE__));
1345 goto dropafterack;
1346 }
1347 acked = ti->ti_ack - tp->snd_una;
1348 tcpstat.tcps_rcvackpack++;
1349 tcpstat.tcps_rcvackbyte += acked;
1350
1351 /*
1352 * If we have a timestamp reply, update smoothed
1353 * round trip time. If no timestamp is present but
1354 * transmit timer is running and timed sequence
1355 * number was acked, update smoothed round trip time.
1356 * Since we now have an rtt measurement, cancel the
1357 * timer backoff (cf., Phil Karn's retransmit alg.).
1358 * Recompute the initial retransmit timer.
1359 */
1360#if 0
1361 if (ts_present)
1362 tcp_xmit_timer(tp, tcp_now-ts_ecr+1);
1363 else
1364#endif
1365 if (tp->t_rtt && SEQ_GT(ti->ti_ack, tp->t_rtseq))
1366 tcp_xmit_timer(pData, tp, tp->t_rtt);
1367
1368 /*
1369 * If all outstanding data is acked, stop retransmit
1370 * timer and remember to restart (more output or persist).
1371 * If there is more data to be acked, restart retransmit
1372 * timer, using current (possibly backed-off) value.
1373 */
1374 if (ti->ti_ack == tp->snd_max)
1375 {
1376 tp->t_timer[TCPT_REXMT] = 0;
1377 needoutput = 1;
1378 }
1379 else if (tp->t_timer[TCPT_PERSIST] == 0)
1380 tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
1381 /*
1382 * When new data is acked, open the congestion window.
1383 * If the window gives us less than ssthresh packets
1384 * in flight, open exponentially (maxseg per packet).
1385 * Otherwise open linearly: maxseg per window
1386 * (maxseg^2 / cwnd per packet).
1387 */
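            /*
             * E.g. (assuming t_maxseg == 1460): below ssthresh every ACK adds
             * a full 1460 bytes (slow start, roughly doubling cwnd each RTT);
             * above ssthresh an ACK arriving with cwnd == 10 * 1460 adds only
             * 1460 * 1460 / (10 * 1460) == 146 bytes, i.e. about one segment
             * per round trip (congestion avoidance), capped at
             * TCP_MAXWIN << snd_scale.
             */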
1388 {
1389 register u_int cw = tp->snd_cwnd;
1390 register u_int incr = tp->t_maxseg;
1391
1392 if (cw > tp->snd_ssthresh)
1393 incr = incr * incr / cw;
1394 tp->snd_cwnd = min(cw + incr, TCP_MAXWIN<<tp->snd_scale);
1395 }
1396 if (acked > SBUF_LEN(&so->so_snd))
1397 {
1398 tp->snd_wnd -= SBUF_LEN(&so->so_snd);
1399#ifndef VBOX_WITH_SLIRP_BSD_SBUF
1400 sbdrop(&so->so_snd, (int)so->so_snd.sb_cc);
1401#else
1402 sbuf_clear(&so->so_snd);
1403#endif
1404 ourfinisacked = 1;
1405 }
1406 else
1407 {
1408#ifndef VBOX_WITH_SLIRP_BSD_SBUF
1409 sbdrop(&so->so_snd, acked);
1410#else
1411 sbuf_setpos(&so->so_snd, sbuf_len(&so->so_snd) - acked);
1412#endif
1413 tp->snd_wnd -= acked;
1414 ourfinisacked = 0;
1415 }
1416 /*
1417 * XXX sowwakeup is called when data is acked and there's room
1418 * for more data... it should read() the socket
1419 */
1420#if 0
1421 if (so->so_snd.sb_flags & SB_NOTIFY)
1422 sowwakeup(so);
1423#endif
1424 tp->snd_una = ti->ti_ack;
1425 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
1426 tp->snd_nxt = tp->snd_una;
1427
1428 switch (tp->t_state)
1429 {
1430 /*
1431 * In FIN_WAIT_1 STATE in addition to the processing
1432 * for the ESTABLISHED state if our FIN is now acknowledged
1433 * then enter FIN_WAIT_2.
1434 */
1435 case TCPS_FIN_WAIT_1:
1436 if (ourfinisacked)
1437 {
1438 /*
1439 * If we can't receive any more
1440 * data, then closing user can proceed.
1441 * Starting the timer is contrary to the
1442 * specification, but if we don't get a FIN
1443 * we'll hang forever.
1444 */
1445 if (so->so_state & SS_FCANTRCVMORE)
1446 {
1447 soisfdisconnected(so);
1448 tp->t_timer[TCPT_2MSL] = tcp_maxidle;
1449 }
1450 TCP_STATE_SWITCH_TO(tp, TCPS_FIN_WAIT_2);
1451 }
1452 break;
1453
1454 /*
1455 * In CLOSING STATE in addition to the processing for
1456 * the ESTABLISHED state if the ACK acknowledges our FIN
1457 * then enter the TIME-WAIT state, otherwise ignore
1458 * the segment.
1459 */
1460 case TCPS_CLOSING:
1461 if (ourfinisacked)
1462 {
1463 TCP_STATE_SWITCH_TO(tp, TCPS_TIME_WAIT);
1464 tcp_canceltimers(tp);
1465 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1466 soisfdisconnected(so);
1467 }
1468 break;
1469
1470 /*
1471 * In LAST_ACK, we may still be waiting for data to drain
1472 * and/or to be acked, as well as for the ack of our FIN.
1473 * If our FIN is now acknowledged, delete the TCB,
1474 * enter the closed state and return.
1475 */
1476 case TCPS_LAST_ACK:
1477 if (ourfinisacked)
1478 {
1479 tp = tcp_close(pData, tp);
1480 LogFlowFunc(("%d -> drop\n", __LINE__));
1481 goto drop;
1482 }
1483 break;
1484
1485 /*
1486 * In TIME_WAIT state the only thing that should arrive
1487 * is a retransmission of the remote FIN. Acknowledge
1488 * it and restart the finack timer.
1489 */
1490 case TCPS_TIME_WAIT:
1491 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1492 LogFlowFunc(("%d -> dropafterack\n", __LINE__));
1493 goto dropafterack;
1494 }
1495 } /* switch(tp->t_state) */
1496
1497step6:
1498 LogFlowFunc(("step6:\n"));
1499 /*
1500 * Update window information.
1501 * Don't look at window if no ACK: TAC's send garbage on first SYN.
1502 */
1503 if ( (tiflags & TH_ACK)
1504 && ( SEQ_LT(tp->snd_wl1, ti->ti_seq)
1505 || ( tp->snd_wl1 == ti->ti_seq
1506 && ( SEQ_LT(tp->snd_wl2, ti->ti_ack)
1507 || ( tp->snd_wl2 == ti->ti_ack
1508 && tiwin > tp->snd_wnd)))))
1509 {
1510 /* keep track of pure window updates */
1511 if ( ti->ti_len == 0
1512 && tp->snd_wl2 == ti->ti_ack
1513 && tiwin > tp->snd_wnd)
1514 tcpstat.tcps_rcvwinupd++;
1515 tp->snd_wnd = tiwin;
1516 tp->snd_wl1 = ti->ti_seq;
1517 tp->snd_wl2 = ti->ti_ack;
1518 if (tp->snd_wnd > tp->max_sndwnd)
1519 tp->max_sndwnd = tp->snd_wnd;
1520 needoutput = 1;
1521 }
1522
1523 /*
1524 * Process segments with URG.
1525 */
1526 if ((tiflags & TH_URG) && ti->ti_urp &&
1527 TCPS_HAVERCVDFIN(tp->t_state) == 0)
1528 {
1529 /* BSD's sbufs are auto-extending so we shouldn't worry here */
1530#ifndef VBOX_WITH_SLIRP_BSD_SBUF
1531 /*
1532 * This is a kludge, but if we receive and accept
1533 * random urgent pointers, we'll crash in
1534 * soreceive. It's hard to imagine someone
1535 * actually wanting to send this much urgent data.
1536 */
1537 if (ti->ti_urp + so->so_rcv.sb_cc > so->so_rcv.sb_datalen)
1538 {
1539 ti->ti_urp = 0;
1540 tiflags &= ~TH_URG;
1541 LogFlowFunc(("%d -> dodata\n", __LINE__));
1542 goto dodata;
1543 }
1544#endif
1545 /*
1546 * If this segment advances the known urgent pointer,
1547 * then mark the data stream. This should not happen
1548 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
1549 * a FIN has been received from the remote side.
1550 * In these states we ignore the URG.
1551 *
1552 * According to RFC961 (Assigned Protocols),
1553 * the urgent pointer points to the last octet
1554 * of urgent data. We continue, however,
1555 * to consider it to indicate the first octet
1556 * of data past the urgent section as the original
1557 * spec states (in one of two places).
1558 */
1559 if (SEQ_GT(ti->ti_seq+ti->ti_urp, tp->rcv_up))
1560 {
1561 tp->rcv_up = ti->ti_seq + ti->ti_urp;
1562 so->so_urgc = SBUF_LEN(&so->so_rcv) +
1563 (tp->rcv_up - tp->rcv_nxt); /* -1; */
1564 tp->rcv_up = ti->ti_seq + ti->ti_urp;
1565 }
1566 }
1567 else
1568 /*
1569 * If no out of band data is expected,
1570 * pull receive urgent pointer along
1571 * with the receive window.
1572 */
1573 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
1574 tp->rcv_up = tp->rcv_nxt;
1575dodata:
1576 LogFlowFunc(("dodata:\n"));
1577
1578 /*
1579 * If this is a small packet, then ACK now - with Nagle
1580 * congestion avoidance the sender won't send more until
1581 * it gets an ACK.
1582 *
1583 * See above.
1584 */
1585 if ( ti->ti_len
1586 && (unsigned)ti->ti_len <= 5
1587 && ((struct tcpiphdr_2 *)ti)->first_char == (char)27)
1588 {
1589 tp->t_flags |= TF_ACKNOW;
1590 }
1591
1592 /*
1593 * Process the segment text, merging it into the TCP sequencing queue,
1594 * and arranging for acknowledgment of receipt if necessary.
1595 * This process logically involves adjusting tp->rcv_wnd as data
1596 * is presented to the user (this happens in tcp_usrreq.c,
1597 * case PRU_RCVD). If a FIN has already been received on this
1598 * connection then we just ignore the text.
1599 */
1600 if ( (ti->ti_len || (tiflags&TH_FIN))
1601 && TCPS_HAVERCVDFIN(tp->t_state) == 0)
1602 {
1603 if ( ti->ti_seq == tp->rcv_nxt
1604 && LIST_EMPTY(&tp->t_segq)
1605 && tp->t_state == TCPS_ESTABLISHED)
1606 {
1607 DELAY_ACK(tp, ti); /* little bit different from BSD declaration see netinet/tcp_input.c */
1608 tp->rcv_nxt += tlen;
1609 tiflags = ti->ti_t.th_flags & TH_FIN;
1610 tcpstat.tcps_rcvpack++;
1611 tcpstat.tcps_rcvbyte += tlen;
1612 if (so->so_state & SS_FCANTRCVMORE)
1613 m_freem(pData, m);
1614 else
1615 sbappend(pData, so, m);
1616 }
1617 else
1618 {
1619 tiflags = tcp_reass(pData, tp, &ti->ti_t, &tlen, m);
1620 tp->t_flags |= TF_ACKNOW;
1621 }
1622 /*
1623 * Note the amount of data that peer has sent into
1624 * our window, in order to estimate the sender's
1625 * buffer size.
1626 */
1627 len = SBUF_SIZE(&so->so_rcv) - (tp->rcv_adv - tp->rcv_nxt);
1628 }
1629 else
1630 {
1631 m_freem(pData, m);
1632 tiflags &= ~TH_FIN;
1633 }
1634
1635 /*
1636 * If FIN is received ACK the FIN and let the user know
1637 * that the connection is closing.
1638 */
1639 if (tiflags & TH_FIN)
1640 {
1641 if (TCPS_HAVERCVDFIN(tp->t_state) == 0)
1642 {
1643 /*
1644 * If we receive a FIN we can't send more data,
1645 * so set SS_FDRAIN.
1646 * Shutdown the socket if there is no rx data in the
1647 * buffer.
1648 * soread() is called on completion of shutdown() and
1649 * will go to TCPS_LAST_ACK, and use tcp_output()
1650 * to send the FIN.
1651 */
1652/* sofcantrcvmore(so); */
1653 sofwdrain(so);
1654
1655 tp->t_flags |= TF_ACKNOW;
1656 tp->rcv_nxt++;
1657 }
1658 switch (tp->t_state)
1659 {
1660 /*
1661 * In SYN_RECEIVED and ESTABLISHED STATES
1662 * enter the CLOSE_WAIT state.
1663 */
1664 case TCPS_SYN_RECEIVED:
1665 case TCPS_ESTABLISHED:
1666 TCP_STATE_SWITCH_TO(tp, TCPS_CLOSE_WAIT);
1667 break;
1668
1669 /*
1670 * If still in FIN_WAIT_1 STATE FIN has not been acked so
1671 * enter the CLOSING state.
1672 */
1673 case TCPS_FIN_WAIT_1:
1674 TCP_STATE_SWITCH_TO(tp, TCPS_CLOSING);
1675 break;
1676
1677 /*
1678 * In FIN_WAIT_2 state enter the TIME_WAIT state,
1679 * starting the time-wait timer, turning off the other
1680 * standard timers.
1681 */
1682 case TCPS_FIN_WAIT_2:
1683 TCP_STATE_SWITCH_TO(tp, TCPS_TIME_WAIT);
1684 tcp_canceltimers(tp);
1685 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1686 soisfdisconnected(so);
1687 break;
1688
1689 /*
1690 * In TIME_WAIT state restart the 2 MSL time_wait timer.
1691 */
1692 case TCPS_TIME_WAIT:
1693 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1694 break;
1695 }
1696 }
1697
1698 /*
1699 * Return any desired output.
1700 */
1701 if (needoutput || (tp->t_flags & TF_ACKNOW))
1702 tcp_output(pData, tp);
1703
1704 SOCKET_UNLOCK(so);
1705 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
1706 LogFlowFuncLeave();
1707 return;
1708
1709dropafterack:
1710 LogFlowFunc(("dropafterack:\n"));
1711 /*
1712 * Generate an ACK dropping incoming segment if it occupies
1713 * sequence space, where the ACK reflects our state.
1714 */
1715 if (tiflags & TH_RST)
1716 {
1717 LogFlowFunc(("%d -> drop\n", __LINE__));
1718 goto drop;
1719 }
1720 m_freem(pData, m);
1721 tp->t_flags |= TF_ACKNOW;
1722 (void) tcp_output(pData, tp);
1723 SOCKET_UNLOCK(so);
1724 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
1725 LogFlowFuncLeave();
1726 return;
1727
1728dropwithreset:
1729 LogFlowFunc(("dropwithreset:\n"));
1730 /* reuses m if m!=NULL, m_free() unnecessary */
1731 if (tiflags & TH_ACK)
1732 tcp_respond(pData, tp, ti, m, (tcp_seq)0, ti->ti_ack, TH_RST);
1733 else
1734 {
1735 if (tiflags & TH_SYN)
1736 ti->ti_len++;
1737 tcp_respond(pData, tp, ti, m, ti->ti_seq+ti->ti_len, (tcp_seq)0,
1738 TH_RST|TH_ACK);
1739 }
1740
1741 if (so != &tcb)
1742 SOCKET_UNLOCK(so);
1743 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
1744 LogFlowFuncLeave();
1745 return;
1746
1747drop:
1748 LogFlowFunc(("drop:\n"));
1749 /*
1750 * Drop space held by incoming segment and return.
1751 */
1752 m_freem(pData, m);
1753
1754#ifdef VBOX_WITH_SLIRP_MT
1755 if (RTCritSectIsOwned(&so->so_mutex))
1756 {
1757 SOCKET_UNLOCK(so);
1758 }
1759#endif
1760
1761 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
1762 LogFlowFuncLeave();
1763 return;
1764}
1765
1766
1767void
1768tcp_fconnect_failed(PNATState pData, struct socket *so, int sockerr)
1769{
1770 struct tcpcb *tp;
1771 int code;
1772
1773 Log2(("NAT: connect error %d %R[natsock]\n", sockerr, so));
1774
1775 Assert(so->so_state & SS_ISFCONNECTING);
1776 so->so_state = SS_NOFDREF;
1777
1778 if (sockerr == ECONNREFUSED || sockerr == ECONNRESET)
1779 {
1780 /* hand off to tcp_input():cont_conn to send RST */
1781 TCP_INPUT(pData, NULL, 0, so);
1782 return;
1783 }
1784
1785 tp = sototcpcb(so);
1786 if (RT_UNLIKELY(tp == NULL)) /* should never happen */
1787 {
1788 LogRel(("NAT: tp == NULL %R[natsock]\n", so));
1789 sofree(pData, so);
1790 return;
1791 }
1792
1793 if (sockerr == ENETUNREACH || sockerr == ENETDOWN)
1794 code = ICMP_UNREACH_NET;
1795 else if (sockerr == EHOSTUNREACH || sockerr == EHOSTDOWN)
1796 code = ICMP_UNREACH_HOST;
1797 else
1798 code = -1;
1799
1800 if (code >= 0)
1801 {
1802 struct ip *oip;
1803 size_t ohdrlen;
1804 struct mbuf *m;
1805
1806 if (RT_UNLIKELY(so->so_ohdr == NULL))
1807 goto out;
1808
1809 oip = (struct ip *)so->so_ohdr;
1810 ohdrlen = oip->ip_hl * 4 + 8;
1811
1812 m = m_gethdr(pData, M_NOWAIT, MT_HEADER);
1813 if (RT_UNLIKELY(m == NULL))
1814 goto out;
1815
1816 m_copyback(pData, m, 0, ohdrlen, (caddr_t)so->so_ohdr);
1817 m->m_pkthdr.header = mtod(m, void *);
1818
1819 icmp_error(pData, m, ICMP_UNREACH, code, 0, NULL);
1820 }
1821
1822 out:
1823 tcp_close(pData, tp);
1824}
1825
1826
1827void
1828tcp_dooptions(PNATState pData, struct tcpcb *tp, u_char *cp, int cnt, struct tcpiphdr *ti)
1829{
1830 u_int16_t mss;
1831 int opt, optlen;
1832
1833 LogFlowFunc(("tcp_dooptions: tp = %R[tcpcb793], cnt=%i\n", tp, cnt));
1834
1835 for (; cnt > 0; cnt -= optlen, cp += optlen)
1836 {
1837 opt = cp[0];
1838 if (opt == TCPOPT_EOL)
1839 break;
1840 if (opt == TCPOPT_NOP)
1841 optlen = 1;
1842 else
1843 {
1844 optlen = cp[1];
1845 if (optlen <= 0)
1846 break;
1847 }
1848 switch (opt)
1849 {
1850 default:
1851 continue;
1852
1853 case TCPOPT_MAXSEG:
1854 if (optlen != TCPOLEN_MAXSEG)
1855 continue;
1856 if (!(ti->ti_flags & TH_SYN))
1857 continue;
1858 memcpy((char *) &mss, (char *) cp + 2, sizeof(mss));
1859 NTOHS(mss);
1860 (void) tcp_mss(pData, tp, mss); /* sets t_maxseg */
1861 break;
1862
1863#if 0
1864 case TCPOPT_WINDOW:
1865 if (optlen != TCPOLEN_WINDOW)
1866 continue;
1867 if (!(ti->ti_flags & TH_SYN))
1868 continue;
1869 tp->t_flags |= TF_RCVD_SCALE;
1870 tp->requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT);
1871 break;
1872
1873 case TCPOPT_TIMESTAMP:
1874 if (optlen != TCPOLEN_TIMESTAMP)
1875 continue;
1876 *ts_present = 1;
1877 memcpy((char *) ts_val, (char *)cp + 2, sizeof(*ts_val));
1878 NTOHL(*ts_val);
1879 memcpy((char *) ts_ecr, (char *)cp + 6, sizeof(*ts_ecr));
1880 NTOHL(*ts_ecr);
1881
1882 /*
1883 * A timestamp received in a SYN makes
1884 * it ok to send timestamp requests and replies.
1885 */
1886 if (ti->ti_flags & TH_SYN)
1887 {
1888 tp->t_flags |= TF_RCVD_TSTMP;
1889 tp->ts_recent = *ts_val;
1890 tp->ts_recent_age = tcp_now;
1891 }
1892 break;
1893#endif
1894 }
1895 }
1896}
1897
1898
1899/*
1900 * Pull out of band byte out of a segment so
1901 * it doesn't appear in the user's data queue.
1902 * It is still reflected in the segment length for
1903 * sequencing purposes.
1904 */
1905
1906#if 0
1907void
1908tcp_pulloutofband(struct socket *so, struct tcpiphdr *ti, struct mbuf *m)
1909{
1910 int cnt = ti->ti_urp - 1;
1911
1912 while (cnt >= 0)
1913 {
1914 if (m->m_len > cnt)
1915 {
1916 char *cp = mtod(m, caddr_t) + cnt;
1917 struct tcpcb *tp = sototcpcb(so);
1918
1919 tp->t_iobc = *cp;
1920 tp->t_oobflags |= TCPOOB_HAVEDATA;
1921 memcpy(sp, cp+1, (unsigned)(m->m_len - cnt - 1));
1922 m->m_len--;
1923 return;
1924 }
1925 cnt -= m->m_len;
1926 m = m->m_next; /* XXX WRONG! Fix it! */
1927 if (m == 0)
1928 break;
1929 }
1930 panic("tcp_pulloutofband");
1931}
1932#endif
1933
1934/*
1935 * Collect new round-trip time estimate
1936 * and update averages and current timeout.
1937 */
1938
1939void
1940tcp_xmit_timer(PNATState pData, register struct tcpcb *tp, int rtt)
1941{
1942 register short delta;
1943
1944 LogFlowFunc(("ENTER: tcp_xmit_timer: tp = %R[tcpcb793] rtt = %d\n", tp, rtt));
1945
1946 tcpstat.tcps_rttupdated++;
1947 if (tp->t_srtt != 0)
1948 {
1949 /*
1950 * srtt is stored as fixed point with 3 bits after the
1951 * binary point (i.e., scaled by 8). The following magic
1952 * is equivalent to the smoothing algorithm in rfc793 with
1953 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
1954 * point). Adjust rtt to origin 0.
1955 */
1956 delta = rtt - 1 - (tp->t_srtt >> TCP_RTT_SHIFT);
1957 if ((tp->t_srtt += delta) <= 0)
1958 tp->t_srtt = 1;
1959 /*
1960 * We accumulate a smoothed rtt variance (actually, a
1961 * smoothed mean difference), then set the retransmit
1962 * timer to smoothed rtt + 4 times the smoothed variance.
1963 * rttvar is stored as fixed point with 2 bits after the
1964 * binary point (scaled by 4). The following is
1965 * equivalent to rfc793 smoothing with an alpha of .75
1966 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
1967 * rfc793's wired-in beta.
1968 */
1969 if (delta < 0)
1970 delta = -delta;
1971 delta -= (tp->t_rttvar >> TCP_RTTVAR_SHIFT);
1972 if ((tp->t_rttvar += delta) <= 0)
1973 tp->t_rttvar = 1;
1974 }
1975 else
1976 {
1977 /*
1978 * No rtt measurement yet - use the unsmoothed rtt.
1979 * Set the variance to half the rtt (so our first
1980 * retransmit happens at 3*rtt).
1981 */
1982 tp->t_srtt = rtt << TCP_RTT_SHIFT;
1983 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
1984 }
1985 tp->t_rtt = 0;
1986 tp->t_rxtshift = 0;
1987
1988 /*
1989 * the retransmit should happen at rtt + 4 * rttvar.
1990 * Because of the way we do the smoothing, srtt and rttvar
1991 * will each average +1/2 tick of bias. When we compute
1992 * the retransmit timer, we want 1/2 tick of rounding and
1993 * 1 extra tick because of +-1/2 tick uncertainty in the
1994 * firing of the timer. The bias will give us exactly the
1995 * 1.5 tick we need. But, because the bias is
1996 * statistical, we have to test that we don't drop below
1997 * the minimum feasible timer (which is 2 ticks).
1998 */
1999 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
2000 (short)tp->t_rttmin, TCPTV_REXMTMAX); /* XXX */
2001
2002 /*
2003 * We received an ack for a packet that wasn't retransmitted;
2004 * it is probably safe to discard any error indications we've
2005 * received recently. This isn't quite right, but close enough
2006 * for now (a route might have failed after we sent a segment,
2007 * and the return path might not be symmetrical).
2008 */
2009 tp->t_softerror = 0;
2010}
2011
2012/*
2013 * Determine a reasonable value for maxseg size.
2014 * If the route is known, check route for mtu.
2015 * If none, use an mss that can be handled on the outgoing
2016 * interface without forcing IP to fragment; if bigger than
2017 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
2018 * to utilize large mbufs. If no route is found, route has no mtu,
2019 * or the destination isn't local, use a default, hopefully conservative
2020 * size (usually 512 or the default IP max size, but no more than the mtu
2021 * of the interface), as we can't discover anything about intervening
2022 * gateways or networks. We also initialize the congestion/slow start
2023 * window to be a single segment if the destination isn't local.
2024 * While looking at the routing entry, we also initialize other path-dependent
2025 * parameters from pre-set or cached values in the routing entry.
2026 */
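/*
 * E.g. (illustrative, assuming the usual 1500-byte Ethernet MTU/MRU and a
 * 40-byte struct tcpiphdr, i.e. 20 bytes of IP plus 20 bytes of TCP header):
 * the local limit works out to about 1460; if the peer offered a smaller MSS
 * in its SYN, the smaller value wins, and the result is never allowed to
 * drop below 32.
 */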
2027
2028int
2029tcp_mss(PNATState pData, register struct tcpcb *tp, u_int offer)
2030{
2031 struct socket *so = tp->t_socket;
2032 int mss;
2033
2034 LogFlowFunc(("ENTER: tcp_mss: tp = %R[tcpcb793], offer = %d\n", tp, offer));
2035
2036 mss = min(if_mtu, if_mru) - sizeof(struct tcpiphdr);
2037 if (offer)
2038 mss = min(mss, offer);
2039 mss = max(mss, 32);
2040 if (mss < tp->t_maxseg || offer != 0)
2041 tp->t_maxseg = mss;
2042
2043 tp->snd_cwnd = mss;
2044
2045#ifndef VBOX_WITH_SLIRP_BSD_SBUF
2046 sbreserve(pData, &so->so_snd, tcp_sndspace+((tcp_sndspace%mss)?(mss-(tcp_sndspace%mss)):0));
2047 sbreserve(pData, &so->so_rcv, tcp_rcvspace+((tcp_rcvspace%mss)?(mss-(tcp_rcvspace%mss)):0));
2048#else
2049 sbuf_new(&so->so_snd, NULL, tcp_sndspace+((tcp_sndspace%mss)?(mss-(tcp_sndspace%mss)):0), SBUF_AUTOEXTEND);
2050 sbuf_new(&so->so_rcv, NULL, tcp_rcvspace+((tcp_rcvspace%mss)?(mss-(tcp_rcvspace%mss)):0), SBUF_AUTOEXTEND);
2051#endif
2052
2053 Log2((" returning mss = %d\n", mss));
2054
2055 return mss;
2056}