VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/tcp_input.c@ 14309

Last change on this file since 14309 was 14309, checked in by vboxsync, 16 years ago.

Commit log message: "#else /* what comes now */"

  • Property svn:eol-style set to native
File size: 54.4 KB
Line 
1/*
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)tcp_input.c 8.5 (Berkeley) 4/10/94
34 * tcp_input.c,v 1.10 1994/10/13 18:36:32 wollman Exp
35 */
36
37/*
38 * Changes and additions relating to SLiRP
39 * Copyright (c) 1995 Danny Gasparovski.
40 *
41 * Please read the file COPYRIGHT for the
42 * terms and conditions of the copyright.
43 */
44
45#include <slirp.h>
46#include "ip_icmp.h"
47
48
/*
 * PAWS (RFC 1323): a received timestamp echo older than 24 days
 * (24 days * 24 h * 60 min * 60 s, expressed in PR_SLOWHZ slow-timer
 * ticks) is treated as stale and may be invalidated.
 */
#define TCP_PAWS_IDLE (24 * 24 * 60 * 60 * PR_SLOWHZ)

/* for modulo comparisons of timestamps */
/* The cast to signed int makes these comparisons wrap-safe: two
 * timestamps that differ by less than 2^31 compare correctly even
 * when the unsigned counter has wrapped around. */
#define TSTMP_LT(a,b) ((int)((a)-(b)) < 0)
#define TSTMP_GEQ(a,b) ((int)((a)-(b)) >= 0)
54
#ifndef VBOX_WITH_BSD_TCP_REASS
/*
 * Insert segment ti into reassembly queue of tcp with
 * control block tp. Return TH_FIN if reassembly now includes
 * a segment with FIN. The macro form does the common case inline
 * (segment is the next to be received on an established connection,
 * and the queue is empty), avoiding linkage into and removal
 * from the queue and repetition of various conversions.
 * Set DELACK for segments received in order, but ack immediately
 * when segments are out of order (so fast retransmit can work).
 *
 * NOTE(review): the "queue is empty" test compares the decoded
 * (tp)->seg_next link against (tp) itself — the reassembly list head is
 * embedded in the tcpcb, so seg_next pointing back at the tcpcb means
 * no segments are queued. The cast type (struct tcpcb *) is only used
 * for the pointer comparison.
 */
#ifdef TCP_ACK_HACK
/* Variant with the "ack hack": segments carrying TH_PUSH are acked
 * immediately (interactive traffic), others get a delayed ack. */
#define TCP_REASS(pData, tp, ti, m, so, flags) {\
       if ((ti)->ti_seq == (tp)->rcv_nxt && \
           u32_to_ptr((pData), (tp)->seg_next, struct tcpcb *) == (tp) && \
           (tp)->t_state == TCPS_ESTABLISHED) {\
               if (ti->ti_flags & TH_PUSH) \
                       tp->t_flags |= TF_ACKNOW; \
               else \
                       tp->t_flags |= TF_DELACK; \
               (tp)->rcv_nxt += (ti)->ti_len; \
               flags = (ti)->ti_flags & TH_FIN; \
               tcpstat.tcps_rcvpack++;\
               tcpstat.tcps_rcvbyte += (ti)->ti_len;\
               if (so->so_emu) { \
                       if (tcp_emu((pData), (so),(m))) sbappend((pData), (so), (m)); \
               } else \
                       sbappend((pData), (so), (m)); \
/* sorwakeup(so); */ \
       } else {\
               (flags) = tcp_reass((pData), (tp), (ti), (m)); \
               tp->t_flags |= TF_ACKNOW; \
       } \
}
#else
/* Plain variant: every in-order segment gets a delayed ack; the slow
 * path (out-of-order) falls through to tcp_reass() and acks at once. */
#define TCP_REASS(pData, tp, ti, m, so, flags) { \
       if ((ti)->ti_seq == (tp)->rcv_nxt && \
           u32_to_ptr((pData), (tp)->seg_next, struct tcpcb *) == (tp) && \
           (tp)->t_state == TCPS_ESTABLISHED) { \
               tp->t_flags |= TF_DELACK; \
               (tp)->rcv_nxt += (ti)->ti_len; \
               flags = (ti)->ti_flags & TH_FIN; \
               tcpstat.tcps_rcvpack++;\
               tcpstat.tcps_rcvbyte += (ti)->ti_len;\
               if (so->so_emu) { \
                       if (tcp_emu((pData), (so),(m))) sbappend((pData), (so), (m)); \
               } else \
                       sbappend((pData), (so), (m)); \
/* sorwakeup(so); */ \
       } else { \
               (flags) = tcp_reass((pData), (tp), (ti), (m)); \
               tp->t_flags |= TF_ACKNOW; \
       } \
}
#endif
110
/**
 * Out-of-order segment reassembly (pre-BSD slirp variant).
 *
 * Inserts segment 'ti' (payload in mbuf 'm') into tp's reassembly queue
 * in sequence order, trimming any bytes already covered by neighbouring
 * queued segments, then delivers every contiguous segment at the left
 * window edge to the socket buffer while advancing rcv_nxt.
 *
 * @param pData  NAT instance data (needed to decode the 32-bit encoded
 *               queue links — see note below).
 * @param tp     TCP control block owning the reassembly queue.
 * @param ti     TCP/IP header of the arriving segment, or 0 to flush
 *               data queued before the connection became ESTABLISHED.
 * @param m      mbuf holding the payload; ownership passes to the queue,
 *               or it is freed here for complete duplicates.
 * @return TH_FIN if the data presented to the socket included a FIN,
 *         otherwise 0.
 */
int
tcp_reass(PNATState pData, register struct tcpcb *tp, register struct tcpiphdr *ti, struct mbuf *m)
{
    register struct tcpiphdr *q;
    struct socket *so = tp->t_socket;
    int flags;

    /*
     * Call with ti==0 after become established to
     * force pre-ESTABLISHED data up to user socket.
     */
    if (ti == 0)
        goto present;

    /*
     * Find a segment which begins after this one does.
     *
     * NOTE(review): ti_next/ti_prev store list links as 32-bit values
     * decoded through u32_to_ptr()/pData (encoding defined elsewhere).
     * The circular list's head is embedded in the tcpcb, so reaching
     * (struct tcpiphdr *)tp means the end of the queue.
     */
    for (q = u32_to_ptr(pData, tp->seg_next, struct tcpiphdr *); q != (struct tcpiphdr *)tp;
         q = u32_to_ptr(pData, q->ti_next, struct tcpiphdr *))
        if (SEQ_GT(q->ti_seq, ti->ti_seq))
            break;

    /*
     * If there is a preceding segment, it may provide some of
     * our data already. If so, drop the data from the incoming
     * segment. If it provides all of our data, drop us.
     */
    if (u32_to_ptr(pData, q->ti_prev, struct tcpiphdr *) != (struct tcpiphdr *)tp) {
        register int i;
        q = u32_to_ptr(pData, q->ti_prev, struct tcpiphdr *);
        /* conversion to int (in i) handles seq wraparound */
        i = q->ti_seq + q->ti_len - ti->ti_seq;
        if (i > 0) {
            if (i >= ti->ti_len) {
                /* Complete duplicate: count it and drop the mbuf. */
                tcpstat.tcps_rcvduppack++;
                tcpstat.tcps_rcvdupbyte += ti->ti_len;
                m_freem(pData, m);
                /*
                 * Try to present any queued data
                 * at the left window edge to the user.
                 * This is needed after the 3-WHS
                 * completes.
                 */
                goto present; /* ??? */
            }
            /* Partial overlap: trim the duplicated prefix off 'm'. */
            m_adj(m, i);
            ti->ti_len -= i;
            ti->ti_seq += i;
        }
        q = u32_to_ptr(pData, q->ti_next, struct tcpiphdr *);
    }
    tcpstat.tcps_rcvoopack++;
    tcpstat.tcps_rcvoobyte += ti->ti_len;
    /* Stash the mbuf pointer inside the header so it can be recovered
     * when this segment is dequeued later ('m' is reused below). */
    REASS_MBUF_SET(ti, m); /* XXX */

    /*
     * While we overlap succeeding segments trim them or,
     * if they are completely covered, dequeue them.
     */
    while (q != (struct tcpiphdr *)tp) {
        register int i = (ti->ti_seq + ti->ti_len) - q->ti_seq;
        if (i <= 0)
            break;
        if (i < q->ti_len) {
            /* Partial overlap: shrink the queued segment from the left. */
            q->ti_seq += i;
            q->ti_len -= i;
            m_adj(REASS_MBUF_GET(q), i);
            break;
        }
        /* Queued segment fully covered by new data: unlink and free it. */
        q = u32_to_ptr(pData, q->ti_next, struct tcpiphdr *);
        m = REASS_MBUF_GET(u32_to_ptr(pData, q->ti_prev, struct tcpiphdr *));
        remque_32(pData, u32_to_ptr(pData, q->ti_prev, struct tcpiphdr *));
        m_freem(pData, m);
    }

    /*
     * Stick new segment in its place.
     */
    insque_32(pData, ti, u32_to_ptr(pData, q->ti_prev, struct tcpiphdr *));

present:
    /*
     * Present data to user, advancing rcv_nxt through
     * completed sequence space.
     */
    if (!TCPS_HAVEESTABLISHED(tp->t_state))
        return (0);
    ti = u32_to_ptr(pData, tp->seg_next, struct tcpiphdr *);
    /* Nothing to deliver unless the queue head is exactly rcv_nxt. */
    if (ti == (struct tcpiphdr *)tp || ti->ti_seq != tp->rcv_nxt)
        return (0);
    /* Don't deliver data while still in SYN_RECEIVED. */
    if (tp->t_state == TCPS_SYN_RECEIVED && ti->ti_len)
        return (0);
    /* The loop body runs at least once, so 'flags' is always assigned
     * before the return below. */
    do {
        tp->rcv_nxt += ti->ti_len;
        flags = ti->ti_flags & TH_FIN;
        remque_32(pData, ti);
        m = REASS_MBUF_GET(ti); /* XXX */
        ti = u32_to_ptr(pData, ti->ti_next, struct tcpiphdr *);
/*      if (so->so_state & SS_FCANTRCVMORE) */
        if (so->so_state & SS_FCANTSENDMORE)
            m_freem(pData, m);
        else {
            if (so->so_emu) {
                if (tcp_emu(pData, so,m)) sbappend(pData, so, m);
            } else
                sbappend(pData, so, m);
        }
    } while (ti != (struct tcpiphdr *)tp && ti->ti_seq == tp->rcv_nxt);
/*  sorwakeup(so); */
    return (flags);
}
222
223#else /* VBOX_WITH_BSD_TCP_REASS */
224
/*
 * Delayed-ACK policy helper for the BSD reassembly path.
 *
 * With TCP_ACK_HACK enabled, a segment carrying TH_PUSH is acked
 * immediately (keeps interactive traffic snappy) while other in-order
 * segments get a delayed ack; without the hack every in-order segment
 * simply gets a delayed ack.
 *
 * Fix: the guard previously read '#ifndef TCP_ACK_HACK', which inverted
 * the feature test relative to the TCP_REASS macros above (those use
 * '#ifdef TCP_ACK_HACK' to select the PUSH-sensitive variant), so each
 * build got the opposite DELAY_ACK behavior from the one requested.
 */
#ifdef TCP_ACK_HACK
#define DELAY_ACK(tp, ti) \
       if (ti->ti_flags & TH_PUSH) \
               tp->t_flags |= TF_ACKNOW; \
       else \
               tp->t_flags |= TF_DELACK;
#else /* !TCP_ACK_HACK */
#define DELAY_ACK(tp, ign) \
       tp->t_flags |= TF_DELACK;
#endif /* TCP_ACK_HACK */
235
236
237/*
238 * deps: netinet/tcp_reass.c
 239 * tcp_reass_maxqlen = 48 (default)
240 * tcp_reass_maxseg = nmbclusters/16 (nmbclusters = 1024 + maxusers * 64 from kern/kern_mbuf.c let's say 256)
241 */
/**
 * Out-of-order segment reassembly, ported from BSD netinet/tcp_reass.c.
 *
 * Queues segment 'th'/'m' on tp->t_segq in sequence order, trimming any
 * byte ranges already held by neighbouring queue entries, then appends
 * every contiguous segment at the left window edge to the socket buffer
 * while advancing rcv_nxt.
 *
 * @param pData  NAT instance data.
 * @param tp     TCP control block owning the reassembly queue.
 * @param th     TCP header of the arriving segment, or NULL to flush
 *               data queued before the connection became ESTABLISHED.
 * @param tlenp  in/out: payload length; reduced when overlap is trimmed,
 *               zeroed when the segment is dropped.
 * @param m      mbuf holding the payload; ownership passes to the queue,
 *               or it is freed here on drop.
 * @return TH_FIN if the data handed to the socket included a FIN, else 0.
 */
int
tcp_reass(PNATState pData, struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m)
{
    struct tseg_qent *q;
    struct tseg_qent *p = NULL;
    struct tseg_qent *nq;
    struct tseg_qent *te = NULL;
    struct socket *so = tp->t_socket;
    int flags;

    /*
     * XXX: tcp_reass() is rather inefficient with its data structures
     * and should be rewritten (see NetBSD for optimizations). While
     * doing that it should move to its own file tcp_reass.c.
     */

    /*
     * Call with th==NULL after become established to
     * force pre-ESTABLISHED data up to user socket.
     */
    if (th == NULL)
        goto present;

    /*
     * Limit the number of segments in the reassembly queue to prevent
     * holding on to too many segments (and thus running out of mbufs).
     * Make sure to let the missing segment through which caused this
     * queue. Always keep one global queue entry spare to be able to
     * process the missing segment.
     */
    if (th->th_seq != tp->rcv_nxt &&
        (tcp_reass_qsize + 1 >= tcp_reass_maxseg ||
         tp->t_segqlen >= tcp_reass_maxqlen)) {
        tcp_reass_overflows++;
        tcpstat.tcps_rcvmemdrop++;
        m_freem(pData, m);
        *tlenp = 0;
        return (0);
    }

    /*
     * Allocate a new queue entry. If we can't, or hit the zone limit
     * just drop the pkt.
     */
    te = malloc(sizeof(struct tseg_qent));
    if (te == NULL) {
        tcpstat.tcps_rcvmemdrop++;
        m_freem(pData, m);
        *tlenp = 0;
        return (0);
    }
    tp->t_segqlen++;
    tcp_reass_qsize++;

    /*
     * Find a segment which begins after this one does.
     * 'p' trails one entry behind 'q' and ends up as the predecessor
     * of the new segment (NULL means insert at the head).
     */
    LIST_FOREACH(q, &tp->t_segq, tqe_q) {
        if (SEQ_GT(q->tqe_th->th_seq, th->th_seq))
            break;
        p = q;
    }

    /*
     * If there is a preceding segment, it may provide some of
     * our data already. If so, drop the data from the incoming
     * segment. If it provides all of our data, drop us.
     */
    if (p != NULL) {
        int i;
        /* conversion to int (in i) handles seq wraparound */
        i = p->tqe_th->th_seq + p->tqe_len - th->th_seq;
        if (i > 0) {
            if (i >= *tlenp) {
                /* Complete duplicate: undo the bookkeeping and drop. */
                tcpstat.tcps_rcvduppack++;
                tcpstat.tcps_rcvdupbyte += *tlenp;
                m_freem(pData, m);
                free(te);
                tp->t_segqlen--;
                tcp_reass_qsize--;
                /*
                 * Try to present any queued data
                 * at the left window edge to the user.
                 * This is needed after the 3-WHS
                 * completes.
                 */
                goto present; /* ??? */
            }
            /* Partial overlap: trim the duplicated prefix off 'm'. */
            m_adj(m, i);
            *tlenp -= i;
            th->th_seq += i;
        }
    }
    tcpstat.tcps_rcvoopack++;
    tcpstat.tcps_rcvoobyte += *tlenp;

    /*
     * While we overlap succeeding segments trim them or,
     * if they are completely covered, dequeue them.
     */
    while (q) {
        int i = (th->th_seq + *tlenp) - q->tqe_th->th_seq;
        if (i <= 0)
            break;
        if (i < q->tqe_len) {
            /* Partial overlap: shrink the queued segment from the left. */
            q->tqe_th->th_seq += i;
            q->tqe_len -= i;
            m_adj(q->tqe_m, i);
            break;
        }

        /* Queued segment fully covered by new data: unlink and free it. */
        nq = LIST_NEXT(q, tqe_q);
        LIST_REMOVE(q, tqe_q);
        m_freem(pData, q->tqe_m);
        free(q);
        tp->t_segqlen--;
        tcp_reass_qsize--;
        q = nq;
    }

    /* Insert the new segment queue entry into place. */
    te->tqe_m = m;
    te->tqe_th = th;
    te->tqe_len = *tlenp;

    if (p == NULL) {
        LIST_INSERT_HEAD(&tp->t_segq, te, tqe_q);
    } else {
        LIST_INSERT_AFTER(p, te, tqe_q);
    }

present:
    /*
     * Present data to user, advancing rcv_nxt through
     * completed sequence space.
     */
    if (!TCPS_HAVEESTABLISHED(tp->t_state))
        return (0);
    q = LIST_FIRST(&tp->t_segq);
    if (!q || q->tqe_th->th_seq != tp->rcv_nxt)
        return (0);
    /* The loop body runs at least once (checked above), so 'flags' is
     * always assigned before the return below. */
    do {
        tp->rcv_nxt += q->tqe_len;
        flags = q->tqe_th->th_flags & TH_FIN;
        nq = LIST_NEXT(q, tqe_q);
        LIST_REMOVE(q, tqe_q);
        /* XXX: This place should be checked for the same code in
         * original BSD code for Slirp and current BSD used SS_FCANTRCVMORE
         */
        if (so->so_state & SS_FCANTSENDMORE)
            m_freem(pData, q->tqe_m);
        else
            sbappend(pData, so, q->tqe_m);
        free(q);
        tp->t_segqlen--;
        tcp_reass_qsize--;
        q = nq;
    } while (q && q->tqe_th->th_seq == tp->rcv_nxt);
    return (flags);
}
402#endif /* VBOX_WITH_BSD_TCP_REASS */
403
404/*
405 * TCP input routine, follows pages 65-76 of the
406 * protocol specification dated September, 1981 very closely.
407 */
408void
409tcp_input(PNATState pData, register struct mbuf *m, int iphlen, struct socket *inso)
410{
411 struct ip save_ip, *ip;
412 register struct tcpiphdr *ti;
413 caddr_t optp = NULL;
414 int optlen = 0;
415 int len, tlen, off;
416 register struct tcpcb *tp = 0;
417 register int tiflags;
418 struct socket *so = 0;
419 int todrop, acked, ourfinisacked, needoutput = 0;
420/* int dropsocket = 0; */
421 int iss = 0;
422 u_long tiwin;
423 int ret;
424/* int ts_present = 0; */
425 int mbuf_freed = 0;
426
427 DEBUG_CALL("tcp_input");
428 DEBUG_ARGS((dfd," m = %8lx iphlen = %2d inso = %lx\n",
429 (long )m, iphlen, (long )inso ));
430
431 /*
432 * If called with m == 0, then we're continuing the connect
433 */
434 if (m == NULL) {
435 so = inso;
436
437 /* Re-set a few variables */
438 tp = sototcpcb(so);
439 m = so->so_m;
440 so->so_m = 0;
441 ti = so->so_ti;
442 tiwin = ti->ti_win;
443 tiflags = ti->ti_flags;
444
445 goto cont_conn;
446 }
447
448
449 tcpstat.tcps_rcvtotal++;
450 /*
451 * Get IP and TCP header together in first mbuf.
452 * Note: IP leaves IP header in first mbuf.
453 */
454 ti = mtod(m, struct tcpiphdr *);
455 if (iphlen > sizeof(struct ip )) {
456 ip_stripoptions(m, (struct mbuf *)0);
457 iphlen=sizeof(struct ip );
458 }
459 /* XXX Check if too short */
460
461
462 /*
463 * Save a copy of the IP header in case we want restore it
464 * for sending an ICMP error message in response.
465 */
466 ip=mtod(m, struct ip *);
467 save_ip = *ip;
468 save_ip.ip_len+= iphlen;
469
470 /*
471 * Checksum extended TCP header and data.
472 */
473 tlen = ((struct ip *)ti)->ip_len;
474 ti->ti_next = ti->ti_prev = 0;
475 ti->ti_x1 = 0;
476 ti->ti_len = htons((u_int16_t)tlen);
477 len = sizeof(struct ip ) + tlen;
478 /* keep checksum for ICMP reply
479 * ti->ti_sum = cksum(m, len);
480 * if (ti->ti_sum) { */
481#ifdef VBOX_WITH_BSD_REASS_CKSUM_HACK
482 if (m->m_sum_recalculate == 0 && ip->ip_sum != 0) {
483#endif /* VBOX_WITH_BSD_REASS_CKSUM_HACK */
484 if(cksum(m, len)) {
485 tcpstat.tcps_rcvbadsum++;
486 goto drop;
487 }
488#ifdef VBOX_WITH_BSD_REASS_CKSUM_HACK
489 }
490#endif /* VBOX_WITH_BSD_REASS_CKSUM_HACK */
491
492 /*
493 * Check that TCP offset makes sense,
494 * pull out TCP options and adjust length. XXX
495 */
496 off = ti->ti_off << 2;
497 if (off < sizeof (struct tcphdr) || off > tlen) {
498 tcpstat.tcps_rcvbadoff++;
499 goto drop;
500 }
501 tlen -= off;
502 ti->ti_len = tlen;
503 if (off > sizeof (struct tcphdr)) {
504 optlen = off - sizeof (struct tcphdr);
505 optp = mtod(m, caddr_t) + sizeof (struct tcpiphdr);
506
507 /*
508 * Do quick retrieval of timestamp options ("options
509 * prediction?"). If timestamp is the only option and it's
510 * formatted as recommended in RFC 1323 appendix A, we
511 * quickly get the values now and not bother calling
512 * tcp_dooptions(), etc.
513 */
514/* if ((optlen == TCPOLEN_TSTAMP_APPA ||
515 * (optlen > TCPOLEN_TSTAMP_APPA &&
516 * optp[TCPOLEN_TSTAMP_APPA] == TCPOPT_EOL)) &&
517 * *(u_int32_t *)optp == htonl(TCPOPT_TSTAMP_HDR) &&
518 * (ti->ti_flags & TH_SYN) == 0) {
519 * ts_present = 1;
520 * ts_val = ntohl(*(u_int32_t *)(optp + 4));
521 * ts_ecr = ntohl(*(u_int32_t *)(optp + 8));
522 * optp = NULL; / * we've parsed the options * /
523 * }
524 */
525 }
526 tiflags = ti->ti_flags;
527
528 /*
529 * Convert TCP protocol specific fields to host format.
530 */
531 NTOHL(ti->ti_seq);
532 NTOHL(ti->ti_ack);
533 NTOHS(ti->ti_win);
534 NTOHS(ti->ti_urp);
535
536 /*
537 * Drop TCP, IP headers and TCP options.
538 */
539 m->m_data += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
540 m->m_len -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
541
542 /*
543 * Locate pcb for segment.
544 */
545findso:
546 so = tcp_last_so;
547 if (so->so_fport != ti->ti_dport ||
548 so->so_lport != ti->ti_sport ||
549 so->so_laddr.s_addr != ti->ti_src.s_addr ||
550 so->so_faddr.s_addr != ti->ti_dst.s_addr) {
551 so = solookup(&tcb, ti->ti_src, ti->ti_sport,
552 ti->ti_dst, ti->ti_dport);
553 if (so)
554 tcp_last_so = so;
555 ++tcpstat.tcps_socachemiss;
556 }
557
558 /*
559 * If the state is CLOSED (i.e., TCB does not exist) then
560 * all data in the incoming segment is discarded.
561 * If the TCB exists but is in CLOSED state, it is embryonic,
562 * but should either do a listen or a connect soon.
563 *
564 * state == CLOSED means we've done socreate() but haven't
565 * attached it to a protocol yet...
566 *
567 * XXX If a TCB does not exist, and the TH_SYN flag is
568 * the only flag set, then create a session, mark it
569 * as if it was LISTENING, and continue...
570 */
571 if (so == 0) {
572 if ((tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) != TH_SYN)
573 goto dropwithreset;
574
575 if ((so = socreate()) == NULL)
576 goto dropwithreset;
577 if (tcp_attach(pData, so) < 0) {
578 free(so); /* Not sofree (if it failed, it's not insqued) */
579 goto dropwithreset;
580 }
581
582 sbreserve(&so->so_snd, tcp_sndspace);
583 sbreserve(&so->so_rcv, tcp_rcvspace);
584
585 /* tcp_last_so = so; */ /* XXX ? */
586 /* tp = sototcpcb(so); */
587
588 so->so_laddr = ti->ti_src;
589 so->so_lport = ti->ti_sport;
590 so->so_faddr = ti->ti_dst;
591 so->so_fport = ti->ti_dport;
592
593 if ((so->so_iptos = tcp_tos(so)) == 0)
594 so->so_iptos = ((struct ip *)ti)->ip_tos;
595
596 tp = sototcpcb(so);
597 tp->t_state = TCPS_LISTEN;
598 }
599
600 /*
601 * If this is a still-connecting socket, this probably
602 * a retransmit of the SYN. Whether it's a retransmit SYN
603 * or something else, we nuke it.
604 */
605 if (so->so_state & SS_ISFCONNECTING)
606 goto drop;
607
608 tp = sototcpcb(so);
609
610 /* XXX Should never fail */
611 if (tp == 0)
612 goto dropwithreset;
613 if (tp->t_state == TCPS_CLOSED)
614 goto drop;
615
616 /* Unscale the window into a 32-bit value. */
617/* if ((tiflags & TH_SYN) == 0)
618 * tiwin = ti->ti_win << tp->snd_scale;
619 * else
620 */
621 tiwin = ti->ti_win;
622
623 /*
624 * Segment received on connection.
625 * Reset idle time and keep-alive timer.
626 */
627 tp->t_idle = 0;
628 if (so_options)
629 tp->t_timer[TCPT_KEEP] = tcp_keepintvl;
630 else
631 tp->t_timer[TCPT_KEEP] = tcp_keepidle;
632
633 /*
634 * Process options if not in LISTEN state,
635 * else do it below (after getting remote address).
636 */
637 if (optp && tp->t_state != TCPS_LISTEN)
638 tcp_dooptions(pData, tp, (u_char *)optp, optlen, ti);
639/* , */
640/* &ts_present, &ts_val, &ts_ecr); */
641
642 /*
643 * Header prediction: check for the two common cases
644 * of a uni-directional data xfer. If the packet has
645 * no control flags, is in-sequence, the window didn't
646 * change and we're not retransmitting, it's a
647 * candidate. If the length is zero and the ack moved
648 * forward, we're the sender side of the xfer. Just
649 * free the data acked & wake any higher level process
650 * that was blocked waiting for space. If the length
651 * is non-zero and the ack didn't move, we're the
652 * receiver side. If we're getting packets in-order
653 * (the reassembly queue is empty), add the data to
654 * the socket buffer and note that we need a delayed ack.
655 *
656 * XXX Some of these tests are not needed
657 * eg: the tiwin == tp->snd_wnd prevents many more
658 * predictions.. with no *real* advantage..
659 */
660 if (tp->t_state == TCPS_ESTABLISHED &&
661 (tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
662/* (!ts_present || TSTMP_GEQ(ts_val, tp->ts_recent)) && */
663 ti->ti_seq == tp->rcv_nxt &&
664 tiwin && tiwin == tp->snd_wnd &&
665 tp->snd_nxt == tp->snd_max) {
666 /*
667 * If last ACK falls within this segment's sequence numbers,
668 * record the timestamp.
669 */
670/* if (ts_present && SEQ_LEQ(ti->ti_seq, tp->last_ack_sent) &&
671 * SEQ_LT(tp->last_ack_sent, ti->ti_seq + ti->ti_len)) {
672 * tp->ts_recent_age = tcp_now;
673 * tp->ts_recent = ts_val;
674 * }
675 */
676 if (ti->ti_len == 0) {
677 if (SEQ_GT(ti->ti_ack, tp->snd_una) &&
678 SEQ_LEQ(ti->ti_ack, tp->snd_max) &&
679 tp->snd_cwnd >= tp->snd_wnd) {
680 /*
681 * this is a pure ack for outstanding data.
682 */
683 ++tcpstat.tcps_predack;
684/* if (ts_present)
685 * tcp_xmit_timer(tp, tcp_now-ts_ecr+1);
686 * else
687 */ if (tp->t_rtt &&
688 SEQ_GT(ti->ti_ack, tp->t_rtseq))
689 tcp_xmit_timer(pData, tp, tp->t_rtt);
690 acked = ti->ti_ack - tp->snd_una;
691 tcpstat.tcps_rcvackpack++;
692 tcpstat.tcps_rcvackbyte += acked;
693 sbdrop(&so->so_snd, acked);
694 tp->snd_una = ti->ti_ack;
695 m_freem(pData, m);
696
697 /*
698 * If all outstanding data are acked, stop
699 * retransmit timer, otherwise restart timer
700 * using current (possibly backed-off) value.
701 * If process is waiting for space,
702 * wakeup/selwakeup/signal. If data
703 * are ready to send, let tcp_output
704 * decide between more output or persist.
705 */
706 if (tp->snd_una == tp->snd_max)
707 tp->t_timer[TCPT_REXMT] = 0;
708 else if (tp->t_timer[TCPT_PERSIST] == 0)
709 tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
710
711 /*
712 * There's room in so_snd, sowwakup will read()
713 * from the socket if we can
714 */
715/* if (so->so_snd.sb_flags & SB_NOTIFY)
716 * sowwakeup(so);
717 */
718 /*
719 * This is called because sowwakeup might have
720 * put data into so_snd. Since we don't so sowwakeup,
721 * we don't need this.. XXX???
722 */
723 if (so->so_snd.sb_cc)
724 (void) tcp_output(pData, tp);
725
726 return;
727 }
728 } else if (ti->ti_ack == tp->snd_una &&
729#ifndef VBOX_WITH_BSD_TCP_REASS
730 u32_to_ptr(pData, tp->seg_next, struct tcpcb *) == tp &&
731#else /* VBOX_WITH_BSD_TCP_REASS */
732 LIST_FIRST(&tp->t_segq) &&
733#endif /* VBOX_WITH_BSD_TCP_REASS */
734 ti->ti_len <= sbspace(&so->so_rcv)) {
735 /*
736 * this is a pure, in-sequence data packet
737 * with nothing on the reassembly queue and
738 * we have enough buffer space to take it.
739 */
740 ++tcpstat.tcps_preddat;
741 tp->rcv_nxt += ti->ti_len;
742 tcpstat.tcps_rcvpack++;
743 tcpstat.tcps_rcvbyte += ti->ti_len;
744 /*
745 * Add data to socket buffer.
746 */
747 if (so->so_emu) {
748 if (tcp_emu(pData, so,m)) sbappend(pData, so, m);
749 } else
750 sbappend(pData, so, m);
751
752 /*
753 * XXX This is called when data arrives. Later, check
754 * if we can actually write() to the socket
755 * XXX Need to check? It's be NON_BLOCKING
756 */
757/* sorwakeup(so); */
758
759 /*
760 * If this is a short packet, then ACK now - with Nagel
761 * congestion avoidance sender won't send more until
762 * he gets an ACK.
763 *
764 * It is better to not delay acks at all to maximize
765 * TCP throughput. See RFC 2581.
766 */
767 tp->t_flags |= TF_ACKNOW;
768 tcp_output(pData, tp);
769 return;
770 }
771 } /* header prediction */
772 /*
773 * Calculate amount of space in receive window,
774 * and then do TCP input processing.
775 * Receive window is amount of space in rcv queue,
776 * but not less than advertised window.
777 */
778 { int win;
779 win = sbspace(&so->so_rcv);
780 if (win < 0)
781 win = 0;
782 tp->rcv_wnd = max(win, (int)(tp->rcv_adv - tp->rcv_nxt));
783 }
784
785 switch (tp->t_state) {
786
787 /*
788 * If the state is LISTEN then ignore segment if it contains an RST.
789 * If the segment contains an ACK then it is bad and send a RST.
790 * If it does not contain a SYN then it is not interesting; drop it.
791 * Don't bother responding if the destination was a broadcast.
792 * Otherwise initialize tp->rcv_nxt, and tp->irs, select an initial
793 * tp->iss, and send a segment:
794 * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
795 * Also initialize tp->snd_nxt to tp->iss+1 and tp->snd_una to tp->iss.
796 * Fill in remote peer address fields if not previously specified.
797 * Enter SYN_RECEIVED state, and process any other fields of this
798 * segment in this state.
799 */
800 case TCPS_LISTEN: {
801
802 if (tiflags & TH_RST)
803 goto drop;
804 if (tiflags & TH_ACK)
805 goto dropwithreset;
806 if ((tiflags & TH_SYN) == 0)
807 goto drop;
808
809 /*
810 * This has way too many gotos...
811 * But a bit of spaghetti code never hurt anybody :)
812 */
813
814 /*
815 * If this is destined for the control address, then flag to
816 * tcp_ctl once connected, otherwise connect
817 */
818 if ((so->so_faddr.s_addr&htonl(pData->netmask)) == special_addr.s_addr) {
819 int lastbyte=ntohl(so->so_faddr.s_addr) & ~pData->netmask;
820 if (lastbyte!=CTL_ALIAS && lastbyte!=CTL_DNS) {
821#if 0
822 if(lastbyte==CTL_CMD || lastbyte==CTL_EXEC) {
823 /* Command or exec adress */
824 so->so_state |= SS_CTL;
825 } else
826#endif
827 {
828 /* May be an add exec */
829 struct ex_list *ex_ptr;
830 for(ex_ptr = exec_list; ex_ptr; ex_ptr = ex_ptr->ex_next) {
831 if(ex_ptr->ex_fport == so->so_fport &&
832 lastbyte == ex_ptr->ex_addr) {
833 so->so_state |= SS_CTL;
834 break;
835 }
836 }
837 }
838 if(so->so_state & SS_CTL) goto cont_input;
839 }
840 /* CTL_ALIAS: Do nothing, tcp_fconnect will be called on it */
841 }
842
843 if (so->so_emu & EMU_NOCONNECT) {
844 so->so_emu &= ~EMU_NOCONNECT;
845 goto cont_input;
846 }
847
848 if((tcp_fconnect(pData, so) == -1) && (errno != EINPROGRESS) && (errno != EWOULDBLOCK)) {
849 u_char code=ICMP_UNREACH_NET;
850 DEBUG_MISC((dfd," tcp fconnect errno = %d-%s\n",
851 errno,strerror(errno)));
852 if(errno == ECONNREFUSED) {
853 /* ACK the SYN, send RST to refuse the connection */
854 tcp_respond(pData, tp, ti, m, ti->ti_seq+1, (tcp_seq)0,
855 TH_RST|TH_ACK);
856 } else {
857 if(errno == EHOSTUNREACH) code=ICMP_UNREACH_HOST;
858 HTONL(ti->ti_seq); /* restore tcp header */
859 HTONL(ti->ti_ack);
860 HTONS(ti->ti_win);
861 HTONS(ti->ti_urp);
862 m->m_data -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
863 m->m_len += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
864 *ip=save_ip;
865 icmp_error(pData, m, ICMP_UNREACH,code, 0,strerror(errno));
866 }
867 tp = tcp_close(pData, tp);
868 m_free(pData, m);
869 } else {
870 /*
871 * Haven't connected yet, save the current mbuf
872 * and ti, and return
873 * XXX Some OS's don't tell us whether the connect()
874 * succeeded or not. So we must time it out.
875 */
876 so->so_m = m;
877 so->so_ti = ti;
878 tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT;
879 tp->t_state = TCPS_SYN_RECEIVED;
880 }
881 return;
882
883 cont_conn:
884 /* m==NULL
885 * Check if the connect succeeded
886 */
887 if (so->so_state & SS_NOFDREF) {
888 tp = tcp_close(pData, tp);
889 goto dropwithreset;
890 }
891 cont_input:
892 tcp_template(tp);
893
894 if (optp)
895 tcp_dooptions(pData, tp, (u_char *)optp, optlen, ti);
896 /* , */
897 /* &ts_present, &ts_val, &ts_ecr); */
898
899 if (iss)
900 tp->iss = iss;
901 else
902 tp->iss = tcp_iss;
903 tcp_iss += TCP_ISSINCR/2;
904 tp->irs = ti->ti_seq;
905 tcp_sendseqinit(tp);
906 tcp_rcvseqinit(tp);
907 tp->t_flags |= TF_ACKNOW;
908 tp->t_state = TCPS_SYN_RECEIVED;
909 tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT;
910 tcpstat.tcps_accepts++;
911 goto trimthenstep6;
912 } /* case TCPS_LISTEN */
913
914 /*
915 * If the state is SYN_SENT:
916 * if seg contains an ACK, but not for our SYN, drop the input.
917 * if seg contains a RST, then drop the connection.
918 * if seg does not contain SYN, then drop it.
919 * Otherwise this is an acceptable SYN segment
920 * initialize tp->rcv_nxt and tp->irs
921 * if seg contains ack then advance tp->snd_una
922 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
923 * arrange for segment to be acked (eventually)
924 * continue processing rest of data/controls, beginning with URG
925 */
926 case TCPS_SYN_SENT:
927 if ((tiflags & TH_ACK) &&
928 (SEQ_LEQ(ti->ti_ack, tp->iss) ||
929 SEQ_GT(ti->ti_ack, tp->snd_max)))
930 goto dropwithreset;
931
932 if (tiflags & TH_RST) {
933 if (tiflags & TH_ACK)
934 tp = tcp_drop(pData, tp,0); /* XXX Check t_softerror! */
935 goto drop;
936 }
937
938 if ((tiflags & TH_SYN) == 0)
939 goto drop;
940 if (tiflags & TH_ACK) {
941 tp->snd_una = ti->ti_ack;
942 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
943 tp->snd_nxt = tp->snd_una;
944 }
945
946 tp->t_timer[TCPT_REXMT] = 0;
947 tp->irs = ti->ti_seq;
948 tcp_rcvseqinit(tp);
949 tp->t_flags |= TF_ACKNOW;
950 if (tiflags & TH_ACK && SEQ_GT(tp->snd_una, tp->iss)) {
951 tcpstat.tcps_connects++;
952 soisfconnected(so);
953 tp->t_state = TCPS_ESTABLISHED;
954
955 /* Do window scaling on this connection? */
956/* if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
957 * (TF_RCVD_SCALE|TF_REQ_SCALE)) {
958 * tp->snd_scale = tp->requested_s_scale;
959 * tp->rcv_scale = tp->request_r_scale;
960 * }
961 */
962#ifndef VBOX_WITH_BSD_TCP_REASS
963 (void) tcp_reass(pData, tp, (struct tcpiphdr *)0,
964 (struct mbuf *)0);
965#else /* VBOX_WITH_BSD_TCP_REASS */
966 (void) tcp_reass(pData, tp, (struct tcphdr *)0, NULL, (struct mbuf *)0);
967#endif /* VBOX_WITH_BSD_TCP_REASS */
968 /*
969 * if we didn't have to retransmit the SYN,
970 * use its rtt as our initial srtt & rtt var.
971 */
972 if (tp->t_rtt)
973 tcp_xmit_timer(pData, tp, tp->t_rtt);
974 } else
975 tp->t_state = TCPS_SYN_RECEIVED;
976
977trimthenstep6:
978 /*
979 * Advance ti->ti_seq to correspond to first data byte.
980 * If data, trim to stay within window,
981 * dropping FIN if necessary.
982 */
983 ti->ti_seq++;
984 if (ti->ti_len > tp->rcv_wnd) {
985 todrop = ti->ti_len - tp->rcv_wnd;
986 m_adj(m, -todrop);
987 ti->ti_len = tp->rcv_wnd;
988 tiflags &= ~TH_FIN;
989 tcpstat.tcps_rcvpackafterwin++;
990 tcpstat.tcps_rcvbyteafterwin += todrop;
991 }
992 tp->snd_wl1 = ti->ti_seq - 1;
993 tp->rcv_up = ti->ti_seq;
994 goto step6;
995 } /* switch tp->t_state */
996 /*
997 * States other than LISTEN or SYN_SENT.
998 * First check timestamp, if present.
999 * Then check that at least some bytes of segment are within
1000 * receive window. If segment begins before rcv_nxt,
1001 * drop leading data (and SYN); if nothing left, just ack.
1002 *
1003 * RFC 1323 PAWS: If we have a timestamp reply on this segment
1004 * and it's less than ts_recent, drop it.
1005 */
1006/* if (ts_present && (tiflags & TH_RST) == 0 && tp->ts_recent &&
1007 * TSTMP_LT(ts_val, tp->ts_recent)) {
1008 *
1009 */ /* Check to see if ts_recent is over 24 days old. */
1010/* if ((int)(tcp_now - tp->ts_recent_age) > TCP_PAWS_IDLE) {
1011 */ /*
1012 * * Invalidate ts_recent. If this segment updates
1013 * * ts_recent, the age will be reset later and ts_recent
1014 * * will get a valid value. If it does not, setting
1015 * * ts_recent to zero will at least satisfy the
1016 * * requirement that zero be placed in the timestamp
1017 * * echo reply when ts_recent isn't valid. The
1018 * * age isn't reset until we get a valid ts_recent
1019 * * because we don't want out-of-order segments to be
1020 * * dropped when ts_recent is old.
1021 * */
1022/* tp->ts_recent = 0;
1023 * } else {
1024 * tcpstat.tcps_rcvduppack++;
1025 * tcpstat.tcps_rcvdupbyte += ti->ti_len;
1026 * tcpstat.tcps_pawsdrop++;
1027 * goto dropafterack;
1028 * }
1029 * }
1030 */
1031
1032 todrop = tp->rcv_nxt - ti->ti_seq;
1033 if (todrop > 0) {
1034 if (tiflags & TH_SYN) {
1035 tiflags &= ~TH_SYN;
1036 ti->ti_seq++;
1037 if (ti->ti_urp > 1)
1038 ti->ti_urp--;
1039 else
1040 tiflags &= ~TH_URG;
1041 todrop--;
1042 }
1043 /*
1044 * Following if statement from Stevens, vol. 2, p. 960.
1045 */
1046 if (todrop > ti->ti_len
1047 || (todrop == ti->ti_len && (tiflags & TH_FIN) == 0)) {
1048 /*
1049 * Any valid FIN must be to the left of the window.
1050 * At this point the FIN must be a duplicate or out
1051 * of sequence; drop it.
1052 */
1053 tiflags &= ~TH_FIN;
1054
1055 /*
1056 * Send an ACK to resynchronize and drop any data.
1057 * But keep on processing for RST or ACK.
1058 */
1059 tp->t_flags |= TF_ACKNOW;
1060 todrop = ti->ti_len;
1061 tcpstat.tcps_rcvduppack++;
1062 tcpstat.tcps_rcvdupbyte += todrop;
1063 } else {
1064 tcpstat.tcps_rcvpartduppack++;
1065 tcpstat.tcps_rcvpartdupbyte += todrop;
1066 }
1067 m_adj(m, todrop);
1068 ti->ti_seq += todrop;
1069 ti->ti_len -= todrop;
1070 if (ti->ti_urp > todrop)
1071 ti->ti_urp -= todrop;
1072 else {
1073 tiflags &= ~TH_URG;
1074 ti->ti_urp = 0;
1075 }
1076 }
1077 /*
1078 * If new data are received on a connection after the
1079 * user processes are gone, then RST the other end.
1080 */
1081 if ((so->so_state & SS_NOFDREF) &&
1082 tp->t_state > TCPS_CLOSE_WAIT && ti->ti_len) {
1083 tp = tcp_close(pData, tp);
1084 tcpstat.tcps_rcvafterclose++;
1085 goto dropwithreset;
1086 }
1087
1088 /*
1089 * If segment ends after window, drop trailing data
1090 * (and PUSH and FIN); if nothing left, just ACK.
1091 */
1092 todrop = (ti->ti_seq+ti->ti_len) - (tp->rcv_nxt+tp->rcv_wnd);
1093 if (todrop > 0) {
1094 tcpstat.tcps_rcvpackafterwin++;
1095 if (todrop >= ti->ti_len) {
1096 tcpstat.tcps_rcvbyteafterwin += ti->ti_len;
1097 /*
1098 * If a new connection request is received
1099 * while in TIME_WAIT, drop the old connection
1100 * and start over if the sequence numbers
1101 * are above the previous ones.
1102 */
1103 if (tiflags & TH_SYN &&
1104 tp->t_state == TCPS_TIME_WAIT &&
1105 SEQ_GT(ti->ti_seq, tp->rcv_nxt)) {
1106 iss = tp->rcv_nxt + TCP_ISSINCR;
1107 tp = tcp_close(pData, tp);
1108 goto findso;
1109 }
1110 /*
1111 * If window is closed can only take segments at
1112 * window edge, and have to drop data and PUSH from
1113 * incoming segments. Continue processing, but
1114 * remember to ack. Otherwise, drop segment
1115 * and ack.
1116 */
1117 if (tp->rcv_wnd == 0 && ti->ti_seq == tp->rcv_nxt) {
1118 tp->t_flags |= TF_ACKNOW;
1119 tcpstat.tcps_rcvwinprobe++;
1120 } else
1121 goto dropafterack;
1122 } else
1123 tcpstat.tcps_rcvbyteafterwin += todrop;
1124 m_adj(m, -todrop);
1125 ti->ti_len -= todrop;
1126 tiflags &= ~(TH_PUSH|TH_FIN);
1127 }
1128
1129 /*
1130 * If last ACK falls within this segment's sequence numbers,
1131 * record its timestamp.
1132 */
1133/* if (ts_present && SEQ_LEQ(ti->ti_seq, tp->last_ack_sent) &&
1134 * SEQ_LT(tp->last_ack_sent, ti->ti_seq + ti->ti_len +
1135 * ((tiflags & (TH_SYN|TH_FIN)) != 0))) {
1136 * tp->ts_recent_age = tcp_now;
1137 * tp->ts_recent = ts_val;
1138 * }
1139 */
1140
1141 /*
1142 * If the RST bit is set examine the state:
1143 * SYN_RECEIVED STATE:
1144 * If passive open, return to LISTEN state.
1145 * If active open, inform user that connection was refused.
1146 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT2, CLOSE_WAIT STATES:
1147 * Inform user that connection was reset, and close tcb.
1148 * CLOSING, LAST_ACK, TIME_WAIT STATES
1149 * Close the tcb.
1150 */
1151 if (tiflags&TH_RST) switch (tp->t_state) {
1152
1153 case TCPS_SYN_RECEIVED:
1154/* so->so_error = ECONNREFUSED; */
1155 goto close;
1156
1157 case TCPS_ESTABLISHED:
1158 case TCPS_FIN_WAIT_1:
1159 case TCPS_FIN_WAIT_2:
1160 case TCPS_CLOSE_WAIT:
1161/* so->so_error = ECONNRESET; */
1162 close:
1163 tp->t_state = TCPS_CLOSED;
1164 tcpstat.tcps_drops++;
1165 tp = tcp_close(pData, tp);
1166 goto drop;
1167
1168 case TCPS_CLOSING:
1169 case TCPS_LAST_ACK:
1170 case TCPS_TIME_WAIT:
1171 tp = tcp_close(pData, tp);
1172 goto drop;
1173 }
1174
1175 /*
1176 * If a SYN is in the window, then this is an
1177 * error and we send an RST and drop the connection.
1178 */
1179 if (tiflags & TH_SYN) {
1180 tp = tcp_drop(pData, tp,0);
1181 goto dropwithreset;
1182 }
1183
1184 /*
1185 * If the ACK bit is off we drop the segment and return.
1186 */
1187 if ((tiflags & TH_ACK) == 0) goto drop;
1188
1189 /*
1190 * Ack processing.
1191 */
1192 switch (tp->t_state) {
1193 /*
1194 * In SYN_RECEIVED state if the ack ACKs our SYN then enter
1195 * ESTABLISHED state and continue processing, otherwise
1196 * send an RST. una<=ack<=max
1197 */
1198 case TCPS_SYN_RECEIVED:
1199
1200 if (SEQ_GT(tp->snd_una, ti->ti_ack) ||
1201 SEQ_GT(ti->ti_ack, tp->snd_max))
1202 goto dropwithreset;
1203 tcpstat.tcps_connects++;
1204 tp->t_state = TCPS_ESTABLISHED;
1205 /*
1206 * The sent SYN is ack'ed with our sequence number +1
1207 * The first data byte already in the buffer will get
1208 * lost if no correction is made. This is only needed for
1209 * SS_CTL since the buffer is empty otherwise.
1210 * tp->snd_una++; or:
1211 */
1212 tp->snd_una=ti->ti_ack;
1213 if (so->so_state & SS_CTL) {
1214 /* So tcp_ctl reports the right state */
1215 ret = tcp_ctl(pData, so);
1216 if (ret == 1) {
1217 soisfconnected(so);
1218 so->so_state &= ~SS_CTL; /* success XXX */
1219 } else if (ret == 2) {
1220 so->so_state = SS_NOFDREF; /* CTL_CMD */
1221 } else {
1222 needoutput = 1;
1223 tp->t_state = TCPS_FIN_WAIT_1;
1224 }
1225 } else {
1226 soisfconnected(so);
1227 }
1228
1229 /* Do window scaling? */
1230/* if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
1231 * (TF_RCVD_SCALE|TF_REQ_SCALE)) {
1232 * tp->snd_scale = tp->requested_s_scale;
1233 * tp->rcv_scale = tp->request_r_scale;
1234 * }
1235 */
1236#ifndef VBOX_WITH_BSD_TCP_REASS
1237 (void) tcp_reass(pData, tp, (struct tcpiphdr *)0, (struct mbuf *)0);
1238#else /* VBOX_WITH_BSD_TCP_REASS */
1239 (void) tcp_reass(pData, tp, (struct tcphdr *)0, (int *)0, (struct mbuf *)0);
1240#endif /*VBOX_WITH_BSD_TCP_REASS*/
1241 tp->snd_wl1 = ti->ti_seq - 1;
1242 /* Avoid ack processing; snd_una==ti_ack => dup ack */
1243 goto synrx_to_est;
1244 /* fall into ... */
1245
1246 /*
1247 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
1248 * ACKs. If the ack is in the range
1249 * tp->snd_una < ti->ti_ack <= tp->snd_max
1250 * then advance tp->snd_una to ti->ti_ack and drop
1251 * data from the retransmission queue. If this ACK reflects
1252 * more up to date window information we update our window information.
1253 */
1254 case TCPS_ESTABLISHED:
1255 case TCPS_FIN_WAIT_1:
1256 case TCPS_FIN_WAIT_2:
1257 case TCPS_CLOSE_WAIT:
1258 case TCPS_CLOSING:
1259 case TCPS_LAST_ACK:
1260 case TCPS_TIME_WAIT:
1261
1262 if (SEQ_LEQ(ti->ti_ack, tp->snd_una)) {
1263 if (ti->ti_len == 0 && tiwin == tp->snd_wnd) {
1264 tcpstat.tcps_rcvdupack++;
1265 DEBUG_MISC((dfd," dup ack m = %lx so = %lx \n",
1266 (long )m, (long )so));
1267 /*
1268 * If we have outstanding data (other than
1269 * a window probe), this is a completely
1270 * duplicate ack (ie, window info didn't
1271 * change), the ack is the biggest we've
1272 * seen and we've seen exactly our rexmt
1273 * threshold of them, assume a packet
1274 * has been dropped and retransmit it.
1275 * Kludge snd_nxt & the congestion
1276 * window so we send only this one
1277 * packet.
1278 *
1279 * We know we're losing at the current
1280 * window size so do congestion avoidance
1281 * (set ssthresh to half the current window
1282 * and pull our congestion window back to
1283 * the new ssthresh).
1284 *
1285 * Dup acks mean that packets have left the
1286 * network (they're now cached at the receiver)
1287 * so bump cwnd by the amount in the receiver
1288 * to keep a constant cwnd packets in the
1289 * network.
1290 */
1291 if (tp->t_timer[TCPT_REXMT] == 0 ||
1292 ti->ti_ack != tp->snd_una)
1293 tp->t_dupacks = 0;
1294 else if (++tp->t_dupacks == tcprexmtthresh) {
1295 tcp_seq onxt = tp->snd_nxt;
1296 u_int win =
1297 min(tp->snd_wnd, tp->snd_cwnd) / 2 /
1298 tp->t_maxseg;
1299
1300 if (win < 2)
1301 win = 2;
1302 tp->snd_ssthresh = win * tp->t_maxseg;
1303 tp->t_timer[TCPT_REXMT] = 0;
1304 tp->t_rtt = 0;
1305 tp->snd_nxt = ti->ti_ack;
1306 tp->snd_cwnd = tp->t_maxseg;
1307 (void) tcp_output(pData, tp);
1308 tp->snd_cwnd = tp->snd_ssthresh +
1309 tp->t_maxseg * tp->t_dupacks;
1310 if (SEQ_GT(onxt, tp->snd_nxt))
1311 tp->snd_nxt = onxt;
1312 goto drop;
1313 } else if (tp->t_dupacks > tcprexmtthresh) {
1314 tp->snd_cwnd += tp->t_maxseg;
1315 (void) tcp_output(pData, tp);
1316 goto drop;
1317 }
1318 } else
1319 tp->t_dupacks = 0;
1320 break;
1321 }
1322 synrx_to_est:
1323 /*
1324 * If the congestion window was inflated to account
1325 * for the other side's cached packets, retract it.
1326 */
1327 if (tp->t_dupacks > tcprexmtthresh &&
1328 tp->snd_cwnd > tp->snd_ssthresh)
1329 tp->snd_cwnd = tp->snd_ssthresh;
1330 tp->t_dupacks = 0;
1331 if (SEQ_GT(ti->ti_ack, tp->snd_max)) {
1332 tcpstat.tcps_rcvacktoomuch++;
1333 goto dropafterack;
1334 }
1335 acked = ti->ti_ack - tp->snd_una;
1336 tcpstat.tcps_rcvackpack++;
1337 tcpstat.tcps_rcvackbyte += acked;
1338
1339 /*
1340 * If we have a timestamp reply, update smoothed
1341 * round trip time. If no timestamp is present but
1342 * transmit timer is running and timed sequence
1343 * number was acked, update smoothed round trip time.
1344 * Since we now have an rtt measurement, cancel the
1345 * timer backoff (cf., Phil Karn's retransmit alg.).
1346 * Recompute the initial retransmit timer.
1347 */
1348/* if (ts_present)
1349 * tcp_xmit_timer(tp, tcp_now-ts_ecr+1);
1350 * else
1351 */
1352 if (tp->t_rtt && SEQ_GT(ti->ti_ack, tp->t_rtseq))
1353 tcp_xmit_timer(pData, tp,tp->t_rtt);
1354
1355 /*
1356 * If all outstanding data is acked, stop retransmit
1357 * timer and remember to restart (more output or persist).
1358 * If there is more data to be acked, restart retransmit
1359 * timer, using current (possibly backed-off) value.
1360 */
1361 if (ti->ti_ack == tp->snd_max) {
1362 tp->t_timer[TCPT_REXMT] = 0;
1363 needoutput = 1;
1364 } else if (tp->t_timer[TCPT_PERSIST] == 0)
1365 tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
1366 /*
1367 * When new data is acked, open the congestion window.
1368 * If the window gives us less than ssthresh packets
1369 * in flight, open exponentially (maxseg per packet).
1370 * Otherwise open linearly: maxseg per window
1371 * (maxseg^2 / cwnd per packet).
1372 */
1373 {
1374 register u_int cw = tp->snd_cwnd;
1375 register u_int incr = tp->t_maxseg;
1376
1377 if (cw > tp->snd_ssthresh)
1378 incr = incr * incr / cw;
1379 tp->snd_cwnd = min(cw + incr, TCP_MAXWIN<<tp->snd_scale);
1380 }
1381 if (acked > so->so_snd.sb_cc) {
1382 tp->snd_wnd -= so->so_snd.sb_cc;
1383 sbdrop(&so->so_snd, (int )so->so_snd.sb_cc);
1384 ourfinisacked = 1;
1385 } else {
1386 sbdrop(&so->so_snd, acked);
1387 tp->snd_wnd -= acked;
1388 ourfinisacked = 0;
1389 }
1390 /*
1391 * XXX sowwakup is called when data is acked and there's room for
1392 * for more data... it should read() the socket
1393 */
1394/* if (so->so_snd.sb_flags & SB_NOTIFY)
1395 * sowwakeup(so);
1396 */
1397 tp->snd_una = ti->ti_ack;
1398 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
1399 tp->snd_nxt = tp->snd_una;
1400
1401 switch (tp->t_state) {
1402
1403 /*
1404 * In FIN_WAIT_1 STATE in addition to the processing
1405 * for the ESTABLISHED state if our FIN is now acknowledged
1406 * then enter FIN_WAIT_2.
1407 */
1408 case TCPS_FIN_WAIT_1:
1409 if (ourfinisacked) {
1410 /*
1411 * If we can't receive any more
1412 * data, then closing user can proceed.
1413 * Starting the timer is contrary to the
1414 * specification, but if we don't get a FIN
1415 * we'll hang forever.
1416 */
1417 if (so->so_state & SS_FCANTRCVMORE) {
1418 soisfdisconnected(so);
1419 tp->t_timer[TCPT_2MSL] = tcp_maxidle;
1420 }
1421 tp->t_state = TCPS_FIN_WAIT_2;
1422 }
1423 break;
1424
1425 /*
1426 * In CLOSING STATE in addition to the processing for
1427 * the ESTABLISHED state if the ACK acknowledges our FIN
1428 * then enter the TIME-WAIT state, otherwise ignore
1429 * the segment.
1430 */
1431 case TCPS_CLOSING:
1432 if (ourfinisacked) {
1433 tp->t_state = TCPS_TIME_WAIT;
1434 tcp_canceltimers(tp);
1435 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1436 soisfdisconnected(so);
1437 }
1438 break;
1439
1440 /*
1441 * In LAST_ACK, we may still be waiting for data to drain
1442 * and/or to be acked, as well as for the ack of our FIN.
1443 * If our FIN is now acknowledged, delete the TCB,
1444 * enter the closed state and return.
1445 */
1446 case TCPS_LAST_ACK:
1447 if (ourfinisacked) {
1448 tp = tcp_close(pData, tp);
1449 goto drop;
1450 }
1451 break;
1452
1453 /*
1454 * In TIME_WAIT state the only thing that should arrive
1455 * is a retransmission of the remote FIN. Acknowledge
1456 * it and restart the finack timer.
1457 */
1458 case TCPS_TIME_WAIT:
1459 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1460 goto dropafterack;
1461 }
1462 } /* switch(tp->t_state) */
1463
1464step6:
1465 /*
1466 * Update window information.
1467 * Don't look at window if no ACK: TAC's send garbage on first SYN.
1468 */
1469 if ((tiflags & TH_ACK) &&
1470 (SEQ_LT(tp->snd_wl1, ti->ti_seq) ||
1471 (tp->snd_wl1 == ti->ti_seq && (SEQ_LT(tp->snd_wl2, ti->ti_ack) ||
1472 (tp->snd_wl2 == ti->ti_ack && tiwin > tp->snd_wnd))))) {
1473 /* keep track of pure window updates */
1474 if (ti->ti_len == 0 &&
1475 tp->snd_wl2 == ti->ti_ack && tiwin > tp->snd_wnd)
1476 tcpstat.tcps_rcvwinupd++;
1477 tp->snd_wnd = tiwin;
1478 tp->snd_wl1 = ti->ti_seq;
1479 tp->snd_wl2 = ti->ti_ack;
1480 if (tp->snd_wnd > tp->max_sndwnd)
1481 tp->max_sndwnd = tp->snd_wnd;
1482 needoutput = 1;
1483 }
1484
1485 /*
1486 * Process segments with URG.
1487 */
1488 if ((tiflags & TH_URG) && ti->ti_urp &&
1489 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
1490 /*
1491 * This is a kludge, but if we receive and accept
1492 * random urgent pointers, we'll crash in
1493 * soreceive. It's hard to imagine someone
1494 * actually wanting to send this much urgent data.
1495 */
1496 if (ti->ti_urp + so->so_rcv.sb_cc > so->so_rcv.sb_datalen) {
1497 ti->ti_urp = 0;
1498 tiflags &= ~TH_URG;
1499 goto dodata;
1500 }
1501 /*
1502 * If this segment advances the known urgent pointer,
1503 * then mark the data stream. This should not happen
1504 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
1505 * a FIN has been received from the remote side.
1506 * In these states we ignore the URG.
1507 *
1508 * According to RFC961 (Assigned Protocols),
1509 * the urgent pointer points to the last octet
1510 * of urgent data. We continue, however,
1511 * to consider it to indicate the first octet
1512 * of data past the urgent section as the original
1513 * spec states (in one of two places).
1514 */
1515 if (SEQ_GT(ti->ti_seq+ti->ti_urp, tp->rcv_up)) {
1516 tp->rcv_up = ti->ti_seq + ti->ti_urp;
1517 so->so_urgc = so->so_rcv.sb_cc +
1518 (tp->rcv_up - tp->rcv_nxt); /* -1; */
1519 tp->rcv_up = ti->ti_seq + ti->ti_urp;
1520
1521 }
1522 } else
1523 /*
1524 * If no out of band data is expected,
1525 * pull receive urgent pointer along
1526 * with the receive window.
1527 */
1528 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
1529 tp->rcv_up = tp->rcv_nxt;
1530dodata:
1531
1532 /*
1533 * Process the segment text, merging it into the TCP sequencing queue,
1534 * and arranging for acknowledgment of receipt if necessary.
1535 * This process logically involves adjusting tp->rcv_wnd as data
1536 * is presented to the user (this happens in tcp_usrreq.c,
1537 * case PRU_RCVD). If a FIN has already been received on this
1538 * connection then we just ignore the text.
1539 */
1540 if ((ti->ti_len || (tiflags&TH_FIN)) &&
1541 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
1542#ifndef VBOX_WITH_BSD_TCP_REASS
1543 TCP_REASS(pData, tp, ti, m, so, tiflags);
1544#else /* VBOX_WITH_BSD_TCP_REASS */
1545 if (ti->ti_seq == tp->rcv_nxt
1546 && LIST_EMPTY(&tp->t_segq)
1547 && tp->t_state == TCPS_ESTABLISHED) {
1548 DELAY_ACK(tp, ti); /* little bit different from BSD declaration see netinet/tcp_input.c */
1549 tp->rcv_nxt += tlen;
1550 tiflags = ti->ti_t.th_flags & TH_FIN;
1551 tcpstat.tcps_rcvpack++;
1552 tcpstat.tcps_rcvbyte += tlen;
1553 if (so->so_state & SS_FCANTRCVMORE)
1554 m_freem(pData, m);
1555 else
1556 sbappend(pData, so, m);
1557 }
1558 else {
1559 tiflags = tcp_reass(pData, tp, &ti->ti_t, &tlen, m);
1560 tiflags |= TF_ACKNOW;
1561 }
1562#endif /* VBOX_WITH_BSD_TCP_REASS */
1563 /*
1564 * Note the amount of data that peer has sent into
1565 * our window, in order to estimate the sender's
1566 * buffer size.
1567 */
1568 len = so->so_rcv.sb_datalen - (tp->rcv_adv - tp->rcv_nxt);
1569 } else {
1570 mbuf_freed = 1; /* The mbuf must be freed, but only when its content is not needed anymore. */
1571 tiflags &= ~TH_FIN;
1572 }
1573
1574 /*
1575 * If FIN is received ACK the FIN and let the user know
1576 * that the connection is closing.
1577 */
1578 if (tiflags & TH_FIN) {
1579 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
1580 /*
1581 * If we receive a FIN we can't send more data,
1582 * set it SS_FDRAIN
1583 * Shutdown the socket if there is no rx data in the
1584 * buffer.
1585 * soread() is called on completion of shutdown() and
1586 * will got to TCPS_LAST_ACK, and use tcp_output()
1587 * to send the FIN.
1588 */
1589/* sofcantrcvmore(so); */
1590 sofwdrain(so);
1591
1592 tp->t_flags |= TF_ACKNOW;
1593 tp->rcv_nxt++;
1594 }
1595 switch (tp->t_state) {
1596
1597 /*
1598 * In SYN_RECEIVED and ESTABLISHED STATES
1599 * enter the CLOSE_WAIT state.
1600 */
1601 case TCPS_SYN_RECEIVED:
1602 case TCPS_ESTABLISHED:
1603 if(so->so_emu == EMU_CTL) /* no shutdown on socket */
1604 tp->t_state = TCPS_LAST_ACK;
1605 else
1606 tp->t_state = TCPS_CLOSE_WAIT;
1607 break;
1608
1609 /*
1610 * If still in FIN_WAIT_1 STATE FIN has not been acked so
1611 * enter the CLOSING state.
1612 */
1613 case TCPS_FIN_WAIT_1:
1614 tp->t_state = TCPS_CLOSING;
1615 break;
1616
1617 /*
1618 * In FIN_WAIT_2 state enter the TIME_WAIT state,
1619 * starting the time-wait timer, turning off the other
1620 * standard timers.
1621 */
1622 case TCPS_FIN_WAIT_2:
1623 tp->t_state = TCPS_TIME_WAIT;
1624 tcp_canceltimers(tp);
1625 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1626 soisfdisconnected(so);
1627 break;
1628
1629 /*
1630 * In TIME_WAIT state restart the 2 MSL time_wait timer.
1631 */
1632 case TCPS_TIME_WAIT:
1633 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1634 break;
1635 }
1636 }
1637
1638 /*
1639 * If this is a small packet, then ACK now - with Nagel
1640 * congestion avoidance sender won't send more until
1641 * he gets an ACK.
1642 *
1643 * See above.
1644 */
1645/* if (ti->ti_len && (unsigned)ti->ti_len < tp->t_maxseg) {
1646 */
1647/* if ((ti->ti_len && (unsigned)ti->ti_len < tp->t_maxseg &&
1648 * (so->so_iptos & IPTOS_LOWDELAY) == 0) ||
1649 * ((so->so_iptos & IPTOS_LOWDELAY) &&
1650 * ((struct tcpiphdr_2 *)ti)->first_char == (char)27)) {
1651 */
1652 if (ti->ti_len && (unsigned)ti->ti_len <= 5 &&
1653 ((struct tcpiphdr_2 *)ti)->first_char == (char)27) {
1654 tp->t_flags |= TF_ACKNOW;
1655 }
1656
1657 if (mbuf_freed) {
1658 m_free(pData, m);
1659 }
1660 /*
1661 * Return any desired output.
1662 */
1663 if (needoutput || (tp->t_flags & TF_ACKNOW)) {
1664 (void) tcp_output(pData, tp);
1665 }
1666 return;
1667
1668dropafterack:
1669 /*
1670 * Generate an ACK dropping incoming segment if it occupies
1671 * sequence space, where the ACK reflects our state.
1672 */
1673 if (tiflags & TH_RST)
1674 goto drop;
1675 m_freem(pData, m);
1676 tp->t_flags |= TF_ACKNOW;
1677 (void) tcp_output(pData, tp);
1678 return;
1679
1680dropwithreset:
1681 /* reuses m if m!=NULL, m_free() unnecessary */
1682 if (tiflags & TH_ACK)
1683 tcp_respond(pData, tp, ti, m, (tcp_seq)0, ti->ti_ack, TH_RST);
1684 else {
1685 if (tiflags & TH_SYN) ti->ti_len++;
1686 tcp_respond(pData, tp, ti, m, ti->ti_seq+ti->ti_len, (tcp_seq)0,
1687 TH_RST|TH_ACK);
1688 }
1689
1690 return;
1691
1692drop:
1693 /*
1694 * Drop space held by incoming segment and return.
1695 */
1696 m_free(pData, m);
1697
1698 return;
1699}
1700
1701 /* , ts_present, ts_val, ts_ecr) */
1702/* int *ts_present;
1703 * u_int32_t *ts_val, *ts_ecr;
1704 */
1705void
1706tcp_dooptions(PNATState pData, struct tcpcb *tp, u_char *cp, int cnt, struct tcpiphdr *ti)
1707{
1708 u_int16_t mss;
1709 int opt, optlen;
1710
1711 DEBUG_CALL("tcp_dooptions");
1712 DEBUG_ARGS((dfd," tp = %lx cnt=%i \n", (long )tp, cnt));
1713
1714 for (; cnt > 0; cnt -= optlen, cp += optlen) {
1715 opt = cp[0];
1716 if (opt == TCPOPT_EOL)
1717 break;
1718 if (opt == TCPOPT_NOP)
1719 optlen = 1;
1720 else {
1721 optlen = cp[1];
1722 if (optlen <= 0)
1723 break;
1724 }
1725 switch (opt) {
1726
1727 default:
1728 continue;
1729
1730 case TCPOPT_MAXSEG:
1731 if (optlen != TCPOLEN_MAXSEG)
1732 continue;
1733 if (!(ti->ti_flags & TH_SYN))
1734 continue;
1735 memcpy((char *) &mss, (char *) cp + 2, sizeof(mss));
1736 NTOHS(mss);
1737 (void) tcp_mss(pData, tp, mss); /* sets t_maxseg */
1738 break;
1739
1740/* case TCPOPT_WINDOW:
1741 * if (optlen != TCPOLEN_WINDOW)
1742 * continue;
1743 * if (!(ti->ti_flags & TH_SYN))
1744 * continue;
1745 * tp->t_flags |= TF_RCVD_SCALE;
1746 * tp->requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT);
1747 * break;
1748 */
1749/* case TCPOPT_TIMESTAMP:
1750 * if (optlen != TCPOLEN_TIMESTAMP)
1751 * continue;
1752 * *ts_present = 1;
1753 * memcpy((char *) ts_val, (char *)cp + 2, sizeof(*ts_val));
1754 * NTOHL(*ts_val);
1755 * memcpy((char *) ts_ecr, (char *)cp + 6, sizeof(*ts_ecr));
1756 * NTOHL(*ts_ecr);
1757 *
1758 */ /*
1759 * * A timestamp received in a SYN makes
1760 * * it ok to send timestamp requests and replies.
1761 * */
1762/* if (ti->ti_flags & TH_SYN) {
1763 * tp->t_flags |= TF_RCVD_TSTMP;
1764 * tp->ts_recent = *ts_val;
1765 * tp->ts_recent_age = tcp_now;
1766 * }
1767 */ break;
1768 }
1769 }
1770}
1771
1772
1773/*
1774 * Pull out of band byte out of a segment so
1775 * it doesn't appear in the user's data queue.
1776 * It is still reflected in the segment length for
1777 * sequencing purposes.
1778 */
1779
1780#ifdef notdef
1781
1782void
1783tcp_pulloutofband(so, ti, m)
1784 struct socket *so;
1785 struct tcpiphdr *ti;
1786 register struct mbuf *m;
1787{
1788 int cnt = ti->ti_urp - 1;
1789
1790 while (cnt >= 0) {
1791 if (m->m_len > cnt) {
1792 char *cp = mtod(m, caddr_t) + cnt;
1793 struct tcpcb *tp = sototcpcb(so);
1794
1795 tp->t_iobc = *cp;
1796 tp->t_oobflags |= TCPOOB_HAVEDATA;
1797 memcpy(sp, cp+1, (unsigned)(m->m_len - cnt - 1));
1798 m->m_len--;
1799 return;
1800 }
1801 cnt -= m->m_len;
1802 m = m->m_next; /* XXX WRONG! Fix it! */
1803 if (m == 0)
1804 break;
1805 }
1806 panic("tcp_pulloutofband");
1807}
1808
1809#endif /* notdef */
1810
1811/*
1812 * Collect new round-trip time estimate
1813 * and update averages and current timeout.
1814 */
1815
1816void
1817tcp_xmit_timer(PNATState pData, register struct tcpcb *tp, int rtt)
1818{
1819 register short delta;
1820
1821 DEBUG_CALL("tcp_xmit_timer");
1822 DEBUG_ARG("tp = %lx", (long)tp);
1823 DEBUG_ARG("rtt = %d", rtt);
1824
1825 tcpstat.tcps_rttupdated++;
1826 if (tp->t_srtt != 0) {
1827 /*
1828 * srtt is stored as fixed point with 3 bits after the
1829 * binary point (i.e., scaled by 8). The following magic
1830 * is equivalent to the smoothing algorithm in rfc793 with
1831 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
1832 * point). Adjust rtt to origin 0.
1833 */
1834 delta = rtt - 1 - (tp->t_srtt >> TCP_RTT_SHIFT);
1835 if ((tp->t_srtt += delta) <= 0)
1836 tp->t_srtt = 1;
1837 /*
1838 * We accumulate a smoothed rtt variance (actually, a
1839 * smoothed mean difference), then set the retransmit
1840 * timer to smoothed rtt + 4 times the smoothed variance.
1841 * rttvar is stored as fixed point with 2 bits after the
1842 * binary point (scaled by 4). The following is
1843 * equivalent to rfc793 smoothing with an alpha of .75
1844 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
1845 * rfc793's wired-in beta.
1846 */
1847 if (delta < 0)
1848 delta = -delta;
1849 delta -= (tp->t_rttvar >> TCP_RTTVAR_SHIFT);
1850 if ((tp->t_rttvar += delta) <= 0)
1851 tp->t_rttvar = 1;
1852 } else {
1853 /*
1854 * No rtt measurement yet - use the unsmoothed rtt.
1855 * Set the variance to half the rtt (so our first
1856 * retransmit happens at 3*rtt).
1857 */
1858 tp->t_srtt = rtt << TCP_RTT_SHIFT;
1859 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
1860 }
1861 tp->t_rtt = 0;
1862 tp->t_rxtshift = 0;
1863
1864 /*
1865 * the retransmit should happen at rtt + 4 * rttvar.
1866 * Because of the way we do the smoothing, srtt and rttvar
1867 * will each average +1/2 tick of bias. When we compute
1868 * the retransmit timer, we want 1/2 tick of rounding and
1869 * 1 extra tick because of +-1/2 tick uncertainty in the
1870 * firing of the timer. The bias will give us exactly the
1871 * 1.5 tick we need. But, because the bias is
1872 * statistical, we have to test that we don't drop below
1873 * the minimum feasible timer (which is 2 ticks).
1874 */
1875 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
1876 (short)tp->t_rttmin, TCPTV_REXMTMAX); /* XXX */
1877
1878 /*
1879 * We received an ack for a packet that wasn't retransmitted;
1880 * it is probably safe to discard any error indications we've
1881 * received recently. This isn't quite right, but close enough
1882 * for now (a route might have failed after we sent a segment,
1883 * and the return path might not be symmetrical).
1884 */
1885 tp->t_softerror = 0;
1886}
1887
1888/*
1889 * Determine a reasonable value for maxseg size.
1890 * If the route is known, check route for mtu.
1891 * If none, use an mss that can be handled on the outgoing
1892 * interface without forcing IP to fragment; if bigger than
1893 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
1894 * to utilize large mbufs. If no route is found, route has no mtu,
1895 * or the destination isn't local, use a default, hopefully conservative
1896 * size (usually 512 or the default IP max size, but no more than the mtu
1897 * of the interface), as we can't discover anything about intervening
1898 * gateways or networks. We also initialize the congestion/slow start
1899 * window to be a single segment if the destination isn't local.
1900 * While looking at the routing entry, we also initialize other path-dependent
1901 * parameters from pre-set or cached values in the routing entry.
1902 */
1903
1904int
1905tcp_mss(PNATState pData, register struct tcpcb *tp, u_int offer)
1906{
1907 struct socket *so = tp->t_socket;
1908 int mss;
1909
1910 DEBUG_CALL("tcp_mss");
1911 DEBUG_ARG("tp = %lx", (long)tp);
1912 DEBUG_ARG("offer = %d", offer);
1913
1914 mss = min(if_mtu, if_mru) - sizeof(struct tcpiphdr);
1915 if (offer)
1916 mss = min(mss, offer);
1917 mss = max(mss, 32);
1918 if (mss < tp->t_maxseg || offer != 0)
1919 tp->t_maxseg = mss;
1920
1921 tp->snd_cwnd = mss;
1922
1923 sbreserve(&so->so_snd, tcp_sndspace+((tcp_sndspace%mss)?(mss-(tcp_sndspace%mss)):0));
1924 sbreserve(&so->so_rcv, tcp_rcvspace+((tcp_rcvspace%mss)?(mss-(tcp_rcvspace%mss)):0));
1925
1926 DEBUG_MISC((dfd, " returning mss = %d\n", mss));
1927
1928 return mss;
1929}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette