VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/tcp_input.c@ 13738

Last change on this file since 13738 was 13738, checked in by vboxsync, 16 years ago

removing extra if/ifndefs
introduced deferred socket removal, to prevent deletion of a socket that is still locked

  • Property svn:eol-style set to native
File size: 51.0 KB
Line 
1/*
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)tcp_input.c 8.5 (Berkeley) 4/10/94
34 * tcp_input.c,v 1.10 1994/10/13 18:36:32 wollman Exp
35 */
36
37/*
38 * Changes and additions relating to SLiRP
39 * Copyright (c) 1995 Danny Gasparovski.
40 *
41 * Please read the file COPYRIGHT for the
42 * terms and conditions of the copyright.
43 */
44
45#include <slirp.h>
46#include "ip_icmp.h"
47
48
/* PAWS (RFC 1323): a stored timestamp older than 24 days (expressed in
 * slow-timer ticks) is considered stale and no longer trustworthy. */
#define TCP_PAWS_IDLE   (24 * 24 * 60 * 60 * PR_SLOWHZ)

/* for modulo comparisons of timestamps */
#define TSTMP_LT(a,b)   ((int)((a)-(b)) < 0)
#define TSTMP_GEQ(a,b)  ((int)((a)-(b)) >= 0)
54
/*
 * Insert segment ti into reassembly queue of tcp with
 * control block tp. Return TH_FIN if reassembly now includes
 * a segment with FIN. The macro form does the common case inline
 * (segment is the next to be received on an established connection,
 * and the queue is empty), avoiding linkage into and removal
 * from the queue and repetition of various conversions.
 * Set DELACK for segments received in order, but ack immediately
 * when segments are out of order (so fast retransmit can work).
 *
 * Note: the "queue is empty" test is u32_to_ptr(seg_next) == tp, i.e.
 * the circular queue head points back at the control block itself.
 * The slow path falls through to the tcp_reass() function below.
 */
#ifdef TCP_ACK_HACK
/* ACK-hack variant: a segment carrying TH_PUSH is acknowledged at once
 * (TF_ACKNOW) instead of via the delayed-ack timer, which improves
 * responsiveness for interactive traffic. */
#define TCP_REASS(pData, tp, ti, m, so, flags) {\
       if ((ti)->ti_seq == (tp)->rcv_nxt && \
           u32_to_ptr((pData), (tp)->seg_next, struct tcpcb *) == (tp) && \
           (tp)->t_state == TCPS_ESTABLISHED) {\
               if (ti->ti_flags & TH_PUSH) \
                       tp->t_flags |= TF_ACKNOW; \
               else \
                       tp->t_flags |= TF_DELACK; \
               (tp)->rcv_nxt += (ti)->ti_len; \
               flags = (ti)->ti_flags & TH_FIN; \
               tcpstat.tcps_rcvpack++;\
               tcpstat.tcps_rcvbyte += (ti)->ti_len;\
               if (so->so_emu) { \
                       if (tcp_emu((pData), (so),(m))) sbappend((pData), (so), (m)); \
               } else \
                       sbappend((pData), (so), (m)); \
/* sorwakeup(so); */ \
       } else {\
               (flags) = tcp_reass((pData), (tp), (ti), (m)); \
               tp->t_flags |= TF_ACKNOW; \
       } \
}
#else
/* Default variant: in-order segments always take the delayed-ack path
 * (TF_DELACK); out-of-order segments go through tcp_reass() and force
 * an immediate ack so the sender's fast retransmit can trigger. */
#define TCP_REASS(pData, tp, ti, m, so, flags) { \
       if ((ti)->ti_seq == (tp)->rcv_nxt && \
           u32_to_ptr((pData), (tp)->seg_next, struct tcpcb *) == (tp) && \
           (tp)->t_state == TCPS_ESTABLISHED) { \
               tp->t_flags |= TF_DELACK; \
               (tp)->rcv_nxt += (ti)->ti_len; \
               flags = (ti)->ti_flags & TH_FIN; \
               tcpstat.tcps_rcvpack++;\
               tcpstat.tcps_rcvbyte += (ti)->ti_len;\
               if (so->so_emu) { \
                       if (tcp_emu((pData), (so),(m))) sbappend((pData), (so), (m)); \
               } else \
                       sbappend((pData), (so), (m)); \
/* sorwakeup(so); */ \
       } else { \
               (flags) = tcp_reass((pData), (tp), (ti), (m)); \
               tp->t_flags |= TF_ACKNOW; \
       } \
}
#endif
109
/*
 * Insert segment ti (payload carried by mbuf m) into the reassembly
 * queue of control block tp, trimming or discarding data that overlaps
 * already-queued segments, then hand any now-contiguous data at the
 * left window edge up to the socket.
 *
 * pData - NAT instance state; required to turn the 32-bit queue links
 *         stored in the tcpiphdr into real pointers (u32_to_ptr).
 * tp    - connection control block owning the reassembly queue.
 * ti    - header of the arriving segment, or NULL to merely flush
 *         queued in-order data (used right after the connection
 *         becomes established).
 * m     - mbuf with the segment data; ownership passes here (it is
 *         queued, appended to the socket buffer, or freed).
 *
 * Returns TH_FIN if the data presented to the socket includes a
 * segment carrying FIN, otherwise 0.
 */
int
tcp_reass(PNATState pData, register struct tcpcb *tp, register struct tcpiphdr *ti, struct mbuf *m)
{
        register struct tcpiphdr *q;
        struct socket *so = tp->t_socket;
        int flags;

        /*
         * Call with ti==0 after become established to
         * force pre-ESTABLISHED data up to user socket.
         */
        if (ti == 0)
                goto present;

        /*
         * Find a segment which begins after this one does.
         * (The queue is circular; reaching (struct tcpiphdr *)tp means
         * we wrapped back to the head, i.e. no such segment.)
         */
        for (q = u32_to_ptr(pData, tp->seg_next, struct tcpiphdr *); q != (struct tcpiphdr *)tp;
             q = u32_to_ptr(pData, q->ti_next, struct tcpiphdr *))
                if (SEQ_GT(q->ti_seq, ti->ti_seq))
                        break;

        /*
         * If there is a preceding segment, it may provide some of
         * our data already. If so, drop the data from the incoming
         * segment. If it provides all of our data, drop us.
         */
        if (u32_to_ptr(pData, q->ti_prev, struct tcpiphdr *) != (struct tcpiphdr *)tp) {
                register int i;
                q = u32_to_ptr(pData, q->ti_prev, struct tcpiphdr *);
                /* conversion to int (in i) handles seq wraparound */
                i = q->ti_seq + q->ti_len - ti->ti_seq;
                if (i > 0) {
                        if (i >= ti->ti_len) {
                                /* Entirely duplicated by queued data: drop it. */
                                tcpstat.tcps_rcvduppack++;
                                tcpstat.tcps_rcvdupbyte += ti->ti_len;
                                m_freem(pData, m);
                                /*
                                 * Try to present any queued data
                                 * at the left window edge to the user.
                                 * This is needed after the 3-WHS
                                 * completes.
                                 */
                                goto present;   /* ??? */
                        }
                        /* Partial overlap: trim the duplicated head bytes. */
                        m_adj(m, i);
                        ti->ti_len -= i;
                        ti->ti_seq += i;
                }
                q = u32_to_ptr(pData, q->ti_next, struct tcpiphdr *);
        }
        tcpstat.tcps_rcvoopack++;
        tcpstat.tcps_rcvoobyte += ti->ti_len;
        /* Stash the mbuf pointer inside the queued header so it can be
         * recovered (REASS_MBUF_GET) when the segment is dequeued. */
        REASS_MBUF_SET(ti, m); /* XXX */

        /*
         * While we overlap succeeding segments trim them or,
         * if they are completely covered, dequeue them.
         */
        while (q != (struct tcpiphdr *)tp) {
                register int i = (ti->ti_seq + ti->ti_len) - q->ti_seq;
                if (i <= 0)
                        break;
                if (i < q->ti_len) {
                        /* Partial overlap: shave the covered head off the
                         * queued segment and stop scanning. */
                        q->ti_seq += i;
                        q->ti_len -= i;
                        m_adj(REASS_MBUF_GET(q), i);
                        break;
                }
                /* Queued segment completely covered: advance q first, then
                 * unlink and free the element now at q->ti_prev. */
                q = u32_to_ptr(pData, q->ti_next, struct tcpiphdr *);
                m = REASS_MBUF_GET(u32_to_ptr(pData, q->ti_prev, struct tcpiphdr *));
                remque_32(pData, u32_to_ptr(pData, q->ti_prev, struct tcpiphdr *));
                m_freem(pData, m);
        }

        /*
         * Stick new segment in its place.
         */
        insque_32(pData, ti, u32_to_ptr(pData, q->ti_prev, struct tcpiphdr *));

present:
        /*
         * Present data to user, advancing rcv_nxt through
         * completed sequence space.
         */
        if (!TCPS_HAVEESTABLISHED(tp->t_state))
                return (0);
        ti = u32_to_ptr(pData, tp->seg_next, struct tcpiphdr *);
        /* Nothing queued, or the first queued segment is not the next
         * expected byte: nothing contiguous to deliver yet. */
        if (ti == (struct tcpiphdr *)tp || ti->ti_seq != tp->rcv_nxt)
                return (0);
        if (tp->t_state == TCPS_SYN_RECEIVED && ti->ti_len)
                return (0);
        do {
                tp->rcv_nxt += ti->ti_len;
                flags = ti->ti_flags & TH_FIN;
                remque_32(pData, ti);
                m = REASS_MBUF_GET(ti); /* XXX mbuf stashed by REASS_MBUF_SET above */
                ti = u32_to_ptr(pData, ti->ti_next, struct tcpiphdr *);
/*              if (so->so_state & SS_FCANTRCVMORE) */
                if (so->so_state & SS_FCANTSENDMORE)
                        m_freem(pData, m);
                else {
                        /* so_emu marks emulated protocols; tcp_emu decides
                         * whether the data still goes to the socket buffer. */
                        if (so->so_emu) {
                                if (tcp_emu(pData, so,m)) sbappend(pData, so, m);
                        } else
                                sbappend(pData, so, m);
                }
        } while (ti != (struct tcpiphdr *)tp && ti->ti_seq == tp->rcv_nxt);
/*      sorwakeup(so); */
        return (flags);
}
221
222/*
223 * TCP input routine, follows pages 65-76 of the
224 * protocol specification dated September, 1981 very closely.
225 */
226void
227tcp_input(PNATState pData, register struct mbuf *m, int iphlen, struct socket *inso)
228{
229 struct ip save_ip, *ip;
230 register struct tcpiphdr *ti;
231 caddr_t optp = NULL;
232 int optlen = 0;
233 int len, tlen, off;
234 register struct tcpcb *tp = 0;
235 register int tiflags;
236 struct socket *so = 0;
237 int todrop, acked, ourfinisacked, needoutput = 0;
238/* int dropsocket = 0; */
239 int iss = 0;
240 u_long tiwin;
241 int ret;
242/* int ts_present = 0; */
243 int mbuf_freed = 0;
244
245 DEBUG_CALL("tcp_input");
246 DEBUG_ARGS((dfd," m = %8lx iphlen = %2d inso = %lx\n",
247 (long )m, iphlen, (long )inso ));
248#ifdef VBOX_WITH_SYNC_SLIRP
249#if 0
250#define return \
251do { \
252 fprintf(stderr, "%s:%d\n", __FILE__, __LINE__); \
253 return; \
254}while(0)
255#endif
256#endif
257
258 if (inso != NULL) {
259 VBOX_SLIRP_LOCK(inso->so_mutex);
260 }
261
262 /*
263 * If called with m == 0, then we're continuing the connect
264 */
265 if (m == NULL) {
266 so = inso;
267
268 /* Re-set a few variables */
269 tp = sototcpcb(so);
270 m = so->so_m;
271 VBOX_SLIRP_LOCK(m->m_mutex);
272 so->so_m = 0;
273 ti = so->so_ti;
274 tiwin = ti->ti_win;
275 tiflags = ti->ti_flags;
276
277 goto cont_conn;
278 }
279 if (inso != NULL) {
280 VBOX_SLIRP_UNLOCK(inso->so_mutex);
281 }
282 VBOX_SLIRP_LOCK(m->m_mutex);
283
284
285 tcpstat.tcps_rcvtotal++;
286 /*
287 * Get IP and TCP header together in first mbuf.
288 * Note: IP leaves IP header in first mbuf.
289 */
290 ti = mtod(m, struct tcpiphdr *);
291 if (iphlen > sizeof(struct ip )) {
292 ip_stripoptions(m, (struct mbuf *)0);
293 iphlen=sizeof(struct ip );
294 }
295 /* XXX Check if too short */
296
297
298 /*
299 * Save a copy of the IP header in case we want restore it
300 * for sending an ICMP error message in response.
301 */
302 ip=mtod(m, struct ip *);
303 save_ip = *ip;
304 save_ip.ip_len+= iphlen;
305
306 /*
307 * Checksum extended TCP header and data.
308 */
309 tlen = ((struct ip *)ti)->ip_len;
310 ti->ti_next = ti->ti_prev = 0;
311 ti->ti_x1 = 0;
312 ti->ti_len = htons((u_int16_t)tlen);
313 len = sizeof(struct ip ) + tlen;
314 /* keep checksum for ICMP reply
315 * ti->ti_sum = cksum(m, len);
316 * if (ti->ti_sum) { */
317 if(cksum(m, len)) {
318 tcpstat.tcps_rcvbadsum++;
319 goto drop;
320 }
321
322 /*
323 * Check that TCP offset makes sense,
324 * pull out TCP options and adjust length. XXX
325 */
326 off = ti->ti_off << 2;
327 if (off < sizeof (struct tcphdr) || off > tlen) {
328 tcpstat.tcps_rcvbadoff++;
329 goto drop;
330 }
331 tlen -= off;
332 ti->ti_len = tlen;
333 if (off > sizeof (struct tcphdr)) {
334 optlen = off - sizeof (struct tcphdr);
335 optp = mtod(m, caddr_t) + sizeof (struct tcpiphdr);
336
337 /*
338 * Do quick retrieval of timestamp options ("options
339 * prediction?"). If timestamp is the only option and it's
340 * formatted as recommended in RFC 1323 appendix A, we
341 * quickly get the values now and not bother calling
342 * tcp_dooptions(), etc.
343 */
344/* if ((optlen == TCPOLEN_TSTAMP_APPA ||
345 * (optlen > TCPOLEN_TSTAMP_APPA &&
346 * optp[TCPOLEN_TSTAMP_APPA] == TCPOPT_EOL)) &&
347 * *(u_int32_t *)optp == htonl(TCPOPT_TSTAMP_HDR) &&
348 * (ti->ti_flags & TH_SYN) == 0) {
349 * ts_present = 1;
350 * ts_val = ntohl(*(u_int32_t *)(optp + 4));
351 * ts_ecr = ntohl(*(u_int32_t *)(optp + 8));
352 * optp = NULL; / * we've parsed the options * /
353 * }
354 */
355 }
356 tiflags = ti->ti_flags;
357
358 /*
359 * Convert TCP protocol specific fields to host format.
360 */
361 NTOHL(ti->ti_seq);
362 NTOHL(ti->ti_ack);
363 NTOHS(ti->ti_win);
364 NTOHS(ti->ti_urp);
365
366 /*
367 * Drop TCP, IP headers and TCP options.
368 */
369 m->m_data += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
370 m->m_len -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
371
372 /*
373 * Locate pcb for segment.
374 */
375findso:
376 if (so != NULL) {
377 VBOX_SLIRP_UNLOCK(so->so_mutex);
378 }
379 VBOX_SLIRP_LOCK(pData->tcp_last_so_mutex);
380 so = tcp_last_so;
381 /* this checking for making sure that we're not trying to hold mutex on head list*/
382 if (tcp_last_so != &tcb) {
383 VBOX_SLIRP_LOCK(so->so_mutex);
384 }
385 VBOX_SLIRP_UNLOCK(pData->tcp_last_so_mutex);
386
387 if (so->so_fport != ti->ti_dport ||
388 so->so_lport != ti->ti_sport ||
389 so->so_laddr.s_addr != ti->ti_src.s_addr ||
390 so->so_faddr.s_addr != ti->ti_dst.s_addr) {
391 /*To make sure that we don't try to release mutex on head of the socket queue*/
392 if (so != &tcb) {
393 VBOX_SLIRP_UNLOCK(so->so_mutex);
394 }
395 so = solookup(&tcb, ti->ti_src, ti->ti_sport,
396 ti->ti_dst, ti->ti_dport);
397 if (so) {
398 VBOX_SLIRP_LOCK(so->so_mutex);
399 VBOX_SLIRP_LOCK(pData->tcp_last_so_mutex);
400 tcp_last_so = so;
401 VBOX_SLIRP_UNLOCK(pData->tcp_last_so_mutex);
402 }
403 ++tcpstat.tcps_socachemiss;
404 }
405
406 /*
407 * If the state is CLOSED (i.e., TCB does not exist) then
408 * all data in the incoming segment is discarded.
409 * If the TCB exists but is in CLOSED state, it is embryonic,
410 * but should either do a listen or a connect soon.
411 *
412 * state == CLOSED means we've done socreate() but haven't
413 * attached it to a protocol yet...
414 *
415 * XXX If a TCB does not exist, and the TH_SYN flag is
416 * the only flag set, then create a session, mark it
417 * as if it was LISTENING, and continue...
418 */
419 if (so == 0) {
420 if ((tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) != TH_SYN)
421 goto dropwithreset;
422
423 if ((so = socreate()) == NULL)
424 goto dropwithreset;
425
426 if (inso != NULL) VBOX_SLIRP_UNLOCK(inso->so_mutex);
427 if (tcp_attach(pData, so) < 0) {
428 VBOX_SLIRP_UNLOCK(so->so_mutex);
429 VBOX_SLIRP_LOCK_DESTROY(so->so_mutex);
430 free(so); /* Not sofree (if it failed, it's not insqued) */
431#ifdef VBOX_WITH_SYNC_SLIRP
432 so = NULL;
433#endif
434 goto dropwithreset;
435 }
436 VBOX_SLIRP_LOCK(so->so_mutex);
437
438 sbreserve(&so->so_snd, tcp_sndspace);
439 sbreserve(&so->so_rcv, tcp_rcvspace);
440
441 /* tcp_last_so = so; */ /* XXX ? */
442 /* tp = sototcpcb(so); */
443
444 so->so_laddr = ti->ti_src;
445 so->so_lport = ti->ti_sport;
446 so->so_faddr = ti->ti_dst;
447 so->so_fport = ti->ti_dport;
448
449 if ((so->so_iptos = tcp_tos(so)) == 0)
450 so->so_iptos = ((struct ip *)ti)->ip_tos;
451
452 tp = sototcpcb(so);
453 tp->t_state = TCPS_LISTEN;
454 }
455
456 /*
457 * If this is a still-connecting socket, this probably
458 * a retransmit of the SYN. Whether it's a retransmit SYN
459 * or something else, we nuke it.
460 */
461 if (so->so_state & SS_ISFCONNECTING)
462 goto drop;
463
464 tp = sototcpcb(so);
465
466 /* XXX Should never fail */
467 if (tp == 0)
468 goto dropwithreset;
469 if (tp->t_state == TCPS_CLOSED)
470 goto drop;
471
472 /* Unscale the window into a 32-bit value. */
473/* if ((tiflags & TH_SYN) == 0)
474 * tiwin = ti->ti_win << tp->snd_scale;
475 * else
476 */
477 tiwin = ti->ti_win;
478
479 /*
480 * Segment received on connection.
481 * Reset idle time and keep-alive timer.
482 */
483 tp->t_idle = 0;
484 if (so_options)
485 tp->t_timer[TCPT_KEEP] = tcp_keepintvl;
486 else
487 tp->t_timer[TCPT_KEEP] = tcp_keepidle;
488
489 /*
490 * Process options if not in LISTEN state,
491 * else do it below (after getting remote address).
492 */
493 if (optp && tp->t_state != TCPS_LISTEN)
494 tcp_dooptions(pData, tp, (u_char *)optp, optlen, ti);
495/* , */
496/* &ts_present, &ts_val, &ts_ecr); */
497
498 /*
499 * Header prediction: check for the two common cases
500 * of a uni-directional data xfer. If the packet has
501 * no control flags, is in-sequence, the window didn't
502 * change and we're not retransmitting, it's a
503 * candidate. If the length is zero and the ack moved
504 * forward, we're the sender side of the xfer. Just
505 * free the data acked & wake any higher level process
506 * that was blocked waiting for space. If the length
507 * is non-zero and the ack didn't move, we're the
508 * receiver side. If we're getting packets in-order
509 * (the reassembly queue is empty), add the data to
510 * the socket buffer and note that we need a delayed ack.
511 *
512 * XXX Some of these tests are not needed
513 * eg: the tiwin == tp->snd_wnd prevents many more
514 * predictions.. with no *real* advantage..
515 */
516 if (tp->t_state == TCPS_ESTABLISHED &&
517 (tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
518/* (!ts_present || TSTMP_GEQ(ts_val, tp->ts_recent)) && */
519 ti->ti_seq == tp->rcv_nxt &&
520 tiwin && tiwin == tp->snd_wnd &&
521 tp->snd_nxt == tp->snd_max) {
522 /*
523 * If last ACK falls within this segment's sequence numbers,
524 * record the timestamp.
525 */
526/* if (ts_present && SEQ_LEQ(ti->ti_seq, tp->last_ack_sent) &&
527 * SEQ_LT(tp->last_ack_sent, ti->ti_seq + ti->ti_len)) {
528 * tp->ts_recent_age = tcp_now;
529 * tp->ts_recent = ts_val;
530 * }
531 */
532 if (ti->ti_len == 0) {
533 if (SEQ_GT(ti->ti_ack, tp->snd_una) &&
534 SEQ_LEQ(ti->ti_ack, tp->snd_max) &&
535 tp->snd_cwnd >= tp->snd_wnd) {
536 /*
537 * this is a pure ack for outstanding data.
538 */
539 ++tcpstat.tcps_predack;
540/* if (ts_present)
541 * tcp_xmit_timer(tp, tcp_now-ts_ecr+1);
542 * else
543 */ if (tp->t_rtt &&
544 SEQ_GT(ti->ti_ack, tp->t_rtseq))
545 tcp_xmit_timer(pData, tp, tp->t_rtt);
546 acked = ti->ti_ack - tp->snd_una;
547 tcpstat.tcps_rcvackpack++;
548 tcpstat.tcps_rcvackbyte += acked;
549 sbdrop(&so->so_snd, acked);
550 tp->snd_una = ti->ti_ack;
551 m_freem(pData, m);
552 if (m != NULL) {
553 VBOX_SLIRP_UNLOCK(m->m_mutex);
554 }
555
556 /*
557 * If all outstanding data are acked, stop
558 * retransmit timer, otherwise restart timer
559 * using current (possibly backed-off) value.
560 * If process is waiting for space,
561 * wakeup/selwakeup/signal. If data
562 * are ready to send, let tcp_output
563 * decide between more output or persist.
564 */
565 if (tp->snd_una == tp->snd_max)
566 tp->t_timer[TCPT_REXMT] = 0;
567 else if (tp->t_timer[TCPT_PERSIST] == 0)
568 tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
569
570 /*
571 * There's room in so_snd, sowwakup will read()
572 * from the socket if we can
573 */
574/* if (so->so_snd.sb_flags & SB_NOTIFY)
575 * sowwakeup(so);
576 */
577 /*
578 * This is called because sowwakeup might have
579 * put data into so_snd. Since we don't so sowwakeup,
580 * we don't need this.. XXX???
581 */
582 if (so->so_snd.sb_cc)
583 (void) tcp_output(pData, tp);
584 VBOX_SLIRP_UNLOCK(so->so_mutex);
585 return;
586 }
587 } else if (ti->ti_ack == tp->snd_una &&
588 u32_to_ptr(pData, tp->seg_next, struct tcpcb *) == tp &&
589 ti->ti_len <= sbspace(&so->so_rcv)) {
590 /*
591 * this is a pure, in-sequence data packet
592 * with nothing on the reassembly queue and
593 * we have enough buffer space to take it.
594 */
595 ++tcpstat.tcps_preddat;
596 tp->rcv_nxt += ti->ti_len;
597 tcpstat.tcps_rcvpack++;
598 tcpstat.tcps_rcvbyte += ti->ti_len;
599 /*
600 * Add data to socket buffer.
601 */
602 if (so->so_emu) {
603 if (tcp_emu(pData, so,m)) sbappend(pData, so, m);
604 } else
605 sbappend(pData, so, m);
606
607 /*
608 * XXX This is called when data arrives. Later, check
609 * if we can actually write() to the socket
610 * XXX Need to check? It's be NON_BLOCKING
611 */
612/* sorwakeup(so); */
613
614 /*
615 * If this is a short packet, then ACK now - with Nagel
616 * congestion avoidance sender won't send more until
617 * he gets an ACK.
618 *
619 * It is better to not delay acks at all to maximize
620 * TCP throughput. See RFC 2581.
621 */
622 tp->t_flags |= TF_ACKNOW;
623 tcp_output(pData, tp);
624 VBOX_SLIRP_UNLOCK(so->so_mutex);
625 if (m != NULL) {
626 VBOX_SLIRP_UNLOCK(m->m_mutex);
627 }
628 return;
629 }
630 } /* header prediction */
631 /*
632 * Calculate amount of space in receive window,
633 * and then do TCP input processing.
634 * Receive window is amount of space in rcv queue,
635 * but not less than advertised window.
636 */
637 { int win;
638 win = sbspace(&so->so_rcv);
639 if (win < 0)
640 win = 0;
641 tp->rcv_wnd = max(win, (int)(tp->rcv_adv - tp->rcv_nxt));
642 }
643
644 switch (tp->t_state) {
645
646 /*
647 * If the state is LISTEN then ignore segment if it contains an RST.
648 * If the segment contains an ACK then it is bad and send a RST.
649 * If it does not contain a SYN then it is not interesting; drop it.
650 * Don't bother responding if the destination was a broadcast.
651 * Otherwise initialize tp->rcv_nxt, and tp->irs, select an initial
652 * tp->iss, and send a segment:
653 * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
654 * Also initialize tp->snd_nxt to tp->iss+1 and tp->snd_una to tp->iss.
655 * Fill in remote peer address fields if not previously specified.
656 * Enter SYN_RECEIVED state, and process any other fields of this
657 * segment in this state.
658 */
659 case TCPS_LISTEN: {
660
661 if (tiflags & TH_RST)
662 goto drop;
663 if (tiflags & TH_ACK)
664 goto dropwithreset;
665 if ((tiflags & TH_SYN) == 0)
666 goto drop;
667
668 /*
669 * This has way too many gotos...
670 * But a bit of spaghetti code never hurt anybody :)
671 */
672
673 /*
674 * If this is destined for the control address, then flag to
675 * tcp_ctl once connected, otherwise connect
676 */
677 if ((so->so_faddr.s_addr&htonl(pData->netmask)) == special_addr.s_addr) {
678 int lastbyte=ntohl(so->so_faddr.s_addr) & ~pData->netmask;
679 if (lastbyte!=CTL_ALIAS && lastbyte!=CTL_DNS) {
680#if 0
681 if(lastbyte==CTL_CMD || lastbyte==CTL_EXEC) {
682 /* Command or exec adress */
683 so->so_state |= SS_CTL;
684 } else
685#endif
686 {
687 /* May be an add exec */
688 struct ex_list *ex_ptr;
689 for(ex_ptr = exec_list; ex_ptr; ex_ptr = ex_ptr->ex_next) {
690 if(ex_ptr->ex_fport == so->so_fport &&
691 lastbyte == ex_ptr->ex_addr) {
692 so->so_state |= SS_CTL;
693 break;
694 }
695 }
696 }
697 if(so->so_state & SS_CTL) goto cont_input;
698 }
699 /* CTL_ALIAS: Do nothing, tcp_fconnect will be called on it */
700 }
701
702 if (so->so_emu & EMU_NOCONNECT) {
703 so->so_emu &= ~EMU_NOCONNECT;
704 goto cont_input;
705 }
706
707 if((tcp_fconnect(pData, so) == -1) && (errno != EINPROGRESS) && (errno != EWOULDBLOCK)) {
708 u_char code=ICMP_UNREACH_NET;
709 DEBUG_MISC((dfd," tcp fconnect errno = %d-%s\n",
710 errno,strerror(errno)));
711 if(errno == ECONNREFUSED) {
712 /* ACK the SYN, send RST to refuse the connection */
713 tcp_respond(pData, tp, ti, m, ti->ti_seq+1, (tcp_seq)0,
714 TH_RST|TH_ACK);
715 } else {
716 if(errno == EHOSTUNREACH) code=ICMP_UNREACH_HOST;
717 HTONL(ti->ti_seq); /* restore tcp header */
718 HTONL(ti->ti_ack);
719 HTONS(ti->ti_win);
720 HTONS(ti->ti_urp);
721 m->m_data -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
722 m->m_len += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
723 *ip=save_ip;
724 icmp_error(pData, m, ICMP_UNREACH,code, 0,strerror(errno));
725 }
726 tp = tcp_close(pData, tp);
727 m_free(pData, m);
728 if (m != NULL) {
729 VBOX_SLIRP_UNLOCK(m->m_mutex);
730 }
731 } else {
732 /*
733 * Haven't connected yet, save the current mbuf
734 * and ti, and return
735 * XXX Some OS's don't tell us whether the connect()
736 * succeeded or not. So we must time it out.
737 */
738 so->so_m = m;
739 so->so_ti = ti;
740 tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT;
741 tp->t_state = TCPS_SYN_RECEIVED;
742 }
743 VBOX_SLIRP_UNLOCK(so->so_mutex);
744 if (m != NULL) {
745 VBOX_SLIRP_UNLOCK(m->m_mutex);
746 }
747 return;
748
749 cont_conn:
750 /* m==NULL
751 * Check if the connect succeeded
752 */
753 if (so->so_state & SS_NOFDREF) {
754 tp = tcp_close(pData, tp);
755 goto dropwithreset;
756 }
757 cont_input:
758 tcp_template(tp);
759
760 if (optp)
761 tcp_dooptions(pData, tp, (u_char *)optp, optlen, ti);
762 /* , */
763 /* &ts_present, &ts_val, &ts_ecr); */
764
765 if (iss)
766 tp->iss = iss;
767 else
768 tp->iss = tcp_iss;
769 tcp_iss += TCP_ISSINCR/2;
770 tp->irs = ti->ti_seq;
771 tcp_sendseqinit(tp);
772 tcp_rcvseqinit(tp);
773 tp->t_flags |= TF_ACKNOW;
774 tp->t_state = TCPS_SYN_RECEIVED;
775 tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT;
776 tcpstat.tcps_accepts++;
777 goto trimthenstep6;
778 } /* case TCPS_LISTEN */
779
780 /*
781 * If the state is SYN_SENT:
782 * if seg contains an ACK, but not for our SYN, drop the input.
783 * if seg contains a RST, then drop the connection.
784 * if seg does not contain SYN, then drop it.
785 * Otherwise this is an acceptable SYN segment
786 * initialize tp->rcv_nxt and tp->irs
787 * if seg contains ack then advance tp->snd_una
788 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
789 * arrange for segment to be acked (eventually)
790 * continue processing rest of data/controls, beginning with URG
791 */
792 case TCPS_SYN_SENT:
793 if ((tiflags & TH_ACK) &&
794 (SEQ_LEQ(ti->ti_ack, tp->iss) ||
795 SEQ_GT(ti->ti_ack, tp->snd_max)))
796 goto dropwithreset;
797
798 if (tiflags & TH_RST) {
799 if (tiflags & TH_ACK)
800 tp = tcp_drop(pData, tp,0); /* XXX Check t_softerror! */
801 goto drop;
802 }
803
804 if ((tiflags & TH_SYN) == 0)
805 goto drop;
806 if (tiflags & TH_ACK) {
807 tp->snd_una = ti->ti_ack;
808 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
809 tp->snd_nxt = tp->snd_una;
810 }
811
812 tp->t_timer[TCPT_REXMT] = 0;
813 tp->irs = ti->ti_seq;
814 tcp_rcvseqinit(tp);
815 tp->t_flags |= TF_ACKNOW;
816 if (tiflags & TH_ACK && SEQ_GT(tp->snd_una, tp->iss)) {
817 tcpstat.tcps_connects++;
818 soisfconnected(so);
819 tp->t_state = TCPS_ESTABLISHED;
820
821 /* Do window scaling on this connection? */
822/* if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
823 * (TF_RCVD_SCALE|TF_REQ_SCALE)) {
824 * tp->snd_scale = tp->requested_s_scale;
825 * tp->rcv_scale = tp->request_r_scale;
826 * }
827 */
828 (void) tcp_reass(pData, tp, (struct tcpiphdr *)0,
829 (struct mbuf *)0);
830 /*
831 * if we didn't have to retransmit the SYN,
832 * use its rtt as our initial srtt & rtt var.
833 */
834 if (tp->t_rtt)
835 tcp_xmit_timer(pData, tp, tp->t_rtt);
836 } else
837 tp->t_state = TCPS_SYN_RECEIVED;
838
839trimthenstep6:
840 /*
841 * Advance ti->ti_seq to correspond to first data byte.
842 * If data, trim to stay within window,
843 * dropping FIN if necessary.
844 */
845 ti->ti_seq++;
846 if (ti->ti_len > tp->rcv_wnd) {
847 todrop = ti->ti_len - tp->rcv_wnd;
848 m_adj(m, -todrop);
849 ti->ti_len = tp->rcv_wnd;
850 tiflags &= ~TH_FIN;
851 tcpstat.tcps_rcvpackafterwin++;
852 tcpstat.tcps_rcvbyteafterwin += todrop;
853 }
854 tp->snd_wl1 = ti->ti_seq - 1;
855 tp->rcv_up = ti->ti_seq;
856 goto step6;
857 } /* switch tp->t_state */
858 /*
859 * States other than LISTEN or SYN_SENT.
860 * First check timestamp, if present.
861 * Then check that at least some bytes of segment are within
862 * receive window. If segment begins before rcv_nxt,
863 * drop leading data (and SYN); if nothing left, just ack.
864 *
865 * RFC 1323 PAWS: If we have a timestamp reply on this segment
866 * and it's less than ts_recent, drop it.
867 */
868/* if (ts_present && (tiflags & TH_RST) == 0 && tp->ts_recent &&
869 * TSTMP_LT(ts_val, tp->ts_recent)) {
870 *
871 */ /* Check to see if ts_recent is over 24 days old. */
872/* if ((int)(tcp_now - tp->ts_recent_age) > TCP_PAWS_IDLE) {
873 */ /*
874 * * Invalidate ts_recent. If this segment updates
875 * * ts_recent, the age will be reset later and ts_recent
876 * * will get a valid value. If it does not, setting
877 * * ts_recent to zero will at least satisfy the
878 * * requirement that zero be placed in the timestamp
879 * * echo reply when ts_recent isn't valid. The
880 * * age isn't reset until we get a valid ts_recent
881 * * because we don't want out-of-order segments to be
882 * * dropped when ts_recent is old.
883 * */
884/* tp->ts_recent = 0;
885 * } else {
886 * tcpstat.tcps_rcvduppack++;
887 * tcpstat.tcps_rcvdupbyte += ti->ti_len;
888 * tcpstat.tcps_pawsdrop++;
889 * goto dropafterack;
890 * }
891 * }
892 */
893
894 todrop = tp->rcv_nxt - ti->ti_seq;
895 if (todrop > 0) {
896 if (tiflags & TH_SYN) {
897 tiflags &= ~TH_SYN;
898 ti->ti_seq++;
899 if (ti->ti_urp > 1)
900 ti->ti_urp--;
901 else
902 tiflags &= ~TH_URG;
903 todrop--;
904 }
905 /*
906 * Following if statement from Stevens, vol. 2, p. 960.
907 */
908 if (todrop > ti->ti_len
909 || (todrop == ti->ti_len && (tiflags & TH_FIN) == 0)) {
910 /*
911 * Any valid FIN must be to the left of the window.
912 * At this point the FIN must be a duplicate or out
913 * of sequence; drop it.
914 */
915 tiflags &= ~TH_FIN;
916
917 /*
918 * Send an ACK to resynchronize and drop any data.
919 * But keep on processing for RST or ACK.
920 */
921 tp->t_flags |= TF_ACKNOW;
922 todrop = ti->ti_len;
923 tcpstat.tcps_rcvduppack++;
924 tcpstat.tcps_rcvdupbyte += todrop;
925 } else {
926 tcpstat.tcps_rcvpartduppack++;
927 tcpstat.tcps_rcvpartdupbyte += todrop;
928 }
929 m_adj(m, todrop);
930 ti->ti_seq += todrop;
931 ti->ti_len -= todrop;
932 if (ti->ti_urp > todrop)
933 ti->ti_urp -= todrop;
934 else {
935 tiflags &= ~TH_URG;
936 ti->ti_urp = 0;
937 }
938 }
939 /*
940 * If new data are received on a connection after the
941 * user processes are gone, then RST the other end.
942 */
943 if ((so->so_state & SS_NOFDREF) &&
944 tp->t_state > TCPS_CLOSE_WAIT && ti->ti_len) {
945 tp = tcp_close(pData, tp);
946 tcpstat.tcps_rcvafterclose++;
947 goto dropwithreset;
948 }
949
950 /*
951 * If segment ends after window, drop trailing data
952 * (and PUSH and FIN); if nothing left, just ACK.
953 */
954 todrop = (ti->ti_seq+ti->ti_len) - (tp->rcv_nxt+tp->rcv_wnd);
955 if (todrop > 0) {
956 tcpstat.tcps_rcvpackafterwin++;
957 if (todrop >= ti->ti_len) {
958 tcpstat.tcps_rcvbyteafterwin += ti->ti_len;
959 /*
960 * If a new connection request is received
961 * while in TIME_WAIT, drop the old connection
962 * and start over if the sequence numbers
963 * are above the previous ones.
964 */
965 if (tiflags & TH_SYN &&
966 tp->t_state == TCPS_TIME_WAIT &&
967 SEQ_GT(ti->ti_seq, tp->rcv_nxt)) {
968 iss = tp->rcv_nxt + TCP_ISSINCR;
969 tp = tcp_close(pData, tp);
970 goto findso;
971 }
972 /*
973 * If window is closed can only take segments at
974 * window edge, and have to drop data and PUSH from
975 * incoming segments. Continue processing, but
976 * remember to ack. Otherwise, drop segment
977 * and ack.
978 */
979 if (tp->rcv_wnd == 0 && ti->ti_seq == tp->rcv_nxt) {
980 tp->t_flags |= TF_ACKNOW;
981 tcpstat.tcps_rcvwinprobe++;
982 } else
983 goto dropafterack;
984 } else
985 tcpstat.tcps_rcvbyteafterwin += todrop;
986 m_adj(m, -todrop);
987 ti->ti_len -= todrop;
988 tiflags &= ~(TH_PUSH|TH_FIN);
989 }
990
991 /*
992 * If last ACK falls within this segment's sequence numbers,
993 * record its timestamp.
994 */
995/* if (ts_present && SEQ_LEQ(ti->ti_seq, tp->last_ack_sent) &&
996 * SEQ_LT(tp->last_ack_sent, ti->ti_seq + ti->ti_len +
997 * ((tiflags & (TH_SYN|TH_FIN)) != 0))) {
998 * tp->ts_recent_age = tcp_now;
999 * tp->ts_recent = ts_val;
1000 * }
1001 */
1002
1003 /*
1004 * If the RST bit is set examine the state:
1005 * SYN_RECEIVED STATE:
1006 * If passive open, return to LISTEN state.
1007 * If active open, inform user that connection was refused.
1008 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT2, CLOSE_WAIT STATES:
1009 * Inform user that connection was reset, and close tcb.
1010 * CLOSING, LAST_ACK, TIME_WAIT STATES
1011 * Close the tcb.
1012 */
1013 if (tiflags&TH_RST) switch (tp->t_state) {
1014
1015 case TCPS_SYN_RECEIVED:
1016/* so->so_error = ECONNREFUSED; */
1017 goto close;
1018
1019 case TCPS_ESTABLISHED:
1020 case TCPS_FIN_WAIT_1:
1021 case TCPS_FIN_WAIT_2:
1022 case TCPS_CLOSE_WAIT:
1023/* so->so_error = ECONNRESET; */
1024 close:
1025 tp->t_state = TCPS_CLOSED;
1026 tcpstat.tcps_drops++;
1027 tp = tcp_close(pData, tp);
1028 goto drop;
1029
1030 case TCPS_CLOSING:
1031 case TCPS_LAST_ACK:
1032 case TCPS_TIME_WAIT:
1033 tp = tcp_close(pData, tp);
1034 goto drop;
1035 }
1036
1037 /*
1038 * If a SYN is in the window, then this is an
1039 * error and we send an RST and drop the connection.
1040 */
1041 if (tiflags & TH_SYN) {
1042 tp = tcp_drop(pData, tp,0);
1043 goto dropwithreset;
1044 }
1045
1046 /*
1047 * If the ACK bit is off we drop the segment and return.
1048 */
1049 if ((tiflags & TH_ACK) == 0) goto drop;
1050
1051 /*
1052 * Ack processing.
1053 */
1054 switch (tp->t_state) {
1055 /*
1056 * In SYN_RECEIVED state if the ack ACKs our SYN then enter
1057 * ESTABLISHED state and continue processing, otherwise
1058 * send an RST. una<=ack<=max
1059 */
1060 case TCPS_SYN_RECEIVED:
1061
1062 if (SEQ_GT(tp->snd_una, ti->ti_ack) ||
1063 SEQ_GT(ti->ti_ack, tp->snd_max))
1064 goto dropwithreset;
1065 tcpstat.tcps_connects++;
1066 tp->t_state = TCPS_ESTABLISHED;
1067 /*
1068 * The sent SYN is ack'ed with our sequence number +1
1069 * The first data byte already in the buffer will get
1070 * lost if no correction is made. This is only needed for
1071 * SS_CTL since the buffer is empty otherwise.
1072 * tp->snd_una++; or:
1073 */
1074 tp->snd_una=ti->ti_ack;
1075 if (so->so_state & SS_CTL) {
1076 /* So tcp_ctl reports the right state */
1077 ret = tcp_ctl(pData, so);
1078 if (ret == 1) {
1079 soisfconnected(so);
1080 so->so_state &= ~SS_CTL; /* success XXX */
1081 } else if (ret == 2) {
1082 so->so_state = SS_NOFDREF; /* CTL_CMD */
1083 } else {
1084 needoutput = 1;
1085 tp->t_state = TCPS_FIN_WAIT_1;
1086 }
1087 } else {
1088 soisfconnected(so);
1089 }
1090
1091 /* Do window scaling? */
1092/* if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
1093 * (TF_RCVD_SCALE|TF_REQ_SCALE)) {
1094 * tp->snd_scale = tp->requested_s_scale;
1095 * tp->rcv_scale = tp->request_r_scale;
1096 * }
1097 */
1098 (void) tcp_reass(pData, tp, (struct tcpiphdr *)0, (struct mbuf *)0);
1099 tp->snd_wl1 = ti->ti_seq - 1;
1100 /* Avoid ack processing; snd_una==ti_ack => dup ack */
1101 goto synrx_to_est;
1102 /* fall into ... */
1103
1104 /*
1105 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
1106 * ACKs. If the ack is in the range
1107 * tp->snd_una < ti->ti_ack <= tp->snd_max
1108 * then advance tp->snd_una to ti->ti_ack and drop
1109 * data from the retransmission queue. If this ACK reflects
1110 * more up to date window information we update our window information.
1111 */
1112 case TCPS_ESTABLISHED:
1113 case TCPS_FIN_WAIT_1:
1114 case TCPS_FIN_WAIT_2:
1115 case TCPS_CLOSE_WAIT:
1116 case TCPS_CLOSING:
1117 case TCPS_LAST_ACK:
1118 case TCPS_TIME_WAIT:
1119
1120 if (SEQ_LEQ(ti->ti_ack, tp->snd_una)) {
1121 if (ti->ti_len == 0 && tiwin == tp->snd_wnd) {
1122 tcpstat.tcps_rcvdupack++;
1123 DEBUG_MISC((dfd," dup ack m = %lx so = %lx \n",
1124 (long )m, (long )so));
1125 /*
1126 * If we have outstanding data (other than
1127 * a window probe), this is a completely
1128 * duplicate ack (ie, window info didn't
1129 * change), the ack is the biggest we've
1130 * seen and we've seen exactly our rexmt
1131 * threshold of them, assume a packet
1132 * has been dropped and retransmit it.
1133 * Kludge snd_nxt & the congestion
1134 * window so we send only this one
1135 * packet.
1136 *
1137 * We know we're losing at the current
1138 * window size so do congestion avoidance
1139 * (set ssthresh to half the current window
1140 * and pull our congestion window back to
1141 * the new ssthresh).
1142 *
1143 * Dup acks mean that packets have left the
1144 * network (they're now cached at the receiver)
1145 * so bump cwnd by the amount in the receiver
1146 * to keep a constant cwnd packets in the
1147 * network.
1148 */
1149 if (tp->t_timer[TCPT_REXMT] == 0 ||
1150 ti->ti_ack != tp->snd_una)
1151 tp->t_dupacks = 0;
1152 else if (++tp->t_dupacks == tcprexmtthresh) {
1153 tcp_seq onxt = tp->snd_nxt;
1154 u_int win =
1155 min(tp->snd_wnd, tp->snd_cwnd) / 2 /
1156 tp->t_maxseg;
1157
1158 if (win < 2)
1159 win = 2;
1160 tp->snd_ssthresh = win * tp->t_maxseg;
1161 tp->t_timer[TCPT_REXMT] = 0;
1162 tp->t_rtt = 0;
1163 tp->snd_nxt = ti->ti_ack;
1164 tp->snd_cwnd = tp->t_maxseg;
1165 (void) tcp_output(pData, tp);
1166 tp->snd_cwnd = tp->snd_ssthresh +
1167 tp->t_maxseg * tp->t_dupacks;
1168 if (SEQ_GT(onxt, tp->snd_nxt))
1169 tp->snd_nxt = onxt;
1170 goto drop;
1171 } else if (tp->t_dupacks > tcprexmtthresh) {
1172 tp->snd_cwnd += tp->t_maxseg;
1173 (void) tcp_output(pData, tp);
1174 goto drop;
1175 }
1176 } else
1177 tp->t_dupacks = 0;
1178 break;
1179 }
1180 synrx_to_est:
1181 /*
1182 * If the congestion window was inflated to account
1183 * for the other side's cached packets, retract it.
1184 */
1185 if (tp->t_dupacks > tcprexmtthresh &&
1186 tp->snd_cwnd > tp->snd_ssthresh)
1187 tp->snd_cwnd = tp->snd_ssthresh;
1188 tp->t_dupacks = 0;
1189 if (SEQ_GT(ti->ti_ack, tp->snd_max)) {
1190 tcpstat.tcps_rcvacktoomuch++;
1191 goto dropafterack;
1192 }
1193 acked = ti->ti_ack - tp->snd_una;
1194 tcpstat.tcps_rcvackpack++;
1195 tcpstat.tcps_rcvackbyte += acked;
1196
1197 /*
1198 * If we have a timestamp reply, update smoothed
1199 * round trip time. If no timestamp is present but
1200 * transmit timer is running and timed sequence
1201 * number was acked, update smoothed round trip time.
1202 * Since we now have an rtt measurement, cancel the
1203 * timer backoff (cf., Phil Karn's retransmit alg.).
1204 * Recompute the initial retransmit timer.
1205 */
1206/* if (ts_present)
1207 * tcp_xmit_timer(tp, tcp_now-ts_ecr+1);
1208 * else
1209 */
1210 if (tp->t_rtt && SEQ_GT(ti->ti_ack, tp->t_rtseq))
1211 tcp_xmit_timer(pData, tp,tp->t_rtt);
1212
1213 /*
1214 * If all outstanding data is acked, stop retransmit
1215 * timer and remember to restart (more output or persist).
1216 * If there is more data to be acked, restart retransmit
1217 * timer, using current (possibly backed-off) value.
1218 */
1219 if (ti->ti_ack == tp->snd_max) {
1220 tp->t_timer[TCPT_REXMT] = 0;
1221 needoutput = 1;
1222 } else if (tp->t_timer[TCPT_PERSIST] == 0)
1223 tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
1224 /*
1225 * When new data is acked, open the congestion window.
1226 * If the window gives us less than ssthresh packets
1227 * in flight, open exponentially (maxseg per packet).
1228 * Otherwise open linearly: maxseg per window
1229 * (maxseg^2 / cwnd per packet).
1230 */
1231 {
1232 register u_int cw = tp->snd_cwnd;
1233 register u_int incr = tp->t_maxseg;
1234
1235 if (cw > tp->snd_ssthresh)
1236 incr = incr * incr / cw;
1237 tp->snd_cwnd = min(cw + incr, TCP_MAXWIN<<tp->snd_scale);
1238 }
1239 if (acked > so->so_snd.sb_cc) {
1240 tp->snd_wnd -= so->so_snd.sb_cc;
1241 sbdrop(&so->so_snd, (int )so->so_snd.sb_cc);
1242 ourfinisacked = 1;
1243 } else {
1244 sbdrop(&so->so_snd, acked);
1245 tp->snd_wnd -= acked;
1246 ourfinisacked = 0;
1247 }
1248 /*
1249 * XXX sowwakup is called when data is acked and there's room for
1250 * for more data... it should read() the socket
1251 */
1252/* if (so->so_snd.sb_flags & SB_NOTIFY)
1253 * sowwakeup(so);
1254 */
1255 tp->snd_una = ti->ti_ack;
1256 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
1257 tp->snd_nxt = tp->snd_una;
1258
1259 switch (tp->t_state) {
1260
1261 /*
1262 * In FIN_WAIT_1 STATE in addition to the processing
1263 * for the ESTABLISHED state if our FIN is now acknowledged
1264 * then enter FIN_WAIT_2.
1265 */
1266 case TCPS_FIN_WAIT_1:
1267 if (ourfinisacked) {
1268 /*
1269 * If we can't receive any more
1270 * data, then closing user can proceed.
1271 * Starting the timer is contrary to the
1272 * specification, but if we don't get a FIN
1273 * we'll hang forever.
1274 */
1275 if (so->so_state & SS_FCANTRCVMORE) {
1276 soisfdisconnected(so);
1277 tp->t_timer[TCPT_2MSL] = tcp_maxidle;
1278 }
1279 tp->t_state = TCPS_FIN_WAIT_2;
1280 }
1281 break;
1282
1283 /*
1284 * In CLOSING STATE in addition to the processing for
1285 * the ESTABLISHED state if the ACK acknowledges our FIN
1286 * then enter the TIME-WAIT state, otherwise ignore
1287 * the segment.
1288 */
1289 case TCPS_CLOSING:
1290 if (ourfinisacked) {
1291 tp->t_state = TCPS_TIME_WAIT;
1292 tcp_canceltimers(tp);
1293 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1294 soisfdisconnected(so);
1295 }
1296 break;
1297
1298 /*
1299 * In LAST_ACK, we may still be waiting for data to drain
1300 * and/or to be acked, as well as for the ack of our FIN.
1301 * If our FIN is now acknowledged, delete the TCB,
1302 * enter the closed state and return.
1303 */
1304 case TCPS_LAST_ACK:
1305 if (ourfinisacked) {
1306 tp = tcp_close(pData, tp);
1307 goto drop;
1308 }
1309 break;
1310
1311 /*
1312 * In TIME_WAIT state the only thing that should arrive
1313 * is a retransmission of the remote FIN. Acknowledge
1314 * it and restart the finack timer.
1315 */
1316 case TCPS_TIME_WAIT:
1317 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1318 goto dropafterack;
1319 }
1320 } /* switch(tp->t_state) */
1321
1322step6:
1323 /*
1324 * Update window information.
1325 * Don't look at window if no ACK: TAC's send garbage on first SYN.
1326 */
1327 if ((tiflags & TH_ACK) &&
1328 (SEQ_LT(tp->snd_wl1, ti->ti_seq) ||
1329 (tp->snd_wl1 == ti->ti_seq && (SEQ_LT(tp->snd_wl2, ti->ti_ack) ||
1330 (tp->snd_wl2 == ti->ti_ack && tiwin > tp->snd_wnd))))) {
1331 /* keep track of pure window updates */
1332 if (ti->ti_len == 0 &&
1333 tp->snd_wl2 == ti->ti_ack && tiwin > tp->snd_wnd)
1334 tcpstat.tcps_rcvwinupd++;
1335 tp->snd_wnd = tiwin;
1336 tp->snd_wl1 = ti->ti_seq;
1337 tp->snd_wl2 = ti->ti_ack;
1338 if (tp->snd_wnd > tp->max_sndwnd)
1339 tp->max_sndwnd = tp->snd_wnd;
1340 needoutput = 1;
1341 }
1342
1343 /*
1344 * Process segments with URG.
1345 */
1346 if ((tiflags & TH_URG) && ti->ti_urp &&
1347 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
1348 /*
1349 * This is a kludge, but if we receive and accept
1350 * random urgent pointers, we'll crash in
1351 * soreceive. It's hard to imagine someone
1352 * actually wanting to send this much urgent data.
1353 */
1354 if (ti->ti_urp + so->so_rcv.sb_cc > so->so_rcv.sb_datalen) {
1355 ti->ti_urp = 0;
1356 tiflags &= ~TH_URG;
1357 goto dodata;
1358 }
1359 /*
1360 * If this segment advances the known urgent pointer,
1361 * then mark the data stream. This should not happen
1362 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
1363 * a FIN has been received from the remote side.
1364 * In these states we ignore the URG.
1365 *
1366 * According to RFC961 (Assigned Protocols),
1367 * the urgent pointer points to the last octet
1368 * of urgent data. We continue, however,
1369 * to consider it to indicate the first octet
1370 * of data past the urgent section as the original
1371 * spec states (in one of two places).
1372 */
1373 if (SEQ_GT(ti->ti_seq+ti->ti_urp, tp->rcv_up)) {
1374 tp->rcv_up = ti->ti_seq + ti->ti_urp;
1375 so->so_urgc = so->so_rcv.sb_cc +
1376 (tp->rcv_up - tp->rcv_nxt); /* -1; */
1377 tp->rcv_up = ti->ti_seq + ti->ti_urp;
1378
1379 }
1380 } else
1381 /*
1382 * If no out of band data is expected,
1383 * pull receive urgent pointer along
1384 * with the receive window.
1385 */
1386 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
1387 tp->rcv_up = tp->rcv_nxt;
1388dodata:
1389
1390 /*
1391 * Process the segment text, merging it into the TCP sequencing queue,
1392 * and arranging for acknowledgment of receipt if necessary.
1393 * This process logically involves adjusting tp->rcv_wnd as data
1394 * is presented to the user (this happens in tcp_usrreq.c,
1395 * case PRU_RCVD). If a FIN has already been received on this
1396 * connection then we just ignore the text.
1397 */
1398 if ((ti->ti_len || (tiflags&TH_FIN)) &&
1399 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
1400 TCP_REASS(pData, tp, ti, m, so, tiflags);
1401 /*
1402 * Note the amount of data that peer has sent into
1403 * our window, in order to estimate the sender's
1404 * buffer size.
1405 */
1406 len = so->so_rcv.sb_datalen - (tp->rcv_adv - tp->rcv_nxt);
1407 } else {
1408 mbuf_freed = 1; /* The mbuf must be freed, but only when its content is not needed anymore. */
1409 tiflags &= ~TH_FIN;
1410 }
1411
1412 /*
1413 * If FIN is received ACK the FIN and let the user know
1414 * that the connection is closing.
1415 */
1416 if (tiflags & TH_FIN) {
1417 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
1418 /*
1419 * If we receive a FIN we can't send more data,
1420 * set it SS_FDRAIN
1421 * Shutdown the socket if there is no rx data in the
1422 * buffer.
1423 * soread() is called on completion of shutdown() and
1424 * will got to TCPS_LAST_ACK, and use tcp_output()
1425 * to send the FIN.
1426 */
1427/* sofcantrcvmore(so); */
1428 sofwdrain(so);
1429
1430 tp->t_flags |= TF_ACKNOW;
1431 tp->rcv_nxt++;
1432 }
1433 switch (tp->t_state) {
1434
1435 /*
1436 * In SYN_RECEIVED and ESTABLISHED STATES
1437 * enter the CLOSE_WAIT state.
1438 */
1439 case TCPS_SYN_RECEIVED:
1440 case TCPS_ESTABLISHED:
1441 if(so->so_emu == EMU_CTL) /* no shutdown on socket */
1442 tp->t_state = TCPS_LAST_ACK;
1443 else
1444 tp->t_state = TCPS_CLOSE_WAIT;
1445 break;
1446
1447 /*
1448 * If still in FIN_WAIT_1 STATE FIN has not been acked so
1449 * enter the CLOSING state.
1450 */
1451 case TCPS_FIN_WAIT_1:
1452 tp->t_state = TCPS_CLOSING;
1453 break;
1454
1455 /*
1456 * In FIN_WAIT_2 state enter the TIME_WAIT state,
1457 * starting the time-wait timer, turning off the other
1458 * standard timers.
1459 */
1460 case TCPS_FIN_WAIT_2:
1461 tp->t_state = TCPS_TIME_WAIT;
1462 tcp_canceltimers(tp);
1463 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1464 soisfdisconnected(so);
1465 break;
1466
1467 /*
1468 * In TIME_WAIT state restart the 2 MSL time_wait timer.
1469 */
1470 case TCPS_TIME_WAIT:
1471 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1472 break;
1473 }
1474 }
1475
1476 /*
1477 * If this is a small packet, then ACK now - with Nagel
1478 * congestion avoidance sender won't send more until
1479 * he gets an ACK.
1480 *
1481 * See above.
1482 */
1483/* if (ti->ti_len && (unsigned)ti->ti_len < tp->t_maxseg) {
1484 */
1485/* if ((ti->ti_len && (unsigned)ti->ti_len < tp->t_maxseg &&
1486 * (so->so_iptos & IPTOS_LOWDELAY) == 0) ||
1487 * ((so->so_iptos & IPTOS_LOWDELAY) &&
1488 * ((struct tcpiphdr_2 *)ti)->first_char == (char)27)) {
1489 */
1490 if (ti->ti_len && (unsigned)ti->ti_len <= 5 &&
1491 ((struct tcpiphdr_2 *)ti)->first_char == (char)27) {
1492 tp->t_flags |= TF_ACKNOW;
1493 }
1494
1495 if (mbuf_freed) {
1496 m_free(pData, m);
1497 }
1498 /*
1499 * Return any desired output.
1500 */
1501 if (needoutput || (tp->t_flags & TF_ACKNOW)) {
1502 (void) tcp_output(pData, tp);
1503 }
1504
1505 VBOX_SLIRP_UNLOCK(so->so_mutex);
1506 if (m != NULL) {
1507 VBOX_SLIRP_UNLOCK(m->m_mutex);
1508 }
1509 return;
1510
1511dropafterack:
1512 /*
1513 * Generate an ACK dropping incoming segment if it occupies
1514 * sequence space, where the ACK reflects our state.
1515 */
1516 if (tiflags & TH_RST)
1517 goto drop;
1518 m_freem(pData, m);
1519 tp->t_flags |= TF_ACKNOW;
1520 (void) tcp_output(pData, tp);
1521 VBOX_SLIRP_UNLOCK(so->so_mutex);
1522 if (m != NULL) {
1523 VBOX_SLIRP_UNLOCK(m->m_mutex);
1524 }
1525 return;
1526
1527dropwithreset:
1528 /* reuses m if m!=NULL, m_free() unnecessary */
1529 if (tiflags & TH_ACK)
1530 tcp_respond(pData, tp, ti, m, (tcp_seq)0, ti->ti_ack, TH_RST);
1531 else {
1532 if (tiflags & TH_SYN) ti->ti_len++;
1533 tcp_respond(pData, tp, ti, m, ti->ti_seq+ti->ti_len, (tcp_seq)0,
1534 TH_RST|TH_ACK);
1535 }
1536
1537 if (so != NULL) VBOX_SLIRP_UNLOCK(so->so_mutex);
1538 if (m != NULL) VBOX_SLIRP_UNLOCK(m->m_mutex);
1539 return;
1540
1541drop:
1542 /*
1543 * Drop space held by incoming segment and return.
1544 */
1545 m_free(pData, m);
1546 VBOX_SLIRP_UNLOCK(so->so_mutex);
1547 if (m != NULL) {
1548 VBOX_SLIRP_UNLOCK(m->m_mutex);
1549 }
1550
1551 return;
1552#ifdef VBOX_WITH_SYNC_SLIRP
1553#undef return
1554#endif
1555}
1556
1557 /* , ts_present, ts_val, ts_ecr) */
1558/* int *ts_present;
1559 * u_int32_t *ts_val, *ts_ecr;
1560 */
1561void
1562tcp_dooptions(PNATState pData, struct tcpcb *tp, u_char *cp, int cnt, struct tcpiphdr *ti)
1563{
1564 u_int16_t mss;
1565 int opt, optlen;
1566
1567 DEBUG_CALL("tcp_dooptions");
1568 DEBUG_ARGS((dfd," tp = %lx cnt=%i \n", (long )tp, cnt));
1569
1570 for (; cnt > 0; cnt -= optlen, cp += optlen) {
1571 opt = cp[0];
1572 if (opt == TCPOPT_EOL)
1573 break;
1574 if (opt == TCPOPT_NOP)
1575 optlen = 1;
1576 else {
1577 optlen = cp[1];
1578 if (optlen <= 0)
1579 break;
1580 }
1581 switch (opt) {
1582
1583 default:
1584 continue;
1585
1586 case TCPOPT_MAXSEG:
1587 if (optlen != TCPOLEN_MAXSEG)
1588 continue;
1589 if (!(ti->ti_flags & TH_SYN))
1590 continue;
1591 memcpy((char *) &mss, (char *) cp + 2, sizeof(mss));
1592 NTOHS(mss);
1593 (void) tcp_mss(pData, tp, mss); /* sets t_maxseg */
1594 break;
1595
1596/* case TCPOPT_WINDOW:
1597 * if (optlen != TCPOLEN_WINDOW)
1598 * continue;
1599 * if (!(ti->ti_flags & TH_SYN))
1600 * continue;
1601 * tp->t_flags |= TF_RCVD_SCALE;
1602 * tp->requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT);
1603 * break;
1604 */
1605/* case TCPOPT_TIMESTAMP:
1606 * if (optlen != TCPOLEN_TIMESTAMP)
1607 * continue;
1608 * *ts_present = 1;
1609 * memcpy((char *) ts_val, (char *)cp + 2, sizeof(*ts_val));
1610 * NTOHL(*ts_val);
1611 * memcpy((char *) ts_ecr, (char *)cp + 6, sizeof(*ts_ecr));
1612 * NTOHL(*ts_ecr);
1613 *
1614 */ /*
1615 * * A timestamp received in a SYN makes
1616 * * it ok to send timestamp requests and replies.
1617 * */
1618/* if (ti->ti_flags & TH_SYN) {
1619 * tp->t_flags |= TF_RCVD_TSTMP;
1620 * tp->ts_recent = *ts_val;
1621 * tp->ts_recent_age = tcp_now;
1622 * }
1623 */ break;
1624 }
1625 }
1626}
1627
1628
1629/*
1630 * Pull out of band byte out of a segment so
1631 * it doesn't appear in the user's data queue.
1632 * It is still reflected in the segment length for
1633 * sequencing purposes.
1634 */
1635
1636#ifdef notdef
1637
1638void
1639tcp_pulloutofband(so, ti, m)
1640 struct socket *so;
1641 struct tcpiphdr *ti;
1642 register struct mbuf *m;
1643{
1644 int cnt = ti->ti_urp - 1;
1645
1646 while (cnt >= 0) {
1647 if (m->m_len > cnt) {
1648 char *cp = mtod(m, caddr_t) + cnt;
1649 struct tcpcb *tp = sototcpcb(so);
1650
1651 tp->t_iobc = *cp;
1652 tp->t_oobflags |= TCPOOB_HAVEDATA;
1653 memcpy(sp, cp+1, (unsigned)(m->m_len - cnt - 1));
1654 m->m_len--;
1655 return;
1656 }
1657 cnt -= m->m_len;
1658 m = m->m_next; /* XXX WRONG! Fix it! */
1659 if (m == 0)
1660 break;
1661 }
1662 panic("tcp_pulloutofband");
1663}
1664
1665#endif /* notdef */
1666
1667/*
1668 * Collect new round-trip time estimate
1669 * and update averages and current timeout.
1670 */
1671
1672void
1673tcp_xmit_timer(PNATState pData, register struct tcpcb *tp, int rtt)
1674{
1675 register short delta;
1676
1677 DEBUG_CALL("tcp_xmit_timer");
1678 DEBUG_ARG("tp = %lx", (long)tp);
1679 DEBUG_ARG("rtt = %d", rtt);
1680
1681 tcpstat.tcps_rttupdated++;
1682 if (tp->t_srtt != 0) {
1683 /*
1684 * srtt is stored as fixed point with 3 bits after the
1685 * binary point (i.e., scaled by 8). The following magic
1686 * is equivalent to the smoothing algorithm in rfc793 with
1687 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
1688 * point). Adjust rtt to origin 0.
1689 */
1690 delta = rtt - 1 - (tp->t_srtt >> TCP_RTT_SHIFT);
1691 if ((tp->t_srtt += delta) <= 0)
1692 tp->t_srtt = 1;
1693 /*
1694 * We accumulate a smoothed rtt variance (actually, a
1695 * smoothed mean difference), then set the retransmit
1696 * timer to smoothed rtt + 4 times the smoothed variance.
1697 * rttvar is stored as fixed point with 2 bits after the
1698 * binary point (scaled by 4). The following is
1699 * equivalent to rfc793 smoothing with an alpha of .75
1700 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
1701 * rfc793's wired-in beta.
1702 */
1703 if (delta < 0)
1704 delta = -delta;
1705 delta -= (tp->t_rttvar >> TCP_RTTVAR_SHIFT);
1706 if ((tp->t_rttvar += delta) <= 0)
1707 tp->t_rttvar = 1;
1708 } else {
1709 /*
1710 * No rtt measurement yet - use the unsmoothed rtt.
1711 * Set the variance to half the rtt (so our first
1712 * retransmit happens at 3*rtt).
1713 */
1714 tp->t_srtt = rtt << TCP_RTT_SHIFT;
1715 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
1716 }
1717 tp->t_rtt = 0;
1718 tp->t_rxtshift = 0;
1719
1720 /*
1721 * the retransmit should happen at rtt + 4 * rttvar.
1722 * Because of the way we do the smoothing, srtt and rttvar
1723 * will each average +1/2 tick of bias. When we compute
1724 * the retransmit timer, we want 1/2 tick of rounding and
1725 * 1 extra tick because of +-1/2 tick uncertainty in the
1726 * firing of the timer. The bias will give us exactly the
1727 * 1.5 tick we need. But, because the bias is
1728 * statistical, we have to test that we don't drop below
1729 * the minimum feasible timer (which is 2 ticks).
1730 */
1731 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
1732 (short)tp->t_rttmin, TCPTV_REXMTMAX); /* XXX */
1733
1734 /*
1735 * We received an ack for a packet that wasn't retransmitted;
1736 * it is probably safe to discard any error indications we've
1737 * received recently. This isn't quite right, but close enough
1738 * for now (a route might have failed after we sent a segment,
1739 * and the return path might not be symmetrical).
1740 */
1741 tp->t_softerror = 0;
1742}
1743
1744/*
1745 * Determine a reasonable value for maxseg size.
1746 * If the route is known, check route for mtu.
1747 * If none, use an mss that can be handled on the outgoing
1748 * interface without forcing IP to fragment; if bigger than
1749 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
1750 * to utilize large mbufs. If no route is found, route has no mtu,
1751 * or the destination isn't local, use a default, hopefully conservative
1752 * size (usually 512 or the default IP max size, but no more than the mtu
1753 * of the interface), as we can't discover anything about intervening
1754 * gateways or networks. We also initialize the congestion/slow start
1755 * window to be a single segment if the destination isn't local.
1756 * While looking at the routing entry, we also initialize other path-dependent
1757 * parameters from pre-set or cached values in the routing entry.
1758 */
1759
1760int
1761tcp_mss(PNATState pData, register struct tcpcb *tp, u_int offer)
1762{
1763 struct socket *so = tp->t_socket;
1764 int mss;
1765
1766 DEBUG_CALL("tcp_mss");
1767 DEBUG_ARG("tp = %lx", (long)tp);
1768 DEBUG_ARG("offer = %d", offer);
1769
1770 mss = min(if_mtu, if_mru) - sizeof(struct tcpiphdr);
1771 if (offer)
1772 mss = min(mss, offer);
1773 mss = max(mss, 32);
1774 if (mss < tp->t_maxseg || offer != 0)
1775 tp->t_maxseg = mss;
1776
1777 tp->snd_cwnd = mss;
1778
1779 sbreserve(&so->so_snd, tcp_sndspace+((tcp_sndspace%mss)?(mss-(tcp_sndspace%mss)):0));
1780 sbreserve(&so->so_rcv, tcp_rcvspace+((tcp_rcvspace%mss)?(mss-(tcp_rcvspace%mss)):0));
1781
1782 DEBUG_MISC((dfd, " returning mss = %d\n", mss));
1783
1784 return mss;
1785}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette