VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/tcp_input.c@14160

Last change on this file since 14160 was 14159, checked in by vboxsync, 16 years ago

Register several sockets per event to avoid the artificial barrier of
WSA_MAXIMUM_WAIT_EVENTS (64). Currently 64 sockets are registered per event;
should work fine ;)
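
As context, a minimal hypothetical sketch of the idea behind this change (not the actual slirp code; the slirp_event_group type and group_add_socket() helper are made up for illustration): WSAWaitForMultipleEvents() can wait on at most WSA_MAXIMUM_WAIT_EVENTS (64) handles, but several sockets can share one event object via WSAEventSelect(), so grouping up to 64 sockets per event raises the effective limit to 64 * 64 sockets.

/* Hypothetical illustration: share one WSA event among up to 64 sockets. */
#include <winsock2.h>

#define SOCKETS_PER_EVENT 64              /* grouping factor from the commit message */

typedef struct slirp_event_group          /* made-up name, illustration only */
{
    WSAEVENT hEvent;                      /* one handle given to WSAWaitForMultipleEvents() */
    SOCKET   aSockets[SOCKETS_PER_EVENT]; /* sockets attached to that handle */
    int      cSockets;
} slirp_event_group;

static int group_add_socket(slirp_event_group *pGroup, SOCKET s)
{
    if (pGroup->cSockets >= SOCKETS_PER_EVENT)
        return -1;  /* group full: caller must create a new event/group */
    /* Any number of sockets may select onto the same event object; it is
       signalled when any of them has pending network events. */
    if (WSAEventSelect(s, pGroup->hEvent, FD_READ | FD_WRITE | FD_OOB | FD_CLOSE | FD_CONNECT) != 0)
        return -1;
    pGroup->aSockets[pGroup->cSockets++] = s;
    return 0;
}

/* After the wait returns, each member socket is checked with
   WSAEnumNetworkEvents() to find out which ones are actually ready. */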

  • Property svn:eol-style set to native
File size: 48.5 KB
1/*
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)tcp_input.c 8.5 (Berkeley) 4/10/94
34 * tcp_input.c,v 1.10 1994/10/13 18:36:32 wollman Exp
35 */
36
37/*
38 * Changes and additions relating to SLiRP
39 * Copyright (c) 1995 Danny Gasparovski.
40 *
41 * Please read the file COPYRIGHT for the
42 * terms and conditions of the copyright.
43 */
44
45#include <slirp.h>
46#include "ip_icmp.h"
47
48
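/* 24 days, expressed in PR_SLOWHZ slow-timer ticks; used by the (currently
 * commented-out) PAWS check below to decide when ts_recent is too stale to trust. */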
49#define TCP_PAWS_IDLE (24 * 24 * 60 * 60 * PR_SLOWHZ)
50
51/* for modulo comparisons of timestamps */
52#define TSTMP_LT(a,b) ((int)((a)-(b)) < 0)
53#define TSTMP_GEQ(a,b) ((int)((a)-(b)) >= 0)
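/*
 * Example of the wraparound-safe comparison: with a = 3 and b = 0xfffffff0,
 * (int)(a - b) = 19 > 0, so TSTMP_GEQ(3, 0xfffffff0) holds - 3 counts as
 * "later" even though it is numerically smaller, because the 32-bit
 * timestamp counter has wrapped.
 */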
54
55/*
56 * Insert segment ti into reassembly queue of tcp with
57 * control block tp. Return TH_FIN if reassembly now includes
58 * a segment with FIN. The macro form does the common case inline
59 * (segment is the next to be received on an established connection,
60 * and the queue is empty), avoiding linkage into and removal
61 * from the queue and repetition of various conversions.
62 * Set DELACK for segments received in order, but ack immediately
63 * when segments are out of order (so fast retransmit can work).
64 */
65#ifdef TCP_ACK_HACK
66#define TCP_REASS(pData, tp, ti, m, so, flags) {\
67 if ((ti)->ti_seq == (tp)->rcv_nxt && \
68 u32_to_ptr((pData), (tp)->seg_next, struct tcpcb *) == (tp) && \
69 (tp)->t_state == TCPS_ESTABLISHED) {\
70 if (ti->ti_flags & TH_PUSH) \
71 tp->t_flags |= TF_ACKNOW; \
72 else \
73 tp->t_flags |= TF_DELACK; \
74 (tp)->rcv_nxt += (ti)->ti_len; \
75 flags = (ti)->ti_flags & TH_FIN; \
76 tcpstat.tcps_rcvpack++;\
77 tcpstat.tcps_rcvbyte += (ti)->ti_len;\
78 if (so->so_emu) { \
79 if (tcp_emu((pData), (so),(m))) sbappend((pData), (so), (m)); \
80 } else \
81 sbappend((pData), (so), (m)); \
82/* sorwakeup(so); */ \
83 } else {\
84 (flags) = tcp_reass((pData), (tp), (ti), (m)); \
85 tp->t_flags |= TF_ACKNOW; \
86 } \
87}
88#else
89#define TCP_REASS(pData, tp, ti, m, so, flags) { \
90 if ((ti)->ti_seq == (tp)->rcv_nxt && \
91 u32_to_ptr((pData), (tp)->seg_next, struct tcpcb *) == (tp) && \
92 (tp)->t_state == TCPS_ESTABLISHED) { \
93 tp->t_flags |= TF_DELACK; \
94 (tp)->rcv_nxt += (ti)->ti_len; \
95 flags = (ti)->ti_flags & TH_FIN; \
96 tcpstat.tcps_rcvpack++;\
97 tcpstat.tcps_rcvbyte += (ti)->ti_len;\
98 if (so->so_emu) { \
99 if (tcp_emu((pData), (so),(m))) sbappend((pData), (so), (m)); \
100 } else \
101 sbappend((pData), (so), (m)); \
102/* sorwakeup(so); */ \
103 } else { \
104 (flags) = tcp_reass((pData), (tp), (ti), (m)); \
105 tp->t_flags |= TF_ACKNOW; \
106 } \
107}
108#endif
109
110int
111tcp_reass(PNATState pData, register struct tcpcb *tp, register struct tcpiphdr *ti, struct mbuf *m)
112{
113 register struct tcpiphdr *q;
114 struct socket *so = tp->t_socket;
115 int flags;
116
117 /*
118 * Call with ti==0 after becoming established to
119 * force pre-ESTABLISHED data up to user socket.
120 */
121 if (ti == 0)
122 goto present;
123
124 /*
125 * Find a segment which begins after this one does.
126 */
127 for (q = u32_to_ptr(pData, tp->seg_next, struct tcpiphdr *); q != (struct tcpiphdr *)tp;
128 q = u32_to_ptr(pData, q->ti_next, struct tcpiphdr *))
129 if (SEQ_GT(q->ti_seq, ti->ti_seq))
130 break;
131
132 /*
133 * If there is a preceding segment, it may provide some of
134 * our data already. If so, drop the data from the incoming
135 * segment. If it provides all of our data, drop us.
136 */
137 if (u32_to_ptr(pData, q->ti_prev, struct tcpiphdr *) != (struct tcpiphdr *)tp) {
138 register int i;
139 q = u32_to_ptr(pData, q->ti_prev, struct tcpiphdr *);
140 /* conversion to int (in i) handles seq wraparound */
141 i = q->ti_seq + q->ti_len - ti->ti_seq;
142 if (i > 0) {
143 if (i >= ti->ti_len) {
144 tcpstat.tcps_rcvduppack++;
145 tcpstat.tcps_rcvdupbyte += ti->ti_len;
146 m_freem(pData, m);
147 /*
148 * Try to present any queued data
149 * at the left window edge to the user.
150 * This is needed after the 3-WHS
151 * completes.
152 */
153 goto present; /* ??? */
154 }
155 m_adj(m, i);
156 ti->ti_len -= i;
157 ti->ti_seq += i;
158 }
159 q = u32_to_ptr(pData, q->ti_next, struct tcpiphdr *);
160 }
161 tcpstat.tcps_rcvoopack++;
162 tcpstat.tcps_rcvoobyte += ti->ti_len;
163 REASS_MBUF_SET(ti, m); /* XXX */
164
165 /*
166 * While we overlap succeeding segments trim them or,
167 * if they are completely covered, dequeue them.
168 */
169 while (q != (struct tcpiphdr *)tp) {
170 register int i = (ti->ti_seq + ti->ti_len) - q->ti_seq;
171 if (i <= 0)
172 break;
173 if (i < q->ti_len) {
174 q->ti_seq += i;
175 q->ti_len -= i;
176 m_adj(REASS_MBUF_GET(q), i);
177 break;
178 }
179 q = u32_to_ptr(pData, q->ti_next, struct tcpiphdr *);
180 m = REASS_MBUF_GET(u32_to_ptr(pData, q->ti_prev, struct tcpiphdr *));
181 remque_32(pData, u32_to_ptr(pData, q->ti_prev, struct tcpiphdr *));
182 m_freem(pData, m);
183 }
184
185 /*
186 * Stick new segment in its place.
187 */
188 insque_32(pData, ti, u32_to_ptr(pData, q->ti_prev, struct tcpiphdr *));
189
190present:
191 /*
192 * Present data to user, advancing rcv_nxt through
193 * completed sequence space.
194 */
195 if (!TCPS_HAVEESTABLISHED(tp->t_state))
196 return (0);
197 ti = u32_to_ptr(pData, tp->seg_next, struct tcpiphdr *);
198 if (ti == (struct tcpiphdr *)tp || ti->ti_seq != tp->rcv_nxt)
199 return (0);
200 if (tp->t_state == TCPS_SYN_RECEIVED && ti->ti_len)
201 return (0);
202 do {
203 tp->rcv_nxt += ti->ti_len;
204 flags = ti->ti_flags & TH_FIN;
205 remque_32(pData, ti);
206 m = REASS_MBUF_GET(ti); /* XXX */
207 ti = u32_to_ptr(pData, ti->ti_next, struct tcpiphdr *);
208/* if (so->so_state & SS_FCANTRCVMORE) */
209 if (so->so_state & SS_FCANTSENDMORE)
210 m_freem(pData, m);
211 else {
212 if (so->so_emu) {
213 if (tcp_emu(pData, so,m)) sbappend(pData, so, m);
214 } else
215 sbappend(pData, so, m);
216 }
217 } while (ti != (struct tcpiphdr *)tp && ti->ti_seq == tp->rcv_nxt);
218/* sorwakeup(so); */
219 return (flags);
220}
221
222/*
223 * TCP input routine, follows pages 65-76 of the
224 * protocol specification dated September, 1981 very closely.
225 */
226void
227tcp_input(PNATState pData, register struct mbuf *m, int iphlen, struct socket *inso)
228{
229 struct ip save_ip, *ip;
230 register struct tcpiphdr *ti;
231 caddr_t optp = NULL;
232 int optlen = 0;
233 int len, tlen, off;
234 register struct tcpcb *tp = 0;
235 register int tiflags;
236 struct socket *so = 0;
237 int todrop, acked, ourfinisacked, needoutput = 0;
238/* int dropsocket = 0; */
239 int iss = 0;
240 u_long tiwin;
241 int ret;
242/* int ts_present = 0; */
243 int mbuf_freed = 0;
244
245 DEBUG_CALL("tcp_input");
246 DEBUG_ARGS((dfd," m = %8lx iphlen = %2d inso = %lx\n",
247 (long )m, iphlen, (long )inso ));
248
249 /*
250 * If called with m == 0, then we're continuing the connect
251 */
252 if (m == NULL) {
253 so = inso;
254
255 /* Re-set a few variables */
256 tp = sototcpcb(so);
257 m = so->so_m;
258 so->so_m = 0;
259 ti = so->so_ti;
260 tiwin = ti->ti_win;
261 tiflags = ti->ti_flags;
262
263 goto cont_conn;
264 }
265
266
267 tcpstat.tcps_rcvtotal++;
268 /*
269 * Get IP and TCP header together in first mbuf.
270 * Note: IP leaves IP header in first mbuf.
271 */
272 ti = mtod(m, struct tcpiphdr *);
273 if (iphlen > sizeof(struct ip )) {
274 ip_stripoptions(m, (struct mbuf *)0);
275 iphlen=sizeof(struct ip );
276 }
277 /* XXX Check if too short */
278
279
280 /*
281 * Save a copy of the IP header in case we want to restore it
282 * for sending an ICMP error message in response.
283 */
284 ip=mtod(m, struct ip *);
285 save_ip = *ip;
286 save_ip.ip_len+= iphlen;
287
288 /*
289 * Checksum extended TCP header and data.
290 */
291 tlen = ((struct ip *)ti)->ip_len;
292 ti->ti_next = ti->ti_prev = 0;
293 ti->ti_x1 = 0;
294 ti->ti_len = htons((u_int16_t)tlen);
295 len = sizeof(struct ip ) + tlen;
296 /* keep checksum for ICMP reply
297 * ti->ti_sum = cksum(m, len);
298 * if (ti->ti_sum) { */
299 if(cksum(m, len)) {
300 tcpstat.tcps_rcvbadsum++;
301 goto drop;
302 }
303
304 /*
305 * Check that TCP offset makes sense,
306 * pull out TCP options and adjust length. XXX
307 */
308 off = ti->ti_off << 2;
309 if (off < sizeof (struct tcphdr) || off > tlen) {
310 tcpstat.tcps_rcvbadoff++;
311 goto drop;
312 }
313 tlen -= off;
314 ti->ti_len = tlen;
315 if (off > sizeof (struct tcphdr)) {
316 optlen = off - sizeof (struct tcphdr);
317 optp = mtod(m, caddr_t) + sizeof (struct tcpiphdr);
318
319 /*
320 * Do quick retrieval of timestamp options ("options
321 * prediction?"). If timestamp is the only option and it's
322 * formatted as recommended in RFC 1323 appendix A, we
323 * quickly get the values now and not bother calling
324 * tcp_dooptions(), etc.
325 */
326/* if ((optlen == TCPOLEN_TSTAMP_APPA ||
327 * (optlen > TCPOLEN_TSTAMP_APPA &&
328 * optp[TCPOLEN_TSTAMP_APPA] == TCPOPT_EOL)) &&
329 * *(u_int32_t *)optp == htonl(TCPOPT_TSTAMP_HDR) &&
330 * (ti->ti_flags & TH_SYN) == 0) {
331 * ts_present = 1;
332 * ts_val = ntohl(*(u_int32_t *)(optp + 4));
333 * ts_ecr = ntohl(*(u_int32_t *)(optp + 8));
334 * optp = NULL; / * we've parsed the options * /
335 * }
336 */
337 }
338 tiflags = ti->ti_flags;
339
340 /*
341 * Convert TCP protocol specific fields to host format.
342 */
343 NTOHL(ti->ti_seq);
344 NTOHL(ti->ti_ack);
345 NTOHS(ti->ti_win);
346 NTOHS(ti->ti_urp);
347
348 /*
349 * Drop TCP, IP headers and TCP options.
350 */
351 m->m_data += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
352 m->m_len -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
353
354 /*
355 * Locate pcb for segment.
356 */
357findso:
358 so = tcp_last_so;
359 if (so->so_fport != ti->ti_dport ||
360 so->so_lport != ti->ti_sport ||
361 so->so_laddr.s_addr != ti->ti_src.s_addr ||
362 so->so_faddr.s_addr != ti->ti_dst.s_addr) {
363 so = solookup(&tcb, ti->ti_src, ti->ti_sport,
364 ti->ti_dst, ti->ti_dport);
365 if (so)
366 tcp_last_so = so;
367 ++tcpstat.tcps_socachemiss;
368 }
369
370 /*
371 * If the state is CLOSED (i.e., TCB does not exist) then
372 * all data in the incoming segment is discarded.
373 * If the TCB exists but is in CLOSED state, it is embryonic,
374 * but should either do a listen or a connect soon.
375 *
376 * state == CLOSED means we've done socreate() but haven't
377 * attached it to a protocol yet...
378 *
379 * XXX If a TCB does not exist, and the TH_SYN flag is
380 * the only flag set, then create a session, mark it
381 * as if it was LISTENING, and continue...
382 */
383 if (so == 0) {
384 if ((tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) != TH_SYN)
385 goto dropwithreset;
386
387 if ((so = socreate()) == NULL)
388 goto dropwithreset;
389 if (tcp_attach(pData, so) < 0) {
390 free(so); /* Not sofree (if it failed, it's not insqued) */
391 goto dropwithreset;
392 }
393#if defined(VBOX_WITH_SIMPLEFIED_SLIRP_SYNC) && defined(RT_OS_WINDOWS)
394 soregister_event(pData, so);
395#endif
396
397 sbreserve(&so->so_snd, tcp_sndspace);
398 sbreserve(&so->so_rcv, tcp_rcvspace);
399
400 /* tcp_last_so = so; */ /* XXX ? */
401 /* tp = sototcpcb(so); */
402
403 so->so_laddr = ti->ti_src;
404 so->so_lport = ti->ti_sport;
405 so->so_faddr = ti->ti_dst;
406 so->so_fport = ti->ti_dport;
407
408 if ((so->so_iptos = tcp_tos(so)) == 0)
409 so->so_iptos = ((struct ip *)ti)->ip_tos;
410
411 tp = sototcpcb(so);
412 tp->t_state = TCPS_LISTEN;
413 }
414
415 /*
416 * If this is a still-connecting socket, this is probably
417 * a retransmit of the SYN. Whether it's a retransmitted SYN
418 * or something else, we nuke it.
419 */
420 if (so->so_state & SS_ISFCONNECTING)
421 goto drop;
422
423 tp = sototcpcb(so);
424
425 /* XXX Should never fail */
426 if (tp == 0)
427 goto dropwithreset;
428 if (tp->t_state == TCPS_CLOSED)
429 goto drop;
430
431 /* Unscale the window into a 32-bit value. */
432/* if ((tiflags & TH_SYN) == 0)
433 * tiwin = ti->ti_win << tp->snd_scale;
434 * else
435 */
436 tiwin = ti->ti_win;
437
438 /*
439 * Segment received on connection.
440 * Reset idle time and keep-alive timer.
441 */
442 tp->t_idle = 0;
443 if (so_options)
444 tp->t_timer[TCPT_KEEP] = tcp_keepintvl;
445 else
446 tp->t_timer[TCPT_KEEP] = tcp_keepidle;
447
448 /*
449 * Process options if not in LISTEN state,
450 * else do it below (after getting remote address).
451 */
452 if (optp && tp->t_state != TCPS_LISTEN)
453 tcp_dooptions(pData, tp, (u_char *)optp, optlen, ti);
454/* , */
455/* &ts_present, &ts_val, &ts_ecr); */
456
457 /*
458 * Header prediction: check for the two common cases
459 * of a uni-directional data xfer. If the packet has
460 * no control flags, is in-sequence, the window didn't
461 * change and we're not retransmitting, it's a
462 * candidate. If the length is zero and the ack moved
463 * forward, we're the sender side of the xfer. Just
464 * free the data acked & wake any higher level process
465 * that was blocked waiting for space. If the length
466 * is non-zero and the ack didn't move, we're the
467 * receiver side. If we're getting packets in-order
468 * (the reassembly queue is empty), add the data to
469 * the socket buffer and note that we need a delayed ack.
470 *
471 * XXX Some of these tests are not needed
472 * e.g. the tiwin == tp->snd_wnd test prevents many more
473 * predictions, with no *real* advantage.
474 */
475 if (tp->t_state == TCPS_ESTABLISHED &&
476 (tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
477/* (!ts_present || TSTMP_GEQ(ts_val, tp->ts_recent)) && */
478 ti->ti_seq == tp->rcv_nxt &&
479 tiwin && tiwin == tp->snd_wnd &&
480 tp->snd_nxt == tp->snd_max) {
481 /*
482 * If last ACK falls within this segment's sequence numbers,
483 * record the timestamp.
484 */
485/* if (ts_present && SEQ_LEQ(ti->ti_seq, tp->last_ack_sent) &&
486 * SEQ_LT(tp->last_ack_sent, ti->ti_seq + ti->ti_len)) {
487 * tp->ts_recent_age = tcp_now;
488 * tp->ts_recent = ts_val;
489 * }
490 */
491 if (ti->ti_len == 0) {
492 if (SEQ_GT(ti->ti_ack, tp->snd_una) &&
493 SEQ_LEQ(ti->ti_ack, tp->snd_max) &&
494 tp->snd_cwnd >= tp->snd_wnd) {
495 /*
496 * this is a pure ack for outstanding data.
497 */
498 ++tcpstat.tcps_predack;
499/* if (ts_present)
500 * tcp_xmit_timer(tp, tcp_now-ts_ecr+1);
501 * else
502 */ if (tp->t_rtt &&
503 SEQ_GT(ti->ti_ack, tp->t_rtseq))
504 tcp_xmit_timer(pData, tp, tp->t_rtt);
505 acked = ti->ti_ack - tp->snd_una;
506 tcpstat.tcps_rcvackpack++;
507 tcpstat.tcps_rcvackbyte += acked;
508 sbdrop(&so->so_snd, acked);
509 tp->snd_una = ti->ti_ack;
510 m_freem(pData, m);
511
512 /*
513 * If all outstanding data are acked, stop
514 * retransmit timer, otherwise restart timer
515 * using current (possibly backed-off) value.
516 * If process is waiting for space,
517 * wakeup/selwakeup/signal. If data
518 * are ready to send, let tcp_output
519 * decide between more output or persist.
520 */
521 if (tp->snd_una == tp->snd_max)
522 tp->t_timer[TCPT_REXMT] = 0;
523 else if (tp->t_timer[TCPT_PERSIST] == 0)
524 tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
525
526 /*
527 * There's room in so_snd, sowwakeup will read()
528 * from the socket if we can
529 */
530/* if (so->so_snd.sb_flags & SB_NOTIFY)
531 * sowwakeup(so);
532 */
533 /*
534 * This is called because sowwakeup might have
535 * put data into so_snd. Since we don't do sowwakeup,
536 * we don't need this.. XXX???
537 */
538 if (so->so_snd.sb_cc)
539 (void) tcp_output(pData, tp);
540
541 return;
542 }
543 } else if (ti->ti_ack == tp->snd_una &&
544 u32_to_ptr(pData, tp->seg_next, struct tcpcb *) == tp &&
545 ti->ti_len <= sbspace(&so->so_rcv)) {
546 /*
547 * this is a pure, in-sequence data packet
548 * with nothing on the reassembly queue and
549 * we have enough buffer space to take it.
550 */
551 ++tcpstat.tcps_preddat;
552 tp->rcv_nxt += ti->ti_len;
553 tcpstat.tcps_rcvpack++;
554 tcpstat.tcps_rcvbyte += ti->ti_len;
555 /*
556 * Add data to socket buffer.
557 */
558 if (so->so_emu) {
559 if (tcp_emu(pData, so,m)) sbappend(pData, so, m);
560 } else
561 sbappend(pData, so, m);
562
563 /*
564 * XXX This is called when data arrives. Later, check
565 * if we can actually write() to the socket
566 * XXX Need to check? It's NON_BLOCKING
567 */
568/* sorwakeup(so); */
569
570 /*
571 * If this is a short packet, then ACK now - with Nagle
572 * congestion avoidance the sender won't send more until
573 * it gets an ACK.
574 *
575 * It is better to not delay acks at all to maximize
576 * TCP throughput. See RFC 2581.
577 */
578 tp->t_flags |= TF_ACKNOW;
579 tcp_output(pData, tp);
580 return;
581 }
582 } /* header prediction */
583 /*
584 * Calculate amount of space in receive window,
585 * and then do TCP input processing.
586 * Receive window is amount of space in rcv queue,
587 * but not less than advertised window.
588 */
589 { int win;
590 win = sbspace(&so->so_rcv);
591 if (win < 0)
592 win = 0;
593 tp->rcv_wnd = max(win, (int)(tp->rcv_adv - tp->rcv_nxt));
594 }
595
596 switch (tp->t_state) {
597
598 /*
599 * If the state is LISTEN then ignore segment if it contains an RST.
600 * If the segment contains an ACK then it is bad and send a RST.
601 * If it does not contain a SYN then it is not interesting; drop it.
602 * Don't bother responding if the destination was a broadcast.
603 * Otherwise initialize tp->rcv_nxt, and tp->irs, select an initial
604 * tp->iss, and send a segment:
605 * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
606 * Also initialize tp->snd_nxt to tp->iss+1 and tp->snd_una to tp->iss.
607 * Fill in remote peer address fields if not previously specified.
608 * Enter SYN_RECEIVED state, and process any other fields of this
609 * segment in this state.
610 */
611 case TCPS_LISTEN: {
612
613 if (tiflags & TH_RST)
614 goto drop;
615 if (tiflags & TH_ACK)
616 goto dropwithreset;
617 if ((tiflags & TH_SYN) == 0)
618 goto drop;
619
620 /*
621 * This has way too many gotos...
622 * But a bit of spaghetti code never hurt anybody :)
623 */
624
625 /*
626 * If this is destined for the control address, then flag to
627 * tcp_ctl once connected, otherwise connect
628 */
629 if ((so->so_faddr.s_addr&htonl(pData->netmask)) == special_addr.s_addr) {
630 int lastbyte=ntohl(so->so_faddr.s_addr) & ~pData->netmask;
631 if (lastbyte!=CTL_ALIAS && lastbyte!=CTL_DNS) {
632#if 0
633 if(lastbyte==CTL_CMD || lastbyte==CTL_EXEC) {
634 /* Command or exec address */
635 so->so_state |= SS_CTL;
636 } else
637#endif
638 {
639 /* May be an added exec entry */
640 struct ex_list *ex_ptr;
641 for(ex_ptr = exec_list; ex_ptr; ex_ptr = ex_ptr->ex_next) {
642 if(ex_ptr->ex_fport == so->so_fport &&
643 lastbyte == ex_ptr->ex_addr) {
644 so->so_state |= SS_CTL;
645 break;
646 }
647 }
648 }
649 if(so->so_state & SS_CTL) goto cont_input;
650 }
651 /* CTL_ALIAS: Do nothing, tcp_fconnect will be called on it */
652 }
653
654 if (so->so_emu & EMU_NOCONNECT) {
655 so->so_emu &= ~EMU_NOCONNECT;
656 goto cont_input;
657 }
658
659 if((tcp_fconnect(pData, so) == -1) && (errno != EINPROGRESS) && (errno != EWOULDBLOCK)) {
660 u_char code=ICMP_UNREACH_NET;
661 DEBUG_MISC((dfd," tcp fconnect errno = %d-%s\n",
662 errno,strerror(errno)));
663 if(errno == ECONNREFUSED) {
664 /* ACK the SYN, send RST to refuse the connection */
665 tcp_respond(pData, tp, ti, m, ti->ti_seq+1, (tcp_seq)0,
666 TH_RST|TH_ACK);
667 } else {
668 if(errno == EHOSTUNREACH) code=ICMP_UNREACH_HOST;
669 HTONL(ti->ti_seq); /* restore tcp header */
670 HTONL(ti->ti_ack);
671 HTONS(ti->ti_win);
672 HTONS(ti->ti_urp);
673 m->m_data -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
674 m->m_len += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
675 *ip=save_ip;
676 icmp_error(pData, m, ICMP_UNREACH,code, 0,strerror(errno));
677 }
678 tp = tcp_close(pData, tp);
679 m_free(pData, m);
680 } else {
681 /*
682 * Haven't connected yet, save the current mbuf
683 * and ti, and return
684 * XXX Some OS's don't tell us whether the connect()
685 * succeeded or not. So we must time it out.
686 */
687 so->so_m = m;
688 so->so_ti = ti;
689 tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT;
690 tp->t_state = TCPS_SYN_RECEIVED;
691 }
692 return;
693
694 cont_conn:
695 /* m==NULL
696 * Check if the connect succeeded
697 */
698 if (so->so_state & SS_NOFDREF) {
699 tp = tcp_close(pData, tp);
700 goto dropwithreset;
701 }
702 cont_input:
703 tcp_template(tp);
704
705 if (optp)
706 tcp_dooptions(pData, tp, (u_char *)optp, optlen, ti);
707 /* , */
708 /* &ts_present, &ts_val, &ts_ecr); */
709
710 if (iss)
711 tp->iss = iss;
712 else
713 tp->iss = tcp_iss;
714 tcp_iss += TCP_ISSINCR/2;
715 tp->irs = ti->ti_seq;
716 tcp_sendseqinit(tp);
717 tcp_rcvseqinit(tp);
718 tp->t_flags |= TF_ACKNOW;
719 tp->t_state = TCPS_SYN_RECEIVED;
720 tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT;
721 tcpstat.tcps_accepts++;
722 goto trimthenstep6;
723 } /* case TCPS_LISTEN */
724
725 /*
726 * If the state is SYN_SENT:
727 * if seg contains an ACK, but not for our SYN, drop the input.
728 * if seg contains a RST, then drop the connection.
729 * if seg does not contain SYN, then drop it.
730 * Otherwise this is an acceptable SYN segment
731 * initialize tp->rcv_nxt and tp->irs
732 * if seg contains ack then advance tp->snd_una
733 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
734 * arrange for segment to be acked (eventually)
735 * continue processing rest of data/controls, beginning with URG
736 */
737 case TCPS_SYN_SENT:
738 if ((tiflags & TH_ACK) &&
739 (SEQ_LEQ(ti->ti_ack, tp->iss) ||
740 SEQ_GT(ti->ti_ack, tp->snd_max)))
741 goto dropwithreset;
742
743 if (tiflags & TH_RST) {
744 if (tiflags & TH_ACK)
745 tp = tcp_drop(pData, tp,0); /* XXX Check t_softerror! */
746 goto drop;
747 }
748
749 if ((tiflags & TH_SYN) == 0)
750 goto drop;
751 if (tiflags & TH_ACK) {
752 tp->snd_una = ti->ti_ack;
753 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
754 tp->snd_nxt = tp->snd_una;
755 }
756
757 tp->t_timer[TCPT_REXMT] = 0;
758 tp->irs = ti->ti_seq;
759 tcp_rcvseqinit(tp);
760 tp->t_flags |= TF_ACKNOW;
761 if (tiflags & TH_ACK && SEQ_GT(tp->snd_una, tp->iss)) {
762 tcpstat.tcps_connects++;
763 soisfconnected(so);
764 tp->t_state = TCPS_ESTABLISHED;
765
766 /* Do window scaling on this connection? */
767/* if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
768 * (TF_RCVD_SCALE|TF_REQ_SCALE)) {
769 * tp->snd_scale = tp->requested_s_scale;
770 * tp->rcv_scale = tp->request_r_scale;
771 * }
772 */
773 (void) tcp_reass(pData, tp, (struct tcpiphdr *)0,
774 (struct mbuf *)0);
775 /*
776 * if we didn't have to retransmit the SYN,
777 * use its rtt as our initial srtt & rtt var.
778 */
779 if (tp->t_rtt)
780 tcp_xmit_timer(pData, tp, tp->t_rtt);
781 } else
782 tp->t_state = TCPS_SYN_RECEIVED;
783
784trimthenstep6:
785 /*
786 * Advance ti->ti_seq to correspond to first data byte.
787 * If data, trim to stay within window,
788 * dropping FIN if necessary.
789 */
790 ti->ti_seq++;
791 if (ti->ti_len > tp->rcv_wnd) {
792 todrop = ti->ti_len - tp->rcv_wnd;
793 m_adj(m, -todrop);
794 ti->ti_len = tp->rcv_wnd;
795 tiflags &= ~TH_FIN;
796 tcpstat.tcps_rcvpackafterwin++;
797 tcpstat.tcps_rcvbyteafterwin += todrop;
798 }
799 tp->snd_wl1 = ti->ti_seq - 1;
800 tp->rcv_up = ti->ti_seq;
801 goto step6;
802 } /* switch tp->t_state */
803 /*
804 * States other than LISTEN or SYN_SENT.
805 * First check timestamp, if present.
806 * Then check that at least some bytes of segment are within
807 * receive window. If segment begins before rcv_nxt,
808 * drop leading data (and SYN); if nothing left, just ack.
809 *
810 * RFC 1323 PAWS: If we have a timestamp reply on this segment
811 * and it's less than ts_recent, drop it.
812 */
813/* if (ts_present && (tiflags & TH_RST) == 0 && tp->ts_recent &&
814 * TSTMP_LT(ts_val, tp->ts_recent)) {
815 *
816 */ /* Check to see if ts_recent is over 24 days old. */
817/* if ((int)(tcp_now - tp->ts_recent_age) > TCP_PAWS_IDLE) {
818 */ /*
819 * * Invalidate ts_recent. If this segment updates
820 * * ts_recent, the age will be reset later and ts_recent
821 * * will get a valid value. If it does not, setting
822 * * ts_recent to zero will at least satisfy the
823 * * requirement that zero be placed in the timestamp
824 * * echo reply when ts_recent isn't valid. The
825 * * age isn't reset until we get a valid ts_recent
826 * * because we don't want out-of-order segments to be
827 * * dropped when ts_recent is old.
828 * */
829/* tp->ts_recent = 0;
830 * } else {
831 * tcpstat.tcps_rcvduppack++;
832 * tcpstat.tcps_rcvdupbyte += ti->ti_len;
833 * tcpstat.tcps_pawsdrop++;
834 * goto dropafterack;
835 * }
836 * }
837 */
838
839 todrop = tp->rcv_nxt - ti->ti_seq;
840 if (todrop > 0) {
841 if (tiflags & TH_SYN) {
842 tiflags &= ~TH_SYN;
843 ti->ti_seq++;
844 if (ti->ti_urp > 1)
845 ti->ti_urp--;
846 else
847 tiflags &= ~TH_URG;
848 todrop--;
849 }
850 /*
851 * Following if statement from Stevens, vol. 2, p. 960.
852 */
853 if (todrop > ti->ti_len
854 || (todrop == ti->ti_len && (tiflags & TH_FIN) == 0)) {
855 /*
856 * Any valid FIN must be to the left of the window.
857 * At this point the FIN must be a duplicate or out
858 * of sequence; drop it.
859 */
860 tiflags &= ~TH_FIN;
861
862 /*
863 * Send an ACK to resynchronize and drop any data.
864 * But keep on processing for RST or ACK.
865 */
866 tp->t_flags |= TF_ACKNOW;
867 todrop = ti->ti_len;
868 tcpstat.tcps_rcvduppack++;
869 tcpstat.tcps_rcvdupbyte += todrop;
870 } else {
871 tcpstat.tcps_rcvpartduppack++;
872 tcpstat.tcps_rcvpartdupbyte += todrop;
873 }
874 m_adj(m, todrop);
875 ti->ti_seq += todrop;
876 ti->ti_len -= todrop;
877 if (ti->ti_urp > todrop)
878 ti->ti_urp -= todrop;
879 else {
880 tiflags &= ~TH_URG;
881 ti->ti_urp = 0;
882 }
883 }
884 /*
885 * If new data are received on a connection after the
886 * user processes are gone, then RST the other end.
887 */
888 if ((so->so_state & SS_NOFDREF) &&
889 tp->t_state > TCPS_CLOSE_WAIT && ti->ti_len) {
890 tp = tcp_close(pData, tp);
891 tcpstat.tcps_rcvafterclose++;
892 goto dropwithreset;
893 }
894
895 /*
896 * If segment ends after window, drop trailing data
897 * (and PUSH and FIN); if nothing left, just ACK.
898 */
899 todrop = (ti->ti_seq+ti->ti_len) - (tp->rcv_nxt+tp->rcv_wnd);
900 if (todrop > 0) {
901 tcpstat.tcps_rcvpackafterwin++;
902 if (todrop >= ti->ti_len) {
903 tcpstat.tcps_rcvbyteafterwin += ti->ti_len;
904 /*
905 * If a new connection request is received
906 * while in TIME_WAIT, drop the old connection
907 * and start over if the sequence numbers
908 * are above the previous ones.
909 */
910 if (tiflags & TH_SYN &&
911 tp->t_state == TCPS_TIME_WAIT &&
912 SEQ_GT(ti->ti_seq, tp->rcv_nxt)) {
913 iss = tp->rcv_nxt + TCP_ISSINCR;
914 tp = tcp_close(pData, tp);
915 goto findso;
916 }
917 /*
918 * If window is closed can only take segments at
919 * window edge, and have to drop data and PUSH from
920 * incoming segments. Continue processing, but
921 * remember to ack. Otherwise, drop segment
922 * and ack.
923 */
924 if (tp->rcv_wnd == 0 && ti->ti_seq == tp->rcv_nxt) {
925 tp->t_flags |= TF_ACKNOW;
926 tcpstat.tcps_rcvwinprobe++;
927 } else
928 goto dropafterack;
929 } else
930 tcpstat.tcps_rcvbyteafterwin += todrop;
931 m_adj(m, -todrop);
932 ti->ti_len -= todrop;
933 tiflags &= ~(TH_PUSH|TH_FIN);
934 }
935
936 /*
937 * If last ACK falls within this segment's sequence numbers,
938 * record its timestamp.
939 */
940/* if (ts_present && SEQ_LEQ(ti->ti_seq, tp->last_ack_sent) &&
941 * SEQ_LT(tp->last_ack_sent, ti->ti_seq + ti->ti_len +
942 * ((tiflags & (TH_SYN|TH_FIN)) != 0))) {
943 * tp->ts_recent_age = tcp_now;
944 * tp->ts_recent = ts_val;
945 * }
946 */
947
948 /*
949 * If the RST bit is set examine the state:
950 * SYN_RECEIVED STATE:
951 * If passive open, return to LISTEN state.
952 * If active open, inform user that connection was refused.
953 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT2, CLOSE_WAIT STATES:
954 * Inform user that connection was reset, and close tcb.
955 * CLOSING, LAST_ACK, TIME_WAIT STATES
956 * Close the tcb.
957 */
958 if (tiflags&TH_RST) switch (tp->t_state) {
959
960 case TCPS_SYN_RECEIVED:
961/* so->so_error = ECONNREFUSED; */
962 goto close;
963
964 case TCPS_ESTABLISHED:
965 case TCPS_FIN_WAIT_1:
966 case TCPS_FIN_WAIT_2:
967 case TCPS_CLOSE_WAIT:
968/* so->so_error = ECONNRESET; */
969 close:
970 tp->t_state = TCPS_CLOSED;
971 tcpstat.tcps_drops++;
972 tp = tcp_close(pData, tp);
973 goto drop;
974
975 case TCPS_CLOSING:
976 case TCPS_LAST_ACK:
977 case TCPS_TIME_WAIT:
978 tp = tcp_close(pData, tp);
979 goto drop;
980 }
981
982 /*
983 * If a SYN is in the window, then this is an
984 * error and we send an RST and drop the connection.
985 */
986 if (tiflags & TH_SYN) {
987 tp = tcp_drop(pData, tp,0);
988 goto dropwithreset;
989 }
990
991 /*
992 * If the ACK bit is off we drop the segment and return.
993 */
994 if ((tiflags & TH_ACK) == 0) goto drop;
995
996 /*
997 * Ack processing.
998 */
999 switch (tp->t_state) {
1000 /*
1001 * In SYN_RECEIVED state if the ack ACKs our SYN then enter
1002 * ESTABLISHED state and continue processing, otherwise
1003 * send an RST. una<=ack<=max
1004 */
1005 case TCPS_SYN_RECEIVED:
1006
1007 if (SEQ_GT(tp->snd_una, ti->ti_ack) ||
1008 SEQ_GT(ti->ti_ack, tp->snd_max))
1009 goto dropwithreset;
1010 tcpstat.tcps_connects++;
1011 tp->t_state = TCPS_ESTABLISHED;
1012 /*
1013 * The sent SYN is ack'ed with our sequence number +1
1014 * The first data byte already in the buffer will get
1015 * lost if no correction is made. This is only needed for
1016 * SS_CTL since the buffer is empty otherwise.
1017 * tp->snd_una++; or:
1018 */
1019 tp->snd_una=ti->ti_ack;
1020 if (so->so_state & SS_CTL) {
1021 /* So tcp_ctl reports the right state */
1022 ret = tcp_ctl(pData, so);
1023 if (ret == 1) {
1024 soisfconnected(so);
1025 so->so_state &= ~SS_CTL; /* success XXX */
1026 } else if (ret == 2) {
1027 so->so_state = SS_NOFDREF; /* CTL_CMD */
1028 } else {
1029 needoutput = 1;
1030 tp->t_state = TCPS_FIN_WAIT_1;
1031 }
1032 } else {
1033 soisfconnected(so);
1034 }
1035
1036 /* Do window scaling? */
1037/* if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
1038 * (TF_RCVD_SCALE|TF_REQ_SCALE)) {
1039 * tp->snd_scale = tp->requested_s_scale;
1040 * tp->rcv_scale = tp->request_r_scale;
1041 * }
1042 */
1043 (void) tcp_reass(pData, tp, (struct tcpiphdr *)0, (struct mbuf *)0);
1044 tp->snd_wl1 = ti->ti_seq - 1;
1045 /* Avoid ack processing; snd_una==ti_ack => dup ack */
1046 goto synrx_to_est;
1047 /* fall into ... */
1048
1049 /*
1050 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
1051 * ACKs. If the ack is in the range
1052 * tp->snd_una < ti->ti_ack <= tp->snd_max
1053 * then advance tp->snd_una to ti->ti_ack and drop
1054 * data from the retransmission queue. If this ACK reflects
1055 * more up to date window information we update our window information.
1056 */
1057 case TCPS_ESTABLISHED:
1058 case TCPS_FIN_WAIT_1:
1059 case TCPS_FIN_WAIT_2:
1060 case TCPS_CLOSE_WAIT:
1061 case TCPS_CLOSING:
1062 case TCPS_LAST_ACK:
1063 case TCPS_TIME_WAIT:
1064
1065 if (SEQ_LEQ(ti->ti_ack, tp->snd_una)) {
1066 if (ti->ti_len == 0 && tiwin == tp->snd_wnd) {
1067 tcpstat.tcps_rcvdupack++;
1068 DEBUG_MISC((dfd," dup ack m = %lx so = %lx \n",
1069 (long )m, (long )so));
1070 /*
1071 * If we have outstanding data (other than
1072 * a window probe), this is a completely
1073 * duplicate ack (ie, window info didn't
1074 * change), the ack is the biggest we've
1075 * seen and we've seen exactly our rexmt
1076 * threshold of them, assume a packet
1077 * has been dropped and retransmit it.
1078 * Kludge snd_nxt & the congestion
1079 * window so we send only this one
1080 * packet.
1081 *
1082 * We know we're losing at the current
1083 * window size so do congestion avoidance
1084 * (set ssthresh to half the current window
1085 * and pull our congestion window back to
1086 * the new ssthresh).
1087 *
1088 * Dup acks mean that packets have left the
1089 * network (they're now cached at the receiver)
1090 * so bump cwnd by the amount in the receiver
1091 * to keep a constant cwnd packets in the
1092 * network.
1093 */
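/*
 * For example (illustrative numbers): with t_maxseg = 1460 and
 * min(snd_wnd, snd_cwnd) = 32120 bytes, win = 32120 / 2 / 1460 = 11
 * segments, so snd_ssthresh becomes 11 * 1460 = 16060 bytes. snd_cwnd
 * is collapsed to a single segment so tcp_output() resends only the
 * presumably lost segment, then re-inflated to ssthresh + maxseg *
 * t_dupacks to account for the segments the duplicate ACKs tell us
 * have left the network.
 */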
1094 if (tp->t_timer[TCPT_REXMT] == 0 ||
1095 ti->ti_ack != tp->snd_una)
1096 tp->t_dupacks = 0;
1097 else if (++tp->t_dupacks == tcprexmtthresh) {
1098 tcp_seq onxt = tp->snd_nxt;
1099 u_int win =
1100 min(tp->snd_wnd, tp->snd_cwnd) / 2 /
1101 tp->t_maxseg;
1102
1103 if (win < 2)
1104 win = 2;
1105 tp->snd_ssthresh = win * tp->t_maxseg;
1106 tp->t_timer[TCPT_REXMT] = 0;
1107 tp->t_rtt = 0;
1108 tp->snd_nxt = ti->ti_ack;
1109 tp->snd_cwnd = tp->t_maxseg;
1110 (void) tcp_output(pData, tp);
1111 tp->snd_cwnd = tp->snd_ssthresh +
1112 tp->t_maxseg * tp->t_dupacks;
1113 if (SEQ_GT(onxt, tp->snd_nxt))
1114 tp->snd_nxt = onxt;
1115 goto drop;
1116 } else if (tp->t_dupacks > tcprexmtthresh) {
1117 tp->snd_cwnd += tp->t_maxseg;
1118 (void) tcp_output(pData, tp);
1119 goto drop;
1120 }
1121 } else
1122 tp->t_dupacks = 0;
1123 break;
1124 }
1125 synrx_to_est:
1126 /*
1127 * If the congestion window was inflated to account
1128 * for the other side's cached packets, retract it.
1129 */
1130 if (tp->t_dupacks > tcprexmtthresh &&
1131 tp->snd_cwnd > tp->snd_ssthresh)
1132 tp->snd_cwnd = tp->snd_ssthresh;
1133 tp->t_dupacks = 0;
1134 if (SEQ_GT(ti->ti_ack, tp->snd_max)) {
1135 tcpstat.tcps_rcvacktoomuch++;
1136 goto dropafterack;
1137 }
1138 acked = ti->ti_ack - tp->snd_una;
1139 tcpstat.tcps_rcvackpack++;
1140 tcpstat.tcps_rcvackbyte += acked;
1141
1142 /*
1143 * If we have a timestamp reply, update smoothed
1144 * round trip time. If no timestamp is present but
1145 * transmit timer is running and timed sequence
1146 * number was acked, update smoothed round trip time.
1147 * Since we now have an rtt measurement, cancel the
1148 * timer backoff (cf., Phil Karn's retransmit alg.).
1149 * Recompute the initial retransmit timer.
1150 */
1151/* if (ts_present)
1152 * tcp_xmit_timer(tp, tcp_now-ts_ecr+1);
1153 * else
1154 */
1155 if (tp->t_rtt && SEQ_GT(ti->ti_ack, tp->t_rtseq))
1156 tcp_xmit_timer(pData, tp,tp->t_rtt);
1157
1158 /*
1159 * If all outstanding data is acked, stop retransmit
1160 * timer and remember to restart (more output or persist).
1161 * If there is more data to be acked, restart retransmit
1162 * timer, using current (possibly backed-off) value.
1163 */
1164 if (ti->ti_ack == tp->snd_max) {
1165 tp->t_timer[TCPT_REXMT] = 0;
1166 needoutput = 1;
1167 } else if (tp->t_timer[TCPT_PERSIST] == 0)
1168 tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
1169 /*
1170 * When new data is acked, open the congestion window.
1171 * If the window gives us less than ssthresh packets
1172 * in flight, open exponentially (maxseg per packet).
1173 * Otherwise open linearly: maxseg per window
1174 * (maxseg^2 / cwnd per packet).
1175 */
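/*
 * For example (illustrative numbers): with t_maxseg = 1460 and
 * snd_cwnd = 14600 (10 segments) above ssthresh, incr becomes
 * 1460 * 1460 / 14600 = 146 bytes per ACK, i.e. roughly one full
 * segment of growth per round trip; below ssthresh incr stays a
 * full 1460 per ACK, roughly doubling the window each round trip
 * (slow start).
 */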
1176 {
1177 register u_int cw = tp->snd_cwnd;
1178 register u_int incr = tp->t_maxseg;
1179
1180 if (cw > tp->snd_ssthresh)
1181 incr = incr * incr / cw;
1182 tp->snd_cwnd = min(cw + incr, TCP_MAXWIN<<tp->snd_scale);
1183 }
1184 if (acked > so->so_snd.sb_cc) {
1185 tp->snd_wnd -= so->so_snd.sb_cc;
1186 sbdrop(&so->so_snd, (int )so->so_snd.sb_cc);
1187 ourfinisacked = 1;
1188 } else {
1189 sbdrop(&so->so_snd, acked);
1190 tp->snd_wnd -= acked;
1191 ourfinisacked = 0;
1192 }
1193 /*
1194 * XXX sowwakeup is called when data is acked and there's room
1195 * for more data... it should read() the socket
1196 */
1197/* if (so->so_snd.sb_flags & SB_NOTIFY)
1198 * sowwakeup(so);
1199 */
1200 tp->snd_una = ti->ti_ack;
1201 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
1202 tp->snd_nxt = tp->snd_una;
1203
1204 switch (tp->t_state) {
1205
1206 /*
1207 * In FIN_WAIT_1 STATE in addition to the processing
1208 * for the ESTABLISHED state if our FIN is now acknowledged
1209 * then enter FIN_WAIT_2.
1210 */
1211 case TCPS_FIN_WAIT_1:
1212 if (ourfinisacked) {
1213 /*
1214 * If we can't receive any more
1215 * data, then closing user can proceed.
1216 * Starting the timer is contrary to the
1217 * specification, but if we don't get a FIN
1218 * we'll hang forever.
1219 */
1220 if (so->so_state & SS_FCANTRCVMORE) {
1221 soisfdisconnected(so);
1222 tp->t_timer[TCPT_2MSL] = tcp_maxidle;
1223 }
1224 tp->t_state = TCPS_FIN_WAIT_2;
1225 }
1226 break;
1227
1228 /*
1229 * In CLOSING STATE in addition to the processing for
1230 * the ESTABLISHED state if the ACK acknowledges our FIN
1231 * then enter the TIME-WAIT state, otherwise ignore
1232 * the segment.
1233 */
1234 case TCPS_CLOSING:
1235 if (ourfinisacked) {
1236 tp->t_state = TCPS_TIME_WAIT;
1237 tcp_canceltimers(tp);
1238 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1239 soisfdisconnected(so);
1240 }
1241 break;
1242
1243 /*
1244 * In LAST_ACK, we may still be waiting for data to drain
1245 * and/or to be acked, as well as for the ack of our FIN.
1246 * If our FIN is now acknowledged, delete the TCB,
1247 * enter the closed state and return.
1248 */
1249 case TCPS_LAST_ACK:
1250 if (ourfinisacked) {
1251 tp = tcp_close(pData, tp);
1252 goto drop;
1253 }
1254 break;
1255
1256 /*
1257 * In TIME_WAIT state the only thing that should arrive
1258 * is a retransmission of the remote FIN. Acknowledge
1259 * it and restart the finack timer.
1260 */
1261 case TCPS_TIME_WAIT:
1262 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1263 goto dropafterack;
1264 }
1265 } /* switch(tp->t_state) */
1266
1267step6:
1268 /*
1269 * Update window information.
1270 * Don't look at window if no ACK: TAC's send garbage on first SYN.
1271 */
1272 if ((tiflags & TH_ACK) &&
1273 (SEQ_LT(tp->snd_wl1, ti->ti_seq) ||
1274 (tp->snd_wl1 == ti->ti_seq && (SEQ_LT(tp->snd_wl2, ti->ti_ack) ||
1275 (tp->snd_wl2 == ti->ti_ack && tiwin > tp->snd_wnd))))) {
1276 /* keep track of pure window updates */
1277 if (ti->ti_len == 0 &&
1278 tp->snd_wl2 == ti->ti_ack && tiwin > tp->snd_wnd)
1279 tcpstat.tcps_rcvwinupd++;
1280 tp->snd_wnd = tiwin;
1281 tp->snd_wl1 = ti->ti_seq;
1282 tp->snd_wl2 = ti->ti_ack;
1283 if (tp->snd_wnd > tp->max_sndwnd)
1284 tp->max_sndwnd = tp->snd_wnd;
1285 needoutput = 1;
1286 }
1287
1288 /*
1289 * Process segments with URG.
1290 */
1291 if ((tiflags & TH_URG) && ti->ti_urp &&
1292 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
1293 /*
1294 * This is a kludge, but if we receive and accept
1295 * random urgent pointers, we'll crash in
1296 * soreceive. It's hard to imagine someone
1297 * actually wanting to send this much urgent data.
1298 */
1299 if (ti->ti_urp + so->so_rcv.sb_cc > so->so_rcv.sb_datalen) {
1300 ti->ti_urp = 0;
1301 tiflags &= ~TH_URG;
1302 goto dodata;
1303 }
1304 /*
1305 * If this segment advances the known urgent pointer,
1306 * then mark the data stream. This should not happen
1307 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
1308 * a FIN has been received from the remote side.
1309 * In these states we ignore the URG.
1310 *
1311 * According to RFC961 (Assigned Protocols),
1312 * the urgent pointer points to the last octet
1313 * of urgent data. We continue, however,
1314 * to consider it to indicate the first octet
1315 * of data past the urgent section as the original
1316 * spec states (in one of two places).
1317 */
1318 if (SEQ_GT(ti->ti_seq+ti->ti_urp, tp->rcv_up)) {
1319 tp->rcv_up = ti->ti_seq + ti->ti_urp;
1320 so->so_urgc = so->so_rcv.sb_cc +
1321 (tp->rcv_up - tp->rcv_nxt); /* -1; */
1322 tp->rcv_up = ti->ti_seq + ti->ti_urp;
1323
1324 }
1325 } else
1326 /*
1327 * If no out of band data is expected,
1328 * pull receive urgent pointer along
1329 * with the receive window.
1330 */
1331 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
1332 tp->rcv_up = tp->rcv_nxt;
1333dodata:
1334
1335 /*
1336 * Process the segment text, merging it into the TCP sequencing queue,
1337 * and arranging for acknowledgment of receipt if necessary.
1338 * This process logically involves adjusting tp->rcv_wnd as data
1339 * is presented to the user (this happens in tcp_usrreq.c,
1340 * case PRU_RCVD). If a FIN has already been received on this
1341 * connection then we just ignore the text.
1342 */
1343 if ((ti->ti_len || (tiflags&TH_FIN)) &&
1344 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
1345 TCP_REASS(pData, tp, ti, m, so, tiflags);
1346 /*
1347 * Note the amount of data that peer has sent into
1348 * our window, in order to estimate the sender's
1349 * buffer size.
1350 */
1351 len = so->so_rcv.sb_datalen - (tp->rcv_adv - tp->rcv_nxt);
1352 } else {
1353 mbuf_freed = 1; /* The mbuf must be freed, but only when its content is not needed anymore. */
1354 tiflags &= ~TH_FIN;
1355 }
1356
1357 /*
1358 * If FIN is received ACK the FIN and let the user know
1359 * that the connection is closing.
1360 */
1361 if (tiflags & TH_FIN) {
1362 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
1363 /*
1364 * If we receive a FIN we can't send more data,
1365 * so set SS_FDRAIN.
1366 * Shutdown the socket if there is no rx data in the
1367 * buffer.
1368 * soread() is called on completion of shutdown() and
1369 * will go to TCPS_LAST_ACK, and use tcp_output()
1370 * to send the FIN.
1371 */
1372/* sofcantrcvmore(so); */
1373 sofwdrain(so);
1374
1375 tp->t_flags |= TF_ACKNOW;
1376 tp->rcv_nxt++;
1377 }
1378 switch (tp->t_state) {
1379
1380 /*
1381 * In SYN_RECEIVED and ESTABLISHED STATES
1382 * enter the CLOSE_WAIT state.
1383 */
1384 case TCPS_SYN_RECEIVED:
1385 case TCPS_ESTABLISHED:
1386 if(so->so_emu == EMU_CTL) /* no shutdown on socket */
1387 tp->t_state = TCPS_LAST_ACK;
1388 else
1389 tp->t_state = TCPS_CLOSE_WAIT;
1390 break;
1391
1392 /*
1393 * If still in FIN_WAIT_1 STATE FIN has not been acked so
1394 * enter the CLOSING state.
1395 */
1396 case TCPS_FIN_WAIT_1:
1397 tp->t_state = TCPS_CLOSING;
1398 break;
1399
1400 /*
1401 * In FIN_WAIT_2 state enter the TIME_WAIT state,
1402 * starting the time-wait timer, turning off the other
1403 * standard timers.
1404 */
1405 case TCPS_FIN_WAIT_2:
1406 tp->t_state = TCPS_TIME_WAIT;
1407 tcp_canceltimers(tp);
1408 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1409 soisfdisconnected(so);
1410 break;
1411
1412 /*
1413 * In TIME_WAIT state restart the 2 MSL time_wait timer.
1414 */
1415 case TCPS_TIME_WAIT:
1416 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1417 break;
1418 }
1419 }
1420
1421 /*
1422 * If this is a small packet, then ACK now - with Nagle
1423 * congestion avoidance the sender won't send more until
1424 * it gets an ACK.
1425 *
1426 * See above.
1427 */
1428/* if (ti->ti_len && (unsigned)ti->ti_len < tp->t_maxseg) {
1429 */
1430/* if ((ti->ti_len && (unsigned)ti->ti_len < tp->t_maxseg &&
1431 * (so->so_iptos & IPTOS_LOWDELAY) == 0) ||
1432 * ((so->so_iptos & IPTOS_LOWDELAY) &&
1433 * ((struct tcpiphdr_2 *)ti)->first_char == (char)27)) {
1434 */
1435 if (ti->ti_len && (unsigned)ti->ti_len <= 5 &&
1436 ((struct tcpiphdr_2 *)ti)->first_char == (char)27) {
1437 tp->t_flags |= TF_ACKNOW;
1438 }
1439
1440 if (mbuf_freed) {
1441 m_free(pData, m);
1442 }
1443 /*
1444 * Return any desired output.
1445 */
1446 if (needoutput || (tp->t_flags & TF_ACKNOW)) {
1447 (void) tcp_output(pData, tp);
1448 }
1449 return;
1450
1451dropafterack:
1452 /*
1453 * Generate an ACK dropping incoming segment if it occupies
1454 * sequence space, where the ACK reflects our state.
1455 */
1456 if (tiflags & TH_RST)
1457 goto drop;
1458 m_freem(pData, m);
1459 tp->t_flags |= TF_ACKNOW;
1460 (void) tcp_output(pData, tp);
1461 return;
1462
1463dropwithreset:
1464 /* reuses m if m!=NULL, m_free() unnecessary */
1465 if (tiflags & TH_ACK)
1466 tcp_respond(pData, tp, ti, m, (tcp_seq)0, ti->ti_ack, TH_RST);
1467 else {
1468 if (tiflags & TH_SYN) ti->ti_len++;
1469 tcp_respond(pData, tp, ti, m, ti->ti_seq+ti->ti_len, (tcp_seq)0,
1470 TH_RST|TH_ACK);
1471 }
1472
1473 return;
1474
1475drop:
1476 /*
1477 * Drop space held by incoming segment and return.
1478 */
1479 m_free(pData, m);
1480
1481 return;
1482}
1483
1484 /* , ts_present, ts_val, ts_ecr) */
1485/* int *ts_present;
1486 * u_int32_t *ts_val, *ts_ecr;
1487 */
1488void
1489tcp_dooptions(PNATState pData, struct tcpcb *tp, u_char *cp, int cnt, struct tcpiphdr *ti)
1490{
1491 u_int16_t mss;
1492 int opt, optlen;
1493
1494 DEBUG_CALL("tcp_dooptions");
1495 DEBUG_ARGS((dfd," tp = %lx cnt=%i \n", (long )tp, cnt));
1496
1497 for (; cnt > 0; cnt -= optlen, cp += optlen) {
1498 opt = cp[0];
1499 if (opt == TCPOPT_EOL)
1500 break;
1501 if (opt == TCPOPT_NOP)
1502 optlen = 1;
1503 else {
1504 optlen = cp[1];
1505 if (optlen <= 0)
1506 break;
1507 }
1508 switch (opt) {
1509
1510 default:
1511 continue;
1512
1513 case TCPOPT_MAXSEG:
1514 if (optlen != TCPOLEN_MAXSEG)
1515 continue;
1516 if (!(ti->ti_flags & TH_SYN))
1517 continue;
1518 memcpy((char *) &mss, (char *) cp + 2, sizeof(mss));
1519 NTOHS(mss);
1520 (void) tcp_mss(pData, tp, mss); /* sets t_maxseg */
1521 break;
1522
1523/* case TCPOPT_WINDOW:
1524 * if (optlen != TCPOLEN_WINDOW)
1525 * continue;
1526 * if (!(ti->ti_flags & TH_SYN))
1527 * continue;
1528 * tp->t_flags |= TF_RCVD_SCALE;
1529 * tp->requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT);
1530 * break;
1531 */
1532/* case TCPOPT_TIMESTAMP:
1533 * if (optlen != TCPOLEN_TIMESTAMP)
1534 * continue;
1535 * *ts_present = 1;
1536 * memcpy((char *) ts_val, (char *)cp + 2, sizeof(*ts_val));
1537 * NTOHL(*ts_val);
1538 * memcpy((char *) ts_ecr, (char *)cp + 6, sizeof(*ts_ecr));
1539 * NTOHL(*ts_ecr);
1540 *
1541 */ /*
1542 * * A timestamp received in a SYN makes
1543 * * it ok to send timestamp requests and replies.
1544 * */
1545/* if (ti->ti_flags & TH_SYN) {
1546 * tp->t_flags |= TF_RCVD_TSTMP;
1547 * tp->ts_recent = *ts_val;
1548 * tp->ts_recent_age = tcp_now;
1549 * }
1550 */ break;
1551 }
1552 }
1553}
1554
1555
1556/*
1557 * Pull the out-of-band byte out of a segment so
1558 * it doesn't appear in the user's data queue.
1559 * It is still reflected in the segment length for
1560 * sequencing purposes.
1561 */
1562
1563#ifdef notdef
1564
1565void
1566tcp_pulloutofband(so, ti, m)
1567 struct socket *so;
1568 struct tcpiphdr *ti;
1569 register struct mbuf *m;
1570{
1571 int cnt = ti->ti_urp - 1;
1572
1573 while (cnt >= 0) {
1574 if (m->m_len > cnt) {
1575 char *cp = mtod(m, caddr_t) + cnt;
1576 struct tcpcb *tp = sototcpcb(so);
1577
1578 tp->t_iobc = *cp;
1579 tp->t_oobflags |= TCPOOB_HAVEDATA;
1580 memcpy(cp, cp+1, (unsigned)(m->m_len - cnt - 1));
1581 m->m_len--;
1582 return;
1583 }
1584 cnt -= m->m_len;
1585 m = m->m_next; /* XXX WRONG! Fix it! */
1586 if (m == 0)
1587 break;
1588 }
1589 panic("tcp_pulloutofband");
1590}
1591
1592#endif /* notdef */
1593
1594/*
1595 * Collect new round-trip time estimate
1596 * and update averages and current timeout.
1597 */
1598
1599void
1600tcp_xmit_timer(PNATState pData, register struct tcpcb *tp, int rtt)
1601{
1602 register short delta;
1603
1604 DEBUG_CALL("tcp_xmit_timer");
1605 DEBUG_ARG("tp = %lx", (long)tp);
1606 DEBUG_ARG("rtt = %d", rtt);
1607
1608 tcpstat.tcps_rttupdated++;
1609 if (tp->t_srtt != 0) {
1610 /*
1611 * srtt is stored as fixed point with 3 bits after the
1612 * binary point (i.e., scaled by 8). The following magic
1613 * is equivalent to the smoothing algorithm in rfc793 with
1614 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
1615 * point). Adjust rtt to origin 0.
1616 */
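/*
 * For example: if t_srtt is 40 (5 ticks with the 3-bit scaling described
 * above) and a new sample of rtt = 7 ticks arrives, delta = 7 - 1 - 5 = 1
 * and t_srtt becomes 41, i.e. about 5.1 ticks - a 1/8 step toward the new
 * sample, matching the alpha of .875.
 */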
1617 delta = rtt - 1 - (tp->t_srtt >> TCP_RTT_SHIFT);
1618 if ((tp->t_srtt += delta) <= 0)
1619 tp->t_srtt = 1;
1620 /*
1621 * We accumulate a smoothed rtt variance (actually, a
1622 * smoothed mean difference), then set the retransmit
1623 * timer to smoothed rtt + 4 times the smoothed variance.
1624 * rttvar is stored as fixed point with 2 bits after the
1625 * binary point (scaled by 4). The following is
1626 * equivalent to rfc793 smoothing with an alpha of .75
1627 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
1628 * rfc793's wired-in beta.
1629 */
1630 if (delta < 0)
1631 delta = -delta;
1632 delta -= (tp->t_rttvar >> TCP_RTTVAR_SHIFT);
1633 if ((tp->t_rttvar += delta) <= 0)
1634 tp->t_rttvar = 1;
1635 } else {
1636 /*
1637 * No rtt measurement yet - use the unsmoothed rtt.
1638 * Set the variance to half the rtt (so our first
1639 * retransmit happens at 3*rtt).
1640 */
1641 tp->t_srtt = rtt << TCP_RTT_SHIFT;
1642 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
1643 }
1644 tp->t_rtt = 0;
1645 tp->t_rxtshift = 0;
1646
1647 /*
1648 * the retransmit should happen at rtt + 4 * rttvar.
1649 * Because of the way we do the smoothing, srtt and rttvar
1650 * will each average +1/2 tick of bias. When we compute
1651 * the retransmit timer, we want 1/2 tick of rounding and
1652 * 1 extra tick because of +-1/2 tick uncertainty in the
1653 * firing of the timer. The bias will give us exactly the
1654 * 1.5 tick we need. But, because the bias is
1655 * statistical, we have to test that we don't drop below
1656 * the minimum feasible timer (which is 2 ticks).
1657 */
1658 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
1659 (short)tp->t_rttmin, TCPTV_REXMTMAX); /* XXX */
1660
1661 /*
1662 * We received an ack for a packet that wasn't retransmitted;
1663 * it is probably safe to discard any error indications we've
1664 * received recently. This isn't quite right, but close enough
1665 * for now (a route might have failed after we sent a segment,
1666 * and the return path might not be symmetrical).
1667 */
1668 tp->t_softerror = 0;
1669}
1670
1671/*
1672 * Determine a reasonable value for maxseg size.
1673 * If the route is known, check route for mtu.
1674 * If none, use an mss that can be handled on the outgoing
1675 * interface without forcing IP to fragment; if bigger than
1676 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
1677 * to utilize large mbufs. If no route is found, route has no mtu,
1678 * or the destination isn't local, use a default, hopefully conservative
1679 * size (usually 512 or the default IP max size, but no more than the mtu
1680 * of the interface), as we can't discover anything about intervening
1681 * gateways or networks. We also initialize the congestion/slow start
1682 * window to be a single segment if the destination isn't local.
1683 * While looking at the routing entry, we also initialize other path-dependent
1684 * parameters from pre-set or cached values in the routing entry.
1685 */
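/*
 * For example (assuming the classic 40-byte struct tcpiphdr overlay): with
 * if_mtu = if_mru = 1500, mss starts at 1500 - 40 = 1460; a smaller offered
 * MSS can only lower it (with a floor of 32). The sbreserve() calls below
 * then round tcp_sndspace and tcp_rcvspace up to the next multiple of mss.
 */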
1686
1687int
1688tcp_mss(PNATState pData, register struct tcpcb *tp, u_int offer)
1689{
1690 struct socket *so = tp->t_socket;
1691 int mss;
1692
1693 DEBUG_CALL("tcp_mss");
1694 DEBUG_ARG("tp = %lx", (long)tp);
1695 DEBUG_ARG("offer = %d", offer);
1696
1697 mss = min(if_mtu, if_mru) - sizeof(struct tcpiphdr);
1698 if (offer)
1699 mss = min(mss, offer);
1700 mss = max(mss, 32);
1701 if (mss < tp->t_maxseg || offer != 0)
1702 tp->t_maxseg = mss;
1703
1704 tp->snd_cwnd = mss;
1705
1706 sbreserve(&so->so_snd, tcp_sndspace+((tcp_sndspace%mss)?(mss-(tcp_sndspace%mss)):0));
1707 sbreserve(&so->so_rcv, tcp_rcvspace+((tcp_rcvspace%mss)?(mss-(tcp_rcvspace%mss)):0));
1708
1709 DEBUG_MISC((dfd, " returning mss = %d\n", mss));
1710
1711 return mss;
1712}