VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/ip_input.c@ 15450

Last change on this file since 15450 was 15450, checked in by vboxsync, 16 years ago

slirp: style, dead code

  • Property svn:eol-style set to native
File size: 23.4 KB
Line 
1/*
2 * Copyright (c) 1982, 1986, 1988, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)ip_input.c 8.2 (Berkeley) 1/4/94
34 * ip_input.c,v 1.11 1994/11/16 10:17:08 jkh Exp
35 */
36
37/*
38 * Changes and additions relating to SLiRP are
39 * Copyright (c) 1995 Danny Gasparovski.
40 *
41 * Please read the file COPYRIGHT for the
42 * terms and conditions of the copyright.
43 */
44
45#include <slirp.h>
46#include "ip_icmp.h"
47
48
49/*
50 * IP initialization: fill in IP protocol switch table.
51 * All protocols not implemented in kernel go to raw IP protocol handler.
52 */
53void
54ip_init(PNATState pData)
55{
56#ifndef VBOX_WITH_BSD_REASS
57 ipq.next = ipq.prev = ptr_to_u32(pData, &ipq);
58#else /* !VBOX_WITH_BSD_REASS */
59 int i = 0;
60 for (i = 0; i < IPREASS_NHASH; ++i)
61 TAILQ_INIT(&ipq[i]);
62 maxnipq = 100; /* ??? */
63 maxfragsperpacket = 16;
64 nipq = 0;
65#endif /* VBOX_WITH_BSD_REASS */
66 ip_currid = tt.tv_sec & 0xffff;
67 udp_init(pData);
68 tcp_init(pData);
69}
70
71/*
72 * Ip input routine. Checksum and byte swap header. If fragmented
73 * try to reassemble. Process options. Pass to next level.
74 */
75void
76ip_input(PNATState pData, struct mbuf *m)
77{
78 register struct ip *ip;
79 int hlen;
80
81 DEBUG_CALL("ip_input");
82 DEBUG_ARG("m = %lx", (long)m);
83 DEBUG_ARG("m_len = %d", m->m_len);
84
85 ipstat.ips_total++;
86
87 if (m->m_len < sizeof(struct ip))
88 {
89 ipstat.ips_toosmall++;
90 return;
91 }
92
93 ip = mtod(m, struct ip *);
94 if (ip->ip_v != IPVERSION)
95 {
96 ipstat.ips_badvers++;
97 goto bad;
98 }
99
100 hlen = ip->ip_hl << 2;
101 if ( hlen < sizeof(struct ip)
102 || hlen > m->m_len)
103 {
104 /* min header length */
105 ipstat.ips_badhlen++; /* or packet too short */
106 goto bad;
107 }
108
109 /* keep ip header intact for ICMP reply
110 * ip->ip_sum = cksum(m, hlen);
111 * if (ip->ip_sum) {
112 */
113 if(cksum(m,hlen))
114 {
115 ipstat.ips_badsum++;
116 goto bad;
117 }
118
119 /*
120 * Convert fields to host representation.
121 */
122 NTOHS(ip->ip_len);
123 if (ip->ip_len < hlen)
124 {
125 ipstat.ips_badlen++;
126 goto bad;
127 }
128 NTOHS(ip->ip_id);
129 NTOHS(ip->ip_off);
130
131 /*
132 * Check that the amount of data in the buffers
133 * is as at least much as the IP header would have us expect.
134 * Trim mbufs if longer than we expect.
135 * Drop packet if shorter than we expect.
136 */
137 if (m->m_len < ip->ip_len)
138 {
139 ipstat.ips_tooshort++;
140 goto bad;
141 }
142 /* Should drop packet if mbuf too long? hmmm... */
143 if (m->m_len > ip->ip_len)
144 m_adj(m, ip->ip_len - m->m_len);
145
146 /* check ip_ttl for a correct ICMP reply */
147 if (ip->ip_ttl==0 || ip->ip_ttl == 1)
148 {
149 icmp_error(pData, m, ICMP_TIMXCEED,ICMP_TIMXCEED_INTRANS, 0,"ttl");
150 goto bad;
151 }
152
153#ifdef VBOX_WITH_SLIRP_ICMP
154 ip->ip_ttl--;
155#endif
156 /*
157 * If offset or IP_MF are set, must reassemble.
158 * Otherwise, nothing need be done.
159 * (We could look in the reassembly queue to see
160 * if the packet was previously fragmented,
161 * but it's not worth the time; just let them time out.)
162 *
163 * XXX This should fail, don't fragment yet
164 */
165#ifndef VBOX_WITH_BSD_REASS
166 if (ip->ip_off &~ IP_DF)
167 {
168 register struct ipq_t *fp;
169 /*
170 * Look for queue of fragments
171 * of this datagram.
172 */
173 for (fp = u32_to_ptr(pData, ipq.next, struct ipq_t *);
174 fp != &ipq;
175 fp = u32_to_ptr(pData, fp->next, struct ipq_t *))
176 if ( ip->ip_id == fp->ipq_id
177 && ip->ip_src.s_addr == fp->ipq_src.s_addr
178 && ip->ip_dst.s_addr == fp->ipq_dst.s_addr
179 && ip->ip_p == fp->ipq_p)
180 goto found;
181 fp = 0;
182found:
183
184 /*
185 * Adjust ip_len to not reflect header,
186 * set ip_mff if more fragments are expected,
187 * convert offset of this to bytes.
188 */
189 ip->ip_len -= hlen;
190 if (ip->ip_off & IP_MF)
191 ((struct ipasfrag *)ip)->ipf_mff |= 1;
192 else
193 ((struct ipasfrag *)ip)->ipf_mff &= ~1;
194
195 ip->ip_off <<= 3;
196
197 /*
198 * If datagram marked as having more fragments
199 * or if this is not the first fragment,
200 * attempt reassembly; if it succeeds, proceed.
201 */
202 if (((struct ipasfrag *)ip)->ipf_mff & 1 || ip->ip_off)
203 {
204 ipstat.ips_fragments++;
205 ip = ip_reass(pData, (struct ipasfrag *)ip, fp);
206 if (ip == 0)
207 return;
208 ipstat.ips_reassembled++;
209 m = dtom(pData, ip);
210 }
211 else
212 if (fp)
213 ip_freef(pData, fp);
214
215 }
216 else
217 ip->ip_len -= hlen;
218#else /* !VBOX_WITH_BSD_REASS */
219 if (ip->ip_off & (IP_MF | IP_OFFMASK))
220 {
221 m = ip_reass(pData, m);
222 if (m == NULL)
223 return;
224 ip = mtod(m, struct ip *);
225 hlen = ip->ip_len;
226 }
227 else
228 ip->ip_len -= hlen;
229#endif /* VBOX_WITH_BSD_REASS */
230
231 /*
232 * Switch out to protocol's input routine.
233 */
234 ipstat.ips_delivered++;
235 switch (ip->ip_p)
236 {
237 case IPPROTO_TCP:
238 tcp_input(pData, m, hlen, (struct socket *)NULL);
239 break;
240 case IPPROTO_UDP:
241 udp_input(pData, m, hlen);
242 break;
243 case IPPROTO_ICMP:
244 icmp_input(pData, m, hlen);
245 break;
246 default:
247 ipstat.ips_noproto++;
248 m_free(pData, m);
249 }
250 return;
251bad:
252 m_freem(pData, m);
253 return;
254}
255
256#ifndef VBOX_WITH_BSD_REASS
257/*
258 * Take incoming datagram fragment and try to
259 * reassemble it into whole datagram. If a chain for
260 * reassembly of this datagram already exists, then it
261 * is given as fp; otherwise have to make a chain.
262 */
struct ip *
ip_reass(PNATState pData, register struct ipasfrag *ip, register struct ipq_t *fp)
{
    register struct mbuf *m = dtom(pData, ip);
    register struct ipasfrag *q;
    int hlen = ip->ip_hl << 2;
    int i, next;

    DEBUG_CALL("ip_reass");
    DEBUG_ARG("ip = %lx", (long)ip);
    DEBUG_ARG("fp = %lx", (long)fp);
    DEBUG_ARG("m = %lx", (long)m);

    /*
     * Presence of header sizes in mbufs
     * would confuse code below.
     * Fragment m_data is concatenated.
     */
    m->m_data += hlen;
    m->m_len -= hlen;

    /*
     * If first fragment to arrive, create a reassembly queue.
     * The queue head lives inside a freshly allocated mbuf; all links
     * are stored as 32-bit offsets (ptr_to_u32/u32_to_ptr).
     */
    if (fp == 0)
    {
        struct mbuf *t;
        if ((t = m_get(pData)) == NULL) goto dropfrag;
        fp = mtod(t, struct ipq_t *);
        insque_32(pData, fp, &ipq);
        fp->ipq_ttl = IPFRAGTTL;
        fp->ipq_p = ip->ip_p;
        fp->ipq_id = ip->ip_id;
        /* Empty fragment ring: head points at itself. */
        fp->ipq_next = fp->ipq_prev = ptr_to_u32(pData, (struct ipasfrag *)fp);
        fp->ipq_src = ((struct ip *)ip)->ip_src;
        fp->ipq_dst = ((struct ip *)ip)->ip_dst;
        q = (struct ipasfrag *)fp;
        goto insert;
    }

    /*
     * Find a segment which begins after this one does.
     */
    for (q = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *);
         q != (struct ipasfrag *)fp;
         q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *))
        if (q->ip_off > ip->ip_off)
            break;

    /*
     * If there is a preceding segment, it may provide some of
     * our data already. If so, drop the data from the incoming
     * segment. If it provides all of our data, drop us.
     */
    if (u32_to_ptr(pData, q->ipf_prev, struct ipq_t *) != fp)
    {
        /* i = number of bytes of ours already covered by the predecessor. */
        i = (u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *))->ip_off +
            (u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *))->ip_len - ip->ip_off;
        if (i > 0)
        {
            if (i >= ip->ip_len)
                goto dropfrag;
            m_adj(dtom(pData, ip), i);
            ip->ip_off += i;
            ip->ip_len -= i;
        }
    }

    /*
     * While we overlap succeeding segments trim them or,
     * if they are completely covered, dequeue them.
     * NOTE: q is advanced *before* its predecessor is freed/dequeued,
     * so q->ipf_prev below still names the segment being discarded.
     */
    while (q != (struct ipasfrag *)fp && ip->ip_off + ip->ip_len > q->ip_off)
    {
        i = (ip->ip_off + ip->ip_len) - q->ip_off;
        if (i < q->ip_len) {
            /* Partial overlap: trim the front of q and stop. */
            q->ip_len -= i;
            q->ip_off += i;
            m_adj(dtom(pData, q), i);
            break;
        }
        q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *);
        m_freem(pData, dtom(pData, u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *)));
        ip_deq(pData, u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *));
    }

insert:
    /*
     * Stick new segment in its place;
     * check for complete reassembly.
     */
    ip_enq(pData, ip, u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *));
    next = 0;
    /* Walk the ring: every byte from 0 must be covered without gaps. */
    for (q = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *);
         q != (struct ipasfrag *)fp;
         q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *))
    {
        if (q->ip_off != next)
            return (0);
        next += q->ip_len;
    }
    /* Last fragment (ring predecessor of the head) must not expect more. */
    if (u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *)->ipf_mff & 1)
        return (0);

    /*
     * Reassembly is complete; concatenate fragments.
     */
    q = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *);
    m = dtom(pData, q);

    q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *);
    while (q != (struct ipasfrag *)fp)
    {
        struct mbuf *t;
        t = dtom(pData, q);
        q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *);
        m_cat(pData, m, t);
    }

    /*
     * Create header for new ip packet by
     * modifying header of first packet;
     * dequeue and discard fragment reassembly header.
     * Make header visible.
     */
    ip = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *);

    /*
     * If the fragments concatenated to an mbuf that's
     * bigger than the total size of the fragment, then an
     * m_ext buffer was alloced. But fp->ipq_next points to
     * the old buffer (in the mbuf), so we must point ip
     * into the new buffer.
     */
    if (m->m_flags & M_EXT)
    {
        int delta;
        delta = (char *)ip - m->m_dat;
        ip = (struct ipasfrag *)(m->m_ext + delta);
    }

    /* DEBUG_ARG("ip = %lx", (long)ip);
     * ip=(struct ipasfrag *)m->m_data; */

    /* ip_len is left as the payload length (header excluded). */
    ip->ip_len = next;
    ip->ipf_mff &= ~1;
    ((struct ip *)ip)->ip_src = fp->ipq_src;
    ((struct ip *)ip)->ip_dst = fp->ipq_dst;
    remque_32(pData, fp);
    (void) m_free(pData, dtom(pData, fp));
    /* Re-expose the IP header that was hidden at function entry. */
    m = dtom(pData, ip);
    m->m_len += (ip->ip_hl << 2);
    m->m_data -= (ip->ip_hl << 2);

    return ((struct ip *)ip);

dropfrag:
    ipstat.ips_fragdropped++;
    m_freem(pData, m);
    return (0);
}
424
425/*
426 * Free a fragment reassembly header and all
427 * associated datagrams.
428 */
429void
430ip_freef(PNATState pData, struct ipq_t *fp)
431{
432 register struct ipasfrag *q, *p;
433
434 for (q = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *);
435 q != (struct ipasfrag *)fp;
436 q = p)
437 {
438 p = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *);
439 ip_deq(pData, q);
440 m_freem(pData, dtom(pData, q));
441 }
442 remque_32(pData, fp);
443 (void) m_free(pData, dtom(pData, fp));
444}
445
446#else /* VBOX_WITH_BSD_REASS */
447
/*
 * BSD-style fragment reassembly (derived from FreeBSD's ip_reass()).
 * Takes the fragment in @a m, files it on the matching per-hash
 * reassembly queue and returns the complete datagram once all pieces
 * have arrived, or NULL while more fragments are pending (or the
 * fragment was dropped).  On success the returned mbuf has the IP
 * header visible and ip_len set to the payload length (header excluded).
 */
struct mbuf *
ip_reass(PNATState pData, struct mbuf* m)
{
    struct ip *ip;
    struct mbuf *p, *q, *nq, *t;  /* NOTE(review): t only used in #if 0 code below */
    struct ipq_t *fp = NULL;
    struct ipqhead *head;
    int i, hlen, next;
    u_short hash;

    /* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
    if (   maxnipq == 0
        || maxfragsperpacket == 0)
    {
        ipstat.ips_fragments++;
        ipstat.ips_fragdropped++;
        m_freem(pData, m);
        return (NULL);
    }

    ip = mtod(m, struct ip *);
    hlen = ip->ip_hl << 2;

    hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
    head = &ipq[hash];

    /*
     * Look for queue of fragments
     * of this datagram.
     */
    TAILQ_FOREACH(fp, head, ipq_list)
        if (ip->ip_id == fp->ipq_id &&
            ip->ip_src.s_addr == fp->ipq_src.s_addr &&
            ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
            ip->ip_p == fp->ipq_p)
            goto found;

    fp = NULL;

    /*
     * Attempt to trim the number of allocated fragment queues if it
     * exceeds the administrative limit.
     */
    if ((nipq > maxnipq) && (maxnipq > 0))
    {
        /*
         * drop something from the tail of the current queue
         * before proceeding further
         */
        struct ipq_t *q = TAILQ_LAST(head, ipqhead);
        if (q == NULL)
        {
            /* gak -- this bucket is empty; evict from any other bucket. */
            for (i = 0; i < IPREASS_NHASH; i++)
            {
                struct ipq_t *r = TAILQ_LAST(&ipq[i], ipqhead);
                if (r)
                {
                    ipstat.ips_fragtimeout += r->ipq_nfrags;
                    ip_freef(pData, &ipq[i], r);
                    break;
                }
            }
        }
        else
        {
            ipstat.ips_fragtimeout += q->ipq_nfrags;
            ip_freef(pData, head, q);
        }
    }

found:
    /*
     * Adjust ip_len to not reflect header,
     * convert offset of this to bytes.
     */
    ip->ip_len -= hlen;
    if (ip->ip_off & IP_MF)
    {
        /*
         * Make sure that fragments have a data length
         * that's a non-zero multiple of 8 bytes.
         */
        if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0)
        {
            ipstat.ips_toosmall++; /* XXX */
            goto dropfrag;
        }
        m->m_flags |= M_FRAG;
    }
    else
        m->m_flags &= ~M_FRAG;
    ip->ip_off <<= 3;


    /*
     * Attempt reassembly; if it succeeds, proceed.
     * ip_reass() will return a different mbuf.
     */
    ipstat.ips_fragments++;
    /* Remember where this fragment's IP header sits; GETIP() below uses it. */
    m->m_hdr.header = ip;

    /* Previous ip_reass() started here. */
    /*
     * Presence of header sizes in mbufs
     * would confuse code below.
     */
    m->m_data += hlen;
    m->m_len -= hlen;

    /*
     * If first fragment to arrive, create a reassembly queue.
     */
    if (fp == NULL)
    {
        fp = malloc(sizeof(struct ipq_t));
        if (fp == NULL)
            goto dropfrag;
        TAILQ_INSERT_HEAD(head, fp, ipq_list);
        nipq++;
        fp->ipq_nfrags = 1;
        fp->ipq_ttl = IPFRAGTTL;
        fp->ipq_p = ip->ip_p;
        fp->ipq_id = ip->ip_id;
        fp->ipq_src = ip->ip_src;
        fp->ipq_dst = ip->ip_dst;
        fp->ipq_frags = m;
        m->m_nextpkt = NULL;
        goto done;
    }
    else
    {
        fp->ipq_nfrags++;
    }

/* Fetch the saved IP header of a queued fragment. */
#define GETIP(m) ((struct ip*)((m)->m_hdr.header))


    /*
     * Find a segment which begins after this one does.
     */
    for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
        if (GETIP(q)->ip_off > ip->ip_off)
            break;

    /*
     * If there is a preceding segment, it may provide some of
     * our data already. If so, drop the data from the incoming
     * segment. If it provides all of our data, drop us, otherwise
     * stick new segment in the proper place.
     *
     * If some of the data is dropped from the preceding
     * segment, then its checksum is invalidated.
     */
    if (p)
    {
        i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
        if (i > 0)
        {
            if (i >= ip->ip_len)
                goto dropfrag;
            m_adj(m, i);
            ip->ip_off += i;
            ip->ip_len -= i;
        }
        m->m_nextpkt = p->m_nextpkt;
        p->m_nextpkt = m;
    }
    else
    {
        m->m_nextpkt = fp->ipq_frags;
        fp->ipq_frags = m;
    }

    /*
     * While we overlap succeeding segments trim them or,
     * if they are completely covered, dequeue them.
     * m directly precedes the overlapped run, so relinking
     * m->m_nextpkt unchains the fully-covered fragment q.
     */
    for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
         q = nq)
    {
        i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
        if (i < GETIP(q)->ip_len)
        {
            GETIP(q)->ip_len -= i;
            GETIP(q)->ip_off += i;
            m_adj(q, i);
            break;
        }
        nq = q->m_nextpkt;
        m->m_nextpkt = nq;
        ipstat.ips_fragdropped++;
        fp->ipq_nfrags--;
        m_freem(pData, q);
    }

    /*
     * Check for complete reassembly and perform frag per packet
     * limiting.
     *
     * Frag limiting is performed here so that the nth frag has
     * a chance to complete the packet before we drop the packet.
     * As a result, n+1 frags are actually allowed per packet, but
     * only n will ever be stored. (n = maxfragsperpacket.)
     *
     */
    next = 0;
    for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
    {
        if (GETIP(q)->ip_off != next)
        {
            /* Gap found: datagram still incomplete. */
            if (fp->ipq_nfrags > maxfragsperpacket)
            {
                ipstat.ips_fragdropped += fp->ipq_nfrags;
                ip_freef(pData, head, fp);
            }
            goto done;
        }
        next += GETIP(q)->ip_len;
    }
    /* Make sure the last packet didn't have the IP_MF flag */
    if (p->m_flags & M_FRAG)
    {
        if (fp->ipq_nfrags > maxfragsperpacket)
        {
            ipstat.ips_fragdropped += fp->ipq_nfrags;
            ip_freef(pData, head, fp);
        }
        goto done;
    }

    /*
     * Reassembly is complete. Make sure the packet is a sane size.
     */
    q = fp->ipq_frags;
    ip = GETIP(q);
    if (next + (ip->ip_hl << 2) > IP_MAXPACKET)
    {
        ipstat.ips_fragdropped += fp->ipq_nfrags;
        ip_freef(pData, head, fp);
        goto done;
    }

    /*
     * Concatenate fragments.
     */
    m = q;
#if 0
    t = m->m_next;
    m->m_next = NULL;
    m_cat(pData, m, t);
#endif
    nq = q->m_nextpkt;
    q->m_nextpkt = NULL;
    for (q = nq; q != NULL; q = nq)
    {
        nq = q->m_nextpkt;
        q->m_nextpkt = NULL;
        m_cat(pData, m, q);
    }

    /*
     * Create header for new ip packet by modifying header of first
     * packet; dequeue and discard fragment reassembly header.
     * Make header visible.
     */
#if 0
    ip->ip_len = (ip->ip_hl << 2) + next;
#else
    ip->ip_len = next;
#endif
    ip->ip_src = fp->ipq_src;
    ip->ip_dst = fp->ipq_dst;
    TAILQ_REMOVE(head, fp, ipq_list);
    nipq--;
    free(fp);

    /* Re-expose the IP header hidden when the first fragment was queued. */
    m->m_len += (ip->ip_hl << 2);
    m->m_data -= (ip->ip_hl << 2);
    /* some debugging cruft by sklower, below, will go away soon */
#if 0
    if (m->m_flags & M_PKTHDR) /* XXX this should be done elsewhere */
        m_fixhdr(m);
#endif
    ipstat.ips_reassembled++;
    return (m);

dropfrag:
    ipstat.ips_fragdropped++;
    if (fp != NULL)
        fp->ipq_nfrags--;
    m_freem(pData, m);

done:
    return NULL;

#undef GETIP
}
746
747void
748ip_freef(PNATState pData, struct ipqhead *fhp, struct ipq_t *fp)
749{
750 struct mbuf *q;
751
752 while (fp->ipq_frags)
753 {
754 q = fp->ipq_frags;
755 fp->ipq_frags = q->m_nextpkt;
756 m_freem(pData, q);
757 }
758 TAILQ_REMOVE(fhp, fp, ipq_list);
759 free(fp);
760 nipq--;
761}
762#endif /* VBOX_WITH_BSD_REASS */
763
764#ifndef VBOX_WITH_BSD_REASS
765/*
766 * Put an ip fragment on a reassembly chain.
767 * Like insque, but pointers in middle of structure.
768 */
void
ip_enq(PNATState pData, register struct ipasfrag *p, register struct ipasfrag *prev)
{
    DEBUG_CALL("ip_enq");
    DEBUG_ARG("prev = %lx", (long)prev);
    /* Splice p in after prev.  Links are 32-bit handles, not raw pointers.
     * Order matters: prev->ipf_next must be read (lines 2-3) before it is
     * overwritten by the final assignment. */
    p->ipf_prev = ptr_to_u32(pData, prev);
    p->ipf_next = prev->ipf_next;
    u32_to_ptr(pData, prev->ipf_next, struct ipasfrag *)->ipf_prev = ptr_to_u32(pData, p);
    prev->ipf_next = ptr_to_u32(pData, p);
}
779
780/*
781 * To ip_enq as remque is to insque.
782 */
void
ip_deq(PNATState pData, register struct ipasfrag *p)
{
    /* Unlink p from its ring by pointing its neighbours at each other. */
    struct ipasfrag *prev = u32_to_ptr(pData, p->ipf_prev, struct ipasfrag *);
    struct ipasfrag *next = u32_to_ptr(pData, p->ipf_next, struct ipasfrag *);
    /* NOTE(review): u32ptr_done() presumably releases the handle held in
     * prev->ipf_next before it is overwritten -- confirm against slirp.h. */
    u32ptr_done(pData, prev->ipf_next, p);
    prev->ipf_next = p->ipf_next;
    next->ipf_prev = p->ipf_prev;
}
792#endif /* !VBOX_WITH_BSD_REASS */
793
794/*
795 * IP timer processing;
796 * if a timer expires on a reassembly
797 * queue, discard it.
798 */
void
ip_slowtimo(PNATState pData)
{
    register struct ipq_t *fp;

#ifndef VBOX_WITH_BSD_REASS
    DEBUG_CALL("ip_slowtimo");

    fp = u32_to_ptr(pData, ipq.next, struct ipq_t *);
    if (fp == 0)
        return;

    /* Age every queue on the ring.  fp is advanced *before* the expiry
     * check, so fp->prev below names the entry that was just aged and
     * the walk survives ip_freef() removing it. */
    while (fp != &ipq)
    {
        --fp->ipq_ttl;
        fp = u32_to_ptr(pData, fp->next, struct ipq_t *);
        if (u32_to_ptr(pData, fp->prev, struct ipq_t *)->ipq_ttl == 0)
        {
            ipstat.ips_fragtimeout++;
            ip_freef(pData, u32_to_ptr(pData, fp->prev, struct ipq_t *));
        }
    }
#else /* VBOX_WITH_BSD_REASS */
    /* XXX: the fragment expiration is the same but requires an
     * additional loop (see ip_input.c in the FreeBSD tree)
     */
    int i;
    DEBUG_CALL("ip_slowtimo");
    /* Age and expire: fp is saved in fpp and advanced first so the
     * iteration survives ip_freef() unlinking the expired entry. */
    for (i = 0; i < IPREASS_NHASH; i++)
    {
        for(fp = TAILQ_FIRST(&ipq[i]); fp;)
        {
            struct ipq_t *fpp;

            fpp = fp;
            fp = TAILQ_NEXT(fp, ipq_list);
            if(--fpp->ipq_ttl == 0) {
                ipstat.ips_fragtimeout += fpp->ipq_nfrags;
                ip_freef(pData, &ipq[i], fpp);
            }
        }
    }
    /*
     * If we are over the maximum number of fragments
     * (due to the limit being lowered), drain off
     * enough to get down to the new limit.
     */
    if (maxnipq >= 0 && nipq > maxnipq)
    {
        for (i = 0; i < IPREASS_NHASH; i++)
        {
            while (nipq > maxnipq && !TAILQ_EMPTY(&ipq[i]))
            {
                ipstat.ips_fragdropped += TAILQ_FIRST(&ipq[i])->ipq_nfrags;
                ip_freef(pData, &ipq[i], TAILQ_FIRST(&ipq[i]));
            }
        }
    }
#endif /* VBOX_WITH_BSD_REASS */
}
859
860
861/*
862 * Strip out IP options, at higher
863 * level protocol in the kernel.
864 * Second argument is buffer to which options
865 * will be moved, and return value is their length.
866 * (XXX) should be deleted; last arg currently ignored.
867 */
868void
869ip_stripoptions(struct mbuf *m, struct mbuf *mopt)
870{
871 register int i;
872 struct ip *ip = mtod(m, struct ip *);
873 register caddr_t opts;
874 int olen;
875
876 olen = (ip->ip_hl<<2) - sizeof(struct ip);
877 opts = (caddr_t)(ip + 1);
878 i = m->m_len - (sizeof(struct ip) + olen);
879 memcpy(opts, opts + olen, (unsigned)i);
880 m->m_len -= olen;
881
882 ip->ip_hl = sizeof(struct ip) >> 2;
883}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette