VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/ip_input.c@ 29603

Last change on this file since 29603 was 28800, checked in by vboxsync, 15 years ago

Automated rebranding to Oracle copyright/license strings via filemuncher

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 16.9 KB
Line 
1/* $Id: ip_input.c 28800 2010-04-27 08:22:32Z vboxsync $ */
2/** @file
3 * NAT - IP input.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*
19 * This code is based on:
20 *
21 * Copyright (c) 1982, 1986, 1988, 1993
22 * The Regents of the University of California. All rights reserved.
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 * 1. Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * 2. Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in the
31 * documentation and/or other materials provided with the distribution.
32 * 3. All advertising materials mentioning features or use of this software
33 * must display the following acknowledgement:
34 * This product includes software developed by the University of
35 * California, Berkeley and its contributors.
36 * 4. Neither the name of the University nor the names of its contributors
37 * may be used to endorse or promote products derived from this software
38 * without specific prior written permission.
39 *
40 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50 * SUCH DAMAGE.
51 *
52 * @(#)ip_input.c 8.2 (Berkeley) 1/4/94
53 * ip_input.c,v 1.11 1994/11/16 10:17:08 jkh Exp
54 */
55
56/*
57 * Changes and additions relating to SLiRP are
58 * Copyright (c) 1995 Danny Gasparovski.
59 *
60 * Please read the file COPYRIGHT for the
61 * terms and conditions of the copyright.
62 */
63
64#include <slirp.h>
65#include "ip_icmp.h"
66#include "alias.h"
67
68
69/*
70 * IP initialization: fill in IP protocol switch table.
71 * All protocols not implemented in kernel go to raw IP protocol handler.
72 */
73void
74ip_init(PNATState pData)
75{
76 int i = 0;
77 for (i = 0; i < IPREASS_NHASH; ++i)
78 TAILQ_INIT(&ipq[i]);
79 maxnipq = 100; /* ??? */
80 maxfragsperpacket = 16;
81 nipq = 0;
82 ip_currid = tt.tv_sec & 0xffff;
83 udp_init(pData);
84 tcp_init(pData);
85}
86
87static struct libalias *select_alias(PNATState pData, struct mbuf* m)
88{
89 struct libalias *la = pData->proxy_alias;
90 struct udphdr *udp = NULL;
91 struct ip *pip = NULL;
92
93#ifndef VBOX_WITH_SLIRP_BSD_MBUF
94 if (m->m_la)
95 return m->m_la;
96#else
97 struct m_tag *t;
98 if ((t = m_tag_find(m, PACKET_TAG_ALIAS, NULL)) != 0)
99 return (struct libalias *)&t[1];
100#endif
101
102 return la;
103}
104
105/*
106 * Ip input routine. Checksum and byte swap header. If fragmented
107 * try to reassemble. Process options. Pass to next level.
108 */
109void
110ip_input(PNATState pData, struct mbuf *m)
111{
112 register struct ip *ip;
113 int hlen = 0;
114 int mlen = 0;
115
116 STAM_PROFILE_START(&pData->StatIP_input, a);
117
118 DEBUG_CALL("ip_input");
119 DEBUG_ARG("m = %lx", (long)m);
120 ip = mtod(m, struct ip *);
121 Log2(("ip_dst=%R[IP4](len:%d) m_len = %d", &ip->ip_dst, RT_N2H_U16(ip->ip_len), m->m_len));
122 Log2(("ip_dst=%R[IP4](len:%d) m_len = %d\n", &ip->ip_dst, RT_N2H_U16(ip->ip_len), m->m_len));
123
124 ipstat.ips_total++;
125 {
126 int rc;
127 STAM_PROFILE_START(&pData->StatALIAS_input, b);
128 rc = LibAliasIn(select_alias(pData, m), mtod(m, char *), m->m_len);
129 STAM_PROFILE_STOP(&pData->StatALIAS_input, b);
130 Log2(("NAT: LibAlias return %d\n", rc));
131 if (m->m_len != RT_N2H_U16(ip->ip_len))
132 m->m_len = RT_N2H_U16(ip->ip_len);
133 }
134
135 mlen = m->m_len;
136
137 if (mlen < sizeof(struct ip))
138 {
139 ipstat.ips_toosmall++;
140 STAM_PROFILE_STOP(&pData->StatIP_input, a);
141 return;
142 }
143
144 ip = mtod(m, struct ip *);
145 if (ip->ip_v != IPVERSION)
146 {
147 ipstat.ips_badvers++;
148 goto bad;
149 }
150
151 hlen = ip->ip_hl << 2;
152 if ( hlen < sizeof(struct ip)
153 || hlen > m->m_len)
154 {
155 /* min header length */
156 ipstat.ips_badhlen++; /* or packet too short */
157 goto bad;
158 }
159
160 /* keep ip header intact for ICMP reply
161 * ip->ip_sum = cksum(m, hlen);
162 * if (ip->ip_sum) {
163 */
164 if (cksum(m, hlen))
165 {
166 ipstat.ips_badsum++;
167 goto bad;
168 }
169
170 /*
171 * Convert fields to host representation.
172 */
173 NTOHS(ip->ip_len);
174 if (ip->ip_len < hlen)
175 {
176 ipstat.ips_badlen++;
177 goto bad;
178 }
179
180 NTOHS(ip->ip_id);
181 NTOHS(ip->ip_off);
182
183 /*
184 * Check that the amount of data in the buffers
185 * is as at least much as the IP header would have us expect.
186 * Trim mbufs if longer than we expect.
187 * Drop packet if shorter than we expect.
188 */
189 if (mlen < ip->ip_len)
190 {
191 ipstat.ips_tooshort++;
192 goto bad;
193 }
194
195 /* Should drop packet if mbuf too long? hmmm... */
196 if (mlen > ip->ip_len)
197 m_adj(m, ip->ip_len - m->m_len);
198
199 /* check ip_ttl for a correct ICMP reply */
200 if (ip->ip_ttl==0 || ip->ip_ttl == 1)
201 {
202 icmp_error(pData, m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, 0, "ttl");
203 goto bad;
204 }
205
206 ip->ip_ttl--;
207 /*
208 * If offset or IP_MF are set, must reassemble.
209 * Otherwise, nothing need be done.
210 * (We could look in the reassembly queue to see
211 * if the packet was previously fragmented,
212 * but it's not worth the time; just let them time out.)
213 *
214 */
215 if (ip->ip_off & (IP_MF | IP_OFFMASK))
216 {
217 m = ip_reass(pData, m);
218 if (m == NULL)
219 {
220 STAM_PROFILE_STOP(&pData->StatIP_input, a);
221 return;
222 }
223 ip = mtod(m, struct ip *);
224 hlen = ip->ip_hl << 2;
225 }
226 else
227 ip->ip_len -= hlen;
228
229 /*
230 * Switch out to protocol's input routine.
231 */
232 ipstat.ips_delivered++;
233 switch (ip->ip_p)
234 {
235 case IPPROTO_TCP:
236 tcp_input(pData, m, hlen, (struct socket *)NULL);
237 break;
238 case IPPROTO_UDP:
239 udp_input(pData, m, hlen);
240 break;
241 case IPPROTO_ICMP:
242 icmp_input(pData, m, hlen);
243 break;
244 default:
245 ipstat.ips_noproto++;
246 m_freem(pData, m);
247 }
248 STAM_PROFILE_STOP(&pData->StatIP_input, a);
249 return;
250
251bad:
252 Log2(("NAT: IP datagram to %R[IP4] with size(%d) claimed as bad\n",
253 &ip->ip_dst, ip->ip_len));
254 m_freem(pData, m);
255 STAM_PROFILE_STOP(&pData->StatIP_input, a);
256 return;
257}
258
/**
 * Take an incoming IP fragment and attempt datagram reassembly.
 *
 * Modelled on the FreeBSD ip_reass(): fragments of one datagram are keyed
 * by (src, dst, id, protocol) and kept, sorted by offset, on a per-hash
 * ipq_t queue.  The caller loses ownership of @a m in all cases.
 *
 * @returns the fully reassembled datagram mbuf when this fragment completes
 *          it, NULL when the fragment was queued or dropped.
 */
struct mbuf *
ip_reass(PNATState pData, struct mbuf* m)
{
    struct ip *ip;
    struct mbuf *p, *q, *nq;
    struct ipq_t *fp = NULL;
    struct ipqhead *head;
    int i, hlen, next;
    u_short hash;

    /* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
    if (   maxnipq == 0
        || maxfragsperpacket == 0)
    {
        ipstat.ips_fragments++;
        ipstat.ips_fragdropped++;
        m_freem(pData, m);
        return (NULL);
    }

    ip = mtod(m, struct ip *);
    hlen = ip->ip_hl << 2;

    hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
    head = &ipq[hash];

    /*
     * Look for queue of fragments
     * of this datagram.
     */
    TAILQ_FOREACH(fp, head, ipq_list)
        if (ip->ip_id == fp->ipq_id &&
            ip->ip_src.s_addr == fp->ipq_src.s_addr &&
            ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
            ip->ip_p == fp->ipq_p)
            goto found;

    fp = NULL;

    /*
     * Attempt to trim the number of allocated fragment queues if it
     * exceeds the administrative limit.
     */
    if ((nipq > maxnipq) && (maxnipq > 0))
    {
        /*
         * drop something from the tail of the current queue
         * before proceeding further
         */
        struct ipq_t *pHead = TAILQ_LAST(head, ipqhead);
        if (pHead == NULL)
        {
            /* gak — this bucket is empty; evict the oldest entry from
             * the first non-empty bucket instead. */
            for (i = 0; i < IPREASS_NHASH; i++)
            {
                struct ipq_t *pTail = TAILQ_LAST(&ipq[i], ipqhead);
                if (pTail)
                {
                    ipstat.ips_fragtimeout += pTail->ipq_nfrags;
                    ip_freef(pData, &ipq[i], pTail);
                    break;
                }
            }
        }
        else
        {
            ipstat.ips_fragtimeout += pHead->ipq_nfrags;
            ip_freef(pData, head, pHead);
        }
    }

found:
    /*
     * Adjust ip_len to not reflect header,
     * convert offset of this to bytes.
     */
    ip->ip_len -= hlen;
    if (ip->ip_off & IP_MF)
    {
        /*
         * Make sure that fragments have a data length
         * that's a non-zero multiple of 8 bytes.
         */
        if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0)
        {
            ipstat.ips_toosmall++; /* XXX */
            goto dropfrag;
        }
        m->m_flags |= M_FRAG;
    }
    else
        m->m_flags &= ~M_FRAG;
    /* ip_off is in 8-byte units on the wire; convert to bytes. */
    ip->ip_off <<= 3;


    /*
     * Attempt reassembly; if it succeeds, proceed.
     * ip_reass() will return a different mbuf.
     */
    ipstat.ips_fragments++;

    /* Previous ip_reass() started here. */
    /*
     * Presence of header sizes in mbufs
     * would confuse code below.
     */
    m->m_data += hlen;
    m->m_len -= hlen;

    /*
     * If first fragment to arrive, create a reassembly queue.
     */
    if (fp == NULL)
    {
        fp = RTMemAlloc(sizeof(struct ipq_t));
        if (fp == NULL)
            goto dropfrag;
        TAILQ_INSERT_HEAD(head, fp, ipq_list);
        nipq++;
        fp->ipq_nfrags = 1;
        fp->ipq_ttl = IPFRAGTTL;
        fp->ipq_p = ip->ip_p;
        fp->ipq_id = ip->ip_id;
        fp->ipq_src = ip->ip_src;
        fp->ipq_dst = ip->ip_dst;
        fp->ipq_frags = m;
        m->m_nextpkt = NULL;
        goto done;
    }
    else
    {
        fp->ipq_nfrags++;
    }

/* Recover the (already byte-swapped) IP header of a queued fragment. */
#ifndef VBOX_WITH_SLIRP_BSD_MBUF
#define GETIP(m) ((struct ip*)(MBUF_IP_HEADER(m)))
#else
#define GETIP(m) ((struct ip*)((m)->m_pkthdr.header))
#endif


    /*
     * Find a segment which begins after this one does.
     */
    for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
        if (GETIP(q)->ip_off > ip->ip_off)
            break;

    /*
     * If there is a preceding segment, it may provide some of
     * our data already. If so, drop the data from the incoming
     * segment. If it provides all of our data, drop us, otherwise
     * stick new segment in the proper place.
     *
     * If some of the data is dropped from the preceding
     * segment, then its checksum is invalidated.
     */
    if (p)
    {
        /* i = number of bytes of overlap with the previous fragment. */
        i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
        if (i > 0)
        {
            if (i >= ip->ip_len)
                goto dropfrag; /* fully covered by the previous fragment */
            m_adj(m, i);       /* trim the overlapping front off us */
            ip->ip_off += i;
            ip->ip_len -= i;
        }
        m->m_nextpkt = p->m_nextpkt;
        p->m_nextpkt = m;
    }
    else
    {
        m->m_nextpkt = fp->ipq_frags;
        fp->ipq_frags = m;
    }

    /*
     * While we overlap succeeding segments trim them or,
     * if they are completely covered, dequeue them.
     */
    for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
         q = nq)
    {
        i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
        if (i < GETIP(q)->ip_len)
        {
            /* Partial overlap: shave the front off the later fragment. */
            GETIP(q)->ip_len -= i;
            GETIP(q)->ip_off += i;
            m_adj(q, i);
            break;
        }
        /* Completely covered: unlink and free the later fragment. */
        nq = q->m_nextpkt;
        m->m_nextpkt = nq;
        ipstat.ips_fragdropped++;
        fp->ipq_nfrags--;
        m_freem(pData, q);
    }

    /*
     * Check for complete reassembly and perform frag per packet
     * limiting.
     *
     * Frag limiting is performed here so that the nth frag has
     * a chance to complete the packet before we drop the packet.
     * As a result, n+1 frags are actually allowed per packet, but
     * only n will ever be stored. (n = maxfragsperpacket.)
     *
     */
    next = 0;
    for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
    {
        /* A gap means the datagram is still incomplete. */
        if (GETIP(q)->ip_off != next)
        {
            if (fp->ipq_nfrags > maxfragsperpacket)
            {
                ipstat.ips_fragdropped += fp->ipq_nfrags;
                ip_freef(pData, head, fp);
            }
            goto done;
        }
        next += GETIP(q)->ip_len;
    }
    /* Make sure the last packet didn't have the IP_MF flag */
    if (p->m_flags & M_FRAG)
    {
        if (fp->ipq_nfrags > maxfragsperpacket)
        {
            ipstat.ips_fragdropped += fp->ipq_nfrags;
            ip_freef(pData, head, fp);
        }
        goto done;
    }

    /*
     * Reassembly is complete. Make sure the packet is a sane size.
     */
    q = fp->ipq_frags;
    ip = GETIP(q);
    hlen = ip->ip_hl << 2;
    if (next + hlen > IP_MAXPACKET)
    {
        ipstat.ips_fragdropped += fp->ipq_nfrags;
        ip_freef(pData, head, fp);
        goto done;
    }

    /*
     * Concatenate fragments.
     */
    m = q;
    nq = q->m_nextpkt;
    q->m_nextpkt = NULL;
    for (q = nq; q != NULL; q = nq)
    {
        nq = q->m_nextpkt;
        q->m_nextpkt = NULL;
        m_cat(pData, m, q);

        /* m_cat may reallocate; temporarily expose the header, refresh
         * the ip pointer, then hide the header again for the next round. */
        m->m_len += hlen;
        m->m_data -= hlen;
        ip = mtod(m, struct ip *); /*update ip pointer */
        hlen = ip->ip_hl << 2;
        m->m_len -= hlen;
        m->m_data += hlen;
    }
    /* Make the IP header visible again in the final chain. */
    m->m_len += hlen;
    m->m_data -= hlen;

    /*
     * Create header for new ip packet by modifying header of first
     * packet; dequeue and discard fragment reassembly header.
     * Make header visible.
     */

    ip->ip_len = next;
    ip->ip_src = fp->ipq_src;
    ip->ip_dst = fp->ipq_dst;
    TAILQ_REMOVE(head, fp, ipq_list);
    nipq--;
    RTMemFree(fp);

    Assert((ip->ip_len == next));
    /* some debugging cruft by sklower, below, will go away soon */
#if 0
    if (m->m_flags & M_PKTHDR) /* XXX this should be done elsewhere */
        m_fixhdr(m);
#endif
    ipstat.ips_reassembled++;
    return (m);

dropfrag:
    ipstat.ips_fragdropped++;
    if (fp != NULL)
        fp->ipq_nfrags--;
    m_freem(pData, m);

done:
    return NULL;

#undef GETIP
}
561
562void
563ip_freef(PNATState pData, struct ipqhead *fhp, struct ipq_t *fp)
564{
565 struct mbuf *q;
566
567 while (fp->ipq_frags)
568 {
569 q = fp->ipq_frags;
570 fp->ipq_frags = q->m_nextpkt;
571 m_freem(pData, q);
572 }
573 TAILQ_REMOVE(fhp, fp, ipq_list);
574 RTMemFree(fp);
575 nipq--;
576}
577
578/*
579 * IP timer processing;
580 * if a timer expires on a reassembly
581 * queue, discard it.
582 */
583void
584ip_slowtimo(PNATState pData)
585{
586 register struct ipq_t *fp;
587
588 /* XXX: the fragment expiration is the same but requier
589 * additional loop see (see ip_input.c in FreeBSD tree)
590 */
591 int i;
592 DEBUG_CALL("ip_slowtimo");
593 for (i = 0; i < IPREASS_NHASH; i++)
594 {
595 for(fp = TAILQ_FIRST(&ipq[i]); fp;)
596 {
597 struct ipq_t *fpp;
598
599 fpp = fp;
600 fp = TAILQ_NEXT(fp, ipq_list);
601 if(--fpp->ipq_ttl == 0)
602 {
603 ipstat.ips_fragtimeout += fpp->ipq_nfrags;
604 ip_freef(pData, &ipq[i], fpp);
605 }
606 }
607 }
608 /*
609 * If we are over the maximum number of fragments
610 * (due to the limit being lowered), drain off
611 * enough to get down to the new limit.
612 */
613 if (maxnipq >= 0 && nipq > maxnipq)
614 {
615 for (i = 0; i < IPREASS_NHASH; i++)
616 {
617 while (nipq > maxnipq && !TAILQ_EMPTY(&ipq[i]))
618 {
619 ipstat.ips_fragdropped += TAILQ_FIRST(&ipq[i])->ipq_nfrags;
620 ip_freef(pData, &ipq[i], TAILQ_FIRST(&ipq[i]));
621 }
622 }
623 }
624}
625
626
627/*
628 * Strip out IP options, at higher
629 * level protocol in the kernel.
630 * Second argument is buffer to which options
631 * will be moved, and return value is their length.
632 * (XXX) should be deleted; last arg currently ignored.
633 */
634void
635ip_stripoptions(struct mbuf *m, struct mbuf *mopt)
636{
637 register int i;
638 struct ip *ip = mtod(m, struct ip *);
639 register caddr_t opts;
640 int olen;
641
642 olen = (ip->ip_hl<<2) - sizeof(struct ip);
643 opts = (caddr_t)(ip + 1);
644 i = m->m_len - (sizeof(struct ip) + olen);
645 memcpy(opts, opts + olen, (unsigned)i);
646 m->m_len -= olen;
647
648 ip->ip_hl = sizeof(struct ip) >> 2;
649}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette