VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/ip_input.c@ 52245

Last change on this file since 52245 was 52245, checked in by vboxsync, 10 years ago

NAT: Drop packets to multicast destinations, the rest of the code is
not prepared to handle multicast meaningfully and doesn't do IGMP.
While here, drop packets to reserved class e addresses too (except to
255.255.255.255 limited broadcast) and drop packets with non-unicast
source addresses.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 17.4 KB
Line 
1/* $Id: ip_input.c 52245 2014-07-31 12:06:03Z vboxsync $ */
2/** @file
3 * NAT - IP input.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*
19 * This code is based on:
20 *
21 * Copyright (c) 1982, 1986, 1988, 1993
22 * The Regents of the University of California. All rights reserved.
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 * 1. Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * 2. Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in the
31 * documentation and/or other materials provided with the distribution.
32 * 3. All advertising materials mentioning features or use of this software
33 * must display the following acknowledgement:
34 * This product includes software developed by the University of
35 * California, Berkeley and its contributors.
36 * 4. Neither the name of the University nor the names of its contributors
37 * may be used to endorse or promote products derived from this software
38 * without specific prior written permission.
39 *
40 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50 * SUCH DAMAGE.
51 *
52 * @(#)ip_input.c 8.2 (Berkeley) 1/4/94
53 * ip_input.c,v 1.11 1994/11/16 10:17:08 jkh Exp
54 */
55
56/*
57 * Changes and additions relating to SLiRP are
58 * Copyright (c) 1995 Danny Gasparovski.
59 *
60 * Please read the file COPYRIGHT for the
61 * terms and conditions of the copyright.
62 */
63
64#include <slirp.h>
65#include "ip_icmp.h"
66#include "alias.h"
67
68
69/*
70 * IP initialization: fill in IP protocol switch table.
71 * All protocols not implemented in kernel go to raw IP protocol handler.
72 */
73void
74ip_init(PNATState pData)
75{
76 int i = 0;
77 for (i = 0; i < IPREASS_NHASH; ++i)
78 TAILQ_INIT(&ipq[i]);
79 maxnipq = 100; /* ??? */
80 maxfragsperpacket = 16;
81 nipq = 0;
82 ip_currid = tt.tv_sec & 0xffff;
83 udp_init(pData);
84 tcp_init(pData);
85}
86
87static struct libalias *select_alias(PNATState pData, struct mbuf* m)
88{
89 struct libalias *la = pData->proxy_alias;
90
91 struct m_tag *t;
92 if ((t = m_tag_find(m, PACKET_TAG_ALIAS, NULL)) != 0)
93 return (struct libalias *)&t[1];
94
95 return la;
96}
97
/*
 * Ip input routine.  Checksum and byte swap header.  If fragmented
 * try to reassemble.  Process options.  Pass to next level.
 *
 * NAT specifics: the packet is first run through libalias (unless the
 * M_SKIP_FIREWALL flag requests a bypass), then validated, possibly
 * reassembled, and finally dispatched to the tcp/udp/icmp input
 * routines.  The mbuf is always consumed here: either handed on to a
 * protocol input routine or freed on one of the error paths.
 */
void
ip_input(PNATState pData, struct mbuf *m)
{
    register struct ip *ip;
    int hlen = 0;   /* IP header length in bytes (ip_hl * 4) */
    int mlen = 0;   /* mbuf data length snapshot taken after the libalias pass */

    STAM_PROFILE_START(&pData->StatIP_input, a);

    LogFlowFunc(("ENTER: m = %lx\n", (long)m));
    ip = mtod(m, struct ip *);
    Log2(("ip_dst=%RTnaipv4(len:%d) m_len = %d\n", ip->ip_dst, RT_N2H_U16(ip->ip_len), m->m_len));

    ipstat.ips_total++;
    {
        int rc;
        /* Run the packet through libalias unless it is marked to skip the
         * firewall; in the latter case just clear the one-shot marker. */
        if (!(m->m_flags & M_SKIP_FIREWALL))
        {
            STAM_PROFILE_START(&pData->StatALIAS_input, b);
            rc = LibAliasIn(select_alias(pData, m), mtod(m, char *), m_length(m, NULL));
            STAM_PROFILE_STOP(&pData->StatALIAS_input, b);
            Log2(("NAT: LibAlias return %d\n", rc));
        }
        else
            m->m_flags &= ~M_SKIP_FIREWALL;
        /* libalias may have rewritten the payload (e.g. FTP); resync the
         * mbuf length with the (still network-order) ip_len field. */
        if (m->m_len != RT_N2H_U16(ip->ip_len))
            m->m_len = RT_N2H_U16(ip->ip_len);
    }

    mlen = m->m_len;

    /* Must contain at least a minimal IP header. */
    if (mlen < sizeof(struct ip))
    {
        ipstat.ips_toosmall++;
        goto bad_free_m;
    }

    ip = mtod(m, struct ip *);
    if (ip->ip_v != IPVERSION)
    {
        ipstat.ips_badvers++;
        goto bad_free_m;
    }

    hlen = ip->ip_hl << 2;
    if (   hlen < sizeof(struct ip)
        || hlen > m->m_len)
    {
        /* min header length */
        ipstat.ips_badhlen++; /* or packet too short */
        goto bad_free_m;
    }

    /* keep ip header intact for ICMP reply
     * ip->ip_sum = cksum(m, hlen);
     * if (ip->ip_sum) {
     */
    /* A correct header checksums to zero over its own length. */
    if (cksum(m, hlen))
    {
        ipstat.ips_badsum++;
        goto bad_free_m;
    }

    /*
     * Convert fields to host representation.
     * NOTE: from here on ip_len/ip_id/ip_off are host order; the
     * bad_free_m log below therefore prints host-order ip_len.
     */
    NTOHS(ip->ip_len);
    if (ip->ip_len < hlen)
    {
        ipstat.ips_badlen++;
        goto bad_free_m;
    }

    NTOHS(ip->ip_id);
    NTOHS(ip->ip_off);

    /*
     * Check that the amount of data in the buffers
     * is as at least much as the IP header would have us expect.
     * Trim mbufs if longer than we expect.
     * Drop packet if shorter than we expect.
     */
    if (mlen < ip->ip_len)
    {
        ipstat.ips_tooshort++;
        goto bad_free_m;
    }

    /* Should drop packet if mbuf too long? hmmm... */
    if (mlen > ip->ip_len)
        m_adj(m, ip->ip_len - m->m_len);

    /* source must be unicast: drop class D (multicast) and class E sources */
    if ((ip->ip_src.s_addr & RT_N2H_U32_C(0xe0000000)) == RT_N2H_U32_C(0xe0000000))
        goto free_m;

    /* check ip_ttl for a correct ICMP reply; reply before the decrement
     * below would make the TTL reach zero */
    if (ip->ip_ttl==0 || ip->ip_ttl == 1)
    {
        /* XXX: if we're in destination so perhaps we need to send ICMP_TIMXCEED_REASS */
        icmp_error(pData, m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, 0, "ttl");
        goto no_free_m;
    }

    ip->ip_ttl--;

    /*
     * Drop multicast (class d) and reserved (class e) here.  The rest
     * of the code is not yet prepared to deal with it.  IGMP is not
     * implemented either.  The 255.255.255.255 limited broadcast is
     * explicitly allowed through.
     */
    if (   (ip->ip_dst.s_addr & RT_N2H_U32_C(0xe0000000)) == RT_N2H_U32_C(0xe0000000)
        && ip->ip_dst.s_addr != 0xffffffff)
    {
        goto free_m;
    }

    /*
     * If offset or IP_MF are set, must reassemble.
     * Otherwise, nothing need be done.
     * (We could look in the reassembly queue to see
     * if the packet was previously fragmented,
     * but it's not worth the time; just let them time out.)
     *
     */
    if (ip->ip_off & (IP_MF | IP_OFFMASK))
    {
        /* ip_reass() consumes m; NULL means "queued or dropped". */
        m = ip_reass(pData, m);
        if (m == NULL)
            goto no_free_m;
        ip = mtod(m, struct ip *);
        hlen = ip->ip_hl << 2;
    }
    else
        /* Unfragmented: make ip_len count payload only, matching what
         * ip_reass() leaves in the header for reassembled datagrams. */
        ip->ip_len -= hlen;

    /*
     * Switch out to protocol's input routine.
     * The protocol input routines take ownership of the mbuf.
     */
    ipstat.ips_delivered++;
    switch (ip->ip_p)
    {
        case IPPROTO_TCP:
            tcp_input(pData, m, hlen, (struct socket *)NULL);
            break;
        case IPPROTO_UDP:
            udp_input(pData, m, hlen);
            break;
        case IPPROTO_ICMP:
            icmp_input(pData, m, hlen);
            break;
        default:
            ipstat.ips_noproto++;
            m_freem(pData, m);
    }
    goto no_free_m;

/* Fall-through order of the labels below is intentional:
 * bad_free_m logs, then free_m frees, then no_free_m just leaves. */
bad_free_m:
    Log2(("NAT: IP datagram to %RTnaipv4 with size(%d) claimed as bad\n",
          ip->ip_dst, ip->ip_len));
free_m:
    m_freem(pData, m);
no_free_m:
    STAM_PROFILE_STOP(&pData->StatIP_input, a);
    LogFlowFuncLeave();
    return;
}
269
/*
 * Take an incoming datagram fragment and try to reassemble it into a
 * whole datagram.  Returns NULL while the datagram is incomplete (the
 * fragment has been queued) or when the fragment was dropped; returns
 * the reassembled mbuf chain on completion.  On return the leading IP
 * header's ip_len counts payload only (header length excluded), and
 * ip_src/ip_dst are restored from the reassembly queue entry.
 * The input mbuf is consumed in all cases.
 */
struct mbuf *
ip_reass(PNATState pData, struct mbuf* m)
{
    struct ip *ip;
    struct mbuf *p, *q, *nq;   /* p = fragment before ours, q = cursor, nq = next */
    struct ipq_t *fp = NULL;   /* reassembly queue entry for this datagram */
    struct ipqhead *head;      /* hash bucket this datagram maps to */
    int i, hlen, next;
    u_short hash;

    /* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
    LogFlowFunc(("ENTER: m:%p\n", m));
    if (   maxnipq == 0
        || maxfragsperpacket == 0)
    {
        ipstat.ips_fragments++;
        ipstat.ips_fragdropped++;
        m_freem(pData, m);
        LogFlowFunc(("LEAVE: NULL\n"));
        return (NULL);
    }

    ip = mtod(m, struct ip *);
    hlen = ip->ip_hl << 2;

    hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
    head = &ipq[hash];

    /*
     * Look for queue of fragments
     * of this datagram.  A datagram is identified by the
     * (id, src, dst, protocol) tuple.
     */
    TAILQ_FOREACH(fp, head, ipq_list)
        if (ip->ip_id == fp->ipq_id &&
            ip->ip_src.s_addr == fp->ipq_src.s_addr &&
            ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
            ip->ip_p == fp->ipq_p)
            goto found;

    fp = NULL;

    /*
     * Attempt to trim the number of allocated fragment queues if it
     * exceeds the administrative limit.
     */
    if ((nipq > maxnipq) && (maxnipq > 0))
    {
        /*
         * drop something from the tail of the current queue
         * before proceeding further
         */
        struct ipq_t *pHead = TAILQ_LAST(head, ipqhead);
        if (pHead == NULL)
        {
            /* gak -- this bucket is empty; evict the tail of the first
             * non-empty bucket instead */
            for (i = 0; i < IPREASS_NHASH; i++)
            {
                struct ipq_t *pTail = TAILQ_LAST(&ipq[i], ipqhead);
                if (pTail)
                {
                    ipstat.ips_fragtimeout += pTail->ipq_nfrags;
                    ip_freef(pData, &ipq[i], pTail);
                    break;
                }
            }
        }
        else
        {
            ipstat.ips_fragtimeout += pHead->ipq_nfrags;
            ip_freef(pData, head, pHead);
        }
    }

found:
    /*
     * Adjust ip_len to not reflect header,
     * convert offset of this to bytes.
     */
    ip->ip_len -= hlen;
    if (ip->ip_off & IP_MF)
    {
        /*
         * Make sure that fragments have a data length
         * that's a non-zero multiple of 8 bytes.
         */
        if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0)
        {
            ipstat.ips_toosmall++; /* XXX */
            goto dropfrag;
        }
        m->m_flags |= M_FRAG;
    }
    else
        m->m_flags &= ~M_FRAG;
    /* ip_off is in 8-byte units on the wire; convert to bytes. */
    ip->ip_off <<= 3;


    /*
     * Attempt reassembly; if it succeeds, proceed.
     * ip_reass() will return a different mbuf.
     */
    ipstat.ips_fragments++;

    /* Previous ip_reass() started here. */
    /*
     * Presence of header sizes in mbufs
     * would confuse code below.  Hide the IP header; it is restored
     * during concatenation at the end.
     */
    m->m_data += hlen;
    m->m_len -= hlen;

    /*
     * If first fragment to arrive, create a reassembly queue.
     */
    if (fp == NULL)
    {
        fp = RTMemAlloc(sizeof(struct ipq_t));
        if (fp == NULL)
            goto dropfrag;
        TAILQ_INSERT_HEAD(head, fp, ipq_list);
        nipq++;
        fp->ipq_nfrags = 1;
        fp->ipq_ttl = IPFRAGTTL;
        fp->ipq_p = ip->ip_p;
        fp->ipq_id = ip->ip_id;
        fp->ipq_src = ip->ip_src;
        fp->ipq_dst = ip->ip_dst;
        fp->ipq_frags = m;
        m->m_nextpkt = NULL;
        goto done;
    }
    else
    {
        fp->ipq_nfrags++;
    }

/* Fragments keep their IP header cached in m_pkthdr.header even though
 * m_data has been advanced past it above. */
#define GETIP(m) ((struct ip*)((m)->m_pkthdr.header))

    /*
     * Find a segment which begins after this one does.
     * The fragment list is kept sorted by offset.
     */
    for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
        if (GETIP(q)->ip_off > ip->ip_off)
            break;

    /*
     * If there is a preceding segment, it may provide some of
     * our data already.  If so, drop the data from the incoming
     * segment.  If it provides all of our data, drop us, otherwise
     * stick new segment in the proper place.
     *
     * If some of the data is dropped from the preceding
     * segment, then it's checksum is invalidated.
     */
    if (p)
    {
        i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
        if (i > 0)
        {
            /* overlap of i bytes with the preceding fragment */
            if (i >= ip->ip_len)
                goto dropfrag;      /* fully covered by predecessor */
            m_adj(m, i);
            ip->ip_off += i;
            ip->ip_len -= i;
        }
        m->m_nextpkt = p->m_nextpkt;
        p->m_nextpkt = m;
    }
    else
    {
        m->m_nextpkt = fp->ipq_frags;
        fp->ipq_frags = m;
    }

    /*
     * While we overlap succeeding segments trim them or,
     * if they are completely covered, dequeue them.
     */
    for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
         q = nq)
    {
        i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
        if (i < GETIP(q)->ip_len)
        {
            /* partial overlap: trim the front of the successor */
            GETIP(q)->ip_len -= i;
            GETIP(q)->ip_off += i;
            m_adj(q, i);
            break;
        }
        /* successor completely covered: unlink and free it */
        nq = q->m_nextpkt;
        m->m_nextpkt = nq;
        ipstat.ips_fragdropped++;
        fp->ipq_nfrags--;
        m_freem(pData, q);
    }

    /*
     * Check for complete reassembly and perform frag per packet
     * limiting.
     *
     * Frag limiting is performed here so that the nth frag has
     * a chance to complete the packet before we drop the packet.
     * As a result, n+1 frags are actually allowed per packet, but
     * only n will ever be stored. (n = maxfragsperpacket.)
     *
     */
    next = 0;
    for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
    {
        if (GETIP(q)->ip_off != next)
        {
            /* gap found: datagram not complete yet */
            if (fp->ipq_nfrags > maxfragsperpacket)
            {
                ipstat.ips_fragdropped += fp->ipq_nfrags;
                ip_freef(pData, head, fp);
            }
            goto done;
        }
        next += GETIP(q)->ip_len;
    }
    /* Make sure the last packet didn't have the IP_MF flag */
    if (p->m_flags & M_FRAG)
    {
        if (fp->ipq_nfrags > maxfragsperpacket)
        {
            ipstat.ips_fragdropped += fp->ipq_nfrags;
            ip_freef(pData, head, fp);
        }
        goto done;
    }

    /*
     * Reassembly is complete.  Make sure the packet is a sane size.
     */
    q = fp->ipq_frags;
    ip = GETIP(q);
    hlen = ip->ip_hl << 2;
    if (next + hlen > IP_MAXPACKET)
    {
        ipstat.ips_fragdropped += fp->ipq_nfrags;
        ip_freef(pData, head, fp);
        goto done;
    }

    /*
     * Concatenate fragments.  Each fragment's hidden IP header is
     * re-exposed just long enough to be skipped again, so only the
     * payload bytes are joined; the first fragment's header is
     * re-exposed for good after the loop.
     */
    m = q;
    nq = q->m_nextpkt;
    q->m_nextpkt = NULL;
    for (q = nq; q != NULL; q = nq)
    {
        nq = q->m_nextpkt;
        q->m_nextpkt = NULL;
        m_cat(pData, m, q);

        m->m_len += hlen;
        m->m_data -= hlen;
        ip = mtod(m, struct ip *); /*update ip pointer */
        hlen = ip->ip_hl << 2;
        m->m_len -= hlen;
        m->m_data += hlen;
    }
    m->m_len += hlen;
    m->m_data -= hlen;

    /*
     * Create header for new ip packet by modifying header of first
     * packet; dequeue and discard fragment reassembly header.
     * Make header visible.
     */

    ip->ip_len = next;
    ip->ip_src = fp->ipq_src;
    ip->ip_dst = fp->ipq_dst;
    TAILQ_REMOVE(head, fp, ipq_list);
    nipq--;
    RTMemFree(fp);

    Assert((ip->ip_len == next));
    /* some debugging cruft by sklower, below, will go away soon */
#if 0
    if (m->m_flags & M_PKTHDR) /* XXX this should be done elsewhere */
        m_fixhdr(m);
#endif
    ipstat.ips_reassembled++;
    LogFlowFunc(("LEAVE: %p\n", m));
    return (m);

dropfrag:
    ipstat.ips_fragdropped++;
    if (fp != NULL)
        fp->ipq_nfrags--;
    m_freem(pData, m);

done:
    LogFlowFunc(("LEAVE: NULL\n"));
    return NULL;

#undef GETIP
}
571
572void
573ip_freef(PNATState pData, struct ipqhead *fhp, struct ipq_t *fp)
574{
575 struct mbuf *q;
576
577 while (fp->ipq_frags)
578 {
579 q = fp->ipq_frags;
580 fp->ipq_frags = q->m_nextpkt;
581 m_freem(pData, q);
582 }
583 TAILQ_REMOVE(fhp, fp, ipq_list);
584 RTMemFree(fp);
585 nipq--;
586}
587
588/*
589 * IP timer processing;
590 * if a timer expires on a reassembly
591 * queue, discard it.
592 */
593void
594ip_slowtimo(PNATState pData)
595{
596 register struct ipq_t *fp;
597
598 /* XXX: the fragment expiration is the same but requier
599 * additional loop see (see ip_input.c in FreeBSD tree)
600 */
601 int i;
602 LogFlow(("ip_slowtimo:\n"));
603 for (i = 0; i < IPREASS_NHASH; i++)
604 {
605 for(fp = TAILQ_FIRST(&ipq[i]); fp;)
606 {
607 struct ipq_t *fpp;
608
609 fpp = fp;
610 fp = TAILQ_NEXT(fp, ipq_list);
611 if(--fpp->ipq_ttl == 0)
612 {
613 ipstat.ips_fragtimeout += fpp->ipq_nfrags;
614 ip_freef(pData, &ipq[i], fpp);
615 }
616 }
617 }
618 /*
619 * If we are over the maximum number of fragments
620 * (due to the limit being lowered), drain off
621 * enough to get down to the new limit.
622 */
623 if (maxnipq >= 0 && nipq > maxnipq)
624 {
625 for (i = 0; i < IPREASS_NHASH; i++)
626 {
627 while (nipq > maxnipq && !TAILQ_EMPTY(&ipq[i]))
628 {
629 ipstat.ips_fragdropped += TAILQ_FIRST(&ipq[i])->ipq_nfrags;
630 ip_freef(pData, &ipq[i], TAILQ_FIRST(&ipq[i]));
631 }
632 }
633 }
634}
635
636
637/*
638 * Strip out IP options, at higher
639 * level protocol in the kernel.
640 * Second argument is buffer to which options
641 * will be moved, and return value is their length.
642 * (XXX) should be deleted; last arg currently ignored.
643 */
644void
645ip_stripoptions(struct mbuf *m, struct mbuf *mopt)
646{
647 register int i;
648 struct ip *ip = mtod(m, struct ip *);
649 register caddr_t opts;
650 int olen;
651 NOREF(mopt); /* @todo: do we really will need this options buffer? */
652
653 olen = (ip->ip_hl<<2) - sizeof(struct ip);
654 opts = (caddr_t)(ip + 1);
655 i = m->m_len - (sizeof(struct ip) + olen);
656 memcpy(opts, opts + olen, (unsigned)i);
657 m->m_len -= olen;
658
659 ip->ip_hl = sizeof(struct ip) >> 2;
660}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette