VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/ip_input.c@ 95573

Last change on this file since 95573 was 95573, checked in by vboxsync, 2 years ago

Network/slirp: Advertising clause for Danny Gasparovski was unintentional, should have always been 3-clause BSD. Replace 4-clause BSD license by 3-clause, see retroactive license change by UC Berkeley https://www.freebsd.org/copyright/license/

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 17.8 KB
Line 
1/* $Id: ip_input.c 95573 2022-07-08 18:16:35Z vboxsync $ */
2/** @file
3 * NAT - IP input.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*
19 * This code is based on:
20 *
21 * Copyright (c) 1982, 1986, 1988, 1993
22 * The Regents of the University of California. All rights reserved.
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 * 1. Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * 2. Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in the
31 * documentation and/or other materials provided with the distribution.
32 * 3. Neither the name of the University nor the names of its contributors
33 * may be used to endorse or promote products derived from this software
34 * without specific prior written permission.
35 *
36 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
37 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
38 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
39 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
40 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
41 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
42 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
43 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
44 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
45 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
46 * SUCH DAMAGE.
47 *
48 * @(#)ip_input.c 8.2 (Berkeley) 1/4/94
49 * ip_input.c,v 1.11 1994/11/16 10:17:08 jkh Exp
50 */
51
52/*
53 * Changes and additions relating to SLiRP are
54 * Copyright (c) 1995 Danny Gasparovski.
55 *
56 * Please read the file COPYRIGHT for the
57 * terms and conditions of the copyright.
58 */
59
60#include <slirp.h>
61#include "ip_icmp.h"
62#include "alias.h"
63
64
65/*
66 * IP initialization: fill in IP protocol switch table.
67 * All protocols not implemented in kernel go to raw IP protocol handler.
68 */
69void
70ip_init(PNATState pData)
71{
72 int i = 0;
73 for (i = 0; i < IPREASS_NHASH; ++i)
74 TAILQ_INIT(&ipq[i]);
75 maxnipq = 100; /* ??? */
76 maxfragsperpacket = 16;
77 nipq = 0;
78 ip_currid = tt.tv_sec & 0xffff;
79 udp_init(pData);
80 tcp_init(pData);
81}
82
83/*
84 * Ip input routine. Checksum and byte swap header. If fragmented
85 * try to reassemble. Process options. Pass to next level.
86 */
87void
88ip_input(PNATState pData, struct mbuf *m)
89{
90 register struct ip *ip;
91 int hlen = 0;
92 int mlen = 0;
93 int iplen = 0;
94
95 STAM_PROFILE_START(&pData->StatIP_input, a);
96
97 LogFlowFunc(("ENTER: m = %p\n", m));
98 ip = mtod(m, struct ip *);
99 Log2(("ip_dst=%RTnaipv4(len:%d) m_len = %d\n", ip->ip_dst, RT_N2H_U16(ip->ip_len), m->m_len));
100
101 ipstat.ips_total++;
102
103 mlen = m->m_len;
104
105 if (mlen < sizeof(struct ip))
106 {
107 ipstat.ips_toosmall++;
108 goto bad_free_m;
109 }
110
111 ip = mtod(m, struct ip *);
112 if (ip->ip_v != IPVERSION)
113 {
114 ipstat.ips_badvers++;
115 goto bad_free_m;
116 }
117
118 hlen = ip->ip_hl << 2;
119 if ( hlen < sizeof(struct ip)
120 || hlen > mlen)
121 {
122 /* min header length */
123 ipstat.ips_badhlen++; /* or packet too short */
124 goto bad_free_m;
125 }
126
127 /* keep ip header intact for ICMP reply
128 * ip->ip_sum = cksum(m, hlen);
129 * if (ip->ip_sum) {
130 */
131 if (cksum(m, hlen))
132 {
133 ipstat.ips_badsum++;
134 goto bad_free_m;
135 }
136
137 iplen = RT_N2H_U16(ip->ip_len);
138 if (iplen < hlen)
139 {
140 ipstat.ips_badlen++;
141 goto bad_free_m;
142 }
143
144 /*
145 * Check that the amount of data in the buffers
146 * is as at least much as the IP header would have us expect.
147 * Trim mbufs if longer than we expect.
148 * Drop packet if shorter than we expect.
149 */
150 if (mlen < iplen)
151 {
152 ipstat.ips_tooshort++;
153 goto bad_free_m;
154 }
155
156 /* Should drop packet if mbuf too long? hmmm... */
157 if (mlen > iplen)
158 {
159 m_adj(m, iplen - mlen);
160 mlen = m->m_len;
161 }
162
163 /* source must be unicast */
164 if ((ip->ip_src.s_addr & RT_N2H_U32_C(0xe0000000)) == RT_N2H_U32_C(0xe0000000))
165 goto free_m;
166
167 /*
168 * Drop multicast (class d) and reserved (class e) here. The rest
169 * of the code is not yet prepared to deal with it. IGMP is not
170 * implemented either.
171 */
172 if ( (ip->ip_dst.s_addr & RT_N2H_U32_C(0xe0000000)) == RT_N2H_U32_C(0xe0000000)
173 && ip->ip_dst.s_addr != 0xffffffff)
174 {
175 goto free_m;
176 }
177
178
179 /* do we need to "forward" this packet? */
180 if (!CTL_CHECK_MINE(ip->ip_dst.s_addr))
181 {
182 if (ip->ip_ttl <= 1)
183 {
184 /* icmp_error expects these in host order */
185 NTOHS(ip->ip_len);
186 NTOHS(ip->ip_id);
187 NTOHS(ip->ip_off);
188
189 icmp_error(pData, m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, 0, "ttl");
190 goto no_free_m;
191 }
192
193 /* ignore packets to other nodes from our private network */
194 if ( CTL_CHECK_NETWORK(ip->ip_dst.s_addr)
195 && !CTL_CHECK_BROADCAST(ip->ip_dst.s_addr))
196 {
197 /* XXX: send ICMP_REDIRECT_HOST to be pedantic? */
198 goto free_m;
199 }
200
201 ip->ip_ttl--;
202 if (ip->ip_sum > RT_H2N_U16_C(0xffffU - (1 << 8)))
203 ip->ip_sum += RT_H2N_U16_C(1 << 8) + 1;
204 else
205 ip->ip_sum += RT_H2N_U16_C(1 << 8);
206 }
207
208 /* run it through libalias */
209 {
210 int rc;
211 if (!(m->m_flags & M_SKIP_FIREWALL))
212 {
213 STAM_PROFILE_START(&pData->StatALIAS_input, b);
214 rc = LibAliasIn(pData->proxy_alias, mtod(m, char *), mlen);
215 STAM_PROFILE_STOP(&pData->StatALIAS_input, b);
216 Log2(("NAT: LibAlias return %d\n", rc));
217 }
218 else
219 m->m_flags &= ~M_SKIP_FIREWALL;
220
221#if 0 /* disabled: no module we use does it in this direction */
222 /*
223 * XXX: spooky action at a distance - libalias may modify the
224 * packet and will update ip_len to reflect the new length.
225 */
226 if (iplen != RT_N2H_U16(ip->ip_len))
227 {
228 iplen = RT_N2H_U16(ip->ip_len);
229 m->m_len = iplen;
230 mlen = m->m_len;
231 }
232#endif
233 }
234
235 /*
236 * Convert fields to host representation.
237 */
238 NTOHS(ip->ip_len);
239 NTOHS(ip->ip_id);
240 NTOHS(ip->ip_off);
241
242 /*
243 * If offset or IP_MF are set, must reassemble.
244 * Otherwise, nothing need be done.
245 * (We could look in the reassembly queue to see
246 * if the packet was previously fragmented,
247 * but it's not worth the time; just let them time out.)
248 *
249 */
250 if (ip->ip_off & (IP_MF | IP_OFFMASK))
251 {
252 m = ip_reass(pData, m);
253 if (m == NULL)
254 goto no_free_m;
255 ip = mtod(m, struct ip *);
256 hlen = ip->ip_hl << 2;
257 }
258 else
259 ip->ip_len -= hlen;
260
261 /*
262 * Switch out to protocol's input routine.
263 */
264 ipstat.ips_delivered++;
265 switch (ip->ip_p)
266 {
267 case IPPROTO_TCP:
268 tcp_input(pData, m, hlen, (struct socket *)NULL);
269 break;
270 case IPPROTO_UDP:
271 udp_input(pData, m, hlen);
272 break;
273 case IPPROTO_ICMP:
274 icmp_input(pData, m, hlen);
275 break;
276 default:
277 ipstat.ips_noproto++;
278 m_freem(pData, m);
279 }
280 goto no_free_m;
281
282bad_free_m:
283 Log2(("NAT: IP datagram to %RTnaipv4 with size(%d) claimed as bad\n",
284 ip->ip_dst, ip->ip_len));
285free_m:
286 m_freem(pData, m);
287no_free_m:
288 STAM_PROFILE_STOP(&pData->StatIP_input, a);
289 LogFlowFuncLeave();
290 return;
291}
292
293struct mbuf *
294ip_reass(PNATState pData, struct mbuf* m)
295{
296 struct ip *ip;
297 struct mbuf *p, *q, *nq;
298 struct ipq_t *fp = NULL;
299 struct ipqhead *head;
300 int i, hlen, next;
301 u_short hash;
302
303 /* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
304 LogFlowFunc(("ENTER: m:%p\n", m));
305 if ( maxnipq == 0
306 || maxfragsperpacket == 0)
307 {
308 ipstat.ips_fragments++;
309 ipstat.ips_fragdropped++;
310 m_freem(pData, m);
311 LogFlowFunc(("LEAVE: NULL\n"));
312 return (NULL);
313 }
314
315 ip = mtod(m, struct ip *);
316 hlen = ip->ip_hl << 2;
317
318 hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
319 head = &ipq[hash];
320
321 /*
322 * Look for queue of fragments
323 * of this datagram.
324 */
325 TAILQ_FOREACH(fp, head, ipq_list)
326 if (ip->ip_id == fp->ipq_id &&
327 ip->ip_src.s_addr == fp->ipq_src.s_addr &&
328 ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
329 ip->ip_p == fp->ipq_p)
330 goto found;
331
332 fp = NULL;
333
334 /*
335 * Attempt to trim the number of allocated fragment queues if it
336 * exceeds the administrative limit.
337 */
338 if ((nipq > maxnipq) && (maxnipq > 0))
339 {
340 /*
341 * drop something from the tail of the current queue
342 * before proceeding further
343 */
344 struct ipq_t *pHead = TAILQ_LAST(head, ipqhead);
345 if (pHead == NULL)
346 {
347 /* gak */
348 for (i = 0; i < IPREASS_NHASH; i++)
349 {
350 struct ipq_t *pTail = TAILQ_LAST(&ipq[i], ipqhead);
351 if (pTail)
352 {
353 ipstat.ips_fragtimeout += pTail->ipq_nfrags;
354 ip_freef(pData, &ipq[i], pTail);
355 break;
356 }
357 }
358 }
359 else
360 {
361 ipstat.ips_fragtimeout += pHead->ipq_nfrags;
362 ip_freef(pData, head, pHead);
363 }
364 }
365
366found:
367 /*
368 * Adjust ip_len to not reflect header,
369 * convert offset of this to bytes.
370 */
371 ip->ip_len -= hlen;
372 if (ip->ip_off & IP_MF)
373 {
374 /*
375 * Make sure that fragments have a data length
376 * that's a non-zero multiple of 8 bytes.
377 */
378 if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0)
379 {
380 ipstat.ips_toosmall++; /* XXX */
381 goto dropfrag;
382 }
383 m->m_flags |= M_FRAG;
384 }
385 else
386 m->m_flags &= ~M_FRAG;
387 ip->ip_off <<= 3;
388
389
390 /*
391 * Attempt reassembly; if it succeeds, proceed.
392 * ip_reass() will return a different mbuf.
393 */
394 ipstat.ips_fragments++;
395
396 /* Previous ip_reass() started here. */
397 /*
398 * Presence of header sizes in mbufs
399 * would confuse code below.
400 */
401 m->m_data += hlen;
402 m->m_len -= hlen;
403
404 /*
405 * If first fragment to arrive, create a reassembly queue.
406 */
407 if (fp == NULL)
408 {
409 fp = RTMemAlloc(sizeof(struct ipq_t));
410 if (fp == NULL)
411 goto dropfrag;
412 TAILQ_INSERT_HEAD(head, fp, ipq_list);
413 nipq++;
414 fp->ipq_nfrags = 1;
415 fp->ipq_ttl = IPFRAGTTL;
416 fp->ipq_p = ip->ip_p;
417 fp->ipq_id = ip->ip_id;
418 fp->ipq_src = ip->ip_src;
419 fp->ipq_dst = ip->ip_dst;
420 fp->ipq_frags = m;
421 m->m_nextpkt = NULL;
422 goto done;
423 }
424 else
425 {
426 fp->ipq_nfrags++;
427 }
428
429#define GETIP(m) ((struct ip*)((m)->m_pkthdr.header))
430
431 /*
432 * Find a segment which begins after this one does.
433 */
434 for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
435 if (GETIP(q)->ip_off > ip->ip_off)
436 break;
437
438 /*
439 * If there is a preceding segment, it may provide some of
440 * our data already. If so, drop the data from the incoming
441 * segment. If it provides all of our data, drop us, otherwise
442 * stick new segment in the proper place.
443 *
444 * If some of the data is dropped from the preceding
445 * segment, then it's checksum is invalidated.
446 */
447 if (p)
448 {
449 i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
450 if (i > 0)
451 {
452 if (i >= ip->ip_len)
453 goto dropfrag;
454 m_adj(m, i);
455 ip->ip_off += i;
456 ip->ip_len -= i;
457 }
458 m->m_nextpkt = p->m_nextpkt;
459 p->m_nextpkt = m;
460 }
461 else
462 {
463 m->m_nextpkt = fp->ipq_frags;
464 fp->ipq_frags = m;
465 }
466
467 /*
468 * While we overlap succeeding segments trim them or,
469 * if they are completely covered, dequeue them.
470 */
471 for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
472 q = nq)
473 {
474 i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
475 if (i < GETIP(q)->ip_len)
476 {
477 GETIP(q)->ip_len -= i;
478 GETIP(q)->ip_off += i;
479 m_adj(q, i);
480 break;
481 }
482 nq = q->m_nextpkt;
483 m->m_nextpkt = nq;
484 ipstat.ips_fragdropped++;
485 fp->ipq_nfrags--;
486 m_freem(pData, q);
487 }
488
489 /*
490 * Check for complete reassembly and perform frag per packet
491 * limiting.
492 *
493 * Frag limiting is performed here so that the nth frag has
494 * a chance to complete the packet before we drop the packet.
495 * As a result, n+1 frags are actually allowed per packet, but
496 * only n will ever be stored. (n = maxfragsperpacket.)
497 *
498 */
499 next = 0;
500 for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
501 {
502 if (GETIP(q)->ip_off != next)
503 {
504 if (fp->ipq_nfrags > maxfragsperpacket)
505 {
506 ipstat.ips_fragdropped += fp->ipq_nfrags;
507 ip_freef(pData, head, fp);
508 }
509 goto done;
510 }
511 next += GETIP(q)->ip_len;
512 }
513 /* Make sure the last packet didn't have the IP_MF flag */
514 if (p->m_flags & M_FRAG)
515 {
516 if (fp->ipq_nfrags > maxfragsperpacket)
517 {
518 ipstat.ips_fragdropped += fp->ipq_nfrags;
519 ip_freef(pData, head, fp);
520 }
521 goto done;
522 }
523
524 /*
525 * Reassembly is complete. Make sure the packet is a sane size.
526 */
527 q = fp->ipq_frags;
528 ip = GETIP(q);
529 hlen = ip->ip_hl << 2;
530 if (next + hlen > IP_MAXPACKET)
531 {
532 ipstat.ips_fragdropped += fp->ipq_nfrags;
533 ip_freef(pData, head, fp);
534 goto done;
535 }
536
537 /*
538 * Concatenate fragments.
539 */
540 m = q;
541 nq = q->m_nextpkt;
542 q->m_nextpkt = NULL;
543 for (q = nq; q != NULL; q = nq)
544 {
545 nq = q->m_nextpkt;
546 q->m_nextpkt = NULL;
547 m_cat(pData, m, q);
548
549 m->m_len += hlen;
550 m->m_data -= hlen;
551 ip = mtod(m, struct ip *); /*update ip pointer */
552 hlen = ip->ip_hl << 2;
553 m->m_len -= hlen;
554 m->m_data += hlen;
555 }
556 m->m_len += hlen;
557 m->m_data -= hlen;
558
559 /*
560 * Create header for new ip packet by modifying header of first
561 * packet; dequeue and discard fragment reassembly header.
562 * Make header visible.
563 */
564
565 ip->ip_len = next;
566 ip->ip_src = fp->ipq_src;
567 ip->ip_dst = fp->ipq_dst;
568 TAILQ_REMOVE(head, fp, ipq_list);
569 nipq--;
570 RTMemFree(fp);
571
572 Assert((ip->ip_len == next));
573 /* some debugging cruft by sklower, below, will go away soon */
574#if 0
575 if (m->m_flags & M_PKTHDR) /* XXX this should be done elsewhere */
576 m_fixhdr(m);
577#endif
578 ipstat.ips_reassembled++;
579 LogFlowFunc(("LEAVE: %p\n", m));
580 return (m);
581
582dropfrag:
583 ipstat.ips_fragdropped++;
584 if (fp != NULL)
585 fp->ipq_nfrags--;
586 m_freem(pData, m);
587
588done:
589 LogFlowFunc(("LEAVE: NULL\n"));
590 return NULL;
591
592#undef GETIP
593}
594
595void
596ip_freef(PNATState pData, struct ipqhead *fhp, struct ipq_t *fp)
597{
598 struct mbuf *q;
599
600 while (fp->ipq_frags)
601 {
602 q = fp->ipq_frags;
603 fp->ipq_frags = q->m_nextpkt;
604 m_freem(pData, q);
605 }
606 TAILQ_REMOVE(fhp, fp, ipq_list);
607 RTMemFree(fp);
608 nipq--;
609}
610
611/*
612 * IP timer processing;
613 * if a timer expires on a reassembly
614 * queue, discard it.
615 */
616void
617ip_slowtimo(PNATState pData)
618{
619 register struct ipq_t *fp;
620
621 /* XXX: the fragment expiration is the same but requier
622 * additional loop see (see ip_input.c in FreeBSD tree)
623 */
624 int i;
625 LogFlow(("ip_slowtimo:\n"));
626 for (i = 0; i < IPREASS_NHASH; i++)
627 {
628 for(fp = TAILQ_FIRST(&ipq[i]); fp;)
629 {
630 struct ipq_t *fpp;
631
632 fpp = fp;
633 fp = TAILQ_NEXT(fp, ipq_list);
634 if(--fpp->ipq_ttl == 0)
635 {
636 ipstat.ips_fragtimeout += fpp->ipq_nfrags;
637 ip_freef(pData, &ipq[i], fpp);
638 }
639 }
640 }
641 /*
642 * If we are over the maximum number of fragments
643 * (due to the limit being lowered), drain off
644 * enough to get down to the new limit.
645 */
646 if (maxnipq >= 0 && nipq > maxnipq)
647 {
648 for (i = 0; i < IPREASS_NHASH; i++)
649 {
650 while (nipq > maxnipq && !TAILQ_EMPTY(&ipq[i]))
651 {
652 ipstat.ips_fragdropped += TAILQ_FIRST(&ipq[i])->ipq_nfrags;
653 ip_freef(pData, &ipq[i], TAILQ_FIRST(&ipq[i]));
654 }
655 }
656 }
657}
658
659
660/*
661 * Strip out IP options, at higher
662 * level protocol in the kernel.
663 * Second argument is buffer to which options
664 * will be moved, and return value is their length.
665 * (XXX) should be deleted; last arg currently ignored.
666 */
667void
668ip_stripoptions(struct mbuf *m, struct mbuf *mopt)
669{
670 register int i;
671 struct ip *ip = mtod(m, struct ip *);
672 register caddr_t opts;
673 int olen;
674 NOREF(mopt); /** @todo do we really will need this options buffer? */
675
676 olen = (ip->ip_hl<<2) - sizeof(struct ip);
677 opts = (caddr_t)(ip + 1);
678 i = m->m_len - (sizeof(struct ip) + olen);
679 memcpy(opts, opts + olen, (unsigned)i);
680 m->m_len -= olen;
681
682 ip->ip_hl = sizeof(struct ip) >> 2;
683}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette