VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/ip_input.c@ 25822

Last change on this file since 25822 was 25822, checked in by vboxsync, 15 years ago

NAT: -Wshadow fixes. Use RT_N2H and RT_H2N instead of ntoh and hton because the latter trigger this warning as well.

  • Property svn:eol-style set to native
File size: 16.3 KB
Line 
1/*
2 * Copyright (c) 1982, 1986, 1988, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)ip_input.c 8.2 (Berkeley) 1/4/94
34 * ip_input.c,v 1.11 1994/11/16 10:17:08 jkh Exp
35 */
36
37/*
38 * Changes and additions relating to SLiRP are
39 * Copyright (c) 1995 Danny Gasparovski.
40 *
41 * Please read the file COPYRIGHT for the
42 * terms and conditions of the copyright.
43 */
44
#include <string.h> /* memmove */
#include <slirp.h>
#include "ip_icmp.h"
#include "alias.h"
48
49
50/*
51 * IP initialization: fill in IP protocol switch table.
52 * All protocols not implemented in kernel go to raw IP protocol handler.
53 */
54void
55ip_init(PNATState pData)
56{
57 int i = 0;
58 for (i = 0; i < IPREASS_NHASH; ++i)
59 TAILQ_INIT(&ipq[i]);
60 maxnipq = 100; /* ??? */
61 maxfragsperpacket = 16;
62 nipq = 0;
63 ip_currid = tt.tv_sec & 0xffff;
64 udp_init(pData);
65 tcp_init(pData);
66}
67
68static struct libalias *select_alias(PNATState pData, struct mbuf* m)
69{
70 struct libalias *la = pData->proxy_alias;
71 struct udphdr *udp = NULL;
72 struct ip *pip = NULL;
73
74#ifndef VBOX_WITH_SLIRP_BSD_MBUF
75 if (m->m_la)
76 return m->m_la;
77#else
78 struct m_tag *t;
79 if (t = m_tag_find(m, PACKET_TAG_ALIAS, NULL) != 0)
80 {
81 return (struct libalias *)&t[1];
82 }
83#endif
84
85 return la;
86}
87
88/*
89 * Ip input routine. Checksum and byte swap header. If fragmented
90 * try to reassemble. Process options. Pass to next level.
91 */
92void
93ip_input(PNATState pData, struct mbuf *m)
94{
95 register struct ip *ip;
96 int hlen = 0;
97 int mlen = 0;
98
99 STAM_PROFILE_START(&pData->StatIP_input, a);
100
101 DEBUG_CALL("ip_input");
102 DEBUG_ARG("m = %lx", (long)m);
103 ip = mtod(m, struct ip *);
104 Log2(("ip_dst=%R[IP4](len:%d) m_len = %d", &ip->ip_dst, RT_N2H_U16(ip->ip_len), m->m_len));
105 Log2(("ip_dst=%R[IP4](len:%d) m_len = %d\n", &ip->ip_dst, RT_N2H_U16(ip->ip_len), m->m_len));
106
107 ipstat.ips_total++;
108 {
109 int rc;
110 STAM_PROFILE_START(&pData->StatALIAS_input, b);
111 rc = LibAliasIn(select_alias(pData, m), mtod(m, char *), m->m_len);
112 STAM_PROFILE_STOP(&pData->StatALIAS_input, b);
113 Log2(("NAT: LibAlias return %d\n", rc));
114 if (m->m_len != RT_N2H_U16(ip->ip_len))
115 m->m_len = RT_N2H_U16(ip->ip_len);
116 }
117
118 mlen = m->m_len;
119
120 if (mlen < sizeof(struct ip))
121 {
122 ipstat.ips_toosmall++;
123 STAM_PROFILE_STOP(&pData->StatIP_input, a);
124 return;
125 }
126
127 ip = mtod(m, struct ip *);
128 if (ip->ip_v != IPVERSION)
129 {
130 ipstat.ips_badvers++;
131 goto bad;
132 }
133
134 hlen = ip->ip_hl << 2;
135 if ( hlen < sizeof(struct ip)
136 || hlen > m->m_len)
137 {
138 /* min header length */
139 ipstat.ips_badhlen++; /* or packet too short */
140 goto bad;
141 }
142
143 /* keep ip header intact for ICMP reply
144 * ip->ip_sum = cksum(m, hlen);
145 * if (ip->ip_sum) {
146 */
147 if (cksum(m, hlen))
148 {
149 ipstat.ips_badsum++;
150 goto bad;
151 }
152
153 /*
154 * Convert fields to host representation.
155 */
156 NTOHS(ip->ip_len);
157 if (ip->ip_len < hlen)
158 {
159 ipstat.ips_badlen++;
160 goto bad;
161 }
162
163 NTOHS(ip->ip_id);
164 NTOHS(ip->ip_off);
165
166 /*
167 * Check that the amount of data in the buffers
168 * is as at least much as the IP header would have us expect.
169 * Trim mbufs if longer than we expect.
170 * Drop packet if shorter than we expect.
171 */
172 if (mlen < ip->ip_len)
173 {
174 ipstat.ips_tooshort++;
175 goto bad;
176 }
177
178 /* Should drop packet if mbuf too long? hmmm... */
179 if (mlen > ip->ip_len)
180 m_adj(m, ip->ip_len - m->m_len);
181
182 /* check ip_ttl for a correct ICMP reply */
183 if (ip->ip_ttl==0 || ip->ip_ttl == 1)
184 {
185 icmp_error(pData, m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, 0, "ttl");
186 goto bad;
187 }
188
189 ip->ip_ttl--;
190 /*
191 * If offset or IP_MF are set, must reassemble.
192 * Otherwise, nothing need be done.
193 * (We could look in the reassembly queue to see
194 * if the packet was previously fragmented,
195 * but it's not worth the time; just let them time out.)
196 *
197 */
198 if (ip->ip_off & (IP_MF | IP_OFFMASK))
199 {
200 m = ip_reass(pData, m);
201 if (m == NULL)
202 {
203 STAM_PROFILE_STOP(&pData->StatIP_input, a);
204 return;
205 }
206 ip = mtod(m, struct ip *);
207 hlen = ip->ip_hl << 2;
208 }
209 else
210 ip->ip_len -= hlen;
211
212 /*
213 * Switch out to protocol's input routine.
214 */
215 ipstat.ips_delivered++;
216 switch (ip->ip_p)
217 {
218 case IPPROTO_TCP:
219 tcp_input(pData, m, hlen, (struct socket *)NULL);
220 break;
221 case IPPROTO_UDP:
222 udp_input(pData, m, hlen);
223 break;
224 case IPPROTO_ICMP:
225 icmp_input(pData, m, hlen);
226 break;
227 default:
228 ipstat.ips_noproto++;
229 m_free(pData, m);
230 }
231 STAM_PROFILE_STOP(&pData->StatIP_input, a);
232 return;
233
234bad:
235 Log2(("NAT: IP datagram to %R[IP4] with size(%d) claimed as bad\n",
236 &ip->ip_dst, ip->ip_len));
237 m_freem(pData, m);
238 STAM_PROFILE_STOP(&pData->StatIP_input, a);
239 return;
240}
241
/*
 * Reassemble an incoming IP fragment.
 *
 * Takes the fragment mbuf 'm' (ip header fields already in host byte
 * order) and either queues it on the matching reassembly queue, or - when
 * it completes a datagram - returns the reassembled packet.  Returns NULL
 * when the datagram is not yet complete or the fragment was dropped.
 */
struct mbuf *
ip_reass(PNATState pData, struct mbuf* m)
{
    struct ip *ip;
    struct mbuf *p, *q, *nq;
    struct ipq_t *fp = NULL;
    struct ipqhead *head;
    int i, hlen, next;
    u_short hash;

    /* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
    if (   maxnipq == 0
        || maxfragsperpacket == 0)
    {
        ipstat.ips_fragments++;
        ipstat.ips_fragdropped++;
        m_freem(pData, m);
        return (NULL);
    }

    ip = mtod(m, struct ip *);
    hlen = ip->ip_hl << 2;

    /* Queues are hashed over (source address, datagram id). */
    hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
    head = &ipq[hash];

    /*
     * Look for queue of fragments
     * of this datagram.
     */
    TAILQ_FOREACH(fp, head, ipq_list)
        if (ip->ip_id == fp->ipq_id &&
            ip->ip_src.s_addr == fp->ipq_src.s_addr &&
            ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
            ip->ip_p == fp->ipq_p)
            goto found;

    fp = NULL;

    /*
     * Attempt to trim the number of allocated fragment queues if it
     * exceeds the administrative limit.
     */
    if ((nipq > maxnipq) && (maxnipq > 0))
    {
        /*
         * drop something from the tail of the current queue
         * before proceeding further
         */
        struct ipq_t *pHead = TAILQ_LAST(head, ipqhead);
        if (pHead == NULL)
        {
            /* gak - this bucket is empty; evict from the first non-empty one */
            for (i = 0; i < IPREASS_NHASH; i++)
            {
                struct ipq_t *pTail = TAILQ_LAST(&ipq[i], ipqhead);
                if (pTail)
                {
                    ipstat.ips_fragtimeout += pTail->ipq_nfrags;
                    ip_freef(pData, &ipq[i], pTail);
                    break;
                }
            }
        }
        else
        {
            ipstat.ips_fragtimeout += pHead->ipq_nfrags;
            ip_freef(pData, head, pHead);
        }
    }

found:
    /*
     * Adjust ip_len to not reflect header,
     * convert offset of this to bytes.
     */
    ip->ip_len -= hlen;
    if (ip->ip_off & IP_MF)
    {
        /*
         * Make sure that fragments have a data length
         * that's a non-zero multiple of 8 bytes.
         */
        if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0)
        {
            ipstat.ips_toosmall++; /* XXX */
            goto dropfrag;
        }
        m->m_flags |= M_FRAG;
    }
    else
        m->m_flags &= ~M_FRAG;
    ip->ip_off <<= 3;  /* fragment offset field is in 8-byte units */


    /*
     * Attempt reassembly; if it succeeds, proceed.
     * ip_reass() will return a different mbuf.
     */
    ipstat.ips_fragments++;

    /* Previous ip_reass() started here. */
    /*
     * Presence of header sizes in mbufs
     * would confuse code below.
     */
    m->m_data += hlen;
    m->m_len -= hlen;

    /*
     * If first fragment to arrive, create a reassembly queue.
     */
    if (fp == NULL)
    {
        fp = RTMemAlloc(sizeof(struct ipq_t));
        if (fp == NULL)
            goto dropfrag;
        TAILQ_INSERT_HEAD(head, fp, ipq_list);
        nipq++;
        fp->ipq_nfrags = 1;
        fp->ipq_ttl = IPFRAGTTL;
        fp->ipq_p = ip->ip_p;
        fp->ipq_id = ip->ip_id;
        fp->ipq_src = ip->ip_src;
        fp->ipq_dst = ip->ip_dst;
        fp->ipq_frags = m;
        m->m_nextpkt = NULL;
        goto done;
    }
    else
    {
        fp->ipq_nfrags++;
    }

/* Fetch the saved IP header of a queued fragment (m_data was advanced past it). */
#ifndef VBOX_WITH_SLIRP_BSD_MBUF
#define GETIP(m) ((struct ip*)(MBUF_IP_HEADER(m)))
#else
#define GETIP(m) ((struct ip*)((m)->m_pkthdr.header))
#endif


    /*
     * Find a segment which begins after this one does.
     */
    for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
        if (GETIP(q)->ip_off > ip->ip_off)
            break;

    /*
     * If there is a preceding segment, it may provide some of
     * our data already.  If so, drop the data from the incoming
     * segment.  If it provides all of our data, drop us, otherwise
     * stick new segment in the proper place.
     *
     * If some of the data is dropped from the preceding
     * segment, then its checksum is invalidated.
     */
    if (p)
    {
        i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
        if (i > 0)
        {
            if (i >= ip->ip_len)
                goto dropfrag;  /* fully covered by the previous fragment */
            m_adj(m, i);        /* trim the overlapping head of this fragment */
            ip->ip_off += i;
            ip->ip_len -= i;
        }
        m->m_nextpkt = p->m_nextpkt;
        p->m_nextpkt = m;
    }
    else
    {
        m->m_nextpkt = fp->ipq_frags;
        fp->ipq_frags = m;
    }

    /*
     * While we overlap succeeding segments trim them or,
     * if they are completely covered, dequeue them.
     */
    for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
         q = nq)
    {
        i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
        if (i < GETIP(q)->ip_len)
        {
            /* Partial overlap: trim the head of the succeeding fragment. */
            GETIP(q)->ip_len -= i;
            GETIP(q)->ip_off += i;
            m_adj(q, i);
            break;
        }
        /* Fully covered: unlink and free it. */
        nq = q->m_nextpkt;
        m->m_nextpkt = nq;
        ipstat.ips_fragdropped++;
        fp->ipq_nfrags--;
        m_freem(pData, q);
    }

    /*
     * Check for complete reassembly and perform frag per packet
     * limiting.
     *
     * Frag limiting is performed here so that the nth frag has
     * a chance to complete the packet before we drop the packet.
     * As a result, n+1 frags are actually allowed per packet, but
     * only n will ever be stored. (n = maxfragsperpacket.)
     *
     */
    next = 0;
    for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
    {
        if (GETIP(q)->ip_off != next)
        {
            /* Hole found - datagram still incomplete. */
            if (fp->ipq_nfrags > maxfragsperpacket)
            {
                ipstat.ips_fragdropped += fp->ipq_nfrags;
                ip_freef(pData, head, fp);
            }
            goto done;
        }
        next += GETIP(q)->ip_len;
    }
    /* Make sure the last packet didn't have the IP_MF flag */
    if (p->m_flags & M_FRAG)
    {
        if (fp->ipq_nfrags > maxfragsperpacket)
        {
            ipstat.ips_fragdropped += fp->ipq_nfrags;
            ip_freef(pData, head, fp);
        }
        goto done;
    }

    /*
     * Reassembly is complete.  Make sure the packet is a sane size.
     */
    q = fp->ipq_frags;
    ip = GETIP(q);
    hlen = ip->ip_hl << 2;
    if (next + hlen > IP_MAXPACKET)
    {
        ipstat.ips_fragdropped += fp->ipq_nfrags;
        ip_freef(pData, head, fp);
        goto done;
    }

    /*
     * Concatenate fragments.
     */
    m = q;
    nq = q->m_nextpkt;
    q->m_nextpkt = NULL;
    for (q = nq; q != NULL; q = nq)
    {
        nq = q->m_nextpkt;
        q->m_nextpkt = NULL;
        m_cat(pData, m, q);

        /* Temporarily re-expose the header to keep hlen in sync with the
         * (possibly relocated) mbuf data, then hide it again. */
        m->m_len += hlen;
        m->m_data -= hlen;
        ip = mtod(m, struct ip *); /*update ip pointer */
        hlen = ip->ip_hl << 2;
        m->m_len -= hlen;
        m->m_data += hlen;
    }
    m->m_len += hlen;
    m->m_data -= hlen;

    /*
     * Create header for new ip packet by modifying header of first
     * packet;  dequeue and discard fragment reassembly header.
     * Make header visible.
     */

    ip->ip_len = next;
    ip->ip_src = fp->ipq_src;
    ip->ip_dst = fp->ipq_dst;
    TAILQ_REMOVE(head, fp, ipq_list);
    nipq--;
    RTMemFree(fp);

    Assert((ip->ip_len == next));
    /* some debugging cruft by sklower, below, will go away soon */
#if 0
    if (m->m_flags & M_PKTHDR) /* XXX this should be done elsewhere */
        m_fixhdr(m);
#endif
    ipstat.ips_reassembled++;
    return (m);

dropfrag:
    ipstat.ips_fragdropped++;
    if (fp != NULL)
        fp->ipq_nfrags--;
    m_freem(pData, m);

done:
    return NULL;

#undef GETIP
}
544
545void
546ip_freef(PNATState pData, struct ipqhead *fhp, struct ipq_t *fp)
547{
548 struct mbuf *q;
549
550 while (fp->ipq_frags)
551 {
552 q = fp->ipq_frags;
553 fp->ipq_frags = q->m_nextpkt;
554 m_freem(pData, q);
555 }
556 TAILQ_REMOVE(fhp, fp, ipq_list);
557 RTMemFree(fp);
558 nipq--;
559}
560
561/*
562 * IP timer processing;
563 * if a timer expires on a reassembly
564 * queue, discard it.
565 */
566void
567ip_slowtimo(PNATState pData)
568{
569 register struct ipq_t *fp;
570
571 /* XXX: the fragment expiration is the same but requier
572 * additional loop see (see ip_input.c in FreeBSD tree)
573 */
574 int i;
575 DEBUG_CALL("ip_slowtimo");
576 for (i = 0; i < IPREASS_NHASH; i++)
577 {
578 for(fp = TAILQ_FIRST(&ipq[i]); fp;)
579 {
580 struct ipq_t *fpp;
581
582 fpp = fp;
583 fp = TAILQ_NEXT(fp, ipq_list);
584 if(--fpp->ipq_ttl == 0)
585 {
586 ipstat.ips_fragtimeout += fpp->ipq_nfrags;
587 ip_freef(pData, &ipq[i], fpp);
588 }
589 }
590 }
591 /*
592 * If we are over the maximum number of fragments
593 * (due to the limit being lowered), drain off
594 * enough to get down to the new limit.
595 */
596 if (maxnipq >= 0 && nipq > maxnipq)
597 {
598 for (i = 0; i < IPREASS_NHASH; i++)
599 {
600 while (nipq > maxnipq && !TAILQ_EMPTY(&ipq[i]))
601 {
602 ipstat.ips_fragdropped += TAILQ_FIRST(&ipq[i])->ipq_nfrags;
603 ip_freef(pData, &ipq[i], TAILQ_FIRST(&ipq[i]));
604 }
605 }
606 }
607}
608
609
610/*
611 * Strip out IP options, at higher
612 * level protocol in the kernel.
613 * Second argument is buffer to which options
614 * will be moved, and return value is their length.
615 * (XXX) should be deleted; last arg currently ignored.
616 */
617void
618ip_stripoptions(struct mbuf *m, struct mbuf *mopt)
619{
620 register int i;
621 struct ip *ip = mtod(m, struct ip *);
622 register caddr_t opts;
623 int olen;
624
625 olen = (ip->ip_hl<<2) - sizeof(struct ip);
626 opts = (caddr_t)(ip + 1);
627 i = m->m_len - (sizeof(struct ip) + olen);
628 memcpy(opts, opts + olen, (unsigned)i);
629 m->m_len -= olen;
630
631 ip->ip_hl = sizeof(struct ip) >> 2;
632}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette