VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/ip_input.c @ 21716

Last change on this file since 21716 was 21716, checked in by vboxsync, 15 years ago

NAT: counters

  • Property svn:eol-style set to native
File size: 15.6 KB
/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)ip_input.c 8.2 (Berkeley) 1/4/94
 * ip_input.c,v 1.11 1994/11/16 10:17:08 jkh Exp
 */

/*
 * Changes and additions relating to SLiRP are
 * Copyright (c) 1995 Danny Gasparovski.
 *
 * Please read the file COPYRIGHT for the
 * terms and conditions of the copyright.
 */

#include <slirp.h>
#include "ip_icmp.h"
#ifdef VBOX_WITH_SLIRP_ALIAS
# include "alias.h"
#endif


/*
 * IP initialization: fill in IP protocol switch table.
 * All protocols not implemented in kernel go to raw IP protocol handler.
 */
void
ip_init(PNATState pData)
{
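    /* One list head per hash bucket: ip_reass() files incoming fragments
     * into one of IPREASS_NHASH queues keyed on source address and IP id. */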
    int i = 0;
    for (i = 0; i < IPREASS_NHASH; ++i)
        TAILQ_INIT(&ipq[i]);
    maxnipq = 100; /* ??? */
    maxfragsperpacket = 16;
    nipq = 0;
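    /* Seed the outgoing IP identification counter with the low 16 bits
     * of the current time rather than a fixed value. */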
    ip_currid = tt.tv_sec & 0xffff;
    udp_init(pData);
    tcp_init(pData);
}

/*
 * Ip input routine. Checksum and byte swap header. If fragmented
 * try to reassemble. Process options. Pass to next level.
 */
void
ip_input(PNATState pData, struct mbuf *m)
{
    register struct ip *ip;
    int hlen = 0;
    STAM_PROFILE_START(&pData->StatIP_input, a);

    DEBUG_CALL("ip_input");
    DEBUG_ARG("m = %lx", (long)m);
    ip = mtod(m, struct ip *);
    Log2(("ip_dst=%R[IP4](len:%d) m_len = %d\n", &ip->ip_dst, ntohs(ip->ip_len), m->m_len));

    ipstat.ips_total++;
#ifdef VBOX_WITH_SLIRP_ALIAS
    {
        int rc;
        STAM_PROFILE_START(&pData->StatALIAS_input, a);
        rc = LibAliasIn(m->m_la ? m->m_la : pData->proxy_alias, mtod(m, char *),
                        m->m_size);
        STAM_PROFILE_STOP(&pData->StatALIAS_input, a);
        Log2(("NAT: LibAlias return %d\n", rc));
    }
#endif

    if (m->m_len < sizeof(struct ip))
    {
        ipstat.ips_toosmall++;
        STAM_PROFILE_STOP(&pData->StatIP_input, a);
        return;
    }

    ip = mtod(m, struct ip *);
    if (ip->ip_v != IPVERSION)
    {
        ipstat.ips_badvers++;
        goto bad;
    }

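    /* ip_hl is in 32-bit words, so shifting by 2 yields the header size in
     * bytes (e.g. ip_hl == 5 gives the usual 20-byte option-less header). */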
    hlen = ip->ip_hl << 2;
    if (   hlen < sizeof(struct ip)
        || hlen > m->m_len)
    {
        /* min header length */
        ipstat.ips_badhlen++; /* or packet too short */
        goto bad;
    }

    /* keep ip header intact for ICMP reply
     * ip->ip_sum = cksum(m, hlen);
     * if (ip->ip_sum) {
     */
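    /* The one's-complement checksum of a valid header, taken over the
     * header including its checksum field, folds to zero; any non-zero
     * result means the header is corrupt. */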
    if (cksum(m, hlen))
    {
        ipstat.ips_badsum++;
        goto bad;
    }

    /*
     * Convert fields to host representation.
     */
    NTOHS(ip->ip_len);
    if (ip->ip_len < hlen)
    {
        ipstat.ips_badlen++;
        goto bad;
    }
    NTOHS(ip->ip_id);
    NTOHS(ip->ip_off);

    /*
     * Check that the amount of data in the buffers
     * is at least as much as the IP header would have us expect.
     * Trim mbufs if longer than we expect.
     * Drop packet if shorter than we expect.
     */
    if (m->m_len < ip->ip_len)
    {
        ipstat.ips_tooshort++;
        goto bad;
    }
    /* Should drop packet if mbuf too long? hmmm... */
    if (m->m_len > ip->ip_len)
        m_adj(m, ip->ip_len - m->m_len);

    /* check ip_ttl for a correct ICMP reply */
    if (ip->ip_ttl == 0 || ip->ip_ttl == 1)
    {
        icmp_error(pData, m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, 0, "ttl");
        goto bad;
    }

    ip->ip_ttl--;
    /*
     * If offset or IP_MF are set, must reassemble.
     * Otherwise, nothing need be done.
     * (We could look in the reassembly queue to see
     * if the packet was previously fragmented,
     * but it's not worth the time; just let them time out.)
     *
     * XXX This should fail, don't fragment yet
     */
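    /* A non-zero fragment offset or a set More-Fragments bit marks the
     * packet as one piece of a larger datagram, so hand it to the
     * reassembly machinery. */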
    if (ip->ip_off & (IP_MF | IP_OFFMASK))
    {
        m = ip_reass(pData, m);
        if (m == NULL)
        {
            STAM_PROFILE_STOP(&pData->StatIP_input, a);
            return;
        }
        ip = mtod(m, struct ip *);
        hlen = ip->ip_len;
    }
    else
        ip->ip_len -= hlen;

    /*
     * Switch out to protocol's input routine.
     */
    ipstat.ips_delivered++;
    switch (ip->ip_p)
    {
        case IPPROTO_TCP:
            tcp_input(pData, m, hlen, (struct socket *)NULL);
            break;
        case IPPROTO_UDP:
            udp_input(pData, m, hlen);
            break;
        case IPPROTO_ICMP:
            icmp_input(pData, m, hlen);
            break;
        default:
            ipstat.ips_noproto++;
            m_free(pData, m);
    }
    STAM_PROFILE_STOP(&pData->StatIP_input, a);
    return;
bad:
    Log2(("NAT: IP datagram to %R[IP4] with size(%d) claimed as bad\n",
          &ip->ip_dst, ip->ip_len));
    m_freem(pData, m);
    STAM_PROFILE_STOP(&pData->StatIP_input, a);
    return;
}

struct mbuf *
ip_reass(PNATState pData, struct mbuf *m)
{
    struct ip *ip;
    struct mbuf *p, *q, *nq;
    struct ipq_t *fp = NULL;
    struct ipqhead *head;
    int i, hlen, next;
    u_short hash;

    /* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
    if (   maxnipq == 0
        || maxfragsperpacket == 0)
    {
        ipstat.ips_fragments++;
        ipstat.ips_fragdropped++;
        m_freem(pData, m);
        return (NULL);
    }

    ip = mtod(m, struct ip *);
    hlen = ip->ip_hl << 2;

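    /* All fragments of a datagram share (src, dst, id, protocol); hashing
     * on source address and id is enough to pick a bucket, with the
     * remaining fields checked in the lookup below. */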
    hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
    head = &ipq[hash];

    /*
     * Look for queue of fragments
     * of this datagram.
     */
    TAILQ_FOREACH(fp, head, ipq_list)
        if (ip->ip_id == fp->ipq_id &&
            ip->ip_src.s_addr == fp->ipq_src.s_addr &&
            ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
            ip->ip_p == fp->ipq_p)
            goto found;

    fp = NULL;

    /*
     * Attempt to trim the number of allocated fragment queues if it
     * exceeds the administrative limit.
     */
    if ((nipq > maxnipq) && (maxnipq > 0))
    {
        /*
         * drop something from the tail of the current queue
         * before proceeding further
         */
        struct ipq_t *q = TAILQ_LAST(head, ipqhead);
        if (q == NULL)
        {
            /* gak */
            for (i = 0; i < IPREASS_NHASH; i++)
            {
                struct ipq_t *r = TAILQ_LAST(&ipq[i], ipqhead);
                if (r)
                {
                    ipstat.ips_fragtimeout += r->ipq_nfrags;
                    ip_freef(pData, &ipq[i], r);
                    break;
                }
            }
        }
        else
        {
            ipstat.ips_fragtimeout += q->ipq_nfrags;
            ip_freef(pData, head, q);
        }
    }

found:
    /*
     * Adjust ip_len to not reflect header,
     * convert offset of this to bytes.
     */
    ip->ip_len -= hlen;
    if (ip->ip_off & IP_MF)
    {
        /*
         * Make sure that fragments have a data length
         * that's a non-zero multiple of 8 bytes.
         */
        if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0)
        {
            ipstat.ips_toosmall++; /* XXX */
            goto dropfrag;
        }
        m->m_flags |= M_FRAG;
    }
    else
        m->m_flags &= ~M_FRAG;
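    /* The wire format counts fragment offsets in 8-byte units; convert
     * to plain bytes so the overlap arithmetic below is straightforward. */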
    ip->ip_off <<= 3;


    /*
     * Attempt reassembly; if it succeeds, proceed.
     * ip_reass() will return a different mbuf.
     */
    ipstat.ips_fragments++;

    /* Previous ip_reass() started here. */
    /*
     * Presence of header sizes in mbufs
     * would confuse code below.
     */
    m->m_data += hlen;
    m->m_len -= hlen;

    /*
     * If first fragment to arrive, create a reassembly queue.
     */
    if (fp == NULL)
    {
        fp = RTMemAlloc(sizeof(struct ipq_t));
        if (fp == NULL)
            goto dropfrag;
        TAILQ_INSERT_HEAD(head, fp, ipq_list);
        nipq++;
        fp->ipq_nfrags = 1;
        fp->ipq_ttl = IPFRAGTTL;
        fp->ipq_p = ip->ip_p;
        fp->ipq_id = ip->ip_id;
        fp->ipq_src = ip->ip_src;
        fp->ipq_dst = ip->ip_dst;
        fp->ipq_frags = m;
        m->m_nextpkt = NULL;
        goto done;
    }
    else
    {
        fp->ipq_nfrags++;
    }

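/* Queued fragments have had their IP header skipped (m_data was advanced
 * past it above), so GETIP relies on MBUF_IP_HEADER to locate the header
 * that still sits in the mbuf's backing buffer. */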
#define GETIP(m) ((struct ip *)(MBUF_IP_HEADER(m)))


    /*
     * Find a segment which begins after this one does.
     */
    for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
        if (GETIP(q)->ip_off > ip->ip_off)
            break;

    /*
     * If there is a preceding segment, it may provide some of
     * our data already. If so, drop the data from the incoming
     * segment. If it provides all of our data, drop us, otherwise
     * stick new segment in the proper place.
     *
     * If some of the data is dropped from the preceding
     * segment, then its checksum is invalidated.
     */
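    /* Worked example (hypothetical numbers): if the preceding fragment
     * covers bytes 0..23 (ip_off 0, ip_len 24) and the new one claims
     * 16..39, then i == 8: the first 8 duplicate bytes of the new
     * fragment are trimmed and its offset is bumped to 24. */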
    if (p)
    {
        i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
        if (i > 0)
        {
            if (i >= ip->ip_len)
                goto dropfrag;
            m_adj(m, i);
            ip->ip_off += i;
            ip->ip_len -= i;
        }
        m->m_nextpkt = p->m_nextpkt;
        p->m_nextpkt = m;
    }
    else
    {
        m->m_nextpkt = fp->ipq_frags;
        fp->ipq_frags = m;
    }

    /*
     * While we overlap succeeding segments trim them or,
     * if they are completely covered, dequeue them.
     */
    for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
         q = nq)
    {
        i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
        if (i < GETIP(q)->ip_len)
        {
            GETIP(q)->ip_len -= i;
            GETIP(q)->ip_off += i;
            m_adj(q, i);
            break;
        }
        nq = q->m_nextpkt;
        m->m_nextpkt = nq;
        ipstat.ips_fragdropped++;
        fp->ipq_nfrags--;
        m_freem(pData, q);
    }

    /*
     * Check for complete reassembly and perform frag per packet
     * limiting.
     *
     * Frag limiting is performed here so that the nth frag has
     * a chance to complete the packet before we drop the packet.
     * As a result, n+1 frags are actually allowed per packet, but
     * only n will ever be stored. (n = maxfragsperpacket.)
     */
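    /* Walk the (offset-sorted) fragment list: 'next' is the byte offset
     * the next fragment must start at, so any mismatch means a gap and
     * the datagram is still incomplete. */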
    next = 0;
    for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
    {
        if (GETIP(q)->ip_off != next)
        {
            if (fp->ipq_nfrags > maxfragsperpacket)
            {
                ipstat.ips_fragdropped += fp->ipq_nfrags;
                ip_freef(pData, head, fp);
            }
            goto done;
        }
        next += GETIP(q)->ip_len;
    }
    /* Make sure the last packet didn't have the IP_MF flag */
    if (p->m_flags & M_FRAG)
    {
        if (fp->ipq_nfrags > maxfragsperpacket)
        {
            ipstat.ips_fragdropped += fp->ipq_nfrags;
            ip_freef(pData, head, fp);
        }
        goto done;
    }

    /*
     * Reassembly is complete. Make sure the packet is a sane size.
     */
    q = fp->ipq_frags;
    ip = GETIP(q);
    if (next + (ip->ip_hl << 2) > IP_MAXPACKET)
    {
        ipstat.ips_fragdropped += fp->ipq_nfrags;
        ip_freef(pData, head, fp);
        goto done;
    }

    /*
     * Concatenate fragments.
     */
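    /* Append each remaining fragment's payload (its IP header was already
     * skipped on entry) onto the first mbuf, producing one contiguous
     * datagram. */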
    m = q;
    nq = q->m_nextpkt;
    q->m_nextpkt = NULL;
    for (q = nq; q != NULL; q = nq)
    {
        nq = q->m_nextpkt;
        q->m_nextpkt = NULL;
        m_cat(pData, m, q);
    }

    /*
     * Create header for new ip packet by modifying header of first
     * packet; dequeue and discard fragment reassembly header.
     * Make header visible.
     */
#if 0
    ip->ip_len = (ip->ip_hl << 2) + next;
#else
    ip->ip_len = next;
#endif
    ip->ip_src = fp->ipq_src;
    ip->ip_dst = fp->ipq_dst;
    TAILQ_REMOVE(head, fp, ipq_list);
    nipq--;
    RTMemFree(fp);

    m->m_len += (ip->ip_hl << 2);
    m->m_data -= (ip->ip_hl << 2);
    /* some debugging cruft by sklower, below, will go away soon */
#if 0
    if (m->m_flags & M_PKTHDR) /* XXX this should be done elsewhere */
        m_fixhdr(m);
#endif
    ipstat.ips_reassembled++;
    return (m);

dropfrag:
    ipstat.ips_fragdropped++;
    if (fp != NULL)
        fp->ipq_nfrags--;
    m_freem(pData, m);

done:
    return NULL;

#undef GETIP
}

void
ip_freef(PNATState pData, struct ipqhead *fhp, struct ipq_t *fp)
{
    struct mbuf *q;

    while (fp->ipq_frags)
    {
        q = fp->ipq_frags;
        fp->ipq_frags = q->m_nextpkt;
        m_freem(pData, q);
    }
    TAILQ_REMOVE(fhp, fp, ipq_list);
    RTMemFree(fp);
    nipq--;
}

/*
 * IP timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 */
void
ip_slowtimo(PNATState pData)
{
    register struct ipq_t *fp;

    /* XXX: the fragment expiration is the same, but it requires an
     * additional loop (see ip_input.c in the FreeBSD tree)
     */
    int i;
    DEBUG_CALL("ip_slowtimo");
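    /* Every timer tick ages each pending reassembly queue by one; a queue
     * whose ttl reaches zero is discarded along with all of its fragments. */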
    for (i = 0; i < IPREASS_NHASH; i++)
    {
        for (fp = TAILQ_FIRST(&ipq[i]); fp;)
        {
            struct ipq_t *fpp;

            fpp = fp;
            fp = TAILQ_NEXT(fp, ipq_list);
            if (--fpp->ipq_ttl == 0)
            {
                ipstat.ips_fragtimeout += fpp->ipq_nfrags;
                ip_freef(pData, &ipq[i], fpp);
            }
        }
    }
    /*
     * If we are over the maximum number of fragments
     * (due to the limit being lowered), drain off
     * enough to get down to the new limit.
     */
    if (maxnipq >= 0 && nipq > maxnipq)
    {
        for (i = 0; i < IPREASS_NHASH; i++)
        {
            while (nipq > maxnipq && !TAILQ_EMPTY(&ipq[i]))
            {
                ipstat.ips_fragdropped += TAILQ_FIRST(&ipq[i])->ipq_nfrags;
                ip_freef(pData, &ipq[i], TAILQ_FIRST(&ipq[i]));
            }
        }
    }
}


/*
 * Strip out IP options, at higher
 * level protocol in the kernel.
 * Second argument is buffer to which options
 * will be moved, and return value is their length.
 * (XXX) should be deleted; last arg currently ignored.
 */
void
ip_stripoptions(struct mbuf *m, struct mbuf *mopt)
{
    register int i;
    struct ip *ip = mtod(m, struct ip *);
    register caddr_t opts;
    int olen;

    olen = (ip->ip_hl << 2) - sizeof(struct ip);
    opts = (caddr_t)(ip + 1);
    i = m->m_len - (sizeof(struct ip) + olen);
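    /* Slide the payload down over the options; the source and destination
     * ranges overlap whenever the payload is longer than the options, so
     * an overlap-safe copy is required here. */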
    memmove(opts, opts + olen, (unsigned)i);
    m->m_len -= olen;

    ip->ip_hl = sizeof(struct ip) >> 2;
}