VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/ip_input.c @ 20053

Last change on this file since 20053 was 20053, checked in by vboxsync, 16 years ago

NAT: LibAlias enabling + tcp_emu replaced with ftp_module

  • Property svn:eol-style set to native
File size: 14.9 KB
/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *      The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)ip_input.c  8.2 (Berkeley) 1/4/94
 * ip_input.c,v 1.11 1994/11/16 10:17:08 jkh Exp
 */

/*
 * Changes and additions relating to SLiRP are
 * Copyright (c) 1995 Danny Gasparovski.
 *
 * Please read the file COPYRIGHT for the
 * terms and conditions of the copyright.
 */

#include <slirp.h>
#include "ip_icmp.h"
#ifdef VBOX_WITH_SLIRP_ALIAS
# include "alias.h"
#endif


/*
 * IP initialization: fill in IP protocol switch table.
 * All protocols not implemented in kernel go to raw IP protocol handler.
 */
void
ip_init(PNATState pData)
{
    int i = 0;
    for (i = 0; i < IPREASS_NHASH; ++i)
        TAILQ_INIT(&ipq[i]);
    maxnipq = 100; /* ??? */
    maxfragsperpacket = 16;
    nipq = 0;
    ip_currid = tt.tv_sec & 0xffff;
    udp_init(pData);
    tcp_init(pData);
}

/*
 * Ip input routine. Checksum and byte swap header. If fragmented
 * try to reassemble. Process options. Pass to next level.
 */
void
ip_input(PNATState pData, struct mbuf *m)
{
    register struct ip *ip;
    int hlen;

    DEBUG_CALL("ip_input");
    DEBUG_ARG("m = %lx", (long)m);
    DEBUG_ARG("m_len = %d", m->m_len);

    ipstat.ips_total++;
#ifdef VBOX_WITH_SLIRP_ALIAS
    {
        int rc;
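        /* Let libalias translate the inbound packet in place before normal
         * IP processing; the return code is only logged here. */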
        rc = LibAliasIn(LIST_FIRST(&instancehead), mtod(m, char *), m->m_len);
        Log2(("NAT: LibAlias return %d\n", rc));
    }
#endif

    if (m->m_len < sizeof(struct ip))
    {
        ipstat.ips_toosmall++;
        return;
    }

    ip = mtod(m, struct ip *);
    if (ip->ip_v != IPVERSION)
    {
        ipstat.ips_badvers++;
        goto bad;
    }

    hlen = ip->ip_hl << 2;
    if (   hlen < sizeof(struct ip)
        || hlen > m->m_len)
    {
        /* min header length */
        ipstat.ips_badhlen++; /* or packet too short */
        goto bad;
    }

    /* keep ip header intact for ICMP reply
     * ip->ip_sum = cksum(m, hlen);
     * if (ip->ip_sum) {
     */
    if (cksum(m, hlen))
    {
        ipstat.ips_badsum++;
        goto bad;
    }

    /*
     * Convert fields to host representation.
     */
    NTOHS(ip->ip_len);
    if (ip->ip_len < hlen)
    {
        ipstat.ips_badlen++;
        goto bad;
    }
    NTOHS(ip->ip_id);
    NTOHS(ip->ip_off);

    /*
     * Check that the amount of data in the buffers
     * is at least as much as the IP header would have us expect.
     * Trim mbufs if longer than we expect.
     * Drop packet if shorter than we expect.
     */
    if (m->m_len < ip->ip_len)
    {
        ipstat.ips_tooshort++;
        goto bad;
    }
    /* Should drop packet if mbuf too long? hmmm... */
    if (m->m_len > ip->ip_len)
        m_adj(m, ip->ip_len - m->m_len);

    /* check ip_ttl for a correct ICMP reply */
    if (ip->ip_ttl == 0 || ip->ip_ttl == 1)
    {
        icmp_error(pData, m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, 0, "ttl");
        goto bad;
    }

    ip->ip_ttl--;
    /*
     * If offset or IP_MF are set, must reassemble.
     * Otherwise, nothing need be done.
     * (We could look in the reassembly queue to see
     * if the packet was previously fragmented,
     * but it's not worth the time; just let them time out.)
     *
     * XXX This should fail, don't fragment yet
     */
    if (ip->ip_off & (IP_MF | IP_OFFMASK))
    {
        m = ip_reass(pData, m);
        if (m == NULL)
            return;
        ip = mtod(m, struct ip *);
        hlen = ip->ip_len;
    }
    else
        ip->ip_len -= hlen;

    /*
     * Switch out to protocol's input routine.
     */
    ipstat.ips_delivered++;
    switch (ip->ip_p)
    {
        case IPPROTO_TCP:
            tcp_input(pData, m, hlen, (struct socket *)NULL);
            break;
        case IPPROTO_UDP:
            udp_input(pData, m, hlen);
            break;
        case IPPROTO_ICMP:
            icmp_input(pData, m, hlen);
            break;
        default:
            ipstat.ips_noproto++;
            m_free(pData, m);
    }
    return;
bad:
    m_freem(pData, m);
    return;
}

struct mbuf *
ip_reass(PNATState pData, struct mbuf* m)
{
    struct ip *ip;
    struct mbuf *p, *q, *nq;
    struct ipq_t *fp = NULL;
    struct ipqhead *head;
    int i, hlen, next;
    u_short hash;

    /* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
    if (   maxnipq == 0
        || maxfragsperpacket == 0)
    {
        ipstat.ips_fragments++;
        ipstat.ips_fragdropped++;
        m_freem(pData, m);
        return (NULL);
    }

    ip = mtod(m, struct ip *);
    hlen = ip->ip_hl << 2;

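    /* Pick the reassembly bucket by hashing the source address and the datagram id. */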
    hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
    head = &ipq[hash];

    /*
     * Look for queue of fragments
     * of this datagram.
     */
    TAILQ_FOREACH(fp, head, ipq_list)
        if (ip->ip_id == fp->ipq_id &&
            ip->ip_src.s_addr == fp->ipq_src.s_addr &&
            ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
            ip->ip_p == fp->ipq_p)
            goto found;

    fp = NULL;

    /*
     * Attempt to trim the number of allocated fragment queues if it
     * exceeds the administrative limit.
     */
    if ((nipq > maxnipq) && (maxnipq > 0))
    {
        /*
         * drop something from the tail of the current queue
         * before proceeding further
         */
        struct ipq_t *q = TAILQ_LAST(head, ipqhead);
        if (q == NULL)
        {
            /* gak */
            for (i = 0; i < IPREASS_NHASH; i++)
            {
                struct ipq_t *r = TAILQ_LAST(&ipq[i], ipqhead);
                if (r)
                {
                    ipstat.ips_fragtimeout += r->ipq_nfrags;
                    ip_freef(pData, &ipq[i], r);
                    break;
                }
            }
        }
        else
        {
            ipstat.ips_fragtimeout += q->ipq_nfrags;
            ip_freef(pData, head, q);
        }
    }

found:
    /*
     * Adjust ip_len to not reflect header,
     * convert offset of this to bytes.
     */
    ip->ip_len -= hlen;
    if (ip->ip_off & IP_MF)
    {
        /*
         * Make sure that fragments have a data length
         * that's a non-zero multiple of 8 bytes.
         */
        if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0)
        {
            ipstat.ips_toosmall++; /* XXX */
            goto dropfrag;
        }
        m->m_flags |= M_FRAG;
    }
    else
        m->m_flags &= ~M_FRAG;
    ip->ip_off <<= 3;


    /*
     * Attempt reassembly; if it succeeds, proceed.
     * ip_reass() will return a different mbuf.
     */
    ipstat.ips_fragments++;

    /* Previous ip_reass() started here. */
    /*
     * Presence of header sizes in mbufs
     * would confuse code below.
     */
    m->m_data += hlen;
    m->m_len -= hlen;

    /*
     * If first fragment to arrive, create a reassembly queue.
     */
    if (fp == NULL)
    {
        fp = RTMemAlloc(sizeof(struct ipq_t));
        if (fp == NULL)
            goto dropfrag;
        TAILQ_INSERT_HEAD(head, fp, ipq_list);
        nipq++;
        fp->ipq_nfrags = 1;
        fp->ipq_ttl = IPFRAGTTL;
        fp->ipq_p = ip->ip_p;
        fp->ipq_id = ip->ip_id;
        fp->ipq_src = ip->ip_src;
        fp->ipq_dst = ip->ip_dst;
        fp->ipq_frags = m;
        m->m_nextpkt = NULL;
        goto done;
    }
    else
    {
        fp->ipq_nfrags++;
    }

#define GETIP(m) ((struct ip*)(MBUF_IP_HEADER(m)))
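/* GETIP() gives access to the IP header of a fragment already queued on fp->ipq_frags. */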


    /*
     * Find a segment which begins after this one does.
     */
    for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
        if (GETIP(q)->ip_off > ip->ip_off)
            break;

    /*
     * If there is a preceding segment, it may provide some of
     * our data already. If so, drop the data from the incoming
     * segment. If it provides all of our data, drop us, otherwise
     * stick new segment in the proper place.
     *
     * If some of the data is dropped from the preceding
     * segment, then its checksum is invalidated.
     */
    if (p)
    {
        i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
        if (i > 0)
        {
            if (i >= ip->ip_len)
                goto dropfrag;
            m_adj(m, i);
            ip->ip_off += i;
            ip->ip_len -= i;
        }
        m->m_nextpkt = p->m_nextpkt;
        p->m_nextpkt = m;
    }
    else
    {
        m->m_nextpkt = fp->ipq_frags;
        fp->ipq_frags = m;
    }

    /*
     * While we overlap succeeding segments trim them or,
     * if they are completely covered, dequeue them.
     */
    for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
         q = nq)
    {
        i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
        if (i < GETIP(q)->ip_len)
        {
            GETIP(q)->ip_len -= i;
            GETIP(q)->ip_off += i;
            m_adj(q, i);
            break;
        }
        nq = q->m_nextpkt;
        m->m_nextpkt = nq;
        ipstat.ips_fragdropped++;
        fp->ipq_nfrags--;
        m_freem(pData, q);
    }

    /*
     * Check for complete reassembly and perform frag per packet
     * limiting.
     *
     * Frag limiting is performed here so that the nth frag has
     * a chance to complete the packet before we drop the packet.
     * As a result, n+1 frags are actually allowed per packet, but
     * only n will ever be stored. (n = maxfragsperpacket.)
     *
     */
    next = 0;
    for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
    {
        if (GETIP(q)->ip_off != next)
        {
            if (fp->ipq_nfrags > maxfragsperpacket)
            {
                ipstat.ips_fragdropped += fp->ipq_nfrags;
                ip_freef(pData, head, fp);
            }
            goto done;
        }
        next += GETIP(q)->ip_len;
    }
    /* Make sure the last packet didn't have the IP_MF flag */
    if (p->m_flags & M_FRAG)
    {
        if (fp->ipq_nfrags > maxfragsperpacket)
        {
            ipstat.ips_fragdropped += fp->ipq_nfrags;
            ip_freef(pData, head, fp);
        }
        goto done;
    }

    /*
     * Reassembly is complete. Make sure the packet is a sane size.
     */
    q = fp->ipq_frags;
    ip = GETIP(q);
    if (next + (ip->ip_hl << 2) > IP_MAXPACKET)
    {
        ipstat.ips_fragdropped += fp->ipq_nfrags;
        ip_freef(pData, head, fp);
        goto done;
    }

    /*
     * Concatenate fragments.
     */
    m = q;
    nq = q->m_nextpkt;
    q->m_nextpkt = NULL;
    for (q = nq; q != NULL; q = nq)
    {
        nq = q->m_nextpkt;
        q->m_nextpkt = NULL;
        m_cat(pData, m, q);
    }

    /*
     * Create header for new ip packet by modifying header of first
     * packet; dequeue and discard fragment reassembly header.
     * Make header visible.
     */
#if 0
    ip->ip_len = (ip->ip_hl << 2) + next;
#else
    ip->ip_len = next;
#endif
    ip->ip_src = fp->ipq_src;
    ip->ip_dst = fp->ipq_dst;
    TAILQ_REMOVE(head, fp, ipq_list);
    nipq--;
    RTMemFree(fp);

    m->m_len += (ip->ip_hl << 2);
    m->m_data -= (ip->ip_hl << 2);
    /* some debugging cruft by sklower, below, will go away soon */
#if 0
    if (m->m_flags & M_PKTHDR) /* XXX this should be done elsewhere */
        m_fixhdr(m);
#endif
    ipstat.ips_reassembled++;
    return (m);

dropfrag:
    ipstat.ips_fragdropped++;
    if (fp != NULL)
        fp->ipq_nfrags--;
    m_freem(pData, m);

done:
    return NULL;

#undef GETIP
}

void
ip_freef(PNATState pData, struct ipqhead *fhp, struct ipq_t *fp)
{
    struct mbuf *q;

    while (fp->ipq_frags)
    {
        q = fp->ipq_frags;
        fp->ipq_frags = q->m_nextpkt;
        m_freem(pData, q);
    }
    TAILQ_REMOVE(fhp, fp, ipq_list);
    RTMemFree(fp);
    nipq--;
}

/*
 * IP timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 */
void
ip_slowtimo(PNATState pData)
{
    register struct ipq_t *fp;

    /* XXX: the fragment expiration is the same, but requires an
     * additional loop (see ip_input.c in the FreeBSD tree)
     */
    int i;
    DEBUG_CALL("ip_slowtimo");
    for (i = 0; i < IPREASS_NHASH; i++)
    {
        for (fp = TAILQ_FIRST(&ipq[i]); fp;)
        {
            struct ipq_t *fpp;

            fpp = fp;
            fp = TAILQ_NEXT(fp, ipq_list);
            if (--fpp->ipq_ttl == 0)
            {
                ipstat.ips_fragtimeout += fpp->ipq_nfrags;
                ip_freef(pData, &ipq[i], fpp);
            }
        }
    }
    /*
     * If we are over the maximum number of fragments
     * (due to the limit being lowered), drain off
     * enough to get down to the new limit.
     */
    if (maxnipq >= 0 && nipq > maxnipq)
    {
        for (i = 0; i < IPREASS_NHASH; i++)
        {
            while (nipq > maxnipq && !TAILQ_EMPTY(&ipq[i]))
            {
                ipstat.ips_fragdropped += TAILQ_FIRST(&ipq[i])->ipq_nfrags;
                ip_freef(pData, &ipq[i], TAILQ_FIRST(&ipq[i]));
            }
        }
    }
}


/*
 * Strip out IP options, at higher
 * level protocol in the kernel.
 * Second argument is buffer to which options
 * will be moved, and return value is their length.
 * (XXX) should be deleted; last arg currently ignored.
 */
void
ip_stripoptions(struct mbuf *m, struct mbuf *mopt)
{
    register int i;
    struct ip *ip = mtod(m, struct ip *);
    register caddr_t opts;
    int olen;

    olen = (ip->ip_hl << 2) - sizeof(struct ip);
    opts = (caddr_t)(ip + 1);
    i = m->m_len - (sizeof(struct ip) + olen);
    memcpy(opts, opts + olen, (unsigned)i);
    m->m_len -= olen;

    ip->ip_hl = sizeof(struct ip) >> 2;
}
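
The ip_input() path above rejects any datagram whose header fails cksum(m, hlen), slirp's mbuf-aware helper for the standard RFC 1071 Internet checksum: a valid IPv4 header, summed with its stored checksum field included, folds to zero. The sketch below illustrates that check over a flat buffer; the function name and the flat-buffer interface are illustrative assumptions only and are not part of this file or of slirp's actual cksum() helper.

#include <stddef.h>
#include <stdint.h>

/* Illustrative RFC 1071 one's-complement checksum over 'len' bytes of a flat
 * buffer. To verify a received IPv4 header, pass its first ip_hl * 4 bytes
 * (checksum field included); a result of 0 means the header checksum is good,
 * mirroring the "if (cksum(m, hlen)) goto bad;" test in ip_input(). */
static uint16_t ip_cksum_sketch(const void *buf, size_t len)
{
    const uint8_t *p = (const uint8_t *)buf;
    uint32_t sum = 0;

    while (len > 1)                 /* add up 16-bit words in network order */
    {
        sum += ((uint32_t)p[0] << 8) | p[1];
        p   += 2;
        len -= 2;
    }
    if (len)                        /* an odd trailing byte is padded with zero */
        sum += (uint32_t)p[0] << 8;

    while (sum >> 16)               /* fold the carries back into 16 bits */
        sum = (sum & 0xffff) + (sum >> 16);

    return (uint16_t)~sum;          /* valid header => complement folds to 0 */
}

When generating a header rather than verifying one, the same routine is run with the checksum field zeroed and the result is stored into ip_sum.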