VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/ip_input.c@ 15458

Last change on this file since 15458 was 15453, checked in by vboxsync, 16 years ago

slirp: removed the old 64-bit incompatible reassemble code

  • Property svn:eol-style set to native
File size: 14.8 KB
Line 
1/*
2 * Copyright (c) 1982, 1986, 1988, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)ip_input.c 8.2 (Berkeley) 1/4/94
34 * ip_input.c,v 1.11 1994/11/16 10:17:08 jkh Exp
35 */
36
37/*
38 * Changes and additions relating to SLiRP are
39 * Copyright (c) 1995 Danny Gasparovski.
40 *
41 * Please read the file COPYRIGHT for the
42 * terms and conditions of the copyright.
43 */
44
45#include <slirp.h>
46#include "ip_icmp.h"
47
48
49/*
50 * IP initialization: fill in IP protocol switch table.
51 * All protocols not implemented in kernel go to raw IP protocol handler.
52 */
53void
54ip_init(PNATState pData)
55{
56 int i = 0;
57 for (i = 0; i < IPREASS_NHASH; ++i)
58 TAILQ_INIT(&ipq[i]);
59 maxnipq = 100; /* ??? */
60 maxfragsperpacket = 16;
61 nipq = 0;
62 ip_currid = tt.tv_sec & 0xffff;
63 udp_init(pData);
64 tcp_init(pData);
65}
66
67/*
68 * Ip input routine. Checksum and byte swap header. If fragmented
69 * try to reassemble. Process options. Pass to next level.
70 */
71void
72ip_input(PNATState pData, struct mbuf *m)
73{
74 register struct ip *ip;
75 int hlen;
76
77 DEBUG_CALL("ip_input");
78 DEBUG_ARG("m = %lx", (long)m);
79 DEBUG_ARG("m_len = %d", m->m_len);
80
81 ipstat.ips_total++;
82
83 if (m->m_len < sizeof(struct ip))
84 {
85 ipstat.ips_toosmall++;
86 return;
87 }
88
89 ip = mtod(m, struct ip *);
90 if (ip->ip_v != IPVERSION)
91 {
92 ipstat.ips_badvers++;
93 goto bad;
94 }
95
96 hlen = ip->ip_hl << 2;
97 if ( hlen < sizeof(struct ip)
98 || hlen > m->m_len)
99 {
100 /* min header length */
101 ipstat.ips_badhlen++; /* or packet too short */
102 goto bad;
103 }
104
105 /* keep ip header intact for ICMP reply
106 * ip->ip_sum = cksum(m, hlen);
107 * if (ip->ip_sum) {
108 */
109 if(cksum(m,hlen))
110 {
111 ipstat.ips_badsum++;
112 goto bad;
113 }
114
115 /*
116 * Convert fields to host representation.
117 */
118 NTOHS(ip->ip_len);
119 if (ip->ip_len < hlen)
120 {
121 ipstat.ips_badlen++;
122 goto bad;
123 }
124 NTOHS(ip->ip_id);
125 NTOHS(ip->ip_off);
126
127 /*
128 * Check that the amount of data in the buffers
129 * is as at least much as the IP header would have us expect.
130 * Trim mbufs if longer than we expect.
131 * Drop packet if shorter than we expect.
132 */
133 if (m->m_len < ip->ip_len)
134 {
135 ipstat.ips_tooshort++;
136 goto bad;
137 }
138 /* Should drop packet if mbuf too long? hmmm... */
139 if (m->m_len > ip->ip_len)
140 m_adj(m, ip->ip_len - m->m_len);
141
142 /* check ip_ttl for a correct ICMP reply */
143 if (ip->ip_ttl==0 || ip->ip_ttl == 1)
144 {
145 icmp_error(pData, m, ICMP_TIMXCEED,ICMP_TIMXCEED_INTRANS, 0,"ttl");
146 goto bad;
147 }
148
149#ifdef VBOX_WITH_SLIRP_ICMP
150 ip->ip_ttl--;
151#endif
152 /*
153 * If offset or IP_MF are set, must reassemble.
154 * Otherwise, nothing need be done.
155 * (We could look in the reassembly queue to see
156 * if the packet was previously fragmented,
157 * but it's not worth the time; just let them time out.)
158 *
159 * XXX This should fail, don't fragment yet
160 */
161 if (ip->ip_off & (IP_MF | IP_OFFMASK))
162 {
163 m = ip_reass(pData, m);
164 if (m == NULL)
165 return;
166 ip = mtod(m, struct ip *);
167 hlen = ip->ip_len;
168 }
169 else
170 ip->ip_len -= hlen;
171
172 /*
173 * Switch out to protocol's input routine.
174 */
175 ipstat.ips_delivered++;
176 switch (ip->ip_p)
177 {
178 case IPPROTO_TCP:
179 tcp_input(pData, m, hlen, (struct socket *)NULL);
180 break;
181 case IPPROTO_UDP:
182 udp_input(pData, m, hlen);
183 break;
184 case IPPROTO_ICMP:
185 icmp_input(pData, m, hlen);
186 break;
187 default:
188 ipstat.ips_noproto++;
189 m_free(pData, m);
190 }
191 return;
192bad:
193 m_freem(pData, m);
194 return;
195}
196
/*
 * Take an incoming datagram fragment and try to reassemble it into a
 * whole datagram.  Fragments are queued per (id, src, dst, protocol)
 * tuple in the ipq hash buckets, sorted by fragment offset, with
 * overlaps trimmed FreeBSD-style.
 *
 * Returns the complete, reassembled datagram once the final fragment
 * arrives, or NULL when the fragment was queued or dropped (the mbuf
 * is consumed either way).
 */
struct mbuf *
ip_reass(PNATState pData, struct mbuf* m)
{
    struct ip *ip;
    struct mbuf *p, *q, *nq, *t;
    struct ipq_t *fp = NULL;
    struct ipqhead *head;
    int i, hlen, next;
    u_short hash;

    /* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
    if (   maxnipq == 0
        || maxfragsperpacket == 0)
    {
        ipstat.ips_fragments++;
        ipstat.ips_fragdropped++;
        m_freem(pData, m);
        return (NULL);
    }

    ip = mtod(m, struct ip *);
    hlen = ip->ip_hl << 2;

    /* Pick the hash bucket for this datagram's reassembly queue. */
    hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
    head = &ipq[hash];

    /*
     * Look for queue of fragments
     * of this datagram.
     */
    TAILQ_FOREACH(fp, head, ipq_list)
        if (ip->ip_id == fp->ipq_id &&
            ip->ip_src.s_addr == fp->ipq_src.s_addr &&
            ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
            ip->ip_p == fp->ipq_p)
            goto found;

    fp = NULL;

    /*
     * Attempt to trim the number of allocated fragment queues if it
     * exceeds the administrative limit.
     */
    if ((nipq > maxnipq) && (maxnipq > 0))
    {
        /*
         * drop something from the tail of the current queue
         * before proceeding further
         */
        struct ipq_t *q = TAILQ_LAST(head, ipqhead);
        if (q == NULL)
        {
            /* gak -- this bucket is empty, so evict the oldest queue
             * from the first non-empty bucket instead. */
            for (i = 0; i < IPREASS_NHASH; i++)
            {
                struct ipq_t *r = TAILQ_LAST(&ipq[i], ipqhead);
                if (r)
                {
                    ipstat.ips_fragtimeout += r->ipq_nfrags;
                    ip_freef(pData, &ipq[i], r);
                    break;
                }
            }
        }
        else
        {
            ipstat.ips_fragtimeout += q->ipq_nfrags;
            ip_freef(pData, head, q);
        }
    }

found:
    /*
     * Adjust ip_len to not reflect header,
     * convert offset of this to bytes.
     */
    ip->ip_len -= hlen;
    if (ip->ip_off & IP_MF)
    {
        /*
         * Make sure that fragments have a data length
         * that's a non-zero multiple of 8 bytes.
         */
        if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0)
        {
            ipstat.ips_toosmall++; /* XXX */
            goto dropfrag;
        }
        m->m_flags |= M_FRAG;
    }
    else
        m->m_flags &= ~M_FRAG;
    ip->ip_off <<= 3;   /* fragment offset field counts 8-byte units */


    /*
     * Attempt reassembly; if it succeeds, proceed.
     * ip_reass() will return a different mbuf.
     */
    ipstat.ips_fragments++;
    /* Stash the IP header pointer so GETIP() below can find it after
     * the header is skipped over. */
    m->m_hdr.header = ip;

    /* Previous ip_reass() started here. */
    /*
     * Presence of header sizes in mbufs
     * would confuse code below.
     */
    m->m_data += hlen;
    m->m_len -= hlen;

    /*
     * If first fragment to arrive, create a reassembly queue.
     */
    if (fp == NULL)
    {
        fp = malloc(sizeof(struct ipq_t));
        if (fp == NULL)
            goto dropfrag;
        TAILQ_INSERT_HEAD(head, fp, ipq_list);
        nipq++;
        fp->ipq_nfrags = 1;
        fp->ipq_ttl = IPFRAGTTL;
        fp->ipq_p = ip->ip_p;
        fp->ipq_id = ip->ip_id;
        fp->ipq_src = ip->ip_src;
        fp->ipq_dst = ip->ip_dst;
        fp->ipq_frags = m;
        m->m_nextpkt = NULL;
        goto done;
    }
    else
    {
        fp->ipq_nfrags++;
    }

/* Access the IP header a queued fragment saved in m_hdr.header above. */
#define GETIP(m) ((struct ip*)((m)->m_hdr.header))


    /*
     * Find a segment which begins after this one does.
     */
    for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
        if (GETIP(q)->ip_off > ip->ip_off)
            break;

    /*
     * If there is a preceding segment, it may provide some of
     * our data already.  If so, drop the data from the incoming
     * segment.  If it provides all of our data, drop us, otherwise
     * stick new segment in the proper place.
     *
     * If some of the data is dropped from the preceding
     * segment, then its checksum is invalidated.
     */
    if (p)
    {
        i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
        if (i > 0)
        {
            if (i >= ip->ip_len)
                goto dropfrag;          /* fully covered by predecessor */
            m_adj(m, i);                /* trim the overlapping head */
            ip->ip_off += i;
            ip->ip_len -= i;
        }
        m->m_nextpkt = p->m_nextpkt;
        p->m_nextpkt = m;
    }
    else
    {
        m->m_nextpkt = fp->ipq_frags;
        fp->ipq_frags = m;
    }

    /*
     * While we overlap succeeding segments trim them or,
     * if they are completely covered, dequeue them.
     */
    for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
         q = nq)
    {
        i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
        if (i < GETIP(q)->ip_len)
        {
            /* partial overlap: trim the head of the successor */
            GETIP(q)->ip_len -= i;
            GETIP(q)->ip_off += i;
            m_adj(q, i);
            break;
        }
        /* complete overlap: unlink and free the covered successor */
        nq = q->m_nextpkt;
        m->m_nextpkt = nq;
        ipstat.ips_fragdropped++;
        fp->ipq_nfrags--;
        m_freem(pData, q);
    }

    /*
     * Check for complete reassembly and perform frag per packet
     * limiting.
     *
     * Frag limiting is performed here so that the nth frag has
     * a chance to complete the packet before we drop the packet.
     * As a result, n+1 frags are actually allowed per packet, but
     * only n will ever be stored. (n = maxfragsperpacket.)
     *
     */
    next = 0;
    for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
    {
        if (GETIP(q)->ip_off != next)
        {
            /* hole in the datagram: reassembly not complete yet */
            if (fp->ipq_nfrags > maxfragsperpacket)
            {
                ipstat.ips_fragdropped += fp->ipq_nfrags;
                ip_freef(pData, head, fp);
            }
            goto done;
        }
        next += GETIP(q)->ip_len;
    }
    /* Make sure the last packet didn't have the IP_MF flag */
    if (p->m_flags & M_FRAG)
    {
        if (fp->ipq_nfrags > maxfragsperpacket)
        {
            ipstat.ips_fragdropped += fp->ipq_nfrags;
            ip_freef(pData, head, fp);
        }
        goto done;
    }

    /*
     * Reassembly is complete.  Make sure the packet is a sane size.
     */
    q = fp->ipq_frags;
    ip = GETIP(q);
    if (next + (ip->ip_hl << 2) > IP_MAXPACKET)
    {
        ipstat.ips_fragdropped += fp->ipq_nfrags;
        ip_freef(pData, head, fp);
        goto done;
    }

    /*
     * Concatenate fragments.
     */
    m = q;
#if 0
    t = m->m_next;
    m->m_next = NULL;
    m_cat(pData, m, t);
#endif
    nq = q->m_nextpkt;
    q->m_nextpkt = NULL;
    for (q = nq; q != NULL; q = nq)
    {
        nq = q->m_nextpkt;
        q->m_nextpkt = NULL;
        m_cat(pData, m, q);
    }

    /*
     * Create header for new ip packet by modifying header of first
     * packet;  dequeue and discard fragment reassembly header.
     * Make header visible.
     */
#if 0
    ip->ip_len = (ip->ip_hl << 2) + next;
#else
    /* ip_len is left as the payload length (header excluded), which is
     * what ip_input() produces on the non-fragmented path as well. */
    ip->ip_len = next;
#endif
    ip->ip_src = fp->ipq_src;
    ip->ip_dst = fp->ipq_dst;
    TAILQ_REMOVE(head, fp, ipq_list);
    nipq--;
    free(fp);

    /* Make the IP header visible again (undo the hlen skip above). */
    m->m_len += (ip->ip_hl << 2);
    m->m_data -= (ip->ip_hl << 2);
    /* some debugging cruft by sklower, below, will go away soon */
#if 0
    if (m->m_flags & M_PKTHDR)    /* XXX this should be done elsewhere */
        m_fixhdr(m);
#endif
    ipstat.ips_reassembled++;
    return (m);

dropfrag:
    ipstat.ips_fragdropped++;
    if (fp != NULL)
        fp->ipq_nfrags--;
    m_freem(pData, m);

done:
    return NULL;

#undef GETIP
}
495
496void
497ip_freef(PNATState pData, struct ipqhead *fhp, struct ipq_t *fp)
498{
499 struct mbuf *q;
500
501 while (fp->ipq_frags)
502 {
503 q = fp->ipq_frags;
504 fp->ipq_frags = q->m_nextpkt;
505 m_freem(pData, q);
506 }
507 TAILQ_REMOVE(fhp, fp, ipq_list);
508 free(fp);
509 nipq--;
510}
511
512/*
513 * IP timer processing;
514 * if a timer expires on a reassembly
515 * queue, discard it.
516 */
517void
518ip_slowtimo(PNATState pData)
519{
520 register struct ipq_t *fp;
521
522 /* XXX: the fragment expiration is the same but requier
523 * additional loop see (see ip_input.c in FreeBSD tree)
524 */
525 int i;
526 DEBUG_CALL("ip_slowtimo");
527 for (i = 0; i < IPREASS_NHASH; i++)
528 {
529 for(fp = TAILQ_FIRST(&ipq[i]); fp;)
530 {
531 struct ipq_t *fpp;
532
533 fpp = fp;
534 fp = TAILQ_NEXT(fp, ipq_list);
535 if(--fpp->ipq_ttl == 0) {
536 ipstat.ips_fragtimeout += fpp->ipq_nfrags;
537 ip_freef(pData, &ipq[i], fpp);
538 }
539 }
540 }
541 /*
542 * If we are over the maximum number of fragments
543 * (due to the limit being lowered), drain off
544 * enough to get down to the new limit.
545 */
546 if (maxnipq >= 0 && nipq > maxnipq)
547 {
548 for (i = 0; i < IPREASS_NHASH; i++)
549 {
550 while (nipq > maxnipq && !TAILQ_EMPTY(&ipq[i]))
551 {
552 ipstat.ips_fragdropped += TAILQ_FIRST(&ipq[i])->ipq_nfrags;
553 ip_freef(pData, &ipq[i], TAILQ_FIRST(&ipq[i]));
554 }
555 }
556 }
557}
558
559
560/*
561 * Strip out IP options, at higher
562 * level protocol in the kernel.
563 * Second argument is buffer to which options
564 * will be moved, and return value is their length.
565 * (XXX) should be deleted; last arg currently ignored.
566 */
567void
568ip_stripoptions(struct mbuf *m, struct mbuf *mopt)
569{
570 register int i;
571 struct ip *ip = mtod(m, struct ip *);
572 register caddr_t opts;
573 int olen;
574
575 olen = (ip->ip_hl<<2) - sizeof(struct ip);
576 opts = (caddr_t)(ip + 1);
577 i = m->m_len - (sizeof(struct ip) + olen);
578 memcpy(opts, opts + olen, (unsigned)i);
579 m->m_len -= olen;
580
581 ip->ip_hl = sizeof(struct ip) >> 2;
582}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette