VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/ip_input.c@ 21715

Last change on this file since 21715 was 21715, checked in by vboxsync, 16 years ago

NAT: MSVC is a C compiler when compiling C files, thus the profile counter
initializations lead to code like: int foo; foo = rdtsc(); which can only be
used once (since the code block has already started when the value is set, no more
variable declarations are allowed). Maybe the best approach would be to fix the macros to be
usable from C multiple times; just disabled for now.

  • Property svn:eol-style set to native
File size: 15.6 KB
Line 
1/*
2 * Copyright (c) 1982, 1986, 1988, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)ip_input.c 8.2 (Berkeley) 1/4/94
34 * ip_input.c,v 1.11 1994/11/16 10:17:08 jkh Exp
35 */
36
37/*
38 * Changes and additions relating to SLiRP are
39 * Copyright (c) 1995 Danny Gasparovski.
40 *
41 * Please read the file COPYRIGHT for the
42 * terms and conditions of the copyright.
43 */
44
45#include <slirp.h>
46#include "ip_icmp.h"
47#ifdef VBOX_WITH_SLIRP_ALIAS
48# include "alias.h"
49#endif
50
51
52/*
53 * IP initialization: fill in IP protocol switch table.
54 * All protocols not implemented in kernel go to raw IP protocol handler.
55 */
56void
57ip_init(PNATState pData)
58{
59 int i = 0;
60 for (i = 0; i < IPREASS_NHASH; ++i)
61 TAILQ_INIT(&ipq[i]);
62 maxnipq = 100; /* ??? */
63 maxfragsperpacket = 16;
64 nipq = 0;
65 ip_currid = tt.tv_sec & 0xffff;
66 udp_init(pData);
67 tcp_init(pData);
68}
69
70/*
71 * Ip input routine. Checksum and byte swap header. If fragmented
72 * try to reassemble. Process options. Pass to next level.
73 */
74void
75ip_input(PNATState pData, struct mbuf *m)
76{
77 register struct ip *ip;
78 int hlen = 0;
79 STAM_PROFILE_START(&pData->StatIP_input, a);
80#ifdef VBOX_WITH_SLIRP_ALIAS
81 //STAM_PROFILE_START(&pData->StatALIAS_input, b);
82#endif
83
84 DEBUG_CALL("ip_input");
85 DEBUG_ARG("m = %lx", (long)m);
86 ip = mtod(m, struct ip *);
87 Log2(("ip_dst=%R[IP4](len:%d) m_len = %d", &ip->ip_dst, ntohs(ip->ip_len), m->m_len));
88 Log2(("ip_dst=%R[IP4](len:%d) m_len = %d\n", &ip->ip_dst, ntohs(ip->ip_len), m->m_len));
89
90 ipstat.ips_total++;
91#ifdef VBOX_WITH_SLIRP_ALIAS
92 {
93 int rc;
94 rc = LibAliasIn(m->m_la ? m->m_la : pData->proxy_alias, mtod(m, char *),
95 m->m_len);
96 //STAM_PROFILE_STOP(&pData->StatALIAS_input, b);
97 Log2(("NAT: LibAlias return %d\n", rc));
98 }
99#endif
100
101 if (m->m_len < sizeof(struct ip))
102 {
103 ipstat.ips_toosmall++;
104 STAM_PROFILE_STOP(&pData->StatIP_input, a);
105 return;
106 }
107
108 ip = mtod(m, struct ip *);
109 if (ip->ip_v != IPVERSION)
110 {
111 ipstat.ips_badvers++;
112 goto bad;
113 }
114
115 hlen = ip->ip_hl << 2;
116 if ( hlen < sizeof(struct ip)
117 || hlen > m->m_len)
118 {
119 /* min header length */
120 ipstat.ips_badhlen++; /* or packet too short */
121 goto bad;
122 }
123
124 /* keep ip header intact for ICMP reply
125 * ip->ip_sum = cksum(m, hlen);
126 * if (ip->ip_sum) {
127 */
128 if (cksum(m, hlen))
129 {
130 ipstat.ips_badsum++;
131 goto bad;
132 }
133
134 /*
135 * Convert fields to host representation.
136 */
137 NTOHS(ip->ip_len);
138 if (ip->ip_len < hlen)
139 {
140 ipstat.ips_badlen++;
141 goto bad;
142 }
143 NTOHS(ip->ip_id);
144 NTOHS(ip->ip_off);
145
146 /*
147 * Check that the amount of data in the buffers
148 * is as at least much as the IP header would have us expect.
149 * Trim mbufs if longer than we expect.
150 * Drop packet if shorter than we expect.
151 */
152 if (m->m_len < ip->ip_len)
153 {
154 ipstat.ips_tooshort++;
155 goto bad;
156 }
157 /* Should drop packet if mbuf too long? hmmm... */
158 if (m->m_len > ip->ip_len)
159 m_adj(m, ip->ip_len - m->m_len);
160
161 /* check ip_ttl for a correct ICMP reply */
162 if (ip->ip_ttl==0 || ip->ip_ttl == 1)
163 {
164 icmp_error(pData, m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, 0, "ttl");
165 goto bad;
166 }
167
168 ip->ip_ttl--;
169 /*
170 * If offset or IP_MF are set, must reassemble.
171 * Otherwise, nothing need be done.
172 * (We could look in the reassembly queue to see
173 * if the packet was previously fragmented,
174 * but it's not worth the time; just let them time out.)
175 *
176 * XXX This should fail, don't fragment yet
177 */
178 if (ip->ip_off & (IP_MF | IP_OFFMASK))
179 {
180 m = ip_reass(pData, m);
181 if (m == NULL)
182 {
183 STAM_PROFILE_STOP(&pData->StatIP_input, a);
184 return;
185 }
186 ip = mtod(m, struct ip *);
187 hlen = ip->ip_len;
188 }
189 else
190 ip->ip_len -= hlen;
191
192 /*
193 * Switch out to protocol's input routine.
194 */
195 ipstat.ips_delivered++;
196 switch (ip->ip_p)
197 {
198 case IPPROTO_TCP:
199 tcp_input(pData, m, hlen, (struct socket *)NULL);
200 break;
201 case IPPROTO_UDP:
202 udp_input(pData, m, hlen);
203 break;
204 case IPPROTO_ICMP:
205 icmp_input(pData, m, hlen);
206 break;
207 default:
208 ipstat.ips_noproto++;
209 m_free(pData, m);
210 }
211 STAM_PROFILE_STOP(&pData->StatIP_input, a);
212 return;
213bad:
214 Log2(("NAT: IP datagram to %R[IP4] with size(%d) claimed as bad\n",
215 &ip->ip_dst, ip->ip_len));
216 m_freem(pData, m);
217 STAM_PROFILE_STOP(&pData->StatIP_input, a);
218 return;
219}
220
/*
 * Take an incoming datagram fragment and try to reassemble it into a
 * whole datagram.  If this fragment completes the datagram, the
 * reassembled mbuf chain is returned (header made visible again,
 * ip_len set to the payload length); otherwise the fragment is filed
 * in its reassembly queue and NULL is returned.  The mbuf is consumed
 * on the drop paths.
 */
struct mbuf *
ip_reass(PNATState pData, struct mbuf* m)
{
    struct ip *ip;
    struct mbuf *p, *q, *nq;
    struct ipq_t *fp = NULL;
    struct ipqhead *head;
    int i, hlen, next;
    u_short hash;

    /* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
    if (   maxnipq == 0
        || maxfragsperpacket == 0)
    {
        ipstat.ips_fragments++;
        ipstat.ips_fragdropped++;
        m_freem(pData, m);
        return (NULL);
    }

    ip = mtod(m, struct ip *);
    hlen = ip->ip_hl << 2;

    /* Hash on source address and IP id to pick the reassembly bucket. */
    hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
    head = &ipq[hash];

    /*
     * Look for queue of fragments
     * of this datagram.
     */
    TAILQ_FOREACH(fp, head, ipq_list)
        if (ip->ip_id == fp->ipq_id &&
            ip->ip_src.s_addr == fp->ipq_src.s_addr &&
            ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
            ip->ip_p == fp->ipq_p)
            goto found;

    fp = NULL;

    /*
     * Attempt to trim the number of allocated fragment queues if it
     * exceeds the administrative limit.
     */
    if ((nipq > maxnipq) && (maxnipq > 0))
    {
        /*
         * drop something from the tail of the current queue
         * before proceeding further
         */
        struct ipq_t *q = TAILQ_LAST(head, ipqhead);
        if (q == NULL)
        {
            /* gak -- this bucket is empty; evict from whichever bucket
             * has an entry instead. */
            for (i = 0; i < IPREASS_NHASH; i++)
            {
                struct ipq_t *r = TAILQ_LAST(&ipq[i], ipqhead);
                if (r)
                {
                    ipstat.ips_fragtimeout += r->ipq_nfrags;
                    ip_freef(pData, &ipq[i], r);
                    break;
                }
            }
        }
        else
        {
            ipstat.ips_fragtimeout += q->ipq_nfrags;
            ip_freef(pData, head, q);
        }
    }

found:
    /*
     * Adjust ip_len to not reflect header,
     * convert offset of this to bytes.
     */
    ip->ip_len -= hlen;
    if (ip->ip_off & IP_MF)
    {
        /*
         * Make sure that fragments have a data length
         * that's a non-zero multiple of 8 bytes.
         */
        if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0)
        {
            ipstat.ips_toosmall++; /* XXX */
            goto dropfrag;
        }
        m->m_flags |= M_FRAG;
    }
    else
        m->m_flags &= ~M_FRAG;
    ip->ip_off <<= 3;       /* fragment offset field is in 8-byte units */


    /*
     * Attempt reassembly; if it succeeds, proceed.
     * ip_reass() will return a different mbuf.
     */
    ipstat.ips_fragments++;

    /* Previous ip_reass() started here. */
    /*
     * Presence of header sizes in mbufs
     * would confuse code below.
     */
    m->m_data += hlen;
    m->m_len -= hlen;

    /*
     * If first fragment to arrive, create a reassembly queue.
     */
    if (fp == NULL)
    {
        fp = RTMemAlloc(sizeof(struct ipq_t));
        if (fp == NULL)
            goto dropfrag;
        TAILQ_INSERT_HEAD(head, fp, ipq_list);
        nipq++;
        fp->ipq_nfrags = 1;
        fp->ipq_ttl = IPFRAGTTL;
        fp->ipq_p = ip->ip_p;
        fp->ipq_id = ip->ip_id;
        fp->ipq_src = ip->ip_src;
        fp->ipq_dst = ip->ip_dst;
        fp->ipq_frags = m;
        m->m_nextpkt = NULL;
        goto done;
    }
    else
    {
        fp->ipq_nfrags++;
    }

/* Each queued fragment keeps its (hidden) IP header just in front of
 * m_data; GETIP() recovers it. */
#define GETIP(m) ((struct ip*)(MBUF_IP_HEADER(m)))


    /*
     * Find a segment which begins after this one does.
     */
    for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
        if (GETIP(q)->ip_off > ip->ip_off)
            break;

    /*
     * If there is a preceding segment, it may provide some of
     * our data already.  If so, drop the data from the incoming
     * segment.  If it provides all of our data, drop us, otherwise
     * stick new segment in the proper place.
     *
     * If some of the data is dropped from the preceding
     * segment, then its checksum is invalidated.
     */
    if (p)
    {
        i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
        if (i > 0)
        {
            if (i >= ip->ip_len)
                goto dropfrag;      /* fully covered by the predecessor */
            m_adj(m, i);            /* trim the overlapping head */
            ip->ip_off += i;
            ip->ip_len -= i;
        }
        m->m_nextpkt = p->m_nextpkt;
        p->m_nextpkt = m;
    }
    else
    {
        m->m_nextpkt = fp->ipq_frags;
        fp->ipq_frags = m;
    }

    /*
     * While we overlap succeeding segments trim them or,
     * if they are completely covered, dequeue them.
     */
    for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
         q = nq)
    {
        i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
        if (i < GETIP(q)->ip_len)
        {
            /* Partial overlap: trim the head of the successor and stop. */
            GETIP(q)->ip_len -= i;
            GETIP(q)->ip_off += i;
            m_adj(q, i);
            break;
        }
        /* Successor completely covered: unlink and free it. */
        nq = q->m_nextpkt;
        m->m_nextpkt = nq;
        ipstat.ips_fragdropped++;
        fp->ipq_nfrags--;
        m_freem(pData, q);
    }

    /*
     * Check for complete reassembly and perform frag per packet
     * limiting.
     *
     * Frag limiting is performed here so that the nth frag has
     * a chance to complete the packet before we drop the packet.
     * As a result, n+1 frags are actually allowed per packet, but
     * only n will ever be stored. (n = maxfragsperpacket.)
     *
     */
    next = 0;
    for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
    {
        if (GETIP(q)->ip_off != next)
        {
            /* Gap in the chain: not complete yet; enforce the limit. */
            if (fp->ipq_nfrags > maxfragsperpacket)
            {
                ipstat.ips_fragdropped += fp->ipq_nfrags;
                ip_freef(pData, head, fp);
            }
            goto done;
        }
        next += GETIP(q)->ip_len;
    }
    /* Make sure the last packet didn't have the IP_MF flag */
    if (p->m_flags & M_FRAG)
    {
        if (fp->ipq_nfrags > maxfragsperpacket)
        {
            ipstat.ips_fragdropped += fp->ipq_nfrags;
            ip_freef(pData, head, fp);
        }
        goto done;
    }

    /*
     * Reassembly is complete.  Make sure the packet is a sane size.
     */
    q = fp->ipq_frags;
    ip = GETIP(q);
    if (next + (ip->ip_hl << 2) > IP_MAXPACKET)
    {
        ipstat.ips_fragdropped += fp->ipq_nfrags;
        ip_freef(pData, head, fp);
        goto done;
    }

    /*
     * Concatenate fragments.
     */
    m = q;
    nq = q->m_nextpkt;
    q->m_nextpkt = NULL;
    for (q = nq; q != NULL; q = nq)
    {
        nq = q->m_nextpkt;
        q->m_nextpkt = NULL;
        m_cat(pData, m, q);
    }

    /*
     * Create header for new ip packet by modifying header of first
     * packet; dequeue and discard fragment reassembly header.
     * Make header visible.
     */
#if 0
    ip->ip_len = (ip->ip_hl << 2) + next;
#else
    ip->ip_len = next;
#endif
    ip->ip_src = fp->ipq_src;
    ip->ip_dst = fp->ipq_dst;
    TAILQ_REMOVE(head, fp, ipq_list);
    nipq--;
    RTMemFree(fp);

    /* Make the IP header visible again in the first mbuf. */
    m->m_len += (ip->ip_hl << 2);
    m->m_data -= (ip->ip_hl << 2);
    /* some debugging cruft by sklower, below, will go away soon */
#if 0
    if (m->m_flags & M_PKTHDR)      /* XXX this should be done elsewhere */
        m_fixhdr(m);
#endif
    ipstat.ips_reassembled++;
    return (m);

dropfrag:
    ipstat.ips_fragdropped++;
    if (fp != NULL)
        fp->ipq_nfrags--;
    m_freem(pData, m);

done:
    return NULL;

#undef GETIP
}
513
514void
515ip_freef(PNATState pData, struct ipqhead *fhp, struct ipq_t *fp)
516{
517 struct mbuf *q;
518
519 while (fp->ipq_frags)
520 {
521 q = fp->ipq_frags;
522 fp->ipq_frags = q->m_nextpkt;
523 m_freem(pData, q);
524 }
525 TAILQ_REMOVE(fhp, fp, ipq_list);
526 RTMemFree(fp);
527 nipq--;
528}
529
530/*
531 * IP timer processing;
532 * if a timer expires on a reassembly
533 * queue, discard it.
534 */
535void
536ip_slowtimo(PNATState pData)
537{
538 register struct ipq_t *fp;
539
540 /* XXX: the fragment expiration is the same but requier
541 * additional loop see (see ip_input.c in FreeBSD tree)
542 */
543 int i;
544 DEBUG_CALL("ip_slowtimo");
545 for (i = 0; i < IPREASS_NHASH; i++)
546 {
547 for(fp = TAILQ_FIRST(&ipq[i]); fp;)
548 {
549 struct ipq_t *fpp;
550
551 fpp = fp;
552 fp = TAILQ_NEXT(fp, ipq_list);
553 if(--fpp->ipq_ttl == 0)
554 {
555 ipstat.ips_fragtimeout += fpp->ipq_nfrags;
556 ip_freef(pData, &ipq[i], fpp);
557 }
558 }
559 }
560 /*
561 * If we are over the maximum number of fragments
562 * (due to the limit being lowered), drain off
563 * enough to get down to the new limit.
564 */
565 if (maxnipq >= 0 && nipq > maxnipq)
566 {
567 for (i = 0; i < IPREASS_NHASH; i++)
568 {
569 while (nipq > maxnipq && !TAILQ_EMPTY(&ipq[i]))
570 {
571 ipstat.ips_fragdropped += TAILQ_FIRST(&ipq[i])->ipq_nfrags;
572 ip_freef(pData, &ipq[i], TAILQ_FIRST(&ipq[i]));
573 }
574 }
575 }
576}
577
578
579/*
580 * Strip out IP options, at higher
581 * level protocol in the kernel.
582 * Second argument is buffer to which options
583 * will be moved, and return value is their length.
584 * (XXX) should be deleted; last arg currently ignored.
585 */
586void
587ip_stripoptions(struct mbuf *m, struct mbuf *mopt)
588{
589 register int i;
590 struct ip *ip = mtod(m, struct ip *);
591 register caddr_t opts;
592 int olen;
593
594 olen = (ip->ip_hl<<2) - sizeof(struct ip);
595 opts = (caddr_t)(ip + 1);
596 i = m->m_len - (sizeof(struct ip) + olen);
597 memcpy(opts, opts + olen, (unsigned)i);
598 m->m_len -= olen;
599
600 ip->ip_hl = sizeof(struct ip) >> 2;
601}
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette