VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/ip_input.c @ 1052

Last change on this file since 1052 was 1048, checked in by vboxsync, 18 years ago

slirp insque/remque fixes for amd64

  • Property svn:eol-style set to native
File size: 20.0 KB
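
The changeset comment above refers to the way slirp links its queue structures: the next/prev fields (see ipq.next, ipf_next and the insque_32/remque_32 and ptr_to_u32/u32_to_ptr calls throughout the listing below) are stored as 32-bit values rather than raw pointers, which is what needed fixing for 64-bit (amd64) hosts. A minimal sketch of that idea, assuming an offset-from-a-base-address encoding; the NATState layout and the *_sketch helper names are hypothetical illustrations, not the actual VirtualBox implementation:

#include <stdint.h>

/* Hypothetical NAT state carrying a base address for the 32-bit encoding. */
typedef struct NATState { char *base; } NATState, *PNATState;

/* Encode a host pointer as a 32-bit offset so it fits the 32-bit
 * next/prev fields that insque_32()/remque_32() manipulate. */
static inline uint32_t ptr_to_u32_sketch(PNATState pData, void *p)
{
    return (uint32_t)((char *)p - pData->base);
}

/* Decode a 32-bit offset back into a host pointer. */
static inline void *u32_to_ptr_sketch(PNATState pData, uint32_t off)
{
    return pData->base + off;
}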
/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *      The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)ip_input.c  8.2 (Berkeley) 1/4/94
 * ip_input.c,v 1.11 1994/11/16 10:17:08 jkh Exp
 */

/*
 * Changes and additions relating to SLiRP are
 * Copyright (c) 1995 Danny Gasparovski.
 *
 * Please read the file COPYRIGHT for the
 * terms and conditions of the copyright.
 */

#include <slirp.h>
#include "ip_icmp.h"

#ifndef VBOX
int ip_defttl;
struct ipstat ipstat;
struct ipq ipq;
#endif /* !VBOX */

/*
 * IP initialization: fill in IP protocol switch table.
 * All protocols not implemented in kernel go to raw IP protocol handler.
 */
void
#ifdef VBOX
ip_init(PNATState pData)
#else /* !VBOX */
ip_init()
#endif /* !VBOX */
{
    ipq.next = ipq.prev = ptr_to_u32(pData, &ipq);
#ifdef VBOX
    ip_currid = tt.tv_sec & 0xffff;
    udp_init(pData);
    tcp_init(pData);
#else /* !VBOX */
    ip_id = tt.tv_sec & 0xffff;
    udp_init();
    tcp_init();
#endif /* !VBOX */
#ifndef VBOX
    ip_defttl = IPDEFTTL;
#endif /* !VBOX */
}

/*
 * Ip input routine. Checksum and byte swap header. If fragmented
 * try to reassemble. Process options. Pass to next level.
 */
void
#ifdef VBOX
ip_input(PNATState pData, struct mbuf *m)
#else /* !VBOX */
ip_input(m)
    struct mbuf *m;
#endif /* !VBOX */
{
    register struct ip *ip;
    int hlen;

    DEBUG_CALL("ip_input");
    DEBUG_ARG("m = %lx", (long)m);
    DEBUG_ARG("m_len = %d", m->m_len);

    ipstat.ips_total++;

    if (m->m_len < sizeof (struct ip)) {
        ipstat.ips_toosmall++;
        return;
    }

    ip = mtod(m, struct ip *);

    if (ip->ip_v != IPVERSION) {
        ipstat.ips_badvers++;
        goto bad;
    }

    hlen = ip->ip_hl << 2;
    if (hlen < sizeof(struct ip) || hlen > m->m_len) { /* min header length */
        ipstat.ips_badhlen++;                          /* or packet too short */
        goto bad;
    }

    /* keep ip header intact for ICMP reply
     * ip->ip_sum = cksum(m, hlen);
     * if (ip->ip_sum) {
     */
    if (cksum(m, hlen)) {
        ipstat.ips_badsum++;
        goto bad;
    }

    /*
     * Convert fields to host representation.
     */
    NTOHS(ip->ip_len);
    if (ip->ip_len < hlen) {
        ipstat.ips_badlen++;
        goto bad;
    }
    NTOHS(ip->ip_id);
    NTOHS(ip->ip_off);

    /*
     * Check that the amount of data in the buffers
     * is at least as much as the IP header would have us expect.
     * Trim mbufs if longer than we expect.
     * Drop packet if shorter than we expect.
     */
    if (m->m_len < ip->ip_len) {
        ipstat.ips_tooshort++;
        goto bad;
    }
    /* Should drop packet if mbuf too long? hmmm... */
    if (m->m_len > ip->ip_len)
        m_adj(m, ip->ip_len - m->m_len);

    /* check ip_ttl for a correct ICMP reply */
    if (ip->ip_ttl == 0 || ip->ip_ttl == 1) {
#ifdef VBOX
        icmp_error(pData, m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, 0, "ttl");
#else /* !VBOX */
        icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, 0, "ttl");
#endif /* !VBOX */
        goto bad;
    }

    /*
     * Process options and, if not destined for us,
     * ship it on. ip_dooptions returns 1 when an
     * error was detected (causing an icmp message
     * to be sent and the original packet to be freed).
     */
/* We do no IP options */
/*  if (hlen > sizeof (struct ip) && ip_dooptions(m))
 *      goto next;
 */
    /*
     * If offset or IP_MF are set, must reassemble.
     * Otherwise, nothing need be done.
     * (We could look in the reassembly queue to see
     * if the packet was previously fragmented,
     * but it's not worth the time; just let them time out.)
     *
     * XXX This should fail, don't fragment yet
     */
    if (ip->ip_off &~ IP_DF) {
        register struct ipq_t *fp;
        /*
         * Look for queue of fragments
         * of this datagram.
         */
        for (fp = u32_to_ptr(pData, ipq.next, struct ipq_t *); fp != &ipq;
             fp = u32_to_ptr(pData, fp->next, struct ipq_t *))
            if (ip->ip_id == fp->ipq_id &&
                ip->ip_src.s_addr == fp->ipq_src.s_addr &&
                ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
                ip->ip_p == fp->ipq_p)
                goto found;
        fp = 0;
    found:

        /*
         * Adjust ip_len to not reflect header,
         * set ip_mff if more fragments are expected,
         * convert offset of this to bytes.
         */
        ip->ip_len -= hlen;
        if (ip->ip_off & IP_MF)
            ((struct ipasfrag *)ip)->ipf_mff |= 1;
        else
            ((struct ipasfrag *)ip)->ipf_mff &= ~1;

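        /*
         * ip_off is carried on the wire in units of 8 octets (RFC 791);
         * the shift below turns it into a byte offset for reassembly.
         */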
        ip->ip_off <<= 3;

        /*
         * If datagram marked as having more fragments
         * or if this is not the first fragment,
         * attempt reassembly; if it succeeds, proceed.
         */
        if (((struct ipasfrag *)ip)->ipf_mff & 1 || ip->ip_off) {
            ipstat.ips_fragments++;
#ifdef VBOX
            ip = ip_reass(pData, (struct ipasfrag *)ip, fp);
#else /* !VBOX */
            ip = ip_reass((struct ipasfrag *)ip, fp);
#endif /* !VBOX */
            if (ip == 0)
                return;
            ipstat.ips_reassembled++;
#ifdef VBOX
            m = dtom(pData, ip);
#else /* !VBOX */
            m = dtom(ip);
#endif /* !VBOX */
        } else
            if (fp)
#ifdef VBOX
                ip_freef(pData, fp);
#else /* !VBOX */
                ip_freef(fp);
#endif /* !VBOX */

    } else
        ip->ip_len -= hlen;

    /*
     * Switch out to protocol's input routine.
     */
    ipstat.ips_delivered++;
    switch (ip->ip_p) {
    case IPPROTO_TCP:
#ifdef VBOX
        tcp_input(pData, m, hlen, (struct socket *)NULL);
#else /* !VBOX */
        tcp_input(m, hlen, (struct socket *)NULL);
#endif /* !VBOX */
        break;
    case IPPROTO_UDP:
#ifdef VBOX
        udp_input(pData, m, hlen);
#else /* !VBOX */
        udp_input(m, hlen);
#endif /* !VBOX */
        break;
    case IPPROTO_ICMP:
#ifdef VBOX
        icmp_input(pData, m, hlen);
#else /* !VBOX */
        icmp_input(m, hlen);
#endif /* !VBOX */
        break;
    default:
        ipstat.ips_noproto++;
#ifdef VBOX
        m_free(pData, m);
#else /* !VBOX */
        m_free(m);
#endif /* !VBOX */
    }
    return;
bad:
#ifdef VBOX
    m_freem(pData, m);
#else /* !VBOX */
    m_freem(m);
#endif /* !VBOX */
    return;
}

/*
 * Take incoming datagram fragment and try to
 * reassemble it into whole datagram. If a chain for
 * reassembly of this datagram already exists, then it
 * is given as fp; otherwise have to make a chain.
 */
struct ip *
#ifdef VBOX
ip_reass(PNATState pData, register struct ipasfrag *ip, register struct ipq_t *fp)
#else /* !VBOX */
ip_reass(ip, fp)
    register struct ipasfrag *ip;
    register struct ipq_t *fp;
#endif /* !VBOX */
{
#ifdef VBOX
    register struct mbuf *m = dtom(pData, ip);
#else /* !VBOX */
    register struct mbuf *m = dtom(ip);
#endif /* !VBOX */
    register struct ipasfrag *q;
    int hlen = ip->ip_hl << 2;
    int i, next;

    DEBUG_CALL("ip_reass");
    DEBUG_ARG("ip = %lx", (long)ip);
    DEBUG_ARG("fp = %lx", (long)fp);
    DEBUG_ARG("m = %lx", (long)m);

    /*
     * Presence of header sizes in mbufs
     * would confuse code below.
     * Fragment m_data is concatenated.
     */
    m->m_data += hlen;
    m->m_len -= hlen;

    /*
     * If first fragment to arrive, create a reassembly queue.
     */
    if (fp == 0) {
        struct mbuf *t;
#ifdef VBOX
        if ((t = m_get(pData)) == NULL) goto dropfrag;
#else /* !VBOX */
        if ((t = m_get()) == NULL) goto dropfrag;
#endif /* !VBOX */
        fp = mtod(t, struct ipq_t *);
        insque_32(pData, fp, &ipq);
        fp->ipq_ttl = IPFRAGTTL;
        fp->ipq_p = ip->ip_p;
        fp->ipq_id = ip->ip_id;
        fp->ipq_next = fp->ipq_prev = ptr_to_u32(pData, (struct ipasfrag *)fp);
        fp->ipq_src = ((struct ip *)ip)->ip_src;
        fp->ipq_dst = ((struct ip *)ip)->ip_dst;
        q = (struct ipasfrag *)fp;
        goto insert;
    }

    /*
     * Find a segment which begins after this one does.
     */
    for (q = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *); q != (struct ipasfrag *)fp;
         q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *))
        if (q->ip_off > ip->ip_off)
            break;

    /*
     * If there is a preceding segment, it may provide some of
     * our data already. If so, drop the data from the incoming
     * segment. If it provides all of our data, drop us.
     */
    if (u32_to_ptr(pData, q->ipf_prev, struct ipq_t *) != fp) {
        i = (u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *))->ip_off +
            (u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *))->ip_len - ip->ip_off;
        if (i > 0) {
            if (i >= ip->ip_len)
                goto dropfrag;
#ifdef VBOX
            m_adj(dtom(pData, ip), i);
#else /* !VBOX */
            m_adj(dtom(ip), i);
#endif /* !VBOX */
            ip->ip_off += i;
            ip->ip_len -= i;
        }
    }

    /*
     * While we overlap succeeding segments trim them or,
     * if they are completely covered, dequeue them.
     */
    while (q != (struct ipasfrag *)fp && ip->ip_off + ip->ip_len > q->ip_off) {
        i = (ip->ip_off + ip->ip_len) - q->ip_off;
        if (i < q->ip_len) {
            q->ip_len -= i;
            q->ip_off += i;
#ifdef VBOX
            m_adj(dtom(pData, q), i);
#else /* !VBOX */
            m_adj(dtom(q), i);
#endif /* !VBOX */
            break;
        }
        q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *);
        m_freem(pData, dtom(pData, u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *)));
        ip_deq(pData, u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *));
    }

insert:
    /*
     * Stick new segment in its place;
     * check for complete reassembly.
     */
    ip_enq(pData, ip, u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *));
    next = 0;
    for (q = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *); q != (struct ipasfrag *)fp;
         q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *)) {
        if (q->ip_off != next)
            return (0);
        next += q->ip_len;
    }
    if (u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *)->ipf_mff & 1)
        return (0);

    /*
     * Reassembly is complete; concatenate fragments.
     */
    q = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *);
#ifdef VBOX
    m = dtom(pData, q);
#else /* !VBOX */
    m = dtom(q);
#endif /* !VBOX */

    q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *);
    while (q != (struct ipasfrag *)fp) {
        struct mbuf *t;
#ifdef VBOX
        t = dtom(pData, q);
#else /* !VBOX */
        t = dtom(q);
#endif /* !VBOX */
        q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *);
#ifdef VBOX
        m_cat(pData, m, t);
#else /* !VBOX */
        m_cat(m, t);
#endif /* !VBOX */
    }

    /*
     * Create header for new ip packet by
     * modifying header of first packet;
     * dequeue and discard fragment reassembly header.
     * Make header visible.
     */
    ip = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *);

    /*
     * If the fragments concatenated to an mbuf that's
     * bigger than the total size of the fragment, then an
     * m_ext buffer was alloced. But fp->ipq_next points to
     * the old buffer (in the mbuf), so we must point ip
     * into the new buffer.
     */
    if (m->m_flags & M_EXT) {
        int delta;
        delta = (char *)ip - m->m_dat;
        ip = (struct ipasfrag *)(m->m_ext + delta);
    }

    /* DEBUG_ARG("ip = %lx", (long)ip);
     * ip=(struct ipasfrag *)m->m_data; */

    ip->ip_len = next;
    ip->ipf_mff &= ~1;
    ((struct ip *)ip)->ip_src = fp->ipq_src;
    ((struct ip *)ip)->ip_dst = fp->ipq_dst;
    remque_32(pData, fp);
#ifdef VBOX
    (void) m_free(pData, dtom(pData, fp));
    m = dtom(pData, ip);
#else /* !VBOX */
    (void) m_free(dtom(fp));
    m = dtom(ip);
#endif /* !VBOX */
    m->m_len += (ip->ip_hl << 2);
    m->m_data -= (ip->ip_hl << 2);

    return ((struct ip *)ip);

dropfrag:
    ipstat.ips_fragdropped++;
#ifdef VBOX
    m_freem(pData, m);
#else /* !VBOX */
    m_freem(m);
#endif /* !VBOX */
    return (0);
}

/*
 * Free a fragment reassembly header and all
 * associated datagrams.
 */
void
#ifdef VBOX
ip_freef(PNATState pData, struct ipq_t *fp)
#else /* !VBOX */
ip_freef(fp)
    struct ipq_t *fp;
#endif /* !VBOX */
{
    register struct ipasfrag *q, *p;

    for (q = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *); q != (struct ipasfrag *)fp;
         q = p) {
        p = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *);
        ip_deq(pData, q);
#ifdef VBOX
        m_freem(pData, dtom(pData, q));
#else /* !VBOX */
        m_freem(dtom(q));
#endif /* !VBOX */
    }
    remque_32(pData, fp);
#ifdef VBOX
    (void) m_free(pData, dtom(pData, fp));
#else /* !VBOX */
    (void) m_free(dtom(fp));
#endif /* !VBOX */
}

/*
 * Put an ip fragment on a reassembly chain.
 * Like insque, but pointers in middle of structure.
 */
void
ip_enq(PNATState pData, register struct ipasfrag *p, register struct ipasfrag *prev)
{
    DEBUG_CALL("ip_enq");
    DEBUG_ARG("prev = %lx", (long)prev);
    p->ipf_prev = ptr_to_u32(pData, prev);
    p->ipf_next = prev->ipf_next;
    u32_to_ptr(pData, prev->ipf_next, struct ipasfrag *)->ipf_prev = ptr_to_u32(pData, p);
    prev->ipf_next = ptr_to_u32(pData, p);
}

/*
 * To ip_enq as remque is to insque.
 */
void
ip_deq(PNATState pData, register struct ipasfrag *p)
{
    struct ipasfrag *prev = u32_to_ptr(pData, p->ipf_prev, struct ipasfrag *);
    struct ipasfrag *next = u32_to_ptr(pData, p->ipf_next, struct ipasfrag *);
    u32ptr_done(pData, prev->ipf_next, p);
    prev->ipf_next = p->ipf_next;
    next->ipf_prev = p->ipf_prev;
}

/*
 * IP timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 */
void
#ifdef VBOX
ip_slowtimo(PNATState pData)
#else /* !VBOX */
ip_slowtimo()
#endif /* !VBOX */
{
    register struct ipq_t *fp;

    DEBUG_CALL("ip_slowtimo");

    fp = u32_to_ptr(pData, ipq.next, struct ipq_t *);
    if (fp == 0)
        return;

    while (fp != &ipq) {
        --fp->ipq_ttl;
        fp = u32_to_ptr(pData, fp->next, struct ipq_t *);
        if (u32_to_ptr(pData, fp->prev, struct ipq_t *)->ipq_ttl == 0) {
            ipstat.ips_fragtimeout++;
            ip_freef(pData, u32_to_ptr(pData, fp->prev, struct ipq_t *));
        }
    }
}

/*
 * Do option processing on a datagram,
 * possibly discarding it if bad options are encountered,
 * or forwarding it if source-routed.
 * Returns 1 if packet has been forwarded/freed,
 * 0 if the packet should be processed further.
 */

#ifdef notdef

int
ip_dooptions(m)
    struct mbuf *m;
{
    register struct ip *ip = mtod(m, struct ip *);
    register u_char *cp;
    register struct ip_timestamp *ipt;
    register struct in_ifaddr *ia;
/*  int opt, optlen, cnt, off, code, type = ICMP_PARAMPROB, forward = 0; */
    int opt, optlen, cnt, off, code, type, forward = 0;
    struct in_addr *sin, dst;
    typedef u_int32_t n_time;
    n_time ntime;

    dst = ip->ip_dst;
    cp = (u_char *)(ip + 1);
    cnt = (ip->ip_hl << 2) - sizeof (struct ip);
    for (; cnt > 0; cnt -= optlen, cp += optlen) {
        opt = cp[IPOPT_OPTVAL];
        if (opt == IPOPT_EOL)
            break;
        if (opt == IPOPT_NOP)
            optlen = 1;
        else {
            optlen = cp[IPOPT_OLEN];
            if (optlen <= 0 || optlen > cnt) {
                code = &cp[IPOPT_OLEN] - (u_char *)ip;
                goto bad;
            }
        }
        switch (opt) {

        default:
            break;

        /*
         * Source routing with record.
         * Find interface with current destination address.
         * If none on this machine then drop if strictly routed,
         * or do nothing if loosely routed.
         * Record interface address and bring up next address
         * component. If strictly routed make sure next
         * address is on directly accessible net.
         */
        case IPOPT_LSRR:
        case IPOPT_SSRR:
            if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
                code = &cp[IPOPT_OFFSET] - (u_char *)ip;
                goto bad;
            }
            ipaddr.sin_addr = ip->ip_dst;
            ia = (struct in_ifaddr *)
                ifa_ifwithaddr((struct sockaddr *)&ipaddr);
            if (ia == 0) {
                if (opt == IPOPT_SSRR) {
                    type = ICMP_UNREACH;
                    code = ICMP_UNREACH_SRCFAIL;
                    goto bad;
                }
                /*
                 * Loose routing, and not at next destination
                 * yet; nothing to do except forward.
                 */
                break;
            }
            off--;    /* 0 origin */
            if (off > optlen - sizeof(struct in_addr)) {
                /*
                 * End of source route. Should be for us.
                 */
                save_rte(cp, ip->ip_src);
                break;
            }
            /*
             * locate outgoing interface
             */
            bcopy((caddr_t)(cp + off), (caddr_t)&ipaddr.sin_addr,
                  sizeof(ipaddr.sin_addr));
            if (opt == IPOPT_SSRR) {
#define INA struct in_ifaddr *
#define SA struct sockaddr *
                if ((ia = (INA)ifa_ifwithdstaddr((SA)&ipaddr)) == 0)
                    ia = (INA)ifa_ifwithnet((SA)&ipaddr);
            } else
                ia = ip_rtaddr(ipaddr.sin_addr);
            if (ia == 0) {
                type = ICMP_UNREACH;
                code = ICMP_UNREACH_SRCFAIL;
                goto bad;
            }
            ip->ip_dst = ipaddr.sin_addr;
            bcopy((caddr_t)&(IA_SIN(ia)->sin_addr),
                  (caddr_t)(cp + off), sizeof(struct in_addr));
            cp[IPOPT_OFFSET] += sizeof(struct in_addr);
            /*
             * Let ip_intr's mcast routing check handle mcast pkts
             */
            forward = !IN_MULTICAST(ntohl(ip->ip_dst.s_addr));
            break;

        case IPOPT_RR:
            if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
                code = &cp[IPOPT_OFFSET] - (u_char *)ip;
                goto bad;
            }
            /*
             * If no space remains, ignore.
             */
            off--;    /* 0 origin */
            if (off > optlen - sizeof(struct in_addr))
                break;
            bcopy((caddr_t)(&ip->ip_dst), (caddr_t)&ipaddr.sin_addr,
                  sizeof(ipaddr.sin_addr));
            /*
             * locate outgoing interface; if we're the destination,
             * use the incoming interface (should be same).
             */
            if ((ia = (INA)ifa_ifwithaddr((SA)&ipaddr)) == 0 &&
                (ia = ip_rtaddr(ipaddr.sin_addr)) == 0) {
                type = ICMP_UNREACH;
                code = ICMP_UNREACH_HOST;
                goto bad;
            }
            bcopy((caddr_t)&(IA_SIN(ia)->sin_addr),
                  (caddr_t)(cp + off), sizeof(struct in_addr));
            cp[IPOPT_OFFSET] += sizeof(struct in_addr);
            break;

        case IPOPT_TS:
            code = cp - (u_char *)ip;
            ipt = (struct ip_timestamp *)cp;
            if (ipt->ipt_len < 5)
                goto bad;
            if (ipt->ipt_ptr > ipt->ipt_len - sizeof (int32_t)) {
                if (++ipt->ipt_oflw == 0)
                    goto bad;
                break;
            }
            sin = (struct in_addr *)(cp + ipt->ipt_ptr - 1);
            switch (ipt->ipt_flg) {

            case IPOPT_TS_TSONLY:
                break;

            case IPOPT_TS_TSANDADDR:
                if (ipt->ipt_ptr + sizeof(n_time) +
                    sizeof(struct in_addr) > ipt->ipt_len)
                    goto bad;
                ipaddr.sin_addr = dst;
                ia = (INA)ifaof_ifpforaddr((SA)&ipaddr,
                                           m->m_pkthdr.rcvif);
                if (ia == 0)
                    continue;
                bcopy((caddr_t)&IA_SIN(ia)->sin_addr,
                      (caddr_t)sin, sizeof(struct in_addr));
                ipt->ipt_ptr += sizeof(struct in_addr);
                break;

            case IPOPT_TS_PRESPEC:
                if (ipt->ipt_ptr + sizeof(n_time) +
                    sizeof(struct in_addr) > ipt->ipt_len)
                    goto bad;
                bcopy((caddr_t)sin, (caddr_t)&ipaddr.sin_addr,
                      sizeof(struct in_addr));
                if (ifa_ifwithaddr((SA)&ipaddr) == 0)
                    continue;
                ipt->ipt_ptr += sizeof(struct in_addr);
                break;

            default:
                goto bad;
            }
            ntime = iptime();
            bcopy((caddr_t)&ntime, (caddr_t)cp + ipt->ipt_ptr - 1,
                  sizeof(n_time));
            ipt->ipt_ptr += sizeof(n_time);
        }
    }
    if (forward) {
        ip_forward(m, 1);
        return (1);
    }
    }
    }
    return (0);
bad:
    /* ip->ip_len -= ip->ip_hl << 2;   XXX icmp_error adds in hdr length */

/* Not yet */
    icmp_error(m, type, code, 0, 0);

    ipstat.ips_badoptions++;
    return (1);
}

#endif /* notdef */

/*
 * Strip out IP options, at higher
 * level protocol in the kernel.
 * Second argument is buffer to which options
 * will be moved, and return value is their length.
 * (XXX) should be deleted; last arg currently ignored.
 */
void
ip_stripoptions(m, mopt)
    register struct mbuf *m;
    struct mbuf *mopt;
{
    register int i;
    struct ip *ip = mtod(m, struct ip *);
    register caddr_t opts;
    int olen;

    olen = (ip->ip_hl << 2) - sizeof (struct ip);
    opts = (caddr_t)(ip + 1);
    i = m->m_len - (sizeof (struct ip) + olen);
    memcpy(opts, opts + olen, (unsigned)i);
    m->m_len -= olen;

    ip->ip_hl = sizeof(struct ip) >> 2;
}