VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/ip_input.c@ 13704

Last change on this file since 13704 was 13704, checked in by vboxsync, 16 years ago

TCP sync was introduced

  • Property svn:eol-style set to native
File size: 19.4 KB
Line 
1/*
2 * Copyright (c) 1982, 1986, 1988, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)ip_input.c 8.2 (Berkeley) 1/4/94
34 * ip_input.c,v 1.11 1994/11/16 10:17:08 jkh Exp
35 */
36
37/*
38 * Changes and additions relating to SLiRP are
39 * Copyright (c) 1995 Danny Gasparovski.
40 *
41 * Please read the file COPYRIGHT for the
42 * terms and conditions of the copyright.
43 */
44
45#include <slirp.h>
46#include "ip_icmp.h"
47
48
49/*
50 * IP initialization: fill in IP protocol switch table.
51 * All protocols not implemented in kernel go to raw IP protocol handler.
52 */
53void
54ip_init(PNATState pData)
55{
56 ipq.next = ipq.prev = ptr_to_u32(pData, &ipq);
57 ip_currid = tt.tv_sec & 0xffff;
58 udp_init(pData);
59 tcp_init(pData);
60}
61
62/*
63 * Ip input routine. Checksum and byte swap header. If fragmented
64 * try to reassemble. Process options. Pass to next level.
65 */
66void
67ip_input(PNATState pData, struct mbuf *m)
68{
69 register struct ip *ip;
70 int hlen;
71
72 DEBUG_CALL("ip_input");
73 DEBUG_ARG("m = %lx", (long)m);
74 DEBUG_ARG("m_len = %d", m->m_len);
75
76#ifdef VBOX_WITH_SYNC_SLIRP
77 int rc;
78 rc = RTSemMutexRequest(m->m_mutex, RT_INDEFINITE_WAIT);
79 AssertReleaseRC(rc);
80#endif
81
82 ipstat.ips_total++;
83
84 if (m->m_len < sizeof (struct ip)) {
85 ipstat.ips_toosmall++;
86#ifdef VBOX_WITH_SYNC_SLIRP
87 rc = RTSemMutexRelease(m->m_mutex);
88 AssertReleaseRC(rc);
89#endif
90 return;
91 }
92
93 ip = mtod(m, struct ip *);
94
95 if (ip->ip_v != IPVERSION) {
96 ipstat.ips_badvers++;
97 goto bad;
98 }
99
100 hlen = ip->ip_hl << 2;
101 if (hlen<sizeof(struct ip ) || hlen>m->m_len) {/* min header length */
102 ipstat.ips_badhlen++; /* or packet too short */
103 goto bad;
104 }
105
106 /* keep ip header intact for ICMP reply
107 * ip->ip_sum = cksum(m, hlen);
108 * if (ip->ip_sum) {
109 */
110 if(cksum(m,hlen)) {
111 ipstat.ips_badsum++;
112 goto bad;
113 }
114
115 /*
116 * Convert fields to host representation.
117 */
118 NTOHS(ip->ip_len);
119 if (ip->ip_len < hlen) {
120 ipstat.ips_badlen++;
121 goto bad;
122 }
123 NTOHS(ip->ip_id);
124 NTOHS(ip->ip_off);
125
126 /*
127 * Check that the amount of data in the buffers
128 * is at least as much as the IP header would have us expect.
129 * Trim mbufs if longer than we expect.
130 * Drop packet if shorter than we expect.
131 */
132 if (m->m_len < ip->ip_len) {
133 ipstat.ips_tooshort++;
134 goto bad;
135 }
136 /* Should drop packet if mbuf too long? hmmm... */
137 if (m->m_len > ip->ip_len)
138 m_adj(m, ip->ip_len - m->m_len);
139
140 /* check ip_ttl for a correct ICMP reply */
141 if(ip->ip_ttl==0 || ip->ip_ttl==1) {
142 icmp_error(pData, m, ICMP_TIMXCEED,ICMP_TIMXCEED_INTRANS, 0,"ttl");
143 goto bad;
144 }
145
146 /*
147 * Process options and, if not destined for us,
148 * ship it on. ip_dooptions returns 1 when an
149 * error was detected (causing an icmp message
150 * to be sent and the original packet to be freed).
151 */
152/* We do no IP options */
153/* if (hlen > sizeof (struct ip) && ip_dooptions(m))
154 * goto next;
155 */
156 /*
157 * If offset or IP_MF are set, must reassemble.
158 * Otherwise, nothing need be done.
159 * (We could look in the reassembly queue to see
160 * if the packet was previously fragmented,
161 * but it's not worth the time; just let them time out.)
162 *
163 * XXX This should fail, don't fragment yet
164 */
165 if (ip->ip_off &~ IP_DF) {
166 register struct ipq_t *fp;
167 /*
168 * Look for queue of fragments
169 * of this datagram.
170 */
171 for (fp = u32_to_ptr(pData, ipq.next, struct ipq_t *); fp != &ipq;
172 fp = u32_to_ptr(pData, fp->next, struct ipq_t *))
173 if (ip->ip_id == fp->ipq_id &&
174 ip->ip_src.s_addr == fp->ipq_src.s_addr &&
175 ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
176 ip->ip_p == fp->ipq_p)
177 goto found;
178 fp = 0;
179 found:
180
181 /*
182 * Adjust ip_len to not reflect header,
183 * set ip_mff if more fragments are expected,
184 * convert offset of this to bytes.
185 */
186 ip->ip_len -= hlen;
187 if (ip->ip_off & IP_MF)
188 ((struct ipasfrag *)ip)->ipf_mff |= 1;
189 else
190 ((struct ipasfrag *)ip)->ipf_mff &= ~1;
191
192 ip->ip_off <<= 3;
193
194 /*
195 * If datagram marked as having more fragments
196 * or if this is not the first fragment,
197 * attempt reassembly; if it succeeds, proceed.
198 */
199 if (((struct ipasfrag *)ip)->ipf_mff & 1 || ip->ip_off) {
200 ipstat.ips_fragments++;
201 ip = ip_reass(pData, (struct ipasfrag *)ip, fp);
202 if (ip == 0)
203#ifndef VBOX_WITH_SYNC_SLIRP
204 return;
205#else
206 {
207 rc = RTSemMutexRelease(m->m_mutex);
208 AssertReleaseRC(rc);
209 return;
210 }
211#endif
212 ipstat.ips_reassembled++;
213#ifndef VBOX_WITH_SYNC_SLIRP
214 m = dtom(pData, ip);
215#else
216 rc = RTSemMutexRelease(m->m_mutex);
217 AssertReleaseRC(rc);
218 m = dtom(pData, ip);
219 rc = RTSemMutexRequest(m->m_mutex, RT_INDEFINITE_WAIT);
220 AssertReleaseRC(rc);
221#endif
222 } else
223 if (fp)
224 ip_freef(pData, fp);
225
226 } else
227 ip->ip_len -= hlen;
228
229 /*
230 * Switch out to protocol's input routine.
231 */
232 ipstat.ips_delivered++;
233 switch (ip->ip_p) {
234 case IPPROTO_TCP:
235 tcp_input(pData, m, hlen, (struct socket *)NULL);
236#ifdef VBOX_WITH_SYNC_SLIRP
237 rc = RTSemMutexRelease(m->m_mutex);
238 AssertReleaseRC(rc);
239#endif
240 break;
241 case IPPROTO_UDP:
242 udp_input(pData, m, hlen);
243#ifdef VBOX_WITH_SYNC_SLIRP
244 rc = RTSemMutexRelease(m->m_mutex);
245 AssertReleaseRC(rc);
246#endif
247 break;
248 case IPPROTO_ICMP:
249 icmp_input(pData, m, hlen);
250#ifdef VBOX_WITH_SYNC_SLIRP
251 rc = RTSemMutexRelease(m->m_mutex);
252 AssertReleaseRC(rc);
253#endif
254 break;
255 default:
256 ipstat.ips_noproto++;
257 m_free(pData, m);
258#ifdef VBOX_WITH_SYNC_SLIRP
259 if (m != NULL) {
260 rc = RTSemMutexRelease(m->m_mutex);
261 AssertReleaseRC(rc);
262 }
263#endif
264 }
265 return;
266bad:
267 m_freem(pData, m);
268#ifdef VBOX_WITH_SYNC_SLIRP
269 if (m != NULL) {
270 rc = RTSemMutexRelease(m->m_mutex);
271 AssertReleaseRC(rc);
272 }
273#endif
274 return;
275}
276
277/*
278 * Take incoming datagram fragment and try to
279 * reassemble it into whole datagram. If a chain for
280 * reassembly of this datagram already exists, then it
281 * is given as fp; otherwise have to make a chain.
282 */
283struct ip *
284ip_reass(PNATState pData, register struct ipasfrag *ip, register struct ipq_t *fp)
285{
286 register struct mbuf *m = dtom(pData, ip);
287 register struct ipasfrag *q;
288 int hlen = ip->ip_hl << 2;
289 int i, next;
290
291 DEBUG_CALL("ip_reass");
292 DEBUG_ARG("ip = %lx", (long)ip);
293 DEBUG_ARG("fp = %lx", (long)fp);
294 DEBUG_ARG("m = %lx", (long)m);
295
296 /*
297 * Presence of header sizes in mbufs
298 * would confuse code below.
299 * Fragment m_data is concatenated.
300 */
301 m->m_data += hlen;
302 m->m_len -= hlen;
303
304 /*
305 * If first fragment to arrive, create a reassembly queue.
306 */
307 if (fp == 0) {
308 struct mbuf *t;
309 if ((t = m_get(pData)) == NULL) goto dropfrag;
310 fp = mtod(t, struct ipq_t *);
311 insque_32(pData, fp, &ipq);
312 fp->ipq_ttl = IPFRAGTTL;
313 fp->ipq_p = ip->ip_p;
314 fp->ipq_id = ip->ip_id;
315 fp->ipq_next = fp->ipq_prev = ptr_to_u32(pData, (struct ipasfrag *)fp);
316 fp->ipq_src = ((struct ip *)ip)->ip_src;
317 fp->ipq_dst = ((struct ip *)ip)->ip_dst;
318 q = (struct ipasfrag *)fp;
319 goto insert;
320 }
321
322 /*
323 * Find a segment which begins after this one does.
324 */
325 for (q = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *); q != (struct ipasfrag *)fp;
326 q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *))
327 if (q->ip_off > ip->ip_off)
328 break;
329
330 /*
331 * If there is a preceding segment, it may provide some of
332 * our data already. If so, drop the data from the incoming
333 * segment. If it provides all of our data, drop us.
334 */
335 if (u32_to_ptr(pData, q->ipf_prev, struct ipq_t *) != fp) {
336 i = (u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *))->ip_off +
337 (u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *))->ip_len - ip->ip_off;
338 if (i > 0) {
339 if (i >= ip->ip_len)
340 goto dropfrag;
341 m_adj(dtom(pData, ip), i);
342 ip->ip_off += i;
343 ip->ip_len -= i;
344 }
345 }
346
347 /*
348 * While we overlap succeeding segments trim them or,
349 * if they are completely covered, dequeue them.
350 */
351 while (q != (struct ipasfrag *)fp && ip->ip_off + ip->ip_len > q->ip_off) {
352 i = (ip->ip_off + ip->ip_len) - q->ip_off;
353 if (i < q->ip_len) {
354 q->ip_len -= i;
355 q->ip_off += i;
356 m_adj(dtom(pData, q), i);
357 break;
358 }
359 q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *);
360 m_freem(pData, dtom(pData, u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *)));
361 ip_deq(pData, u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *));
362 }
363
364insert:
365 /*
366 * Stick new segment in its place;
367 * check for complete reassembly.
368 */
369 ip_enq(pData, ip, u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *));
370 next = 0;
371 for (q = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *); q != (struct ipasfrag *)fp;
372 q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *)) {
373 if (q->ip_off != next)
374 return (0);
375 next += q->ip_len;
376 }
377 if (u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *)->ipf_mff & 1)
378 return (0);
379
380 /*
381 * Reassembly is complete; concatenate fragments.
382 */
383 q = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *);
384 m = dtom(pData, q);
385
386 q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *);
387 while (q != (struct ipasfrag *)fp) {
388 struct mbuf *t;
389 t = dtom(pData, q);
390 q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *);
391 m_cat(pData, m, t);
392 }
393
394 /*
395 * Create header for new ip packet by
396 * modifying header of first packet;
397 * dequeue and discard fragment reassembly header.
398 * Make header visible.
399 */
400 ip = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *);
401
402 /*
403 * If the fragments concatenated to an mbuf that's
404 * bigger than the total size of the fragment, then an
405 * m_ext buffer was allocated. But fp->ipq_next points to
406 * the old buffer (in the mbuf), so we must point ip
407 * into the new buffer.
408 */
409 if (m->m_flags & M_EXT) {
410 int delta;
411 delta = (char *)ip - m->m_dat;
412 ip = (struct ipasfrag *)(m->m_ext + delta);
413 }
414
415 /* DEBUG_ARG("ip = %lx", (long)ip);
416 * ip=(struct ipasfrag *)m->m_data; */
417
418 ip->ip_len = next;
419 ip->ipf_mff &= ~1;
420 ((struct ip *)ip)->ip_src = fp->ipq_src;
421 ((struct ip *)ip)->ip_dst = fp->ipq_dst;
422 remque_32(pData, fp);
423 (void) m_free(pData, dtom(pData, fp));
424 m = dtom(pData, ip);
425 m->m_len += (ip->ip_hl << 2);
426 m->m_data -= (ip->ip_hl << 2);
427
428 return ((struct ip *)ip);
429
430dropfrag:
431 ipstat.ips_fragdropped++;
432 m_freem(pData, m);
433 return (0);
434}
435
436/*
437 * Free a fragment reassembly header and all
438 * associated datagrams.
439 */
440void
441ip_freef(PNATState pData, struct ipq_t *fp)
442{
443 register struct ipasfrag *q, *p;
444
445 for (q = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *); q != (struct ipasfrag *)fp;
446 q = p) {
447 p = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *);
448 ip_deq(pData, q);
449 m_freem(pData, dtom(pData, q));
450 }
451 remque_32(pData, fp);
452 (void) m_free(pData, dtom(pData, fp));
453}
454
455/*
456 * Put an ip fragment on a reassembly chain.
457 * Like insque, but pointers in middle of structure.
458 */
459void
460ip_enq(PNATState pData, register struct ipasfrag *p, register struct ipasfrag *prev)
461{
462 DEBUG_CALL("ip_enq");
463 DEBUG_ARG("prev = %lx", (long)prev);
464 p->ipf_prev = ptr_to_u32(pData, prev);
465 p->ipf_next = prev->ipf_next;
466 u32_to_ptr(pData, prev->ipf_next, struct ipasfrag *)->ipf_prev = ptr_to_u32(pData, p);
467 prev->ipf_next = ptr_to_u32(pData, p);
468}
469
470/*
471 * To ip_enq as remque is to insque.
472 */
473void
474ip_deq(PNATState pData, register struct ipasfrag *p)
475{
476 struct ipasfrag *prev = u32_to_ptr(pData, p->ipf_prev, struct ipasfrag *);
477 struct ipasfrag *next = u32_to_ptr(pData, p->ipf_next, struct ipasfrag *);
478 u32ptr_done(pData, prev->ipf_next, p);
479 prev->ipf_next = p->ipf_next;
480 next->ipf_prev = p->ipf_prev;
481}
482
483/*
484 * IP timer processing;
485 * if a timer expires on a reassembly
486 * queue, discard it.
487 */
488void
489ip_slowtimo(PNATState pData)
490{
491 register struct ipq_t *fp;
492
493 DEBUG_CALL("ip_slowtimo");
494
495 fp = u32_to_ptr(pData, ipq.next, struct ipq_t *);
496 if (fp == 0)
497 return;
498
499 while (fp != &ipq) {
500 --fp->ipq_ttl;
501 fp = u32_to_ptr(pData, fp->next, struct ipq_t *);
502 if (u32_to_ptr(pData, fp->prev, struct ipq_t *)->ipq_ttl == 0) {
503 ipstat.ips_fragtimeout++;
504 ip_freef(pData, u32_to_ptr(pData, fp->prev, struct ipq_t *));
505 }
506 }
507}
508
509/*
510 * Do option processing on a datagram,
511 * possibly discarding it if bad options are encountered,
512 * or forwarding it if source-routed.
513 * Returns 1 if packet has been forwarded/freed,
514 * 0 if the packet should be processed further.
515 */
516
517#ifdef notdef
518
519int
520ip_dooptions(m)
521 struct mbuf *m;
522{
523 register struct ip *ip = mtod(m, struct ip *);
524 register u_char *cp;
525 register struct ip_timestamp *ipt;
526 register struct in_ifaddr *ia;
527/* int opt, optlen, cnt, off, code, type = ICMP_PARAMPROB, forward = 0; */
528 int opt, optlen, cnt, off, code, type, forward = 0;
529 struct in_addr *sin, dst;
530typedef u_int32_t n_time;
531 n_time ntime;
532
533 dst = ip->ip_dst;
534 cp = (u_char *)(ip + 1);
535 cnt = (ip->ip_hl << 2) - sizeof (struct ip);
536 for (; cnt > 0; cnt -= optlen, cp += optlen) {
537 opt = cp[IPOPT_OPTVAL];
538 if (opt == IPOPT_EOL)
539 break;
540 if (opt == IPOPT_NOP)
541 optlen = 1;
542 else {
543 optlen = cp[IPOPT_OLEN];
544 if (optlen <= 0 || optlen > cnt) {
545 code = &cp[IPOPT_OLEN] - (u_char *)ip;
546 goto bad;
547 }
548 }
549 switch (opt) {
550
551 default:
552 break;
553
554 /*
555 * Source routing with record.
556 * Find interface with current destination address.
557 * If none on this machine then drop if strictly routed,
558 * or do nothing if loosely routed.
559 * Record interface address and bring up next address
560 * component. If strictly routed make sure next
561 * address is on directly accessible net.
562 */
563 case IPOPT_LSRR:
564 case IPOPT_SSRR:
565 if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
566 code = &cp[IPOPT_OFFSET] - (u_char *)ip;
567 goto bad;
568 }
569 ipaddr.sin_addr = ip->ip_dst;
570 ia = (struct in_ifaddr *)
571 ifa_ifwithaddr((struct sockaddr *)&ipaddr);
572 if (ia == 0) {
573 if (opt == IPOPT_SSRR) {
574 type = ICMP_UNREACH;
575 code = ICMP_UNREACH_SRCFAIL;
576 goto bad;
577 }
578 /*
579 * Loose routing, and not at next destination
580 * yet; nothing to do except forward.
581 */
582 break;
583 }
584 off--; /* 0 origin */
585 if (off > optlen - sizeof(struct in_addr)) {
586 /*
587 * End of source route. Should be for us.
588 */
589 save_rte(cp, ip->ip_src);
590 break;
591 }
592 /*
593 * locate outgoing interface
594 */
595 bcopy((caddr_t)(cp + off), (caddr_t)&ipaddr.sin_addr,
596 sizeof(ipaddr.sin_addr));
597 if (opt == IPOPT_SSRR) {
598#define INA struct in_ifaddr *
599#define SA struct sockaddr *
600 if ((ia = (INA)ifa_ifwithdstaddr((SA)&ipaddr)) == 0)
601 ia = (INA)ifa_ifwithnet((SA)&ipaddr);
602 } else
603 ia = ip_rtaddr(ipaddr.sin_addr);
604 if (ia == 0) {
605 type = ICMP_UNREACH;
606 code = ICMP_UNREACH_SRCFAIL;
607 goto bad;
608 }
609 ip->ip_dst = ipaddr.sin_addr;
610 bcopy((caddr_t)&(IA_SIN(ia)->sin_addr),
611 (caddr_t)(cp + off), sizeof(struct in_addr));
612 cp[IPOPT_OFFSET] += sizeof(struct in_addr);
613 /*
614 * Let ip_intr's mcast routing check handle mcast pkts
615 */
616 forward = !IN_MULTICAST(ntohl(ip->ip_dst.s_addr));
617 break;
618
619 case IPOPT_RR:
620 if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
621 code = &cp[IPOPT_OFFSET] - (u_char *)ip;
622 goto bad;
623 }
624 /*
625 * If no space remains, ignore.
626 */
627 off--; /* 0 origin */
628 if (off > optlen - sizeof(struct in_addr))
629 break;
630 bcopy((caddr_t)(&ip->ip_dst), (caddr_t)&ipaddr.sin_addr,
631 sizeof(ipaddr.sin_addr));
632 /*
633 * locate outgoing interface; if we're the destination,
634 * use the incoming interface (should be same).
635 */
636 if ((ia = (INA)ifa_ifwithaddr((SA)&ipaddr)) == 0 &&
637 (ia = ip_rtaddr(ipaddr.sin_addr)) == 0) {
638 type = ICMP_UNREACH;
639 code = ICMP_UNREACH_HOST;
640 goto bad;
641 }
642 bcopy((caddr_t)&(IA_SIN(ia)->sin_addr),
643 (caddr_t)(cp + off), sizeof(struct in_addr));
644 cp[IPOPT_OFFSET] += sizeof(struct in_addr);
645 break;
646
647 case IPOPT_TS:
648 code = cp - (u_char *)ip;
649 ipt = (struct ip_timestamp *)cp;
650 if (ipt->ipt_len < 5)
651 goto bad;
652 if (ipt->ipt_ptr > ipt->ipt_len - sizeof (int32_t)) {
653 if (++ipt->ipt_oflw == 0)
654 goto bad;
655 break;
656 }
657 sin = (struct in_addr *)(cp + ipt->ipt_ptr - 1);
658 switch (ipt->ipt_flg) {
659
660 case IPOPT_TS_TSONLY:
661 break;
662
663 case IPOPT_TS_TSANDADDR:
664 if (ipt->ipt_ptr + sizeof(n_time) +
665 sizeof(struct in_addr) > ipt->ipt_len)
666 goto bad;
667 ipaddr.sin_addr = dst;
668 ia = (INA)ifaof_ifpforaddr((SA)&ipaddr,
669 m->m_pkthdr.rcvif);
670 if (ia == 0)
671 continue;
672 bcopy((caddr_t)&IA_SIN(ia)->sin_addr,
673 (caddr_t)sin, sizeof(struct in_addr));
674 ipt->ipt_ptr += sizeof(struct in_addr);
675 break;
676
677 case IPOPT_TS_PRESPEC:
678 if (ipt->ipt_ptr + sizeof(n_time) +
679 sizeof(struct in_addr) > ipt->ipt_len)
680 goto bad;
681 bcopy((caddr_t)sin, (caddr_t)&ipaddr.sin_addr,
682 sizeof(struct in_addr));
683 if (ifa_ifwithaddr((SA)&ipaddr) == 0)
684 continue;
685 ipt->ipt_ptr += sizeof(struct in_addr);
686 break;
687
688 default:
689 goto bad;
690 }
691 ntime = iptime();
692 bcopy((caddr_t)&ntime, (caddr_t)cp + ipt->ipt_ptr - 1,
693 sizeof(n_time));
694 ipt->ipt_ptr += sizeof(n_time);
695 }
696 }
697 if (forward) {
698 ip_forward(m, 1);
699 return (1);
700 }
701 }
702 }
703 return (0);
704bad:
705 /* ip->ip_len -= ip->ip_hl << 2; XXX icmp_error adds in hdr length */
706
707/* Not yet */
708 icmp_error(m, type, code, 0, 0);
709
710 ipstat.ips_badoptions++;
711 return (1);
712}
713
714#endif /* notdef */
715
716/*
717 * Strip out IP options, at higher
718 * level protocol in the kernel.
719 * Second argument is buffer to which options
720 * will be moved, and return value is their length.
721 * (XXX) should be deleted; last arg currently ignored.
722 */
723void
724ip_stripoptions(m, mopt)
725 register struct mbuf *m;
726 struct mbuf *mopt;
727{
728 register int i;
729 struct ip *ip = mtod(m, struct ip *);
730 register caddr_t opts;
731 int olen;
732
733 olen = (ip->ip_hl<<2) - sizeof (struct ip);
734 opts = (caddr_t)(ip + 1);
735 i = m->m_len - (sizeof (struct ip) + olen);
736 memcpy(opts, opts + olen, (unsigned)i);
737 m->m_len -= olen;
738
739 ip->ip_hl = sizeof(struct ip) >> 2;
740}