VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/bsd/sys/mbuf.h@ 55192

Last change on this file since 55192 was 55192, checked in by vboxsync, 10 years ago

gcc 5 is very picky regarding __FUNCTION__ and __PRETTY_FUNCTION__ in C90 code

1/*-
2 * Copyright (c) 1982, 1986, 1988, 1993
3 * The Regents of the University of California.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of the University nor the names of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * @(#)mbuf.h 8.5 (Berkeley) 2/19/95
31 * $FreeBSD: src/sys/sys/mbuf.h,v 1.217.2.3.4.1 2009/04/15 03:14:26 kensmith Exp $
32 */
33
34#ifndef _SYS_MBUF_H_
35#define _SYS_MBUF_H_
36
37#ifndef VBOX
38/* XXX: These includes suck. Sorry! */
39#include <sys/queue.h>
40#ifdef _KERNEL
41#include <sys/systm.h>
42#include <vm/uma.h>
43#ifdef WITNESS
44#include <sys/lock.h>
45#endif
46#endif
47#else /* VBOX */
48# include <iprt/param.h>
49# include "misc.h"
50# include "ext.h"
51
52typedef const char *c_caddr_t;
53
54DECLNORETURN(static void) panic (char *fmt, ...)
55{
56 va_list args;
57 va_start(args, fmt);
58 vbox_slirp_printV(fmt, args);
59 va_end(args);
60 AssertFatalFailed();
61}
62/* for non-gnu compilers */
63# define __func__ RT_GCC_EXTENSION __FUNCTION__
64# ifndef __inline
65# define __inline
66# endif
67
68# define bzero(a1, len) memset((a1), 0, (len))
69
70/* (vvl) some definitions from sys/param.h */
71/*
72 * Constants related to network buffer management.
73 * MCLBYTES must be no larger than PAGE_SIZE.
74 */
75# ifndef MSIZE
76# define MSIZE 256 /* size of an mbuf */
77# endif /* MSIZE */
78
79# ifndef MCLSHIFT
80# define MCLSHIFT 11 /* convert bytes to mbuf clusters */
81# endif /* MCLSHIFT */
82
83# ifndef MCLBYTES
84# define MCLBYTES (1 << MCLSHIFT) /* size of an mbuf cluster */
85# endif /*MCLBYTES*/
86
87# define MJUMPAGESIZE PAGE_SIZE /* jumbo cluster 4k */
88# define MJUM9BYTES (9 * 1024) /* jumbo cluster 9k */
89# define MJUM16BYTES (16 * 1024) /* jumbo cluster 16k */
90#endif /* VBOX */
91
92/*
93 * Mbufs are of a single size, MSIZE (sys/param.h), which includes overhead.
94 * An mbuf may add a single "mbuf cluster" of size MCLBYTES (also in
95 * sys/param.h), which has no additional overhead and is used instead of the
96 * internal data area; this is done when at least MINCLSIZE of data must be
97 * stored. Additionally, it is possible to allocate a separate buffer
98 * externally and attach it to the mbuf in a way similar to that of mbuf
99 * clusters.
100 */
101#define MLEN (MSIZE - sizeof(struct m_hdr)) /* normal data len */
102#define MHLEN (MLEN - sizeof(struct pkthdr)) /* data len w/pkthdr */
103#define MINCLSIZE (MHLEN + 1) /* smallest amount to put in cluster */
104#define M_MAXCOMPRESS (MHLEN / 2) /* max amount to copy for compression */
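/*
 * Worked example of the relations above, assuming the VBOX defaults of
 * MSIZE 256 and MCLSHIFT 11: MCLBYTES is 2048, MLEN is MSIZE minus the
 * m_hdr overhead, MHLEN additionally subtracts the pkthdr, and any payload
 * of at least MINCLSIZE (= MHLEN + 1) bytes is stored in a cluster rather
 * than in the mbuf itself.  The "MCLBYTES must be no larger than PAGE_SIZE"
 * requirement noted earlier could be checked at compile time with a
 * C90-compatible negative-array-size trick (sketch only, not part of the
 * original header):
 *
 *     typedef char assert_mclbytes_fit_page[(MCLBYTES <= PAGE_SIZE) ? 1 : -1];
 */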
105
106#if defined(_KERNEL) || defined(VBOX)
107/*-
108 * Macros for type conversion:
109 * mtod(m, t) -- Convert mbuf pointer to data pointer of correct type.
110 * dtom(x) -- Convert data pointer within mbuf to mbuf pointer (XXX).
111 */
112#define mtod(m, t) ((t)((m)->m_data))
113#define dtom(x) ((struct mbuf *)((intptr_t)(x) & ~(MSIZE-1)))
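/*
 * Minimal usage sketch for mtod(): interpret the start of an mbuf's data
 * area as a protocol header ("struct ip" from <netinet/ip.h> and the
 * variable names are illustrative only, not defined here):
 *
 *     struct ip *ip = mtod(m, struct ip *);
 *     int hlen = ip->ip_hl << 2;            IP header length in bytes
 *
 * dtom() goes the other way, but only for data stored inside the mbuf
 * itself, not for cluster-backed (M_EXT) data.
 */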
114
115/*
116 * Argument structure passed to UMA routines during mbuf and packet
117 * allocations.
118 */
119struct mb_args {
120 int flags; /* Flags for mbuf being allocated */
121 short type; /* Type of mbuf being allocated */
122};
123#endif /* _KERNEL */
124
125#if defined(__LP64__)
126#define M_HDR_PAD 6
127#else
128#define M_HDR_PAD 2
129#endif
130
131/*
132 * Header present at the beginning of every mbuf.
133 */
134struct m_hdr {
135 struct mbuf *mh_next; /* next buffer in chain */
136 struct mbuf *mh_nextpkt; /* next chain in queue/record */
137 caddr_t mh_data; /* location of data */
138 int mh_len; /* amount of data in this mbuf */
139 int mh_flags; /* flags; see below */
140 short mh_type; /* type of data in this mbuf */
141#ifdef VBOX
142 struct socket *mh_so; /* socket associated with mbuf */
143 TAILQ_ENTRY(mbuf) mh_ifq;
144#endif
145 uint8_t pad[M_HDR_PAD];/* word align */
146};
147
148/*
149 * Packet tag structure (see below for details).
150 */
151struct m_tag {
152 SLIST_ENTRY(m_tag) m_tag_link; /* List of packet tags */
153 u_int16_t m_tag_id; /* Tag ID */
154 u_int16_t m_tag_len; /* Length of data */
155 u_int32_t m_tag_cookie; /* ABI/Module ID */
156 void (*m_tag_free)(struct m_tag *);
157};
158
159/*
160 * Record/packet header in first mbuf of chain; valid only if M_PKTHDR is set.
161 */
162struct pkthdr {
163 struct ifnet *rcvif; /* rcv interface */
164 /* variables for ip and tcp reassembly */
165 void *header; /* pointer to packet header */
166 int len; /* total packet length */
167 /* variables for hardware checksum */
168 int csum_flags; /* flags regarding checksum */
169 int csum_data; /* data field used by csum routines */
170 u_int16_t tso_segsz; /* TSO segment size */
171 u_int16_t ether_vtag; /* Ethernet 802.1p+q vlan tag */
172 SLIST_HEAD(packet_tags, m_tag) tags; /* list of packet tags */
173};
174
175/*
176 * Description of external storage mapped into mbuf; valid only if M_EXT is
177 * set.
178 */
179struct m_ext {
180 caddr_t ext_buf; /* start of buffer */
181 void (*ext_free) /* free routine if not the usual */
182 (void *, void *);
183 void *ext_args; /* optional argument pointer */
184 u_int ext_size; /* size of buffer, for ext_free */
185#ifdef VBOX
186 volatile uint32_t *ref_cnt; /* pointer to ref count info */
187#else
188 volatile u_int *ref_cnt; /* pointer to ref count info */
189#endif
190 int ext_type; /* type of external storage */
191};
192
193/*
194 * The core of the mbuf object along with some shortcut defines for practical
195 * purposes.
196 */
197struct mbuf {
198 struct m_hdr m_hdr;
199 union {
200 struct {
201 struct pkthdr MH_pkthdr; /* M_PKTHDR set */
202 union {
203 struct m_ext MH_ext; /* M_EXT set */
204 char MH_databuf[MHLEN];
205 } MH_dat;
206 } MH;
207 char M_databuf[MLEN]; /* !M_PKTHDR, !M_EXT */
208 } M_dat;
209};
210#define m_next m_hdr.mh_next
211#define m_len m_hdr.mh_len
212#define m_data m_hdr.mh_data
213#define m_type m_hdr.mh_type
214#define m_flags m_hdr.mh_flags
215#define m_nextpkt m_hdr.mh_nextpkt
216#define m_act m_nextpkt
217#define m_pkthdr M_dat.MH.MH_pkthdr
218#define m_ext M_dat.MH.MH_dat.MH_ext
219#define m_pktdat M_dat.MH.MH_dat.MH_databuf
220#define m_dat M_dat.M_databuf
221#ifdef VBOX
222# define m_so m_hdr.mh_so
223# define ifq_so m_hdr.mh_so
224# define m_ifq m_hdr.mh_ifq
225#endif
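/*
 * Small sketch of walking an mbuf chain with the shortcut fields above:
 * m_next links the buffers of one packet, m_len gives the amount of data
 * in each, and mtod() yields the data pointer (consume_bytes() is a
 * placeholder, not part of this header):
 *
 *     u_int total = 0;
 *     struct mbuf *n;
 *     for (n = m; n != NULL; n = n->m_next) {
 *         consume_bytes(mtod(n, const char *), n->m_len);
 *         total += n->m_len;
 *     }
 */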
226
227/*
228 * mbuf flags.
229 */
230#define M_EXT 0x00000001 /* has associated external storage */
231#define M_PKTHDR 0x00000002 /* start of record */
232#define M_EOR 0x00000004 /* end of record */
233#define M_RDONLY 0x00000008 /* associated data is marked read-only */
234#define M_PROTO1 0x00000010 /* protocol-specific */
235#define M_PROTO2 0x00000020 /* protocol-specific */
236#define M_PROTO3 0x00000040 /* protocol-specific */
237#define M_PROTO4 0x00000080 /* protocol-specific */
238#define M_PROTO5 0x00000100 /* protocol-specific */
239#define M_BCAST 0x00000200 /* send/received as link-level broadcast */
240#define M_MCAST 0x00000400 /* send/received as link-level multicast */
241#define M_FRAG 0x00000800 /* packet is a fragment of a larger packet */
242#define M_FIRSTFRAG 0x00001000 /* packet is first fragment */
243#define M_LASTFRAG 0x00002000 /* packet is last fragment */
244#define M_SKIP_FIREWALL 0x00004000 /* skip firewall processing */
245#define M_FREELIST 0x00008000 /* mbuf is on the free list */
246#define M_VLANTAG 0x00010000 /* ether_vtag is valid */
247#define M_PROMISC 0x00020000 /* packet was not for us */
248#define M_NOFREE 0x00040000 /* do not free mbuf, embedded in cluster */
249#define M_PROTO6 0x00080000 /* protocol-specific */
250#define M_PROTO7 0x00100000 /* protocol-specific */
251#define M_PROTO8 0x00200000 /* protocol-specific */
252/*
253 * For RELENG_{6,7} steal these flags for limited multiple routing table
254 * support. In RELENG_8 and beyond, use just one flag and a tag.
255 */
256#define M_FIB 0xF0000000 /* steal some bits to store fib number. */
257
258#define M_NOTIFICATION M_PROTO5 /* SCTP notification */
259
260/*
261 * Flags to purge when crossing layers.
262 */
263#define M_PROTOFLAGS \
264 (M_PROTO1|M_PROTO2|M_PROTO3|M_PROTO4|M_PROTO5|M_PROTO6|M_PROTO7|M_PROTO8)
265
266/*
267 * Flags preserved when copying m_pkthdr.
268 */
269#define M_COPYFLAGS \
270 (M_PKTHDR|M_EOR|M_RDONLY|M_PROTOFLAGS|M_SKIP_FIREWALL|M_BCAST|M_MCAST|\
271 M_FRAG|M_FIRSTFRAG|M_LASTFRAG|M_VLANTAG|M_PROMISC|M_FIB)
272
273/*
274 * External buffer types: identify ext_buf type.
275 */
276#define EXT_CLUSTER 1 /* mbuf cluster */
277#define EXT_SFBUF 2 /* sendfile(2)'s sf_bufs */
278#define EXT_JUMBOP 3 /* jumbo cluster 4096 bytes */
279#define EXT_JUMBO9 4 /* jumbo cluster 9216 bytes */
280#define EXT_JUMBO16 5 /* jumbo cluster 16384 bytes */
281#define EXT_PACKET 6 /* mbuf+cluster from packet zone */
282#define EXT_MBUF 7 /* external mbuf reference (M_IOVEC) */
283#define EXT_NET_DRV 100 /* custom ext_buf provided by net driver(s) */
284#define EXT_MOD_TYPE 200 /* custom module's ext_buf type */
285#define EXT_DISPOSABLE 300 /* can throw this buffer away w/page flipping */
286#define EXT_EXTREF 400 /* has externally maintained ref_cnt ptr */
287
288/*
289 * Flags indicating hw checksum support and sw checksum requirements. This
290 * field can be directly tested against if_data.ifi_hwassist.
291 */
292#define CSUM_IP 0x0001 /* will csum IP */
293#define CSUM_TCP 0x0002 /* will csum TCP */
294#define CSUM_UDP 0x0004 /* will csum UDP */
295#define CSUM_IP_FRAGS 0x0008 /* will csum IP fragments */
296#define CSUM_FRAGMENT 0x0010 /* will do IP fragmentation */
297#define CSUM_TSO 0x0020 /* will do TSO */
298
299#define CSUM_IP_CHECKED 0x0100 /* did csum IP */
300#define CSUM_IP_VALID 0x0200 /* ... the csum is valid */
301#define CSUM_DATA_VALID 0x0400 /* csum_data field is valid */
302#define CSUM_PSEUDO_HDR 0x0800 /* csum_data has pseudo hdr */
303
304#define CSUM_DELAY_DATA (CSUM_TCP | CSUM_UDP)
305#define CSUM_DELAY_IP (CSUM_IP) /* XXX add ipv6 here too? */
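/*
 * Illustrative check of these bits on a received packet (field names as in
 * struct pkthdr above; the surrounding receive code is assumed):
 *
 *     int hw_ip_ok   = (m->m_pkthdr.csum_flags &
 *         (CSUM_IP_CHECKED | CSUM_IP_VALID)) ==
 *         (CSUM_IP_CHECKED | CSUM_IP_VALID);
 *     int hw_data_ok = (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) != 0;
 *
 * When hw_data_ok is set, csum_data holds the hardware-computed value for
 * the checksum routines to consume.
 */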
306
307/*
308 * mbuf types.
309 */
310#define MT_NOTMBUF 0 /* USED INTERNALLY ONLY! Object is not mbuf */
311#define MT_DATA 1 /* dynamic (data) allocation */
312#define MT_HEADER MT_DATA /* packet header, use M_PKTHDR instead */
313#define MT_SONAME 8 /* socket name */
314#define MT_CONTROL 14 /* extra-data protocol message */
315#define MT_OOBDATA 15 /* expedited data */
316#define MT_NTYPES 16 /* number of mbuf types for mbtypes[] */
317
318#define MT_NOINIT 255 /* Not a type but a flag to allocate
319 a non-initialized mbuf */
320
321#define MB_NOTAGS 0x1UL /* no tags attached to mbuf */
322
323/*
324 * General mbuf allocator statistics structure.
325 *
326 * Many of these statistics are no longer used; we instead track many
327 * allocator statistics through UMA's built in statistics mechanism.
328 */
329struct mbstat {
330 u_long m_mbufs; /* XXX */
331 u_long m_mclusts; /* XXX */
332
333 u_long m_drain; /* times drained protocols for space */
334 u_long m_mcfail; /* XXX: times m_copym failed */
335 u_long m_mpfail; /* XXX: times m_pullup failed */
336 u_long m_msize; /* length of an mbuf */
337 u_long m_mclbytes; /* length of an mbuf cluster */
338 u_long m_minclsize; /* min length of data to allocate a cluster */
339 u_long m_mlen; /* length of data in an mbuf */
340 u_long m_mhlen; /* length of data in a header mbuf */
341
342 /* Number of mbtypes (gives # elems in mbtypes[] array) */
343 short m_numtypes;
344
345 /* XXX: Sendfile stats should eventually move to their own struct */
346 u_long sf_iocnt; /* times sendfile had to do disk I/O */
347 u_long sf_allocfail; /* times sfbuf allocation failed */
348 u_long sf_allocwait; /* times sfbuf allocation had to wait */
349};
350
351/*
352 * Flags specifying how an allocation should be made.
353 *
354 * The flag to use is as follows:
355 * - M_DONTWAIT or M_NOWAIT from an interrupt handler to not block allocation.
356 * - M_WAIT or M_WAITOK or M_TRYWAIT from wherever it is safe to block.
357 *
358 * M_DONTWAIT/M_NOWAIT means that we will not block the thread explicitly and
359 * if we cannot allocate immediately we may return NULL, whereas
360 * M_WAIT/M_WAITOK/M_TRYWAIT means that if we cannot allocate resources we
361 * will block until they are available, and thus never return NULL.
362 *
363 * XXX Eventually just phase this out to use M_WAITOK/M_NOWAIT.
364 */
365#define MBTOM(how) (how)
366#ifndef VBOX
367#define M_DONTWAIT M_NOWAIT
368#define M_TRYWAIT M_WAITOK
369#define M_WAIT M_WAITOK
370#else
371/* @todo (r=vvl) not sure we can do it in NAT */
372# define M_WAITOK 0
373# define M_NOWAIT 0
374# define M_DONTWAIT 0
375 # define M_TRYWAIT 0
376# define M_WAIT 0
377#endif
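/*
 * Hedged example of picking an allocation flag under the rules above (stock,
 * non-VBOX prototypes assumed; in the VBOX build all of these constants are
 * 0, so the distinction is effectively ignored):
 *
 *     struct mbuf *m1 = m_get(M_DONTWAIT, MT_DATA);   may return NULL
 *     struct mbuf *m2 = m_get(M_WAITOK, MT_DATA);     may sleep, never NULL
 */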
378
379/*
380 * String names of mbuf-related UMA(9) and malloc(9) types. Exposed to
381 * !_KERNEL so that monitoring tools can look up the zones with
382 * libmemstat(3).
383 */
384#define MBUF_MEM_NAME "mbuf"
385#define MBUF_CLUSTER_MEM_NAME "mbuf_cluster"
386#define MBUF_PACKET_MEM_NAME "mbuf_packet"
387#define MBUF_JUMBOP_MEM_NAME "mbuf_jumbo_pagesize"
388#define MBUF_JUMBO9_MEM_NAME "mbuf_jumbo_9k"
389#define MBUF_JUMBO16_MEM_NAME "mbuf_jumbo_16k"
390#define MBUF_TAG_MEM_NAME "mbuf_tag"
391#define MBUF_EXTREFCNT_MEM_NAME "mbuf_ext_refcnt"
392
393#if defined(_KERNEL) || defined(VBOX)
394
395#ifdef WITNESS
396#define MBUF_CHECKSLEEP(how) do { \
397 if (how == M_WAITOK) \
398 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, \
399 "Sleeping in \"%s\"", __func__); \
400} while (0)
401#else
402#define MBUF_CHECKSLEEP(how)
403#endif
404
405/*
406 * Network buffer allocation API
407 *
408 * The rest of it is defined in kern/kern_mbuf.c
409 */
410
411#ifndef VBOX
412extern uma_zone_t zone_mbuf;
413extern uma_zone_t zone_clust;
414extern uma_zone_t zone_pack;
415extern uma_zone_t zone_jumbop;
416extern uma_zone_t zone_jumbo9;
417extern uma_zone_t zone_jumbo16;
418extern uma_zone_t zone_ext_refcnt;
419#endif
420
421#ifndef VBOX
422static __inline struct mbuf *m_getcl(int how, short type, int flags);
423static __inline struct mbuf *m_get(int how, short type);
424static __inline struct mbuf *m_gethdr(int how, short type);
425static __inline struct mbuf *m_getjcl(int how, short type, int flags,
426 int size);
427static __inline struct mbuf *m_getclr(int how, short type); /* XXX */
428static __inline struct mbuf *m_free(struct mbuf *m);
429static __inline void m_clget(struct mbuf *m, int how);
430static __inline void *m_cljget(struct mbuf *m, int how, int size);
431void mb_free_ext(struct mbuf *);
432#else
433static __inline struct mbuf *m_getcl(PNATState pData, int how, short type, int flags);
434static __inline struct mbuf *m_get(PNATState pData, int how, short type);
435static __inline struct mbuf *m_gethdr(PNATState pData, int how, short type);
436static __inline struct mbuf *m_getjcl(PNATState pData, int how,
437 short type, int flags, int size);
438static __inline struct mbuf *m_getclr(PNATState pData, int how, short type); /* XXX */
439static __inline struct mbuf *m_free(PNATState pData, struct mbuf *m);
440static __inline void m_clget(PNATState pData, struct mbuf *m, int how);
441static __inline void *m_cljget(PNATState pData, struct mbuf *m, int how, int size);
442void mb_free_ext(PNATState, struct mbuf *);
443#endif
444static __inline void m_chtype(struct mbuf *m, short new_type);
445static __inline struct mbuf *m_last(struct mbuf *m);
446
447static __inline int
448m_gettype(int size)
449{
450 int type;
451
452 switch (size) {
453 case MSIZE:
454 type = EXT_MBUF;
455 break;
456 case MCLBYTES:
457 type = EXT_CLUSTER;
458 break;
459#if MJUMPAGESIZE != MCLBYTES
460 case MJUMPAGESIZE:
461 type = EXT_JUMBOP;
462 break;
463#endif
464 case MJUM9BYTES:
465 type = EXT_JUMBO9;
466 break;
467 case MJUM16BYTES:
468 type = EXT_JUMBO16;
469 break;
470 default:
471 panic("%s: m_getjcl: invalid cluster size", __func__);
472 }
473
474 return (type);
475}
476
477static __inline uma_zone_t
478#ifndef VBOX
479m_getzone(int size)
480#else
481m_getzone(PNATState pData, int size)
482#endif
483{
484 uma_zone_t zone;
485
486 switch (size) {
487 case MSIZE:
488 zone = zone_mbuf;
489 break;
490 case MCLBYTES:
491 zone = zone_clust;
492 break;
493#if MJUMPAGESIZE != MCLBYTES
494 case MJUMPAGESIZE:
495 zone = zone_jumbop;
496 break;
497#endif
498 case MJUM9BYTES:
499 zone = zone_jumbo9;
500 break;
501 case MJUM16BYTES:
502 zone = zone_jumbo16;
503 break;
504 default:
505 panic("%s: m_getjcl: invalid cluster type", __func__);
506 }
507
508 return (zone);
509}
510
511static __inline struct mbuf *
512#ifndef VBOX
513m_get(int how, short type)
514#else
515m_get(PNATState pData, int how, short type)
516#endif
517{
518 struct mb_args args;
519
520 args.flags = 0;
521 args.type = type;
522 return ((struct mbuf *)(uma_zalloc_arg(zone_mbuf, &args, how)));
523}
524
525/*
526 * XXX This should be deprecated, very little use.
527 */
528static __inline struct mbuf *
529#ifndef VBOX
530m_getclr(int how, short type)
531#else
532m_getclr(PNATState pData, int how, short type)
533#endif
534{
535 struct mbuf *m;
536 struct mb_args args;
537
538 args.flags = 0;
539 args.type = type;
540 m = uma_zalloc_arg(zone_mbuf, &args, how);
541 if (m != NULL)
542 bzero(m->m_data, MLEN);
543 return (m);
544}
545
546static __inline struct mbuf *
547#ifndef VBOX
548m_gethdr(int how, short type)
549#else
550m_gethdr(PNATState pData, int how, short type)
551#endif
552{
553 struct mb_args args;
554
555 args.flags = M_PKTHDR;
556 args.type = type;
557 return ((struct mbuf *)(uma_zalloc_arg(zone_mbuf, &args, how)));
558}
559
560static __inline struct mbuf *
561#ifndef VBOX
562m_getcl(int how, short type, int flags)
563#else
564m_getcl(PNATState pData, int how, short type, int flags)
565#endif
566{
567 struct mb_args args;
568
569 args.flags = flags;
570 args.type = type;
571 return ((struct mbuf *)(uma_zalloc_arg(zone_pack, &args, how)));
572}
573
574/*
575 * m_getjcl() returns an mbuf with a cluster of the specified size attached.
576 * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
577 *
578 * XXX: This is rather large, should be real function maybe.
579 */
580static __inline struct mbuf *
581#ifndef VBOX
582m_getjcl(int how, short type, int flags, int size)
583#else
584m_getjcl(PNATState pData, int how, short type, int flags, int size)
585#endif
586{
587 struct mb_args args;
588 struct mbuf *m, *n;
589 uma_zone_t zone;
590
591 args.flags = flags;
592 args.type = type;
593
594 m = uma_zalloc_arg(zone_mbuf, &args, how);
595 if (m == NULL)
596 return (NULL);
597
598#ifndef VBOX
599 zone = m_getzone(size);
600#else
601 zone = m_getzone(pData, size);
602#endif
603 n = uma_zalloc_arg(zone, m, how);
604 if (n == NULL) {
605 uma_zfree(zone_mbuf, m);
606 return (NULL);
607 }
608 return (m);
609}
610
611#ifndef VBOX
612static __inline void
613m_free_fast(struct mbuf *m)
614{
615 KASSERT(SLIST_EMPTY(&m->m_pkthdr.tags), ("doing fast free of mbuf with tags"));
616
617 uma_zfree_arg(zone_mbuf, m, (void *)MB_NOTAGS);
618}
619#else
620static __inline void
621m_free_fast(PNATState pData, struct mbuf *m)
622{
623 AssertMsg(SLIST_EMPTY(&m->m_pkthdr.tags), ("doing fast free of mbuf with tags"));
624
625 uma_zfree_arg(zone_mbuf, m, (void *)MB_NOTAGS);
626}
627#endif
628
629static __inline struct mbuf *
630#ifndef VBOX
631m_free(struct mbuf *m)
632#else
633m_free(PNATState pData, struct mbuf *m)
634#endif
635{
636 struct mbuf *n = m->m_next;
637
638 if (m->m_flags & M_EXT)
639#ifndef VBOX
640 mb_free_ext(m);
641#else
642 mb_free_ext(pData, m);
643#endif
644 else if ((m->m_flags & M_NOFREE) == 0)
645 uma_zfree(zone_mbuf, m);
646 return (n);
647}
648
649static __inline void
650#ifndef VBOX
651m_clget(struct mbuf *m, int how)
652#else
653m_clget(PNATState pData, struct mbuf *m, int how)
654#endif
655{
656
657 if (m->m_flags & M_EXT)
658 printf("%s: %p mbuf already has cluster\n", __func__, m);
659 m->m_ext.ext_buf = (char *)NULL;
660 uma_zalloc_arg(zone_clust, m, how);
661 /*
662 * On a cluster allocation failure, drain the packet zone and retry,
663 * we might be able to loosen a few clusters up on the drain.
664 */
665 if ((how & M_NOWAIT) && (m->m_ext.ext_buf == NULL)) {
666 zone_drain(zone_pack);
667 uma_zalloc_arg(zone_clust, m, how);
668 }
669}
670
671/*
672 * m_cljget() is different from m_clget() as it can allocate clusters without
673 * attaching them to an mbuf. In that case the return value is the pointer
674 * to the cluster of the requested size. If an mbuf was specified, it gets
675 * the cluster attached to it and the return value can be safely ignored.
676 * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
677 */
678static __inline void *
679#ifndef VBOX
680m_cljget(struct mbuf *m, int how, int size)
681#else
682m_cljget(PNATState pData, struct mbuf *m, int how, int size)
683#endif
684{
685 uma_zone_t zone;
686
687 if (m && m->m_flags & M_EXT)
688 printf("%s: %p mbuf already has cluster\n", __func__, m);
689 if (m != NULL)
690 m->m_ext.ext_buf = NULL;
691
692#ifndef VBOX
693 zone = m_getzone(size);
694#else
695 zone = m_getzone(pData, size);
696#endif
697 return (uma_zalloc_arg(zone, m, how));
698}
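/*
 * Usage sketch for m_cljget() in both modes described above (stock, non-VBOX
 * prototypes assumed):
 *
 *     void *cl = m_cljget(NULL, M_DONTWAIT, MJUM9BYTES);
 *     if (cl != NULL)
 *         m_cljset(m, cl, EXT_JUMBO9);
 *
 * or, attaching directly to an mbuf and checking for failure afterwards:
 *
 *     (void)m_cljget(m, M_DONTWAIT, MCLBYTES);
 *     if (m->m_ext.ext_buf == NULL)
 *         m_freem(m);
 */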
699
700static __inline void
701#ifndef VBOX
702m_cljset(struct mbuf *m, void *cl, int type)
703#else
704m_cljset(PNATState pData, struct mbuf *m, void *cl, int type)
705#endif
706{
707 uma_zone_t zone;
708 int size;
709
710 switch (type) {
711 case EXT_CLUSTER:
712 size = MCLBYTES;
713 zone = zone_clust;
714 break;
715#if MJUMPAGESIZE != MCLBYTES
716 case EXT_JUMBOP:
717 size = MJUMPAGESIZE;
718 zone = zone_jumbop;
719 break;
720#endif
721 case EXT_JUMBO9:
722 size = MJUM9BYTES;
723 zone = zone_jumbo9;
724 break;
725 case EXT_JUMBO16:
726 size = MJUM16BYTES;
727 zone = zone_jumbo16;
728 break;
729 default:
730 panic("unknown cluster type");
731 break;
732 }
733
734 m->m_data = m->m_ext.ext_buf = cl;
735#ifdef VBOX
736 m->m_ext.ext_free = (void (*)(void *, void *))0;
737 m->m_ext.ext_args = NULL;
738#else
739 m->m_ext.ext_free = m->m_ext.ext_args = NULL;
740#endif
741 m->m_ext.ext_size = size;
742 m->m_ext.ext_type = type;
743 m->m_ext.ref_cnt = uma_find_refcnt(zone, cl);
744 m->m_flags |= M_EXT;
745
746}
747
748static __inline void
749m_chtype(struct mbuf *m, short new_type)
750{
751
752 m->m_type = new_type;
753}
754
755static __inline struct mbuf *
756m_last(struct mbuf *m)
757{
758
759 while (m->m_next)
760 m = m->m_next;
761 return (m);
762}
763
764/*
765 * mbuf, cluster, and external object allocation macros (for compatibility
766 * purposes).
767 */
768#define M_MOVE_PKTHDR(to, from) m_move_pkthdr((to), (from))
769#ifndef VBOX
770#define MGET(m, how, type) ((m) = m_get((how), (type)))
771#define MGETHDR(m, how, type) ((m) = m_gethdr((how), (type)))
772#define MCLGET(m, how) m_clget((m), (how))
773#define MEXTADD(m, buf, size, free, args, flags, type) \
774 m_extadd((m), (caddr_t)(buf), (size), (free), (args), (flags), (type))
775#define m_getm(m, len, how, type) \
776 m_getm2((m), (len), (how), (type), M_PKTHDR)
777#else /* VBOX */
778#define MGET(m, how, type) ((m) = m_get(pData, (how), (type)))
779#define MGETHDR(m, how, type) ((m) = m_gethdr(pData, (how), (type)))
780#define MCLGET(m, how) m_clget(pData, (m), (how))
781#define MEXTADD(m, buf, size, free, args, flags, type) \
782 m_extadd(pData, (m), (caddr_t)(buf), (size), (free), (args), (flags), (type))
783#define m_getm(m, len, how, type) \
784 m_getm2(pData, (m), (len), (how), (type), M_PKTHDR)
785#endif
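/*
 * The classic header-plus-cluster allocation pattern built from the macros
 * above (sketch only; the non-VBOX forms are shown, the VBOX variants expect
 * a local "pData" in scope, and ENOBUFS comes from <errno.h>):
 *
 *     struct mbuf *m;
 *
 *     MGETHDR(m, M_DONTWAIT, MT_DATA);
 *     if (m == NULL)
 *         return (ENOBUFS);
 *     MCLGET(m, M_DONTWAIT);
 *     if ((m->m_flags & M_EXT) == 0) {
 *         m_freem(m);
 *         return (ENOBUFS);
 *     }
 *     m->m_len = m->m_pkthdr.len = 0;
 */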
786
787/*
788 * Evaluate TRUE if it's safe to write to the mbuf m's data region (this can
789 * be both the local data payload, or an external buffer area, depending on
790 * whether M_EXT is set).
791 */
792#define M_WRITABLE(m) (!((m)->m_flags & M_RDONLY) && \
793 (!(((m)->m_flags & M_EXT)) || \
794 (*((m)->m_ext.ref_cnt) == 1)) ) \
795
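/*
 * Sketch of the usual guard before modifying packet data in place (stock
 * m_dup()/m_freem() prototypes assumed):
 *
 *     if (!M_WRITABLE(m)) {
 *         struct mbuf *n = m_dup(m, M_DONTWAIT);
 *         if (n == NULL)
 *             return (ENOBUFS);
 *         m_freem(m);
 *         m = n;
 *     }
 */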
796/* Check if the supplied mbuf has a packet header, or else panic. */
797#define M_ASSERTPKTHDR(m) \
798 KASSERT(m != NULL && m->m_flags & M_PKTHDR, \
799 ("%s: no mbuf packet header!", __func__))
800
801/*
802 * Ensure that the supplied mbuf is a valid, non-free mbuf.
803 *
804 * XXX: Broken at the moment. Need some UMA magic to make it work again.
805 */
806#define M_ASSERTVALID(m) \
807 KASSERT((((struct mbuf *)m)->m_flags & 0) == 0, \
808 ("%s: attempted use of a free mbuf!", __func__))
809
810/*
811 * Set the m_data pointer of a newly-allocated mbuf (m_get/MGET) to place an
812 * object of the specified size at the end of the mbuf, longword aligned.
813 */
814#define M_ALIGN(m, len) do { \
815 KASSERT(!((m)->m_flags & (M_PKTHDR|M_EXT)), \
816 ("%s: M_ALIGN not normal mbuf", __func__)); \
817 KASSERT((m)->m_data == (m)->m_dat, \
818 ("%s: M_ALIGN not a virgin mbuf", __func__)); \
819 (m)->m_data += (MLEN - (len)) & ~(sizeof(long) - 1); \
820} while (0)
821
822/*
823 * As above, for mbufs allocated with m_gethdr/MGETHDR or initialized by
824 * M_DUP/MOVE_PKTHDR.
825 */
826#define MH_ALIGN(m, len) do { \
827 KASSERT((m)->m_flags & M_PKTHDR && !((m)->m_flags & M_EXT), \
828 ("%s: MH_ALIGN not PKTHDR mbuf", __func__)); \
829 KASSERT((m)->m_data == (m)->m_pktdat, \
830 ("%s: MH_ALIGN not a virgin mbuf", __func__)); \
831 (m)->m_data += (MHLEN - (len)) & ~(sizeof(long) - 1); \
832} while (0)
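/*
 * Typical MH_ALIGN use right after MGETHDR: place the header to be built at
 * the end of the mbuf so that maximal leading space remains for lower-layer
 * headers prepended later (sketch; stock MGETHDR form and "struct tcpiphdr"
 * are illustrative):
 *
 *     MGETHDR(m, M_DONTWAIT, MT_DATA);
 *     if (m == NULL)
 *         return (ENOBUFS);
 *     MH_ALIGN(m, sizeof(struct tcpiphdr));
 *     m->m_len = sizeof(struct tcpiphdr);
 */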
833
834/*
835 * Compute the amount of space available before the current start of data in
836 * an mbuf.
837 *
838 * The M_WRITABLE() is a temporary, conservative safety measure: the burden
839 * of checking writability of the mbuf data area rests solely with the caller.
840 */
841#define M_LEADINGSPACE(m) \
842 ((m)->m_flags & M_EXT ? \
843 (M_WRITABLE(m) ? (m)->m_data - (m)->m_ext.ext_buf : 0): \
844 (m)->m_flags & M_PKTHDR ? (m)->m_data - (m)->m_pktdat : \
845 (m)->m_data - (m)->m_dat)
846
847/*
848 * Compute the amount of space available after the end of data in an mbuf.
849 *
850 * The M_WRITABLE() is a temporary, conservative safety measure: the burden
851 * of checking writability of the mbuf data area rests solely with the caller.
852 */
853#define M_TRAILINGSPACE(m) \
854 ((m)->m_flags & M_EXT ? \
855 (M_WRITABLE(m) ? (m)->m_ext.ext_buf + (m)->m_ext.ext_size \
856 - ((m)->m_data + (m)->m_len) : 0) : \
857 &(m)->m_dat[MLEN] - ((m)->m_data + (m)->m_len))
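/*
 * Common append idiom built on M_TRAILINGSPACE for a single-mbuf packet
 * (the M_WRITABLE caveat above applies; src/len are illustrative and the
 * stock m_append() prototype is assumed for the fallback):
 *
 *     if (M_WRITABLE(m) && M_TRAILINGSPACE(m) >= len) {
 *         memcpy(mtod(m, char *) + m->m_len, src, len);
 *         m->m_len += len;
 *         m->m_pkthdr.len += len;
 *     } else {
 *         error = m_append(m, len, src) ? 0 : ENOBUFS;
 *     }
 */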
858
859/*
860 * Arrange to prepend space of size plen to mbuf m. If a new mbuf must be
861 * allocated, how specifies whether to wait. If the allocation fails, the
862 * original mbuf chain is freed and m is set to NULL.
863 */
864#define M_PREPEND(m, plen, how) do { \
865 struct mbuf **_mmp = &(m); \
866 struct mbuf *_mm = *_mmp; \
867 int _mplen = (plen); \
868 int __mhow = (how); \
869 \
870 MBUF_CHECKSLEEP(how); \
871 if (M_LEADINGSPACE(_mm) >= _mplen) { \
872 _mm->m_data -= _mplen; \
873 _mm->m_len += _mplen; \
874 } else \
875 _mm = m_prepend(_mm, _mplen, __mhow); \
876 if (_mm != NULL && _mm->m_flags & M_PKTHDR) \
877 _mm->m_pkthdr.len += _mplen; \
878 *_mmp = _mm; \
879} while (0)
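/*
 * M_PREPEND in its usual role, pushing a protocol header in front of existing
 * payload ("struct udphdr" is illustrative; as described above, the macro
 * frees the chain and sets m to NULL on allocation failure, hence the check):
 *
 *     struct udphdr *uh;
 *
 *     M_PREPEND(m, sizeof(struct udphdr), M_DONTWAIT);
 *     if (m == NULL)
 *         return (ENOBUFS);
 *     uh = mtod(m, struct udphdr *);
 */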
880
881/*
882 * Change mbuf to new type. This is a relatively expensive operation and
883 * should be avoided.
884 */
885#define MCHTYPE(m, t) m_chtype((m), (t))
886
887/* Length to m_copy to copy all. */
888#define M_COPYALL 1000000000
889
890/* Compatibility with 4.3. */
891#define m_copy(m, o, l) m_copym((m), (o), (l), M_DONTWAIT)
892
893extern int max_datalen; /* MHLEN - max_hdr */
894extern int max_hdr; /* Largest link + protocol header */
895extern int max_linkhdr; /* Largest link-level header */
896extern int max_protohdr; /* Largest protocol header */
897extern struct mbstat mbstat; /* General mbuf stats/infos */
898extern int nmbclusters; /* Maximum number of clusters */
899
900struct uio;
901
902void m_align(struct mbuf *, int);
903int m_apply(struct mbuf *, int, int,
904 int (*)(void *, void *, u_int), void *);
905#ifndef VBOX
906void m_adj(struct mbuf *, int);
907int m_append(struct mbuf *, int, c_caddr_t);
908struct mbuf *m_defrag(struct mbuf *, int);
909struct mbuf *m_dup(struct mbuf *, int);
910void m_cat(struct mbuf *, struct mbuf *);
911struct mbuf *m_collapse(struct mbuf *, int, int);
912void m_copyback(struct mbuf *, int, int, c_caddr_t);
913struct mbuf *m_copym(struct mbuf *, int, int, int);
914struct mbuf *m_copymdata(struct mbuf *, struct mbuf *,
915 int, int, int, int);
916struct mbuf *m_copypacket(struct mbuf *, int);
917struct mbuf *m_copyup(struct mbuf *n, int len, int dstoff);
918void m_extadd(struct mbuf *, caddr_t, u_int,
919 void (*)(void *, void *), void *, int, int);
920#else
921void m_adj(PNATState, struct mbuf *, int);
922int m_append(PNATState pData, struct mbuf *, int, c_caddr_t);
923struct mbuf *m_defrag(PNATState, struct mbuf *, int);
924struct mbuf *m_dup(PNATState, struct mbuf *, int);
925void m_cat(PNATState, struct mbuf *, struct mbuf *);
926struct mbuf *m_collapse(PNATState, struct mbuf *, int, int);
927void m_copyback(PNATState, struct mbuf *, int, int, c_caddr_t);
928struct mbuf *m_copym(PNATState, struct mbuf *, int, int, int);
929struct mbuf *m_copymdata(PNATState, struct mbuf *, struct mbuf *,
930 int, int, int, int);
931struct mbuf *m_copypacket(PNATState, struct mbuf *, int);
932struct mbuf *m_copyup(PNATState, struct mbuf *n, int len, int dstoff);
933void m_extadd(PNATState pData, struct mbuf *, caddr_t, u_int,
934 void (*)(void *, void *), void *, int, int);
935#endif
936void m_copydata(const struct mbuf *, int, int, caddr_t);
937void m_copy_pkthdr(struct mbuf *, struct mbuf *);
938void m_demote(struct mbuf *, int);
939struct mbuf *m_devget(char *, int, int, struct ifnet *,
940 void (*)(char *, caddr_t, u_int));
941int m_dup_pkthdr(struct mbuf *, struct mbuf *, int);
942u_int m_fixhdr(struct mbuf *);
943struct mbuf *m_fragment(struct mbuf *, int, int);
944#ifndef VBOX
945void m_freem(struct mbuf *);
946struct mbuf *m_getm2(struct mbuf *, int, int, short, int);
947struct mbuf *m_prepend(struct mbuf *, int, int);
948struct mbuf *m_pulldown(struct mbuf *, int, int, int *);
949struct mbuf *m_pullup(struct mbuf *, int);
950int m_sanity(struct mbuf *, int);
951struct mbuf *m_split(struct mbuf *, int, int);
952struct mbuf *m_unshare(struct mbuf *, int how);
953#else
954void m_freem(PNATState pData, struct mbuf *);
955struct mbuf *m_getm2(PNATState pData, struct mbuf *, int, int, short, int);
956struct mbuf *m_prepend(PNATState, struct mbuf *, int, int);
957struct mbuf *m_pulldown(PNATState, struct mbuf *, int, int, int *);
958struct mbuf *m_pullup(PNATState, struct mbuf *, int);
959int m_sanity(PNATState, struct mbuf *, int);
960struct mbuf *m_split(PNATState, struct mbuf *, int, int);
961struct mbuf *m_unshare(PNATState, struct mbuf *, int how);
962#endif
963struct mbuf *m_getptr(struct mbuf *, int, int *);
964u_int m_length(struct mbuf *, struct mbuf **);
965void m_move_pkthdr(struct mbuf *, struct mbuf *);
966void m_print(const struct mbuf *, int);
967struct mbuf *m_uiotombuf(struct uio *, int, int, int, int);
968
969/*-
970 * Network packets may have annotations attached by affixing a list of
971 * "packet tags" to the pkthdr structure. Packet tags are dynamically
972 * allocated semi-opaque data structures that have a fixed header
973 * (struct m_tag) that specifies the size of the memory block and a
974 * <cookie,type> pair that identifies it. The cookie is a 32-bit unique
975 * unsigned value used to identify a module or ABI. By convention this value
976 * is chosen as the date+time that the module is created, expressed as the
977 * number of seconds since the epoch (e.g., using date -u +'%s'). The type
978 * value is an ABI/module-specific value that identifies a particular
979 * annotation and is private to the module. For compatibility with systems
980 * like OpenBSD that define packet tags w/o an ABI/module cookie, the value
981 * PACKET_ABI_COMPAT is used to implement m_tag_get and m_tag_find
982 * compatibility shim functions and several tag types are defined below.
983 * Users that do not require compatibility should use a private cookie value
984 * so that packet tag-related definitions can be maintained privately.
985 *
986 * Note that the packet tag returned by m_tag_alloc has the default memory
987 * alignment implemented by malloc. To reference private data one can use a
988 * construct like:
989 *
990 * struct m_tag *mtag = m_tag_alloc(...);
991 * struct foo *p = (struct foo *)(mtag+1);
992 *
993 * if the alignment of struct m_tag is sufficient for referencing members of
994 * struct foo. Otherwise it is necessary to embed struct m_tag within the
995 * private data structure to ensure proper alignment; e.g.,
996 *
997 * struct foo {
998 * struct m_tag tag;
999 * ...
1000 * };
1001 * struct foo *p = (struct foo *) m_tag_alloc(...);
1002 * struct m_tag *mtag = &p->tag;
1003 */
1004
1005/*
1006 * Persistent tags stay with an mbuf until the mbuf is reclaimed. Otherwise
1007 * tags are expected to ``vanish'' when they pass through a network
1008 * interface. For most interfaces this happens normally as the tags are
1009 * reclaimed when the mbuf is free'd. However in some special cases
1010 * reclaiming must be done manually. An example is packets that pass through
1011 * the loopback interface. Also, one must be careful to do this when
1012 * ``turning around'' packets (e.g., icmp_reflect).
1013 *
1014 * To mark a tag persistent bit-or this flag in when defining the tag id.
1015 * The tag will then be treated as described above.
1016 */
1017#define MTAG_PERSISTENT 0x800
1018
1019#define PACKET_TAG_NONE 0 /* Nadda */
1020
1021/* Packet tags for use with PACKET_ABI_COMPAT. */
1022#define PACKET_TAG_IPSEC_IN_DONE 1 /* IPsec applied, in */
1023#define PACKET_TAG_IPSEC_OUT_DONE 2 /* IPsec applied, out */
1024#define PACKET_TAG_IPSEC_IN_CRYPTO_DONE 3 /* NIC IPsec crypto done */
1025#define PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED 4 /* NIC IPsec crypto req'ed */
1026#define PACKET_TAG_IPSEC_IN_COULD_DO_CRYPTO 5 /* NIC notifies IPsec */
1027#define PACKET_TAG_IPSEC_PENDING_TDB 6 /* Reminder to do IPsec */
1028#define PACKET_TAG_BRIDGE 7 /* Bridge processing done */
1029#define PACKET_TAG_GIF 8 /* GIF processing done */
1030#define PACKET_TAG_GRE 9 /* GRE processing done */
1031#define PACKET_TAG_IN_PACKET_CHECKSUM 10 /* NIC checksumming done */
1032#define PACKET_TAG_ENCAP 11 /* Encap. processing */
1033#define PACKET_TAG_IPSEC_SOCKET 12 /* IPSEC socket ref */
1034#define PACKET_TAG_IPSEC_HISTORY 13 /* IPSEC history */
1035#define PACKET_TAG_IPV6_INPUT 14 /* IPV6 input processing */
1036#define PACKET_TAG_DUMMYNET 15 /* dummynet info */
1037#define PACKET_TAG_DIVERT 17 /* divert info */
1038#define PACKET_TAG_IPFORWARD 18 /* ipforward info */
1039#define PACKET_TAG_MACLABEL (19 | MTAG_PERSISTENT) /* MAC label */
1040#define PACKET_TAG_PF 21 /* PF + ALTQ information */
1041#define PACKET_TAG_RTSOCKFAM 25 /* rtsock sa family */
1042#define PACKET_TAG_IPOPTIONS 27 /* Saved IP options */
1043#define PACKET_TAG_CARP 28 /* CARP info */
1044#ifdef VBOX
1045# define PACKET_TAG_ALIAS 0xab01
1046# define PACKET_TAG_ETHER 0xab02
1047# define PACKET_SERVICE 0xab03
1048#endif
1049
1050/* Specific cookies and tags. */
1051
1052/* Packet tag routines. */
1053struct m_tag *m_tag_alloc(u_int32_t, int, int, int);
1054void m_tag_delete(struct mbuf *, struct m_tag *);
1055void m_tag_delete_chain(struct mbuf *, struct m_tag *);
1056void m_tag_free_default(struct m_tag *);
1057struct m_tag *m_tag_locate(struct mbuf *, u_int32_t, int, struct m_tag *);
1058struct m_tag *m_tag_copy(struct m_tag *, int);
1059int m_tag_copy_chain(struct mbuf *, struct mbuf *, int);
1060void m_tag_delete_nonpersistent(struct mbuf *);
1061
1062/*
1063 * Initialize the list of tags associated with an mbuf.
1064 */
1065static __inline void
1066m_tag_init(struct mbuf *m)
1067{
1068
1069 SLIST_INIT(&m->m_pkthdr.tags);
1070}
1071
1072/*
1073 * Set up the contents of a tag. Note that this does not fill in the free
1074 * method; the caller is expected to do that.
1075 *
1076 * XXX probably should be called m_tag_init, but that was already taken.
1077 */
1078static __inline void
1079m_tag_setup(struct m_tag *t, u_int32_t cookie, int type, int len)
1080{
1081
1082 t->m_tag_id = type;
1083 t->m_tag_len = len;
1084 t->m_tag_cookie = cookie;
1085}
1086
1087/*
1088 * Reclaim resources associated with a tag.
1089 */
1090static __inline void
1091m_tag_free(struct m_tag *t)
1092{
1093
1094 (*t->m_tag_free)(t);
1095}
1096
1097/*
1098 * Return the first tag associated with an mbuf.
1099 */
1100static __inline struct m_tag *
1101m_tag_first(struct mbuf *m)
1102{
1103
1104 return (SLIST_FIRST(&m->m_pkthdr.tags));
1105}
1106
1107/*
1108 * Return the next tag in the list of tags associated with an mbuf.
1109 */
1110static __inline struct m_tag *
1111m_tag_next(struct mbuf *m, struct m_tag *t)
1112{
1113 NOREF(m);
1114 return (SLIST_NEXT(t, m_tag_link));
1115}
1116
1117/*
1118 * Prepend a tag to the list of tags associated with an mbuf.
1119 */
1120static __inline void
1121m_tag_prepend(struct mbuf *m, struct m_tag *t)
1122{
1123
1124 SLIST_INSERT_HEAD(&m->m_pkthdr.tags, t, m_tag_link);
1125}
1126
1127/*
1128 * Unlink a tag from the list of tags associated with an mbuf.
1129 */
1130static __inline void
1131m_tag_unlink(struct mbuf *m, struct m_tag *t)
1132{
1133
1134 SLIST_REMOVE(&m->m_pkthdr.tags, t, m_tag, m_tag_link);
1135}
1136
1137/* These are for OpenBSD compatibility. */
1138#define MTAG_ABI_COMPAT 0 /* compatibility ABI */
1139
1140static __inline struct m_tag *
1141m_tag_get(int type, int length, int fWait)
1142{
1143 return (m_tag_alloc(MTAG_ABI_COMPAT, type, length, fWait));
1144}
1145
1146static __inline struct m_tag *
1147m_tag_find(struct mbuf *m, int type, struct m_tag *start)
1148{
1149 return (SLIST_EMPTY(&m->m_pkthdr.tags) ? (struct m_tag *)NULL :
1150 m_tag_locate(m, MTAG_ABI_COMPAT, type, start));
1151}
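/*
 * End-to-end sketch of the compatibility tag API above: allocate, fill,
 * attach, and later look up a tag (MY_TAG_TYPE and struct my_data are
 * illustrative and not defined in this header):
 *
 *     struct m_tag *t = m_tag_get(MY_TAG_TYPE, sizeof(struct my_data),
 *         M_NOWAIT);
 *     if (t != NULL) {
 *         ((struct my_data *)(t + 1))->value = 42;
 *         m_tag_prepend(m, t);
 *     }
 *     ...
 *     t = m_tag_find(m, MY_TAG_TYPE, NULL);
 *     if (t != NULL)
 *         value = ((struct my_data *)(t + 1))->value;
 */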
1152
1153/* XXX temporary FIB methods probably eventually use tags.*/
1154#define M_FIBSHIFT 28
1155#define M_FIBMASK 0x0F
1156
1157/* get the fib from an mbuf and if it is not set, return the default */
1158#define M_GETFIB(_m) \
1159 ((((_m)->m_flags & M_FIB) >> M_FIBSHIFT) & M_FIBMASK)
1160
1161#define M_SETFIB(_m, _fib) do { \
1162 _m->m_flags &= ~M_FIB; \
1163 _m->m_flags |= (((_fib) << M_FIBSHIFT) & M_FIB); \
1164} while (0)
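/*
 * Sketch: stamping and later recovering a routing table (FIB) number with
 * the temporary flag encoding above:
 *
 *     M_SETFIB(m, 2);
 *     fib = M_GETFIB(m);    yields 2 here
 */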
1165
1166#endif /* _KERNEL */
1167
1168#endif /* !_SYS_MBUF_H_ */