VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/misc.c @ 28520

Last change on this file since 28520 was 28520, checked in by vboxsync, 15 years ago

NAT: the destructor and fini hooks are now called from slirp_uma_free.
zone_drain and uma_zone_exhausted_nolock are implemented to migrate items from a secondary zone back to the master zone on demand.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 11.7 KB
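The change described above lets a caller detect that a zone has run out of preallocated items and reclaim cached items from a secondary zone back into its master. A minimal sketch of that usage pattern, assuming the declarations from this file are in scope; the helper name refill_from_secondary and the zone variables zMaster/zSecondary are illustrative only, not part of this file:

    /* Illustrative only: give a secondary zone's unused items back to its
     * master when the master can no longer satisfy allocations. */
    static void refill_from_secondary(uma_zone_t zMaster, uma_zone_t zSecondary)
    {
        /* uma_zone_exhausted_nolock() reports whether every preallocated
         * item of the zone is currently in use. */
        if (uma_zone_exhausted_nolock(zMaster))
        {
            /* zone_drain() moves zSecondary's cached free items back onto
             * the master zone's free list, so a subsequent slirp_uma_alloc()
             * on the master (or on another secondary zone) can succeed. */
            zone_drain(zSecondary);
        }
    }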
/* $Id: misc.c 28520 2010-04-20 13:36:22Z vboxsync $ */
/** @file
 * NAT - helpers.
 */

/*
 * Copyright (C) 2006-2010 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/*
 * This code is based on:
 *
 * Copyright (c) 1995 Danny Gasparovski.
 *
 * Please read the file COPYRIGHT for the
 * terms and conditions of the copyright.
 */

#define WANT_SYS_IOCTL_H
#include <slirp.h>

#ifndef HAVE_INET_ATON
int
inet_aton(const char *cp, struct in_addr *ia)
{
    u_int32_t addr = inet_addr(cp);
    if (addr == 0xffffffff)
        return 0;
    ia->s_addr = addr;
    return 1;
}
#endif

/*
 * Get our IP address and put it in our_addr
 */
void
getouraddr(PNATState pData)
{
    our_addr.s_addr = loopback_addr.s_addr;
}

struct quehead
{
    struct quehead *qh_link;
    struct quehead *qh_rlink;
};

void
insque(PNATState pData, void *a, void *b)
{
    register struct quehead *element = (struct quehead *) a;
    register struct quehead *head = (struct quehead *) b;
    element->qh_link = head->qh_link;
    head->qh_link = (struct quehead *)element;
    element->qh_rlink = (struct quehead *)head;
    ((struct quehead *)(element->qh_link))->qh_rlink = (struct quehead *)element;
}

void
remque(PNATState pData, void *a)
{
    register struct quehead *element = (struct quehead *) a;
    ((struct quehead *)(element->qh_link))->qh_rlink = element->qh_rlink;
    ((struct quehead *)(element->qh_rlink))->qh_link = element->qh_link;
    element->qh_rlink = NULL;
    /* element->qh_link = NULL; TCP FIN1 crashes if you do this. Why ? */
}


/*
 * Set fd non-blocking
 */
void
fd_nonblock(int fd)
{
#ifdef FIONBIO
    int opt = 1;

    ioctlsocket(fd, FIONBIO, &opt);
#else
    int opt;

    opt = fcntl(fd, F_GETFL, 0);
    opt |= O_NONBLOCK;
    fcntl(fd, F_SETFL, opt);
#endif
}


#ifdef VBOX_WITH_SLIRP_BSD_MBUF
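/*
 * Memory layout of a zone item, as set up by uma_zone_set_max() and
 * checked by slirp_uma_alloc()/slirp_uma_free():
 *
 *   [struct item header][payload of zone->size bytes][uint32_t 0xabadbabe]
 *
 * Callers are handed a pointer to the payload (&it[1]); the trailing
 * marker is asserted on free to catch writes past the item payload.
 */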
#define ITEM_MAGIC 0xdead0001
struct item
{
    uint32_t magic;
    uma_zone_t zone;
    uint32_t ref_count;
    LIST_ENTRY(item) list;
};

#define ZONE_MAGIC 0xdead0002
struct uma_zone
{
    uint32_t magic;
    PNATState pData; /* to minimize changes in the rest of UMA emulation code */
    RTCRITSECT csZone;
    const char *name;
    size_t size; /* item size */
    ctor_t pfCtor;
    dtor_t pfDtor;
    zinit_t pfInit;
    zfini_t pfFini;
    uma_alloc_t pfAlloc;
    uma_free_t pfFree;
    int max_items;
    int cur_items;
    LIST_HEAD(RT_NOTHING, item) used_items;
    LIST_HEAD(RT_NOTHING, item) free_items;
    uma_zone_t master_zone;
    void *area;
};

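/*
 * Zone allocator: take an item from the zone's free list if one is
 * available; for a sub-zone with an empty free list, borrow a chunk from
 * the master zone, re-label it for this zone and retry.  Returns a
 * pointer to the item payload, or NULL if neither zone has room.
 */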
static void *slirp_uma_alloc(uma_zone_t zone,
                             int size, uint8_t *pflags, int fWait)
{
    struct item *it;
    uint8_t *sub_area;
    void *ret = NULL;
    int rc;

    RTCritSectEnter(&zone->csZone);
    for (;;)
    {
        if (!LIST_EMPTY(&zone->free_items))
        {
            it = LIST_FIRST(&zone->free_items);
            rc = 0;
            if (zone->pfInit)
                rc = zone->pfInit(zone->pData, (void *)&it[1], zone->size, M_DONTWAIT);
            if (rc == 0)
            {
                zone->cur_items++;
                LIST_REMOVE(it, list);
                LIST_INSERT_HEAD(&zone->used_items, it, list);
                ret = (void *)&it[1];
            }
            else
            {
                ret = NULL;
            }
            break;
        }

        if (!zone->master_zone)
        {
            /* We're in the master zone and we can't allocate more. */
            Log2(("NAT: no room on %s zone\n", zone->name));
            break;
        }

        /* We're in a sub-zone: get a chunk from the master zone and
         * turn it into an item of this sub-zone.
         */
        sub_area = slirp_uma_alloc(zone->master_zone, zone->master_zone->size, NULL, 0);
        if (!sub_area)
        {
            /* No room on master */
            Log2(("NAT: no room on %s zone for %s zone\n", zone->master_zone->name, zone->name));
            break;
        }
        zone->max_items++;
        it = &((struct item *)sub_area)[-1];
        /* This is the master zone's chunk descriptor; we have to remove
         * it from the master's list first.
         */
        Assert((it->zone && it->zone->magic == ZONE_MAGIC));
        RTCritSectEnter(&it->zone->csZone);
        /** @todo should we adjust the master zone's counters? */
        LIST_REMOVE(it, list);
        RTCritSectLeave(&it->zone->csZone);
        /** @todo whether to add '+ zone->size' should depend on the flags */
        memset(it, 0, sizeof(struct item));
        it->zone = zone;
        it->magic = ITEM_MAGIC;
        LIST_INSERT_HEAD(&zone->free_items, it, list);
        if (zone->cur_items >= zone->max_items)
            LogRel(("NAT: zone(%s) has reached its maximum\n", zone->name));
    }
    RTCritSectLeave(&zone->csZone);
    return ret;
}

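/*
 * Zone free routine: returns an item to its zone's free list.  As noted
 * in the change message above, the zone's fini and destructor hooks are
 * invoked here before the item goes back on the free list.
 */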
static void slirp_uma_free(void *item, int size, uint8_t flags)
{
    struct item *it;
    uma_zone_t zone;

    Assert(item);
    it = &((struct item *)item)[-1];
    Assert(it->magic == ITEM_MAGIC);
    zone = it->zone;
    /* check the boundary magic */
    Assert((*(uint32_t *)(((uint8_t *)&it[1]) + zone->size) == 0xabadbabe));
    RTCritSectEnter(&zone->csZone);
    Assert(zone->magic == ZONE_MAGIC);
    LIST_REMOVE(it, list);
    if (zone->pfFini)
    {
        zone->pfFini(zone->pData, item, zone->size);
    }
    if (zone->pfDtor)
    {
        zone->pfDtor(zone->pData, item, zone->size, NULL);
    }
    LIST_INSERT_HEAD(&zone->free_items, it, list);
    zone->cur_items--;
    RTCritSectLeave(&zone->csZone);
}

uma_zone_t uma_zcreate(PNATState pData, char *name, size_t size,
                       ctor_t ctor, dtor_t dtor, zinit_t init, zfini_t fini, int flags1, int flags2)
{
    uma_zone_t zone = RTMemAllocZ(sizeof(struct uma_zone));
    Assert((pData));
    if (zone == NULL)
        return NULL;
    zone->magic = ZONE_MAGIC;
    zone->pData = pData;
    zone->name = name;
    zone->size = size;
    zone->pfCtor = ctor;
    zone->pfDtor = dtor;
    zone->pfInit = init;
    zone->pfFini = fini;
    zone->pfAlloc = slirp_uma_alloc;
    zone->pfFree = slirp_uma_free;
    RTCritSectInit(&zone->csZone);
    return zone;
}

uma_zone_t uma_zsecond_create(char *name, ctor_t ctor,
                              dtor_t dtor, zinit_t init, zfini_t fini, uma_zone_t master)
{
    uma_zone_t zone;
    Assert(master);
    zone = RTMemAllocZ(sizeof(struct uma_zone));
    if (zone == NULL)
        return NULL;

    Assert((master && master->pData));
    zone->magic = ZONE_MAGIC;
    zone->pData = master->pData;
    zone->name = name;
    zone->pfCtor = ctor;
    zone->pfDtor = dtor;
    zone->pfInit = init;
    zone->pfFini = fini;
    zone->pfAlloc = slirp_uma_alloc;
    zone->pfFree = slirp_uma_free;
    zone->size = master->size;
    zone->master_zone = master;
    RTCritSectInit(&zone->csZone);
    return zone;
}

void uma_zone_set_max(uma_zone_t zone, int max)
{
    int i = 0;
    struct item *it;
    zone->max_items = max;
    zone->area = RTMemAllocZ(max * (sizeof(struct item) + zone->size + sizeof(uint32_t)));
    for (; i < max; ++i)
    {
        it = (struct item *)(((uint8_t *)zone->area) + i * (sizeof(struct item) + zone->size + sizeof(uint32_t)));
        it->magic = ITEM_MAGIC;
        it->zone = zone;
        *(uint32_t *)(((uint8_t *)&it[1]) + zone->size) = 0xabadbabe;
        LIST_INSERT_HEAD(&zone->free_items, it, list);
    }
}

void uma_zone_set_allocf(uma_zone_t zone, uma_alloc_t pfAlloc)
{
    zone->pfAlloc = pfAlloc;
}

void uma_zone_set_freef(uma_zone_t zone, uma_free_t pfFree)
{
    zone->pfFree = pfFree;
}

uint32_t *uma_find_refcnt(uma_zone_t zone, void *mem)
{
    /** @todo (vvl) this function is supposed to work with a special zone
     *  that stores reference counters */
    struct item *it = (struct item *)mem; /* 1st element */
    Assert(mem != NULL);
    Assert(zone->magic == ZONE_MAGIC);
    /* to return a pointer to the counter we need element 0 (the item header) */
    Assert(it[-1].magic == ITEM_MAGIC);
    return &it[-1].ref_count;
}

void *uma_zalloc_arg(uma_zone_t zone, void *args, int how)
{
    void *mem;
    Assert(zone->magic == ZONE_MAGIC);
    if (zone->pfAlloc == NULL)
        return NULL;
    RTCritSectEnter(&zone->csZone);
    mem = zone->pfAlloc(zone, zone->size, NULL, 0);
    if (mem != NULL)
    {
        if (zone->pfCtor)
            zone->pfCtor(zone->pData, mem, zone->size, args, M_DONTWAIT);
    }
    RTCritSectLeave(&zone->csZone);
    return mem;
}

void uma_zfree(uma_zone_t zone, void *item)
{
    uma_zfree_arg(zone, item, NULL);
}

void uma_zfree_arg(uma_zone_t zone, void *mem, void *flags)
{
    struct item *it;
    Assert(zone->magic == ZONE_MAGIC);
    Assert((zone->pfFree));
    Assert((mem));

    RTCritSectEnter(&zone->csZone);
    it = &((struct item *)mem)[-1];
    Assert((it->magic == ITEM_MAGIC));
    Assert((zone->magic == ZONE_MAGIC && zone == it->zone));

    zone->pfFree(mem, 0, 0);
    RTCritSectLeave(&zone->csZone);
}

int uma_zone_exhausted_nolock(uma_zone_t zone)
{
    int fExhausted;
    RTCritSectEnter(&zone->csZone);
    fExhausted = (zone->cur_items == zone->max_items);
    RTCritSectLeave(&zone->csZone);
    return fExhausted;
}

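/*
 * Drain a secondary zone: move its cached free items back to the master
 * zone's free list so they can be reused for other allocations on demand.
 */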
void zone_drain(uma_zone_t zone)
{
    struct item *it;
    uma_zone_t master_zone;

    /** @todo (vvl) what should we do with a zone that has no backing (master) zone? */
    Assert((zone->master_zone));
    master_zone = zone->master_zone;
    while (!LIST_EMPTY(&zone->free_items))
    {
        it = LIST_FIRST(&zone->free_items);
        RTCritSectEnter(&zone->csZone);
        LIST_REMOVE(it, list);
        zone->max_items--;
        RTCritSectLeave(&zone->csZone);
        it->zone = master_zone;
        RTCritSectEnter(&master_zone->csZone);
        LIST_INSERT_HEAD(&master_zone->free_items, it, list);
        master_zone->cur_items--;
        RTCritSectLeave(&master_zone->csZone);
    }
}

void slirp_null_arg_free(void *mem, void *arg)
{
    /** @todo (r=vvl) make this smarter */
    Assert(mem);
    RTMemFree(mem);
}

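/*
 * Allocate an mbuf with external storage big enough to hold at least
 * cbMin bytes, rounding the request up to the next supported cluster
 * size (MCLBYTES, MJUM9BYTES or MJUM16BYTES).
 */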
void *uma_zalloc(uma_zone_t zone, int len)
{
    return NULL;
}

struct mbuf *slirp_ext_m_get(PNATState pData, size_t cbMin, void **ppvBuf, size_t *pcbBuf)
{
    struct mbuf *m;
    size_t size = MCLBYTES;
    if (cbMin < MSIZE)
        size = MCLBYTES;
    else if (cbMin < MCLBYTES)
        size = MCLBYTES;
    else if (cbMin < MJUM9BYTES)
        size = MJUM9BYTES;
    else if (cbMin < MJUM16BYTES)
        size = MJUM16BYTES;
    else
        AssertMsgFailed(("Unsupported size"));

    m = m_getjcl(pData, M_NOWAIT, MT_HEADER, M_PKTHDR, size);
    if (m == NULL)
    {
        *ppvBuf = NULL;
        *pcbBuf = 0;
        return NULL;
    }
    m->m_len = size;
    *ppvBuf = mtod(m, void *);
    *pcbBuf = size;
    return m;
}

void slirp_ext_m_free(PNATState pData, struct mbuf *m)
{
    m_freem(pData, m);
}

static void zone_destroy(uma_zone_t zone)
{
    RTCritSectEnter(&zone->csZone);
    LogRel(("NAT: zone(nm:%s, used:%d)\n", zone->name, zone->cur_items));
    if (zone->master_zone)
        RTMemFree(zone->area);
    RTCritSectLeave(&zone->csZone);
    RTCritSectDelete(&zone->csZone);
    RTMemFree(zone);
}

void m_fini(PNATState pData)
{
    zone_destroy(pData->zone_mbuf);
    zone_destroy(pData->zone_clust);
    zone_destroy(pData->zone_pack);
    zone_destroy(pData->zone_jumbop);
    zone_destroy(pData->zone_jumbo9);
    zone_destroy(pData->zone_jumbo16);
    /** @todo do finalization here. */
}
#endif /* VBOX_WITH_SLIRP_BSD_MBUF */