VirtualBox

source: vbox/trunk/src/VBox/Additions/WINNT/Graphics/Wine/libWine/mmap.c@16477

Last change on this file since 16477 was 16477, checked in by vboxsync, 16 years ago

LGPL disclaimer by filemuncher

  • Property svn:eol-style set to native
File size: 19.1 KB
/*
 * Wine memory mappings support
 *
 * Copyright 2000, 2004 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

/*
 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */

#include "config.h"
#include "wine/port.h"

#include <assert.h>
#include <ctype.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_STDINT_H
# include <stdint.h>
#endif

#include "wine/library.h"
#include "wine/list.h"

struct reserved_area
{
    struct list entry;
    void       *base;
    size_t      size;
};

static struct list reserved_areas = LIST_INIT(reserved_areas);
static const unsigned int granularity_mask = 0xffff;  /* reserved areas have 64k granularity */

#ifdef HAVE_MMAP

#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
#ifndef MAP_PRIVATE
#define MAP_PRIVATE 0
#endif
#ifndef MAP_ANON
#define MAP_ANON 0
#endif

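/* return a file descriptor to /dev/zero, used in place of MAP_ANON on platforms that lack it */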
static inline int get_fdzero(void)
{
    static int fd = -1;

    if (MAP_ANON == 0 && fd == -1)
    {
        if ((fd = open( "/dev/zero", O_RDONLY )) == -1)
        {
            perror( "/dev/zero: open" );
            exit(1);
        }
    }
    return fd;
}

#if (defined(__svr4__) || defined(__NetBSD__)) && !defined(MAP_TRYFIXED)
/***********************************************************************
 * try_mmap_fixed
 *
 * The purpose of this routine is to emulate the behaviour of
 * the Linux mmap() routine if a non-NULL address is passed,
 * but the MAP_FIXED flag is not set. Linux in this case tries
 * to place the mapping at the specified address, *unless* the
 * range is already in use. Solaris, however, completely ignores
 * the address argument in this case.
 *
 * As Wine code occasionally relies on the Linux behaviour, e.g. to
 * be able to map non-relocatable PE executables to their proper
 * start addresses, or to map the DOS memory to 0, this routine
 * emulates the Linux behaviour by checking whether the desired
 * address range is still available, and placing the mapping there
 * using MAP_FIXED if so.
 */
static int try_mmap_fixed (void *addr, size_t len, int prot, int flags,
                           int fildes, off_t off)
{
    char * volatile result = NULL;
    int pagesize = getpagesize();
    pid_t pid;

    /* We only try to map to a fixed address if
       addr is non-NULL and properly aligned,
       and MAP_FIXED isn't already specified. */

    if ( !addr )
        return 0;
    if ( (uintptr_t)addr & (pagesize-1) )
        return 0;
    if ( flags & MAP_FIXED )
        return 0;

    /* We use vfork() to freeze all threads of the
       current process. This allows us to check without
       race condition whether the desired memory range is
       already in use. Note that because vfork() shares
       the address spaces between parent and child, we
       can actually perform the mapping in the child. */

    if ( (pid = vfork()) == -1 )
    {
        perror("try_mmap_fixed: vfork");
        exit(1);
    }
    if ( pid == 0 )
    {
        int i;
        char vec;

        /* We call mincore() for every page in the desired range.
           If any of these calls succeeds, the page is already
           mapped and we must fail. */
        for ( i = 0; i < len; i += pagesize )
            if ( mincore( (caddr_t)addr + i, pagesize, &vec ) != -1 )
                _exit(1);

        /* Perform the mapping with MAP_FIXED set. This is safe
           now, as none of the pages is currently in use. */
        result = mmap( addr, len, prot, flags | MAP_FIXED, fildes, off );
        if ( result == addr )
            _exit(0);

        if ( result != (void *) -1 ) /* This should never happen ... */
            munmap( result, len );

        _exit(1);
    }

    /* vfork() lets the parent continue only after the child
       has exited. Furthermore, Wine sets SIGCHLD to SIG_IGN,
       so we don't need to wait for the child. */

    return result == addr;
}

#elif defined(__APPLE__)

#include <mach/mach_init.h>
#include <mach/vm_map.h>

/*
 * On Darwin, we can use the Mach call vm_allocate to allocate
 * anonymous memory at the specified address, and then use mmap with
 * MAP_FIXED to replace the mapping.
 */
static int try_mmap_fixed (void *addr, size_t len, int prot, int flags,
                           int fildes, off_t off)
{
    vm_address_t result = (vm_address_t)addr;

    if (!vm_allocate(mach_task_self(),&result,len,0))
    {
        if (mmap( (void *)result, len, prot, flags | MAP_FIXED, fildes, off ) != MAP_FAILED)
            return 1;
        vm_deallocate(mach_task_self(),result,len);
    }
    return 0;
}

#endif  /* (__svr4__ || __NetBSD__) && !MAP_TRYFIXED */


/***********************************************************************
 * wine_anon_mmap
 *
 * Portable wrapper for anonymous mmaps
 */
void *wine_anon_mmap( void *start, size_t size, int prot, int flags )
{
#ifdef MAP_SHARED
    flags &= ~MAP_SHARED;
#endif

    /* Linux EINVAL's on us if we don't pass MAP_PRIVATE to an anon mmap */
    flags |= MAP_PRIVATE | MAP_ANON;

    if (!(flags & MAP_FIXED))
    {
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
        /* Even FreeBSD 5.3 does not properly support NULL here. */
        if( start == NULL ) start = (void *)0x110000;
#endif

#ifdef MAP_TRYFIXED
        /* If available, this will attempt a fixed mapping in-kernel */
        flags |= MAP_TRYFIXED;
#elif defined(__svr4__) || defined(__NetBSD__) || defined(__APPLE__)
        if ( try_mmap_fixed( start, size, prot, flags, get_fdzero(), 0 ) )
            return start;
#endif
    }
    return mmap( start, size, prot, flags, get_fdzero(), 0 );
}


/***********************************************************************
 * mmap_reserve
 *
 * mmap wrapper used for reservations, only maps the specified address
 */
static inline int mmap_reserve( void *addr, size_t size )
{
    void *ptr;
    int flags = MAP_PRIVATE | MAP_ANON | MAP_NORESERVE;

#ifdef MAP_TRYFIXED
    flags |= MAP_TRYFIXED;
#elif defined(__APPLE__)
    return try_mmap_fixed( addr, size, PROT_NONE, flags, get_fdzero(), 0 );
#endif
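    /* map with addr as a hint and check whether the kernel honoured it; undo the mapping if not */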
    ptr = mmap( addr, size, PROT_NONE, flags, get_fdzero(), 0 );
    if (ptr != addr && ptr != (void *)-1) munmap( ptr, size );
    return (ptr == addr);
}


/***********************************************************************
 * reserve_area
 *
 * Reserve as much memory as possible in the given area.
 */
#if defined(__i386__) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)  /* commented out until FreeBSD gets fixed */
static void reserve_area( void *addr, void *end )
{
    size_t size = (char *)end - (char *)addr;

#if (defined(__svr4__) || defined(__NetBSD__)) && !defined(MAP_TRYFIXED)
    /* try_mmap_fixed is inefficient when using vfork, so we need a different algorithm here */
    /* we assume no other thread is running at this point */
    size_t i, pagesize = getpagesize();
    char vec;

    while (size)
    {
        for (i = 0; i < size; i += pagesize)
            if (mincore( (caddr_t)addr + i, pagesize, &vec ) != -1) break;

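        /* round the length of the unmapped run found above down to the 64k reservation granularity */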
        i &= ~granularity_mask;
        if (i && mmap( addr, i, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                       get_fdzero(), 0 ) != (void *)-1)
            wine_mmap_add_reserved_area( addr, i );

        i += granularity_mask + 1;
        if ((char *)addr + i < (char *)addr) break;  /* overflow */
        addr = (char *)addr + i;
        if (addr >= end) break;
        size = (char *)end - (char *)addr;
    }
#else
    if (!size) return;

    if (mmap_reserve( addr, size ))
    {
        wine_mmap_add_reserved_area( addr, size );
        return;
    }
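    /* reservation failed: split the range in half at a 64k boundary and retry each part separately */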
    if (size > granularity_mask + 1)
    {
        size_t new_size = (size / 2) & ~granularity_mask;
        reserve_area( addr, (char *)addr + new_size );
        reserve_area( (char *)addr + new_size, end );
    }
#endif
}


/***********************************************************************
 * reserve_malloc_space
 *
 * Solaris malloc is not smart enough to obtain space through mmap(), so try to make
 * sure that there is some available sbrk() space before we reserve other things.
 */
static void reserve_malloc_space( size_t size )
{
#ifdef __sun
    size_t i, count = size / 1024;
    void **ptrs = malloc( count * sizeof(ptrs[0]) );

    if (!ptrs) return;

    for (i = 0; i < count; i++) if (!(ptrs[i] = malloc( 1024 ))) break;
    if (i--)  /* free everything except the last one */
        while (i) free( ptrs[--i] );
    free( ptrs );
#endif
}

#endif  /* __i386__ */


/***********************************************************************
 * reserve_dos_area
 *
 * Reserve the DOS area (0x00000000-0x00110000).
 */
static void reserve_dos_area(void)
{
    const size_t page_size = getpagesize();
    const size_t dos_area_size = 0x110000;
    void *ptr;

    /* first page has to be handled specially */
    ptr = wine_anon_mmap( (void *)page_size, dos_area_size - page_size, PROT_NONE, MAP_NORESERVE );
    if (ptr != (void *)page_size)
    {
        if (ptr != (void *)-1) munmap( ptr, dos_area_size - page_size );
        return;
    }
    /* now add first page with MAP_FIXED */
    wine_anon_mmap( NULL, page_size, PROT_NONE, MAP_NORESERVE|MAP_FIXED );
    wine_mmap_add_reserved_area( NULL, dos_area_size );
}


/***********************************************************************
 * mmap_init
 */
void mmap_init(void)
{
    struct reserved_area *area;
    struct list *ptr;
#if defined(__i386__) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)  /* commented out until FreeBSD gets fixed */
    char stack;
    char * const stack_ptr = &stack;
    char *user_space_limit = (char *)0x7ffe0000;

    reserve_malloc_space( 8 * 1024 * 1024 );

    /* check for a reserved area starting at the user space limit */
    /* to avoid wasting time trying to allocate it again */
    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if ((char *)area->base > user_space_limit) break;
        if ((char *)area->base + area->size > user_space_limit)
        {
            user_space_limit = (char *)area->base + area->size;
            break;
        }
    }

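    /* reserve the space above the user space limit, leaving the 64k block that contains
       the stack pointer and the block below it unreserved */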
    if (stack_ptr >= user_space_limit)
    {
        char *end = 0;
        char *base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) - (granularity_mask + 1);
        if (base > user_space_limit) reserve_area( user_space_limit, base );
        base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) + (granularity_mask + 1);
#ifdef linux
        /* Linux heuristic: assume the stack is near the end of the address */
        /* space, this avoids a lot of futile allocation attempts */
        end = (char *)(((unsigned long)base + 0x0fffffff) & 0xf0000000);
#endif
        reserve_area( base, end );
    }
    else reserve_area( user_space_limit, 0 );
#endif  /* __i386__ */

    /* reserve the DOS area if not already done */

    ptr = list_head( &reserved_areas );
    if (ptr)
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (!area->base) return;  /* already reserved */
    }
    reserve_dos_area();
}

#else /* HAVE_MMAP */

void *wine_anon_mmap( void *start, size_t size, int prot, int flags )
{
    return (void *)-1;
}

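/* dummy munmap so the reserved area bookkeeping below still compiles without mmap support */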
static inline int munmap( void *ptr, size_t size )
{
    return 0;
}

void mmap_init(void)
{
}

#endif

/***********************************************************************
 * wine_mmap_add_reserved_area
 *
 * Add an address range to the list of reserved areas.
 * Caller must have made sure the range is not used by anything else.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
void wine_mmap_add_reserved_area( void *addr, size_t size )
{
    struct reserved_area *area;
    struct list *ptr;

    if (!((char *)addr + size)) size--;  /* avoid wrap-around */

    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (area->base > addr)
        {
            /* try to merge with the next one */
            if ((char *)addr + size == (char *)area->base)
            {
                area->base = addr;
                area->size += size;
                return;
            }
            break;
        }
        else if ((char *)area->base + area->size == (char *)addr)
        {
            /* merge with the previous one */
            area->size += size;

            /* try to merge with the next one too */
            if ((ptr = list_next( &reserved_areas, ptr )))
            {
                struct reserved_area *next = LIST_ENTRY( ptr, struct reserved_area, entry );
                if ((char *)addr + size == (char *)next->base)
                {
                    area->size += next->size;
                    list_remove( &next->entry );
                    free( next );
                }
            }
            return;
        }
    }

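    /* no adjacent area to merge with: insert a new area here, keeping the list sorted by base address */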
    if ((area = malloc( sizeof(*area) )))
    {
        area->base = addr;
        area->size = size;
        list_add_before( ptr, &area->entry );
    }
}


/***********************************************************************
 * wine_mmap_remove_reserved_area
 *
 * Remove an address range from the list of reserved areas.
 * If 'unmap' is non-zero the range is unmapped too.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
void wine_mmap_remove_reserved_area( void *addr, size_t size, int unmap )
{
    struct reserved_area *area;
    struct list *ptr;

    if (!((char *)addr + size)) size--;  /* avoid wrap-around */

    ptr = list_head( &reserved_areas );
    /* find the first area covering address */
    while (ptr)
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if ((char *)area->base >= (char *)addr + size) break;  /* outside the range */
        if ((char *)area->base + area->size > (char *)addr)  /* overlaps range */
        {
            if (area->base >= addr)
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range overlaps beginning of area only -> shrink area */
                    if (unmap) munmap( area->base, (char *)addr + size - (char *)area->base );
                    area->size -= (char *)addr + size - (char *)area->base;
                    area->base = (char *)addr + size;
                    break;
                }
                else
                {
                    /* range contains the whole area -> remove area completely */
                    ptr = list_next( &reserved_areas, ptr );
                    if (unmap) munmap( area->base, area->size );
                    list_remove( &area->entry );
                    free( area );
                    continue;
                }
            }
            else
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range is in the middle of area -> split area in two */
                    struct reserved_area *new_area = malloc( sizeof(*new_area) );
                    if (new_area)
                    {
                        new_area->base = (char *)addr + size;
                        new_area->size = (char *)area->base + area->size - (char *)new_area->base;
                        list_add_after( ptr, &new_area->entry );
                    }
                    else size = (char *)area->base + area->size - (char *)addr;
                    area->size = (char *)addr - (char *)area->base;
                    if (unmap) munmap( addr, size );
                    break;
                }
                else
                {
                    /* range overlaps end of area only -> shrink area */
                    if (unmap) munmap( addr, (char *)area->base + area->size - (char *)addr );
                    area->size = (char *)addr - (char *)area->base;
                }
            }
        }
        ptr = list_next( &reserved_areas, ptr );
    }
}


/***********************************************************************
 * wine_mmap_is_in_reserved_area
 *
 * Check if the specified range is included in a reserved area.
 * Returns 1 if range is fully included, 0 if range is not included
 * at all, and -1 if it is only partially included.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
int wine_mmap_is_in_reserved_area( void *addr, size_t size )
{
    struct reserved_area *area;
    struct list *ptr;

    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (area->base > addr) break;
        if ((char *)area->base + area->size <= (char *)addr) continue;
        /* area must contain block completely */
        if ((char *)area->base + area->size < (char *)addr + size) return -1;
        return 1;
    }
    return 0;
}


/***********************************************************************
 * wine_mmap_enum_reserved_areas
 *
 * Enumerate the list of reserved areas, sorted by addresses.
 * If enum_func returns a non-zero value, enumeration is stopped and the value is returned.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
int wine_mmap_enum_reserved_areas( int (*enum_func)(void *base, size_t size, void *arg), void *arg,
                                   int top_down )
{
    int ret = 0;
    struct list *ptr;

    if (top_down)
    {
        for (ptr = reserved_areas.prev; ptr != &reserved_areas; ptr = ptr->prev)
        {
            struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
            if ((ret = enum_func( area->base, area->size, arg ))) break;
        }
    }
    else
    {
        for (ptr = reserved_areas.next; ptr != &reserved_areas; ptr = ptr->next)
        {
            struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
            if ((ret = enum_func( area->base, area->size, arg ))) break;
        }
    }
    return ret;
}