VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/Support/linux/SUPDrv-linux.c@ 417

Last change on this file since 417 was 387, checked in by vboxsync, 18 years ago

Use RTR0ProcHandleSelf / RTProcSelf. Implemented clientDied() cleanup on darwin.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 46.1 KB
Line 
1/** @file
2 * The VirtualBox Support Driver - Linux hosts.
3 */
4
5/*
6 * Copyright (C) 2006 InnoTek Systemberatung GmbH
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License as published by the Free Software Foundation,
12 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
13 * distribution. VirtualBox OSE is distributed in the hope that it will
14 * be useful, but WITHOUT ANY WARRANTY of any kind.
15 *
16 * If you received this file as part of a commercial VirtualBox
17 * distribution, then only the terms of your commercial VirtualBox
18 * license agreement apply instead of the previous paragraph.
19 */
20
21/*******************************************************************************
22* Header Files *
23*******************************************************************************/
24#include "SUPDRV.h"
25#include <iprt/assert.h>
26#include <iprt/spinlock.h>
27#include <iprt/semaphore.h>
28#include <iprt/initterm.h>
29#include <iprt/process.h>
30#include <iprt/err.h>
31#include <iprt/mem.h>
32
33#include <linux/module.h>
34#include <linux/kernel.h>
35#include <linux/init.h>
36#include <linux/fs.h>
37#include <linux/mm.h>
38#include <linux/pagemap.h>
39#include <linux/slab.h>
40#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
41# include <linux/jiffies.h>
42#endif
43#include <asm/mman.h>
44#include <asm/io.h>
45#include <asm/uaccess.h>
46#ifdef CONFIG_DEVFS_FS
47# include <linux/devfs_fs_kernel.h>
48#endif
49#ifdef CONFIG_VBOXDRV_AS_MISC
50# include <linux/miscdevice.h>
51#endif
52#ifdef CONFIG_X86_LOCAL_APIC
53# include <asm/apic.h>
54# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
55# include <asm/nmi.h>
56# endif
57#endif
58
59#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
60# ifndef page_to_pfn
61# define page_to_pfn(page) ((page) - mem_map)
62# endif
63# include <asm/pgtable.h>
64# define global_flush_tlb __flush_tlb_global
65#endif
66
67/* devfs defines */
68#if defined(CONFIG_DEVFS_FS) && !defined(CONFIG_VBOXDRV_AS_MISC)
69# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
70
71# define VBOX_REGISTER_DEVFS() \
72({ \
73 void *rc = NULL; \
74 if (devfs_mk_cdev(MKDEV(DEVICE_MAJOR, 0), \
75 S_IFCHR | S_IRUGO | S_IWUGO, \
76 DEVICE_NAME) == 0) \
77 rc = (void *)' '; /* return not NULL */ \
78 rc; \
79 })
80
81# define VBOX_UNREGISTER_DEVFS(handle) \
82 devfs_remove(DEVICE_NAME);
83
84# else /* < 2.6.0 */
85
86# define VBOX_REGISTER_DEVFS() \
87 devfs_register(NULL, DEVICE_NAME, DEVFS_FL_DEFAULT, \
88 DEVICE_MAJOR, 0, \
89 S_IFCHR | S_IRUGO | S_IWUGO, \
90 &gFileOpsVBoxDrv, NULL)
91
92# define VBOX_UNREGISTER_DEVFS(handle) \
93 if (handle != NULL) \
94 devfs_unregister(handle)
95
96# endif /* < 2.6.0 */
#endif /* CONFIG_DEVFS_FS && !CONFIG_VBOXDRV_AS_MISC */
98
99#ifndef CONFIG_VBOXDRV_AS_MISC
100# if defined(CONFIG_DEVFS_FS) && LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 0)
101# define VBOX_REGISTER_DEVICE(a,b,c) devfs_register_chrdev(a,b,c)
102# define VBOX_UNREGISTER_DEVICE(a,b) devfs_unregister_chrdev(a,b)
103# else
104# define VBOX_REGISTER_DEVICE(a,b,c) register_chrdev(a,b,c)
105# define VBOX_UNREGISTER_DEVICE(a,b) unregister_chrdev(a,b)
106# endif
107#endif /* !CONFIG_VBOXDRV_AS_MISC */
108
109
110#ifdef CONFIG_X86_HIGH_ENTRY
111# error "CONFIG_X86_HIGH_ENTRY is not supported by VBoxDrv at this time."
112#endif
113
114/*
115 * This sucks soooo badly on x86! Why don't they export __PAGE_KERNEL_EXEC so PAGE_KERNEL_EXEC would be usable?
116 */
117#if defined(__AMD64__)
118# define MY_PAGE_KERNEL_EXEC PAGE_KERNEL_EXEC
119#elif defined(PAGE_KERNEL_EXEC) && defined(CONFIG_X86_PAE)
120# define MY_PAGE_KERNEL_EXEC __pgprot(cpu_has_pge ? _PAGE_KERNEL_EXEC | _PAGE_GLOBAL : _PAGE_KERNEL_EXEC)
121#else
122# define MY_PAGE_KERNEL_EXEC PAGE_KERNEL
123#endif
124
125/*
126 * The redhat hack section.
127 * - The current hacks are for 2.4.21-15.EL only.
128 */
129#ifndef NO_REDHAT_HACKS
130/* accounting. */
131# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
132# ifdef VM_ACCOUNT
133# define MY_DO_MUNMAP(a,b,c) do_munmap(a, b, c, 0) /* should it be 1 or 0? */
134# endif
135# endif
136
137/* backported remap_page_range. */
138# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
139# include <asm/tlb.h>
140# ifdef tlb_vma /* probably not good enough... */
141# define HAVE_26_STYLE_REMAP_PAGE_RANGE 1
142# endif
143# endif
144
145# ifndef __AMD64__
146/* In 2.6.9-22.ELsmp we have to call change_page_attr() twice when changing
147 * the page attributes from PAGE_KERNEL to something else, because there appears
148 * to be a bug in one of the many patches that redhat applied.
149 * It should be safe to do this on less buggy linux kernels too. ;-)
150 */
151# define MY_CHANGE_PAGE_ATTR(pPages, cPages, prot) \
152 do { \
153 if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) \
154 change_page_attr(pPages, cPages, prot); \
155 change_page_attr(pPages, cPages, prot); \
156 } while (0)
157# endif
158#endif /* !NO_REDHAT_HACKS */
159
160
161#ifndef MY_DO_MUNMAP
162# define MY_DO_MUNMAP(a,b,c) do_munmap(a, b, c)
163#endif
164
165#ifndef MY_CHANGE_PAGE_ATTR
166# ifdef __AMD64__ /** @todo This is a cheap hack, but it'll get around that 'else BUG();' in __change_page_attr(). */
167# define MY_CHANGE_PAGE_ATTR(pPages, cPages, prot) \
168 do { \
169 change_page_attr(pPages, cPages, PAGE_KERNEL_NOCACHE); \
170 change_page_attr(pPages, cPages, prot); \
171 } while (0)
172# else
173# define MY_CHANGE_PAGE_ATTR(pPages, cPages, prot) change_page_attr(pPages, cPages, prot)
174# endif
175#endif
176
177
178/** @def ONE_MSEC_IN_JIFFIES
179 * The number of jiffies that make up 1 millisecond. This is only actually used
180 * when HZ is > 1000. */
181#if HZ <= 1000
182# define ONE_MSEC_IN_JIFFIES 0
183#elif !(HZ % 1000)
184# define ONE_MSEC_IN_JIFFIES (HZ / 1000)
185#else
186# define ONE_MSEC_IN_JIFFIES ((HZ + 999) / 1000)
187# error "HZ is not a multiple of 1000, the GIP stuff won't work right!"
188#endif
189
190
191/*******************************************************************************
192* Defined Constants And Macros *
193*******************************************************************************/
194/**
195 * Device extention & session data association structure.
196 */
197static SUPDRVDEVEXT g_DevExt;
198
199/** Timer structure for the GIP update. */
200static struct timer_list g_GipTimer;
201/** Pointer to the page structure for the GIP. */
202struct page *g_pGipPage;
203
204/** Registered devfs device handle. */
205#if defined(CONFIG_DEVFS_FS) && !defined(CONFIG_VBOXDRV_AS_MISC)
206# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
207static void *g_hDevFsVBoxDrv = NULL;
208# else
209static devfs_handle_t g_hDevFsVBoxDrv = NULL;
210# endif
211#endif
212
213#ifndef CONFIG_VBOXDRV_AS_MISC
214/** Module major number */
215#define DEVICE_MAJOR 234
216/** Saved major device number */
217static int g_iModuleMajor;
218#endif /* !CONFIG_VBOXDRV_AS_MISC */
219
220/** The module name. */
221#define DEVICE_NAME "vboxdrv"
222
223#ifdef __AMD64__
224/**
225 * Memory for the executable memory heap (in IPRT).
226 */
227extern uint8_t g_abExecMemory[1572864]; /* 1.5 MB */
228__asm__(".section execmemory, \"awx\", @progbits\n\t"
229 ".align 32\n\t"
230 ".globl g_abExecMemory\n"
231 "g_abExecMemory:\n\t"
232 ".zero 1572864\n\t"
233 ".type g_abExecMemory, @object\n\t"
234 ".size g_abExecMemory, 1572864\n\t"
235 ".text\n\t");
236#endif
237
238
239/*******************************************************************************
240* Internal Functions *
241*******************************************************************************/
242static int VBoxSupDrvInit(void);
243static void VBoxSupDrvUnload(void);
244static int VBoxSupDrvCreate(struct inode *pInode, struct file *pFilp);
245static int VBoxSupDrvClose(struct inode *pInode, struct file *pFilp);
246static int VBoxSupDrvDeviceControl(struct inode *pInode, struct file *pFilp,
247 unsigned int IOCmd, unsigned long IOArg);
248static void *VBoxSupDrvMapUser(struct page **papPages, unsigned cPages, unsigned fProt, pgprot_t pgFlags);
249static int VBoxSupDrvInitGip(PSUPDRVDEVEXT pDevExt);
250static int VBoxSupDrvTermGip(PSUPDRVDEVEXT pDevExt);
251static void VBoxSupGipTimer(unsigned long ulUser);
252static int VBoxSupDrvOrder(unsigned long size);
253static int VBoxSupDrvErr2LinuxErr(int);
254
255
256/** The file_operations structure. */
257static struct file_operations gFileOpsVBoxDrv =
258{
259 owner: THIS_MODULE,
260 open: VBoxSupDrvCreate,
261 release: VBoxSupDrvClose,
262 ioctl: VBoxSupDrvDeviceControl,
263};
264
265#ifdef CONFIG_VBOXDRV_AS_MISC
266/** The miscdevice structure. */
267static struct miscdevice gMiscDevice =
268{
269 minor: MISC_DYNAMIC_MINOR,
270 name: DEVICE_NAME,
271 fops: &gFileOpsVBoxDrv,
272# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) && \
273 LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 17)
274 devfs_name: DEVICE_NAME,
275# endif
276};
277#endif
278
279
/**
 * Initialize module.
 *
 * Refuses to load while an NMI watchdog may be active (an NMI inside the
 * world switcher would crash the machine), registers the character/misc
 * device, then initializes IPRT, the device extension and the GIP page.
 *
 * @returns appropriate status code (0 on success, negative errno on failure).
 */
static int __init VBoxSupDrvInit(void)
{
    int rc;

    dprintf(("VBoxDrv::ModuleInit\n"));

#ifdef CONFIG_X86_LOCAL_APIC
    /*
     * If an NMI occurs while we are inside the world switcher the machine will crash.
     * The Linux NMI watchdog generates periodic NMIs increasing a counter which is
     * compared with another counter increased in the timer interrupt handler. Therefore
     * we don't allow to setup an NMI watchdog.
     */
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
    /*
     * First test: NMI activated? This only works with Linux 2.6 -- 2.4 does not
     * export the nmi_watchdog variable.
     */
#  if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
    /*
     * Permanent IO_APIC mode active? No way to handle this!
     */
    if (nmi_watchdog == NMI_IO_APIC)
    {
        printk(KERN_ERR DEVICE_NAME
               ": NMI watchdog in IO_APIC mode active -- refused to load the kernel module!\n"
               DEVICE_NAME
               ": Please disable the NMI watchdog by specifying 'nmi_watchdog=0' at kernel\n"
               DEVICE_NAME
               ": command line.\n");
        return -EINVAL;
    }

    /*
     * See arch/i386/kernel/nmi.c on >= 2.6.19: -1 means it can never be enabled again
     */
    atomic_set(&nmi_active, -1);
    printk(KERN_INFO DEVICE_NAME ": Trying to deactivate NMI watchdog permanently...\n");

    /*
     * Now fall through and see if it actually was enabled before. If so, fail
     * as we cannot deactivate it cleanly from here.
     */
#  else /* < 2.6.19 */
    /*
     * Older 2.6 kernels: nmi_watchdog is not initialized by default
     */
    if (nmi_watchdog != NMI_NONE)
        goto nmi_activated;
#  endif
# endif /* >= 2.6.0 */

    /*
     * Second test: Interrupt generated by performance counter not masked and can
     * generate an NMI. Works also with Linux 2.4.
     */
    {
        unsigned int v, ver, maxlvt;

        v = apic_read(APIC_LVR);
        ver = GET_APIC_VERSION(v);
        /* 82489DXs do not report # of LVT entries. */
        maxlvt = APIC_INTEGRATED(ver) ? GET_APIC_MAXLVT(v) : 2;
        if (maxlvt >= 4)
        {
            /* Read status of performance counter IRQ vector */
            v = apic_read(APIC_LVTPC);

            /* performance counter generates NMI and is not masked? */
            if ((GET_APIC_DELIVERY_MODE(v) == APIC_MODE_NMI) && !(v & APIC_LVT_MASKED))
            {
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
                printk(KERN_ERR DEVICE_NAME
                       ": NMI watchdog either active or at least initialized. Please disable the NMI\n"
                       DEVICE_NAME
                       ": watchdog by specifying 'nmi_watchdog=0' at kernel command line.\n");
                return -EINVAL;
# else /* < 2.6.19 */
#  if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
nmi_activated:
#  endif
                printk(KERN_ERR DEVICE_NAME
                       ": NMI watchdog active -- refused to load the kernel module! Please disable\n"
                       DEVICE_NAME
                       ": the NMI watchdog by specifying 'nmi_watchdog=0' at kernel command line.\n");
                return -EINVAL;
# endif /* >= 2.6.19 */
            }
        }
    }
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
    printk(KERN_INFO DEVICE_NAME ": Successfully done.\n");
# endif /* >= 2.6.19 */
#endif /* CONFIG_X86_LOCAL_APIC */

#ifdef CONFIG_VBOXDRV_AS_MISC
    /*
     * Register as a misc device (dynamic minor, see gMiscDevice).
     */
    rc = misc_register(&gMiscDevice);
    if (rc)
    {
        printk(KERN_ERR DEVICE_NAME ": Can't register misc device! rc=%d\n", rc);
        return rc;
    }
#else  /* !CONFIG_VBOXDRV_AS_MISC */
    /*
     * Register character device.
     */
    g_iModuleMajor = DEVICE_MAJOR;
    rc = VBOX_REGISTER_DEVICE((dev_t)g_iModuleMajor, DEVICE_NAME, &gFileOpsVBoxDrv);
    if (rc < 0)
    {
        dprintf(("VBOX_REGISTER_DEVICE failed with rc=%#x!\n", rc));
        return rc;
    }

    /*
     * Save returned module major number. A zero DEVICE_MAJOR requests a
     * dynamic major, which the registration call returns in rc.
     */
    if (DEVICE_MAJOR != 0)
        g_iModuleMajor = DEVICE_MAJOR;
    else
        g_iModuleMajor = rc;
    rc = 0;

#ifdef CONFIG_DEVFS_FS
    /*
     * Register a device entry
     */
    g_hDevFsVBoxDrv = VBOX_REGISTER_DEVFS();
    if (g_hDevFsVBoxDrv == NULL)
    {
        dprintf(("devfs_register failed!\n"));
        rc = -EINVAL;
    }
#endif
#endif /* !CONFIG_VBOXDRV_AS_MISC */
    if (!rc)
    {
        /*
         * Initialize the runtime.
         * On AMD64 we'll have to donate the high rwx memory block to the exec allocator.
         */
        rc = RTR0Init(0);
        if (RT_SUCCESS(rc))
        {
#ifdef __AMD64__
            rc = RTR0MemExecDonate(&g_abExecMemory[0], sizeof(g_abExecMemory));
#endif
            /*
             * Initialize the device extension.
             */
            if (RT_SUCCESS(rc))
                rc = supdrvInitDevExt(&g_DevExt);
            if (!rc)
            {
                /*
                 * Create the GIP page.
                 */
                rc = VBoxSupDrvInitGip(&g_DevExt);
                if (!rc)
                {
                    /* Fully initialized; module load succeeds. */
                    dprintf(("VBoxDrv::ModuleInit returning %#x\n", rc));
                    return rc;
                }

                supdrvDeleteDevExt(&g_DevExt);
            }
            else
                rc = -EINVAL;
            RTR0Term();
        }
        else
            rc = -EINVAL;

        /*
         * Failed, cleanup and return the error code.
         */
#if defined(CONFIG_DEVFS_FS) && !defined(CONFIG_VBOXDRV_AS_MISC)
        VBOX_UNREGISTER_DEVFS(g_hDevFsVBoxDrv);
#endif
    }
#ifdef CONFIG_VBOXDRV_AS_MISC
    misc_deregister(&gMiscDevice);
    dprintf(("VBoxDrv::ModuleInit returning %#x (minor:%d)\n", rc, gMiscDevice.minor));
#else
    VBOX_UNREGISTER_DEVICE(g_iModuleMajor, DEVICE_NAME);
    dprintf(("VBoxDrv::ModuleInit returning %#x (major:%d)\n", rc, g_iModuleMajor));
#endif
    return rc;
}
474
475
/**
 * Unload the module.
 *
 * Unregisters the device node, destroys the GIP, deletes the device
 * extension and terminates IPRT.
 */
static void __exit VBoxSupDrvUnload(void)
{
    int rc;
    dprintf(("VBoxSupDrvUnload\n"));

    /*
     * I Don't think it's possible to unload a driver which processes have
     * opened, at least we'll blindly assume that here.
     */
#ifdef CONFIG_VBOXDRV_AS_MISC
    /* NOTE(review): misc_deregister() returned an int in kernels of this era;
       failure here is only logged since unload cannot be aborted. */
    rc = misc_deregister(&gMiscDevice);
    if (rc < 0)
    {
        dprintf(("misc_deregister failed with rc=%#x\n", rc));
    }
#else  /* !CONFIG_VBOXDRV_AS_MISC */
#ifdef CONFIG_DEVFS_FS
    /*
     * Unregister a device entry
     */
    VBOX_UNREGISTER_DEVFS(g_hDevFsVBoxDrv);
#endif // devfs
    rc = VBOX_UNREGISTER_DEVICE(g_iModuleMajor, DEVICE_NAME);
    if (rc < 0)
    {
        dprintf(("unregister_chrdev failed with rc=%#x (major:%d)\n", rc, g_iModuleMajor));
    }
#endif /* !CONFIG_VBOXDRV_AS_MISC */

    /*
     * Destroy GIP, delete the device extension and terminate IPRT.
     */
    VBoxSupDrvTermGip(&g_DevExt);
    supdrvDeleteDevExt(&g_DevExt);
    RTR0Term();
}
515
516
517/**
518 * Device open. Called on open /dev/vboxdrv
519 *
520 * @param pInode Pointer to inode info structure.
521 * @param pFilp Associated file pointer.
522 */
523static int VBoxSupDrvCreate(struct inode *pInode, struct file *pFilp)
524{
525 int rc;
526 PSUPDRVSESSION pSession;
527 dprintf(("VBoxSupDrvCreate: pFilp=%p\n", pFilp));
528
529 /*
530 * Call common code for the rest.
531 */
532 rc = supdrvCreateSession(&g_DevExt, (PSUPDRVSESSION *)&pSession);
533 if (!rc)
534 {
535 pSession->Uid = current->euid;
536 pSession->Gid = current->egid;
537 pSession->Process = RTProcSelf();
538 pSession->R0Process = RTR0ProcHandleSelf();
539 }
540
541 dprintf(("VBoxSupDrvCreate: g_DevExt=%p pSession=%p rc=%d\n", &g_DevExt, pSession, rc));
542 pFilp->private_data = pSession;
543
544 return VBoxSupDrvErr2LinuxErr(rc);
545}
546
547
548/**
549 * Close device.
550 *
551 * @param pInode Pointer to inode info structure.
552 * @param pFilp Associated file pointer.
553 */
554static int VBoxSupDrvClose(struct inode *pInode, struct file *pFilp)
555{
556 dprintf(("VBoxSupDrvClose: pFilp=%p private_data=%p\n", pFilp, pFilp->private_data));
557 supdrvCloseSession(&g_DevExt, (PSUPDRVSESSION)pFilp->private_data);
558 pFilp->private_data = NULL;
559 return 0;
560}
561
562
/**
 * Device I/O Control entry point.
 *
 * Copies the SUPDRVIOCTLDATA descriptor from user space, stages the input
 * data in a vmalloc'ed kernel buffer, dispatches to the common supdrvIOCtl()
 * code, and copies any output data back to user space.
 *
 * @returns 0 on success, negative Linux error code on failure.
 * @param   pInode  Pointer to inode info structure.
 * @param   pFilp   Associated file pointer.
 * @param   IOCmd   The function specified to ioctl().
 * @param   IOArg   The argument specified to ioctl().
 */
static int VBoxSupDrvDeviceControl(struct inode *pInode, struct file *pFilp,
                                   unsigned int IOCmd, unsigned long IOArg)
{
    int rc;
    SUPDRVIOCTLDATA Args;
    void *pvBuf = NULL;
    int cbBuf = 0;
    unsigned cbOut = 0;

    dprintf2(("VBoxSupDrvDeviceControl: pFilp=%p IOCmd=%x IOArg=%p\n", pFilp, IOCmd, (void *)IOArg));

    /*
     * Copy ioctl data structure from user space.
     * The encoded size must match exactly; anything else is a bad request.
     */
    if (_IOC_SIZE(IOCmd) != sizeof(SUPDRVIOCTLDATA))
    {
        dprintf(("VBoxSupDrvDeviceControl: incorrect input length! cbArgs=%d\n", _IOC_SIZE(IOCmd)));
        return -EINVAL;
    }
    if (copy_from_user(&Args, (void *)IOArg, _IOC_SIZE(IOCmd)))
    {
        dprintf(("VBoxSupDrvDeviceControl: copy_from_user(&Args) failed.\n"));
        return -EFAULT;
    }

    /*
     * Allocate and copy user space input data buffer to kernel space.
     * One buffer serves as both input and output staging area.
     * NOTE(review): Args.cbIn/cbOut come straight from user space with no
     * upper bound checked here before vmalloc -- presumably supdrvIOCtl()
     * validates sizes per command; verify against the common code.
     */
    if (Args.cbIn > 0 || Args.cbOut > 0)
    {
        cbBuf = max(Args.cbIn, Args.cbOut);
        pvBuf = vmalloc(cbBuf);
        if (pvBuf == NULL)
        {
            dprintf(("VBoxSupDrvDeviceControl: failed to allocate buffer of %d bytes.\n", cbBuf));
            return -ENOMEM;
        }

        if (copy_from_user(pvBuf, (void *)Args.pvIn, Args.cbIn))
        {
            dprintf(("VBoxSupDrvDeviceControl: copy_from_user(pvBuf) failed.\n"));
            vfree(pvBuf);
            return -EFAULT;
        }
    }

    /*
     * Process the IOCtl.
     */
    rc = supdrvIOCtl(IOCmd, &g_DevExt, (PSUPDRVSESSION)pFilp->private_data,
                     pvBuf, Args.cbIn, pvBuf, Args.cbOut, &cbOut);

    /*
     * Copy ioctl data and output buffer back to user space.
     */
    if (rc)
    {
        dprintf(("VBoxSupDrvDeviceControl: pFilp=%p IOCmd=%x IOArg=%p failed, rc=%d (linux rc=%d)\n",
                 pFilp, IOCmd, (void *)IOArg, rc, VBoxSupDrvErr2LinuxErr(rc)));
        rc = VBoxSupDrvErr2LinuxErr(rc);
    }
    else if (cbOut > 0)
    {
        if (pvBuf != NULL && cbOut <= cbBuf)
        {
            if (copy_to_user((void *)Args.pvOut, pvBuf, cbOut))
            {
                dprintf(("copy_to_user failed.\n"));
                rc = -EFAULT;
            }
        }
        else
        {
            /* supdrvIOCtl claimed more output than the staging buffer holds. */
            dprintf(("WHAT!?! supdrvIOCtl messed up! cbOut=%d cbBuf=%d pvBuf=%p\n", cbOut, cbBuf, pvBuf));
            rc = -EPERM;
        }
    }

    if (pvBuf)
        vfree(pvBuf);

    dprintf2(("VBoxSupDrvDeviceControl: returns %d\n", rc));
    return rc;
}
655
656
657/**
658 * Initializes any OS specific object creator fields.
659 */
660void VBOXCALL supdrvOSObjInitCreator(PSUPDRVOBJ pObj, PSUPDRVSESSION pSession)
661{
662 NOREF(pObj);
663 NOREF(pSession);
664}
665
666
667/**
668 * Checks if the session can access the object.
669 *
670 * @returns true if a decision has been made.
671 * @returns false if the default access policy should be applied.
672 *
673 * @param pObj The object in question.
674 * @param pSession The session wanting to access the object.
675 * @param pszObjName The object name, can be NULL.
676 * @param prc Where to store the result when returning true.
677 */
678bool VBOXCALL supdrvOSObjCanAccess(PSUPDRVOBJ pObj, PSUPDRVSESSION pSession, const char *pszObjName, int *prc)
679{
680 NOREF(pObj);
681 NOREF(pSession);
682 NOREF(pszObjName);
683 NOREF(prc);
684 return false;
685}
686
687
/**
 * Compute allocation order. Some functions allocate 2^order pages.
 *
 * @returns order such that 2^order >= cPages.
 * @param   cPages  Number of pages.
 */
static int VBoxSupDrvOrder(unsigned long cPages)
{
    unsigned long   uShifter = cPages;
    int             iOrder = 0;

    /* Find the position of the most significant set bit. */
    while (uShifter >>= 1)
        iOrder++;

    /* Round up unless cPages is an exact power of two. */
    if (cPages & ~(1 << iOrder))
        iOrder++;

    return iOrder;
}
706
707
708/**
709 * OS Specific code for locking down memory.
710 *
711 * @returns 0 on success.
712 * @returns SUPDRV_ERR_* on failure.
713 * @param pMem Pointer to memory.
714 * This is not linked in anywhere.
715 * @param paPages Array which should be filled with the address of the physical pages.
716 *
717 * @remark See sgl_map_user_pages() for an example of an similar function.
718 */
719int VBOXCALL supdrvOSLockMemOne(PSUPDRVMEMREF pMem, PSUPPAGE paPages)
720{
721 int rc;
722 struct page **papPages;
723 unsigned iPage;
724 unsigned cPages = pMem->cb >> PAGE_SHIFT;
725 unsigned long pv = (unsigned long)pMem->pvR3;
726
727 /*
728 * Allocate page pointer array.
729 */
730 papPages = vmalloc(cPages * sizeof(*papPages));
731 if (!papPages)
732 return SUPDRV_ERR_NO_MEMORY;
733
734 /*
735 * Get user pages.
736 */
737 down_read(&current->mm->mmap_sem);
738 rc = get_user_pages(current, /* Task for fault acounting. */
739 current->mm, /* Whose pages. */
740 (unsigned long)pv, /* Where from. */
741 cPages, /* How many pages. */
742 1, /* Write to memory. */
743 0, /* force. */
744 papPages, /* Page array. */
745 NULL); /* vmas */
746 if (rc != cPages)
747 {
748 up_read(&current->mm->mmap_sem);
749 dprintf(("supdrvOSLockMemOne: get_user_pages failed. rc=%d\n", rc));
750 return SUPDRV_ERR_LOCK_FAILED;
751 }
752
753 for (iPage = 0; iPage < cPages; iPage++)
754 flush_dcache_page(papPages[iPage]);
755 up_read(&current->mm->mmap_sem);
756
757 pMem->u.locked.papPages = papPages;
758 pMem->u.locked.cPages = cPages;
759
760 /*
761 * Get addresses.
762 */
763 for (iPage = 0; iPage < cPages; iPage++)
764 {
765 paPages[iPage].Phys = page_to_phys(papPages[iPage]);
766 paPages[iPage].uReserved = 0;
767 }
768
769 dprintf2(("supdrvOSLockMemOne: pvR3=%p cb=%d papPages=%p\n",
770 pMem->pvR3, pMem->cb, pMem->u.locked.papPages));
771 return 0;
772}
773
774
/**
 * Unlocks the memory referenced by pMem.
 *
 * Marks non-reserved pages dirty (their contents may have changed while
 * pinned), drops the references taken by get_user_pages() and frees the
 * page pointer array.
 *
 * @param   pMem    Memory reference record of the locked memory.
 *
 * @remark  See sgl_unmap_user_pages() for an example of an similar function.
 */
void VBOXCALL supdrvOSUnlockMemOne(PSUPDRVMEMREF pMem)
{
    unsigned iPage;
    dprintf2(("supdrvOSUnlockMemOne: pvR3=%p cb=%d papPages=%p\n",
              pMem->pvR3, pMem->cb, pMem->u.locked.papPages));

    /*
     * Loop thru the pages and release them.
     */
    for (iPage = 0; iPage < pMem->u.locked.cPages; iPage++)
    {
        if (!PageReserved(pMem->u.locked.papPages[iPage]))
            SetPageDirty(pMem->u.locked.papPages[iPage]);
        page_cache_release(pMem->u.locked.papPages[iPage]);
    }

    /* free the page array */
    vfree(pMem->u.locked.papPages);
    pMem->u.locked.cPages = 0;
}
803
804
805/**
806 * OS Specific code for allocating page aligned memory with continuous fixed
807 * physical paged backing.
808 *
809 * @returns 0 on success.
810 * @returns SUPDRV_ERR_* on failure.
811 * @param pMem Memory reference record of the memory to be allocated.
812 * (This is not linked in anywhere.)
813 * @param ppvR0 Where to store the virtual address of the ring-0 mapping. (optional)
814 * @param ppvR3 Where to store the virtual address of the ring-3 mapping.
815 * @param pHCPhys Where to store the physical address.
816 */
817int VBOXCALL supdrvOSContAllocOne(PSUPDRVMEMREF pMem, void **ppvR0, void **ppvR3, PRTHCPHYS pHCPhys)
818{
819 struct page *paPages;
820 unsigned iPage;
821 unsigned cbAligned = RT_ALIGN(pMem->cb, PAGE_SIZE);
822 unsigned cPages = cbAligned >> PAGE_SHIFT;
823 unsigned cOrder = VBoxSupDrvOrder(cPages);
824 unsigned long ulAddr;
825 dma_addr_t HCPhys;
826 int rc = 0;
827 pgprot_t pgFlags;
828 pgprot_val(pgFlags) = _PAGE_PRESENT | _PAGE_RW | _PAGE_USER;
829
830 Assert(ppvR3);
831 Assert(pHCPhys);
832
833 /*
834 * Allocate page pointer array.
835 */
836#ifdef __AMD64__ /** @todo check out if there is a correct way of getting memory below 4GB (physically). */
837 paPages = alloc_pages(GFP_DMA, cOrder);
838#else
839 paPages = alloc_pages(GFP_USER, cOrder);
840#endif
841 if (!paPages)
842 return SUPDRV_ERR_NO_MEMORY;
843
844 /*
845 * Lock the pages.
846 */
847 for (iPage = 0; iPage < cPages; iPage++)
848 {
849 SetPageReserved(&paPages[iPage]);
850 if (!PageHighMem(&paPages[iPage]) && pgprot_val(MY_PAGE_KERNEL_EXEC) != pgprot_val(PAGE_KERNEL))
851 MY_CHANGE_PAGE_ATTR(&paPages[iPage], 1, MY_PAGE_KERNEL_EXEC);
852#ifdef DEBUG
853 if (iPage + 1 < cPages && (page_to_phys((&paPages[iPage])) + 0x1000) != page_to_phys((&paPages[iPage + 1])))
854 {
855 dprintf(("supdrvOSContAllocOne: Pages are not continuous!!!! iPage=%d phys=%llx physnext=%llx\n",
856 iPage, (long long)page_to_phys((&paPages[iPage])), (long long)page_to_phys((&paPages[iPage + 1]))));
857 BUG();
858 }
859#endif
860 }
861 HCPhys = page_to_phys(paPages);
862
863 /*
864 * Allocate user space mapping and put the physical pages into it.
865 */
866 down_write(&current->mm->mmap_sem);
867 ulAddr = do_mmap(NULL, 0, cbAligned, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED | MAP_ANONYMOUS, 0);
868 if (!(ulAddr & ~PAGE_MASK))
869 {
870#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && !defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
871 int rc2 = remap_page_range(ulAddr, HCPhys, cbAligned, pgFlags);
872#else
873 int rc2 = 0;
874 struct vm_area_struct *vma = find_vma(current->mm, ulAddr);
875 if (vma)
876#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
877 rc2 = remap_page_range(vma, ulAddr, HCPhys, cbAligned, pgFlags);
878#else
879 rc2 = remap_pfn_range(vma, ulAddr, HCPhys >> PAGE_SHIFT, cbAligned, pgFlags);
880#endif
881 else
882 {
883 rc = SUPDRV_ERR_NO_MEMORY;
884 dprintf(("supdrvOSContAllocOne: no vma found for ulAddr=%#lx!\n", ulAddr));
885 }
886#endif
887 if (rc2)
888 {
889 rc = SUPDRV_ERR_NO_MEMORY;
890 dprintf(("supdrvOSContAllocOne: remap_page_range failed rc2=%d\n", rc2));
891 }
892 }
893 else
894 {
895 dprintf(("supdrvOSContAllocOne: do_mmap failed ulAddr=%#lx\n", ulAddr));
896 rc = SUPDRV_ERR_NO_MEMORY;
897 }
898 up_write(&current->mm->mmap_sem); /* not quite sure when to give this up. */
899
900 /*
901 * Success?
902 */
903 if (!rc)
904 {
905 *pHCPhys = HCPhys;
906 *ppvR3 = (void *)ulAddr;
907 if (ppvR0)
908 *ppvR0 = (void *)ulAddr;
909 pMem->pvR3 = (void *)ulAddr;
910 pMem->pvR0 = NULL;
911 pMem->u.cont.paPages = paPages;
912 pMem->u.cont.cPages = cPages;
913 pMem->cb = cbAligned;
914
915 dprintf2(("supdrvOSContAllocOne: pvR0=%p pvR3=%p cb=%d paPages=%p *pHCPhys=%lx *ppvR0=*ppvR3=%p\n",
916 pMem->pvR0, pMem->pvR3, pMem->cb, paPages, (unsigned long)*pHCPhys, *ppvR3));
917 global_flush_tlb();
918 return 0;
919 }
920
921 /*
922 * Failure, cleanup and be gone.
923 */
924 down_write(&current->mm->mmap_sem);
925 if (ulAddr & ~PAGE_MASK)
926 MY_DO_MUNMAP(current->mm, ulAddr, pMem->cb);
927 for (iPage = 0; iPage < cPages; iPage++)
928 {
929 ClearPageReserved(&paPages[iPage]);
930 if (!PageHighMem(&paPages[iPage]) && pgprot_val(MY_PAGE_KERNEL_EXEC) != pgprot_val(PAGE_KERNEL))
931 MY_CHANGE_PAGE_ATTR(&paPages[iPage], 1, PAGE_KERNEL);
932 }
933 up_write(&current->mm->mmap_sem); /* check when we can leave this. */
934 __free_pages(paPages, cOrder);
935
936 global_flush_tlb();
937 return rc;
938}
939
940
/**
 * Frees contiguous memory allocated by supdrvOSContAllocOne().
 *
 * Unmaps the ring-3 mapping (when the mm still exists), restores the page
 * attributes and releases the contiguous page block.
 *
 * @param   pMem    Memory reference record of the memory to be freed.
 */
void VBOXCALL supdrvOSContFreeOne(PSUPDRVMEMREF pMem)
{
    unsigned iPage;

    dprintf2(("supdrvOSContFreeOne: pvR0=%p pvR3=%p cb=%d paPages=%p\n",
              pMem->pvR0, pMem->pvR3, pMem->cb, pMem->u.cont.paPages));

    /*
     * do_exit() destroys the mm before closing files.
     * I really hope it cleans up our stuff properly...
     */
    if (current->mm)
    {
        down_write(&current->mm->mmap_sem);
        MY_DO_MUNMAP(current->mm, (unsigned long)pMem->pvR3, pMem->cb);
        up_write(&current->mm->mmap_sem); /* check when we can leave this. */
    }

    /*
     * Change page attributes freeing the pages.
     */
    for (iPage = 0; iPage < pMem->u.cont.cPages; iPage++)
    {
        ClearPageReserved(&pMem->u.cont.paPages[iPage]);
        if (!PageHighMem(&pMem->u.cont.paPages[iPage]) && pgprot_val(MY_PAGE_KERNEL_EXEC) != pgprot_val(PAGE_KERNEL))
            MY_CHANGE_PAGE_ATTR(&pMem->u.cont.paPages[iPage], 1, PAGE_KERNEL);
    }
    __free_pages(pMem->u.cont.paPages, VBoxSupDrvOrder(pMem->u.cont.cPages));

    pMem->u.cont.cPages = 0;
}
977
978
/**
 * Allocates memory which is mapped into both kernel and user space.
 * The returned memory is page aligned and so is the allocation.
 *
 * @returns 0 on success.
 * @returns SUPDRV_ERR_* on failure.
 * @param   pMem    Memory reference record of the memory to be allocated.
 *                  (This is not linked in anywhere.)
 * @param   ppvR0   Where to store the address of the Ring-0 mapping.
 * @param   ppvR3   Where to store the address of the Ring-3 mapping.
 */
int VBOXCALL supdrvOSMemAllocOne(PSUPDRVMEMREF pMem, void **ppvR0, void **ppvR3)
{
    const unsigned cbAligned = RT_ALIGN(pMem->cb, PAGE_SIZE);
    const unsigned cPages = cbAligned >> PAGE_SHIFT;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 22)
    /* No vmap() before 2.4.22, so a physically contiguous block is used instead. */
    unsigned cOrder = VBoxSupDrvOrder(cPages);
    struct page *paPages;
#endif
    struct page **papPages;
    unsigned iPage;
    pgprot_t pgFlags;
    pgprot_val(pgFlags) = _PAGE_PRESENT | _PAGE_RW | _PAGE_USER;

    /*
     * Allocate array with page pointers.
     */
    pMem->u.mem.cPages = 0;
    pMem->u.mem.papPages = papPages = kmalloc(sizeof(papPages[0]) * cPages, GFP_KERNEL);
    if (!papPages)
        return SUPDRV_ERR_NO_MEMORY;

    /*
     * Allocate the pages.
     */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
    for (iPage = 0; iPage < cPages; iPage++)
    {
        papPages[iPage] = alloc_page(GFP_HIGHUSER);
        if (!papPages[iPage])
        {
            /* Record how many pages were obtained so the cleanup frees only those. */
            pMem->u.mem.cPages = iPage;
            supdrvOSMemFreeOne(pMem);
            return SUPDRV_ERR_NO_MEMORY;
        }
    }

#else /* < 2.4.22 */
    paPages = alloc_pages(GFP_USER, cOrder);
    if (!paPages)
    {
        supdrvOSMemFreeOne(pMem);
        return SUPDRV_ERR_NO_MEMORY;
    }
    for (iPage = 0; iPage < cPages; iPage++)
    {
        papPages[iPage] = &paPages[iPage];
        if (pgprot_val(MY_PAGE_KERNEL_EXEC) != pgprot_val(PAGE_KERNEL))
            MY_CHANGE_PAGE_ATTR(papPages[iPage], 1, MY_PAGE_KERNEL_EXEC);
        if (PageHighMem(papPages[iPage]))
            BUG();
    }
#endif
    pMem->u.mem.cPages = cPages;

    /*
     * Reserve the pages.
     */
    for (iPage = 0; iPage < cPages; iPage++)
        SetPageReserved(papPages[iPage]);

    /*
     * Create the Ring-0 mapping.
     */
    if (ppvR0)
    {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
# ifdef VM_MAP
        *ppvR0 = pMem->pvR0 = vmap(papPages, cPages, VM_MAP, pgFlags);
# else
        *ppvR0 = pMem->pvR0 = vmap(papPages, cPages, VM_ALLOC, pgFlags);
# endif
#else
        /* Pre-vmap kernels: the contiguous low-mem block is directly addressable. */
        *ppvR0 = pMem->pvR0 = phys_to_virt(page_to_phys(papPages[0]));
#endif
    }
    /* NOTE(review): when ppvR0 is NULL this relies on pMem->pvR0 having been
       zeroed by the caller -- verify against the common allocation code. */
    if (pMem->pvR0 || !ppvR0)
    {
        /*
         * Create the ring3 mapping.
         */
        if (ppvR3)
            *ppvR3 = pMem->pvR3 = VBoxSupDrvMapUser(papPages, cPages, PROT_READ | PROT_WRITE | PROT_EXEC, pgFlags);
        if (pMem->pvR3 || !ppvR3)
            return 0;
        dprintf(("supdrvOSMemAllocOne: failed to map into r3! cPages=%u\n", cPages));
    }
    else
        dprintf(("supdrvOSMemAllocOne: failed to map into r0! cPages=%u\n", cPages));

    supdrvOSMemFreeOne(pMem);
    return SUPDRV_ERR_NO_MEMORY;
}
1082
1083
1084/**
1085 * Get the physical addresses of the pages in the allocation.
1086 * This is called while inside bundle the spinlock.
1087 *
1088 * @param pMem Memory reference record of the memory.
1089 * @param paPages Where to store the page addresses.
1090 */
1091void VBOXCALL supdrvOSMemGetPages(PSUPDRVMEMREF pMem, PSUPPAGE paPages)
1092{
1093 unsigned iPage;
1094 for (iPage = 0; iPage < pMem->u.mem.cPages; iPage++)
1095 {
1096 paPages[iPage].Phys = page_to_phys(pMem->u.mem.papPages[iPage]);
1097 paPages[iPage].uReserved = 0;
1098 }
1099}
1100
1101
1102/**
1103 * Frees memory allocated by supdrvOSMemAllocOne().
1104 *
1105 * @param pMem Memory reference record of the memory to be free.
1106 */
1107void VBOXCALL supdrvOSMemFreeOne(PSUPDRVMEMREF pMem)
1108{
1109 dprintf2(("supdrvOSMemFreeOne: pvR0=%p pvR3=%p cb=%d cPages=%d papPages=%p\n",
1110 pMem->pvR0, pMem->pvR3, pMem->cb, pMem->u.mem.cPages, pMem->u.mem.papPages));
1111
1112 /*
1113 * Unmap the user mapping (if any).
1114 * do_exit() destroys the mm before closing files.
1115 */
1116 if (pMem->pvR3 && current->mm)
1117 {
1118 down_write(&current->mm->mmap_sem);
1119 MY_DO_MUNMAP(current->mm, (unsigned long)pMem->pvR3, RT_ALIGN(pMem->cb, PAGE_SIZE));
1120 up_write(&current->mm->mmap_sem); /* check when we can leave this. */
1121 }
1122 pMem->pvR3 = NULL;
1123
1124 /*
1125 * Unmap the kernel mapping (if any).
1126 */
1127 if (pMem->pvR0)
1128 {
1129#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
1130 vunmap(pMem->pvR0);
1131#endif
1132 pMem->pvR0 = NULL;
1133 }
1134
1135 /*
1136 * Free the physical pages.
1137 */
1138 if (pMem->u.mem.papPages)
1139 {
1140 struct page **papPages = pMem->u.mem.papPages;
1141 const unsigned cPages = pMem->u.mem.cPages;
1142 unsigned iPage;
1143
1144 /* Restore the page flags. */
1145 for (iPage = 0; iPage < cPages; iPage++)
1146 {
1147 ClearPageReserved(papPages[iPage]);
1148#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 22)
1149 if (pgprot_val(MY_PAGE_KERNEL_EXEC) != pgprot_val(PAGE_KERNEL))
1150 MY_CHANGE_PAGE_ATTR(papPages[iPage], 1, PAGE_KERNEL);
1151#endif
1152 }
1153
1154 /* Free the pages. */
1155#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
1156 for (iPage = 0; iPage < pMem->u.cont.cPages; iPage++)
1157 __free_page(papPages[iPage]);
1158#else
1159 if (cPages > 0)
1160 __free_pages(papPages[0], VBoxSupDrvOrder(cPages));
1161#endif
1162 /* Free the page pointer array. */
1163 kfree(papPages);
1164 pMem->u.mem.papPages = NULL;
1165 }
1166 pMem->u.mem.cPages = 0;
1167}
1168
1169
1170/**
1171 * Maps a range of pages into user space.
1172 *
1173 * @returns Pointer to the user space mapping on success.
1174 * @returns NULL on failure.
1175 * @param papPages Array of the pages to map.
1176 * @param cPages Number of pages to map.
1177 * @param fProt The mapping protection.
1178 * @param pgFlags The page level protection.
1179 */
1180static void *VBoxSupDrvMapUser(struct page **papPages, unsigned cPages, unsigned fProt, pgprot_t pgFlags)
1181{
1182 int rc = SUPDRV_ERR_NO_MEMORY;
1183 unsigned long ulAddr;
1184
1185 /*
1186 * Allocate user space mapping.
1187 */
1188 down_write(&current->mm->mmap_sem);
1189 ulAddr = do_mmap(NULL, 0, cPages * PAGE_SIZE, fProt, MAP_SHARED | MAP_ANONYMOUS, 0);
1190 if (!(ulAddr & ~PAGE_MASK))
1191 {
1192 /*
1193 * Map page by page into the mmap area.
1194 * This is generic, paranoid and not very efficient.
1195 */
1196 int rc = 0;
1197 unsigned long ulAddrCur = ulAddr;
1198 unsigned iPage;
1199 for (iPage = 0; iPage < cPages; iPage++, ulAddrCur += PAGE_SIZE)
1200 {
1201#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
1202 struct vm_area_struct *vma = find_vma(current->mm, ulAddrCur);
1203 if (!vma)
1204 break;
1205#endif
1206
1207#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
1208 rc = remap_pfn_range(vma, ulAddrCur, page_to_pfn(papPages[iPage]), PAGE_SIZE, pgFlags);
1209#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
1210 rc = remap_page_range(vma, ulAddrCur, page_to_phys(papPages[iPage]), PAGE_SIZE, pgFlags);
1211#else /* 2.4 */
1212 rc = remap_page_range(ulAddrCur, page_to_phys(papPages[iPage]), PAGE_SIZE, pgFlags);
1213#endif
1214 if (rc)
1215 break;
1216 }
1217
1218 /*
1219 * Successful?
1220 */
1221 if (iPage >= cPages)
1222 {
1223 up_write(&current->mm->mmap_sem);
1224 return (void *)ulAddr;
1225 }
1226
1227 /* no, cleanup! */
1228 if (rc)
1229 dprintf(("VBoxSupDrvMapUser: remap_[page|pfn]_range failed! rc=%d\n", rc));
1230 else
1231 dprintf(("VBoxSupDrvMapUser: find_vma failed!\n"));
1232
1233 MY_DO_MUNMAP(current->mm, ulAddr, cPages * PAGE_SIZE);
1234 }
1235 else
1236 {
1237 dprintf(("supdrvOSContAllocOne: do_mmap failed ulAddr=%#lx\n", ulAddr));
1238 rc = SUPDRV_ERR_NO_MEMORY;
1239 }
1240 up_write(&current->mm->mmap_sem);
1241
1242 return NULL;
1243}
1244
1245
1246/**
1247 * Initializes the GIP.
1248 *
1249 * @returns negative errno.
1250 * @param pDevExt Instance data. GIP stuff may be updated.
1251 */
1252static int VBoxSupDrvInitGip(PSUPDRVDEVEXT pDevExt)
1253{
1254 struct page *pPage;
1255 dma_addr_t HCPhys;
1256 PSUPGLOBALINFOPAGE pGip;
1257 dprintf(("VBoxSupDrvInitGip:\n"));
1258
1259 /*
1260 * Allocate the page.
1261 */
1262 pPage = alloc_pages(GFP_USER, 0);
1263 if (!pPage)
1264 {
1265 dprintf(("VBoxSupDrvInitGip: failed to allocate the GIP page\n"));
1266 return -ENOMEM;
1267 }
1268
1269 /*
1270 * Lock the page.
1271 */
1272 SetPageReserved(pPage);
1273 g_pGipPage = pPage;
1274
1275 /*
1276 * Call common initialization routine.
1277 */
1278 HCPhys = page_to_phys(pPage);
1279 pGip = (PSUPGLOBALINFOPAGE)page_address(pPage);
1280 pDevExt->ulLastJiffies = jiffies;
1281#ifdef TICK_NSEC
1282 pDevExt->u64LastMonotime = (uint64_t)pDevExt->ulLastJiffies * TICK_NSEC;
1283 dprintf(("VBoxSupDrvInitGIP: TICK_NSEC=%ld HZ=%d jiffies=%ld now=%lld\n",
1284 TICK_NSEC, HZ, pDevExt->ulLastJiffies, pDevExt->u64LastMonotime));
1285#else
1286 pDevExt->u64LastMonotime = (uint64_t)pDevExt->ulLastJiffies * (1000000 / HZ);
1287 dprintf(("VBoxSupDrvInitGIP: TICK_NSEC=%d HZ=%d jiffies=%ld now=%lld\n",
1288 (int)(1000000 / HZ), HZ, pDevExt->ulLastJiffies, pDevExt->u64LastMonotime));
1289#endif
1290 supdrvGipInit(pDevExt, pGip, HCPhys, pDevExt->u64LastMonotime,
1291 HZ <= 1000 ? HZ : 1000);
1292
1293 /*
1294 * Initialize the timer.
1295 */
1296 init_timer(&g_GipTimer);
1297 g_GipTimer.data = (unsigned long)pDevExt;
1298 g_GipTimer.function = VBoxSupGipTimer;
1299 g_GipTimer.expires = jiffies;
1300
1301 return 0;
1302}
1303
1304
1305/**
1306 * Terminates the GIP.
1307 *
1308 * @returns negative errno.
1309 * @param pDevExt Instance data. GIP stuff may be updated.
1310 */
1311static int VBoxSupDrvTermGip(PSUPDRVDEVEXT pDevExt)
1312{
1313 struct page *pPage;
1314 PSUPGLOBALINFOPAGE pGip;
1315 dprintf(("VBoxSupDrvTermGip:\n"));
1316
1317 /*
1318 * Delete the timer if it's pending.
1319 */
1320 if (timer_pending(&g_GipTimer))
1321 del_timer(&g_GipTimer);
1322
1323 /*
1324 * Uninitialize the content.
1325 */
1326 pGip = pDevExt->pGip;
1327 pDevExt->pGip = NULL;
1328 if (pGip)
1329 supdrvGipTerm(pGip);
1330
1331 /*
1332 * Free the page.
1333 */
1334 pPage = g_pGipPage;
1335 g_pGipPage = NULL;
1336 if (pPage)
1337 {
1338 ClearPageReserved(pPage);
1339 __free_pages(pPage, 0);
1340 }
1341
1342 return 0;
1343}
1344
1345/**
1346 * Timer callback function.
1347 * The ulUser parameter is the device extension pointer.
1348 */
1349static void VBoxSupGipTimer(unsigned long ulUser)
1350{
1351 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)ulUser;
1352 unsigned long ulNow = jiffies;
1353 unsigned long ulDiff = ulNow - pDevExt->ulLastJiffies;
1354 pDevExt->ulLastJiffies = ulNow;
1355#ifdef TICK_NSEC
1356 pDevExt->u64LastMonotime += ulDiff * TICK_NSEC;
1357#else
1358 pDevExt->u64LastMonotime += ulDiff * (1000000 / HZ);
1359#endif
1360 supdrvGipUpdate(pDevExt->pGip, pDevExt->u64LastMonotime);
1361 mod_timer(&g_GipTimer, jiffies + (HZ <= 1000 ? 0 : ONE_MSEC_IN_JIFFIES));
1362}
1363
1364
1365/**
1366 * Maps the GIP into user space.
1367 *
1368 * @returns negative errno.
1369 * @param pDevExt Instance data.
1370 */
1371int VBOXCALL supdrvOSGipMap(PSUPDRVDEVEXT pDevExt, PCSUPGLOBALINFOPAGE *ppGip)
1372{
1373 int rc = 0;
1374 unsigned long ulAddr;
1375 unsigned long HCPhys = pDevExt->HCPhysGip;
1376 pgprot_t pgFlags;
1377 pgprot_val(pgFlags) = _PAGE_PRESENT | _PAGE_USER;
1378 dprintf2(("supdrvOSGipMap: ppGip=%p\n", ppGip));
1379
1380 /*
1381 * Allocate user space mapping and put the physical pages into it.
1382 */
1383 down_write(&current->mm->mmap_sem);
1384 ulAddr = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ, MAP_SHARED | MAP_ANONYMOUS, 0);
1385 if (!(ulAddr & ~PAGE_MASK))
1386 {
1387#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && !defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
1388 int rc2 = remap_page_range(ulAddr, HCPhys, PAGE_SIZE, pgFlags);
1389#else
1390 int rc2 = 0;
1391 struct vm_area_struct *vma = find_vma(current->mm, ulAddr);
1392 if (vma)
1393#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
1394 rc2 = remap_page_range(vma, ulAddr, HCPhys, PAGE_SIZE, pgFlags);
1395#else
1396 rc2 = remap_pfn_range(vma, ulAddr, HCPhys >> PAGE_SHIFT, PAGE_SIZE, pgFlags);
1397#endif
1398 else
1399 {
1400 rc = SUPDRV_ERR_NO_MEMORY;
1401 dprintf(("supdrvOSGipMap: no vma found for ulAddr=%#lx!\n", ulAddr));
1402 }
1403#endif
1404 if (rc2)
1405 {
1406 rc = SUPDRV_ERR_NO_MEMORY;
1407 dprintf(("supdrvOSGipMap: remap_page_range failed rc2=%d\n", rc2));
1408 }
1409 }
1410 else
1411 {
1412 dprintf(("supdrvOSGipMap: do_mmap failed ulAddr=%#lx\n", ulAddr));
1413 rc = SUPDRV_ERR_NO_MEMORY;
1414 }
1415 up_write(&current->mm->mmap_sem); /* not quite sure when to give this up. */
1416
1417 /*
1418 * Success?
1419 */
1420 if (!rc)
1421 {
1422 *ppGip = (PCSUPGLOBALINFOPAGE)ulAddr;
1423 dprintf2(("supdrvOSGipMap: ppGip=%p\n", *ppGip));
1424 return 0;
1425 }
1426
1427 /*
1428 * Failure, cleanup and be gone.
1429 */
1430 if (ulAddr & ~PAGE_MASK)
1431 {
1432 down_write(&current->mm->mmap_sem);
1433 MY_DO_MUNMAP(current->mm, ulAddr, PAGE_SIZE);
1434 up_write(&current->mm->mmap_sem);
1435 }
1436
1437 dprintf2(("supdrvOSGipMap: returns %d\n", rc));
1438 return rc;
1439}
1440
1441
/**
 * Unmaps the GIP from user space.
 *
 * @returns 0 (cannot fail).
 * @param   pDevExt     Instance data.
 * @param   pGip        User space address of the GIP mapping to remove.
 */
int VBOXCALL supdrvOSGipUnmap(PSUPDRVDEVEXT pDevExt, PCSUPGLOBALINFOPAGE pGip)
{
    dprintf2(("supdrvOSGipUnmap: pGip=%p\n", pGip));
    /* do_exit() destroys the mm before closing files, so it may already be gone. */
    if (current->mm)
    {
        down_write(&current->mm->mmap_sem);
        MY_DO_MUNMAP(current->mm, (unsigned long)pGip, PAGE_SIZE);
        up_write(&current->mm->mmap_sem);
    }
    dprintf2(("supdrvOSGipUnmap: returns 0\n"));
    return 0;
}
1460
1461
/**
 * Resumes the GIP updating.
 *
 * Re-arms the update timer to fire immediately.
 *
 * @param   pDevExt     Instance data.
 */
void VBOXCALL supdrvOSGipResume(PSUPDRVDEVEXT pDevExt)
{
    dprintf2(("supdrvOSGipResume:\n"));
    mod_timer(&g_GipTimer, jiffies);
}
1472
1473
/**
 * Suspends the GIP updating.
 *
 * Stops the update timer; supdrvOSGipResume() re-arms it.
 *
 * @param   pDevExt     Instance data.
 */
void VBOXCALL supdrvOSGipSuspend(PSUPDRVDEVEXT pDevExt)
{
    dprintf2(("supdrvOSGipSuspend:\n"));
    if (timer_pending(&g_GipTimer))
        del_timer(&g_GipTimer);
}
1485
1486
1487/**
1488 * Converts a supdrv error code to an linux error code.
1489 *
1490 * @returns corresponding linux error code.
1491 * @param rc supdrv error code (SUPDRV_ERR_* defines).
1492 */
1493static int VBoxSupDrvErr2LinuxErr(int rc)
1494{
1495 switch (rc)
1496 {
1497 case 0: return 0;
1498 case SUPDRV_ERR_GENERAL_FAILURE: return -EACCES;
1499 case SUPDRV_ERR_INVALID_PARAM: return -EINVAL;
1500 case SUPDRV_ERR_INVALID_MAGIC: return -EILSEQ;
1501 case SUPDRV_ERR_INVALID_HANDLE: return -ENXIO;
1502 case SUPDRV_ERR_INVALID_POINTER: return -EFAULT;
1503 case SUPDRV_ERR_LOCK_FAILED: return -ENOLCK;
1504 case SUPDRV_ERR_ALREADY_LOADED: return -EEXIST;
1505 case SUPDRV_ERR_PERMISSION_DENIED: return -EPERM;
1506 case SUPDRV_ERR_VERSION_MISMATCH: return -ENOSYS;
1507 }
1508
1509 return -EPERM;
1510}
1511
1512
/**
 * Formatted printing to the kernel log.
 *
 * Formats into a fixed 512-byte stack buffer and forwards it to printk().
 * Output longer than the buffer is silently truncated.
 *
 * @returns 0.
 * @param   pszFormat   printf-style format string.
 */
RTDECL(int) SUPR0Printf(const char *pszFormat, ...)
{
#if 1
    va_list args;
    char szMsg[512];

    va_start(args, pszFormat);
    vsnprintf(szMsg, sizeof(szMsg) - 1, pszFormat, args);
    szMsg[sizeof(szMsg) - 1] = '\0'; /* paranoia: guarantee termination even on truncation. */
    printk("%s", szMsg);
    va_end(args);
#else
    /* forward to printf - needs some more GCC hacking to fix ebp... */
    /* NOTE(review): this branch is disabled and does not compile as written
       (stray comma before the asm constraints); keep it off. */
    __asm__ __volatile__ ("mov %0, %esp\n\t"
                          "jmp %1\n\t",
                          :: "r" ((uintptr_t)&pszFormat - 4),
                             "m" (printk));
#endif
    return 0;
}
1533
1534
/** Runtime assert implementation for Linux Ring-0.
 * Prints the failed expression and its source location to the kernel log.
 *
 * @param   pszExpr     The expression that failed.
 * @param   uLine       Source line number.
 * @param   pszFile     Source file name.
 * @param   pszFunction Enclosing function name.
 */
RTDECL(void) AssertMsg1(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
{
    printk("!!Assertion Failed!!\n"
           "Expression: %s\n"
           "Location : %s(%d) %s\n",
           pszExpr, pszFile, uLine, pszFunction);
}
1543
1544
1545/** Runtime assert implementation for Linux Ring-0. */
1546RTDECL(void) AssertMsg2(const char *pszFormat, ...)
1547{ /* forwarder. */
1548 va_list ap;
1549 char msg[256];
1550
1551 va_start(ap, pszFormat);
1552 vsnprintf(msg, sizeof(msg) - 1, pszFormat, ap);
1553 msg[sizeof(msg) - 1] = '\0';
1554 printk("%s", msg);
1555 va_end(ap);
1556}
1557
1558
/* GCC C++ hack: dummy __gxx_personality_v0 definition — presumably to satisfy
   the linker for C++ objects without pulling in the C++ runtime (the value is
   an arbitrary 0xcc poison pattern); confirm against the build setup. */
unsigned __gxx_personality_v0 = 0xcccccccc;
1561
1562
/* Module entry and exit points plus the usual module metadata. */
module_init(VBoxSupDrvInit);
module_exit(VBoxSupDrvUnload);

MODULE_AUTHOR("InnoTek Systemberatung GmbH");
MODULE_DESCRIPTION("VirtualBox Support Driver");
MODULE_LICENSE("GPL");
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette