VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/Support/linux/SUPDrv-linux.c@ 260

Last change on this file since 260 was 260, checked in by vboxsync, 18 years ago

Fixed the vboxdrv kernel module on 2.4 series kernels

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 45.0 KB
Line 
1/** @file
2 *
3 * VBox host drivers - Ring-0 support drivers - Linux host:
4 * Linux host driver code
5 */
6
7/*
8 * Copyright (C) 2006 InnoTek Systemberatung GmbH
9 *
10 * This file is part of VirtualBox Open Source Edition (OSE), as
11 * available from http://www.virtualbox.org. This file is free software;
12 * you can redistribute it and/or modify it under the terms of the GNU
13 * General Public License as published by the Free Software Foundation,
14 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
15 * distribution. VirtualBox OSE is distributed in the hope that it will
16 * be useful, but WITHOUT ANY WARRANTY of any kind.
17 *
18 * If you received this file as part of a commercial VirtualBox
19 * distribution, then only the terms of your commercial VirtualBox
20 * license agreement apply instead of the previous paragraph.
21 */
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#include "SUPDRV.h"
27#include <iprt/assert.h>
28#include <iprt/spinlock.h>
29#include <iprt/semaphore.h>
30
31#include <linux/module.h>
32#include <linux/kernel.h>
33#include <linux/init.h>
34#include <linux/fs.h>
35#include <linux/mm.h>
36#include <linux/pagemap.h>
37#include <linux/slab.h>
38#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
39# include <linux/jiffies.h>
40#endif
41#include <asm/mman.h>
42#include <asm/io.h>
43#include <asm/uaccess.h>
44#ifdef CONFIG_DEVFS_FS
45# include <linux/devfs_fs_kernel.h>
46#endif
47#ifdef CONFIG_VBOXDRV_AS_MISC
48# include <linux/miscdevice.h>
49#endif
50#ifdef CONFIG_X86_LOCAL_APIC
51# include <asm/apic.h>
52# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
53# include <asm/nmi.h>
54# endif
55#endif
56
57#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
58# ifndef page_to_pfn
59# define page_to_pfn(page) ((page) - mem_map)
60# endif
61# include <asm/pgtable.h>
62# define global_flush_tlb __flush_tlb_global
63#endif
64
65/* devfs defines */
66#if defined(CONFIG_DEVFS_FS) && !defined(CONFIG_VBOXDRV_AS_MISC)
67# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
68
69# define VBOX_REGISTER_DEVFS() \
70({ \
71 void *rc = NULL; \
72 if (devfs_mk_cdev(MKDEV(DEVICE_MAJOR, 0), \
73 S_IFCHR | S_IRUGO | S_IWUGO, \
74 DEVICE_NAME) == 0) \
75 rc = (void *)' '; /* return not NULL */ \
76 rc; \
77 })
78
79# define VBOX_UNREGISTER_DEVFS(handle) \
80 devfs_remove(DEVICE_NAME);
81
82# else /* < 2.6.0 */
83
84# define VBOX_REGISTER_DEVFS() \
85 devfs_register(NULL, DEVICE_NAME, DEVFS_FL_DEFAULT, \
86 DEVICE_MAJOR, 0, \
87 S_IFCHR | S_IRUGO | S_IWUGO, \
88 &gFileOpsVBoxDrv, NULL)
89
90# define VBOX_UNREGISTER_DEVFS(handle) \
91 if (handle != NULL) \
92 devfs_unregister(handle)
93
94# endif /* < 2.6.0 */
95#endif /* CONFIG_DEV_FS && !CONFIG_VBOXDEV_AS_MISC */
96
97#ifndef CONFIG_VBOXDRV_AS_MISC
98# if defined(CONFIG_DEVFS_FS) && LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 0)
99# define VBOX_REGISTER_DEVICE(a,b,c) devfs_register_chrdev(a,b,c)
100# define VBOX_UNREGISTER_DEVICE(a,b) devfs_unregister_chrdev(a,b)
101# else
102# define VBOX_REGISTER_DEVICE(a,b,c) register_chrdev(a,b,c)
103# define VBOX_UNREGISTER_DEVICE(a,b) unregister_chrdev(a,b)
104# endif
105#endif /* !CONFIG_VBOXDRV_AS_MISC */
106
107
108#ifdef CONFIG_X86_HIGH_ENTRY
109# error "CONFIG_X86_HIGH_ENTRY is not supported by VBoxDrv at this time."
110#endif
111
112/*
113 * This sucks soooo badly on x86! Why don't they export __PAGE_KERNEL_EXEC so PAGE_KERNEL_EXEC would be usable?
114 */
115#if defined(__AMD64__)
116# define MY_PAGE_KERNEL_EXEC PAGE_KERNEL_EXEC
117#elif defined(PAGE_KERNEL_EXEC) && defined(CONFIG_X86_PAE)
118# define MY_PAGE_KERNEL_EXEC __pgprot(cpu_has_pge ? _PAGE_KERNEL_EXEC | _PAGE_GLOBAL : _PAGE_KERNEL_EXEC)
119#else
120# define MY_PAGE_KERNEL_EXEC PAGE_KERNEL
121#endif
122
123/*
124 * The redhat hack section.
125 * - The current hacks are for 2.4.21-15.EL only.
126 */
127#ifndef NO_REDHAT_HACKS
128/* accounting. */
129# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
130# ifdef VM_ACCOUNT
131# define MY_DO_MUNMAP(a,b,c) do_munmap(a, b, c, 0) /* should it be 1 or 0? */
132# endif
133# endif
134
135/* backported remap_page_range. */
136# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
137# include <asm/tlb.h>
138# ifdef tlb_vma /* probably not good enough... */
139# define HAVE_26_STYLE_REMAP_PAGE_RANGE 1
140# endif
141# endif
142
143# ifndef __AMD64__
144/* In 2.6.9-22.ELsmp we have to call change_page_attr() twice when changing
145 * the page attributes from PAGE_KERNEL to something else, because there appears
146 * to be a bug in one of the many patches that redhat applied.
147 * It should be safe to do this on less buggy linux kernels too. ;-)
148 */
149# define MY_CHANGE_PAGE_ATTR(pPages, cPages, prot) \
150 do { \
151 if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) \
152 change_page_attr(pPages, cPages, prot); \
153 change_page_attr(pPages, cPages, prot); \
154 } while (0)
155# endif
156#endif /* !NO_REDHAT_HACKS */
157
158
159#ifndef MY_DO_MUNMAP
160# define MY_DO_MUNMAP(a,b,c) do_munmap(a, b, c)
161#endif
162
163#ifndef MY_CHANGE_PAGE_ATTR
164# ifdef __AMD64__ /** @todo This is a cheap hack, but it'll get around that 'else BUG();' in __change_page_attr(). */
165# define MY_CHANGE_PAGE_ATTR(pPages, cPages, prot) \
166 do { \
167 change_page_attr(pPages, cPages, PAGE_KERNEL_NOCACHE); \
168 change_page_attr(pPages, cPages, prot); \
169 } while (0)
170# else
171# define MY_CHANGE_PAGE_ATTR(pPages, cPages, prot) change_page_attr(pPages, cPages, prot)
172# endif
173#endif
174
175
176/** @def ONE_MSEC_IN_JIFFIES
177 * The number of jiffies that make up 1 millisecond. This is only actually used
178 * when HZ is > 1000. */
179#if HZ <= 1000
180# define ONE_MSEC_IN_JIFFIES 0
181#elif !(HZ % 1000)
182# define ONE_MSEC_IN_JIFFIES (HZ / 1000)
183#else
184# define ONE_MSEC_IN_JIFFIES ((HZ + 999) / 1000)
185# error "HZ is not a multiple of 1000, the GIP stuff won't work right!"
186#endif
187
188
189/*******************************************************************************
190* Defined Constants And Macros *
191*******************************************************************************/
192/**
193 * Device extention & session data association structure.
194 */
195static SUPDRVDEVEXT g_DevExt;
196
197/** Timer structure for the GIP update. */
198static struct timer_list g_GipTimer;
199/** Pointer to the page structure for the GIP. */
200struct page *g_pGipPage;
201
202/** Registered devfs device handle. */
203#if defined(CONFIG_DEVFS_FS) && !defined(CONFIG_VBOXDRV_AS_MISC)
204# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
205static void *g_hDevFsVBoxDrv = NULL;
206# else
207static devfs_handle_t g_hDevFsVBoxDrv = NULL;
208# endif
209#endif
210
211#ifndef CONFIG_VBOXDRV_AS_MISC
212/** Module major number */
213#define DEVICE_MAJOR 234
214/** Saved major device number */
215static int g_iModuleMajor;
216#endif /* !CONFIG_VBOXDRV_AS_MISC */
217
218/** The module name. */
219#define DEVICE_NAME "vboxdrv"
220
221
222
223
224/*******************************************************************************
225* Internal Functions *
226*******************************************************************************/
227static int VBoxSupDrvInit(void);
228static void VBoxSupDrvUnload(void);
229static int VBoxSupDrvCreate(struct inode *pInode, struct file *pFilp);
230static int VBoxSupDrvClose(struct inode *pInode, struct file *pFilp);
231static int VBoxSupDrvDeviceControl(struct inode *pInode, struct file *pFilp,
232 unsigned int IOCmd, unsigned long IOArg);
233static void *VBoxSupDrvMapUser(struct page **papPages, unsigned cPages, unsigned fProt, pgprot_t pgFlags);
234static int VBoxSupDrvInitGip(PSUPDRVDEVEXT pDevExt);
235static int VBoxSupDrvTermGip(PSUPDRVDEVEXT pDevExt);
236static void VBoxSupGipTimer(unsigned long ulUser);
237static int VBoxSupDrvOrder(unsigned long size);
238static int VBoxSupDrvErr2LinuxErr(int);
239
240
241/** The file_operations structure. */
242static struct file_operations gFileOpsVBoxDrv =
243{
244 owner: THIS_MODULE,
245 open: VBoxSupDrvCreate,
246 release: VBoxSupDrvClose,
247 ioctl: VBoxSupDrvDeviceControl,
248};
249
250#ifdef CONFIG_VBOXDRV_AS_MISC
251/** The miscdevice structure. */
252static struct miscdevice gMiscDevice =
253{
254 minor: MISC_DYNAMIC_MINOR,
255 name: DEVICE_NAME,
256 fops: &gFileOpsVBoxDrv,
257# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) && \
258 LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 17)
259 devfs_name: DEVICE_NAME,
260# endif
261};
262#endif
263
264
/**
 * Initialize module.
 *
 * Refuses to load when an NMI watchdog is active (an NMI inside the world
 * switcher would crash the machine), then registers the device node
 * (misc device or classic character device depending on the build config),
 * initializes the device extension and creates the GIP page.
 *
 * @returns appropriate status code (0 on success, negative errno on failure).
 */
static int __init VBoxSupDrvInit(void)
{
    int rc;

    dprintf(("VBoxDrv::ModuleInit\n"));

#ifdef CONFIG_X86_LOCAL_APIC
    /*
     * If an NMI occurs while we are inside the world switcher the machine will crash.
     * The Linux NMI watchdog generates periodic NMIs increasing a counter which is
     * compared with another counter increased in the timer interrupt handler. Therefore
     * we don't allow to setup an NMI watchdog.
     */
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
    /*
     * First test: NMI activated? Only works with Linux 2.6 -- 2.4 does not export
     * the nmi_watchdog variable.
     */
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
    /*
     * Permanent IO_APIC mode active? No way to handle this!
     */
    if (nmi_watchdog == NMI_IO_APIC)
    {
        printk(KERN_ERR DEVICE_NAME
               ": NMI watchdog in IO_APIC mode active -- refused to load the kernel module!\n"
               DEVICE_NAME
               ": Please disable the NMI watchdog by specifying 'nmi_watchdog=0' at kernel\n"
               DEVICE_NAME
               ": command line.\n");
        return -EINVAL;
    }

    /*
     * See arch/i386/kernel/nmi.c on >= 2.6.19: -1 means it can never be enabled again
     */
    atomic_set(&nmi_active, -1);
    printk(KERN_INFO DEVICE_NAME ": Trying to deactivate NMI watchdog permanently...\n");

    /*
     * Now fall through and see if it actually was enabled before. If so, fail
     * as we cannot deactivate it cleanly from here.
     */
# else /* < 2.6.19 */
    /*
     * Older 2.6 kernels: nmi_watchdog is not initialized by default
     */
    if (nmi_watchdog != NMI_NONE)
        goto nmi_activated;
# endif
# endif /* >= 2.6.0 */

    /*
     * Second test: Interrupt generated by performance counter not masked and can
     * generate an NMI. Works also with Linux 2.4.
     */
    {
        unsigned int v, ver, maxlvt;

        v = apic_read(APIC_LVR);
        ver = GET_APIC_VERSION(v);
        /* 82489DXs do not report # of LVT entries. */
        maxlvt = APIC_INTEGRATED(ver) ? GET_APIC_MAXLVT(v) : 2;
        if (maxlvt >= 4)
        {
            /* Read status of performance counter IRQ vector */
            v = apic_read(APIC_LVTPC);

            /* performance counter generates NMI and is not masked? */
            if ((GET_APIC_DELIVERY_MODE(v) == APIC_MODE_NMI) && !(v & APIC_LVT_MASKED))
            {
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
                printk(KERN_ERR DEVICE_NAME
                       ": NMI watchdog either active or at least initialized. Please disable the NMI\n"
                       DEVICE_NAME
                       ": watchdog by specifying 'nmi_watchdog=0' at kernel command line.\n");
                return -EINVAL;
# else /* < 2.6.19 */
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
nmi_activated:
# endif
                printk(KERN_ERR DEVICE_NAME
                       ": NMI watchdog active -- refused to load the kernel module! Please disable\n"
                       DEVICE_NAME
                       ": the NMI watchdog by specifying 'nmi_watchdog=0' at kernel command line.\n");
                return -EINVAL;
# endif /* >= 2.6.19 */
            }
        }
    }
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
    printk(KERN_INFO DEVICE_NAME ": Successfully done.\n");
# endif /* >= 2.6.19 */
#endif /* CONFIG_X86_LOCAL_APIC */

#ifdef CONFIG_VBOXDRV_AS_MISC
    rc = misc_register(&gMiscDevice);
    if (rc)
    {
        printk(KERN_ERR DEVICE_NAME ": Can't register misc device! rc=%d\n", rc);
        return rc;
    }
#else /* !CONFIG_VBOXDRV_AS_MISC */
    /*
     * Register character device.
     */
    g_iModuleMajor = DEVICE_MAJOR;
    rc = VBOX_REGISTER_DEVICE((dev_t)g_iModuleMajor, DEVICE_NAME, &gFileOpsVBoxDrv);
    if (rc < 0)
    {
        dprintf(("VBOX_REGISTER_DEVICE failed with rc=%#x!\n", rc));
        return rc;
    }

    /*
     * Save returned module major number.
     * With a non-zero DEVICE_MAJOR the major is static; otherwise the kernel
     * picked a dynamic one and returned it in rc.
     */
    if (DEVICE_MAJOR != 0)
        g_iModuleMajor = DEVICE_MAJOR;
    else
        g_iModuleMajor = rc;
    rc = 0;

#ifdef CONFIG_DEVFS_FS
    /*
     * Register a device entry
     */
    g_hDevFsVBoxDrv = VBOX_REGISTER_DEVFS();
    if (g_hDevFsVBoxDrv == NULL)
    {
        dprintf(("devfs_register failed!\n"));
        rc = -EINVAL;
    }
#endif
#endif /* !CONFIG_VBOXDRV_AS_MISC */
    if (!rc)
    {
        /*
         * Initialize the device extension.
         */
        rc = supdrvInitDevExt(&g_DevExt);
        if (!rc)
        {
            /*
             * Create the GIP page.
             */
            rc = VBoxSupDrvInitGip(&g_DevExt);
            if (!rc)
            {
                dprintf(("VBoxDrv::ModuleInit returning %#x\n", rc));
                return rc;
            }
            supdrvDeleteDevExt(&g_DevExt);
        }
        else
            rc = -EINVAL;

        /*
         * Failed, cleanup and return the error code.
         */
#if defined(CONFIG_DEVFS_FS) && !defined(CONFIG_VBOXDRV_AS_MISC)
        VBOX_UNREGISTER_DEVFS(g_hDevFsVBoxDrv);
#endif
    }
#ifdef CONFIG_VBOXDRV_AS_MISC
    misc_deregister(&gMiscDevice);
    dprintf(("VBoxDrv::ModuleInit returning %#x (minor:%d)\n", rc, gMiscDevice.minor));
#else
    VBOX_UNREGISTER_DEVICE(g_iModuleMajor, DEVICE_NAME);
    dprintf(("VBoxDrv::ModuleInit returning %#x (major:%d)\n", rc, g_iModuleMajor));
#endif
    return rc;
}
443
444
/**
 * Unload the module.
 *
 * Unregisters the device node (misc or character device depending on the
 * build config), destroys the GIP and deletes the device extension.
 */
static void __exit VBoxSupDrvUnload(void)
{
    int rc;
    dprintf(("VBoxSupDrvUnload\n"));

    /*
     * I don't think it's possible to unload a driver which processes have
     * opened, at least we'll blindly assume that here.
     */
#ifdef CONFIG_VBOXDRV_AS_MISC
    /* misc_deregister() returns an int on these older kernels. */
    rc = misc_deregister(&gMiscDevice);
    if (rc < 0)
    {
        dprintf(("misc_deregister failed with rc=%#x\n", rc));
    }
#else /* !CONFIG_VBOXDRV_AS_MISC */
#ifdef CONFIG_DEVFS_FS
    /*
     * Unregister a device entry
     */
    VBOX_UNREGISTER_DEVFS(g_hDevFsVBoxDrv);
#endif // devfs
    rc = VBOX_UNREGISTER_DEVICE(g_iModuleMajor, DEVICE_NAME);
    if (rc < 0)
    {
        dprintf(("unregister_chrdev failed with rc=%#x (major:%d)\n", rc, g_iModuleMajor));
    }
#endif /* !CONFIG_VBOXDRV_AS_MISC */

    /*
     * Destroy GIP and delete the device extension.
     */
    VBoxSupDrvTermGip(&g_DevExt);
    supdrvDeleteDevExt(&g_DevExt);
}
483
484
485/**
486 * Device open. Called on open /dev/vboxdrv
487 *
488 * @param pInode Pointer to inode info structure.
489 * @param pFilp Associated file pointer.
490 */
491static int VBoxSupDrvCreate(struct inode *pInode, struct file *pFilp)
492{
493 int rc;
494 PSUPDRVSESSION pSession;
495 dprintf(("VBoxSupDrvCreate: pFilp=%p\n", pFilp));
496
497 /*
498 * Call common code for the rest.
499 */
500 rc = supdrvCreateSession(&g_DevExt, (PSUPDRVSESSION *)&pSession);
501 if (!rc)
502 {
503 pSession->Uid = current->euid;
504 pSession->Gid = current->egid;
505 pSession->Process = (RTPROCESS)current->tgid;
506 }
507
508 dprintf(("VBoxSupDrvCreate: g_DevExt=%p pSession=%p rc=%d\n", &g_DevExt, pSession, rc));
509 pFilp->private_data = pSession;
510
511 return VBoxSupDrvErr2LinuxErr(rc);
512}
513
514
515/**
516 * Close device.
517 *
518 * @param pInode Pointer to inode info structure.
519 * @param pFilp Associated file pointer.
520 */
521static int VBoxSupDrvClose(struct inode *pInode, struct file *pFilp)
522{
523 dprintf(("VBoxSupDrvClose: pFilp=%p private_data=%p\n", pFilp, pFilp->private_data));
524 supdrvCloseSession(&g_DevExt, (PSUPDRVSESSION)pFilp->private_data);
525 pFilp->private_data = NULL;
526 return 0;
527}
528
529
/**
 * Device I/O Control entry point.
 *
 * Copies the SUPDRVIOCTLDATA descriptor and the input buffer from user
 * space, hands the request to the common supdrvIOCtl() code, and copies
 * the output buffer back.
 *
 * @returns 0 on success, negative linux error code on failure.
 * @param pInode Pointer to inode info structure.
 * @param pFilp Associated file pointer.
 * @param IOCmd The function specified to ioctl().
 * @param IOArg The argument specified to ioctl().
 */
static int VBoxSupDrvDeviceControl(struct inode *pInode, struct file *pFilp,
                                   unsigned int IOCmd, unsigned long IOArg)
{
    int rc;
    SUPDRVIOCTLDATA Args;
    void *pvBuf = NULL;
    int cbBuf = 0;
    unsigned cbOut = 0;

    dprintf2(("VBoxSupDrvDeviceControl: pFilp=%p IOCmd=%x IOArg=%p\n", pFilp, IOCmd, (void *)IOArg));

    /*
     * Copy ioctl data structure from user space.
     */
    if (_IOC_SIZE(IOCmd) != sizeof(SUPDRVIOCTLDATA))
    {
        dprintf(("VBoxSupDrvDeviceControl: incorrect input length! cbArgs=%d\n", _IOC_SIZE(IOCmd)));
        return -EINVAL;
    }
    if (copy_from_user(&Args, (void *)IOArg, _IOC_SIZE(IOCmd)))
    {
        dprintf(("VBoxSupDrvDeviceControl: copy_from_user(&Args) failed.\n"));
        return -EFAULT;
    }

    /*
     * Allocate and copy user space input data buffer to kernel space.
     * NOTE(review): Args.cbIn/cbOut come straight from user space and are not
     * bounded here before the vmalloc() -- presumably supdrvIOCtl() validates
     * per-request sizes; verify, since a huge value could exhaust the vmalloc
     * area.
     */
    if (Args.cbIn > 0 || Args.cbOut > 0)
    {
        /* One buffer serves as both input and output; size it for the larger. */
        cbBuf = max(Args.cbIn, Args.cbOut);
        pvBuf = vmalloc(cbBuf);
        if (pvBuf == NULL)
        {
            dprintf(("VBoxSupDrvDeviceControl: failed to allocate buffer of %d bytes.\n", cbBuf));
            return -ENOMEM;
        }

        if (copy_from_user(pvBuf, (void *)Args.pvIn, Args.cbIn))
        {
            dprintf(("VBoxSupDrvDeviceControl: copy_from_user(pvBuf) failed.\n"));
            vfree(pvBuf);
            return -EFAULT;
        }
    }

    /*
     * Process the IOCtl.
     */
    rc = supdrvIOCtl(IOCmd, &g_DevExt, (PSUPDRVSESSION)pFilp->private_data,
                     pvBuf, Args.cbIn, pvBuf, Args.cbOut, &cbOut);

    /*
     * Copy ioctl data and output buffer back to user space.
     */
    if (rc)
    {
        dprintf(("VBoxSupDrvDeviceControl: pFilp=%p IOCmd=%x IOArg=%p failed, rc=%d (linux rc=%d)\n",
                 pFilp, IOCmd, (void *)IOArg, rc, VBoxSupDrvErr2LinuxErr(rc)));
        rc = VBoxSupDrvErr2LinuxErr(rc);
    }
    else if (cbOut > 0)
    {
        /* Sanity-check the byte count reported by the common code before
         * copying it out; anything beyond the buffer means supdrvIOCtl
         * misbehaved. */
        if (pvBuf != NULL && cbOut <= cbBuf)
        {
            if (copy_to_user((void *)Args.pvOut, pvBuf, cbOut))
            {
                dprintf(("copy_to_user failed.\n"));
                rc = -EFAULT;
            }
        }
        else
        {
            dprintf(("WHAT!?! supdrvIOCtl messed up! cbOut=%d cbBuf=%d pvBuf=%p\n", cbOut, cbBuf, pvBuf));
            rc = -EPERM;
        }
    }

    if (pvBuf)
        vfree(pvBuf);

    dprintf2(("VBoxSupDrvDeviceControl: returns %d\n", rc));
    return rc;
}
622
623
624/**
625 * Initializes any OS specific object creator fields.
626 */
627void VBOXCALL supdrvOSObjInitCreator(PSUPDRVOBJ pObj, PSUPDRVSESSION pSession)
628{
629 NOREF(pObj);
630 NOREF(pSession);
631}
632
633
634/**
635 * Checks if the session can access the object.
636 *
637 * @returns true if a decision has been made.
638 * @returns false if the default access policy should be applied.
639 *
640 * @param pObj The object in question.
641 * @param pSession The session wanting to access the object.
642 * @param pszObjName The object name, can be NULL.
643 * @param prc Where to store the result when returning true.
644 */
645bool VBOXCALL supdrvOSObjCanAccess(PSUPDRVOBJ pObj, PSUPDRVSESSION pSession, const char *pszObjName, int *prc)
646{
647 NOREF(pObj);
648 NOREF(pSession);
649 NOREF(pszObjName);
650 NOREF(prc);
651 return false;
652}
653
654
/**
 * Compute allocation order. Some functions allocate 2^order pages.
 *
 * @returns order, i.e. the smallest iOrder with 2^iOrder >= cPages
 *          (0 for cPages <= 1).
 * @param cPages Number of pages.
 */
static int VBoxSupDrvOrder(unsigned long cPages)
{
    int iOrder;
    unsigned long cTmp;

    /* iOrder = floor(log2(cPages)). */
    for (iOrder = 0, cTmp = cPages; cTmp >>= 1; ++iOrder)
        ;
    /* Round up unless cPages is an exact power of two.
     * Bugfix: shift 1UL, not the int literal 1 -- for iOrder >= 31 on LP64
     * systems '1 << iOrder' overflows int (undefined behavior) and the mask
     * would be sign-extended incorrectly. */
    if (cPages & ~(1UL << iOrder))
        ++iOrder;

    return iOrder;
}
673
674
675/**
676 * OS Specific code for locking down memory.
677 *
678 * @returns 0 on success.
679 * @returns SUPDRV_ERR_* on failure.
680 * @param pMem Pointer to memory.
681 * This is not linked in anywhere.
682 * @param paPages Array which should be filled with the address of the physical pages.
683 *
684 * @remark See sgl_map_user_pages() for an example of an similar function.
685 */
686int VBOXCALL supdrvOSLockMemOne(PSUPDRVMEMREF pMem, PSUPPAGE paPages)
687{
688 int rc;
689 struct page **papPages;
690 unsigned iPage;
691 unsigned cPages = pMem->cb >> PAGE_SHIFT;
692 unsigned long pv = (unsigned long)pMem->pvR3;
693
694 /*
695 * Allocate page pointer array.
696 */
697 papPages = vmalloc(cPages * sizeof(*papPages));
698 if (!papPages)
699 return SUPDRV_ERR_NO_MEMORY;
700
701 /*
702 * Get user pages.
703 */
704 down_read(&current->mm->mmap_sem);
705 rc = get_user_pages(current, /* Task for fault acounting. */
706 current->mm, /* Whose pages. */
707 (unsigned long)pv, /* Where from. */
708 cPages, /* How many pages. */
709 1, /* Write to memory. */
710 0, /* force. */
711 papPages, /* Page array. */
712 NULL); /* vmas */
713 if (rc != cPages)
714 {
715 up_read(&current->mm->mmap_sem);
716 dprintf(("supdrvOSLockMemOne: get_user_pages failed. rc=%d\n", rc));
717 return SUPDRV_ERR_LOCK_FAILED;
718 }
719
720 for (iPage = 0; iPage < cPages; iPage++)
721 flush_dcache_page(papPages[iPage]);
722 up_read(&current->mm->mmap_sem);
723
724 pMem->u.locked.papPages = papPages;
725 pMem->u.locked.cPages = cPages;
726
727 /*
728 * Get addresses.
729 */
730 for (iPage = 0; iPage < cPages; iPage++)
731 {
732 paPages[iPage].Phys = page_to_phys(papPages[iPage]);
733 paPages[iPage].uReserved = 0;
734 }
735
736 dprintf2(("supdrvOSLockMemOne: pvR3=%p cb=%d papPages=%p\n",
737 pMem->pvR3, pMem->cb, pMem->u.locked.papPages));
738 return 0;
739}
740
741
742/**
743 * Unlocks the memory pointed to by pv.
744 *
745 * @param pv Memory to unlock.
746 * @param cb Size of the memory (debug).
747 *
748 * @remark See sgl_unmap_user_pages() for an example of an similar function.
749 */
750void VBOXCALL supdrvOSUnlockMemOne(PSUPDRVMEMREF pMem)
751{
752 unsigned iPage;
753 dprintf2(("supdrvOSUnlockMemOne: pvR3=%p cb=%d papPages=%p\n",
754 pMem->pvR3, pMem->cb, pMem->u.locked.papPages));
755
756 /*
757 * Loop thru the pages and release them.
758 */
759 for (iPage = 0; iPage < pMem->u.locked.cPages; iPage++)
760 {
761 if (!PageReserved(pMem->u.locked.papPages[iPage]))
762 SetPageDirty(pMem->u.locked.papPages[iPage]);
763 page_cache_release(pMem->u.locked.papPages[iPage]);
764 }
765
766 /* free the page array */
767 vfree(pMem->u.locked.papPages);
768 pMem->u.locked.cPages = 0;
769}
770
771
772/**
773 * OS Specific code for allocating page aligned memory with continuous fixed
774 * physical paged backing.
775 *
776 * @returns 0 on success.
777 * @returns SUPDRV_ERR_* on failure.
778 * @param pMem Memory reference record of the memory to be allocated.
779 * (This is not linked in anywhere.)
780 * @param ppvR0 Where to store the virtual address of the ring-0 mapping. (optional)
781 * @param ppvR3 Where to store the virtual address of the ring-3 mapping.
782 * @param pHCPhys Where to store the physical address.
783 */
784int VBOXCALL supdrvOSContAllocOne(PSUPDRVMEMREF pMem, void **ppvR0, void **ppvR3, PRTHCPHYS pHCPhys)
785{
786 struct page *paPages;
787 unsigned iPage;
788 unsigned cbAligned = RT_ALIGN(pMem->cb, PAGE_SIZE);
789 unsigned cPages = cbAligned >> PAGE_SHIFT;
790 unsigned cOrder = VBoxSupDrvOrder(cPages);
791 unsigned long ulAddr;
792 dma_addr_t HCPhys;
793 int rc = 0;
794 pgprot_t pgFlags;
795 pgprot_val(pgFlags) = _PAGE_PRESENT | _PAGE_RW | _PAGE_USER;
796
797 Assert(ppvR3);
798 Assert(pHCPhys);
799
800 /*
801 * Allocate page pointer array.
802 */
803#ifdef __AMD64__ /** @todo check out if there is a correct way of getting memory below 4GB (physically). */
804 paPages = alloc_pages(GFP_DMA, cOrder);
805#else
806 paPages = alloc_pages(GFP_USER, cOrder);
807#endif
808 if (!paPages)
809 return SUPDRV_ERR_NO_MEMORY;
810
811 /*
812 * Lock the pages.
813 */
814 for (iPage = 0; iPage < cPages; iPage++)
815 {
816 SetPageReserved(&paPages[iPage]);
817 if (!PageHighMem(&paPages[iPage]) && pgprot_val(MY_PAGE_KERNEL_EXEC) != pgprot_val(PAGE_KERNEL))
818 MY_CHANGE_PAGE_ATTR(&paPages[iPage], 1, MY_PAGE_KERNEL_EXEC);
819#ifdef DEBUG
820 if (iPage + 1 < cPages && (page_to_phys((&paPages[iPage])) + 0x1000) != page_to_phys((&paPages[iPage + 1])))
821 {
822 dprintf(("supdrvOSContAllocOne: Pages are not continuous!!!! iPage=%d phys=%llx physnext=%llx\n",
823 iPage, (long long)page_to_phys((&paPages[iPage])), (long long)page_to_phys((&paPages[iPage + 1]))));
824 BUG();
825 }
826#endif
827 }
828 HCPhys = page_to_phys(paPages);
829
830 /*
831 * Allocate user space mapping and put the physical pages into it.
832 */
833 down_write(&current->mm->mmap_sem);
834 ulAddr = do_mmap(NULL, 0, cbAligned, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED | MAP_ANONYMOUS, 0);
835 if (!(ulAddr & ~PAGE_MASK))
836 {
837#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && !defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
838 int rc2 = remap_page_range(ulAddr, HCPhys, cbAligned, pgFlags);
839#else
840 int rc2 = 0;
841 struct vm_area_struct *vma = find_vma(current->mm, ulAddr);
842 if (vma)
843#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
844 rc2 = remap_page_range(vma, ulAddr, HCPhys, cbAligned, pgFlags);
845#else
846 rc2 = remap_pfn_range(vma, ulAddr, HCPhys >> PAGE_SHIFT, cbAligned, pgFlags);
847#endif
848 else
849 {
850 rc = SUPDRV_ERR_NO_MEMORY;
851 dprintf(("supdrvOSContAllocOne: no vma found for ulAddr=%#lx!\n", ulAddr));
852 }
853#endif
854 if (rc2)
855 {
856 rc = SUPDRV_ERR_NO_MEMORY;
857 dprintf(("supdrvOSContAllocOne: remap_page_range failed rc2=%d\n", rc2));
858 }
859 }
860 else
861 {
862 dprintf(("supdrvOSContAllocOne: do_mmap failed ulAddr=%#lx\n", ulAddr));
863 rc = SUPDRV_ERR_NO_MEMORY;
864 }
865 up_write(&current->mm->mmap_sem); /* not quite sure when to give this up. */
866
867 /*
868 * Success?
869 */
870 if (!rc)
871 {
872 *pHCPhys = HCPhys;
873 *ppvR3 = (void *)ulAddr;
874 if (ppvR0)
875 *ppvR0 = (void *)ulAddr;
876 pMem->pvR3 = (void *)ulAddr;
877 pMem->pvR0 = NULL;
878 pMem->u.cont.paPages = paPages;
879 pMem->u.cont.cPages = cPages;
880 pMem->cb = cbAligned;
881
882 dprintf2(("supdrvOSContAllocOne: pvR0=%p pvR3=%p cb=%d paPages=%p *pHCPhys=%lx *ppvR0=*ppvR3=%p\n",
883 pMem->pvR0, pMem->pvR3, pMem->cb, paPages, (unsigned long)*pHCPhys, *ppvR3));
884 global_flush_tlb();
885 return 0;
886 }
887
888 /*
889 * Failure, cleanup and be gone.
890 */
891 down_write(&current->mm->mmap_sem);
892 if (ulAddr & ~PAGE_MASK)
893 MY_DO_MUNMAP(current->mm, ulAddr, pMem->cb);
894 for (iPage = 0; iPage < cPages; iPage++)
895 {
896 ClearPageReserved(&paPages[iPage]);
897 if (!PageHighMem(&paPages[iPage]) && pgprot_val(MY_PAGE_KERNEL_EXEC) != pgprot_val(PAGE_KERNEL))
898 MY_CHANGE_PAGE_ATTR(&paPages[iPage], 1, PAGE_KERNEL);
899 }
900 up_write(&current->mm->mmap_sem); /* check when we can leave this. */
901 __free_pages(paPages, cOrder);
902
903 global_flush_tlb();
904 return rc;
905}
906
907
/**
 * Frees contiguous memory.
 *
 * Unmaps the ring-3 mapping (if the mm still exists), restores the page
 * attributes and frees the 2^order page block.
 *
 * @param pMem Memory reference record of the memory to be freed.
 */
void VBOXCALL supdrvOSContFreeOne(PSUPDRVMEMREF pMem)
{
    unsigned iPage;

    dprintf2(("supdrvOSContFreeOne: pvR0=%p pvR3=%p cb=%d paPages=%p\n",
              pMem->pvR0, pMem->pvR3, pMem->cb, pMem->u.cont.paPages));

    /*
     * do_exit() destroys the mm before closing files.
     * I really hope it cleans up our stuff properly...
     */
    if (current->mm)
    {
        down_write(&current->mm->mmap_sem);
        MY_DO_MUNMAP(current->mm, (unsigned long)pMem->pvR3, pMem->cb);
        up_write(&current->mm->mmap_sem); /* check when we can leave this. */
    }

    /*
     * Change page attributes freeing the pages.
     */
    for (iPage = 0; iPage < pMem->u.cont.cPages; iPage++)
    {
        ClearPageReserved(&pMem->u.cont.paPages[iPage]);
        if (!PageHighMem(&pMem->u.cont.paPages[iPage]) && pgprot_val(MY_PAGE_KERNEL_EXEC) != pgprot_val(PAGE_KERNEL))
            MY_CHANGE_PAGE_ATTR(&pMem->u.cont.paPages[iPage], 1, PAGE_KERNEL);
    }
    /* The whole block was allocated with one alloc_pages() call; free it the same way. */
    __free_pages(pMem->u.cont.paPages, VBoxSupDrvOrder(pMem->u.cont.cPages));

    pMem->u.cont.cPages = 0;
}
944
945
/**
 * Allocates memory which is mapped into both kernel and user space.
 * The returned memory is page aligned and so is the allocation.
 *
 * @returns 0 on success.
 * @returns SUPDRV_ERR_* on failure.
 * @param pMem Memory reference record of the memory to be allocated.
 *               (This is not linked in anywhere.)
 * @param ppvR0 Where to store the address of the Ring-0 mapping.
 * @param ppvR3 Where to store the address of the Ring-3 mapping.
 */
int VBOXCALL supdrvOSMemAllocOne(PSUPDRVMEMREF pMem, void **ppvR0, void **ppvR3)
{
    const unsigned cbAligned = RT_ALIGN(pMem->cb, PAGE_SIZE);
    const unsigned cPages = cbAligned >> PAGE_SHIFT;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 22)
    unsigned cOrder = VBoxSupDrvOrder(cPages);
    struct page *paPages;
#endif
    struct page **papPages;
    unsigned iPage;
    pgprot_t pgFlags;
    /* Present, writable, user-accessible -- used for both the vmap and the user mapping. */
    pgprot_val(pgFlags) = _PAGE_PRESENT | _PAGE_RW | _PAGE_USER;

    /*
     * Allocate array with page pointers.
     */
    pMem->u.mem.cPages = 0;
    pMem->u.mem.papPages = papPages = kmalloc(sizeof(papPages[0]) * cPages, GFP_KERNEL);
    if (!papPages)
        return SUPDRV_ERR_NO_MEMORY;

    /*
     * Allocate the pages. On >= 2.4.22 each page is allocated individually
     * (they need not be physically contiguous); older kernels take one
     * contiguous 2^order block.
     */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
    for (iPage = 0; iPage < cPages; iPage++)
    {
        papPages[iPage] = alloc_page(GFP_HIGHUSER);
        if (!papPages[iPage])
        {
            /* Record how far we got so the cleanup frees only what exists. */
            pMem->u.mem.cPages = iPage;
            supdrvOSMemFreeOne(pMem);
            return SUPDRV_ERR_NO_MEMORY;
        }
    }

#else /* < 2.4.22 */
    paPages = alloc_pages(GFP_USER, cOrder);
    if (!paPages)
    {
        supdrvOSMemFreeOne(pMem);
        return SUPDRV_ERR_NO_MEMORY;
    }
    for (iPage = 0; iPage < cPages; iPage++)
    {
        papPages[iPage] = &paPages[iPage];
        if (pgprot_val(MY_PAGE_KERNEL_EXEC) != pgprot_val(PAGE_KERNEL))
            MY_CHANGE_PAGE_ATTR(papPages[iPage], 1, MY_PAGE_KERNEL_EXEC);
        if (PageHighMem(papPages[iPage]))
            BUG();
    }
#endif
    pMem->u.mem.cPages = cPages;

    /*
     * Reserve the pages.
     */
    for (iPage = 0; iPage < cPages; iPage++)
        SetPageReserved(papPages[iPage]);

    /*
     * Create the Ring-0 mapping.
     */
    if (ppvR0)
    {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
# ifdef VM_MAP
        *ppvR0 = pMem->pvR0 = vmap(papPages, cPages, VM_MAP, pgFlags);
# else
        *ppvR0 = pMem->pvR0 = vmap(papPages, cPages, VM_ALLOC, pgFlags);
# endif
#else
        /* Contiguous low-memory block: the kernel's direct mapping suffices. */
        *ppvR0 = pMem->pvR0 = phys_to_virt(page_to_phys(papPages[0]));
#endif
    }
    if (pMem->pvR0 || !ppvR0)
    {
        /*
         * Create the ring3 mapping.
         */
        if (ppvR3)
            *ppvR3 = pMem->pvR3 = VBoxSupDrvMapUser(papPages, cPages, PROT_READ | PROT_WRITE | PROT_EXEC, pgFlags);
        if (pMem->pvR3 || !ppvR3)
            return 0;
        dprintf(("supdrvOSMemAllocOne: failed to map into r3! cPages=%u\n", cPages));
    }
    else
        dprintf(("supdrvOSMemAllocOne: failed to map into r0! cPages=%u\n", cPages));

    supdrvOSMemFreeOne(pMem);
    return SUPDRV_ERR_NO_MEMORY;
}
1049
1050
1051/**
1052 * Get the physical addresses of the pages in the allocation.
1053 * This is called while inside bundle the spinlock.
1054 *
1055 * @param pMem Memory reference record of the memory.
1056 * @param paPages Where to store the page addresses.
1057 */
1058void VBOXCALL supdrvOSMemGetPages(PSUPDRVMEMREF pMem, PSUPPAGE paPages)
1059{
1060 unsigned iPage;
1061 for (iPage = 0; iPage < pMem->u.mem.cPages; iPage++)
1062 {
1063 paPages[iPage].Phys = page_to_phys(pMem->u.mem.papPages[iPage]);
1064 paPages[iPage].uReserved = 0;
1065 }
1066}
1067
1068
1069/**
1070 * Frees memory allocated by supdrvOSMemAllocOne().
1071 *
1072 * @param pMem Memory reference record of the memory to be free.
1073 */
1074void VBOXCALL supdrvOSMemFreeOne(PSUPDRVMEMREF pMem)
1075{
1076 dprintf2(("supdrvOSMemFreeOne: pvR0=%p pvR3=%p cb=%d cPages=%d papPages=%p\n",
1077 pMem->pvR0, pMem->pvR3, pMem->cb, pMem->u.mem.cPages, pMem->u.mem.papPages));
1078
1079 /*
1080 * Unmap the user mapping (if any).
1081 * do_exit() destroys the mm before closing files.
1082 */
1083 if (pMem->pvR3 && current->mm)
1084 {
1085 down_write(&current->mm->mmap_sem);
1086 MY_DO_MUNMAP(current->mm, (unsigned long)pMem->pvR3, RT_ALIGN(pMem->cb, PAGE_SIZE));
1087 up_write(&current->mm->mmap_sem); /* check when we can leave this. */
1088 }
1089 pMem->pvR3 = NULL;
1090
1091 /*
1092 * Unmap the kernel mapping (if any).
1093 */
1094 if (pMem->pvR0)
1095 {
1096#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
1097 vunmap(pMem->pvR0);
1098#endif
1099 pMem->pvR0 = NULL;
1100 }
1101
1102 /*
1103 * Free the physical pages.
1104 */
1105 if (pMem->u.mem.papPages)
1106 {
1107 struct page **papPages = pMem->u.mem.papPages;
1108 const unsigned cPages = pMem->u.mem.cPages;
1109 unsigned iPage;
1110
1111 /* Restore the page flags. */
1112 for (iPage = 0; iPage < cPages; iPage++)
1113 {
1114 ClearPageReserved(papPages[iPage]);
1115#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 22)
1116 if (pgprot_val(MY_PAGE_KERNEL_EXEC) != pgprot_val(PAGE_KERNEL))
1117 MY_CHANGE_PAGE_ATTR(papPages[iPage], 1, PAGE_KERNEL);
1118#endif
1119 }
1120
1121 /* Free the pages. */
1122#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
1123 for (iPage = 0; iPage < pMem->u.cont.cPages; iPage++)
1124 __free_page(papPages[iPage]);
1125#else
1126 if (cPages > 0)
1127 __free_pages(papPages[0], VBoxSupDrvOrder(cPages));
1128#endif
1129 /* Free the page pointer array. */
1130 kfree(papPages);
1131 pMem->u.mem.papPages = NULL;
1132 }
1133 pMem->u.mem.cPages = 0;
1134}
1135
1136
1137/**
1138 * Maps a range of pages into user space.
1139 *
1140 * @returns Pointer to the user space mapping on success.
1141 * @returns NULL on failure.
1142 * @param papPages Array of the pages to map.
1143 * @param cPages Number of pages to map.
1144 * @param fProt The mapping protection.
1145 * @param pgFlags The page level protection.
1146 */
1147static void *VBoxSupDrvMapUser(struct page **papPages, unsigned cPages, unsigned fProt, pgprot_t pgFlags)
1148{
1149 int rc = SUPDRV_ERR_NO_MEMORY;
1150 unsigned long ulAddr;
1151
1152 /*
1153 * Allocate user space mapping.
1154 */
1155 down_write(&current->mm->mmap_sem);
1156 ulAddr = do_mmap(NULL, 0, cPages * PAGE_SIZE, fProt, MAP_SHARED | MAP_ANONYMOUS, 0);
1157 if (!(ulAddr & ~PAGE_MASK))
1158 {
1159 /*
1160 * Map page by page into the mmap area.
1161 * This is generic, paranoid and not very efficient.
1162 */
1163 int rc = 0;
1164 unsigned long ulAddrCur = ulAddr;
1165 unsigned iPage;
1166 for (iPage = 0; iPage < cPages; iPage++, ulAddrCur += PAGE_SIZE)
1167 {
1168#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
1169 struct vm_area_struct *vma = find_vma(current->mm, ulAddrCur);
1170 if (!vma)
1171 break;
1172#endif
1173
1174#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
1175 rc = remap_pfn_range(vma, ulAddrCur, page_to_pfn(papPages[iPage]), PAGE_SIZE, pgFlags);
1176#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
1177 rc = remap_page_range(vma, ulAddrCur, page_to_phys(papPages[iPage]), PAGE_SIZE, pgFlags);
1178#else /* 2.4 */
1179 rc = remap_page_range(ulAddrCur, page_to_phys(papPages[iPage]), PAGE_SIZE, pgFlags);
1180#endif
1181 if (rc)
1182 break;
1183 }
1184
1185 /*
1186 * Successful?
1187 */
1188 if (iPage >= cPages)
1189 {
1190 up_write(&current->mm->mmap_sem);
1191 return (void *)ulAddr;
1192 }
1193
1194 /* no, cleanup! */
1195 if (rc)
1196 dprintf(("VBoxSupDrvMapUser: remap_[page|pfn]_range failed! rc=%d\n", rc));
1197 else
1198 dprintf(("VBoxSupDrvMapUser: find_vma failed!\n"));
1199
1200 MY_DO_MUNMAP(current->mm, ulAddr, cPages * PAGE_SIZE);
1201 }
1202 else
1203 {
1204 dprintf(("supdrvOSContAllocOne: do_mmap failed ulAddr=%#lx\n", ulAddr));
1205 rc = SUPDRV_ERR_NO_MEMORY;
1206 }
1207 up_write(&current->mm->mmap_sem);
1208
1209 return NULL;
1210}
1211
1212
/**
 * Initializes the GIP (Global Information Page).
 *
 * Allocates and reserves the backing page, hands it to the common
 * initialization code and prepares (but does not arm) the update timer.
 *
 * @returns negative errno.
 * @param   pDevExt     Instance data. GIP stuff may be updated.
 */
static int VBoxSupDrvInitGip(PSUPDRVDEVEXT pDevExt)
{
    struct page *pPage;
    dma_addr_t HCPhys;
    PSUPGLOBALINFOPAGE pGip;
    dprintf(("VBoxSupDrvInitGip:\n"));

    /*
     * Allocate the page.
     */
    pPage = alloc_pages(GFP_USER, 0);
    if (!pPage)
    {
        dprintf(("VBoxSupDrvInitGip: failed to allocate the GIP page\n"));
        return -ENOMEM;
    }

    /*
     * Lock the page.
     */
    /* Reserving the page keeps the VM subsystem from touching it; the global
       records it for VBoxSupDrvTermGip() to free. */
    SetPageReserved(pPage);
    g_pGipPage = pPage;

    /*
     * Call common initialization routine.
     */
    HCPhys = page_to_phys(pPage);
    pGip = (PSUPGLOBALINFOPAGE)page_address(pPage);
    /* Seed the jiffies-based monotonic clock used by VBoxSupGipTimer(). */
    pDevExt->ulLastJiffies = jiffies;
#ifdef TICK_NSEC
    /* TICK_NSEC: exact nanoseconds per jiffy, when the kernel provides it. */
    pDevExt->u64LastMonotime = (uint64_t)pDevExt->ulLastJiffies * TICK_NSEC;
    dprintf(("VBoxSupDrvInitGIP: TICK_NSEC=%ld HZ=%d jiffies=%ld now=%lld\n",
             TICK_NSEC, HZ, pDevExt->ulLastJiffies, pDevExt->u64LastMonotime));
#else
    /* Fallback approximation: microseconds per jiffy (NOTE(review): looks
       like it should be nanoseconds, i.e. 1000000000 / HZ — confirm units). */
    pDevExt->u64LastMonotime = (uint64_t)pDevExt->ulLastJiffies * (1000000 / HZ);
    dprintf(("VBoxSupDrvInitGIP: TICK_NSEC=%d HZ=%d jiffies=%ld now=%lld\n",
             (int)(1000000 / HZ), HZ, pDevExt->ulLastJiffies, pDevExt->u64LastMonotime));
#endif
    /* Update frequency is capped at 1000 Hz. */
    supdrvGipInit(pDevExt, pGip, HCPhys, pDevExt->u64LastMonotime,
                  HZ <= 1000 ? HZ : 1000);

    /*
     * Initialize the timer.
     */
    /* The timer is not added here; supdrvOSGipResume() arms it via mod_timer(). */
    init_timer(&g_GipTimer);
    g_GipTimer.data = (unsigned long)pDevExt;
    g_GipTimer.function = VBoxSupGipTimer;
    g_GipTimer.expires = jiffies;

    return 0;
}
1270
1271
1272/**
1273 * Terminates the GIP.
1274 *
1275 * @returns negative errno.
1276 * @param pDevExt Instance data. GIP stuff may be updated.
1277 */
1278static int VBoxSupDrvTermGip(PSUPDRVDEVEXT pDevExt)
1279{
1280 struct page *pPage;
1281 PSUPGLOBALINFOPAGE pGip;
1282 dprintf(("VBoxSupDrvTermGip:\n"));
1283
1284 /*
1285 * Delete the timer if it's pending.
1286 */
1287 if (timer_pending(&g_GipTimer))
1288 del_timer(&g_GipTimer);
1289
1290 /*
1291 * Uninitialize the content.
1292 */
1293 pGip = pDevExt->pGip;
1294 pDevExt->pGip = NULL;
1295 if (pGip)
1296 supdrvGipTerm(pGip);
1297
1298 /*
1299 * Free the page.
1300 */
1301 pPage = g_pGipPage;
1302 g_pGipPage = NULL;
1303 if (pPage)
1304 {
1305 ClearPageReserved(pPage);
1306 __free_pages(pPage, 0);
1307 }
1308
1309 return 0;
1310}
1311
1312/**
1313 * Timer callback function.
1314 * The ulUser parameter is the device extension pointer.
1315 */
1316static void VBoxSupGipTimer(unsigned long ulUser)
1317{
1318 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)ulUser;
1319 unsigned long ulNow = jiffies;
1320 unsigned long ulDiff = ulNow - pDevExt->ulLastJiffies;
1321 pDevExt->ulLastJiffies = ulNow;
1322#ifdef TICK_NSEC
1323 pDevExt->u64LastMonotime += ulDiff * TICK_NSEC;
1324#else
1325 pDevExt->u64LastMonotime += ulDiff * (1000000 / HZ);
1326#endif
1327 supdrvGipUpdate(pDevExt->pGip, pDevExt->u64LastMonotime);
1328 mod_timer(&g_GipTimer, jiffies + (HZ <= 1000 ? 0 : ONE_MSEC_IN_JIFFIES));
1329}
1330
1331
1332/**
1333 * Maps the GIP into user space.
1334 *
1335 * @returns negative errno.
1336 * @param pDevExt Instance data.
1337 */
1338int VBOXCALL supdrvOSGipMap(PSUPDRVDEVEXT pDevExt, PCSUPGLOBALINFOPAGE *ppGip)
1339{
1340 int rc = 0;
1341 unsigned long ulAddr;
1342 unsigned long HCPhys = pDevExt->HCPhysGip;
1343 pgprot_t pgFlags;
1344 pgprot_val(pgFlags) = _PAGE_PRESENT | _PAGE_USER;
1345 dprintf2(("supdrvOSGipMap: ppGip=%p\n", ppGip));
1346
1347 /*
1348 * Allocate user space mapping and put the physical pages into it.
1349 */
1350 down_write(&current->mm->mmap_sem);
1351 ulAddr = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ, MAP_SHARED | MAP_ANONYMOUS, 0);
1352 if (!(ulAddr & ~PAGE_MASK))
1353 {
1354#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && !defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
1355 int rc2 = remap_page_range(ulAddr, HCPhys, PAGE_SIZE, pgFlags);
1356#else
1357 int rc2 = 0;
1358 struct vm_area_struct *vma = find_vma(current->mm, ulAddr);
1359 if (vma)
1360#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
1361 rc2 = remap_page_range(vma, ulAddr, HCPhys, PAGE_SIZE, pgFlags);
1362#else
1363 rc2 = remap_pfn_range(vma, ulAddr, HCPhys >> PAGE_SHIFT, PAGE_SIZE, pgFlags);
1364#endif
1365 else
1366 {
1367 rc = SUPDRV_ERR_NO_MEMORY;
1368 dprintf(("supdrvOSGipMap: no vma found for ulAddr=%#lx!\n", ulAddr));
1369 }
1370#endif
1371 if (rc2)
1372 {
1373 rc = SUPDRV_ERR_NO_MEMORY;
1374 dprintf(("supdrvOSGipMap: remap_page_range failed rc2=%d\n", rc2));
1375 }
1376 }
1377 else
1378 {
1379 dprintf(("supdrvOSGipMap: do_mmap failed ulAddr=%#lx\n", ulAddr));
1380 rc = SUPDRV_ERR_NO_MEMORY;
1381 }
1382 up_write(&current->mm->mmap_sem); /* not quite sure when to give this up. */
1383
1384 /*
1385 * Success?
1386 */
1387 if (!rc)
1388 {
1389 *ppGip = (PCSUPGLOBALINFOPAGE)ulAddr;
1390 dprintf2(("supdrvOSGipMap: ppGip=%p\n", *ppGip));
1391 return 0;
1392 }
1393
1394 /*
1395 * Failure, cleanup and be gone.
1396 */
1397 if (ulAddr & ~PAGE_MASK)
1398 {
1399 down_write(&current->mm->mmap_sem);
1400 MY_DO_MUNMAP(current->mm, ulAddr, PAGE_SIZE);
1401 up_write(&current->mm->mmap_sem);
1402 }
1403
1404 dprintf2(("supdrvOSGipMap: returns %d\n", rc));
1405 return rc;
1406}
1407
1408
1409/**
1410 * Maps the GIP into user space.
1411 *
1412 * @returns negative errno.
1413 * @param pDevExt Instance data.
1414 */
1415int VBOXCALL supdrvOSGipUnmap(PSUPDRVDEVEXT pDevExt, PCSUPGLOBALINFOPAGE pGip)
1416{
1417 dprintf2(("supdrvOSGipUnmap: pGip=%p\n", pGip));
1418 if (current->mm)
1419 {
1420 down_write(&current->mm->mmap_sem);
1421 MY_DO_MUNMAP(current->mm, (unsigned long)pGip, PAGE_SIZE);
1422 up_write(&current->mm->mmap_sem);
1423 }
1424 dprintf2(("supdrvOSGipUnmap: returns 0\n"));
1425 return 0;
1426}
1427
1428
/**
 * Resumes the GIP updating.
 *
 * @param   pDevExt     Instance data.
 */
void VBOXCALL supdrvOSGipResume(PSUPDRVDEVEXT pDevExt)
{
    dprintf2(("supdrvOSGipResume:\n"));
    /* (Re)arm the update timer to fire immediately; mod_timer() also
       activates a timer that is not currently pending. */
    mod_timer(&g_GipTimer, jiffies);
}
1439
1440
/**
 * Suspends the GIP updating.
 *
 * @param   pDevExt     Instance data.
 */
void VBOXCALL supdrvOSGipSuspend(PSUPDRVDEVEXT pDevExt)
{
    dprintf2(("supdrvOSGipSuspend:\n"));
    /* Deactivate the update timer; checked first so an idle timer is a no-op. */
    if (timer_pending(&g_GipTimer))
        del_timer(&g_GipTimer);
}
1452
1453
1454/**
1455 * Converts a supdrv error code to an linux error code.
1456 *
1457 * @returns corresponding linux error code.
1458 * @param rc supdrv error code (SUPDRV_ERR_* defines).
1459 */
1460static int VBoxSupDrvErr2LinuxErr(int rc)
1461{
1462 switch (rc)
1463 {
1464 case 0: return 0;
1465 case SUPDRV_ERR_GENERAL_FAILURE: return -EACCES;
1466 case SUPDRV_ERR_INVALID_PARAM: return -EINVAL;
1467 case SUPDRV_ERR_INVALID_MAGIC: return -EILSEQ;
1468 case SUPDRV_ERR_INVALID_HANDLE: return -ENXIO;
1469 case SUPDRV_ERR_INVALID_POINTER: return -EFAULT;
1470 case SUPDRV_ERR_LOCK_FAILED: return -ENOLCK;
1471 case SUPDRV_ERR_ALREADY_LOADED: return -EEXIST;
1472 case SUPDRV_ERR_PERMISSION_DENIED: return -EPERM;
1473 case SUPDRV_ERR_VERSION_MISMATCH: return -ENOSYS;
1474 }
1475
1476 return -EPERM;
1477}
1478
1479
1480RTDECL(int) SUPR0Printf(const char *pszFormat, ...)
1481{
1482#if 1
1483 va_list args;
1484 char szMsg[512];
1485
1486 va_start(args, pszFormat);
1487 vsnprintf(szMsg, sizeof(szMsg) - 1, pszFormat, args);
1488 szMsg[sizeof(szMsg) - 1] = '\0';
1489 printk("%s", szMsg);
1490 va_end(args);
1491#else
1492 /* forward to printf - needs some more GCC hacking to fix ebp... */
1493 __asm__ __volatile__ ("mov %0, %esp\n\t"
1494 "jmp %1\n\t",
1495 :: "r" ((uintptr_t)&pszFormat - 4),
1496 "m" (printk));
1497#endif
1498 return 0;
1499}
1500
1501
1502/** Runtime assert implementation for Linux Ring-0. */
1503RTDECL(void) AssertMsg1(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
1504{
1505 printk("!!Assertion Failed!!\n"
1506 "Expression: %s\n"
1507 "Location : %s(%d) %s\n",
1508 pszExpr, pszFile, uLine, pszFunction);
1509}
1510
1511
1512/** Runtime assert implementation for Linux Ring-0. */
1513RTDECL(void) AssertMsg2(const char *pszFormat, ...)
1514{ /* forwarder. */
1515 va_list ap;
1516 char msg[256];
1517
1518 va_start(ap, pszFormat);
1519 vsnprintf(msg, sizeof(msg) - 1, pszFormat, ap);
1520 msg[sizeof(msg) - 1] = '\0';
1521 printk("%s", msg);
1522 va_end(ap);
1523}
1524
1525
/* GCC C++ hack: presumably a stand-in for the C++ exception personality
   symbol so C++-compiled objects link without libstdc++ —
   NOTE(review): confirm against the build setup. */
unsigned __gxx_personality_v0 = 0xcccccccc;


/* Module entry and exit points (both defined earlier in this file). */
module_init(VBoxSupDrvInit);
module_exit(VBoxSupDrvUnload);

MODULE_AUTHOR("InnoTek Systemberatung GmbH");
MODULE_DESCRIPTION("VirtualBox Support Driver");
MODULE_LICENSE("GPL");
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette