VirtualBox

source: vbox/trunk/src/VBox/Additions/os2/VBoxGuest/VBoxGuest.cpp@ 4056

Last change on this file since 4056 was 3657, checked in by vboxsync, 18 years ago

export

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 47.9 KB
Line 
1/** $Id: */
2/** @file
3 * VBoxGuest - Guest Additions Driver.
4 */
5
6/*
7 * Copyright (C) 2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * If you received this file as part of a commercial VirtualBox
18 * distribution, then only the terms of your commercial VirtualBox
19 * license agreement apply instead of the previous paragraph.
20 */
21
22
23
24/*******************************************************************************
25* Header Files *
26*******************************************************************************/
27#define LOG_GROUP LOG_GROUP_DEFAULT
28#include "VBoxGuestInternal.h"
29#include <VBox/VBoxDev.h> /* for VMMDEV_RAM_SIZE */
30#include <VBox/log.h>
31#include <iprt/mem.h>
32#include <iprt/time.h>
33#include <iprt/memobj.h>
34#include <iprt/asm.h>
35#include <iprt/string.h>
36#include <iprt/process.h>
37#include <iprt/assert.h>
38#include <iprt/param.h>
39#ifdef VBOX_HGCM
40# include <iprt/thread.h>
41#endif
42
43
44/*******************************************************************************
45* Internal Functions *
46*******************************************************************************/
47#ifdef VBOX_HGCM
48static DECLCALLBACK(void) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
49#endif
50
51
52
53/**
54 * Reserves memory in which the VMM can relocate any guest mappings
55 * that are floating around.
56 *
57 * This operation is a little bit tricky since the VMM might not accept
58 * just any address because of address clashes between the three contexts
59 * it operates in, so use a small stack to perform this operation.
60 *
61 * @returns VBox status code (ignored).
62 * @param pDevExt The device extension.
63 */
64static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
65{
66 /** @todo implement this using RTR0MemObjReserveKernel() (it needs to be implemented everywhere too). */
67 return VINF_SUCCESS;
68}
69
70
71/**
72 * Initializes the interrupt filter mask.
73 *
74 * This will ASSUME that we're the ones in carge over the mask, so
75 * we'll simply clear all bits we don't set.
76 *
77 * @returns VBox status code (ignored).
78 * @param pDevExt The device extension.
79 * @param fMask The new mask.
80 */
81static int vboxGuestInitFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
82{
83 VMMDevCtlGuestFilterMask *pReq;
84 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
85 if (RT_SUCCESS(rc))
86 {
87 pReq->u32OrMask = fMask;
88 pReq->u32NotMask = ~fMask; /* It's an AND mask. */
89 rc = VbglGRPerform(&pReq->header);
90 if ( RT_FAILURE(rc)
91 || RT_FAILURE(pReq->header.rc))
92 LogRel(("vboxGuestInitCtlFilterMask: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
93 rc, pReq->header.rc));
94 VbglGRFree(&pReq->header);
95 }
96 return rc;
97}
98
99
100/**
101 * Report guest information to the VMMDev.
102 *
103 * @returns VBox status code.
104 * @param pDevExt The device extension.
105 * @param enmOSType The OS type to report.
106 */
107static int vboxGuestInitReportGuestInfo(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
108{
109 VMMDevReportGuestInfo *pReq;
110 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_ReportGuestInfo);
111 if (RT_SUCCESS(rc))
112 {
113 pReq->guestInfo.additionsVersion = VMMDEV_VERSION;
114 pReq->guestInfo.osType = enmOSType;
115 rc = VbglGRPerform(&pReq->header);
116 if ( RT_FAILURE(rc)
117 || RT_FAILURE(pReq->header.rc))
118 LogRel(("vboxGuestInitReportGuestInfo: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
119 rc, pReq->header.rc));
120 VbglGRFree(&pReq->header);
121 }
122 return rc;
123}
124
125
126
/**
 * Maps the VMMDev memory.
 *
 * Creates a physical memory object covering the VMMDev MMIO range and maps it
 * into kernel space.  Since the real size of the region may not be known up
 * front, the function first tries a guess (VMMDEV_RAM_SIZE, then a single
 * page) and, after validating the mapped VMMDevMemory header, redoes the
 * mapping with the size the device actually reports.
 *
 * On success pDevExt->MemObjMMIO, pDevExt->MemMapMMIO and
 * pDevExt->pVMMDevMemory are set.  On failure the memory object is freed and
 * nothing is left mapped.
 *
 * @returns VBox status code.
 * @retval  VERR_VERSION_MISMATCH The VMMDev memory didn't meet our expectations.
 *
 * @param   pDevExt     The device extension.  PhysMMIOBase must be set.
 */
static int vboxGuestInitMapMemory(PVBOXGUESTDEVEXT pDevExt)
{
    const RTCCPHYS PhysMMIOBase = pDevExt->PhysMMIOBase;

    /*
     * Create a physical memory object for it.
     *
     * Since we don't know the actual size (OS/2 doesn't at least), we make
     * a qualified guess using the VMMDEV_RAM_SIZE.
     */
    size_t cb = RT_ALIGN_Z(VMMDEV_RAM_SIZE, PAGE_SIZE);
    int rc = RTR0MemObjEnterPhys(&pDevExt->MemObjMMIO, PhysMMIOBase, cb);
    if (RT_FAILURE(rc))
    {
        /* The guess was too big; fall back to a single page and rely on the
           size reported in the VMMDevMemory header for the real mapping. */
        cb = _4K;
        rc = RTR0MemObjEnterPhys(&pDevExt->MemObjMMIO, PhysMMIOBase, cb);
    }
    if (RT_FAILURE(rc))
    {
        Log(("vboxGuestInitMapMemory: RTR0MemObjEnterPhys(,%RCp,%zx) -> %Rrc\n",
             PhysMMIOBase, cb, rc));
        return rc;
    }

    /*
     * Map the object into kernel space.
     *
     * We want a normal mapping with normal caching, which good in two ways. First
     * since the API doesn't have any flags indicating how the mapping should be cached.
     * And second, because PGM doesn't necessarily respect the cache/writethru bits
     * anyway for normal RAM.
     */
    rc = RTR0MemObjMapKernel(&pDevExt->MemMapMMIO, pDevExt->MemObjMMIO, (void *)-1, 0,
                             RTMEM_PROT_READ | RTMEM_PROT_WRITE);
    if (RT_SUCCESS(rc))
    {
        /*
         * Validate the VMM memory.
         */
        VMMDevMemory *pVMMDev = (VMMDevMemory *)RTR0MemObjAddress(pDevExt->MemMapMMIO);
        Assert(pVMMDev);
        if (    pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
            &&  pVMMDev->u32Size >= 32 /* just for checking sanity */)
        {
            /*
             * Did we hit the the correct size? If not we'll have to
             * redo the mapping using the correct size.
             */
            if (RT_ALIGN_32(pVMMDev->u32Size, PAGE_SIZE) == cb)
            {
                pDevExt->pVMMDevMemory = pVMMDev;
                return VINF_SUCCESS;
            }

            Log(("vboxGuestInitMapMemory: Actual size %#RX32 (tried %#zx)\n", pVMMDev->u32Size, cb));
            cb = RT_ALIGN_32(pVMMDev->u32Size, PAGE_SIZE);

            /* Free the first-guess object (true => also tears down the kernel
               mapping created above) before redoing it at the right size. */
            rc = RTR0MemObjFree(pDevExt->MemObjMMIO, true); AssertRC(rc);
            pDevExt->MemObjMMIO = pDevExt->MemMapMMIO = NIL_RTR0MEMOBJ;

            rc = RTR0MemObjEnterPhys(&pDevExt->MemObjMMIO, PhysMMIOBase, cb);
            if (RT_SUCCESS(rc))
            {
                rc = RTR0MemObjMapKernel(&pDevExt->MemMapMMIO, pDevExt->MemObjMMIO, (void *)-1, 0,
                                         RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                if (RT_SUCCESS(rc))
                {
                    pDevExt->pVMMDevMemory = (VMMDevMemory *)RTR0MemObjAddress(pDevExt->MemMapMMIO);
                    Assert(pDevExt->pVMMDevMemory);
                    return VINF_SUCCESS;
                }

                Log(("vboxGuestInitMapMemory: RTR0MemObjMapKernel [%RCp,%zx] -> %Rrc (2nd)\n",
                     PhysMMIOBase, cb, rc));
            }
            else
                Log(("vboxGuestInitMapMemory: RTR0MemObjEnterPhys(,%RCp,%zx) -> %Rrc (2nd)\n",
                     PhysMMIOBase, cb, rc));
        }
        else
        {
            /* Header doesn't look like a VMMDevMemory region at all. */
            rc = VERR_VERSION_MISMATCH;
            LogRel(("vboxGuestInitMapMemory: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32\n",
                    pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size));
        }

    }
    else
        Log(("vboxGuestInitMapMemory: RTR0MemObjMapKernel [%RCp,%zx] -> %Rrc\n",
             PhysMMIOBase, cb, rc));

    /* Common failure path: release the physical memory object (and any mapping). */
    int rc2 = RTR0MemObjFree(pDevExt->MemObjMMIO, true); AssertRC(rc2);
    return rc;
}
229
230
/**
 * Initializes the VBoxGuest device extension when the
 * device driver is loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieve
 * the MMIO and I/O port ranges, this function will take care of
 * mapping the MMIO memory (if present). Upon successful return
 * the native code should set up the interrupt handler.
 *
 * On failure everything acquired here is rolled back, so the caller only
 * needs to check the status code.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt         The device extension. Allocated by the native code.
 * @param   IOPortBase      The base of the I/O port range.
 * @param   PhysMMIOBase    The base of the MMIO memory range.
 *                          This is optional, pass NIL_RTCCPHYS if not present.
 * @param   enmOSType       The guest OS type to report to the VMMDev.
 */
int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase, RTCCPHYS PhysMMIOBase,
                        VBOXOSTYPE enmOSType)
{
    int rc, rc2;

    /*
     * Initialize the data.
     */
    /* NOTE(review): pDevExt->MemMapMMIO is not initialized here; it is only
       assigned inside vboxGuestInitMapMemory - confirm callers zero the
       extension or that no path reads it before mapping. */
    pDevExt->PhysMMIOBase = PhysMMIOBase;
    pDevExt->IOPortBase = IOPortBase;
    pDevExt->MemObjMMIO = NIL_RTR0MEMOBJ;
    pDevExt->pVMMDevMemory = NULL;
    pDevExt->pIrqAckEvents = NULL;
    pDevExt->WaitList.pHead = NULL;
    pDevExt->WaitList.pTail = NULL;
#ifdef VBOX_HGCM
    pDevExt->HGCMWaitList.pHead = NULL;
    pDevExt->HGCMWaitList.pTail = NULL;
#endif
    pDevExt->FreeList.pHead = NULL;
    pDevExt->FreeList.pTail = NULL;
    pDevExt->f32PendingEvents = 0;
    pDevExt->u32ClipboardClientId = 0;

    /*
     * If there is an MMIO region map it into kernel memory.
     */
    if (PhysMMIOBase != NIL_RTCCPHYS)
    {
        /* Sanity: the VMMDev MMIO region is never in low memory. */
        AssertMsgReturn(PhysMMIOBase >= _1M, ("%RCp\n", PhysMMIOBase), VERR_INTERNAL_ERROR);
        rc = vboxGuestInitMapMemory(pDevExt);
        if (RT_SUCCESS(rc))
            Log(("VBoxGuestInitDevExt: VMMDevMemory: phys=%RCp mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
                 PhysMMIOBase, pDevExt->pVMMDevMemory, pDevExt->pVMMDevMemory->u32Size,
                 RT_ALIGN_32(pDevExt->pVMMDevMemory->u32Size, PAGE_SIZE), pDevExt->pVMMDevMemory->u32Version));
        else if (rc == VERR_VERSION_MISMATCH)
            Assert(!pDevExt->pVMMDevMemory); /* We can live without it (I think). */
        else
            return rc;
    }

    /*
     * Create the wait and session spinlocks.
     */
    rc = RTSpinlockCreate(&pDevExt->WaitSpinlock);
    if (RT_SUCCESS(rc))
        rc = RTSpinlockCreate(&pDevExt->SessionSpinlock);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestInitDevExt: failed to spinlock, rc=%d!\n", rc));
        /* WaitSpinlock may have been created even if SessionSpinlock failed. */
        if (pDevExt->WaitSpinlock != NIL_RTSPINLOCK)
            RTSpinlockDestroy(pDevExt->WaitSpinlock);
        rc2 = RTR0MemObjFree(pDevExt->MemObjMMIO, true); AssertRC(rc2);
        return rc;
    }

    /*
     * Initialize the guest library and report the guest info back to VMMDev,
     * set the interrupt control filter mask, and fixate the guest mappings
     * made by the VMM.
     */
    rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
    if (RT_SUCCESS(rc))
    {
        /* Pre-allocate the acknowledge-events request used by the ISR (it
           cannot allocate at interrupt time). */
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
        if (RT_SUCCESS(rc))
        {
            rc = vboxGuestInitReportGuestInfo(pDevExt, enmOSType);
            if (RT_SUCCESS(rc))
            {
#ifdef VBOX_HGCM
                rc = vboxGuestInitFilterMask(pDevExt, VMMDEV_EVENT_HGCM);
#else
                rc = vboxGuestInitFilterMask(pDevExt, 0);
#endif
                if (RT_SUCCESS(rc))
                {
                    /* Return value deliberately ignored (best effort). */
                    vboxGuestInitFixateGuestMappings(pDevExt);
                    Log(("VBoxGuestInitDevExt: returns success\n"));
                    return VINF_SUCCESS;
                }
            }

            /* failure cleanup */
        }
        else
            Log(("VBoxGuestInitDevExt: VBoxGRAlloc failed, rc=%Rrc\n", rc));

        VbglTerminate();
    }
    else
        Log(("VBoxGuestInitDevExt: VbglInit failed, rc=%Rrc\n", rc));

    /* Undo everything acquired above before failing. */
    rc2 = RTR0MemObjFree(pDevExt->MemObjMMIO, true); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->WaitSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    return rc; /* (failed) */
}
346
347
348/**
349 * Deletes all the items in a wait chain.
350 * @param pWait The head of the chain.
351 */
352static void VBoxGuestDeleteWaitList(PVBOXGUESTWAITLIST pList)
353{
354 while (pList->pHead)
355 {
356 PVBOXGUESTWAIT pWait = pList->pHead;
357 pList->pHead = pWait->pNext;
358
359 pWait->pNext = NULL;
360 pWait->pPrev = NULL;
361 int rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
362 pWait->Event = NIL_RTSEMEVENTMULTI;
363 RTMemFree(pWait);
364 }
365 pList->pHead = NULL;
366 pList->pTail = NULL;
367}
368
369
370/**
371 * Destroys the VBoxGuest device extension.
372 *
373 * The native code should call this before the driver is loaded,
374 * but don't call this on shutdown.
375 *
376 * @param pDevExt The device extension.
377 */
378void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
379{
380 int rc2;
381 Log(("VBoxGuestDeleteDevExt:\n"));
382
383 rc2 = RTSpinlockDestroy(pDevExt->WaitSpinlock); AssertRC(rc2);
384
385 VBoxGuestDeleteWaitList(&pDevExt->WaitList);
386#ifdef VBOX_HGCM
387 VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
388#endif
389 VBoxGuestDeleteWaitList(&pDevExt->FreeList);
390
391 VbglTerminate();
392
393 rc2 = RTR0MemObjFree(pDevExt->MemObjMMIO, true); AssertRC(rc2);
394 pDevExt->MemObjMMIO = pDevExt->MemMapMMIO = NIL_RTR0MEMOBJ;
395 pDevExt->pVMMDevMemory = NULL;
396
397 pDevExt->IOPortBase = 0;
398 pDevExt->pIrqAckEvents = NULL;
399}
400
401
402/**
403 * Creates a VBoxGuest user session.
404 *
405 * The native code calls this when a ring-3 client opens the device.
406 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
407 *
408 * @returns VBox status code.
409 * @param pDevExt The device extension.
410 * @param ppSession Where to store the session on success.
411 */
412int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
413{
414 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
415 if (RT_UNLIKELY(!pSession))
416 {
417 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
418 return VERR_NO_MEMORY;
419 }
420
421 pSession->Process = RTProcSelf();
422 pSession->R0Process = RTR0ProcHandleSelf();
423 pSession->pDevExt = pDevExt;
424
425 *ppSession = pSession;
426 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
427 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
428 return VINF_SUCCESS;
429}
430
431
432/**
433 * Creates a VBoxGuest kernel session.
434 *
435 * The native code calls this when a ring-0 client connects to the device.
436 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
437 *
438 * @returns VBox status code.
439 * @param pDevExt The device extension.
440 * @param ppSession Where to store the session on success.
441 */
442int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
443{
444 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
445 if (RT_UNLIKELY(!pSession))
446 {
447 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
448 return VERR_NO_MEMORY;
449 }
450
451 pSession->Process = NIL_RTPROCESS;
452 pSession->R0Process = NIL_RTR0PROCESS;
453 pSession->pDevExt = pDevExt;
454
455 *ppSession = pSession;
456 LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
457 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
458 return VINF_SUCCESS;
459}
460
461
462
/**
 * Closes a VBoxGuest session.
 *
 * Disconnects any HGCM clients still registered with the session and then
 * frees the session structure.  Works for both user and kernel sessions.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session to close (and free).
 */
void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
         pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */

#ifdef VBOX_HGCM
    /* Disconnect every HGCM client id the session left behind, clearing each
       slot before issuing the (possibly sleeping) disconnect request. */
    for (unsigned i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
        if (pSession->aHGCMClientIds[i])
        {
            VBoxGuestHGCMDisconnectInfo Info;
            Info.result = 0;
            Info.u32ClientID = pSession->aHGCMClientIds[i];
            pSession->aHGCMClientIds[i] = 0;
            Log(("VBoxGuestCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
            VbglHGCMDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, false /* uninterruptible */);
        }
#endif

    /* Poison the structure before freeing it as a use-after-free defense. */
    pSession->pDevExt = NULL;
    pSession->Process = NIL_RTPROCESS;
    pSession->R0Process = NIL_RTR0PROCESS;
    RTMemFree(pSession);
}
492
493
494/**
495 * Links the wait-for-event entry into the tail of the given list.
496 *
497 * @param pList The list to link it into.
498 * @param pWait The wait for event entry to append.
499 */
500DECLINLINE(void) VBoxGuestWaitAppend(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
501{
502 const PVBOXGUESTWAIT pTail = pList->pTail;
503 pWait->pNext = NULL;
504 pWait->pPrev = pTail;
505 if (pTail)
506 pTail->pNext = pWait;
507 else
508 pList->pHead = pWait;
509 pList->pTail = pWait;
510}
511
512
513/**
514 * Unlinks the wait-for-event entry.
515 *
516 * @param pList The list to unlink it from.
517 * @param pWait The wait for event entry to unlink.
518 */
519DECLINLINE(void) VBoxGuestWaitUnlink(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
520{
521 const PVBOXGUESTWAIT pPrev = pWait->pPrev;
522 const PVBOXGUESTWAIT pNext = pWait->pNext;
523 if (pNext)
524 pNext->pPrev = pPrev;
525 else
526 pList->pTail = pPrev;
527 if (pPrev)
528 pPrev->pNext = pNext;
529 else
530 pList->pHead = pNext;
531}
532
533
/**
 * Allocates a wait-for-event entry.
 *
 * Entries are recycled via pDevExt->FreeList when possible; only when the
 * free list is empty is a new entry allocated and its event semaphore
 * created.  The returned entry is zeroed and its semaphore reset.
 *
 * @returns The wait-for-event entry, or NULL on allocation failure.
 * @param   pDevExt         The device extension.
 */
static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt)
{
    /*
     * Allocate it one way or the other.
     */
    /* Unlocked peek at the free list; benign race - the result is re-checked
       under the spinlock below before actually unlinking anything. */
    PVBOXGUESTWAIT pWait = pDevExt->FreeList.pTail;
    if (pWait)
    {
        RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
        RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);

        pWait = pDevExt->FreeList.pTail;
        if (pWait)
            VBoxGuestWaitUnlink(&pDevExt->FreeList, pWait);

        RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
    }
    if (!pWait)
    {
        /* Rate-limit the out-of-memory log messages. */
        static unsigned s_cErrors = 0;

        pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
        if (!pWait)
        {
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
            return NULL;
        }

        int rc = RTSemEventMultiCreate(&pWait->Event);
        if (RT_FAILURE(rc))
        {
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
            RTMemFree(pWait);
            return NULL;
        }
    }

    /*
     * Zero members just as a precaution.
     */
    pWait->pNext = NULL;
    pWait->pPrev = NULL;
    pWait->fReqEvents = 0;
    pWait->fResEvents = 0;
#ifdef VBOX_HGCM
    pWait->pHGCMReq = NULL;
#endif
    RTSemEventMultiReset(pWait->Event);
    return pWait;
}
592
593
594/**
595 * Frees the wait-for-event entry.
596 * The caller must own the wait spinlock!
597 *
598 * @param pDevExt The device extension.
599 * @param pWait The wait-for-event entry to free.
600 */
601static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
602{
603 pWait->fReqEvents = 0;
604 pWait->fResEvents = 0;
605#ifdef VBOX_HGCM
606 pWait->pHGCMReq = NULL;
607#endif
608 VBoxGuestWaitAppend(&pDevExt->FreeList, pWait);
609}
610
611
612/**
613 * Frees the wait-for-event entry.
614 *
615 * @param pDevExt The device extension.
616 * @param pWait The wait-for-event entry to free.
617 */
618static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
619{
620 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
621 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
622 VBoxGuestWaitFreeLocked(pDevExt, pWait);
623 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
624}
625
626
627/**
628 * Implements the fast (no input or output) type of IOCtls.
629 *
630 * This is currently just a placeholder stub inherited from the support driver code.
631 *
632 * @returns VBox status code.
633 * @param iFunction The IOCtl function number.
634 * @param pDevExt The device extension.
635 * @param pSession The session.
636 */
637int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
638{
639 Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
640
641 return VERR_NOT_SUPPORTED;
642}
643
644
645
646static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
647{
648 Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
649 pInfo->portAddress = pDevExt->IOPortBase;
650 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
651 if (pcbDataReturned)
652 *pcbDataReturned = sizeof(*pInfo);
653 return VINF_SUCCESS;
654}
655
656
/**
 * Worker VBoxGuestCommonIOCtl_WaitEvent.
 * The caller enters the spinlock, we may or may not leave it.
 *
 * Checks whether any of the requested events are already pending; if so the
 * matched bits are consumed (cleared from f32PendingEvents), the spinlock is
 * released, and the output structure is filled in.
 *
 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
 * @returns VERR_TIMEOUT if no requested event is pending; the spinlock is
 *          still held and the caller must release it.
 */
DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWaitEventInfo *pInfo,
                                        int iEvent, const uint32_t fReqEvents, PRTSPINLOCKTMP pTmp)
{
    uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
    if (fMatches)
    {
        /* Consume the matched events while still under the lock, then drop it. */
        ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
        RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, pTmp);

        pInfo->u32EventFlagsOut = fMatches;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        /* Log format depends on whether a single event or a mask was requested. */
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        return VINF_SUCCESS;
    }
    return VERR_TIMEOUT;
}
682
683
/**
 * Handles the WAITEVENT IOCtl: blocks until one of the requested host events
 * is posted, the timeout expires, or (optionally) the wait is interrupted.
 *
 * @returns VBox status code.
 * @param   pDevExt             The device extension.
 * @param   pInfo               The wait request/result structure.  u32EventMaskIn
 *                              and u32TimeoutIn are inputs; u32EventFlagsOut and
 *                              u32Result are outputs.
 * @param   pcbDataReturned     Optional; set to sizeof(*pInfo).
 * @param   fInterruptible      Whether the wait may be interrupted by signals.
 */
static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned,
                                          bool fInterruptible)
{
    /* Initialize the output to the error state up front. */
    pInfo->u32EventFlagsOut = 0;
    pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
    if (pcbDataReturned)
        *pcbDataReturned = sizeof(*pInfo);

    /*
     * Copy and verify the input mask.
     */
    const uint32_t fReqEvents = pInfo->u32EventMaskIn;
    int iEvent = ASMBitFirstSetU32(fReqEvents) - 1;
    if (RT_UNLIKELY(iEvent < 0))
    {
        /* An empty mask would wait forever for nothing. */
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Check the condition up front, before doing the wait-for-event allocations.
     */
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
    /* On VINF_SUCCESS the worker has already released the spinlock. */
    int rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
    if (rc == VINF_SUCCESS)
        return rc;
    RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);

    /* Zero timeout means poll-only: report timeout without sleeping. */
    if (!pInfo->u32TimeoutIn)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        /* NOTE(review): log text says VINF_TIMEOUT but the function returns
           VERR_TIMEOUT - confirm and fix the message. */
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VINF_TIMEOUT\n"));
        return VERR_TIMEOUT;
    }

    PVBOXGUESTWAIT pWait = VBoxGuestWaitAlloc(pDevExt);
    if (!pWait)
        return VERR_NO_MEMORY;
    pWait->fReqEvents = fReqEvents;

    /*
     * We've got the wait entry now, re-enter the spinlock and check for the condition.
     * If the wait condition is met, return.
     * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
     */
    RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
    rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
    if (rc == VINF_SUCCESS)
    {
        VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
        return rc;
    }
    VBoxGuestWaitAppend(&pDevExt->WaitList, pWait);
    RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);

    /* Block until the ISR signals the entry or the timeout expires. */
    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event,
                                         pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
    else
        rc = RTSemEventMultiWait(pWait->Event,
                                 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);

    /*
     * There is one special case here and that's when the semaphore is
     * destroyed upon device driver unload. This shouldn't happen of course,
     * but in case it does, just get out of here ASAP.
     */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink the wait item and dispose of it.
     */
    RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
    VBoxGuestWaitUnlink(&pDevExt->WaitList, pWait);
    /* Snapshot the result events before the entry is recycled. */
    const uint32_t fResEvents = pWait->fResEvents;
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);

    /*
     * Now deal with the return code.
     */
    if (fResEvents)
    {
        /* The ISR delivered events - report them even if the wait also timed out. */
        pInfo->u32EventFlagsOut = fResEvents;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        rc = VINF_SUCCESS;
    }
    else if (rc == VERR_TIMEOUT)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VINF_TIMEOUT\n"));
    }
    else if (rc == VERR_INTERRUPTED)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
    }
    else
    {
        /* Success without any events is an internal inconsistency. */
        if (RT_SUCCESS(rc))
        {
            static unsigned s_cErrors = 0;
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
            rc = VERR_INTERNAL_ERROR;
        }
        pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc\n", rc));
    }

    return rc;
}
802
803
/**
 * Handles the VMMREQUEST IOCtl: forwards a raw VMMDev request from the caller
 * to the host and copies the result back.
 *
 * @returns VBox status code.
 * @param   pDevExt             The device extension.
 * @param   pReqHdr             The caller's request buffer (header + payload).
 *                              Updated in place on success.
 * @param   cbData              Size of the caller's buffer.
 * @param   pcbDataReturned     Optional; set to the request size on success.
 */
static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, VMMDevRequestHeader *pReqHdr,
                                           size_t cbData, size_t *pcbDataReturned)
{
    Log(("VBoxGuestCommonIOCtl: VMMREQUEST type %d\n", pReqHdr->requestType));

    /*
     * Validate the header and request size.
     */
    const uint32_t cbReq = pReqHdr->size;
    const uint32_t cbMinSize = vmmdevGetRequestSize(pReqHdr->requestType);
    if (cbReq < cbMinSize)
    {
        /* Declared size is smaller than the minimum for this request type. */
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
             cbReq, cbMinSize, pReqHdr->requestType));
        return VERR_INVALID_PARAMETER;
    }
    if (cbReq > cbData)
    {
        /* Declared size exceeds the buffer actually handed to us. */
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
             cbData, cbReq, pReqHdr->requestType));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Make a copy of the request in the physical memory heap so
     * the VBoxGuestLibrary can more easily deal with the request.
     * (This is really a waste of time since the OS or the OS specific
     * code has already buffered or locked the input/output buffer, but
     * it does makes things a bit simpler wrt to phys address.)
     */
    VMMDevRequestHeader *pReqCopy;
    int rc = VbglGRAlloc(&pReqCopy, cbReq, pReqHdr->requestType);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
             cbReq, cbReq, rc));
        return rc;
    }

    memcpy(pReqCopy, pReqHdr, cbReq);
    rc = VbglGRPerform(pReqCopy);
    if (    RT_SUCCESS(rc)
        &&  RT_SUCCESS(pReqCopy->rc))
    {
        /* Async HGCM execution must never surface through this raw path. */
        Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
        Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);

        /* Copy the host's answer back into the caller's buffer. */
        memcpy(pReqHdr, pReqCopy, cbReq);
        if (pcbDataReturned)
            *pcbDataReturned = cbReq;
    }
    else if (RT_FAILURE(rc))
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
    else
    {
        /* Transport succeeded but the host rejected the request. */
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
        rc = pReqCopy->rc;
    }

    VbglGRFree(pReqCopy);
    return rc;
}
866
867
868static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, VBoxGuestFilterMaskInfo *pInfo)
869{
870 VMMDevCtlGuestFilterMask *pReq;
871 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
872 if (RT_FAILURE(rc))
873 {
874 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
875 sizeof(*pReq), sizeof(*pReq), rc));
876 return rc;
877 }
878
879 pReq->u32OrMask = pInfo->u32OrMask;
880 pReq->u32NotMask = pInfo->u32NotMask;
881
882 rc = VbglGRPerform(&pReq->header);
883 if (RT_FAILURE(rc))
884 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: VbglGRPerform failed, rc=%Rrc!\n", rc));
885 else if (RT_FAILURE(pReq->header.rc))
886 {
887 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: The request failed; VMMDev rc=%Rrc!\n", pReq->header.rc));
888 rc = pReq->header.rc;
889 }
890
891 VbglGRFree(&pReq->header);
892 return rc;
893}
894
895
896#ifdef VBOX_HGCM
897
/**
 * This is a callback for dealing with async waits.
 *
 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent: it
 * blocks on a wait entry linked into the HGCM wait list until the ISR marks
 * the request done (VBOX_HGCM_REQ_DONE).
 */
static DECLCALLBACK(void)
VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User)
{
    /* The header is written by the host/ISR behind our back, hence volatile. */
    VMMDevHGCMRequestHeader volatile *pHdr = (VMMDevHGCMRequestHeader volatile *)pHdrNonVolatile;
    const bool fInterruptible = (bool)u32User;
    PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
    Log(("VBoxGuestHGCMAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));

    /*
     * Check to see if the condition was met by the time we got here.
     *
     * We create a simple poll loop here for dealing with out-of-memory
     * conditions since the caller isn't necessarily able to deal with
     * us returning too early.
     */
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    PVBOXGUESTWAIT pWait;
    for (;;)
    {
        /* Already done? Then there is nothing to wait for. */
        RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
        if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
        {
            RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
            return;
        }
        RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);

        pWait = VBoxGuestWaitAlloc(pDevExt);
        if (pWait)
            break;
        /* Out of memory: interruptible callers may give up, otherwise
           poll with a short sleep until an entry becomes available. */
        if (fInterruptible)
            return;
        RTThreadSleep(1);
    }
    pWait->fReqEvents = VMMDEV_EVENT_HGCM;
    pWait->pHGCMReq = pHdr;

    /*
     * Re-enter the spinlock and re-check for the condition.
     * If the condition is met, return.
     * Otherwise link us into the HGCM wait list and go to sleep.
     */
    RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
    if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
    {
        VBoxGuestWaitFreeLocked(pDevExt, pWait);
        RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
        return;
    }
    VBoxGuestWaitAppend(&pDevExt->HGCMWaitList, pWait);
    RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);

    int rc;
    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, RT_INDEFINITE_WAIT);
    else
        rc = RTSemEventMultiWait(pWait->Event, RT_INDEFINITE_WAIT);

    /*
     * This shouldn't ever return failure...
     * Unlink, free and return.
     */
    if (rc == VERR_SEM_DESTROYED)
        return;
    if (RT_FAILURE(rc))
        LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));

    RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
    VBoxGuestWaitUnlink(&pDevExt->HGCMWaitList, pWait);
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
}
975
976
/**
 * Handles the HGCM_CONNECT IOCtl: connects to an HGCM service on behalf of
 * the session and records the resulting client id in the session's table so
 * it can be cleaned up when the session closes.
 *
 * @returns VBox status code.
 * @param   pDevExt             The device extension.
 * @param   pSession            The calling session.
 * @param   pInfo               The connect request; u32ClientID and result
 *                              are filled in on success.
 * @param   pcbDataReturned     Optional; set to sizeof(*pInfo) on success.
 */
static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMConnectInfo *pInfo,
                                            size_t *pcbDataReturned)
{
    /*
     * The VbglHGCMConnect call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
         pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
         ? pInfo->Loc.u.host.achName : "<not local host>"));

    int rc = VbglHGCMConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, false /* uninterruptible */);
    if (RT_SUCCESS(rc))
    {
        Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
             pInfo->u32ClientID, pInfo->result, rc));
        if (RT_SUCCESS(pInfo->result))
        {
            /*
             * Append the client id to the client id table.
             * If the table has somehow become filled up, we'll disconnect the session.
             */
            unsigned i;
            RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
            RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
            for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
                if (!pSession->aHGCMClientIds[i])
                {
                    pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
                    break;
                }
            RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
            if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
            {
                /* Table full: undo the connect so the client id isn't leaked. */
                static unsigned s_cErrors = 0;
                if (s_cErrors++ < 32)
                    LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));

                VBoxGuestHGCMDisconnectInfo Info;
                Info.result = 0;
                Info.u32ClientID = pInfo->u32ClientID;
                VbglHGCMDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, false /* uninterruptible */);
                return VERR_TOO_MANY_OPEN_FILES;
            }
        }
        if (pcbDataReturned)
            *pcbDataReturned = sizeof(*pInfo);
    }
    return rc;
}
1028
1029
1030static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
1031 size_t *pcbDataReturned)
1032{
1033 /*
1034 * Validate the client id and invalidate its entry while we're in the call.
1035 */
1036 const uint32_t u32ClientId = pInfo->u32ClientID;
1037 unsigned i;
1038 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1039 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1040 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1041 if (pSession->aHGCMClientIds[i] == u32ClientId)
1042 {
1043 pSession->aHGCMClientIds[i] = UINT32_MAX;
1044 break;
1045 }
1046 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1047 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1048 {
1049 static unsigned s_cErrors = 0;
1050 if (s_cErrors++ > 32)
1051 LogRel(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", u32ClientId));
1052 return VERR_INVALID_HANDLE;
1053 }
1054
1055 /*
1056 * The VbglHGCMConnect call will invoke the callback if the HGCM
1057 * call is performed in an ASYNC fashion. The function is not able
1058 * to deal with cancelled requests.
1059 */
1060 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
1061 int rc = VbglHGCMDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, false /* uninterruptible */);
1062 if (RT_SUCCESS(rc))
1063 {
1064 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
1065 if (pcbDataReturned)
1066 *pcbDataReturned = sizeof(*pInfo);
1067 }
1068
1069 /* Update the client id array according to the result. */
1070 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1071 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
1072 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
1073 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1074
1075 return rc;
1076}
1077
1078
1079static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMCallInfo *pInfo,
1080 size_t cbData, size_t *pcbDataReturned)
1081{
1082 /*
1083 * Some more validations.
1084 */
1085 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
1086 {
1087 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
1088 return VERR_INVALID_PARAMETER;
1089 }
1090 const size_t cbActual = sizeof(*pInfo) + pInfo->cParms * sizeof(HGCMFunctionParameter);
1091 if (cbData < cbActual)
1092 {
1093 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
1094 cbData, cbActual));
1095 return VERR_INVALID_PARAMETER;
1096 }
1097
1098 /*
1099 * Validate the client id.
1100 */
1101 const uint32_t u32ClientId = pInfo->u32ClientID;
1102 unsigned i;
1103 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1104 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1105 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1106 if (pSession->aHGCMClientIds[i] == u32ClientId)
1107 break;
1108 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1109 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
1110 {
1111 static unsigned s_cErrors = 0;
1112 if (s_cErrors++ > 32)
1113 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", u32ClientId));
1114 return VERR_INVALID_HANDLE;
1115 }
1116
1117 /*
1118 * The VbglHGCMCall call will invoke the callback if the HGCM
1119 * call is performed in an ASYNC fashion. This function can
1120 * deal with cancelled requests, so we let user more requests
1121 * be interruptible (should add a flag for this later I guess).
1122 */
1123 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
1124 int rc = VbglHGCMCall(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, pSession->R0Process != NIL_RTR0PROCESS);
1125 if (RT_SUCCESS(rc))
1126 {
1127 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
1128 if (pcbDataReturned)
1129 *pcbDataReturned = cbActual;
1130 }
1131 return rc;
1132}
1133
1134
1135/**
1136 * @returns VBox status code. Unlike the other HGCM IOCtls this will combine
1137 * the VbglHGCMConnect/Disconnect return code with the Info.result.
1138 */
1139static int VBoxGuestCommonIOCtl_HGCMClipboardReConnect(PVBOXGUESTDEVEXT pDevExt, uint32_t *pu32ClientId, size_t *pcbDataReturned)
1140{
1141 int rc;
1142 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: Current u32ClientId=%RX32\n", pDevExt->u32ClipboardClientId));
1143
1144
1145 /*
1146 * If there is an old client, try disconnect it first.
1147 */
1148 if (pDevExt->u32ClipboardClientId != 0)
1149 {
1150 VBoxGuestHGCMDisconnectInfo Info;
1151 Info.result = (uint32_t)VERR_WRONG_ORDER; /** @todo Vitali, why is this member unsigned? */
1152 Info.u32ClientID = pDevExt->u32ClipboardClientId;
1153 rc = VbglHGCMDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, 0);
1154 if (RT_SUCCESS(rc))
1155 {
1156 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. VbglHGCMDisconnect -> rc=%Rrc\n", rc));
1157 return rc;
1158 }
1159 if (RT_FAILURE((int32_t)Info.result))
1160 {
1161 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. Info.result=%Rrc\n", rc));
1162 return Info.result;
1163 }
1164 pDevExt->u32ClipboardClientId = 0;
1165 }
1166
1167 /*
1168 * Try connect.
1169 */
1170 VBoxGuestHGCMConnectInfo Info;
1171 Info.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
1172 strcpy(Info.Loc.u.host.achName, "VBoxSharedClipboard");
1173 Info.u32ClientID = 0;
1174 Info.result = (uint32_t)VERR_WRONG_ORDER;
1175
1176 rc = VbglHGCMConnect(&Info,VBoxGuestHGCMAsyncWaitCallback, pDevExt, 0);
1177 if (RT_FAILURE(rc))
1178 {
1179 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1180 return rc;
1181 }
1182 if (RT_FAILURE((int32_t)Info.result))
1183 {
1184 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1185 return rc;
1186 }
1187
1188 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: connected successfully u32ClientId=%RX32\n", Info.u32ClientID));
1189
1190 pDevExt->u32ClipboardClientId = Info.u32ClientID;
1191 *pu32ClientId = Info.u32ClientID;
1192 if (pcbDataReturned)
1193 *pcbDataReturned = sizeof(uint32_t);
1194
1195 return VINF_SUCCESS;
1196}
1197
1198#endif /* VBOX_HGCM */
1199
1200
/**
 * Common IOCtl for user-to-kernel and kernel-to-kernel communication.
 *
 * This function only does the basic validation and then invokes
 * worker functions that take care of each specific function.
 *
 * @returns VBox status code.
 *
 * @param   iFunction           The requested function.
 * @param   pDevExt             The device extension.
 * @param   pSession            The client session.
 * @param   pvData              The input/output data buffer. Can be NULL depending on the function.
 * @param   cbData              The max size of the data buffer.
 * @param   pcbDataReturned     Where to store the amount of returned data. Can be NULL.
 */
int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                         void *pvData, size_t cbData, size_t *pcbDataReturned)
{
    Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
         iFunction, pDevExt, pSession, pvData, cbData));

    /*
     * Define some helper macros to simplify validation.
     */
    /* Rejects the request unless the caller is a ring-0 (kernel) session. */
#define CHECKRET_RING0(mnemonic) \
    do { \
        if (pSession->R0Process != NIL_RTR0PROCESS) \
        { \
            Log(("VBoxGuestCommonIOCtl: " mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
                 pSession->Process, (uintptr_t)pSession->R0Process)); \
            return VERR_PERMISSION_DENIED; \
        } \
    } while (0)
    /* Rejects the request if the buffer is smaller than cbMin or pvData is
       not a valid pointer (when a buffer is required at all). */
#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
    do { \
        if (cbData < (cbMin)) \
        { \
            Log(("VBoxGuestCommonIOCtl: " mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
                 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
            return VERR_BUFFER_OVERFLOW; \
        } \
        if ((cbMin) != 0 && !VALID_PTR(pvData)) \
        { \
            Log(("VBoxGuestCommonIOCtl: " mnemonic ": Invalid pointer %p\n", pvData)); \
            return VERR_INVALID_POINTER; \
        } \
    } while (0)


    /*
     * Deal with variably sized requests first.
     */
    /* VMMREQUEST encodes the request size in the function number, hence the
       range check rather than a single case label. */
    int rc = VINF_SUCCESS;
    if (    iFunction >= VBOXGUEST_IOCTL_VMMREQUEST(0)
        &&  iFunction <= VBOXGUEST_IOCTL_VMMREQUEST(0xfff)) /** @todo find a better way to do this*/
    {
        CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
        rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
    }
#ifdef VBOX_HGCM
    /*
     * This one is tricky and can be done later.
     */
    /* HGCM_CALL also encodes its size in the function number. */
    else if (    iFunction >= VBOXGUEST_IOCTL_HGCM_CALL(0)
             &&  iFunction <= VBOXGUEST_IOCTL_HGCM_CALL(0xfff))
    {
        CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
        rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, cbData, pcbDataReturned);
    }
#endif /* VBOX_HGCM */
    else
    {
        /* Fixed-size requests: validate, then dispatch to the worker. */
        switch (iFunction)
        {
            case VBOXGUEST_IOCTL_GETVMMDEVPORT:
                CHECKRET_RING0("GETVMMDEVPORT");
                CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
                rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
                break;

            case VBOXGUEST_IOCTL_WAITEVENT:
                CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
                rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, (VBoxGuestWaitEventInfo *)pvData, pcbDataReturned,
                                                    pSession->R0Process != NIL_RTR0PROCESS);
                break;

            case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
                CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
                rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, (VBoxGuestFilterMaskInfo *)pvData);
                break;

#ifdef VBOX_HGCM
            case VBOXGUEST_IOCTL_HGCM_CONNECT:
                CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
                rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
                break;

            case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
                CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
                rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
                break;


            case VBOXGUEST_IOCTL_CLIPBOARD_CONNECT:
                CHECKRET_MIN_SIZE("CLIPBOARD_CONNECT", sizeof(uint32_t));
                rc = VBoxGuestCommonIOCtl_HGCMClipboardReConnect(pDevExt, (uint32_t *)pvData, pcbDataReturned);
                break;
#endif /* VBOX_HGCM */

            default:
            {
                Log(("VBoxGuestCommonIOCtl: Unkown request %#x\n", iFunction));
                rc = VERR_NOT_SUPPORTED;
                break;
            }
        }
    }

    Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
    return rc;
}
1322
1323
1324
/**
 * Common interrupt service routine.
 *
 * This deals with events and with waking up threads waiting for those events.
 *
 * @returns true if it was our interrupt, false if it wasn't.
 * @param   pDevExt     The VBoxGuest device extension.
 */
bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
{
    /*
     * Now we have to find out whether it was our IRQ. Read the event mask
     * from our device to see if there are any pending events.
     */
    bool fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
    if (fOurIrq)
    {
        /* Acknowledge events: the pre-allocated request is reused here because
           the ISR must not allocate.  The host fills in pReq->events. */
        VMMDevEvents *pReq = pDevExt->pIrqAckEvents;
        int rc = VbglGRPerform(&pReq->header);
        if (    RT_SUCCESS(rc)
            &&  RT_SUCCESS(pReq->header.rc))
        {
            uint32_t fEvents = pReq->events;
            Log(("VBoxGuestCommonISR: acknowledge events succeeded %#RX32\n", fEvents));

            /*
             * Enter the spinlock and examine the waiting threads.
             */
            int rc2 = 0;
            PVBOXGUESTWAIT pWait;
            RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
            RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);

#ifdef VBOX_HGCM
            /* The HGCM event/list is kind of different in that we evaluate all entries:
               every waiter whose request has completed is signalled, since a single
               VMMDEV_EVENT_HGCM may cover multiple finished requests. */
            if (fEvents & VMMDEV_EVENT_HGCM)
                for (pWait = pDevExt->HGCMWaitList.pHead; pWait; pWait = pWait->pNext)
                    if (    !pWait->fResEvents
                        &&  (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE))
                    {
                        pWait->fResEvents = VMMDEV_EVENT_HGCM;
                        rc2 |= RTSemEventMultiSignal(pWait->Event);
                    }
#endif

            /* Normal FIFO evaluation: merge previously pending events, then hand
               each waiter the subset it asked for; events are consumed so each
               one wakes at most one waiter.  Leftovers stay pending. */
            fEvents |= pDevExt->f32PendingEvents;
            for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
                if (!pWait->fResEvents)
                {
                    pWait->fResEvents = pWait->fReqEvents & fEvents;
                    fEvents &= ~pWait->fResEvents;
                    rc2 |= RTSemEventMultiSignal(pWait->Event);
                    if (!fEvents)
                        break;
                }

            /* Store whatever nobody consumed for the next waiter/interrupt. */
            ASMAtomicXchgU32(&pDevExt->f32PendingEvents, fEvents);
            RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
            Assert(rc2 == 0);
        }
        else /* something is seriously wrong... */
            Log(("VBoxGuestCommonISR: acknowledge events failed rc=%d, header rc=%d (events=%#x)!!\n",
                 rc, pReq->header.rc, pReq->events));
    }
    else
        LogFlow(("VBoxGuestCommonISR: not ours\n"));

    return fOurIrq;
}
1396
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette