VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 32877

Last change on this file since 32877 was 32685, checked in by vboxsync, 14 years ago

Logging distinction.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 85.1 KB
Line 
1/* $Id: VBoxGuest.cpp 32685 2010-09-22 08:34:19Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEFAULT
23#include "VBoxGuestInternal.h"
24#include "VBoxGuest2.h"
25#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
26#include <VBox/log.h>
27#include <iprt/mem.h>
28#include <iprt/time.h>
29#include <iprt/memobj.h>
30#include <iprt/asm.h>
31#include <iprt/asm-amd64-x86.h>
32#include <iprt/string.h>
33#include <iprt/process.h>
34#include <iprt/assert.h>
35#include <iprt/param.h>
36#ifdef VBOX_WITH_HGCM
37# include <iprt/thread.h>
38#endif
39#include "version-generated.h"
40#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
41# include "revision-generated.h"
42#endif
43#ifdef RT_OS_WINDOWS
44# ifndef CTL_CODE
45# include <Windows.h>
46# endif
47#endif
48
49
50/*******************************************************************************
51* Internal Functions *
52*******************************************************************************/
53#ifdef VBOX_WITH_HGCM
54static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
55#endif
56
57
58/*******************************************************************************
59* Global Variables *
60*******************************************************************************/
/** Size of a VMMDevChangeMemBalloon request including the full physical page
 *  array for one balloon chunk (VMMDEV_MEMORY_BALLOON_CHUNK_PAGES entries). */
static const size_t cbChangeMemBalloonReq = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);
62
63
64
65/**
66 * Reserves memory in which the VMM can relocate any guest mappings
67 * that are floating around.
68 *
69 * This operation is a little bit tricky since the VMM might not accept
70 * just any address because of address clashes between the three contexts
71 * it operates in, so use a small stack to perform this operation.
72 *
73 * @returns VBox status code (ignored).
74 * @param pDevExt The device extension.
75 */
76static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
77{
78 /*
79 * Query the required space.
80 */
81 VMMDevReqHypervisorInfo *pReq;
82 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
83 if (RT_FAILURE(rc))
84 return rc;
85 pReq->hypervisorStart = 0;
86 pReq->hypervisorSize = 0;
87 rc = VbglGRPerform(&pReq->header);
88 if (RT_FAILURE(rc)) /* this shouldn't happen! */
89 {
90 VbglGRFree(&pReq->header);
91 return rc;
92 }
93
94 /*
95 * The VMM will report back if there is nothing it wants to map, like for
96 * instance in VT-x and AMD-V mode.
97 */
98 if (pReq->hypervisorSize == 0)
99 Log(("vboxGuestInitFixateGuestMappings: nothing to do\n"));
100 else
101 {
102 /*
103 * We have to try several times since the host can be picky
104 * about certain addresses.
105 */
106 RTR0MEMOBJ hFictive = NIL_RTR0MEMOBJ;
107 uint32_t cbHypervisor = pReq->hypervisorSize;
108 RTR0MEMOBJ ahTries[5];
109 uint32_t iTry;
110 bool fBitched = false;
111 Log(("vboxGuestInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
112 for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
113 {
114 /*
115 * Reserve space, or if that isn't supported, create a object for
116 * some fictive physical memory and map that in to kernel space.
117 *
118 * To make the code a bit uglier, most systems cannot help with
119 * 4MB alignment, so we have to deal with that in addition to
120 * having two ways of getting the memory.
121 */
122 uint32_t uAlignment = _4M;
123 RTR0MEMOBJ hObj;
124 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
125 if (rc == VERR_NOT_SUPPORTED)
126 {
127 uAlignment = PAGE_SIZE;
128 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
129 }
130 /*
131 * If both RTR0MemObjReserveKernel calls above failed because either not supported or
132 * not implemented at all at the current platform, try to map the memory object into the
133 * virtual kernel space.
134 */
135 if (rc == VERR_NOT_SUPPORTED)
136 {
137 if (hFictive == NIL_RTR0MEMOBJ)
138 {
139 rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
140 if (RT_FAILURE(rc))
141 break;
142 hFictive = hObj;
143 }
144 uAlignment = _4M;
145 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
146 if (rc == VERR_NOT_SUPPORTED)
147 {
148 uAlignment = PAGE_SIZE;
149 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
150 }
151 }
152 if (RT_FAILURE(rc))
153 {
154 LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
155 rc, cbHypervisor, uAlignment, iTry));
156 fBitched = true;
157 break;
158 }
159
160 /*
161 * Try set it.
162 */
163 pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
164 pReq->header.rc = VERR_INTERNAL_ERROR;
165 pReq->hypervisorSize = cbHypervisor;
166 pReq->hypervisorStart = (uintptr_t)RTR0MemObjAddress(hObj);
167 if ( uAlignment == PAGE_SIZE
168 && pReq->hypervisorStart & (_4M - 1))
169 pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
170 AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
171
172 rc = VbglGRPerform(&pReq->header);
173 if (RT_SUCCESS(rc))
174 {
175 pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
176 Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
177 RTR0MemObjAddress(pDevExt->hGuestMappings),
178 RTR0MemObjSize(pDevExt->hGuestMappings),
179 uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
180 break;
181 }
182 ahTries[iTry] = hObj;
183 }
184
185 /*
186 * Cleanup failed attempts.
187 */
188 while (iTry-- > 0)
189 RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
190 if ( RT_FAILURE(rc)
191 && hFictive != NIL_RTR0PTR)
192 RTR0MemObjFree(hFictive, false /* fFreeMappings */);
193 if (RT_FAILURE(rc) && !fBitched)
194 LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
195 }
196 VbglGRFree(&pReq->header);
197
198 /*
199 * We ignore failed attempts for now.
200 */
201 return VINF_SUCCESS;
202}
203
204
205/**
206 * Undo what vboxGuestInitFixateGuestMappings did.
207 *
208 * @param pDevExt The device extension.
209 */
210static void vboxGuestTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
211{
212 if (pDevExt->hGuestMappings != NIL_RTR0PTR)
213 {
214 /*
215 * Tell the host that we're going to free the memory we reserved for
216 * it, the free it up. (Leak the memory if anything goes wrong here.)
217 */
218 VMMDevReqHypervisorInfo *pReq;
219 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
220 if (RT_SUCCESS(rc))
221 {
222 pReq->hypervisorStart = 0;
223 pReq->hypervisorSize = 0;
224 rc = VbglGRPerform(&pReq->header);
225 VbglGRFree(&pReq->header);
226 }
227 if (RT_SUCCESS(rc))
228 {
229 rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
230 AssertRC(rc);
231 }
232 else
233 LogRel(("vboxGuestTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
234
235 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
236 }
237}
238
239
/**
 * Sets the interrupt filter mask during initialization and termination.
 *
 * This will ASSUME that we're the ones in charge over the mask, so
 * we'll simply clear all bits we don't set.
 *
 * @returns VBox status code (ignored).
 * @param   pDevExt     The device extension (currently unreferenced here).
 * @param   fMask       The new mask; bits not in it are explicitly cleared
 *                      via u32NotMask.
 */
static int vboxGuestSetFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
{
    VMMDevCtlGuestFilterMask *pReq;
    int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
    if (RT_SUCCESS(rc))
    {
        /* Turn on exactly fMask and turn off everything else. */
        pReq->u32OrMask = fMask;
        pReq->u32NotMask = ~fMask;
        rc = VbglGRPerform(&pReq->header);
        if (RT_FAILURE(rc))
            LogRel(("vboxGuestSetFilterMask: failed with rc=%Rrc\n", rc));
        VbglGRFree(&pReq->header);
    }
    return rc;
}
265
266
267/**
268 * Inflate the balloon by one chunk represented by an R0 memory object.
269 *
270 * The caller owns the balloon mutex.
271 *
272 * @returns IPRT status code.
273 * @param pMemObj Pointer to the R0 memory object.
274 * @param pReq The pre-allocated request for performing the VMMDev call.
275 */
276static int vboxGuestBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
277{
278 uint32_t iPage;
279 int rc;
280
281 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
282 {
283 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
284 pReq->aPhysPage[iPage] = phys;
285 }
286
287 pReq->fInflate = true;
288 pReq->header.size = cbChangeMemBalloonReq;
289 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
290
291 rc = VbglGRPerform(&pReq->header);
292 if (RT_FAILURE(rc))
293 LogRel(("vboxGuestBalloonInflate: VbglGRPerform failed. rc=%Rrc\n", rc));
294 return rc;
295}
296
297
298/**
299 * Deflate the balloon by one chunk - info the host and free the memory object.
300 *
301 * The caller owns the balloon mutex.
302 *
303 * @returns IPRT status code.
304 * @param pMemObj Pointer to the R0 memory object.
305 * The memory object will be freed afterwards.
306 * @param pReq The pre-allocated request for performing the VMMDev call.
307 */
308static int vboxGuestBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
309{
310 uint32_t iPage;
311 int rc;
312
313 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
314 {
315 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
316 pReq->aPhysPage[iPage] = phys;
317 }
318
319 pReq->fInflate = false;
320 pReq->header.size = cbChangeMemBalloonReq;
321 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
322
323 rc = VbglGRPerform(&pReq->header);
324 if (RT_FAILURE(rc))
325 {
326 LogRel(("vboxGuestBalloonDeflate: VbglGRPerform failed. rc=%Rrc\n", rc));
327 return rc;
328 }
329
330 rc = RTR0MemObjFree(*pMemObj, true);
331 if (RT_FAILURE(rc))
332 {
333 LogRel(("vboxGuestBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
334 return rc;
335 }
336
337 *pMemObj = NIL_RTR0MEMOBJ;
338 return VINF_SUCCESS;
339}
340
341
342/**
343 * Inflate/deflate the memory balloon and notify the host.
344 *
345 * This is a worker used by VBoxGuestCommonIOCtl_CheckMemoryBalloon - it takes
346 * the mutex.
347 *
348 * @returns VBox status code.
349 * @param pDevExt The device extension.
350 * @param pSession The session.
351 * @param cBalloonChunks The new size of the balloon in chunks of 1MB.
352 * @param pfHandleInR3 Where to return the handle-in-ring3 indicator
353 * (VINF_SUCCESS if set).
354 */
355static int vboxGuestSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, uint32_t *pfHandleInR3)
356{
357 int rc = VINF_SUCCESS;
358
359 if (pDevExt->MemBalloon.fUseKernelAPI)
360 {
361 VMMDevChangeMemBalloon *pReq;
362 uint32_t i;
363
364 if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
365 {
366 LogRel(("vboxGuestSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
367 cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
368 return VERR_INVALID_PARAMETER;
369 }
370
371 if (cBalloonChunks == pDevExt->MemBalloon.cMaxChunks)
372 return VINF_SUCCESS; /* nothing to do */
373
374 if ( cBalloonChunks > pDevExt->MemBalloon.cChunks
375 && !pDevExt->MemBalloon.paMemObj)
376 {
377 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
378 if (!pDevExt->MemBalloon.paMemObj)
379 {
380 LogRel(("VBoxGuestSetBalloonSizeKernel: no memory for paMemObj!\n"));
381 return VERR_NO_MEMORY;
382 }
383 }
384
385 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
386 if (RT_FAILURE(rc))
387 return rc;
388
389 if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
390 {
391 /* inflate */
392 for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
393 {
394 rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
395 VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
396 if (RT_FAILURE(rc))
397 {
398 if (rc == VERR_NOT_SUPPORTED)
399 {
400 /* not supported -- fall back to the R3-allocated memory. */
401 rc = VINF_SUCCESS;
402 pDevExt->MemBalloon.fUseKernelAPI = false;
403 Assert(pDevExt->MemBalloon.cChunks == 0);
404 Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
405 }
406 /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
407 * cannot allocate more memory => don't try further, just stop here */
408 /* else: XXX what else can fail? VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
409 break;
410 }
411
412 rc = vboxGuestBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
413 if (RT_FAILURE(rc))
414 {
415 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
416 RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
417 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
418 break;
419 }
420 pDevExt->MemBalloon.cChunks++;
421 }
422 }
423 else
424 {
425 /* deflate */
426 for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
427 {
428 rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
429 if (RT_FAILURE(rc))
430 {
431 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
432 break;
433 }
434 pDevExt->MemBalloon.cChunks--;
435 }
436 }
437
438 VbglGRFree(&pReq->header);
439 }
440
441 /*
442 * Set the handle-in-ring3 indicator. When set Ring-3 will have to work
443 * the balloon changes via the other API.
444 */
445 *pfHandleInR3 = pDevExt->MemBalloon.fUseKernelAPI ? false : true;
446
447 return rc;
448}
449
450
451/**
452 * Helper to reinit the VBoxVMM communication after hibernation.
453 *
454 * @returns VBox status code.
455 * @param pDevExt The device extension.
456 * @param enmOSType The OS type.
457 */
458int VBoxGuestReinitDevExtAfterHibernation(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
459{
460 int rc = VBoxGuestReportGuestInfo(enmOSType);
461 if (RT_SUCCESS(rc))
462 {
463 rc = VBoxGuestReportDriverStatus(true /* Driver is active */);
464 if (RT_FAILURE(rc))
465 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: could not report guest driver status, rc=%Rrc\n", rc));
466 }
467 else
468 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: could not report guest information to host, rc=%Rrc\n", rc));
469 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: returned with rc=%Rrc\n", rc));
470 return rc;
471}
472
473
/**
 * Inflate/deflate the balloon by one chunk.
 *
 * Worker for VBoxGuestCommonIOCtl_ChangeMemoryBalloon - it takes the mutex.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session (not referenced in this function's
 *                          body; presumably ownership is handled by the
 *                          caller -- verify against the IOCtl dispatcher).
 * @param   u64ChunkAddr    The ring-3 address of the chunk to add to / remove
 *                          from the balloon.
 * @param   fInflate        Inflate if true, deflate if false.
 */
static int vboxGuestSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           uint64_t u64ChunkAddr, bool fInflate)
{
    VMMDevChangeMemBalloon *pReq;
    int rc = VINF_SUCCESS;
    uint32_t i;
    PRTR0MEMOBJ pMemObj = NULL;

    if (fInflate)
    {
        /* Refuse to grow beyond the host-reported maximum, or if the maximum
           was never queried (cMaxChunks == 0). */
        if (    pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
            ||  pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
        {
            LogRel(("vboxGuestSetBalloonSize: cannot inflate balloon, already have %u chunks (max=%u)\n",
                    pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
            return VERR_INVALID_PARAMETER;
        }

        /* Lazily allocate the per-chunk memory object tracking array. */
        if (!pDevExt->MemBalloon.paMemObj)
        {
            pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
            if (!pDevExt->MemBalloon.paMemObj)
            {
                LogRel(("VBoxGuestSetBalloonSizeFromUser: no memory for paMemObj!\n"));
                return VERR_NO_MEMORY;
            }
            for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
                pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
        }
    }
    else
    {
        if (pDevExt->MemBalloon.cChunks == 0)
        {
            AssertMsgFailed(("vboxGuestSetBalloonSize: cannot decrease balloon, already at size 0\n"));
            return VERR_INVALID_PARAMETER;
        }
    }

    /*
     * Enumerate all memory objects and check if the object is already registered.
     * While scanning, also remember the first free slot for an inflate.
     */
    for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
    {
        if (    fInflate
            &&  !pMemObj
            &&  pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
            pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
        if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == u64ChunkAddr)
        {
            if (fInflate)
                return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
            pMemObj = &pDevExt->MemBalloon.paMemObj[i];
            break;
        }
    }
    if (!pMemObj)
    {
        if (fInflate)
        {
            /* no free object pointer found -- should not happen */
            return VERR_NO_MEMORY;
        }

        /* cannot free this memory as it wasn't provided before */
        return VERR_NOT_FOUND;
    }

    /*
     * Try inflate / deflate the balloon as requested.
     */
    rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
    if (RT_FAILURE(rc))
        return rc;

    if (fInflate)
    {
        /* Pin the ring-3 chunk into physical memory before giving it to the host. */
        rc = RTR0MemObjLockUser(pMemObj, u64ChunkAddr, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
                                RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
        if (RT_SUCCESS(rc))
        {
            rc = vboxGuestBalloonInflate(pMemObj, pReq);
            if (RT_SUCCESS(rc))
                pDevExt->MemBalloon.cChunks++;
            else
            {
                /* Inflate failed: unlock the pages again and clear the slot. */
                Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
                RTR0MemObjFree(*pMemObj, true);
                *pMemObj = NIL_RTR0MEMOBJ;
            }
        }
    }
    else
    {
        rc = vboxGuestBalloonDeflate(pMemObj, pReq);
        if (RT_SUCCESS(rc))
            pDevExt->MemBalloon.cChunks--;
        else
            Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
    }

    VbglGRFree(&pReq->header);
    return rc;
}
590
591
/**
 * Cleanup the memory balloon of a session.
 *
 * Will request the balloon mutex, so it must be valid and the caller must not
 * own it already.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session. Can be NULL at unload.
 */
static void vboxGuestCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    /* Only the owning session -- or a driver unload (pSession == NULL) -- may
       tear the balloon down. */
    if (    pDevExt->MemBalloon.pOwner == pSession
        ||  pSession == NULL /*unload*/)
    {
        if (pDevExt->MemBalloon.paMemObj)
        {
            VMMDevChangeMemBalloon *pReq;
            int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
            if (RT_SUCCESS(rc))
            {
                uint32_t i;
                /* Deflate the chunks one by one, newest first; on failure the
                   remaining chunks are deliberately leaked. */
                for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
                {
                    rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
                    if (RT_FAILURE(rc))
                    {
                        LogRel(("vboxGuestCloseMemBalloon: Deflate failed with rc=%Rrc. Will leak %u chunks.\n",
                                rc, pDevExt->MemBalloon.cChunks));
                        break;
                    }
                    pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
                    pDevExt->MemBalloon.cChunks--;
                }
                VbglGRFree(&pReq->header);
            }
            else
                LogRel(("vboxGuestCloseMemBalloon: Failed to allocate VMMDev request buffer (rc=%Rrc). Will leak %u chunks.\n",
                        rc, pDevExt->MemBalloon.cChunks));
            RTMemFree(pDevExt->MemBalloon.paMemObj);
            pDevExt->MemBalloon.paMemObj = NULL;
        }

        pDevExt->MemBalloon.pOwner = NULL;
    }
    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
}
639
640
/**
 * Initializes the VBoxGuest device extension when the
 * device driver is loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieve
 * the MMIO and I/O port ranges, this function will take care of
 * mapping the MMIO memory (if present). Upon successful return
 * the native code should set up the interrupt handler.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt         The device extension. Allocated by the native code.
 * @param   IOPortBase      The base of the I/O port range.
 * @param   pvMMIOBase      The base of the MMIO memory mapping.
 *                          This is optional, pass NULL if not present.
 * @param   cbMMIO          The size of the MMIO memory mapping.
 *                          This is optional, pass 0 if not present.
 * @param   enmOSType       The guest OS type to report to the VMMDev.
 * @param   fFixedEvents    Events that will be enabled upon init and no client
 *                          will ever be allowed to mask.
 */
int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
                        void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
{
    int rc, rc2;

    /*
     * Adjust fFixedEvents.
     */
#ifdef VBOX_WITH_HGCM
    fFixedEvents |= VMMDEV_EVENT_HGCM;
#endif

    /*
     * Initialize the data.
     */
    pDevExt->IOPortBase = IOPortBase;
    pDevExt->pVMMDevMemory = NULL;
    pDevExt->fFixedEvents = fFixedEvents;
    pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
    pDevExt->EventSpinlock = NIL_RTSPINLOCK;
    pDevExt->pIrqAckEvents = NULL;
    pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
    RTListInit(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    RTListInit(&pDevExt->HGCMWaitList);
#endif
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    RTListInit(&pDevExt->WakeUpList);
#endif
    RTListInit(&pDevExt->WokenUpList);
    RTListInit(&pDevExt->FreeList);
    pDevExt->f32PendingEvents = 0;
    pDevExt->u32MousePosChangedSeq = 0;
    pDevExt->SessionSpinlock = NIL_RTSPINLOCK;
    pDevExt->u32ClipboardClientId = 0;
    pDevExt->MemBalloon.hMtx = NIL_RTSEMFASTMUTEX;
    pDevExt->MemBalloon.cChunks = 0;
    pDevExt->MemBalloon.cMaxChunks = 0;
    pDevExt->MemBalloon.fUseKernelAPI = true;
    pDevExt->MemBalloon.paMemObj = NULL;
    pDevExt->MemBalloon.pOwner = NULL;

    /*
     * If there is an MMIO region validate the version and size.
     */
    if (pvMMIOBase)
    {
        VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
        Assert(cbMMIO);
        if (    pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
            &&  pVMMDev->u32Size >= 32
            &&  pVMMDev->u32Size <= cbMMIO)
        {
            pDevExt->pVMMDevMemory = pVMMDev;
            Log(("VBoxGuestInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
                 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
        }
        else /* try live without it. */
            LogRel(("VBoxGuestInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
                    pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
    }

    /*
     * Create the wait and session spinlocks as well as the ballooning mutex.
     */
    rc = RTSpinlockCreate(&pDevExt->EventSpinlock);
    if (RT_SUCCESS(rc))
        rc = RTSpinlockCreate(&pDevExt->SessionSpinlock);
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
        /* The event spinlock may have been created even if the session one failed. */
        if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
            RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    rc = RTSemFastMutexCreate(&pDevExt->MemBalloon.hMtx);
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create mutex, rc=%Rrc!\n", rc));
        RTSpinlockDestroy(pDevExt->SessionSpinlock);
        RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    /*
     * Initialize the guest library and report the guest info back to VMMDev,
     * set the interrupt control filter mask, and fixate the guest mappings
     * made by the VMM.
     */
    rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
    if (RT_SUCCESS(rc))
    {
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
        if (RT_SUCCESS(rc))
        {
            /* The physical address of the ack request is handed to the device
               from the interrupt handler. */
            pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
            Assert(pDevExt->PhysIrqAckEvents != 0);

            rc = VBoxGuestReportGuestInfo(enmOSType);
            if (RT_SUCCESS(rc))
            {
                rc = vboxGuestSetFilterMask(pDevExt, fFixedEvents);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Disable guest graphics capability by default. The guest specific
                     * graphics driver will re-enable this when it is necessary.
                     */
                    rc = VBoxGuestSetGuestCapabilities(0, VMMDEV_GUEST_SUPPORTS_GRAPHICS);
                    if (RT_SUCCESS(rc))
                    {
                        vboxGuestInitFixateGuestMappings(pDevExt);

                        rc = VBoxGuestReportDriverStatus(true /* Driver is active */);
                        if (RT_FAILURE(rc))
                            LogRel(("VBoxGuestInitDevExt: VBoxReportGuestDriverStatus failed, rc=%Rrc\n", rc));

                        Log(("VBoxGuestInitDevExt: returns success\n"));
                        return VINF_SUCCESS;
                    }

                    LogRel(("VBoxGuestInitDevExt: VBoxGuestSetGuestCapabilities failed, rc=%Rrc\n", rc));
                }
                else
                    LogRel(("VBoxGuestInitDevExt: vboxGuestSetFilterMask failed, rc=%Rrc\n", rc));
            }
            else
                LogRel(("VBoxGuestInitDevExt: VBoxReportGuestInfo failed, rc=%Rrc\n", rc));
            VbglGRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
        }
        else
            LogRel(("VBoxGuestInitDevExt: VBoxGRAlloc failed, rc=%Rrc\n", rc));

        VbglTerminate();
    }
    else
        LogRel(("VBoxGuestInitDevExt: VbglInit failed, rc=%Rrc\n", rc));

    /* Failure: undo the spinlock/mutex creation done above. */
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    return rc; /* (failed) */
}
806
807
808/**
809 * Deletes all the items in a wait chain.
810 * @param pList The head of the chain.
811 */
812static void VBoxGuestDeleteWaitList(PRTLISTNODE pList)
813{
814 while (!RTListIsEmpty(pList))
815 {
816 int rc2;
817 PVBOXGUESTWAIT pWait = RTListNodeGetFirst(pList, VBOXGUESTWAIT, ListNode);
818 RTListNodeRemove(&pWait->ListNode);
819
820 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
821 pWait->Event = NIL_RTSEMEVENTMULTI;
822 pWait->pSession = NULL;
823 RTMemFree(pWait);
824 }
825}
826
827
/**
 * Destroys the VBoxGuest device extension.
 *
 * The native code should call this when the driver is unloaded,
 * but don't call this on shutdown.
 *
 * @param   pDevExt     The device extension.
 */
void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
{
    int rc2;
    Log(("VBoxGuestDeleteDevExt:\n"));
    Log(("VBoxGuest: The additions driver is terminating.\n"));

    /*
     * Clean up the bits that involves the host first.
     */
    vboxGuestTermUnfixGuestMappings(pDevExt);
    VBoxGuestSetGuestCapabilities(0, UINT32_MAX); /* clears all capabilities */
    vboxGuestSetFilterMask(pDevExt, 0); /* filter all events */
    vboxGuestCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);

    /*
     * Cleanup all the other resources.
     */
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);

    VBoxGuestDeleteWaitList(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
#endif
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    VBoxGuestDeleteWaitList(&pDevExt->WakeUpList);
#endif
    VBoxGuestDeleteWaitList(&pDevExt->WokenUpList);
    VBoxGuestDeleteWaitList(&pDevExt->FreeList);

    VbglTerminate();

    pDevExt->pVMMDevMemory = NULL;

    pDevExt->IOPortBase = 0;
    pDevExt->pIrqAckEvents = NULL;
}
874
875
876/**
877 * Creates a VBoxGuest user session.
878 *
879 * The native code calls this when a ring-3 client opens the device.
880 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
881 *
882 * @returns VBox status code.
883 * @param pDevExt The device extension.
884 * @param ppSession Where to store the session on success.
885 */
886int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
887{
888 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
889 if (RT_UNLIKELY(!pSession))
890 {
891 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
892 return VERR_NO_MEMORY;
893 }
894
895 pSession->Process = RTProcSelf();
896 pSession->R0Process = RTR0ProcHandleSelf();
897 pSession->pDevExt = pDevExt;
898
899 *ppSession = pSession;
900 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
901 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
902 return VINF_SUCCESS;
903}
904
905
906/**
907 * Creates a VBoxGuest kernel session.
908 *
909 * The native code calls this when a ring-0 client connects to the device.
910 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
911 *
912 * @returns VBox status code.
913 * @param pDevExt The device extension.
914 * @param ppSession Where to store the session on success.
915 */
916int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
917{
918 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
919 if (RT_UNLIKELY(!pSession))
920 {
921 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
922 return VERR_NO_MEMORY;
923 }
924
925 pSession->Process = NIL_RTPROCESS;
926 pSession->R0Process = NIL_RTR0PROCESS;
927 pSession->pDevExt = pDevExt;
928
929 *ppSession = pSession;
930 LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
931 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
932 return VINF_SUCCESS;
933}
934
935
936
/**
 * Closes a VBoxGuest session.
 *
 * Disconnects any HGCM clients still registered with the session, cleans up
 * the session's memory balloon ownership, and frees the session structure.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session to close (and free).
 */
void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    unsigned i; NOREF(i);
    Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
         pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */

#ifdef VBOX_WITH_HGCM
    /* Tell the host to drop every HGCM connection this session still owns. */
    for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
        if (pSession->aHGCMClientIds[i])
        {
            VBoxGuestHGCMDisconnectInfo Info;
            Info.result = 0;
            Info.u32ClientID = pSession->aHGCMClientIds[i];
            pSession->aHGCMClientIds[i] = 0;
            Log(("VBoxGuestCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
            VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
        }
#endif

    pSession->pDevExt = NULL;
    pSession->Process = NIL_RTPROCESS;
    pSession->R0Process = NIL_RTR0PROCESS;
    vboxGuestCloseMemBalloon(pDevExt, pSession);
    RTMemFree(pSession);
}
968
969
/**
 * Allocates a wait-for-event entry.
 *
 * Tries to recycle an entry from the device's free list first (unlocked peek
 * followed by a re-check under the event spinlock), falling back on a fresh
 * heap allocation.
 *
 * @returns The wait-for-event entry, or NULL on allocation failure.
 * @param   pDevExt     The device extension.
 * @param   pSession    The session that's allocating this. Can be NULL.
 */
static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    /*
     * Allocate it one way or the other.
     */
    /* Unlocked peek at the free list; the result is re-validated under the
       spinlock below before it is actually taken. */
    PVBOXGUESTWAIT pWait = RTListNodeGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
    if (pWait)
    {
        RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
        RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);

        pWait = RTListNodeGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
        if (pWait)
            RTListNodeRemove(&pWait->ListNode);

        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
    }
    if (!pWait)
    {
        /* Throttle the out-of-memory logging to the first few occurrences. */
        static unsigned s_cErrors = 0;
        int rc;

        pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
        if (!pWait)
        {
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
            return NULL;
        }

        rc = RTSemEventMultiCreate(&pWait->Event);
        if (RT_FAILURE(rc))
        {
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
            RTMemFree(pWait);
            return NULL;
        }

        pWait->ListNode.pNext = NULL;
        pWait->ListNode.pPrev = NULL;
    }

    /*
     * Zero members just as a precaution.
     */
    pWait->fReqEvents = 0;
    pWait->fResEvents = 0;
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    pWait->fPendingWakeUp = false;
    pWait->fFreeMe = false;
#endif
    pWait->pSession = pSession;
#ifdef VBOX_WITH_HGCM
    pWait->pHGCMReq = NULL;
#endif
    RTSemEventMultiReset(pWait->Event);
    return pWait;
}
1036
1037
/**
 * Frees the wait-for-event entry.
 *
 * The caller must own the wait spinlock !
 * The entry must be in a list!
 *
 * With deferred wake-ups enabled, an entry that is currently being signalled
 * (fPendingWakeUp set) is only flagged with fFreeMe here;
 * VBoxGuestWaitDoWakeUps finishes moving it to the free list once the
 * wake-up completed.
 *
 * @param   pDevExt         The device extension.
 * @param   pWait           The wait-for-event entry to free.
 */
static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
{
    pWait->fReqEvents = 0;
    pWait->fResEvents = 0;
#ifdef VBOX_WITH_HGCM
    pWait->pHGCMReq = NULL;
#endif
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    Assert(!pWait->fFreeMe);
    if (pWait->fPendingWakeUp)
        pWait->fFreeMe = true; /* defer: VBoxGuestWaitDoWakeUps will recycle it */
    else
#endif
    {
        RTListNodeRemove(&pWait->ListNode);
        RTListAppend(&pDevExt->FreeList, &pWait->ListNode);
    }
}
1065
1066
/**
 * Frees the wait-for-event entry.
 *
 * Same as VBoxGuestWaitFreeLocked except that this variant acquires and
 * releases the event spinlock itself.
 *
 * @param   pDevExt         The device extension.
 * @param   pWait           The wait-for-event entry to free.
 */
static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
{
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
}
1080
1081
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
/**
 * Processes the wake-up list.
 *
 * All entries in the wake-up list gets signalled and moved to the woken-up
 * list.
 *
 * The spinlock is dropped around each RTSemEventMultiSignal call;
 * fPendingWakeUp marks the entry so VBoxGuestWaitFreeLocked will not
 * recycle it in that window (it sets fFreeMe instead, which is honoured
 * below once the signal is done).
 *
 * @param   pDevExt         The device extension.
 */
void VBoxGuestWaitDoWakeUps(PVBOXGUESTDEVEXT pDevExt)
{
    if (!RTListIsEmpty(&pDevExt->WakeUpList))
    {
        RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
        RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
        for (;;)
        {
            int rc;
            PVBOXGUESTWAIT pWait = RTListNodeGetFirst(&pDevExt->WakeUpList, VBOXGUESTWAIT, ListNode);
            if (!pWait)
                break;
            pWait->fPendingWakeUp = true;
            RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

            /* NOTE(review): signalling is done with the spinlock dropped —
               presumably RTSemEventMultiSignal may not be called while
               holding a no-ints spinlock; confirm against IPRT docs. */
            rc = RTSemEventMultiSignal(pWait->Event);
            AssertRC(rc);

            RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
            pWait->fPendingWakeUp = false;
            if (!pWait->fFreeMe)
            {
                RTListNodeRemove(&pWait->ListNode);
                RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
            }
            else
            {
                /* Someone tried to free the entry while we were signalling
                   it (see VBoxGuestWaitFreeLocked); finish the job now. */
                pWait->fFreeMe = false;
                VBoxGuestWaitFreeLocked(pDevExt, pWait);
            }
        }
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
    }
}
#endif /* VBOXGUEST_USE_DEFERRED_WAKE_UP */
1126
1127
1128/**
1129 * Modifies the guest capabilities.
1130 *
1131 * Should be called during driver init and termination.
1132 *
1133 * @returns VBox status code.
1134 * @param fOr The Or mask (what to enable).
1135 * @param fNot The Not mask (what to disable).
1136 */
1137int VBoxGuestSetGuestCapabilities(uint32_t fOr, uint32_t fNot)
1138{
1139 VMMDevReqGuestCapabilities2 *pReq;
1140 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
1141 if (RT_FAILURE(rc))
1142 {
1143 Log(("VBoxGuestSetGuestCapabilities: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1144 sizeof(*pReq), sizeof(*pReq), rc));
1145 return rc;
1146 }
1147
1148 pReq->u32OrMask = fOr;
1149 pReq->u32NotMask = fNot;
1150
1151 rc = VbglGRPerform(&pReq->header);
1152 if (RT_FAILURE(rc))
1153 Log(("VBoxGuestSetGuestCapabilities: VbglGRPerform failed, rc=%Rrc!\n", rc));
1154
1155 VbglGRFree(&pReq->header);
1156 return rc;
1157}
1158
1159
1160/**
1161 * Implements the fast (no input or output) type of IOCtls.
1162 *
1163 * This is currently just a placeholder stub inherited from the support driver code.
1164 *
1165 * @returns VBox status code.
1166 * @param iFunction The IOCtl function number.
1167 * @param pDevExt The device extension.
1168 * @param pSession The session.
1169 */
1170int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1171{
1172 Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1173
1174 NOREF(iFunction);
1175 NOREF(pDevExt);
1176 NOREF(pSession);
1177 return VERR_NOT_SUPPORTED;
1178}
1179
1180
1181/**
1182 * Return the VMM device port.
1183 *
1184 * returns IPRT status code.
1185 * @param pDevExt The device extension.
1186 * @param pInfo The request info.
1187 * @param pcbDataReturned (out) contains the number of bytes to return.
1188 */
1189static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
1190{
1191 Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
1192 pInfo->portAddress = pDevExt->IOPortBase;
1193 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
1194 if (pcbDataReturned)
1195 *pcbDataReturned = sizeof(*pInfo);
1196 return VINF_SUCCESS;
1197}
1198
1199
/**
 * Worker VBoxGuestCommonIOCtl_WaitEvent.
 *
 * The caller enters the spinlock, we leave it.
 *
 * Checks whether any of the requested events are pending; matching events
 * are consumed (cleared from f32PendingEvents) and reported through pInfo.
 *
 * @returns VINF_SUCCESS if a requested event was pending and consumed;
 *          VERR_TIMEOUT if not.  The spinlock is left in both cases.
 * @param   pDevExt     The device extension.
 * @param   pInfo       The wait request info (output fields filled on match).
 * @param   iEvent      Bit index of the first requested event (for logging).
 * @param   fReqEvents  The requested event mask.
 * @param   pTmp        The spinlock temp variable of the caller.
 */
DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWaitEventInfo *pInfo,
                                        int iEvent, const uint32_t fReqEvents, PRTSPINLOCKTMP pTmp)
{
    uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
    if (fMatches)
    {
        /* Consume the matched events while still holding the lock. */
        ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, pTmp);

        pInfo->u32EventFlagsOut = fMatches;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        return VINF_SUCCESS;
    }
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, pTmp);
    return VERR_TIMEOUT;
}
1227
1228
/**
 * Worker for VBOXGUEST_IOCTL_WAITEVENT.
 *
 * Waits for one of the events in pInfo->u32EventMaskIn to become pending,
 * subject to pInfo->u32TimeoutIn (milliseconds, UINT32_MAX meaning an
 * indefinite wait).  The outcome is reported via pInfo->u32EventFlagsOut
 * and pInfo->u32Result.
 *
 * @returns VBox status code.
 * @param   pDevExt             The device extension.
 * @param   pSession            The session.
 * @param   pInfo               The wait request (input/output).
 * @param   pcbDataReturned     Where to store the amount of returned data.
 *                              Can be NULL.
 * @param   fInterruptible      Whether the wait may be interrupted.
 */
static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                          VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned, bool fInterruptible)
{
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    const uint32_t fReqEvents = pInfo->u32EventMaskIn;
    uint32_t fResEvents;
    int iEvent;
    PVBOXGUESTWAIT pWait;
    int rc;

    pInfo->u32EventFlagsOut = 0;
    pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
    if (pcbDataReturned)
        *pcbDataReturned = sizeof(*pInfo);

    /*
     * Copy and verify the input mask.
     */
    iEvent = ASMBitFirstSetU32(fReqEvents) - 1; /* -1 when the mask is empty */
    if (RT_UNLIKELY(iEvent < 0))
    {
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Check the condition up front, before doing the wait-for-event allocations.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
    if (rc == VINF_SUCCESS)
        return rc;

    /* Zero timeout means poll-only: report timeout right away. */
    if (!pInfo->u32TimeoutIn)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
        return VERR_TIMEOUT;
    }

    pWait = VBoxGuestWaitAlloc(pDevExt, pSession);
    if (!pWait)
        return VERR_NO_MEMORY;
    pWait->fReqEvents = fReqEvents;

    /*
     * We've got the wait entry now, re-enter the spinlock and check for the condition.
     * If the wait condition is met, return.
     * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    RTListAppend(&pDevExt->WaitList, &pWait->ListNode);
    rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
    if (rc == VINF_SUCCESS)
    {
        VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
        return rc;
    }

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event,
                                         pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
    else
        rc = RTSemEventMultiWait(pWait->Event,
                                 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);

    /*
     * There is one special case here and that's when the semaphore is
     * destroyed upon device driver unload. This shouldn't happen of course,
     * but in case it does, just get out of here ASAP.
     */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink the wait item and dispose of it.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    fResEvents = pWait->fResEvents;
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    /*
     * Now deal with the return code.
     * (fResEvents == UINT32_MAX is the cancellation sentinel set by
     * VBoxGuestCommonIOCtl_CancelAllWaitEvents.)
     */
    if (    fResEvents
        &&  fResEvents != UINT32_MAX)
    {
        pInfo->u32EventFlagsOut = fResEvents;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        rc = VINF_SUCCESS;
    }
    else if (   fResEvents == UINT32_MAX
             || rc == VERR_INTERRUPTED)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
        rc = VERR_INTERRUPTED;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
    }
    else if (rc == VERR_TIMEOUT)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT (2)\n"));
    }
    else
    {
        /* The wait succeeded but no events were recorded - shouldn't happen. */
        if (RT_SUCCESS(rc))
        {
            static unsigned s_cErrors = 0;
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
            rc = VERR_INTERNAL_ERROR;
        }
        pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc\n", rc));
    }

    return rc;
}
1352
1353
/**
 * Worker for VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS.
 *
 * Wakes up all waiters belonging to @a pSession, marking each entry with the
 * UINT32_MAX sentinel in fResEvents so that the waiting thread reports
 * VBOXGUEST_WAITEVENT_INTERRUPTED (see VBoxGuestCommonIOCtl_WaitEvent).
 *
 * @returns VINF_SUCCESS.
 * @param   pDevExt     The device extension.
 * @param   pSession    The session whose waiters should be cancelled.
 */
static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    PVBOXGUESTWAIT pWait;
    PVBOXGUESTWAIT pSafe;
    int rc = 0;

    Log(("VBoxGuestCommonIOCtl: CANCEL_ALL_WAITEVENTS\n"));

    /*
     * Walk the event list and wake up anyone with a matching session.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
    {
        if (pWait->pSession == pSession)
        {
            pWait->fResEvents = UINT32_MAX; /* cancellation sentinel */
            RTListNodeRemove(&pWait->ListNode);
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
            /* Defer the actual signalling to VBoxGuestWaitDoWakeUps below. */
            RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
#else
            rc |= RTSemEventMultiSignal(pWait->Event);
            RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
#endif
        }
    }
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
    Assert(rc == 0);

#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    VBoxGuestWaitDoWakeUps(pDevExt);
#endif

    return VINF_SUCCESS;
}
1390
1391
/**
 * Worker for VBOXGUEST_IOCTL_VMMREQUEST.
 *
 * Validates the caller-supplied VMMDev request, copies it into a physically
 * backed heap buffer (VbglGR), performs it, and copies the result back into
 * the caller's buffer on success.
 *
 * @returns VBox status code; on host-side failure the request's own status
 *          (pReqCopy->rc) is returned instead.
 * @param   pDevExt             The device extension.
 * @param   pSession            The session.
 * @param   pReqHdr             The request header (input/output buffer).
 * @param   cbData              Size of the buffer @a pReqHdr points to.
 * @param   pcbDataReturned     Where to store the amount of returned data.
 *                              Can be NULL.
 */
static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
{
    int rc;
    VMMDevRequestHeader *pReqCopy;

    /*
     * Validate the header and request size.
     */
    const VMMDevRequestType enmType   = pReqHdr->requestType;
    const uint32_t          cbReq     = pReqHdr->size;
    const uint32_t          cbMinSize = vmmdevGetRequestSize(enmType);

    Log(("VBoxGuestCommonIOCtl: VMMREQUEST type %d\n", pReqHdr->requestType));

    if (cbReq < cbMinSize)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
             cbReq, cbMinSize, enmType));
        return VERR_INVALID_PARAMETER;
    }
    if (cbReq > cbData)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
             cbData, cbReq, enmType));
        return VERR_INVALID_PARAMETER;
    }
    rc = VbglGRVerify(pReqHdr, cbData);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc!!\n",
             cbData, cbReq, enmType, rc));
        return rc;
    }

    /*
     * Make a copy of the request in the physical memory heap so
     * the VBoxGuestLibrary can more easily deal with the request.
     * (This is really a waste of time since the OS or the OS specific
     * code has already buffered or locked the input/output buffer, but
     * it does makes things a bit simpler wrt to phys address.)
     */
    rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
             cbReq, cbReq, rc));
        return rc;
    }
    memcpy(pReqCopy, pReqHdr, cbReq);

    if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
        pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);

    rc = VbglGRPerform(pReqCopy);
    if (    RT_SUCCESS(rc)
        &&  RT_SUCCESS(pReqCopy->rc))
    {
        Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
        Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);

        /* Copy the (possibly modified) request back to the caller. */
        memcpy(pReqHdr, pReqCopy, cbReq);
        if (pcbDataReturned)
            *pcbDataReturned = cbReq;
    }
    else if (RT_FAILURE(rc))
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
    else
    {
        /* Transport succeeded but the host rejected the request. */
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
        rc = pReqCopy->rc;
    }

    VbglGRFree(pReqCopy);
    return rc;
}
1468
1469
1470static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, VBoxGuestFilterMaskInfo *pInfo)
1471{
1472 VMMDevCtlGuestFilterMask *pReq;
1473 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
1474 if (RT_FAILURE(rc))
1475 {
1476 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1477 sizeof(*pReq), sizeof(*pReq), rc));
1478 return rc;
1479 }
1480
1481 pReq->u32OrMask = pInfo->u32OrMask;
1482 pReq->u32NotMask = pInfo->u32NotMask;
1483 pReq->u32NotMask &= ~pDevExt->fFixedEvents; /* don't permit these to be cleared! */
1484 rc = VbglGRPerform(&pReq->header);
1485 if (RT_FAILURE(rc))
1486 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: VbglGRPerform failed, rc=%Rrc!\n", rc));
1487
1488 VbglGRFree(&pReq->header);
1489 return rc;
1490}
1491
1492#ifdef VBOX_WITH_HGCM
1493
1494AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
1495
/** Worker for VBoxGuestHGCMAsyncWaitCallback*.
 *
 * Blocks the calling thread until the HGCM request @a pHdr is flagged done
 * (VBOX_HGCM_REQ_DONE), the wait times out, or - when @a fInterruptible -
 * it is interrupted.
 *
 * @returns VBox status code.
 * @param   pHdr            The HGCM request header to wait on.
 * @param   pDevExt         The device extension.
 * @param   fInterruptible  Whether the wait may be interrupted.
 * @param   cMillies        Timeout in milliseconds (RT_INDEFINITE_WAIT for none).
 */
static int VBoxGuestHGCMAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
                                                bool fInterruptible, uint32_t cMillies)
{
    int rc;

    /*
     * Check to see if the condition was met by the time we got here.
     *
     * We create a simple poll loop here for dealing with out-of-memory
     * conditions since the caller isn't necessarily able to deal with
     * us returning too early.
     */
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    PVBOXGUESTWAIT pWait;
    for (;;)
    {
        RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
        if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
        {
            RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
            return VINF_SUCCESS;
        }
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

        pWait = VBoxGuestWaitAlloc(pDevExt, NULL);
        if (pWait)
            break;
        if (fInterruptible)
            return VERR_INTERRUPTED;
        RTThreadSleep(1); /* back off briefly before retrying the allocation */
    }
    pWait->fReqEvents = VMMDEV_EVENT_HGCM;
    pWait->pHGCMReq = pHdr;

    /*
     * Re-enter the spinlock and re-check for the condition.
     * If the condition is met, return.
     * Otherwise link us into the HGCM wait list and go to sleep.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    RTListAppend(&pDevExt->HGCMWaitList, &pWait->ListNode);
    if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
    {
        VBoxGuestWaitFreeLocked(pDevExt, pWait);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
        return VINF_SUCCESS;
    }
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMillies);
    if (rc == VERR_SEM_DESTROYED)
        return rc; /* driver unload tore the semaphore down - bail immediately */

    /*
     * Unlink, free and return.
     */
    if (    RT_FAILURE(rc)
        &&  rc != VERR_TIMEOUT
        &&  (   !fInterruptible
             || rc != VERR_INTERRUPTED))
        LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));

    VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
    return rc;
}
1565
1566
1567/**
1568 * This is a callback for dealing with async waits.
1569 *
1570 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1571 */
1572static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
1573{
1574 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1575 Log(("VBoxGuestHGCMAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
1576 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1577 pDevExt,
1578 false /* fInterruptible */,
1579 u32User /* cMillies */);
1580}
1581
1582
/**
 * This is a callback for dealing with interruptible async waits
 * (the original comment said "with a timeout"; the distinction from
 * VBoxGuestHGCMAsyncWaitCallback is interruptibility - both take a
 * timeout via u32User).
 *
 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
 */
static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr,
                                                                     void *pvUser, uint32_t u32User)
{
    PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
    Log(("VBoxGuestHGCMAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
    return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
                                                pDevExt,
                                                true /* fInterruptible */,
                                                u32User /* cMillies */ );

}
1599
1600
/**
 * Worker for VBOXGUEST_IOCTL_HGCM_CONNECT.
 *
 * Connects to an HGCM service and, on success, records the new client id in
 * the session's client id table so the session can disconnect it on close.
 * If the table is full the fresh connection is rolled back.
 *
 * @returns VBox status code.
 * @param   pDevExt             The device extension.
 * @param   pSession            The session.
 * @param   pInfo               The connection request (input/output).
 * @param   pcbDataReturned     Where to store the amount of returned data.
 *                              Can be NULL.
 */
static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                            VBoxGuestHGCMConnectInfo *pInfo, size_t *pcbDataReturned)
{
    int rc;

    /*
     * The VbglHGCMConnect call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
         pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
         ? pInfo->Loc.u.host.achName : "<not local host>"));

    rc = VbglR0HGCMInternalConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
    {
        Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
             pInfo->u32ClientID, pInfo->result, rc));
        if (RT_SUCCESS(pInfo->result))
        {
            /*
             * Append the client id to the client id table.
             * If the table has somehow become filled up, we'll disconnect the session.
             */
            unsigned i;
            RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
            RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
            for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
                if (!pSession->aHGCMClientIds[i])
                {
                    pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
                    break;
                }
            RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
            if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
            {
                /* Table full: undo the connection we just established. */
                static unsigned s_cErrors = 0;
                VBoxGuestHGCMDisconnectInfo Info;

                if (s_cErrors++ < 32)
                    LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));

                Info.result = 0;
                Info.u32ClientID = pInfo->u32ClientID;
                VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
                return VERR_TOO_MANY_OPEN_FILES;
            }
        }
        if (pcbDataReturned)
            *pcbDataReturned = sizeof(*pInfo);
    }
    return rc;
}
1655
1656
1657static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
1658 size_t *pcbDataReturned)
1659{
1660 /*
1661 * Validate the client id and invalidate its entry while we're in the call.
1662 */
1663 int rc;
1664 const uint32_t u32ClientId = pInfo->u32ClientID;
1665 unsigned i;
1666 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1667 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1668 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1669 if (pSession->aHGCMClientIds[i] == u32ClientId)
1670 {
1671 pSession->aHGCMClientIds[i] = UINT32_MAX;
1672 break;
1673 }
1674 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1675 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1676 {
1677 static unsigned s_cErrors = 0;
1678 if (s_cErrors++ > 32)
1679 LogRel(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", u32ClientId));
1680 return VERR_INVALID_HANDLE;
1681 }
1682
1683 /*
1684 * The VbglHGCMConnect call will invoke the callback if the HGCM
1685 * call is performed in an ASYNC fashion. The function is not able
1686 * to deal with cancelled requests.
1687 */
1688 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
1689 rc = VbglR0HGCMInternalDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1690 if (RT_SUCCESS(rc))
1691 {
1692 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
1693 if (pcbDataReturned)
1694 *pcbDataReturned = sizeof(*pInfo);
1695 }
1696
1697 /* Update the client id array according to the result. */
1698 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1699 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
1700 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
1701 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1702
1703 return rc;
1704}
1705
1706
1707static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt,
1708 PVBOXGUESTSESSION pSession,
1709 VBoxGuestHGCMCallInfo *pInfo,
1710 uint32_t cMillies, bool fInterruptible, bool f32bit,
1711 size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
1712{
1713 const uint32_t u32ClientId = pInfo->u32ClientID;
1714 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1715 uint32_t fFlags;
1716 size_t cbActual;
1717 unsigned i;
1718 int rc;
1719
1720 /*
1721 * Some more validations.
1722 */
1723 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
1724 {
1725 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
1726 return VERR_INVALID_PARAMETER;
1727 }
1728
1729 cbActual = cbExtra + sizeof(*pInfo);
1730#ifdef RT_ARCH_AMD64
1731 if (f32bit)
1732 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
1733 else
1734#endif
1735 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
1736 if (cbData < cbActual)
1737 {
1738 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
1739 cbData, cbActual));
1740 return VERR_INVALID_PARAMETER;
1741 }
1742
1743 /*
1744 * Validate the client id.
1745 */
1746 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1747 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1748 if (pSession->aHGCMClientIds[i] == u32ClientId)
1749 break;
1750 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1751 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
1752 {
1753 static unsigned s_cErrors = 0;
1754 if (s_cErrors++ > 32)
1755 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
1756 return VERR_INVALID_HANDLE;
1757 }
1758
1759 /*
1760 * The VbglHGCMCall call will invoke the callback if the HGCM
1761 * call is performed in an ASYNC fashion. This function can
1762 * deal with cancelled requests, so we let user more requests
1763 * be interruptible (should add a flag for this later I guess).
1764 */
1765 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
1766 fFlags = pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
1767#ifdef RT_ARCH_AMD64
1768 if (f32bit)
1769 {
1770 if (fInterruptible)
1771 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1772 else
1773 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1774 }
1775 else
1776#endif
1777 {
1778 if (fInterruptible)
1779 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1780 else
1781 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1782 }
1783 if (RT_SUCCESS(rc))
1784 {
1785 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
1786 if (pcbDataReturned)
1787 *pcbDataReturned = cbActual;
1788 }
1789 else
1790 {
1791 if ( rc != VERR_INTERRUPTED
1792 && rc != VERR_TIMEOUT)
1793 {
1794 static unsigned s_cErrors = 0;
1795 if (s_cErrors++ < 32)
1796 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
1797 }
1798 else
1799 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
1800 }
1801 return rc;
1802}
1803
1804
1805/**
1806 * @returns VBox status code. Unlike the other HGCM IOCtls this will combine
1807 * the VbglHGCMConnect/Disconnect return code with the Info.result.
1808 *
1809 * @param pDevExt The device extension.
1810 * @param pu32ClientId The client id.
1811 * @param pcbDataReturned Where to store the amount of returned data. Can
1812 * be NULL.
1813 */
1814static int VBoxGuestCommonIOCtl_HGCMClipboardReConnect(PVBOXGUESTDEVEXT pDevExt, uint32_t *pu32ClientId, size_t *pcbDataReturned)
1815{
1816 int rc;
1817 VBoxGuestHGCMConnectInfo CnInfo;
1818
1819 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: Current u32ClientId=%RX32\n", pDevExt->u32ClipboardClientId));
1820
1821 /*
1822 * If there is an old client, try disconnect it first.
1823 */
1824 if (pDevExt->u32ClipboardClientId != 0)
1825 {
1826 VBoxGuestHGCMDisconnectInfo DiInfo;
1827 DiInfo.result = VERR_WRONG_ORDER;
1828 DiInfo.u32ClientID = pDevExt->u32ClipboardClientId;
1829 rc = VbglR0HGCMInternalDisconnect(&DiInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1830 if (RT_SUCCESS(rc))
1831 {
1832 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. VbglHGCMDisconnect -> rc=%Rrc\n", rc));
1833 return rc;
1834 }
1835 if (RT_FAILURE((int32_t)DiInfo.result))
1836 {
1837 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. DiInfo.result=%Rrc\n", DiInfo.result));
1838 return DiInfo.result;
1839 }
1840 pDevExt->u32ClipboardClientId = 0;
1841 }
1842
1843 /*
1844 * Try connect.
1845 */
1846 CnInfo.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
1847 strcpy(CnInfo.Loc.u.host.achName, "VBoxSharedClipboard");
1848 CnInfo.u32ClientID = 0;
1849 CnInfo.result = VERR_WRONG_ORDER;
1850
1851 rc = VbglR0HGCMInternalConnect(&CnInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1852 if (RT_FAILURE(rc))
1853 {
1854 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1855 return rc;
1856 }
1857 if (RT_FAILURE(CnInfo.result))
1858 {
1859 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1860 return rc;
1861 }
1862
1863 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: connected successfully u32ClientId=%RX32\n", CnInfo.u32ClientID));
1864
1865 pDevExt->u32ClipboardClientId = CnInfo.u32ClientID;
1866 *pu32ClientId = CnInfo.u32ClientID;
1867 if (pcbDataReturned)
1868 *pcbDataReturned = sizeof(uint32_t);
1869
1870 return VINF_SUCCESS;
1871}
1872
1873#endif /* VBOX_WITH_HGCM */
1874
/**
 * Handle VBOXGUEST_IOCTL_CHECK_BALLOON from R3.
 *
 * Ask the host for the size of the balloon and try to set it accordingly. If
 * this approach fails because it's not supported, return with fHandleInR3 set
 * and let the user land supply memory we can lock via the other ioctl.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt             The device extension.
 * @param   pSession            The session.
 * @param   pInfo               The output buffer.
 * @param   pcbDataReturned     Where to store the amount of returned data. Can
 *                              be NULL.
 */
static int VBoxGuestCommonIOCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                                   VBoxGuestCheckBalloonInfo *pInfo, size_t *pcbDataReturned)
{
    VMMDevGetMemBalloonChangeRequest *pReq;
    int rc;

    Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON\n"));
    rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    AssertRCReturn(rc, rc);

    /*
     * The first user trying to query/change the balloon becomes the
     * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
     */
    if (   pDevExt->MemBalloon.pOwner != pSession
        && pDevExt->MemBalloon.pOwner == NULL)
        pDevExt->MemBalloon.pOwner = pSession;

    if (pDevExt->MemBalloon.pOwner == pSession)
    {
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
        if (RT_SUCCESS(rc))
        {
            /*
             * This is a response to that event. Setting this bit means that
             * we request the value from the host and change the guest memory
             * balloon according to this value.
             */
            pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
            rc = VbglGRPerform(&pReq->header);
            if (RT_SUCCESS(rc))
            {
                /* The chunk count from the host must stay stable once known. */
                Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
                pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;

                pInfo->cBalloonChunks = pReq->cBalloonChunks;
                pInfo->fHandleInR3 = false;

                rc = vboxGuestSetBalloonSizeKernel(pDevExt, pReq->cBalloonChunks, &pInfo->fHandleInR3);
                /* Ignore various out of memory failures. */
                if (   rc == VERR_NO_MEMORY
                    || rc == VERR_NO_PHYS_MEMORY
                    || rc == VERR_NO_CONT_MEMORY)
                    rc = VINF_SUCCESS;

                if (pcbDataReturned)
                    *pcbDataReturned = sizeof(VBoxGuestCheckBalloonInfo);
            }
            else
                LogRel(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON: VbglGRPerform failed. rc=%Rrc\n", rc));
            VbglGRFree(&pReq->header);
        }
    }
    else
        rc = VERR_PERMISSION_DENIED; /* another session already owns the balloon */

    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
    Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON returns %Rrc\n", rc));
    return rc;
}
1950
1951
1952/**
1953 * Handle a request for changing the memory balloon.
1954 *
1955 * @returns VBox status code.
1956 *
1957 * @param pDevExt The device extention.
1958 * @param pSession The session.
1959 * @param pInfo The change request structure (input).
1960 * @param pcbDataReturned Where to store the amount of returned data. Can
1961 * be NULL.
1962 */
1963static int VBoxGuestCommonIOCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1964 VBoxGuestChangeBalloonInfo *pInfo, size_t *pcbDataReturned)
1965{
1966 int rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
1967 AssertRCReturn(rc, rc);
1968
1969 if (!pDevExt->MemBalloon.fUseKernelAPI)
1970 {
1971 /*
1972 * The first user trying to query/change the balloon becomes the
1973 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
1974 */
1975 if ( pDevExt->MemBalloon.pOwner != pSession
1976 && pDevExt->MemBalloon.pOwner == NULL)
1977 pDevExt->MemBalloon.pOwner = pSession;
1978
1979 if (pDevExt->MemBalloon.pOwner == pSession)
1980 {
1981 rc = vboxGuestSetBalloonSizeFromUser(pDevExt, pSession, pInfo->u64ChunkAddr, !!pInfo->fInflate);
1982 if (pcbDataReturned)
1983 *pcbDataReturned = 0;
1984 }
1985 else
1986 rc = VERR_PERMISSION_DENIED;
1987 }
1988 else
1989 rc = VERR_PERMISSION_DENIED;
1990
1991 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
1992 return rc;
1993}
1994
1995
1996/**
1997 * Handle a request for writing a core dump of the guest on the host.
1998 *
1999 * @returns VBox status code.
2000 *
2001 * @param pDevExt The device extension.
2002 * @param pInfo The output buffer.
2003 */
2004static int VBoxGuestCommonIOCtl_WriteCoreDump(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWriteCoreDump *pInfo)
2005{
2006 VMMDevReqWriteCoreDump *pReq = NULL;
2007 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_WriteCoreDump);
2008 if (RT_FAILURE(rc))
2009 {
2010 Log(("VBoxGuestCommonIOCtl: WRITE_CORE_DUMP: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
2011 sizeof(*pReq), sizeof(*pReq), rc));
2012 return rc;
2013 }
2014
2015 pReq->fFlags = pInfo->fFlags;
2016 rc = VbglGRPerform(&pReq->header);
2017 if (RT_FAILURE(rc))
2018 Log(("VBoxGuestCommonIOCtl: WRITE_CORE_DUMP: VbglGRPerform failed, rc=%Rrc!\n", rc));
2019
2020 VbglGRFree(&pReq->header);
2021 return rc;
2022}
2023
2024
#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
/**
 * Enables the VRDP session and saves its session ID.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt             The device extension.
 * @param   pSession            The session.
 */
static int VBoxGuestCommonIOCtl_EnableVRDPSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    /* Note: The parameter was previously declared as VBOXGUESTDEVEXT by value,
       which does not match the PVBOXGUESTDEVEXT the dispatcher passes and
       would not compile with this #ifdef enabled. */
    NOREF(pDevExt);
    NOREF(pSession);
    /* Nothing to do here right now, since this only is supported on Windows at the moment. */
    return VERR_NOT_IMPLEMENTED;
}


/**
 * Disables the VRDP session.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt             The device extension.
 * @param   pSession            The session.
 */
static int VBoxGuestCommonIOCtl_DisableVRDPSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    NOREF(pDevExt);
    NOREF(pSession);
    /* Nothing to do here right now, since this only is supported on Windows at the moment. */
    return VERR_NOT_IMPLEMENTED;
}
#endif /* VBOX_WITH_VRDP_SESSION_HANDLING */
2055
2056
2057/**
2058 * Guest backdoor logging.
2059 *
2060 * @returns VBox status code.
2061 *
2062 * @param pch The log message (need not be NULL terminated).
2063 * @param cbData Size of the buffer.
2064 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2065 */
2066static int VBoxGuestCommonIOCtl_Log(const char *pch, size_t cbData, size_t *pcbDataReturned)
2067{
2068 NOREF(pch);
2069 NOREF(cbData);
2070 Log(("%.*s", cbData, pch));
2071 if (pcbDataReturned)
2072 *pcbDataReturned = 0;
2073 return VINF_SUCCESS;
2074}
2075
2076
2077/**
2078 * Common IOCtl for user to kernel and kernel to kernel communcation.
2079 *
2080 * This function only does the basic validation and then invokes
2081 * worker functions that takes care of each specific function.
2082 *
2083 * @returns VBox status code.
2084 *
2085 * @param iFunction The requested function.
2086 * @param pDevExt The device extension.
2087 * @param pSession The client session.
2088 * @param pvData The input/output data buffer. Can be NULL depending on the function.
2089 * @param cbData The max size of the data buffer.
2090 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2091 */
2092int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2093 void *pvData, size_t cbData, size_t *pcbDataReturned)
2094{
2095 int rc;
2096 Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
2097 iFunction, pDevExt, pSession, pvData, cbData));
2098
2099 /*
2100 * Make sure the returned data size is set to zero.
2101 */
2102 if (pcbDataReturned)
2103 *pcbDataReturned = 0;
2104
2105 /*
2106 * Define some helper macros to simplify validation.
2107 */
2108#define CHECKRET_RING0(mnemonic) \
2109 do { \
2110 if (pSession->R0Process != NIL_RTR0PROCESS) \
2111 { \
2112 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
2113 pSession->Process, (uintptr_t)pSession->R0Process)); \
2114 return VERR_PERMISSION_DENIED; \
2115 } \
2116 } while (0)
2117#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
2118 do { \
2119 if (cbData < (cbMin)) \
2120 { \
2121 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
2122 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
2123 return VERR_BUFFER_OVERFLOW; \
2124 } \
2125 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
2126 { \
2127 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": Invalid pointer %p\n", pvData)); \
2128 return VERR_INVALID_POINTER; \
2129 } \
2130 } while (0)
2131
2132
2133 /*
2134 * Deal with variably sized requests first.
2135 */
2136 rc = VINF_SUCCESS;
2137 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
2138 {
2139 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
2140 rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
2141 }
2142#ifdef VBOX_WITH_HGCM
2143 /*
2144 * These ones are a bit tricky.
2145 */
2146 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
2147 {
2148 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2149 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2150 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2151 fInterruptible, false /*f32bit*/,
2152 0, cbData, pcbDataReturned);
2153 }
2154 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
2155 {
2156 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2157 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2158 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2159 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2160 false /*f32bit*/,
2161 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2162 }
2163# ifdef RT_ARCH_AMD64
2164 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
2165 {
2166 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2167 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2168 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2169 fInterruptible, true /*f32bit*/,
2170 0, cbData, pcbDataReturned);
2171 }
2172 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
2173 {
2174 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2175 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2176 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2177 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2178 true /*f32bit*/,
2179 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2180 }
2181# endif
2182#endif /* VBOX_WITH_HGCM */
2183 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
2184 {
2185 CHECKRET_MIN_SIZE("LOG", 1);
2186 rc = VBoxGuestCommonIOCtl_Log((char *)pvData, cbData, pcbDataReturned);
2187 }
2188 else
2189 {
2190 switch (iFunction)
2191 {
2192 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
2193 CHECKRET_RING0("GETVMMDEVPORT");
2194 CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
2195 rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
2196 break;
2197
2198 case VBOXGUEST_IOCTL_WAITEVENT:
2199 CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
2200 rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, pSession, (VBoxGuestWaitEventInfo *)pvData,
2201 pcbDataReturned, pSession->R0Process != NIL_RTR0PROCESS);
2202 break;
2203
2204 case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
2205 if (cbData != 0)
2206 rc = VERR_INVALID_PARAMETER;
2207 rc = VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);
2208 break;
2209
2210 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
2211 CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
2212 rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, (VBoxGuestFilterMaskInfo *)pvData);
2213 break;
2214
2215#ifdef VBOX_WITH_HGCM
2216 case VBOXGUEST_IOCTL_HGCM_CONNECT:
2217# ifdef RT_ARCH_AMD64
2218 case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
2219# endif
2220 CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
2221 rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
2222 break;
2223
2224 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
2225# ifdef RT_ARCH_AMD64
2226 case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
2227# endif
2228 CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
2229 rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
2230 break;
2231
2232 case VBOXGUEST_IOCTL_CLIPBOARD_CONNECT:
2233 CHECKRET_MIN_SIZE("CLIPBOARD_CONNECT", sizeof(uint32_t));
2234 rc = VBoxGuestCommonIOCtl_HGCMClipboardReConnect(pDevExt, (uint32_t *)pvData, pcbDataReturned);
2235 break;
2236#endif /* VBOX_WITH_HGCM */
2237
2238 case VBOXGUEST_IOCTL_CHECK_BALLOON:
2239 CHECKRET_MIN_SIZE("CHECK_MEMORY_BALLOON", sizeof(VBoxGuestCheckBalloonInfo));
2240 rc = VBoxGuestCommonIOCtl_CheckMemoryBalloon(pDevExt, pSession, (VBoxGuestCheckBalloonInfo *)pvData, pcbDataReturned);
2241 break;
2242
2243 case VBOXGUEST_IOCTL_CHANGE_BALLOON:
2244 CHECKRET_MIN_SIZE("CHANGE_MEMORY_BALLOON", sizeof(VBoxGuestChangeBalloonInfo));
2245 rc = VBoxGuestCommonIOCtl_ChangeMemoryBalloon(pDevExt, pSession, (VBoxGuestChangeBalloonInfo *)pvData, pcbDataReturned);
2246 break;
2247
2248 case VBOXGUEST_IOCTL_WRITE_CORE_DUMP:
2249 CHECKRET_MIN_SIZE("WRITE_CORE_DUMP", sizeof(VBoxGuestWriteCoreDump));
2250 rc = VBoxGuestCommonIOCtl_WriteCoreDump(pDevExt, (VBoxGuestWriteCoreDump *)pvData);
2251 break;
2252
2253#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
2254 case VBOXGUEST_IOCTL_ENABLE_VRDP_SESSION:
2255 rc = VBoxGuestCommonIOCtl_EnableVRDPSession(pDevExt, pSession);
2256 break;
2257
2258 case VBOXGUEST_IOCTL_DISABLE_VRDP_SESSION:
2259 rc = VBoxGuestCommonIOCtl_DisableVRDPSession(pDevExt, pSession);
2260 break;
2261#endif /* VBOX_WITH_VRDP_SESSION_HANDLING */
2262
2263 default:
2264 {
2265 LogRel(("VBoxGuestCommonIOCtl: Unknown request iFunction=%#x Stripped size=%#x\n", iFunction,
2266 VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
2267 rc = VERR_NOT_SUPPORTED;
2268 break;
2269 }
2270 }
2271 }
2272
2273 Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
2274 return rc;
2275}
2276
2277
2278
2279/**
2280 * Common interrupt service routine.
2281 *
2282 * This deals with events and with waking up thread waiting for those events.
2283 *
2284 * @returns true if it was our interrupt, false if it wasn't.
2285 * @param pDevExt The VBoxGuest device extension.
2286 */
2287bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
2288{
2289 bool fMousePositionChanged = false;
2290 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
2291 VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
2292 int rc = 0;
2293 bool fOurIrq;
2294
2295 /*
2296 * Make sure we've initalized the device extension.
2297 */
2298 if (RT_UNLIKELY(!pReq))
2299 return false;
2300
2301 /*
2302 * Enter the spinlock and check if it's our IRQ or not.
2303 */
2304 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
2305 fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
2306 if (fOurIrq)
2307 {
2308 /*
2309 * Acknowlegde events.
2310 * We don't use VbglGRPerform here as it may take another spinlocks.
2311 */
2312 pReq->header.rc = VERR_INTERNAL_ERROR;
2313 pReq->events = 0;
2314 ASMCompilerBarrier();
2315 ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
2316 ASMCompilerBarrier(); /* paranoia */
2317 if (RT_SUCCESS(pReq->header.rc))
2318 {
2319 uint32_t fEvents = pReq->events;
2320 PVBOXGUESTWAIT pWait;
2321 PVBOXGUESTWAIT pSafe;
2322
2323 Log(("VBoxGuestCommonISR: acknowledge events succeeded %#RX32\n", fEvents));
2324
2325 /*
2326 * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
2327 */
2328 if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
2329 {
2330 fMousePositionChanged = true;
2331 fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
2332 }
2333
2334#ifdef VBOX_WITH_HGCM
2335 /*
2336 * The HGCM event/list is kind of different in that we evaluate all entries.
2337 */
2338 if (fEvents & VMMDEV_EVENT_HGCM)
2339 {
2340 RTListForEachSafe(&pDevExt->HGCMWaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
2341 {
2342 if (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE)
2343 {
2344 pWait->fResEvents = VMMDEV_EVENT_HGCM;
2345 RTListNodeRemove(&pWait->ListNode);
2346# ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
2347 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
2348# else
2349 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
2350 rc |= RTSemEventMultiSignal(pWait->Event);
2351# endif
2352 }
2353 }
2354 fEvents &= ~VMMDEV_EVENT_HGCM;
2355 }
2356#endif
2357
2358 /*
2359 * Normal FIFO waiter evaluation.
2360 */
2361 fEvents |= pDevExt->f32PendingEvents;
2362 RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
2363 {
2364 if ( (pWait->fReqEvents & fEvents)
2365 && !pWait->fResEvents)
2366 {
2367 pWait->fResEvents = pWait->fReqEvents & fEvents;
2368 fEvents &= ~pWait->fResEvents;
2369 RTListNodeRemove(&pWait->ListNode);
2370#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
2371 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
2372#else
2373 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
2374 rc |= RTSemEventMultiSignal(pWait->Event);
2375#endif
2376 if (!fEvents)
2377 break;
2378 }
2379 }
2380 ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
2381 }
2382 else /* something is serious wrong... */
2383 Log(("VBoxGuestCommonISR: acknowledge events failed rc=%Rrc (events=%#x)!!\n",
2384 pReq->header.rc, pReq->events));
2385 }
2386 else
2387 LogFlow(("VBoxGuestCommonISR: not ours\n"));
2388
2389 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
2390
2391#if defined(VBOXGUEST_USE_DEFERRED_WAKE_UP) && !defined(RT_OS_WINDOWS)
2392 /*
2393 * Do wake-ups.
2394 * Note. On Windows this isn't possible at this IRQL, so a DPC will take
2395 * care of it.
2396 */
2397 VBoxGuestWaitDoWakeUps(pDevExt);
2398#endif
2399
2400 /*
2401 * Work the poll and async notification queues on OSes that implements that.
2402 * (Do this outside the spinlock to prevent some recursive spinlocking.)
2403 */
2404 if (fMousePositionChanged)
2405 {
2406 ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
2407 VBoxGuestNativeISRMousePollEvent(pDevExt);
2408 }
2409
2410 Assert(rc == 0);
2411 return fOurIrq;
2412}
2413
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette