VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 29021

Last change on this file since 29021 was 28800, checked in by vboxsync, 15 years ago

Automated rebranding to Oracle copyright/license strings via filemuncher

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 82.4 KB
Line 
1/* $Id: VBoxGuest.cpp 28800 2010-04-27 08:22:32Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEFAULT
23#include "VBoxGuestInternal.h"
24#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
25#include <VBox/log.h>
26#include <iprt/mem.h>
27#include <iprt/time.h>
28#include <iprt/memobj.h>
29#include <iprt/asm.h>
30#include <iprt/string.h>
31#include <iprt/process.h>
32#include <iprt/assert.h>
33#include <iprt/param.h>
34#ifdef VBOX_WITH_HGCM
35# include <iprt/thread.h>
36#endif
37
38
39/*******************************************************************************
40* Internal Functions *
41*******************************************************************************/
42#ifdef VBOX_WITH_HGCM
43static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
44#endif
45
46
47/*******************************************************************************
48* Global Variables *
49*******************************************************************************/
/** The size of a VMMDevChangeMemBalloon request up to and including the last
 *  element of the aPhysPage array, i.e. one ballooning chunk's worth of page
 *  addresses.  Used for both allocating and sizing the request. */
static const size_t cbChangeMemBalloonReq = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);
51
52
53
/**
 * Reserves memory in which the VMM can relocate any guest mappings
 * that are floating around.
 *
 * This operation is a little bit tricky since the VMM might not accept
 * just any address because of address clashes between the three contexts
 * it operates in, so we try up to RT_ELEMENTS(ahTries) times with fresh
 * reservations before giving up.
 *
 * Failures are only logged; the function always returns VINF_SUCCESS so
 * the driver still loads when no mapping area could be fixated.
 *
 * @returns VBox status code (ignored by the caller; currently always
 *          VINF_SUCCESS unless the initial request allocation fails).
 * @param   pDevExt     The device extension.
 */
static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
{
    /*
     * Query the required space.
     */
    VMMDevReqHypervisorInfo *pReq;
    int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
    if (RT_FAILURE(rc))
        return rc;
    pReq->hypervisorStart = 0;
    pReq->hypervisorSize = 0;
    rc = VbglGRPerform(&pReq->header);
    if (RT_FAILURE(rc)) /* this shouldn't happen! */
    {
        VbglGRFree(&pReq->header);
        return rc;
    }

    /*
     * The VMM will report back if there is nothing it wants to map, like for
     * instance in VT-x and AMD-V mode.
     */
    if (pReq->hypervisorSize == 0)
        Log(("vboxGuestInitFixateGuestMappings: nothing to do\n"));
    else
    {
        /*
         * We have to try several times since the host can be picky
         * about certain addresses.
         */
        RTR0MEMOBJ  hFictive     = NIL_RTR0MEMOBJ;
        uint32_t    cbHypervisor = pReq->hypervisorSize;
        RTR0MEMOBJ  ahTries[5];                     /* failed reservations; freed below */
        uint32_t    iTry;
        bool        fBitched = false;               /* set when a failure was already logged */
        Log(("vboxGuestInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
        for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
        {
            /*
             * Reserve space, or if that isn't supported, create a object for
             * some fictive physical memory and map that in to kernel space.
             *
             * To make the code a bit uglier, most systems cannot help with
             * 4MB alignment, so we have to deal with that in addition to
             * having two ways of getting the memory.
             */
            uint32_t    uAlignment = _4M;
            RTR0MEMOBJ  hObj;
            rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
            if (rc == VERR_NOT_SUPPORTED)
            {
                /* Fall back to page alignment with _4M of slack so we can
                   round the start address up below. */
                uAlignment = PAGE_SIZE;
                rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
            }
            if (rc == VERR_NOT_SUPPORTED)
            {
                /* Second fallback: enter a fictive physical range (created
                   once, reused on later tries) and map it into kernel space. */
                if (hFictive == NIL_RTR0MEMOBJ)
                {
                    rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
                    if (RT_FAILURE(rc))
                        break;
                    hFictive = hObj;
                }
                uAlignment = _4M;
                rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                if (rc == VERR_NOT_SUPPORTED)
                {
                    uAlignment = PAGE_SIZE;
                    rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                }
            }
            if (RT_FAILURE(rc))
            {
                LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
                        rc, cbHypervisor, uAlignment, iTry));
                fBitched = true;
                break;
            }

            /*
             * Try set it.
             */
            pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
            pReq->header.rc          = VERR_INTERNAL_ERROR;
            pReq->hypervisorSize     = cbHypervisor;
            pReq->hypervisorStart    = (uintptr_t)RTR0MemObjAddress(hObj);
            if (    uAlignment == PAGE_SIZE
                &&  pReq->hypervisorStart & (_4M - 1))
                pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
            AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));

            rc = VbglGRPerform(&pReq->header);
            if (RT_SUCCESS(rc))
            {
                /* Keep the outermost object: the fictive one if used (the
                   kernel mapping is freed with it), else the reservation. */
                pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
                Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
                     RTR0MemObjAddress(pDevExt->hGuestMappings),
                     RTR0MemObjSize(pDevExt->hGuestMappings),
                     uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
                break;
            }
            ahTries[iTry] = hObj;   /* host rejected the address; remember for cleanup and retry */
        }

        /*
         * Cleanup failed attempts.
         */
        while (iTry-- > 0)
            RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
        /* NOTE(review): hFictive is an RTR0MEMOBJ but is compared against
           NIL_RTR0PTR here and in the Log above; both NILs appear to be used
           interchangeably in this file — confirm they are equivalent. */
        if (    RT_FAILURE(rc)
            &&  hFictive != NIL_RTR0PTR)
            RTR0MemObjFree(hFictive, false /* fFreeMappings */);
        /* NOTE(review): "%#d" below looks like it was meant to be "%#x". */
        if (RT_FAILURE(rc) && !fBitched)
            LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
    }
    VbglGRFree(&pReq->header);

    /*
     * We ignore failed attempts for now.
     */
    return VINF_SUCCESS;
}
187
188
/**
 * Undo what vboxGuestInitFixateGuestMappings did.
 *
 * Tells the host to stop using the reserved area before freeing it; if the
 * host cannot be informed the memory is deliberately leaked rather than
 * freed while the host may still reference it.
 *
 * @param   pDevExt     The device extension.
 */
static void vboxGuestTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
{
    /* NOTE(review): hGuestMappings is an RTR0MEMOBJ but is compared against
       NIL_RTR0PTR here (it is assigned NIL_RTR0MEMOBJ below) — confirm the
       two NIL values are equivalent. */
    if (pDevExt->hGuestMappings != NIL_RTR0PTR)
    {
        /*
         * Tell the host that we're going to free the memory we reserved for
         * it, the free it up. (Leak the memory if anything goes wrong here.)
         */
        VMMDevReqHypervisorInfo *pReq;
        int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
        if (RT_SUCCESS(rc))
        {
            /* Zero start+size == "no hypervisor area". */
            pReq->hypervisorStart = 0;
            pReq->hypervisorSize  = 0;
            rc = VbglGRPerform(&pReq->header);
            VbglGRFree(&pReq->header);
        }
        if (RT_SUCCESS(rc))
        {
            rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
            AssertRC(rc);
        }
        else
            LogRel(("vboxGuestTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));

        pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
    }
}
222
223
224/**
225 * Sets the interrupt filter mask during initialization and termination.
226 *
227 * This will ASSUME that we're the ones in carge over the mask, so
228 * we'll simply clear all bits we don't set.
229 *
230 * @returns VBox status code (ignored).
231 * @param pDevExt The device extension.
232 * @param fMask The new mask.
233 */
234static int vboxGuestSetFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
235{
236 VMMDevCtlGuestFilterMask *pReq;
237 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
238 if (RT_SUCCESS(rc))
239 {
240 pReq->u32OrMask = fMask;
241 pReq->u32NotMask = ~fMask;
242 rc = VbglGRPerform(&pReq->header);
243 if ( RT_FAILURE(rc)
244 || RT_FAILURE(pReq->header.rc))
245 LogRel(("vboxGuestSetFilterMask: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
246 rc, pReq->header.rc));
247 VbglGRFree(&pReq->header);
248 }
249 return rc;
250}
251
252
253/**
254 * Report guest information to the VMMDev.
255 *
256 * @returns VBox status code.
257 * @param pDevExt The device extension.
258 * @param enmOSType The OS type to report.
259 */
260static int vboxGuestInitReportGuestInfo(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
261{
262 VMMDevReportGuestInfo *pReq;
263 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_ReportGuestInfo);
264 if (RT_SUCCESS(rc))
265 {
266 pReq->guestInfo.additionsVersion = VMMDEV_VERSION;
267 pReq->guestInfo.osType = enmOSType;
268 rc = VbglGRPerform(&pReq->header);
269 if ( RT_FAILURE(rc)
270 || RT_FAILURE(pReq->header.rc))
271 LogRel(("vboxGuestInitReportGuestInfo: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
272 rc, pReq->header.rc));
273 VbglGRFree(&pReq->header);
274 }
275 return rc;
276}
277
278
279/**
280 * Inflate the balloon by one chunk represented by an R0 memory object.
281 *
282 * The caller owns the balloon mutex.
283 *
284 * @returns IPRT status code.
285 * @param pMemObj Pointer to the R0 memory object.
286 * @param pReq The pre-allocated request for performing the VMMDev call.
287 */
288static int vboxGuestBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
289{
290 uint32_t iPage;
291 int rc;
292
293 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
294 {
295 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
296 pReq->aPhysPage[iPage] = phys;
297 }
298
299 /* Protect this memory from being accessed. Doesn't work on every platform and probably
300 * doesn't work for R3-provided memory, therefore ignore the return value. Unprotect
301 * done when object is freed. */
302 RTR0MemObjProtect(*pMemObj, 0, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, RTMEM_PROT_NONE);
303
304 pReq->fInflate = true;
305 pReq->header.size = cbChangeMemBalloonReq;
306 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
307
308 rc = VbglGRPerform(&pReq->header);
309 if (RT_FAILURE(rc))
310 LogRel(("vboxGuestBalloonInflate: VbglGRPerform failed. rc=%Rrc\n", rc));
311 return rc;
312}
313
314
/**
 * Deflate the balloon by one chunk - inform the host and free the memory object.
 *
 * The caller owns the balloon mutex.  The host must be told before the memory
 * is unprotected and freed, hence the ordering below.
 *
 * @returns IPRT status code.
 * @param   pMemObj     Pointer to the R0 memory object.
 *                      The memory object will be freed afterwards and the
 *                      pointer set to NIL_RTR0MEMOBJ on success.
 * @param   pReq        The pre-allocated request for performing the VMMDev call.
 */
static int vboxGuestBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
{
    uint32_t iPage;
    int rc;

    /* Hand the host the physical address of every page in the chunk. */
    for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
    {
        RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
        pReq->aPhysPage[iPage] = phys;
    }

    pReq->fInflate = false;
    pReq->header.size = cbChangeMemBalloonReq;
    pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;

    rc = VbglGRPerform(&pReq->header);
    if (RT_FAILURE(rc))
    {
        LogRel(("vboxGuestBalloonDeflate: VbglGRPerform failed. rc=%Rrc\n", rc));
        return rc;
    }

    /* undo previous protect call (see vboxGuestBalloonInflate), ignore rc for
       reasons stated there. */
    RTR0MemObjProtect(*pMemObj, 0, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
    /*RTR0MemObjProtect(*pMemObj, 0, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC); - probably not safe... */

    rc = RTR0MemObjFree(*pMemObj, true);
    if (RT_FAILURE(rc))
    {
        LogRel(("vboxGuestBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
        return rc;
    }

    *pMemObj = NIL_RTR0MEMOBJ;
    return VINF_SUCCESS;
}
361
362
363/**
364 * Inflate/deflate the memory balloon and notify the host.
365 *
366 * This is a worker used by VBoxGuestCommonIOCtl_CheckMemoryBalloon - it takes
367 * the mutex.
368 *
369 * @returns VBox status code.
370 * @param pDevExt The device extension.
371 * @param pSession The session.
372 * @param cBalloonChunks The new size of the balloon in chunks of 1MB.
373 * @param pfHandleInR3 Where to return the handle-in-ring3 indicator
374 * (VINF_SUCCESS if set).
375 */
376static int vboxGuestSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, uint32_t *pfHandleInR3)
377{
378 int rc = VINF_SUCCESS;
379
380 if (pDevExt->MemBalloon.fUseKernelAPI)
381 {
382 VMMDevChangeMemBalloon *pReq;
383 uint32_t i;
384
385 if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
386 {
387 LogRel(("vboxGuestSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
388 cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
389 return VERR_INVALID_PARAMETER;
390 }
391
392 if (cBalloonChunks == pDevExt->MemBalloon.cMaxChunks)
393 return VINF_SUCCESS; /* nothing to do */
394
395 if ( cBalloonChunks > pDevExt->MemBalloon.cChunks
396 && !pDevExt->MemBalloon.paMemObj)
397 {
398 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
399 if (!pDevExt->MemBalloon.paMemObj)
400 {
401 LogRel(("VBoxGuestSetBalloonSizeKernel: no memory for paMemObj!\n"));
402 return VERR_NO_MEMORY;
403 }
404 }
405
406 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
407 if (RT_FAILURE(rc))
408 return rc;
409
410 if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
411 {
412 /* inflate */
413 for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
414 {
415 rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
416 VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
417 if (RT_FAILURE(rc))
418 {
419 if (rc == VERR_NOT_SUPPORTED)
420 {
421 /* not supported -- fall back to the R3-allocated memory. */
422 rc = VINF_SUCCESS;
423 pDevExt->MemBalloon.fUseKernelAPI = false;
424 Assert(pDevExt->MemBalloon.cChunks == 0);
425 Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
426 }
427 /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
428 * cannot allocate more memory => don't try further, just stop here */
429 /* else: XXX what else can fail? VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
430 break;
431 }
432
433 rc = vboxGuestBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
434 if (RT_FAILURE(rc))
435 {
436 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
437 RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
438 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
439 break;
440 }
441 pDevExt->MemBalloon.cChunks++;
442 }
443 }
444 else
445 {
446 /* deflate */
447 for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
448 {
449 rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
450 if (RT_FAILURE(rc))
451 {
452 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
453 break;
454 }
455 pDevExt->MemBalloon.cChunks--;
456 }
457 }
458
459 VbglGRFree(&pReq->header);
460 }
461
462 /*
463 * Set the handle-in-ring3 indicator. When set Ring-3 will have to work
464 * the balloon changes via the other API.
465 */
466 *pfHandleInR3 = pDevExt->MemBalloon.fUseKernelAPI ? false : true;
467
468 return rc;
469}
470
471
/**
 * Inflate/deflate the balloon by one chunk.
 *
 * Worker for VBoxGuestCommonIOCtl_ChangeMemoryBalloon - it takes the mutex.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session.  (Currently unused in this worker.)
 * @param   u64ChunkAddr    The ring-3 address of the chunk to add to / remove
 *                          from the balloon.
 * @param   fInflate        Inflate if true, deflate if false.
 */
static int vboxGuestSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           uint64_t u64ChunkAddr, bool fInflate)
{
    VMMDevChangeMemBalloon *pReq;
    int rc = VINF_SUCCESS;
    uint32_t i;
    PRTR0MEMOBJ pMemObj = NULL;

    if (fInflate)
    {
        /* Reject when the balloon is full or ballooning was never set up. */
        if (    pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
            ||  pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
        {
            LogRel(("vboxGuestSetBalloonSize: cannot inflate balloon, already have %u chunks (max=%u)\n",
                    pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
            return VERR_INVALID_PARAMETER;
        }

        /* Lazily create the chunk tracking array, all slots set to NIL. */
        if (!pDevExt->MemBalloon.paMemObj)
        {
            pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
            if (!pDevExt->MemBalloon.paMemObj)
            {
                LogRel(("VBoxGuestSetBalloonSizeFromUser: no memory for paMemObj!\n"));
                return VERR_NO_MEMORY;
            }
            for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
                pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
        }
    }
    else
    {
        if (pDevExt->MemBalloon.cChunks == 0)
        {
            AssertMsgFailed(("vboxGuestSetBalloonSize: cannot decrease balloon, already at size 0\n"));
            return VERR_INVALID_PARAMETER;
        }
    }

    /*
     * Enumerate all memory objects and check if the object is already registered.
     * For inflate this also records the first free slot; for deflate it locates
     * the object matching the caller-supplied R3 address.
     */
    for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
    {
        if (   fInflate
            && !pMemObj
            && pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
            pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
        if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == u64ChunkAddr)
        {
            if (fInflate)
                return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
            pMemObj = &pDevExt->MemBalloon.paMemObj[i];
            break;
        }
    }
    if (!pMemObj)
    {
        if (fInflate)
        {
            /* no free object pointer found -- should not happen */
            return VERR_NO_MEMORY;
        }

        /* cannot free this memory as it wasn't provided before */
        return VERR_NOT_FOUND;
    }

    /*
     * Try inflate / deflate the balloon as requested.
     */
    rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
    if (RT_FAILURE(rc))
        return rc;

    if (fInflate)
    {
        /* Lock the user memory into the slot, then hand it to the host.
           On failure the slot is freed and reset so it can be reused. */
        rc = RTR0MemObjLockUser(pMemObj, u64ChunkAddr, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
                                RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
        if (RT_SUCCESS(rc))
        {
            rc = vboxGuestBalloonInflate(pMemObj, pReq);
            if (RT_SUCCESS(rc))
                pDevExt->MemBalloon.cChunks++;
            else
            {
                Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
                RTR0MemObjFree(*pMemObj, true);
                *pMemObj = NIL_RTR0MEMOBJ;
            }
        }
    }
    else
    {
        rc = vboxGuestBalloonDeflate(pMemObj, pReq);
        if (RT_SUCCESS(rc))
            pDevExt->MemBalloon.cChunks--;
        else
            Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
    }

    VbglGRFree(&pReq->header);
    return rc;
}
588
589
/**
 * Cleanup the memory balloon of a session.
 *
 * Will request the balloon mutex, so it must be valid and the caller must not
 * own it already.  Deflates and frees all chunks if the given session owns the
 * balloon (or on driver unload).
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.  Can be NULL at unload, which forces the
 *                      cleanup regardless of ownership.
 */
static void vboxGuestCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    if (    pDevExt->MemBalloon.pOwner == pSession
        ||  pSession == NULL /*unload*/)
    {
        if (pDevExt->MemBalloon.paMemObj)
        {
            VMMDevChangeMemBalloon *pReq;
            int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
            if (RT_SUCCESS(rc))
            {
                uint32_t i;
                /* Deflate from the highest chunk down; stop (and leak the
                   rest) if the host refuses a deflate. */
                for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
                {
                    rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
                    if (RT_FAILURE(rc))
                    {
                        LogRel(("vboxGuestCloseMemBalloon: Deflate failed with rc=%Rrc.  Will leak %u chunks.\n",
                                rc, pDevExt->MemBalloon.cChunks));
                        break;
                    }
                    pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
                    pDevExt->MemBalloon.cChunks--;
                }
                VbglGRFree(&pReq->header);
            }
            else
                LogRel(("vboxGuestCloseMemBalloon: Failed to allocate VMMDev request buffer (rc=%Rrc).  Will leak %u chunks.\n",
                        rc, pDevExt->MemBalloon.cChunks));
            RTMemFree(pDevExt->MemBalloon.paMemObj);
            pDevExt->MemBalloon.paMemObj = NULL;
        }

        pDevExt->MemBalloon.pOwner = NULL;
    }
    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
}
637
638
/**
 * Initializes the VBoxGuest device extension when the
 * device driver is loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieve
 * the MMIO and I/O port ranges, this function will take care of
 * mapping the MMIO memory (if present). Upon successful return
 * the native code should set up the interrupt handler.
 *
 * On failure everything created here is torn down again; the caller only
 * needs to free the device extension itself.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt         The device extension. Allocated by the native code.
 * @param   IOPortBase      The base of the I/O port range.
 * @param   pvMMIOBase      The base of the MMIO memory mapping.
 *                          This is optional, pass NULL if not present.
 * @param   cbMMIO          The size of the MMIO memory mapping.
 *                          This is optional, pass 0 if not present.
 * @param   enmOSType       The guest OS type to report to the VMMDev.
 * @param   fFixedEvents    Events that will be enabled upon init and no client
 *                          will ever be allowed to mask.
 */
int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
                        void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
{
    int rc, rc2;

    /*
     * Adjust fFixedEvents: HGCM events are always handled by the common code.
     */
#ifdef VBOX_WITH_HGCM
    fFixedEvents |= VMMDEV_EVENT_HGCM;
#endif

    /*
     * Initialize the data.
     */
    pDevExt->IOPortBase = IOPortBase;
    pDevExt->pVMMDevMemory = NULL;
    pDevExt->fFixedEvents = fFixedEvents;
    pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
    pDevExt->EventSpinlock = NIL_RTSPINLOCK;
    pDevExt->pIrqAckEvents = NULL;
    pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
    pDevExt->WaitList.pHead = NULL;
    pDevExt->WaitList.pTail = NULL;
#ifdef VBOX_WITH_HGCM
    pDevExt->HGCMWaitList.pHead = NULL;
    pDevExt->HGCMWaitList.pTail = NULL;
#endif
    pDevExt->FreeList.pHead = NULL;
    pDevExt->FreeList.pTail = NULL;
    pDevExt->f32PendingEvents = 0;
    pDevExt->u32MousePosChangedSeq = 0;
    pDevExt->SessionSpinlock = NIL_RTSPINLOCK;
    pDevExt->u32ClipboardClientId = 0;
    pDevExt->MemBalloon.hMtx = NIL_RTSEMFASTMUTEX;
    pDevExt->MemBalloon.cChunks = 0;
    pDevExt->MemBalloon.cMaxChunks = 0;
    pDevExt->MemBalloon.fUseKernelAPI = true;
    pDevExt->MemBalloon.paMemObj = NULL;
    pDevExt->MemBalloon.pOwner = NULL;

    /*
     * If there is an MMIO region validate the version and size.
     */
    if (pvMMIOBase)
    {
        VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
        Assert(cbMMIO);
        if (    pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
            &&  pVMMDev->u32Size >= 32
            &&  pVMMDev->u32Size <= cbMMIO)
        {
            pDevExt->pVMMDevMemory = pVMMDev;
            Log(("VBoxGuestInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
                 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
        }
        else /* try live without it. */
            LogRel(("VBoxGuestInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
                    pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
    }

    /*
     * Create the wait and session spinlocks as well as the ballooning mutex.
     */
    rc = RTSpinlockCreate(&pDevExt->EventSpinlock);
    if (RT_SUCCESS(rc))
        rc = RTSpinlockCreate(&pDevExt->SessionSpinlock);
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
        if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
            RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    rc = RTSemFastMutexCreate(&pDevExt->MemBalloon.hMtx);
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create mutex, rc=%Rrc!\n", rc));
        RTSpinlockDestroy(pDevExt->SessionSpinlock);
        RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    /*
     * Initialize the guest library and report the guest info back to VMMDev,
     * set the interrupt control filter mask, and fixate the guest mappings
     * made by the VMM.  Each step is unwound below on failure.
     */
    rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
    if (RT_SUCCESS(rc))
    {
        /* Pre-allocate the IRQ-acknowledge request so the ISR never allocates. */
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
        if (RT_SUCCESS(rc))
        {
            pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
            Assert(pDevExt->PhysIrqAckEvents != 0);

            rc = vboxGuestInitReportGuestInfo(pDevExt, enmOSType);
            if (RT_SUCCESS(rc))
            {
                rc = vboxGuestSetFilterMask(pDevExt, fFixedEvents);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Disable guest graphics capability by default. The guest specific
                     * graphics driver will re-enable this when it is necessary.
                     */
                    rc = VBoxGuestSetGuestCapabilities(0, VMMDEV_GUEST_SUPPORTS_GRAPHICS);
                    if (RT_SUCCESS(rc))
                    {
                        vboxGuestInitFixateGuestMappings(pDevExt);
                        Log(("VBoxGuestInitDevExt: returns success\n"));
                        return VINF_SUCCESS;
                    }

                    LogRel(("VBoxGuestInitDevExt: VBoxGuestSetGuestCapabilities failed, rc=%Rrc\n", rc));
                }
                else
                    LogRel(("VBoxGuestInitDevExt: vboxGuestSetFilterMask failed, rc=%Rrc\n", rc));
            }
            else
                LogRel(("VBoxGuestInitDevExt: vboxGuestInitReportGuestInfo failed, rc=%Rrc\n", rc));

            VbglGRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
        }
        else
            LogRel(("VBoxGuestInitDevExt: VBoxGRAlloc failed, rc=%Rrc\n", rc));

        VbglTerminate();
    }
    else
        LogRel(("VBoxGuestInitDevExt: VbglInit failed, rc=%Rrc\n", rc));

    /* Failure: tear down the locks created above. */
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    return rc; /* (failed) */
}
799
800
/**
 * Deletes all the items in a wait chain.
 *
 * Destroys each entry's event semaphore and frees the entry itself, leaving
 * the list empty.
 *
 * @param   pList   The list to empty.
 */
static void VBoxGuestDeleteWaitList(PVBOXGUESTWAITLIST pList)
{
    while (pList->pHead)
    {
        int rc2;
        PVBOXGUESTWAIT pWait = pList->pHead;
        pList->pHead = pWait->pNext;

        /* Scrub the entry before freeing it. */
        pWait->pNext = NULL;
        pWait->pPrev = NULL;
        rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
        pWait->Event = NIL_RTSEMEVENTMULTI;
        pWait->pSession = NULL;
        RTMemFree(pWait);
    }
    pList->pHead = NULL;
    pList->pTail = NULL;
}
823
824
/**
 * Destroys the VBoxGuest device extension.
 *
 * The native code should call this before the driver is unloaded,
 * but don't call this on shutdown.
 *
 * @param   pDevExt     The device extension.
 */
void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
{
    int rc2;
    Log(("VBoxGuestDeleteDevExt:\n"));
    Log(("VBoxGuest: The additions driver is terminating.\n"));

    /*
     * Clean up the bits that involves the host first.
     */
    vboxGuestTermUnfixGuestMappings(pDevExt);
    VBoxGuestSetGuestCapabilities(0, UINT32_MAX); /* clears all capabilities */
    vboxGuestSetFilterMask(pDevExt, 0); /* filter all events */
    vboxGuestCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);

    /*
     * Cleanup all the other resources.
     */
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);

    VBoxGuestDeleteWaitList(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
#endif
    VBoxGuestDeleteWaitList(&pDevExt->FreeList);

    VbglTerminate();

    pDevExt->pVMMDevMemory = NULL;

    pDevExt->IOPortBase = 0;
    pDevExt->pIrqAckEvents = NULL;
}
867
868
869/**
870 * Creates a VBoxGuest user session.
871 *
872 * The native code calls this when a ring-3 client opens the device.
873 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
874 *
875 * @returns VBox status code.
876 * @param pDevExt The device extension.
877 * @param ppSession Where to store the session on success.
878 */
879int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
880{
881 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
882 if (RT_UNLIKELY(!pSession))
883 {
884 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
885 return VERR_NO_MEMORY;
886 }
887
888 pSession->Process = RTProcSelf();
889 pSession->R0Process = RTR0ProcHandleSelf();
890 pSession->pDevExt = pDevExt;
891
892 *ppSession = pSession;
893 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
894 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
895 return VINF_SUCCESS;
896}
897
898
899/**
900 * Creates a VBoxGuest kernel session.
901 *
902 * The native code calls this when a ring-0 client connects to the device.
903 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
904 *
905 * @returns VBox status code.
906 * @param pDevExt The device extension.
907 * @param ppSession Where to store the session on success.
908 */
909int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
910{
911 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
912 if (RT_UNLIKELY(!pSession))
913 {
914 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
915 return VERR_NO_MEMORY;
916 }
917
918 pSession->Process = NIL_RTPROCESS;
919 pSession->R0Process = NIL_RTR0PROCESS;
920 pSession->pDevExt = pDevExt;
921
922 *ppSession = pSession;
923 LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
924 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
925 return VINF_SUCCESS;
926}
927
928
929
/**
 * Closes a VBoxGuest session.
 *
 * Disconnects any HGCM clients the session still holds, cleans up its memory
 * balloon ownership and frees the session structure.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session to close (and free).
 */
void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    unsigned i; NOREF(i);
    Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
         pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */

#ifdef VBOX_WITH_HGCM
    /* Disconnect every HGCM client id the session still owns. */
    for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
        if (pSession->aHGCMClientIds[i])
        {
            VBoxGuestHGCMDisconnectInfo Info;
            Info.result = 0;
            Info.u32ClientID = pSession->aHGCMClientIds[i];
            pSession->aHGCMClientIds[i] = 0;
            Log(("VBoxGuestCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
            VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
        }
#endif

    /* Scrub the session before freeing it. */
    pSession->pDevExt = NULL;
    pSession->Process = NIL_RTPROCESS;
    pSession->R0Process = NIL_RTR0PROCESS;
    vboxGuestCloseMemBalloon(pDevExt, pSession);
    RTMemFree(pSession);
}
961
962
963/**
964 * Links the wait-for-event entry into the tail of the given list.
965 *
966 * @param pList The list to link it into.
967 * @param pWait The wait for event entry to append.
968 */
969DECLINLINE(void) VBoxGuestWaitAppend(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
970{
971 const PVBOXGUESTWAIT pTail = pList->pTail;
972 pWait->pNext = NULL;
973 pWait->pPrev = pTail;
974 if (pTail)
975 pTail->pNext = pWait;
976 else
977 pList->pHead = pWait;
978 pList->pTail = pWait;
979}
980
981
982/**
983 * Unlinks the wait-for-event entry.
984 *
985 * @param pList The list to unlink it from.
986 * @param pWait The wait for event entry to unlink.
987 */
988DECLINLINE(void) VBoxGuestWaitUnlink(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
989{
990 const PVBOXGUESTWAIT pPrev = pWait->pPrev;
991 const PVBOXGUESTWAIT pNext = pWait->pNext;
992 if (pNext)
993 pNext->pPrev = pPrev;
994 else
995 pList->pTail = pPrev;
996 if (pPrev)
997 pPrev->pNext = pNext;
998 else
999 pList->pHead = pNext;
1000}
1001
1002
/**
 * Allocates a wait-for-event entry.
 *
 * Recycles an entry from the device extension's free list when possible,
 * falling back on RTMemAlloc + a fresh event semaphore otherwise.
 *
 * @returns The wait-for-event entry, or NULL on allocation failure.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session that's allocating this. Can be NULL.
 */
static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    /*
     * Allocate it one way or the other.
     */
    PVBOXGUESTWAIT pWait = pDevExt->FreeList.pTail; /* unlocked peek; avoids taking the spinlock when the list is empty */
    if (pWait)
    {
        RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
        RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);

        /* Re-check under the lock - another thread may have grabbed the entry. */
        pWait = pDevExt->FreeList.pTail;
        if (pWait)
            VBoxGuestWaitUnlink(&pDevExt->FreeList, pWait);

        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
    }
    if (!pWait)
    {
        static unsigned s_cErrors = 0;  /* rate-limits the release-log noise below */
        int rc;

        pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
        if (!pWait)
        {
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
            return NULL;
        }

        rc = RTSemEventMultiCreate(&pWait->Event);
        if (RT_FAILURE(rc))
        {
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
            RTMemFree(pWait);
            return NULL;
        }
    }

    /*
     * Zero members just as a precaution.
     */
    pWait->pNext = NULL;
    pWait->pPrev = NULL;
    pWait->fReqEvents = 0;
    pWait->fResEvents = 0;
    pWait->pSession = pSession;
#ifdef VBOX_WITH_HGCM
    pWait->pHGCMReq = NULL;
#endif
    RTSemEventMultiReset(pWait->Event);
    return pWait;
}
1064
1065
1066/**
1067 * Frees the wait-for-event entry.
1068 * The caller must own the wait spinlock!
1069 *
1070 * @param pDevExt The device extension.
1071 * @param pWait The wait-for-event entry to free.
1072 */
1073static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1074{
1075 pWait->fReqEvents = 0;
1076 pWait->fResEvents = 0;
1077#ifdef VBOX_WITH_HGCM
1078 pWait->pHGCMReq = NULL;
1079#endif
1080 VBoxGuestWaitAppend(&pDevExt->FreeList, pWait);
1081}
1082
1083
1084/**
1085 * Frees the wait-for-event entry.
1086 *
1087 * @param pDevExt The device extension.
1088 * @param pWait The wait-for-event entry to free.
1089 */
1090static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1091{
1092 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1093 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1094 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1095 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1096}
1097
1098
1099/**
1100 * Modifies the guest capabilities.
1101 *
1102 * Should be called during driver init and termination.
1103 *
1104 * @returns VBox status code.
1105 * @param fOr The Or mask (what to enable).
1106 * @param fNot The Not mask (what to disable).
1107 */
1108int VBoxGuestSetGuestCapabilities(uint32_t fOr, uint32_t fNot)
1109{
1110 VMMDevReqGuestCapabilities2 *pReq;
1111 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
1112 if (RT_FAILURE(rc))
1113 {
1114 Log(("VBoxGuestSetGuestCapabilities: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1115 sizeof(*pReq), sizeof(*pReq), rc));
1116 return rc;
1117 }
1118
1119 pReq->u32OrMask = fOr;
1120 pReq->u32NotMask = fNot;
1121
1122 rc = VbglGRPerform(&pReq->header);
1123 if (RT_FAILURE(rc))
1124 Log(("VBoxGuestSetGuestCapabilities:VbglGRPerform failed, rc=%Rrc!\n", rc));
1125 else if (RT_FAILURE(pReq->header.rc))
1126 {
1127 Log(("VBoxGuestSetGuestCapabilities: The request failed; VMMDev rc=%Rrc!\n", pReq->header.rc));
1128 rc = pReq->header.rc;
1129 }
1130
1131 VbglGRFree(&pReq->header);
1132 return rc;
1133}
1134
1135
1136/**
1137 * Implements the fast (no input or output) type of IOCtls.
1138 *
1139 * This is currently just a placeholder stub inherited from the support driver code.
1140 *
1141 * @returns VBox status code.
1142 * @param iFunction The IOCtl function number.
1143 * @param pDevExt The device extension.
1144 * @param pSession The session.
1145 */
1146int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1147{
1148 Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1149
1150 NOREF(iFunction);
1151 NOREF(pDevExt);
1152 NOREF(pSession);
1153 return VERR_NOT_SUPPORTED;
1154}
1155
1156
1157/**
1158 * Return the VMM device port.
1159 *
1160 * returns IPRT status code.
1161 * @param pDevExt The device extension.
1162 * @param pInfo The request info.
1163 * @param pcbDataReturned (out) contains the number of bytes to return.
1164 */
1165static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
1166{
1167 Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
1168 pInfo->portAddress = pDevExt->IOPortBase;
1169 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
1170 if (pcbDataReturned)
1171 *pcbDataReturned = sizeof(*pInfo);
1172 return VINF_SUCCESS;
1173}
1174
1175
/**
 * Worker VBoxGuestCommonIOCtl_WaitEvent.
 * The caller enters the spinlock, we may or may not leave it.
 *
 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately
 *          (pInfo has been filled in). VERR_TIMEOUT if none of the requested
 *          events are pending; the spinlock is then still held by the caller.
 */
DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWaitEventInfo *pInfo,
                                        int iEvent, const uint32_t fReqEvents, PRTSPINLOCKTMP pTmp)
{
    uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
    if (fMatches)
    {
        /* Consume the matched events, then drop the lock before writing pInfo. */
        ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, pTmp);

        pInfo->u32EventFlagsOut = fMatches;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        /* Multi-bit request: log the mask only; single-bit: log mask and index. */
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        return VINF_SUCCESS;
    }
    return VERR_TIMEOUT;
}
1201
1202
/**
 * Worker for the WAITEVENT IOCtl: waits for any of the requested host events.
 *
 * @returns VBox status code. pInfo->u32Result carries the
 *          VBOXGUEST_WAITEVENT_* outcome for the caller in all cases.
 * @param   pDevExt             The device extension.
 * @param   pSession            The calling session.
 * @param   pInfo               The wait request (in: mask+timeout, out: events+result).
 * @param   pcbDataReturned     Where to store the amount of returned data. Can be NULL.
 * @param   fInterruptible      Whether the wait may be interrupted by signals.
 */
static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                          VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned, bool fInterruptible)
{
    /* Initialize the output early so every exit path reports something sane. */
    pInfo->u32EventFlagsOut = 0;
    pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
    if (pcbDataReturned)
        *pcbDataReturned = sizeof(*pInfo);

    /*
     * Copy and verify the input mask.
     */
    const uint32_t fReqEvents = pInfo->u32EventMaskIn;
    int iEvent = ASMBitFirstSetU32(fReqEvents) - 1;  /* -1 when the mask is empty */
    if (RT_UNLIKELY(iEvent < 0))
    {
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Check the condition up front, before doing the wait-for-event allocations.
     */
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    int rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
    if (rc == VINF_SUCCESS)
        return rc;  /* events already pending; the worker released the spinlock */
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    /* Zero timeout means poll-only: report timeout without sleeping. */
    if (!pInfo->u32TimeoutIn)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
        return VERR_TIMEOUT;
    }

    PVBOXGUESTWAIT pWait = VBoxGuestWaitAlloc(pDevExt, pSession);
    if (!pWait)
        return VERR_NO_MEMORY;
    pWait->fReqEvents = fReqEvents;

    /*
     * We've got the wait entry now, re-enter the spinlock and check for the condition.
     * If the wait condition is met, return.
     * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
    if (rc == VINF_SUCCESS)
    {
        VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
        return rc;
    }
    VBoxGuestWaitAppend(&pDevExt->WaitList, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event,
                                         pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
    else
        rc = RTSemEventMultiWait(pWait->Event,
                                 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);

    /*
     * There is one special case here and that's when the semaphore is
     * destroyed upon device driver unload. This shouldn't happen of course,
     * but in case it does, just get out of here ASAP.
     */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink the wait item and dispose of it.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    VBoxGuestWaitUnlink(&pDevExt->WaitList, pWait);
    const uint32_t fResEvents = pWait->fResEvents;  /* UINT32_MAX marks a cancelled wait, see CancelAllWaitEvents */
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    /*
     * Now deal with the return code.
     */
    if (    fResEvents
        &&  fResEvents != UINT32_MAX)
    {
        pInfo->u32EventFlagsOut = fResEvents;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        rc = VINF_SUCCESS;
    }
    else if (   fResEvents == UINT32_MAX
             || rc == VERR_INTERRUPTED)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
        rc = VERR_INTERRUPTED;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
    }
    else if (rc == VERR_TIMEOUT)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
    }
    else
    {
        /* Woken up without events and without a recognized error - should not happen. */
        if (RT_SUCCESS(rc))
        {
            static unsigned s_cErrors = 0;
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
            rc = VERR_INTERNAL_ERROR;
        }
        pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc\n", rc));
    }

    return rc;
}
1324
1325
1326static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1327{
1328 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1329#if defined(RT_OS_SOLARIS)
1330 RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
1331#endif
1332 PVBOXGUESTWAIT pWait;
1333 int rc = 0;
1334
1335 Log(("VBoxGuestCommonIOCtl: CANCEL_ALL_WAITEVENTS\n"));
1336
1337 /*
1338 * Walk the event list and wake up anyone with a matching session.
1339 *
1340 * Note! On Solaris we have to do really ugly stuff here because
1341 * RTSemEventMultiSignal cannot be called with interrupts disabled.
1342 * The hack is racy, but what we can we do... (Eliminate this
1343 * termination hack, perhaps?)
1344 */
1345#if defined(RT_OS_SOLARIS)
1346 RTThreadPreemptDisable(&State);
1347 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1348 do
1349 {
1350 for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
1351 if ( pWait->pSession == pSession
1352 && pWait->fResEvents != UINT32_MAX)
1353 {
1354 RTSEMEVENTMULTI hEvent = pWait->Event;
1355 pWait->fResEvents = UINT32_MAX;
1356 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1357 /* HACK ALRET! This races wakeup + reuse! */
1358 rc |= RTSemEventMultiSignal(hEvent);
1359 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1360 break;
1361 }
1362 } while (pWait);
1363 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1364 RTThreadPreemptDisable(&State);
1365#else
1366 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1367 for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
1368 if (pWait->pSession == pSession)
1369 {
1370 pWait->fResEvents = UINT32_MAX;
1371 rc |= RTSemEventMultiSignal(pWait->Event);
1372 }
1373 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1374#endif
1375 Assert(rc == 0);
1376
1377 return VINF_SUCCESS;
1378}
1379
1380
/**
 * Worker for the VMMREQUEST IOCtl: forwards a generic VMMDev request to the host.
 *
 * The request is validated, copied into a physically contiguous buffer,
 * executed, and the result copied back into the caller's buffer.
 *
 * @returns VBox status code (VMMDev failure codes are propagated).
 * @param   pDevExt             The device extension.
 * @param   pSession            The calling session.
 * @param   pReqHdr             The request buffer (input and output).
 * @param   cbData              Size of the caller's buffer.
 * @param   pcbDataReturned     Where to store the amount of returned data. Can be NULL.
 */
static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
{
    Log(("VBoxGuestCommonIOCtl: VMMREQUEST type %d\n", pReqHdr->requestType));

    /*
     * Validate the header and request size.
     */
    const VMMDevRequestType enmType   = pReqHdr->requestType;
    const uint32_t          cbReq     = pReqHdr->size;
    const uint32_t          cbMinSize = vmmdevGetRequestSize(enmType);
    if (cbReq < cbMinSize)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
             cbReq, cbMinSize, enmType));
        return VERR_INVALID_PARAMETER;
    }
    if (cbReq > cbData)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
             cbData, cbReq, enmType));
        return VERR_INVALID_PARAMETER;
    }
    int rc = VbglGRVerify(pReqHdr, cbData);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc!!\n",
             cbData, cbReq, enmType, rc));
        return rc;
    }

    /*
     * Make a copy of the request in the physical memory heap so
     * the VBoxGuestLibrary can more easily deal with the request.
     * (This is really a waste of time since the OS or the OS specific
     * code has already buffered or locked the input/output buffer, but
     * it does makes things a bit simpler wrt to phys address.)
     */
    VMMDevRequestHeader *pReqCopy;
    rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
             cbReq, cbReq, rc));
        return rc;
    }
    memcpy(pReqCopy, pReqHdr, cbReq);

    if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
        pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);

    rc = VbglGRPerform(pReqCopy);
    if (    RT_SUCCESS(rc)
        &&  RT_SUCCESS(pReqCopy->rc))
    {
        Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
        Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);

        /* Copy the whole (possibly updated) request back to the caller. */
        memcpy(pReqHdr, pReqCopy, cbReq);
        if (pcbDataReturned)
            *pcbDataReturned = cbReq;
    }
    else if (RT_FAILURE(rc))
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
    else
    {
        /* Transport succeeded but the host rejected the request. */
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
        rc = pReqCopy->rc;
    }

    VbglGRFree(pReqCopy);
    return rc;
}
1454
1455
1456static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, VBoxGuestFilterMaskInfo *pInfo)
1457{
1458 VMMDevCtlGuestFilterMask *pReq;
1459 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
1460 if (RT_FAILURE(rc))
1461 {
1462 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1463 sizeof(*pReq), sizeof(*pReq), rc));
1464 return rc;
1465 }
1466
1467 pReq->u32OrMask = pInfo->u32OrMask;
1468 pReq->u32NotMask = pInfo->u32NotMask;
1469 pReq->u32NotMask &= ~pDevExt->fFixedEvents; /* don't permit these to be cleared! */
1470 rc = VbglGRPerform(&pReq->header);
1471 if (RT_FAILURE(rc))
1472 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: VbglGRPerform failed, rc=%Rrc!\n", rc));
1473 else if (RT_FAILURE(pReq->header.rc))
1474 {
1475 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: The request failed; VMMDev rc=%Rrc!\n", pReq->header.rc));
1476 rc = pReq->header.rc;
1477 }
1478
1479 VbglGRFree(&pReq->header);
1480 return rc;
1481}
1482
1483#ifdef VBOX_WITH_HGCM
1484
1485AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
1486
/**
 * Worker for VBoxGuestHGCMAsyncWaitCallback*.
 *
 * Waits until the host marks the HGCM request as done
 * (VBOX_HGCM_REQ_DONE set in pHdr->fu32Flags).
 *
 * @returns VBox status code. VINF_SUCCESS when the request completed,
 *          VERR_INTERRUPTED / VERR_TIMEOUT / VERR_SEM_DESTROYED otherwise.
 * @param   pDevExt         The device extension.
 * @param   pHdr            The HGCM request header (shared with the host).
 * @param   fInterruptible  Whether the wait may be interrupted.
 * @param   cMillies        Wait timeout in milliseconds (RT_INDEFINITE_WAIT ok).
 */
static int VBoxGuestHGCMAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
                                                bool fInterruptible, uint32_t cMillies)
{

    /*
     * Check to see if the condition was met by the time we got here.
     *
     * We create a simple poll loop here for dealing with out-of-memory
     * conditions since the caller isn't necessarily able to deal with
     * us returning too early.
     */
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    PVBOXGUESTWAIT pWait;
    for (;;)
    {
        RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
        if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
        {
            RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
            return VINF_SUCCESS;
        }
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

        pWait = VBoxGuestWaitAlloc(pDevExt, NULL);
        if (pWait)
            break;
        if (fInterruptible)
            return VERR_INTERRUPTED;
        RTThreadSleep(1);   /* out of memory: back off briefly and retry */
    }
    pWait->fReqEvents = VMMDEV_EVENT_HGCM;
    pWait->pHGCMReq = pHdr;

    /*
     * Re-enter the spinlock and re-check for the condition.
     * If the condition is met, return.
     * Otherwise link us into the HGCM wait list and go to sleep.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
    {
        VBoxGuestWaitFreeLocked(pDevExt, pWait);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
        return VINF_SUCCESS;
    }
    VBoxGuestWaitAppend(&pDevExt->HGCMWaitList, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    int rc;
    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMillies);
    if (rc == VERR_SEM_DESTROYED)
        return rc;  /* driver unload in progress; pWait is intentionally leaked here */

    /*
     * Unlink, free and return.
     */
    if (    RT_FAILURE(rc)
        &&  rc != VERR_TIMEOUT
        &&  (   !fInterruptible
             || rc != VERR_INTERRUPTED))
        LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));

    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    VBoxGuestWaitUnlink(&pDevExt->HGCMWaitList, pWait);
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
    return rc;
}
1559
1560
1561/**
1562 * This is a callback for dealing with async waits.
1563 *
1564 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1565 */
1566static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
1567{
1568 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1569 Log(("VBoxGuestHGCMAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
1570 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1571 pDevExt,
1572 false /* fInterruptible */,
1573 u32User /* cMillies */);
1574}
1575
1576
/**
 * This is a callback for dealing with interruptible async waits.
 *
 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent, but the
 * wait may be interrupted (the non-interruptible variant is
 * VBoxGuestHGCMAsyncWaitCallback above).
 */
static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr,
                                                                     void *pvUser, uint32_t u32User)
{
    PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
    Log(("VBoxGuestHGCMAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
    return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
                                                pDevExt,
                                                true /* fInterruptible */,
                                                u32User /* cMillies */ );

}
1593
1594
/**
 * Worker for the HGCM_CONNECT IOCtl: connects to an HGCM service and records
 * the new client id in the session's client id table.
 *
 * @returns VBox status code (the HGCM result is returned in pInfo->result).
 * @param   pDevExt             The device extension.
 * @param   pSession            The session that will own the client id.
 * @param   pInfo               The connect request (input/output).
 * @param   pcbDataReturned     Where to store the amount of returned data. Can be NULL.
 */
static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMConnectInfo *pInfo,
                                            size_t *pcbDataReturned)
{
    /*
     * The VbglHGCMConnect call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
         pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
         ? pInfo->Loc.u.host.achName : "<not local host>"));

    int rc = VbglR0HGCMInternalConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
    {
        Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
             pInfo->u32ClientID, pInfo->result, rc));
        if (RT_SUCCESS(pInfo->result))
        {
            /*
             * Append the client id to the client id table.
             * If the table has somehow become filled up, we'll disconnect the session.
             */
            unsigned i;
            RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
            RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
            for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
                if (!pSession->aHGCMClientIds[i])
                {
                    pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
                    break;
                }
            RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
            if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
            {
                /* No free slot: undo the connect and fail. */
                static unsigned s_cErrors = 0;
                if (s_cErrors++ < 32)
                    LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));

                VBoxGuestHGCMDisconnectInfo Info;
                Info.result = 0;
                Info.u32ClientID = pInfo->u32ClientID;
                VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
                return VERR_TOO_MANY_OPEN_FILES;
            }
        }
        if (pcbDataReturned)
            *pcbDataReturned = sizeof(*pInfo);
    }
    return rc;
}
1646
1647
1648static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
1649 size_t *pcbDataReturned)
1650{
1651 /*
1652 * Validate the client id and invalidate its entry while we're in the call.
1653 */
1654 const uint32_t u32ClientId = pInfo->u32ClientID;
1655 unsigned i;
1656 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1657 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1658 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1659 if (pSession->aHGCMClientIds[i] == u32ClientId)
1660 {
1661 pSession->aHGCMClientIds[i] = UINT32_MAX;
1662 break;
1663 }
1664 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1665 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1666 {
1667 static unsigned s_cErrors = 0;
1668 if (s_cErrors++ > 32)
1669 LogRel(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", u32ClientId));
1670 return VERR_INVALID_HANDLE;
1671 }
1672
1673 /*
1674 * The VbglHGCMConnect call will invoke the callback if the HGCM
1675 * call is performed in an ASYNC fashion. The function is not able
1676 * to deal with cancelled requests.
1677 */
1678 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
1679 int rc = VbglR0HGCMInternalDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1680 if (RT_SUCCESS(rc))
1681 {
1682 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
1683 if (pcbDataReturned)
1684 *pcbDataReturned = sizeof(*pInfo);
1685 }
1686
1687 /* Update the client id array according to the result. */
1688 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1689 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
1690 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
1691 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1692
1693 return rc;
1694}
1695
1696
1697static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt,
1698 PVBOXGUESTSESSION pSession,
1699 VBoxGuestHGCMCallInfo *pInfo,
1700 uint32_t cMillies, bool fInterruptible, bool f32bit,
1701 size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
1702{
1703 /*
1704 * Some more validations.
1705 */
1706 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
1707 {
1708 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
1709 return VERR_INVALID_PARAMETER;
1710 }
1711 size_t cbActual = cbExtra + sizeof(*pInfo);
1712#ifdef RT_ARCH_AMD64
1713 if (f32bit)
1714 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
1715 else
1716#endif
1717 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
1718 if (cbData < cbActual)
1719 {
1720 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
1721 cbData, cbActual));
1722 return VERR_INVALID_PARAMETER;
1723 }
1724
1725 /*
1726 * Validate the client id.
1727 */
1728 const uint32_t u32ClientId = pInfo->u32ClientID;
1729 unsigned i;
1730 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1731 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1732 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1733 if (pSession->aHGCMClientIds[i] == u32ClientId)
1734 break;
1735 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1736 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
1737 {
1738 static unsigned s_cErrors = 0;
1739 if (s_cErrors++ > 32)
1740 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
1741 return VERR_INVALID_HANDLE;
1742 }
1743
1744 /*
1745 * The VbglHGCMCall call will invoke the callback if the HGCM
1746 * call is performed in an ASYNC fashion. This function can
1747 * deal with cancelled requests, so we let user more requests
1748 * be interruptible (should add a flag for this later I guess).
1749 */
1750 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
1751 int rc;
1752 uint32_t fFlags = pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
1753#ifdef RT_ARCH_AMD64
1754 if (f32bit)
1755 {
1756 if (fInterruptible)
1757 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1758 else
1759 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1760 }
1761 else
1762#endif
1763 {
1764 if (fInterruptible)
1765 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1766 else
1767 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1768 }
1769 if (RT_SUCCESS(rc))
1770 {
1771 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
1772 if (pcbDataReturned)
1773 *pcbDataReturned = cbActual;
1774 }
1775 else
1776 {
1777 if ( rc != VERR_INTERRUPTED
1778 && rc != VERR_TIMEOUT)
1779 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
1780 else
1781 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
1782 }
1783 return rc;
1784}
1785
1786
1787/**
1788 * @returns VBox status code. Unlike the other HGCM IOCtls this will combine
1789 * the VbglHGCMConnect/Disconnect return code with the Info.result.
1790 *
1791 * @param pDevExt The device extension.
1792 * @param pu32ClientId The client id.
1793 * @param pcbDataReturned Where to store the amount of returned data. Can
1794 * be NULL.
1795 */
1796static int VBoxGuestCommonIOCtl_HGCMClipboardReConnect(PVBOXGUESTDEVEXT pDevExt, uint32_t *pu32ClientId, size_t *pcbDataReturned)
1797{
1798 int rc;
1799 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: Current u32ClientId=%RX32\n", pDevExt->u32ClipboardClientId));
1800
1801
1802 /*
1803 * If there is an old client, try disconnect it first.
1804 */
1805 if (pDevExt->u32ClipboardClientId != 0)
1806 {
1807 VBoxGuestHGCMDisconnectInfo Info;
1808 Info.result = VERR_WRONG_ORDER;
1809 Info.u32ClientID = pDevExt->u32ClipboardClientId;
1810 rc = VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1811 if (RT_SUCCESS(rc))
1812 {
1813 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. VbglHGCMDisconnect -> rc=%Rrc\n", rc));
1814 return rc;
1815 }
1816 if (RT_FAILURE((int32_t)Info.result))
1817 {
1818 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. Info.result=%Rrc\n", rc));
1819 return Info.result;
1820 }
1821 pDevExt->u32ClipboardClientId = 0;
1822 }
1823
1824 /*
1825 * Try connect.
1826 */
1827 VBoxGuestHGCMConnectInfo Info;
1828 Info.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
1829 strcpy(Info.Loc.u.host.achName, "VBoxSharedClipboard");
1830 Info.u32ClientID = 0;
1831 Info.result = VERR_WRONG_ORDER;
1832
1833 rc = VbglR0HGCMInternalConnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1834 if (RT_FAILURE(rc))
1835 {
1836 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1837 return rc;
1838 }
1839 if (RT_FAILURE(Info.result))
1840 {
1841 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1842 return rc;
1843 }
1844
1845 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: connected successfully u32ClientId=%RX32\n", Info.u32ClientID));
1846
1847 pDevExt->u32ClipboardClientId = Info.u32ClientID;
1848 *pu32ClientId = Info.u32ClientID;
1849 if (pcbDataReturned)
1850 *pcbDataReturned = sizeof(uint32_t);
1851
1852 return VINF_SUCCESS;
1853}
1854
1855#endif /* VBOX_WITH_HGCM */
1856
/**
 * Handle VBOXGUEST_IOCTL_CHECK_BALLOON from R3.
 *
 * Ask the host for the size of the balloon and try to set it accordingly. If
 * this approach fails because it's not supported, return with fHandleInR3 set
 * and let the user land supply memory we can lock via the other ioctl.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt             The device extension.
 * @param   pSession            The session.
 * @param   pInfo               The output buffer.
 * @param   pcbDataReturned     Where to store the amount of returned data. Can
 *                              be NULL.
 */
static int VBoxGuestCommonIOCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                                   VBoxGuestCheckBalloonInfo *pInfo, size_t *pcbDataReturned)
{
    VMMDevGetMemBalloonChangeRequest *pReq;
    int rc;

    Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON\n"));
    rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    AssertRCReturn(rc, rc);

    /*
     * The first user trying to query/change the balloon becomes the
     * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
     */
    if (    pDevExt->MemBalloon.pOwner != pSession
        &&  pDevExt->MemBalloon.pOwner == NULL)
        pDevExt->MemBalloon.pOwner = pSession;

    if (pDevExt->MemBalloon.pOwner == pSession)
    {
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
        if (RT_SUCCESS(rc))
        {
            /*
             * This is a response to that event. Setting this bit means that
             * we request the value from the host and change the guest memory
             * balloon according to this value.
             */
            pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
            rc = VbglGRPerform(&pReq->header);
            if (RT_SUCCESS(rc))
            {
                /* cMaxChunks is fixed for the VM's lifetime; it may only transition from 0 (unset). */
                Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
                pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;

                pInfo->cBalloonChunks = pReq->cBalloonChunks;
                pInfo->fHandleInR3    = false;

                /* May set pInfo->fHandleInR3 when kernel-side ballooning isn't supported. */
                rc = vboxGuestSetBalloonSizeKernel(pDevExt, pReq->cBalloonChunks, &pInfo->fHandleInR3);
                /* Ignore various out of memory failures. */
                if (   rc == VERR_NO_MEMORY
                    || rc == VERR_NO_PHYS_MEMORY
                    || rc == VERR_NO_CONT_MEMORY)
                    rc = VINF_SUCCESS;

                if (pcbDataReturned)
                    *pcbDataReturned = sizeof(VBoxGuestCheckBalloonInfo);
            }
            else
                LogRel(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON: VbglGRPerform failed. rc=%Rrc\n", rc));
            VbglGRFree(&pReq->header);
        }
    }
    else
        rc = VERR_PERMISSION_DENIED;    /* another session owns the balloon */

    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
    Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON returns %Rrc\n", rc));
    return rc;
}
1932
1933
1934/**
1935 * Handle a request for changing the memory balloon.
1936 *
1937 * @returns VBox status code.
1938 *
1939 * @param pDevExt The device extention.
1940 * @param pSession The session.
1941 * @param pInfo The change request structure (input).
1942 * @param pcbDataReturned Where to store the amount of returned data. Can
1943 * be NULL.
1944 */
1945static int VBoxGuestCommonIOCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1946 VBoxGuestChangeBalloonInfo *pInfo, size_t *pcbDataReturned)
1947{
1948 int rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
1949 AssertRCReturn(rc, rc);
1950
1951 if (!pDevExt->MemBalloon.fUseKernelAPI)
1952 {
1953 /*
1954 * The first user trying to query/change the balloon becomes the
1955 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
1956 */
1957 if ( pDevExt->MemBalloon.pOwner != pSession
1958 && pDevExt->MemBalloon.pOwner == NULL)
1959 pDevExt->MemBalloon.pOwner = pSession;
1960
1961 if (pDevExt->MemBalloon.pOwner == pSession)
1962 {
1963 rc = vboxGuestSetBalloonSizeFromUser(pDevExt, pSession, pInfo->u64ChunkAddr, pInfo->fInflate);
1964 if (pcbDataReturned)
1965 *pcbDataReturned = 0;
1966 }
1967 else
1968 rc = VERR_PERMISSION_DENIED;
1969 }
1970 else
1971 rc = VERR_PERMISSION_DENIED;
1972
1973 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
1974 return rc;
1975}
1976
1977
1978/**
1979 * Guest backdoor logging.
1980 *
1981 * @returns VBox status code.
1982 *
1983 * @param pch The log message (need not be NULL terminated).
1984 * @param cbData Size of the buffer.
1985 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
1986 */
1987static int VBoxGuestCommonIOCtl_Log(const char *pch, size_t cbData, size_t *pcbDataReturned)
1988{
1989 NOREF(pch);
1990 NOREF(cbData);
1991 Log(("%.*s", cbData, pch));
1992 if (pcbDataReturned)
1993 *pcbDataReturned = 0;
1994 return VINF_SUCCESS;
1995}
1996
1997
1998/**
1999 * Common IOCtl for user to kernel and kernel to kernel communcation.
2000 *
2001 * This function only does the basic validation and then invokes
2002 * worker functions that takes care of each specific function.
2003 *
2004 * @returns VBox status code.
2005 *
2006 * @param iFunction The requested function.
2007 * @param pDevExt The device extension.
2008 * @param pSession The client session.
2009 * @param pvData The input/output data buffer. Can be NULL depending on the function.
2010 * @param cbData The max size of the data buffer.
2011 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2012 */
2013int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2014 void *pvData, size_t cbData, size_t *pcbDataReturned)
2015{
2016 Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
2017 iFunction, pDevExt, pSession, pvData, cbData));
2018
2019 /*
2020 * Make sure the returned data size is set to zero.
2021 */
2022 if (pcbDataReturned)
2023 *pcbDataReturned = 0;
2024
2025 /*
2026 * Define some helper macros to simplify validation.
2027 */
2028#define CHECKRET_RING0(mnemonic) \
2029 do { \
2030 if (pSession->R0Process != NIL_RTR0PROCESS) \
2031 { \
2032 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
2033 pSession->Process, (uintptr_t)pSession->R0Process)); \
2034 return VERR_PERMISSION_DENIED; \
2035 } \
2036 } while (0)
2037#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
2038 do { \
2039 if (cbData < (cbMin)) \
2040 { \
2041 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
2042 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
2043 return VERR_BUFFER_OVERFLOW; \
2044 } \
2045 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
2046 { \
2047 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": Invalid pointer %p\n", pvData)); \
2048 return VERR_INVALID_POINTER; \
2049 } \
2050 } while (0)
2051
2052
2053 /*
2054 * Deal with variably sized requests first.
2055 */
2056 int rc = VINF_SUCCESS;
2057 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
2058 {
2059 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
2060 rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
2061 }
2062#ifdef VBOX_WITH_HGCM
2063 /*
2064 * These ones are a bit tricky.
2065 */
2066 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
2067 {
2068 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2069 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2070 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2071 fInterruptible, false /*f32bit*/,
2072 0, cbData, pcbDataReturned);
2073 }
2074 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
2075 {
2076 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2077 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2078 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2079 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2080 false /*f32bit*/,
2081 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2082 }
2083# ifdef RT_ARCH_AMD64
2084 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
2085 {
2086 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2087 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2088 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2089 fInterruptible, true /*f32bit*/,
2090 0, cbData, pcbDataReturned);
2091 }
2092 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
2093 {
2094 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2095 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2096 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2097 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2098 true /*f32bit*/,
2099 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2100 }
2101# endif
2102#endif /* VBOX_WITH_HGCM */
2103 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
2104 {
2105 CHECKRET_MIN_SIZE("LOG", 1);
2106 rc = VBoxGuestCommonIOCtl_Log((char *)pvData, cbData, pcbDataReturned);
2107 }
2108 else
2109 {
2110 switch (iFunction)
2111 {
2112 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
2113 CHECKRET_RING0("GETVMMDEVPORT");
2114 CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
2115 rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
2116 break;
2117
2118 case VBOXGUEST_IOCTL_WAITEVENT:
2119 CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
2120 rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, pSession, (VBoxGuestWaitEventInfo *)pvData,
2121 pcbDataReturned, pSession->R0Process != NIL_RTR0PROCESS);
2122 break;
2123
2124 case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
2125 if (cbData != 0)
2126 rc = VERR_INVALID_PARAMETER;
2127 rc = VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);
2128 break;
2129
2130 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
2131 CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
2132 rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, (VBoxGuestFilterMaskInfo *)pvData);
2133 break;
2134
2135#ifdef VBOX_WITH_HGCM
2136 case VBOXGUEST_IOCTL_HGCM_CONNECT:
2137# ifdef RT_ARCH_AMD64
2138 case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
2139# endif
2140 CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
2141 rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
2142 break;
2143
2144 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
2145# ifdef RT_ARCH_AMD64
2146 case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
2147# endif
2148 CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
2149 rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
2150 break;
2151
2152 case VBOXGUEST_IOCTL_CLIPBOARD_CONNECT:
2153 CHECKRET_MIN_SIZE("CLIPBOARD_CONNECT", sizeof(uint32_t));
2154 rc = VBoxGuestCommonIOCtl_HGCMClipboardReConnect(pDevExt, (uint32_t *)pvData, pcbDataReturned);
2155 break;
2156#endif /* VBOX_WITH_HGCM */
2157
2158 case VBOXGUEST_IOCTL_CHECK_BALLOON:
2159 CHECKRET_MIN_SIZE("CHECK_MEMORY_BALLOON", sizeof(VBoxGuestCheckBalloonInfo));
2160 rc = VBoxGuestCommonIOCtl_CheckMemoryBalloon(pDevExt, pSession, (VBoxGuestCheckBalloonInfo *)pvData, pcbDataReturned);
2161 break;
2162
2163 case VBOXGUEST_IOCTL_CHANGE_BALLOON:
2164 CHECKRET_MIN_SIZE("CHANGE_MEMORY_BALLOON", sizeof(VBoxGuestChangeBalloonInfo));
2165 rc = VBoxGuestCommonIOCtl_ChangeMemoryBalloon(pDevExt, pSession, (VBoxGuestChangeBalloonInfo *)pvData, pcbDataReturned);
2166 break;
2167
2168 default:
2169 {
2170 LogRel(("VBoxGuestCommonIOCtl: Unknown request iFunction=%#x Stripped size=%#x\n", iFunction,
2171 VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
2172 rc = VERR_NOT_SUPPORTED;
2173 break;
2174 }
2175 }
2176 }
2177
2178 Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
2179 return rc;
2180}
2181
2182
2183
/**
 * Common interrupt service routine.
 *
 * This deals with events and with waking up threads waiting for those events.
 *
 * @returns true if it was our interrupt, false if it wasn't.
 * @param   pDevExt     The VBoxGuest device extension.
 */
bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
{
    bool fMousePositionChanged = false;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    /* Preallocated acknowledge request; the host fills in events/rc when we
       write its physical address to the request port below. */
    VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
    int rc = 0;
    bool fOurIrq;

    /*
     * Make sure we've initialized the device extension (pIrqAckEvents is
     * allocated during init; a NULL here means an interrupt before setup).
     */
    if (RT_UNLIKELY(!pReq))
        return false;

    /*
     * Enter the spinlock and check if it's our IRQ or not.
     *
     * Note! Solaris cannot do RTSemEventMultiSignal with interrupts disabled
     *       so we're entering the spinlock without disabling them. This works
     *       fine as long as we never called in a nested fashion.
     */
#if defined(RT_OS_SOLARIS)
    RTSpinlockAcquire(pDevExt->EventSpinlock, &Tmp);
#else
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
#endif
    fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
    if (fOurIrq)
    {
        /*
         * Acknowledge events.
         * We don't use VbglGRPerform here as it may take another spinlocks.
         */
        pReq->header.rc = VERR_INTERNAL_ERROR;
        pReq->events    = 0;
        ASMCompilerBarrier();
        /* Hand the physical address of the ack request to the VMMDev request
           port; the host completes it in place (header.rc + events). */
        ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
        ASMCompilerBarrier();   /* paranoia */
        if (RT_SUCCESS(pReq->header.rc))
        {
            uint32_t        fEvents = pReq->events;
            PVBOXGUESTWAIT  pWait;

            Log(("VBoxGuestCommonISR: acknowledge events succeeded %#RX32\n", fEvents));

            /*
             * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
             * Handle it outside the spinlock (see below) and strip it here.
             */
            if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
            {
                fMousePositionChanged = true;
                fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
            }

#ifdef VBOX_WITH_HGCM
            /*
             * The HGCM event/list is kind of different in that we evaluate all entries.
             * Every waiter whose request the host marked done gets signalled.
             */
            if (fEvents & VMMDEV_EVENT_HGCM)
            {
                for (pWait = pDevExt->HGCMWaitList.pHead; pWait; pWait = pWait->pNext)
                    if (    !pWait->fResEvents
                        &&  (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE))
                    {
                        pWait->fResEvents = VMMDEV_EVENT_HGCM;
                        rc |= RTSemEventMultiSignal(pWait->Event);
                    }
                fEvents &= ~VMMDEV_EVENT_HGCM;
            }
#endif

            /*
             * Normal FIFO waiter evaluation.  Merge in previously undelivered
             * events; each waiter consumes the events it asked for, and
             * whatever nobody wanted is stored back as pending.
             */
            fEvents |= pDevExt->f32PendingEvents;
            for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
                if (    (pWait->fReqEvents & fEvents)
                    &&  !pWait->fResEvents)
                {
                    pWait->fResEvents = pWait->fReqEvents & fEvents;
                    fEvents &= ~pWait->fResEvents;
                    rc |= RTSemEventMultiSignal(pWait->Event);
                    if (!fEvents)
                        break;
                }
            ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
        }
        else /* something is serious wrong... */
            Log(("VBoxGuestCommonISR: acknowledge events failed rc=%Rrc (events=%#x)!!\n",
                 pReq->header.rc, pReq->events));
    }
    else
        LogFlow(("VBoxGuestCommonISR: not ours\n"));

    /*
     * Work the poll and async notification queues on OSes that implements that.
     * Do this outside the spinlock to prevent some recursive spinlocking.
     */
#if defined(RT_OS_SOLARIS)
    RTSpinlockRelease(pDevExt->EventSpinlock, &Tmp);
#else
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
#endif

    if (fMousePositionChanged)
    {
        ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
        VBoxGuestNativeISRMousePollEvent(pDevExt);
    }

    /* rc accumulates the RTSemEventMultiSignal statuses; all should be VINF_SUCCESS (0). */
    Assert(rc == 0);
    return fOurIrq;
}
2305
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette