VirtualBox

source: vbox/trunk/src/VBox/Devices/VMMDev/VMMDevHGCM.cpp@ 79812

Last change on this file since 79812 was 77243, checked in by vboxsync, 6 years ago

HGCM,SharedFolders: Added new variation on the HGCM page list type that does not use a bounce buffer. bugref:9172

  • Added VMMDevHGCMParmType_NoBouncePageList.
  • Made VMMDevHGCMParmType_Embedded
  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 101.8 KB
1/* $Id: VMMDevHGCM.cpp 77243 2019-02-10 22:44:00Z vboxsync $ */
2/** @file
3 * VMMDev - HGCM - Host-Guest Communication Manager Device.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEV_VMM
23#include <iprt/alloc.h>
24#include <iprt/asm.h>
25#include <iprt/assert.h>
26#include <iprt/param.h>
27#include <iprt/string.h>
28
29#include <VBox/AssertGuest.h>
30#include <VBox/err.h>
31#include <VBox/hgcmsvc.h>
32#include <VBox/log.h>
33
34#include "VMMDevHGCM.h"
35
36#ifdef DEBUG
37# define VBOX_STRICT_GUEST
38#endif
39
40#ifdef VBOX_WITH_DTRACE
41# include "dtrace/VBoxDD.h"
42#else
43# define VBOXDD_HGCMCALL_ENTER(a,b,c,d) do { } while (0)
44# define VBOXDD_HGCMCALL_COMPLETED_REQ(a,b) do { } while (0)
45# define VBOXDD_HGCMCALL_COMPLETED_EMT(a,b) do { } while (0)
46# define VBOXDD_HGCMCALL_COMPLETED_DONE(a,b,c,d) do { } while (0)
47#endif
48
49
50/*********************************************************************************************************************************
51* Structures and Typedefs *
52*********************************************************************************************************************************/
53typedef enum VBOXHGCMCMDTYPE
54{
55 VBOXHGCMCMDTYPE_LOADSTATE = 0,
56 VBOXHGCMCMDTYPE_CONNECT,
57 VBOXHGCMCMDTYPE_DISCONNECT,
58 VBOXHGCMCMDTYPE_CALL,
59 VBOXHGCMCMDTYPE_SizeHack = 0x7fffffff
60} VBOXHGCMCMDTYPE;
61
62/**
63 * Information about a 32 or 64 bit parameter.
64 */
65typedef struct VBOXHGCMPARMVAL
66{
67 /** Actual value. Both 32 and 64 bit is saved here. */
68 uint64_t u64Value;
69
70 /** Offset from the start of the request where the value is stored. */
71 uint32_t offValue;
72
73 /** Size of the value: 4 for 32 bit and 8 for 64 bit. */
74 uint32_t cbValue;
75
76} VBOXHGCMPARMVAL;
77
78/**
79 * Information about a pointer parameter.
80 */
81typedef struct VBOXHGCMPARMPTR
82{
83 /** Size of the buffer described by the pointer parameter. */
84 uint32_t cbData;
85
86/** @todo save 8 bytes here by putting offFirstPage, cPages, and fu32Direction
87 * into bitfields like in VBOXHGCMPARMPAGES. */
88 /** Offset in the first physical page of the region. */
89 uint32_t offFirstPage;
90
91 /** How many pages. */
92 uint32_t cPages;
93
94 /** How the buffer should be copied VBOX_HGCM_F_PARM_*. */
95 uint32_t fu32Direction;
96
97 /** Pointer to array of the GC physical addresses for these pages.
98 * It is assumed that the physical address of the locked resident guest page
99 * does not change. */
100 RTGCPHYS *paPages;
101
102 /** For single page requests. */
103 RTGCPHYS GCPhysSinglePage;
104
105} VBOXHGCMPARMPTR;
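/* Note: when a buffer fits in a single page, the code points paPages at
   GCPhysSinglePage instead of allocating a separate page array (see
   vmmdevHGCMCallFetchGuestParms). */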
106
107
108/**
109 * Pages w/o bounce buffering.
110 */
111typedef struct VBOXHGCMPARMPAGES
112{
113 /** The buffer size. */
114 uint32_t cbData;
115 /** Start of buffer offset into the first page. */
116 uint32_t offFirstPage : 12;
117 /** VBOX_HGCM_F_PARM_XXX flags. */
118 uint32_t fFlags : 3;
119 /** Set if we've locked all the pages. */
120 uint32_t fLocked : 1;
121 /** Number of pages. */
122 uint32_t cPages : 16;
123 /** Array of page locks followed by array of page pointers, the first page
124 * pointer is adjusted by offFirstPage. */
125 PPGMPAGEMAPLOCK paPgLocks;
126} VBOXHGCMPARMPAGES;
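/* Note: paPgLocks is a single allocation holding cPages PGMPAGEMAPLOCK entries
   immediately followed by cPages host page pointers (void *); the pointer array
   therefore starts at &paPgLocks[cPages] (see vmmdevHGCMCallFetchGuestParms and
   vmmdevHGCMInitHostParameters). */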
127
128/**
129 * Information about a guest HGCM parameter.
130 */
131typedef struct VBOXHGCMGUESTPARM
132{
133 /** The parameter type. */
134 HGCMFunctionParameterType enmType;
135
136 union
137 {
138 VBOXHGCMPARMVAL val;
139 VBOXHGCMPARMPTR ptr;
140 VBOXHGCMPARMPAGES Pages;
141 } u;
142
143} VBOXHGCMGUESTPARM;
144
145typedef struct VBOXHGCMCMD
146{
147 /** Active commands, list is protected by critsectHGCMCmdList. */
148 RTLISTNODE node;
149
150 /** The type of the command (VBOXHGCMCMDTYPE). */
151 uint8_t enmCmdType;
152
153 /** Whether the command was cancelled by the guest. */
154 bool fCancelled;
155
156 /** Set if allocated from the memory cache, clear if heap. */
157 bool fMemCache;
158
159 /** Whether the command was restored from saved state. */
160 bool fRestored : 1;
161 /** Whether this command has a no-bounce page list and needs to be restored
162 * from guest memory the old fashioned way. */
163 bool fRestoreFromGuestMem : 1;
164
165 /** Copy of VMMDevRequestHeader::fRequestor.
166 * @note Only valid if VBOXGSTINFO2_F_REQUESTOR_INFO is set in
167 * VMMDevState.guestInfo2.fFeatures. */
168 uint32_t fRequestor;
169
170 /** GC physical address of the guest request. */
171 RTGCPHYS GCPhys;
172
173 /** Request packet size. */
174 uint32_t cbRequest;
175
176 /** The type of the guest request. */
177 VMMDevRequestType enmRequestType;
178
179 /** Pointer to the locked request, NULL if not locked. */
180 void *pvReqLocked;
181 /** The PGM lock for GCPhys if pvReqLocked is not NULL. */
182 PGMPAGEMAPLOCK ReqMapLock;
183
184 /** The STAM_GET_TS() value when the request arrived. */
185 uint64_t tsArrival;
186 /** The STAM_GET_TS() value when the hgcmCompleted() is called. */
187 uint64_t tsComplete;
188
189 union
190 {
191 struct
192 {
193 uint32_t u32ClientID;
194 HGCMServiceLocation *pLoc; /**< Allocated after this structure. */
195 } connect;
196
197 struct
198 {
199 uint32_t u32ClientID;
200 } disconnect;
201
202 struct
203 {
204 /* Number of elements in paGuestParms and paHostParms arrays. */
205 uint32_t cParms;
206
207 uint32_t u32ClientID;
208
209 uint32_t u32Function;
210
211 /** Pointer to information about guest parameters in case of a Call request.
212 * Follows this structure in the same memory block.
213 */
214 VBOXHGCMGUESTPARM *paGuestParms;
215
216 /** Pointer to converted host parameters in case of a Call request.
217 * Follows this structure in the same memory block.
218 */
219 VBOXHGCMSVCPARM *paHostParms;
220
221 /* VBOXHGCMGUESTPARM[] */
222 /* VBOXHGCMSVCPARM[] */
223 } call;
224 } u;
225} VBOXHGCMCMD;
226
227
228/**
229 * Version for the memory cache.
230 */
231typedef struct VBOXHGCMCMDCACHED
232{
233 VBOXHGCMCMD Core; /**< 112 */
234 VBOXHGCMGUESTPARM aGuestParms[6]; /**< 40 * 6 = 240 */
235 VBOXHGCMSVCPARM aHostParms[6]; /**< 24 * 6 = 144 */
236} VBOXHGCMCMDCACHED; /**< 112+240+144 = 496 */
237AssertCompile(sizeof(VBOXHGCMCMD) <= 112);
238AssertCompile(sizeof(VBOXHGCMGUESTPARM) <= 40);
239AssertCompile(sizeof(VBOXHGCMSVCPARM) <= 24);
240AssertCompile(sizeof(VBOXHGCMCMDCACHED) <= 512);
241AssertCompile(sizeof(VBOXHGCMCMDCACHED) > sizeof(VBOXHGCMCMD) + sizeof(HGCMServiceLocation));
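/* Note: a single cache entry thus covers any connect/disconnect request and any
   call with up to RT_ELEMENTS(aGuestParms) = 6 parameters; larger calls fall back
   to RTMemAllocZ in vmmdevHGCMCmdAlloc and are counted by StatHgcmLargeCmdAllocs. */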
242
243
244static int vmmdevHGCMCmdListLock(PVMMDEV pThis)
245{
246 int rc = RTCritSectEnter(&pThis->critsectHGCMCmdList);
247 AssertRC(rc);
248 return rc;
249}
250
251static void vmmdevHGCMCmdListUnlock(PVMMDEV pThis)
252{
253 int rc = RTCritSectLeave(&pThis->critsectHGCMCmdList);
254 AssertRC(rc);
255}
256
257/** Allocate and initialize VBOXHGCMCMD structure for HGCM request.
258 *
259 * @returns Pointer to the command on success, NULL otherwise.
260 * @param pThis The VMMDev instance data.
261 * @param enmCmdType Type of the command.
262 * @param GCPhys The guest physical address of the HGCM request.
263 * @param cbRequest The size of the HGCM request.
264 * @param cParms Number of HGCM parameters for VBOXHGCMCMDTYPE_CALL command.
265 * @param fRequestor The VMMDevRequestHeader::fRequestor value.
266 */
267static PVBOXHGCMCMD vmmdevHGCMCmdAlloc(PVMMDEV pThis, VBOXHGCMCMDTYPE enmCmdType, RTGCPHYS GCPhys,
268 uint32_t cbRequest, uint32_t cParms, uint32_t fRequestor)
269{
270#if 1
271 /*
272 * Try to use the cache.
273 */
274 VBOXHGCMCMDCACHED *pCmdCached;
275 AssertCompile(sizeof(*pCmdCached) >= sizeof(VBOXHGCMCMD) + sizeof(HGCMServiceLocation));
276 if (cParms <= RT_ELEMENTS(pCmdCached->aGuestParms))
277 {
278 int rc = RTMemCacheAllocEx(pThis->hHgcmCmdCache, (void **)&pCmdCached);
279 if (RT_SUCCESS(rc))
280 {
281 RT_ZERO(*pCmdCached);
282 pCmdCached->Core.fMemCache = true;
283 pCmdCached->Core.GCPhys = GCPhys;
284 pCmdCached->Core.cbRequest = cbRequest;
285 pCmdCached->Core.enmCmdType = enmCmdType;
286 pCmdCached->Core.fRequestor = fRequestor;
287 if (enmCmdType == VBOXHGCMCMDTYPE_CALL)
288 {
289 pCmdCached->Core.u.call.cParms = cParms;
290 pCmdCached->Core.u.call.paGuestParms = pCmdCached->aGuestParms;
291 pCmdCached->Core.u.call.paHostParms = pCmdCached->aHostParms;
292 }
293 else if (enmCmdType == VBOXHGCMCMDTYPE_CONNECT)
294 pCmdCached->Core.u.connect.pLoc = (HGCMServiceLocation *)(&pCmdCached->Core + 1);
295
296 return &pCmdCached->Core;
297 }
298 return NULL;
299 }
300 STAM_REL_COUNTER_INC(&pThis->StatHgcmLargeCmdAllocs);
301
302#else
303 RT_NOREF(pThis);
304#endif
305
306 /* Size of required memory buffer. */
307 const uint32_t cbCmd = sizeof(VBOXHGCMCMD) + cParms * (sizeof(VBOXHGCMGUESTPARM) + sizeof(VBOXHGCMSVCPARM))
308 + (enmCmdType == VBOXHGCMCMDTYPE_CONNECT ? sizeof(HGCMServiceLocation) : 0);
309
310 PVBOXHGCMCMD pCmd = (PVBOXHGCMCMD)RTMemAllocZ(cbCmd);
311 if (pCmd)
312 {
313 pCmd->enmCmdType = enmCmdType;
314 pCmd->GCPhys = GCPhys;
315 pCmd->cbRequest = cbRequest;
316 pCmd->fRequestor = fRequestor;
317
318 if (enmCmdType == VBOXHGCMCMDTYPE_CALL)
319 {
320 pCmd->u.call.cParms = cParms;
321 if (cParms)
322 {
323 pCmd->u.call.paGuestParms = (VBOXHGCMGUESTPARM *)((uint8_t *)pCmd
324 + sizeof(struct VBOXHGCMCMD));
325 pCmd->u.call.paHostParms = (VBOXHGCMSVCPARM *)((uint8_t *)pCmd->u.call.paGuestParms
326 + cParms * sizeof(VBOXHGCMGUESTPARM));
327 }
328 }
329 else if (enmCmdType == VBOXHGCMCMDTYPE_CONNECT)
330 pCmd->u.connect.pLoc = (HGCMServiceLocation *)(pCmd + 1);
331 }
332 return pCmd;
333}
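/* Illustrative memory layout of a heap-allocated VBOXHGCMCMDTYPE_CALL command
 * (the cached variant uses the fixed arrays in VBOXHGCMCMDCACHED instead):
 *
 *   +--------------+---------------------------+--------------------------+
 *   | VBOXHGCMCMD  | VBOXHGCMGUESTPARM[cParms] | VBOXHGCMSVCPARM[cParms]  |
 *   +--------------+---------------------------+--------------------------+
 *
 * For example (hypothetical values), a two-parameter call would normally be
 * served from the cache since 2 <= 6:
 *   PVBOXHGCMCMD pCmd = vmmdevHGCMCmdAlloc(pThis, VBOXHGCMCMDTYPE_CALL, GCPhys, cbReq, 2, fRequestor);
 */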
334
335/** Deallocate VBOXHGCMCMD memory.
336 *
337 * @param pThis The VMMDev instance data.
338 * @param pCmd Command to deallocate.
339 */
340static void vmmdevHGCMCmdFree(PVMMDEV pThis, PVBOXHGCMCMD pCmd)
341{
342 if (pCmd)
343 {
344 if (pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL)
345 {
346 uint32_t i;
347 for (i = 0; i < pCmd->u.call.cParms; ++i)
348 {
349 VBOXHGCMSVCPARM * const pHostParm = &pCmd->u.call.paHostParms[i];
350 VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
351
352 if (pHostParm->type == VBOX_HGCM_SVC_PARM_PTR)
353 RTMemFree(pHostParm->u.pointer.addr);
354
355 if ( pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_In
356 || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_Out
357 || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr
358 || pGuestParm->enmType == VMMDevHGCMParmType_PageList
359 || pGuestParm->enmType == VMMDevHGCMParmType_ContiguousPageList)
360 {
361 if (pGuestParm->u.ptr.paPages != &pGuestParm->u.ptr.GCPhysSinglePage)
362 RTMemFree(pGuestParm->u.ptr.paPages);
363 }
364 else if (pGuestParm->enmType == VMMDevHGCMParmType_NoBouncePageList)
365 {
366 if (pGuestParm->u.Pages.paPgLocks)
367 {
368 if (pGuestParm->u.Pages.fLocked)
369 PDMDevHlpPhysBulkReleasePageMappingLocks(pThis->pDevInsR3, pGuestParm->u.Pages.cPages,
370 pGuestParm->u.Pages.paPgLocks);
371 RTMemFree(pGuestParm->u.Pages.paPgLocks);
372 pGuestParm->u.Pages.paPgLocks = NULL;
373 }
374 }
375 }
376 }
377
378 if (pCmd->pvReqLocked)
379 {
380 PDMDevHlpPhysReleasePageMappingLock(pThis->pDevInsR3, &pCmd->ReqMapLock);
381 pCmd->pvReqLocked = NULL;
382 }
383
384#if 1
385 if (pCmd->fMemCache)
386 RTMemCacheFree(pThis->hHgcmCmdCache, pCmd);
387 else
388#endif
389 RTMemFree(pCmd);
390 }
391}
392
393/** Add VBOXHGCMCMD to the list of pending commands.
394 *
395 * @returns VBox status code.
396 * @param pThis The VMMDev instance data.
397 * @param pCmd Command to add.
398 */
399static int vmmdevHGCMAddCommand(PVMMDEV pThis, PVBOXHGCMCMD pCmd)
400{
401 int rc = vmmdevHGCMCmdListLock(pThis);
402 AssertRCReturn(rc, rc);
403
404 LogFlowFunc(("%p type %d\n", pCmd, pCmd->enmCmdType));
405
406 RTListPrepend(&pThis->listHGCMCmd, &pCmd->node);
407
408 /* Automatically enable HGCM events if there are HGCM commands. */
409 if ( pCmd->enmCmdType == VBOXHGCMCMDTYPE_CONNECT
410 || pCmd->enmCmdType == VBOXHGCMCMDTYPE_DISCONNECT
411 || pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL)
412 {
413 LogFunc(("u32HGCMEnabled = %d\n", pThis->u32HGCMEnabled));
414 if (ASMAtomicCmpXchgU32(&pThis->u32HGCMEnabled, 1, 0))
415 VMMDevCtlSetGuestFilterMask(pThis, VMMDEV_EVENT_HGCM, 0);
416 }
417
418 vmmdevHGCMCmdListUnlock(pThis);
419 return rc;
420}
421
422/** Remove VBOXHGCMCMD from the list of pending commands.
423 *
424 * @returns VBox status code.
425 * @param pThis The VMMDev instance data.
426 * @param pCmd Command to remove.
427 */
428static int vmmdevHGCMRemoveCommand(PVMMDEV pThis, PVBOXHGCMCMD pCmd)
429{
430 int rc = vmmdevHGCMCmdListLock(pThis);
431 AssertRCReturn(rc, rc);
432
433 LogFlowFunc(("%p\n", pCmd));
434
435 RTListNodeRemove(&pCmd->node);
436
437 vmmdevHGCMCmdListUnlock(pThis);
438 return rc;
439}
440
441/**
442 * Find a HGCM command by its physical address.
443 *
444 * The caller is responsible for taking the command list lock before calling
445 * this function.
446 *
447 * @returns Pointer to the command on success, NULL otherwise.
448 * @param pThis The VMMDev instance data.
449 * @param GCPhys The physical address of the command we're looking for.
450 */
451DECLINLINE(PVBOXHGCMCMD) vmmdevHGCMFindCommandLocked(PVMMDEV pThis, RTGCPHYS GCPhys)
452{
453 PVBOXHGCMCMD pCmd;
454 RTListForEach(&pThis->listHGCMCmd, pCmd, VBOXHGCMCMD, node)
455 {
456 if (pCmd->GCPhys == GCPhys)
457 return pCmd;
458 }
459 return NULL;
460}
461
462/** Copy VMMDevHGCMConnect request data from the guest to VBOXHGCMCMD command.
463 *
464 * @param pHGCMConnect The source guest request (cached in host memory).
465 * @param pCmd Destination command.
466 */
467static void vmmdevHGCMConnectFetch(const VMMDevHGCMConnect *pHGCMConnect, PVBOXHGCMCMD pCmd)
468{
469 pCmd->enmRequestType = pHGCMConnect->header.header.requestType;
470 pCmd->u.connect.u32ClientID = pHGCMConnect->u32ClientID;
471 *pCmd->u.connect.pLoc = pHGCMConnect->loc;
472}
473
474/** Handle VMMDevHGCMConnect request.
475 *
476 * @param pThis The VMMDev instance data.
477 * @param pHGCMConnect The guest request (cached in host memory).
478 * @param GCPhys The physical address of the request.
479 */
480int vmmdevHGCMConnect(PVMMDEV pThis, const VMMDevHGCMConnect *pHGCMConnect, RTGCPHYS GCPhys)
481{
482 int rc = VINF_SUCCESS;
483
484 PVBOXHGCMCMD pCmd = vmmdevHGCMCmdAlloc(pThis, VBOXHGCMCMDTYPE_CONNECT, GCPhys, pHGCMConnect->header.header.size, 0,
485 pHGCMConnect->header.header.fRequestor);
486 if (pCmd)
487 {
488 vmmdevHGCMConnectFetch(pHGCMConnect, pCmd);
489
490 /* Only allow the guest to use existing services! */
491 ASSERT_GUEST(pHGCMConnect->loc.type == VMMDevHGCMLoc_LocalHost_Existing);
492 pCmd->u.connect.pLoc->type = VMMDevHGCMLoc_LocalHost_Existing;
493
494 vmmdevHGCMAddCommand(pThis, pCmd);
495 rc = pThis->pHGCMDrv->pfnConnect(pThis->pHGCMDrv, pCmd, pCmd->u.connect.pLoc, &pCmd->u.connect.u32ClientID);
496 if (RT_FAILURE(rc))
497 vmmdevHGCMRemoveCommand(pThis, pCmd);
498 }
499 else
500 {
501 rc = VERR_NO_MEMORY;
502 }
503
504 return rc;
505}
506
507/** Copy VMMDevHGCMDisconnect request data from the guest to VBOXHGCMCMD command.
508 *
509 * @param pHGCMDisconnect The source guest request (cached in host memory).
510 * @param pCmd Destination command.
511 */
512static void vmmdevHGCMDisconnectFetch(const VMMDevHGCMDisconnect *pHGCMDisconnect, PVBOXHGCMCMD pCmd)
513{
514 pCmd->enmRequestType = pHGCMDisconnect->header.header.requestType;
515 pCmd->u.disconnect.u32ClientID = pHGCMDisconnect->u32ClientID;
516}
517
518/** Handle VMMDevHGCMDisconnect request.
519 *
520 * @param pThis The VMMDev instance data.
521 * @param pHGCMDisconnect The guest request (cached in host memory).
522 * @param GCPhys The physical address of the request.
523 */
524int vmmdevHGCMDisconnect(PVMMDEV pThis, const VMMDevHGCMDisconnect *pHGCMDisconnect, RTGCPHYS GCPhys)
525{
526 int rc = VINF_SUCCESS;
527
528 PVBOXHGCMCMD pCmd = vmmdevHGCMCmdAlloc(pThis, VBOXHGCMCMDTYPE_DISCONNECT, GCPhys, pHGCMDisconnect->header.header.size, 0,
529 pHGCMDisconnect->header.header.fRequestor);
530 if (pCmd)
531 {
532 vmmdevHGCMDisconnectFetch(pHGCMDisconnect, pCmd);
533
534 vmmdevHGCMAddCommand(pThis, pCmd);
535 rc = pThis->pHGCMDrv->pfnDisconnect (pThis->pHGCMDrv, pCmd, pCmd->u.disconnect.u32ClientID);
536 if (RT_FAILURE(rc))
537 vmmdevHGCMRemoveCommand(pThis, pCmd);
538 }
539 else
540 rc = VERR_NO_MEMORY;
541
542 return rc;
543}
544
545/** Translate LinAddr parameter type to the direction of data transfer.
546 *
547 * @returns VBOX_HGCM_F_PARM_DIRECTION_* flags.
548 * @param enmType Type of the LinAddr parameter.
549 */
550static uint32_t vmmdevHGCMParmTypeToDirection(HGCMFunctionParameterType enmType)
551{
552 if (enmType == VMMDevHGCMParmType_LinAddr_In) return VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
553 if (enmType == VMMDevHGCMParmType_LinAddr_Out) return VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;
554 return VBOX_HGCM_F_PARM_DIRECTION_BOTH;
555}
556
557/** Check if list of pages in a HGCM pointer parameter corresponds to a contiguous buffer.
558 *
559 * @returns true if pages are contiguous, false otherwise.
560 * @param pPtr Information about a pointer HGCM parameter.
561 */
562DECLINLINE(bool) vmmdevHGCMGuestBufferIsContiguous(const VBOXHGCMPARMPTR *pPtr)
563{
564 if (pPtr->cPages == 1)
565 return true;
566 RTGCPHYS64 Phys = pPtr->paPages[0] + PAGE_SIZE;
567 if (Phys != pPtr->paPages[1])
568 return false;
569 if (pPtr->cPages > 2)
570 {
571 uint32_t iPage = 2;
572 do
573 {
574 Phys += PAGE_SIZE;
575 if (Phys != pPtr->paPages[iPage])
576 return false;
577 ++iPage;
578 } while (iPage < pPtr->cPages);
579 }
580 return true;
581}
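/* Example: pages at GCPhys 0x10000, 0x11000, 0x12000 describe a contiguous
   buffer (assuming 4 KiB pages), whereas 0x10000, 0x12000 do not. */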
582
583/** Copy data from guest memory to the host buffer.
584 *
585 * @returns VBox status code.
586 * @param pDevIns The device instance for PDMDevHlp.
587 * @param pvDst The destination host buffer.
588 * @param cbDst Size of the destination host buffer.
589 * @param pPtr Description of the source HGCM pointer parameter.
590 */
591static int vmmdevHGCMGuestBufferRead(PPDMDEVINSR3 pDevIns, void *pvDst, uint32_t cbDst,
592 const VBOXHGCMPARMPTR *pPtr)
593{
594 /*
595 * Try to detect contiguous buffers.
596 */
597 /** @todo We need a flag for indicating this. */
598 if (vmmdevHGCMGuestBufferIsContiguous(pPtr))
599 return PDMDevHlpPhysRead(pDevIns, pPtr->paPages[0] | pPtr->offFirstPage, pvDst, cbDst);
600
601 /*
602 * Page by page fallback.
603 */
604 uint8_t *pu8Dst = (uint8_t *)pvDst;
605 uint32_t offPage = pPtr->offFirstPage;
606 uint32_t cbRemaining = cbDst;
607
608 for (uint32_t iPage = 0; iPage < pPtr->cPages && cbRemaining > 0; ++iPage)
609 {
610 uint32_t cbToRead = PAGE_SIZE - offPage;
611 if (cbToRead > cbRemaining)
612 cbToRead = cbRemaining;
613
614 /* Skip invalid pages. */
615 const RTGCPHYS GCPhys = pPtr->paPages[iPage];
616 if (GCPhys != NIL_RTGCPHYS)
617 {
618 int rc = PDMDevHlpPhysRead(pDevIns, GCPhys + offPage, pu8Dst, cbToRead);
619 AssertMsgReturn(RT_SUCCESS(rc), ("rc=%Rrc GCPhys=%RGp offPage=%#x cbToRead=%#x\n", rc, GCPhys, offPage, cbToRead), rc);
620 }
621
622 offPage = 0; /* Subsequent pages are read from offset 0. */
623 cbRemaining -= cbToRead;
624 pu8Dst += cbToRead;
625 }
626
627 return VINF_SUCCESS;
628}
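/* Note: pages with NIL_RTGCPHYS are skipped above, so the corresponding part of
   the destination buffer is left untouched; callers are expected to pass a
   zero-initialized buffer (vmmdevHGCMInitHostParameters uses RTMemAllocZ). */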
629
630/** Copy data from the host buffer to guest memory.
631 *
632 * @returns VBox status code.
633 * @param pDevIns The device instance for PDMDevHlp.
634 * @param pPtr Description of the destination HGCM pointer parameter.
635 * @param pvSrc The source host buffer.
636 * @param cbSrc Size of the source host buffer.
637 */
638static int vmmdevHGCMGuestBufferWrite(PPDMDEVINSR3 pDevIns, const VBOXHGCMPARMPTR *pPtr,
639 const void *pvSrc, uint32_t cbSrc)
640{
641 int rc = VINF_SUCCESS;
642
643 uint8_t *pu8Src = (uint8_t *)pvSrc;
644 uint32_t offPage = pPtr->offFirstPage;
645 uint32_t cbRemaining = RT_MIN(cbSrc, pPtr->cbData);
646
647 uint32_t iPage;
648 for (iPage = 0; iPage < pPtr->cPages && cbRemaining > 0; ++iPage)
649 {
650 uint32_t cbToWrite = PAGE_SIZE - offPage;
651 if (cbToWrite > cbRemaining)
652 cbToWrite = cbRemaining;
653
654 /* Skip invalid pages. */
655 const RTGCPHYS GCPhys = pPtr->paPages[iPage];
656 if (GCPhys != NIL_RTGCPHYS)
657 {
658 rc = PDMDevHlpPhysWrite(pDevIns, GCPhys + offPage, pu8Src, cbToWrite);
659 AssertRCBreak(rc);
660 }
661
662 offPage = 0; /* Subsequent pages are written at offset 0. */
663 cbRemaining -= cbToWrite;
664 pu8Src += cbToWrite;
665 }
666
667 return rc;
668}
669
670/** Initializes pCmd->paHostParms from already initialized pCmd->paGuestParms.
671 * Allocates memory for pointer parameters and copies data from the guest.
672 *
673 * @returns VBox status code that the guest should see.
674 * @param pThis The VMMDev instance data.
675 * @param pCmd Command structure whose host parameters need initialization.
676 * @param pbReq The request buffer.
677 */
678static int vmmdevHGCMInitHostParameters(PVMMDEV pThis, PVBOXHGCMCMD pCmd, uint8_t const *pbReq)
679{
680 AssertReturn(pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL, VERR_INTERNAL_ERROR);
681
682 for (uint32_t i = 0; i < pCmd->u.call.cParms; ++i)
683 {
684 VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
685 VBOXHGCMSVCPARM * const pHostParm = &pCmd->u.call.paHostParms[i];
686
687 switch (pGuestParm->enmType)
688 {
689 case VMMDevHGCMParmType_32bit:
690 {
691 pHostParm->type = VBOX_HGCM_SVC_PARM_32BIT;
692 pHostParm->u.uint32 = (uint32_t)pGuestParm->u.val.u64Value;
693
694 break;
695 }
696
697 case VMMDevHGCMParmType_64bit:
698 {
699 pHostParm->type = VBOX_HGCM_SVC_PARM_64BIT;
700 pHostParm->u.uint64 = pGuestParm->u.val.u64Value;
701
702 break;
703 }
704
705 case VMMDevHGCMParmType_PageList:
706 RT_FALL_THRU();
707 case VMMDevHGCMParmType_LinAddr_In:
708 case VMMDevHGCMParmType_LinAddr_Out:
709 case VMMDevHGCMParmType_LinAddr:
710 case VMMDevHGCMParmType_Embedded:
711 case VMMDevHGCMParmType_ContiguousPageList:
712 {
713 const uint32_t cbData = pGuestParm->u.ptr.cbData;
714
715 pHostParm->type = VBOX_HGCM_SVC_PARM_PTR;
716 pHostParm->u.pointer.size = cbData;
717
718 if (cbData)
719 {
720 /* Zero the memory; the buffer content is potentially copied back to the guest. */
721 void *pv = RTMemAllocZ(cbData);
722 AssertReturn(pv, VERR_NO_MEMORY);
723 pHostParm->u.pointer.addr = pv;
724
725 if (pGuestParm->u.ptr.fu32Direction & VBOX_HGCM_F_PARM_DIRECTION_TO_HOST)
726 {
727 if (pGuestParm->enmType != VMMDevHGCMParmType_Embedded)
728 {
729 if (pGuestParm->enmType != VMMDevHGCMParmType_ContiguousPageList)
730 {
731 int rc = vmmdevHGCMGuestBufferRead(pThis->pDevInsR3, pv, cbData, &pGuestParm->u.ptr);
732 ASSERT_GUEST_RETURN(RT_SUCCESS(rc), rc);
733 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
734 }
735 else
736 {
737 int rc = PDMDevHlpPhysRead(pThis->pDevInsR3,
738 pGuestParm->u.ptr.paPages[0] | pGuestParm->u.ptr.offFirstPage,
739 pv, cbData);
740 ASSERT_GUEST_RETURN(RT_SUCCESS(rc), rc);
741 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
742 }
743 }
744 else
745 {
746 memcpy(pv, &pbReq[pGuestParm->u.ptr.offFirstPage], cbData);
747 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
748 }
749 }
750 }
751 else
752 {
753 pHostParm->u.pointer.addr = NULL;
754 }
755
756 break;
757 }
758
759 case VMMDevHGCMParmType_NoBouncePageList:
760 {
761 pHostParm->type = VBOX_HGCM_SVC_PARM_PAGES;
762 pHostParm->u.Pages.cb = pGuestParm->u.Pages.cbData;
763 pHostParm->u.Pages.cPages = pGuestParm->u.Pages.cPages;
764 pHostParm->u.Pages.papvPages = (void **)&pGuestParm->u.Pages.paPgLocks[pGuestParm->u.Pages.cPages];
765
766 break;
767 }
768
769 default:
770 ASSERT_GUEST_FAILED_RETURN(VERR_INVALID_PARAMETER);
771 }
772 }
773
774 return VINF_SUCCESS;
775}
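/* Summary of the guest -> host parameter mapping established above:
 *   32bit / 64bit                 -> VBOX_HGCM_SVC_PARM_32BIT / _64BIT (by value)
 *   LinAddr*, PageList, Embedded,
 *   ContiguousPageList            -> VBOX_HGCM_SVC_PARM_PTR (host bounce buffer; guest
 *                                    data is copied in when the direction includes TO_HOST)
 *   NoBouncePageList              -> VBOX_HGCM_SVC_PARM_PAGES (locked guest pages, no copy)
 */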
776
777
778/** Allocate and initialize VBOXHGCMCMD structure for a HGCMCall request.
779 *
780 * @returns VBox status code that the guest should see.
781 * @param pThis The VMMDev instance data.
782 * @param pHGCMCall The HGCMCall request (cached in host memory).
783 * @param cbHGCMCall Size of the request.
784 * @param GCPhys Guest physical address of the request.
785 * @param enmRequestType The request type. Distinguishes 64 and 32 bit calls.
786 * @param ppCmd Where to store pointer to allocated command.
787 * @param pcbHGCMParmStruct Where to store size of used HGCM parameter structure.
788 */
789static int vmmdevHGCMCallAlloc(PVMMDEV pThis, const VMMDevHGCMCall *pHGCMCall, uint32_t cbHGCMCall, RTGCPHYS GCPhys,
790 VMMDevRequestType enmRequestType, PVBOXHGCMCMD *ppCmd, uint32_t *pcbHGCMParmStruct)
791{
792#ifdef VBOX_WITH_64_BITS_GUESTS
793 const uint32_t cbHGCMParmStruct = enmRequestType == VMMDevReq_HGCMCall64 ? sizeof(HGCMFunctionParameter64)
794 : sizeof(HGCMFunctionParameter32);
795#else
796 const uint32_t cbHGCMParmStruct = sizeof(HGCMFunctionParameter);
797#endif
798
799 const uint32_t cParms = pHGCMCall->cParms;
800
801 /* Check that there is enough space for the parameters and enforce a sane upper limit. */
802 ASSERT_GUEST_STMT_RETURN( cParms <= (cbHGCMCall - sizeof(VMMDevHGCMCall)) / cbHGCMParmStruct
803 && cParms <= VMMDEV_MAX_HGCM_PARMS,
804 LogRelMax(50, ("VMMDev: request packet with invalid number of HGCM parameters: %d vs %d. Refusing operation.\n",
805 (cbHGCMCall - sizeof(VMMDevHGCMCall)) / cbHGCMParmStruct, cParms)),
806 VERR_INVALID_PARAMETER);
807 RT_UNTRUSTED_VALIDATED_FENCE();
808
809 PVBOXHGCMCMD pCmd = vmmdevHGCMCmdAlloc(pThis, VBOXHGCMCMDTYPE_CALL, GCPhys, cbHGCMCall, cParms,
810 pHGCMCall->header.header.fRequestor);
811 if (pCmd == NULL)
812 return VERR_NO_MEMORY;
813
814 /* Request type has been validated in vmmdevReqDispatcher. */
815 pCmd->enmRequestType = enmRequestType;
816 pCmd->u.call.u32ClientID = pHGCMCall->u32ClientID;
817 pCmd->u.call.u32Function = pHGCMCall->u32Function;
818
819 *ppCmd = pCmd;
820 *pcbHGCMParmStruct = cbHGCMParmStruct;
821 return VINF_SUCCESS;
822}
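/* Example of the bounds check above (illustrative): a 32-bit call with
   cbHGCMCall = sizeof(VMMDevHGCMCall) + 4 * sizeof(HGCMFunctionParameter32)
   admits at most cParms = 4. */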
823
824/** Copy VMMDevHGCMCall request data from the guest to VBOXHGCMCMD command.
825 *
826 * @returns VBox status code that the guest should see.
827 * @param pThis The VMMDev instance data.
828 * @param pCmd The destination command.
829 * @param pHGCMCall The HGCMCall request (cached in host memory).
830 * @param cbHGCMCall Size of the request.
831 * @param enmRequestType The request type. Distinguishes 64 and 32 bit calls.
832 * @param cbHGCMParmStruct Size of used HGCM parameter structure.
833 */
834static int vmmdevHGCMCallFetchGuestParms(PVMMDEV pThis, PVBOXHGCMCMD pCmd,
835 const VMMDevHGCMCall *pHGCMCall, uint32_t cbHGCMCall,
836 VMMDevRequestType enmRequestType, uint32_t cbHGCMParmStruct)
837{
838 /*
839 * Go over all guest parameters and initialize relevant VBOXHGCMCMD fields.
840 * VBOXHGCMCMD must contain all information about the request, because
841 * the request will not be read from guest memory again.
842 */
843#ifdef VBOX_WITH_64_BITS_GUESTS
844 const bool f64Bits = (enmRequestType == VMMDevReq_HGCMCall64);
845#endif
846
847 const uint32_t cParms = pCmd->u.call.cParms;
848
849 /* Offsets in the request buffer to HGCM parameters and additional data. */
850 const uint32_t offHGCMParms = sizeof(VMMDevHGCMCall);
851 const uint32_t offExtra = offHGCMParms + cParms * cbHGCMParmStruct;
852
853 /* Pointer to the next HGCM parameter of the request. */
854 const uint8_t *pu8HGCMParm = (uint8_t *)pHGCMCall + offHGCMParms;
855
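 /* cbTotalData accumulates buffer bytes across all parameters; the per-parameter
    checks below cap the running total at VMMDEV_MAX_HGCM_DATA_SIZE. */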
856 uint32_t cbTotalData = 0;
857 for (uint32_t i = 0; i < cParms; ++i, pu8HGCMParm += cbHGCMParmStruct)
858 {
859 VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
860
861#ifdef VBOX_WITH_64_BITS_GUESTS
862 AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, type, HGCMFunctionParameter32, type);
863 pGuestParm->enmType = ((HGCMFunctionParameter64 *)pu8HGCMParm)->type;
864#else
865 pGuestParm->enmType = ((HGCMFunctionParameter *)pu8HGCMParm)->type;
866#endif
867
868 switch (pGuestParm->enmType)
869 {
870 case VMMDevHGCMParmType_32bit:
871 {
872#ifdef VBOX_WITH_64_BITS_GUESTS
873 AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.value32, HGCMFunctionParameter32, u.value32);
874 uint32_t *pu32 = &((HGCMFunctionParameter64 *)pu8HGCMParm)->u.value32;
875#else
876 uint32_t *pu32 = &((HGCMFunctionParameter *)pu8HGCMParm)->u.value32;
877#endif
878 LogFunc(("uint32 guest parameter %RI32\n", *pu32));
879
880 pGuestParm->u.val.u64Value = *pu32;
881 pGuestParm->u.val.offValue = (uint32_t)((uintptr_t)pu32 - (uintptr_t)pHGCMCall);
882 pGuestParm->u.val.cbValue = sizeof(uint32_t);
883
884 break;
885 }
886
887 case VMMDevHGCMParmType_64bit:
888 {
889#ifdef VBOX_WITH_64_BITS_GUESTS
890 AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.value64, HGCMFunctionParameter32, u.value64);
891 uint64_t *pu64 = (uint64_t *)(uintptr_t)&((HGCMFunctionParameter64 *)pu8HGCMParm)->u.value64; /* MSC detect misalignment, thus casts. */
892#else
893 uint64_t *pu64 = &((HGCMFunctionParameter *)pu8HGCMParm)->u.value64;
894#endif
895 LogFunc(("uint64 guest parameter %RI64\n", *pu64));
896
897 pGuestParm->u.val.u64Value = *pu64;
898 pGuestParm->u.val.offValue = (uint32_t)((uintptr_t)pu64 - (uintptr_t)pHGCMCall);
899 pGuestParm->u.val.cbValue = sizeof(uint64_t);
900
901 break;
902 }
903
904 case VMMDevHGCMParmType_LinAddr_In: /* In (read) */
905 case VMMDevHGCMParmType_LinAddr_Out: /* Out (write) */
906 case VMMDevHGCMParmType_LinAddr: /* In & Out */
907 {
908#ifdef VBOX_WITH_64_BITS_GUESTS
909 uint32_t cbData = f64Bits ? ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.Pointer.size
910 : ((HGCMFunctionParameter32 *)pu8HGCMParm)->u.Pointer.size;
911 RTGCPTR GCPtr = f64Bits ? ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.Pointer.u.linearAddr
912 : ((HGCMFunctionParameter32 *)pu8HGCMParm)->u.Pointer.u.linearAddr;
913#else
914 uint32_t cbData = ((HGCMFunctionParameter *)pu8HGCMParm)->u.Pointer.size;
915 RTGCPTR GCPtr = ((HGCMFunctionParameter *)pu8HGCMParm)->u.Pointer.u.linearAddr;
916#endif
917 LogFunc(("LinAddr guest parameter %RGv, cb %u\n", GCPtr, cbData));
918
919 ASSERT_GUEST_RETURN(cbData <= VMMDEV_MAX_HGCM_DATA_SIZE - cbTotalData, VERR_INVALID_PARAMETER);
920 cbTotalData += cbData;
921
922 const uint32_t offFirstPage = cbData > 0 ? GCPtr & PAGE_OFFSET_MASK : 0;
923 const uint32_t cPages = cbData > 0 ? (offFirstPage + cbData + PAGE_SIZE - 1) / PAGE_SIZE : 0;
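 /* E.g. offFirstPage = 0xffc with cbData = 8 spans two pages:
    (0xffc + 8 + 0xfff) / 0x1000 = 2. */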
924
925 pGuestParm->u.ptr.cbData = cbData;
926 pGuestParm->u.ptr.offFirstPage = offFirstPage;
927 pGuestParm->u.ptr.cPages = cPages;
928 pGuestParm->u.ptr.fu32Direction = vmmdevHGCMParmTypeToDirection(pGuestParm->enmType);
929
930 if (cbData > 0)
931 {
932 if (cPages == 1)
933 pGuestParm->u.ptr.paPages = &pGuestParm->u.ptr.GCPhysSinglePage;
934 else
935 {
936 pGuestParm->u.ptr.paPages = (RTGCPHYS *)RTMemAlloc(cPages * sizeof(RTGCPHYS));
937 AssertReturn(pGuestParm->u.ptr.paPages, VERR_NO_MEMORY);
938 }
939
940 /* Convert the guest linear page pointers to physical addresses. */
941 GCPtr &= PAGE_BASE_GC_MASK;
942 for (uint32_t iPage = 0; iPage < cPages; ++iPage)
943 {
944 /* The guest might specify an invalid GCPtr; just skip such addresses.
945 * Also, if the guest parameters are fetched when restoring an old saved state,
946 * then GCPtr may be invalid and not have a corresponding GCPhys.
947 * The command restoration routine will take care of this.
948 */
949 RTGCPHYS GCPhys;
950 int rc2 = PDMDevHlpPhysGCPtr2GCPhys(pThis->pDevInsR3, GCPtr, &GCPhys);
951 if (RT_FAILURE(rc2))
952 GCPhys = NIL_RTGCPHYS;
953 LogFunc(("Page %d: %RGv -> %RGp. %Rrc\n", iPage, GCPtr, GCPhys, rc2));
954
955 pGuestParm->u.ptr.paPages[iPage] = GCPhys;
956 GCPtr += PAGE_SIZE;
957 }
958 }
959
960 break;
961 }
962
963 case VMMDevHGCMParmType_PageList:
964 case VMMDevHGCMParmType_ContiguousPageList:
965 case VMMDevHGCMParmType_NoBouncePageList:
966 {
967#ifdef VBOX_WITH_64_BITS_GUESTS
968 AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.PageList.size, HGCMFunctionParameter32, u.PageList.size);
969 AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.PageList.offset, HGCMFunctionParameter32, u.PageList.offset);
970 uint32_t cbData = ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.PageList.size;
971 uint32_t offPageListInfo = ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.PageList.offset;
972#else
973 uint32_t cbData = ((HGCMFunctionParameter *)pu8HGCMParm)->u.PageList.size;
974 uint32_t offPageListInfo = ((HGCMFunctionParameter *)pu8HGCMParm)->u.PageList.offset;
975#endif
976 LogFunc(("PageList guest parameter cb %u, offset %u\n", cbData, offPageListInfo));
977
978 ASSERT_GUEST_RETURN(cbData <= VMMDEV_MAX_HGCM_DATA_SIZE - cbTotalData, VERR_INVALID_PARAMETER);
979 cbTotalData += cbData;
980
981/** @todo respect zero byte page lists... */
982 /* Check that the page list info is within the request. */
983 ASSERT_GUEST_RETURN( offPageListInfo >= offExtra
984 && cbHGCMCall >= sizeof(HGCMPageListInfo)
985 && offPageListInfo <= cbHGCMCall - sizeof(HGCMPageListInfo),
986 VERR_INVALID_PARAMETER);
987 RT_UNTRUSTED_VALIDATED_FENCE();
988
989 /* The HGCMPageListInfo structure is within the request. */
990 const HGCMPageListInfo *pPageListInfo = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offPageListInfo);
991
992 /* Enough space for page pointers? */
993 const uint32_t cMaxPages = 1 + (cbHGCMCall - offPageListInfo - sizeof(HGCMPageListInfo)) / sizeof(RTGCPHYS);
994 ASSERT_GUEST_RETURN( pPageListInfo->cPages > 0
995 && pPageListInfo->cPages <= cMaxPages,
996 VERR_INVALID_PARAMETER);
997
998 /* Flags. */
999 ASSERT_GUEST_MSG_RETURN(VBOX_HGCM_F_PARM_ARE_VALID(pPageListInfo->flags),
1000 ("%#x\n", pPageListInfo->flags), VERR_INVALID_FLAGS);
1001 /* First page offset. */
1002 ASSERT_GUEST_MSG_RETURN(pPageListInfo->offFirstPage < PAGE_SIZE,
1003 ("%#x\n", pPageListInfo->offFirstPage), VERR_INVALID_PARAMETER);
1004
1005 /* Contiguous page lists only ever have a single page, and a
1006 no-bounce page list requires cPages to match the size exactly.
1007 A plain page list currently does not impose any restrictions on cPages. */
1008 ASSERT_GUEST_MSG_RETURN( pPageListInfo->cPages
1009 == (pGuestParm->enmType == VMMDevHGCMParmType_ContiguousPageList ? 1
1010 : RT_ALIGN_32(pPageListInfo->offFirstPage + cbData, PAGE_SIZE) >> PAGE_SHIFT)
1011 || pGuestParm->enmType == VMMDevHGCMParmType_PageList,
1012 ("offFirstPage=%#x cbData=%#x cPages=%#x enmType=%d\n",
1013 pPageListInfo->offFirstPage, cbData, pPageListInfo->cPages, pGuestParm->enmType),
1014 VERR_INVALID_PARAMETER);
1015
1016 RT_UNTRUSTED_VALIDATED_FENCE();
1017
1018 /*
1019 * Deal with no-bounce buffers first, as
1020 * VMMDevHGCMParmType_PageList is the fallback.
1021 */
1022 if (pGuestParm->enmType == VMMDevHGCMParmType_NoBouncePageList)
1023 {
1024 /* Validate page offsets */
1025 ASSERT_GUEST_MSG_RETURN( !(pPageListInfo->aPages[0] & PAGE_OFFSET_MASK)
1026 || (pPageListInfo->aPages[0] & PAGE_OFFSET_MASK) == pPageListInfo->offFirstPage,
1027 ("%#RX64 offFirstPage=%#x\n", pPageListInfo->aPages[0], pPageListInfo->offFirstPage),
1028 VERR_INVALID_POINTER);
1029 uint32_t const cPages = pPageListInfo->cPages;
1030 for (uint32_t iPage = 1; iPage < cPages; iPage++)
1031 ASSERT_GUEST_MSG_RETURN(!(pPageListInfo->aPages[iPage] & PAGE_OFFSET_MASK),
1032 ("[%#zx]=%#RX64\n", iPage, pPageListInfo->aPages[iPage]), VERR_INVALID_POINTER);
1033 RT_UNTRUSTED_VALIDATED_FENCE();
1034
1035 pGuestParm->u.Pages.cbData = cbData;
1036 pGuestParm->u.Pages.offFirstPage = pPageListInfo->offFirstPage;
1037 pGuestParm->u.Pages.fFlags = pPageListInfo->flags;
1038 pGuestParm->u.Pages.cPages = (uint16_t)cPages;
1039 pGuestParm->u.Pages.fLocked = false;
1040 pGuestParm->u.Pages.paPgLocks = (PPGMPAGEMAPLOCK)RTMemAllocZ( (sizeof(PGMPAGEMAPLOCK) + sizeof(void *))
1041 * cPages);
1042 AssertReturn(pGuestParm->u.Pages.paPgLocks, VERR_NO_MEMORY);
1043
1044 /* Lock the pages and get their host mappings; the first mapping is adjusted by offFirstPage below. */
1045 int rc = VINF_SUCCESS;
1046 void **papvPages = (void **)&pGuestParm->u.Pages.paPgLocks[cPages];
1047 if (pPageListInfo->flags & VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST)
1048 rc = PDMDevHlpPhysBulkGCPhys2CCPtr(pThis->pDevInsR3, cPages, pPageListInfo->aPages, 0 /*fFlags*/,
1049 papvPages, pGuestParm->u.Pages.paPgLocks);
1050 else
1051 rc = PDMDevHlpPhysBulkGCPhys2CCPtrReadOnly(pThis->pDevInsR3, cPages, pPageListInfo->aPages, 0 /*fFlags*/,
1052 (void const **)papvPages, pGuestParm->u.Pages.paPgLocks);
1053 if (RT_SUCCESS(rc))
1054 {
1055 papvPages[0] = (void *)((uintptr_t)papvPages[0] | pPageListInfo->offFirstPage);
1056 pGuestParm->u.Pages.fLocked = true;
1057 break;
1058 }
1059
1060 /* Locking failed: bail out, except for MMIO pages, where we fall back on regular page list handling. */
1061 RTMemFree(pGuestParm->u.Pages.paPgLocks);
1062 pGuestParm->u.Pages.paPgLocks = NULL;
1063 STAM_REL_COUNTER_INC(&pThis->StatHgcmFailedPageListLocking);
1064 ASSERT_GUEST_MSG_RETURN(rc == VERR_PGM_PHYS_PAGE_RESERVED, ("cPages=%u %Rrc\n", cPages, rc), rc);
1065 pGuestParm->enmType = VMMDevHGCMParmType_PageList;
1066 }
1067
1068 /*
1069 * Regular page list or contiguous page list.
1070 */
1071 pGuestParm->u.ptr.cbData = cbData;
1072 pGuestParm->u.ptr.offFirstPage = pPageListInfo->offFirstPage;
1073 pGuestParm->u.ptr.cPages = pPageListInfo->cPages;
1074 pGuestParm->u.ptr.fu32Direction = pPageListInfo->flags;
1075 if (pPageListInfo->cPages == 1)
1076 {
1077 pGuestParm->u.ptr.paPages = &pGuestParm->u.ptr.GCPhysSinglePage;
1078 pGuestParm->u.ptr.GCPhysSinglePage = pPageListInfo->aPages[0];
1079 }
1080 else
1081 {
1082 pGuestParm->u.ptr.paPages = (RTGCPHYS *)RTMemAlloc(pPageListInfo->cPages * sizeof(RTGCPHYS));
1083 AssertReturn(pGuestParm->u.ptr.paPages, VERR_NO_MEMORY);
1084
1085 for (uint32_t iPage = 0; iPage < pGuestParm->u.ptr.cPages; ++iPage)
1086 pGuestParm->u.ptr.paPages[iPage] = pPageListInfo->aPages[iPage];
1087 }
1088 break;
1089 }
1090
1091 case VMMDevHGCMParmType_Embedded:
1092 {
1093#ifdef VBOX_WITH_64_BITS_GUESTS
1094 AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.Embedded.cbData, HGCMFunctionParameter32, u.Embedded.cbData);
1095 uint32_t const cbData = ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.Embedded.cbData;
1096 uint32_t const offData = ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.Embedded.offData;
1097 uint32_t const fFlags = ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.Embedded.fFlags;
1098#else
1099 uint32_t const cbData = ((HGCMFunctionParameter *)pu8HGCMParm)->u.Embedded.cbData;
1100 uint32_t const offData = ((HGCMFunctionParameter *)pu8HGCMParm)->u.Embedded.offData;
1101 uint32_t const fFlags = ((HGCMFunctionParameter *)pu8HGCMParm)->u.Embedded.fFlags;
1102#endif
1103 LogFunc(("Embedded guest parameter cb %u, offset %u, flags %#x\n", cbData, offData, fFlags));
1104
1105 ASSERT_GUEST_RETURN(cbData <= VMMDEV_MAX_HGCM_DATA_SIZE - cbTotalData, VERR_INVALID_PARAMETER);
1106 cbTotalData += cbData;
1107
1108 /* Check flags and buffer range. */
1109 ASSERT_GUEST_MSG_RETURN(VBOX_HGCM_F_PARM_ARE_VALID(fFlags), ("%#x\n", fFlags), VERR_INVALID_FLAGS);
1110 ASSERT_GUEST_MSG_RETURN( offData >= offExtra
1111 && offData <= cbHGCMCall
1112 && cbData <= cbHGCMCall - offData,
1113 ("offData=%#x cbData=%#x cbHGCMCall=%#x offExtra=%#x\n", offData, cbData, cbHGCMCall, offExtra),
1114 VERR_INVALID_PARAMETER);
1115 RT_UNTRUSTED_VALIDATED_FENCE();
1116
1117 /* We use part of the ptr member. */
1118 pGuestParm->u.ptr.fu32Direction = fFlags;
1119 pGuestParm->u.ptr.cbData = cbData;
1120 pGuestParm->u.ptr.offFirstPage = offData;
1121 pGuestParm->u.ptr.GCPhysSinglePage = pCmd->GCPhys + offData;
1122 pGuestParm->u.ptr.cPages = 1;
1123 pGuestParm->u.ptr.paPages = &pGuestParm->u.ptr.GCPhysSinglePage;
1124 break;
1125 }
1126
1127 default:
1128 ASSERT_GUEST_FAILED_RETURN(VERR_INVALID_PARAMETER);
1129 }
1130 }
1131
1132 return VINF_SUCCESS;
1133}
1134
1135/**
1136 * Handles VMMDevHGCMCall request.
1137 *
1138 * @returns VBox status code that the guest should see.
1139 * @param pThis The VMMDev instance data.
1140 * @param pHGCMCall The request to handle (cached in host memory).
1141 * @param cbHGCMCall Size of the entire request (including HGCM parameters).
1142 * @param GCPhys The guest physical address of the request.
1143 * @param enmRequestType The request type. Distinguishes 64 and 32 bit calls.
1144 * @param tsArrival The STAM_GET_TS() value when the request arrived.
1145 * @param ppLock Pointer to the lock info pointer (latter can be
1146 * NULL). Set to NULL if HGCM takes lock ownership.
1147 */
1148int vmmdevHGCMCall(PVMMDEV pThis, const VMMDevHGCMCall *pHGCMCall, uint32_t cbHGCMCall, RTGCPHYS GCPhys,
1149 VMMDevRequestType enmRequestType, uint64_t tsArrival, PVMMDEVREQLOCK *ppLock)
1150{
1151 LogFunc(("client id = %d, function = %d, cParms = %d, enmRequestType = %d, fRequestor = %#x\n", pHGCMCall->u32ClientID,
1152 pHGCMCall->u32Function, pHGCMCall->cParms, enmRequestType, pHGCMCall->header.header.fRequestor));
1153
1154 /*
1155 * Validation.
1156 */
1157 ASSERT_GUEST_RETURN(cbHGCMCall >= sizeof(VMMDevHGCMCall), VERR_INVALID_PARAMETER);
1158#ifdef VBOX_WITH_64_BITS_GUESTS
1159 ASSERT_GUEST_RETURN( enmRequestType == VMMDevReq_HGCMCall32
1160 || enmRequestType == VMMDevReq_HGCMCall64, VERR_INVALID_PARAMETER);
1161#else
1162 ASSERT_GUEST_RETURN(enmRequestType == VMMDevReq_HGCMCall32, VERR_INVALID_PARAMETER);
1163#endif
1164 RT_UNTRUSTED_VALIDATED_FENCE();
1165
1166 /*
1167 * Create a command structure.
1168 */
1169 PVBOXHGCMCMD pCmd;
1170 uint32_t cbHGCMParmStruct;
1171 int rc = vmmdevHGCMCallAlloc(pThis, pHGCMCall, cbHGCMCall, GCPhys, enmRequestType, &pCmd, &cbHGCMParmStruct);
1172 if (RT_SUCCESS(rc))
1173 {
1174 pCmd->tsArrival = tsArrival;
1175 PVMMDEVREQLOCK pLock = *ppLock;
1176 if (pLock)
1177 {
1178 pCmd->ReqMapLock = pLock->Lock;
1179 pCmd->pvReqLocked = pLock->pvReq;
1180 *ppLock = NULL;
1181 }
1182
1183 rc = vmmdevHGCMCallFetchGuestParms(pThis, pCmd, pHGCMCall, cbHGCMCall, enmRequestType, cbHGCMParmStruct);
1184 if (RT_SUCCESS(rc))
1185 {
1186 /* Copy guest data to host parameters, so HGCM services can use the data. */
1187 rc = vmmdevHGCMInitHostParameters(pThis, pCmd, (uint8_t const *)pHGCMCall);
1188 if (RT_SUCCESS(rc))
1189 {
1190 /*
1191 * Pass the function call to HGCM connector for actual processing
1192 */
1193 vmmdevHGCMAddCommand(pThis, pCmd);
1194
1195#if 0 /* DONT ENABLE - for performance hacking. */
1196 if ( pCmd->u.call.u32Function == 9
1197 && pCmd->u.call.cParms == 5)
1198 {
1199 vmmdevHGCMRemoveCommand(pThis, pCmd);
1200
1201 if (pCmd->pvReqLocked)
1202 {
1203 VMMDevHGCMRequestHeader volatile *pHeader = (VMMDevHGCMRequestHeader volatile *)pCmd->pvReqLocked;
1204 pHeader->header.rc = VINF_SUCCESS;
1205 pHeader->result = VINF_SUCCESS;
1206 pHeader->fu32Flags |= VBOX_HGCM_REQ_DONE;
1207 }
1208 else
1209 {
1210 VMMDevHGCMRequestHeader *pHeader = (VMMDevHGCMRequestHeader *)pHGCMCall;
1211 pHeader->header.rc = VINF_SUCCESS;
1212 pHeader->result = VINF_SUCCESS;
1213 pHeader->fu32Flags |= VBOX_HGCM_REQ_DONE;
1214 PDMDevHlpPhysWrite(pThis->pDevInsR3, GCPhys, pHeader, sizeof(*pHeader));
1215 }
1216 vmmdevHGCMCmdFree(pThis, pCmd);
1217 return VINF_HGCM_ASYNC_EXECUTE; /* ignored, but avoids assertions. */
1218 }
1219#endif
1220
1221 rc = pThis->pHGCMDrv->pfnCall(pThis->pHGCMDrv, pCmd,
1222 pCmd->u.call.u32ClientID, pCmd->u.call.u32Function,
1223 pCmd->u.call.cParms, pCmd->u.call.paHostParms, tsArrival);
1224
1225 if (rc == VINF_HGCM_ASYNC_EXECUTE)
1226 {
1227 /*
1228 * Done. Just update statistics and return.
1229 */
1230#ifndef VBOX_WITHOUT_RELEASE_STATISTICS
1231 uint64_t tsNow;
1232 STAM_GET_TS(tsNow);
1233 STAM_REL_PROFILE_ADD_PERIOD(&pThis->StatHgcmCmdArrival, tsNow - tsArrival);
1234#endif
1235 return rc;
1236 }
1237
1238 /*
1239 * Failed, bail out.
1240 */
1241 LogFunc(("pfnCall rc = %Rrc\n", rc));
1242 vmmdevHGCMRemoveCommand(pThis, pCmd);
1243 }
1244 }
1245 vmmdevHGCMCmdFree(pThis, pCmd);
1246 }
1247 return rc;
1248}
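/* Summary of the call path above: vmmdevHGCMCallAlloc -> vmmdevHGCMCallFetchGuestParms
   -> vmmdevHGCMInitHostParameters -> vmmdevHGCMAddCommand -> pfnCall.  The request is
   completed asynchronously through hgcmCompleted() / hgcmCompletedWorker() below. */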
1249
1250/**
1251 * VMMDevReq_HGCMCancel worker.
1252 *
1253 * @returns VBox status code that the guest should see.
1254 * @param pThis The VMMDev instance data.
1255 * @param pHGCMCancel The request to handle (cached in host memory).
1256 * @param GCPhys The address of the request.
1257 *
1258 * @thread EMT
1259 */
1260int vmmdevHGCMCancel(PVMMDEV pThis, const VMMDevHGCMCancel *pHGCMCancel, RTGCPHYS GCPhys)
1261{
1262 NOREF(pHGCMCancel);
1263 int rc = vmmdevHGCMCancel2(pThis, GCPhys);
1264 return rc == VERR_NOT_FOUND ? VERR_INVALID_PARAMETER : rc;
1265}
1266
1267/**
1268 * VMMDevReq_HGCMCancel2 worker.
1269 *
1270 * @retval VINF_SUCCESS on success.
1271 * @retval VERR_NOT_FOUND if the request was not found.
1272 * @retval VERR_INVALID_PARAMETER if the request address is invalid.
1273 *
1274 * @param pThis The VMMDev instance data.
1275 * @param GCPhys The address of the request that should be cancelled.
1276 *
1277 * @thread EMT
1278 */
1279int vmmdevHGCMCancel2(PVMMDEV pThis, RTGCPHYS GCPhys)
1280{
1281 if ( GCPhys == 0
1282 || GCPhys == NIL_RTGCPHYS
1283 || GCPhys == NIL_RTGCPHYS32)
1284 {
1285 Log(("vmmdevHGCMCancel2: GCPhys=%#x\n", GCPhys));
1286 return VERR_INVALID_PARAMETER;
1287 }
1288
1289 /*
1290 * Locate the command and cancel it while under the protection of
1291 * the lock. hgcmCompletedWorker makes assumptions about this.
1292 */
1293 int rc = vmmdevHGCMCmdListLock(pThis);
1294 AssertRCReturn(rc, rc);
1295
1296 PVBOXHGCMCMD pCmd = vmmdevHGCMFindCommandLocked(pThis, GCPhys);
1297 if (pCmd)
1298 {
1299 pCmd->fCancelled = true;
1300
1301 Log(("vmmdevHGCMCancel2: Cancelled pCmd=%p / GCPhys=%#x\n", pCmd, GCPhys));
1302 if (pThis->pHGCMDrv)
1303 pThis->pHGCMDrv->pfnCancelled(pThis->pHGCMDrv, pCmd,
1304 pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL ? pCmd->u.call.u32ClientID
1305 : pCmd->enmCmdType == VBOXHGCMCMDTYPE_CONNECT ? pCmd->u.connect.u32ClientID
1306 : pCmd->enmCmdType == VBOXHGCMCMDTYPE_DISCONNECT ? pCmd->u.disconnect.u32ClientID
1307 : 0);
1308 }
1309 else
1310 rc = VERR_NOT_FOUND;
1311
1312 vmmdevHGCMCmdListUnlock(pThis);
1313 return rc;
1314}
1315
1316/** Write HGCM call parameters and buffers back to the guest request and memory.
1317 *
1318 * @returns VBox status code that the guest should see.
1319 * @param pThis The VMMDev instance data.
1320 * @param pCmd Completed call command.
1321 * @param pHGCMCall The guest request which needs updating (cached in host memory).
1322 * @param pbReq The request copy or locked memory for handling
1323 * embedded buffers.
1324 */
1325static int vmmdevHGCMCompleteCallRequest(PVMMDEV pThis, PVBOXHGCMCMD pCmd, VMMDevHGCMCall *pHGCMCall, uint8_t *pbReq)
1326{
1327 AssertReturn(pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL, VERR_INTERNAL_ERROR);
1328
1329 /*
1330 * Go over parameter descriptions saved in pCmd.
1331 */
1332#ifdef VBOX_WITH_64_BITS_GUESTS
1333 HGCMFunctionParameter64 *pReqParm = (HGCMFunctionParameter64 *)(pbReq + sizeof(VMMDevHGCMCall));
1334 size_t const cbHGCMParmStruct = pCmd->enmRequestType == VMMDevReq_HGCMCall64
1335 ? sizeof(HGCMFunctionParameter64) : sizeof(HGCMFunctionParameter32);
1336#else
1337 HGCMFunctionParameter *pReqParm = (HGCMFunctionParameter *)(pbReq + sizeof(VMMDevHGCMCall));
1338 size_t const cbHGCMParmStruct = sizeof(HGCMFunctionParameter);
1339#endif
1340 for (uint32_t i = 0;
1341 i < pCmd->u.call.cParms;
1342#ifdef VBOX_WITH_64_BITS_GUESTS
1343 ++i, pReqParm = (HGCMFunctionParameter64 *)((uint8_t *)pReqParm + cbHGCMParmStruct)
1344#else
1345 ++i, pReqParm = (HGCMFunctionParameter *)((uint8_t *)pReqParm + cbHGCMParmStruct)
1346#endif
1347 )
1348 {
1349 VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
1350 VBOXHGCMSVCPARM * const pHostParm = &pCmd->u.call.paHostParms[i];
1351
1352 const HGCMFunctionParameterType enmType = pGuestParm->enmType;
1353 switch (enmType)
1354 {
1355 case VMMDevHGCMParmType_32bit:
1356 case VMMDevHGCMParmType_64bit:
1357 {
1358 const VBOXHGCMPARMVAL * const pVal = &pGuestParm->u.val;
1359 const void *pvSrc = enmType == VMMDevHGCMParmType_32bit ? (void *)&pHostParm->u.uint32
1360 : (void *)&pHostParm->u.uint64;
1361/** @todo optimize memcpy away here. */
1362 memcpy((uint8_t *)pHGCMCall + pVal->offValue, pvSrc, pVal->cbValue);
1363 break;
1364 }
1365
1366 case VMMDevHGCMParmType_LinAddr_In:
1367 case VMMDevHGCMParmType_LinAddr_Out:
1368 case VMMDevHGCMParmType_LinAddr:
1369 case VMMDevHGCMParmType_PageList:
1370 {
1371/** @todo Update the return buffer size? */
1372 const VBOXHGCMPARMPTR * const pPtr = &pGuestParm->u.ptr;
1373 if ( pPtr->cbData > 0
1374 && (pPtr->fu32Direction & VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST))
1375 {
1376 const void *pvSrc = pHostParm->u.pointer.addr;
1377 uint32_t cbSrc = pHostParm->u.pointer.size;
1378 int rc = vmmdevHGCMGuestBufferWrite(pThis->pDevInsR3, pPtr, pvSrc, cbSrc);
1379 if (RT_FAILURE(rc))
1380 break;
1381 }
1382 break;
1383 }
1384
1385 case VMMDevHGCMParmType_Embedded:
1386 {
1387 const VBOXHGCMPARMPTR * const pPtr = &pGuestParm->u.ptr;
1388
1389 /* Update size. */
1390#ifdef VBOX_WITH_64_BITS_GUESTS
1391 AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.Embedded.cbData, HGCMFunctionParameter32, u.Embedded.cbData);
1392#endif
1393 pReqParm->u.Embedded.cbData = pHostParm->u.pointer.size;
1394
1395 /* Copy out data. */
1396 if ( pPtr->cbData > 0
1397 && (pPtr->fu32Direction & VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST))
1398 {
1399 const void *pvSrc = pHostParm->u.pointer.addr;
1400 uint32_t cbSrc = pHostParm->u.pointer.size;
1401 uint32_t cbToCopy = RT_MIN(cbSrc, pPtr->cbData);
1402 memcpy(pbReq + pPtr->offFirstPage, pvSrc, cbToCopy);
1403 }
1404 break;
1405 }
1406
1407 case VMMDevHGCMParmType_ContiguousPageList:
1408 {
1409 const VBOXHGCMPARMPTR * const pPtr = &pGuestParm->u.ptr;
1410
1411 /* Update size. */
1412#ifdef VBOX_WITH_64_BITS_GUESTS
1413 AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.PageList.size, HGCMFunctionParameter32, u.PageList.size);
1414#endif
1415 pReqParm->u.PageList.size = pHostParm->u.pointer.size;
1416
1417 /* Copy out data. */
1418 if ( pPtr->cbData > 0
1419 && (pPtr->fu32Direction & VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST))
1420 {
1421 const void *pvSrc = pHostParm->u.pointer.addr;
1422 uint32_t cbSrc = pHostParm->u.pointer.size;
1423 uint32_t cbToCopy = RT_MIN(cbSrc, pPtr->cbData);
1424 int rc = PDMDevHlpPhysWrite(pThis->pDevInsR3, pGuestParm->u.ptr.paPages[0] | pGuestParm->u.ptr.offFirstPage,
1425 pvSrc, cbToCopy);
1426 if (RT_FAILURE(rc))
1427 break;
1428 }
1429 break;
1430 }
1431
1432 case VMMDevHGCMParmType_NoBouncePageList:
1433 {
1434 /* Update size. */
1435#ifdef VBOX_WITH_64_BITS_GUESTS
1436 AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.PageList.size, HGCMFunctionParameter32, u.PageList.size);
1437#endif
1438 pReqParm->u.PageList.size = pHostParm->u.Pages.cb;
1439
1440 /* unlock early. */
1441 if (pGuestParm->u.Pages.fLocked)
1442 {
1443 PDMDevHlpPhysBulkReleasePageMappingLocks(pThis->pDevInsR3, pGuestParm->u.Pages.cPages,
1444 pGuestParm->u.Pages.paPgLocks);
1445 pGuestParm->u.Pages.fLocked = false;
1446 }
1447 break;
1448 }
1449
1450 default:
1451 break;
1452 }
1453 }
1454
1455 return VINF_SUCCESS;
1456}
1457
1458/** Update HGCM request in the guest memory and mark it as completed.
1459 *
1460 * @returns VINF_SUCCESS or VERR_CANCELLED.
1461 * @param pInterface Pointer to this PDM interface.
1462 * @param result HGCM completion status code (VBox status code).
1463 * @param pCmd Completed command, which contains updated host parameters.
1464 *
1465 * @thread EMT
1466 */
1467static int hgcmCompletedWorker(PPDMIHGCMPORT pInterface, int32_t result, PVBOXHGCMCMD pCmd)
1468{
1469 PVMMDEV pThis = RT_FROM_MEMBER(pInterface, VMMDevState, IHGCMPort);
1470#ifdef VBOX_WITH_DTRACE
1471 uint32_t idFunction = 0;
1472 uint32_t idClient = 0;
1473#endif
1474
1475 if (result == VINF_HGCM_SAVE_STATE)
1476 {
1477 /* If the completion routine was called while the HGCM service is saving its state,
1478 * then there is currently nothing to do here. The pCmd stays in the list and will
1479 * be saved later when the VMMDev state is saved, and re-submitted on load.
1480 *
1481 * It is assumed that VMMDev saves its state after the HGCM services (the VMMDev driver
1482 * is attached by the constructor before it registers its SSM state), and therefore
1483 * vmmdevHGCMSaveState does not remove VBOXHGCMCMD structures from the list
1484 * while HGCM is still using them.
1485 */
1486 LogFlowFunc(("VINF_HGCM_SAVE_STATE for command %p\n", pCmd));
1487 return VINF_SUCCESS;
1488 }
1489
1490 VBOXDD_HGCMCALL_COMPLETED_EMT(pCmd, result);
1491
1492 int rc = VINF_SUCCESS;
1493
1494 /*
1495 * The cancellation protocol requires us to remove the command here
1496 * and then check the flag. Cancelled commands must not be written
1497 * back to guest memory.
1498 */
1499 vmmdevHGCMRemoveCommand(pThis, pCmd);
1500
1501 if (RT_LIKELY(!pCmd->fCancelled))
1502 {
1503 if (!pCmd->pvReqLocked)
1504 {
1505 /*
1506 * Request is not locked:
1507 */
1508 VMMDevHGCMRequestHeader *pHeader = (VMMDevHGCMRequestHeader *)RTMemAlloc(pCmd->cbRequest);
1509 if (pHeader)
1510 {
1511 /*
1512 * Read the request from the guest memory for updating.
1513 * The request data is not used for anything but checking the request type.
1514 */
1515 PDMDevHlpPhysRead(pThis->pDevInsR3, pCmd->GCPhys, pHeader, pCmd->cbRequest);
1516 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
1517
1518 /* Verify the request type. This is the only field which is used from the guest memory. */
1519 const VMMDevRequestType enmRequestType = pHeader->header.requestType;
1520 if ( enmRequestType == pCmd->enmRequestType
1521 || enmRequestType == VMMDevReq_HGCMCancel)
1522 {
1523 RT_UNTRUSTED_VALIDATED_FENCE();
1524
1525 /*
1526 * Update parameters and data buffers.
1527 */
1528 switch (enmRequestType)
1529 {
1530#ifdef VBOX_WITH_64_BITS_GUESTS
1531 case VMMDevReq_HGCMCall64:
1532#endif
1533 case VMMDevReq_HGCMCall32:
1534 {
1535 VMMDevHGCMCall *pHGCMCall = (VMMDevHGCMCall *)pHeader;
1536 rc = vmmdevHGCMCompleteCallRequest(pThis, pCmd, pHGCMCall, (uint8_t *)pHeader);
1537#ifdef VBOX_WITH_DTRACE
1538 idFunction = pCmd->u.call.u32Function;
1539 idClient = pCmd->u.call.u32ClientID;
1540#endif
1541 break;
1542 }
1543
1544 case VMMDevReq_HGCMConnect:
1545 {
1546 /* save the client id in the guest request packet */
1547 VMMDevHGCMConnect *pHGCMConnect = (VMMDevHGCMConnect *)pHeader;
1548 pHGCMConnect->u32ClientID = pCmd->u.connect.u32ClientID;
1549 break;
1550 }
1551
1552 default:
1553 /* make compiler happy */
1554 break;
1555 }
1556 }
1557 else
1558 {
1559 /* Guest has changed the command type. */
1560 LogRelMax(50, ("VMMDEV: Invalid HGCM command: pCmd->enmCmdType = 0x%08X, pHeader->header.requestType = 0x%08X\n",
1561 pCmd->enmCmdType, pHeader->header.requestType));
1562
1563 ASSERT_GUEST_FAILED_STMT(rc = VERR_INVALID_PARAMETER);
1564 }
1565
1566 /* Setup return code for the guest. */
1567 if (RT_SUCCESS(rc))
1568 pHeader->result = result;
1569 else
1570 pHeader->result = rc;
1571
1572 /* First write back the request. */
1573 PDMDevHlpPhysWrite(pThis->pDevInsR3, pCmd->GCPhys, pHeader, pCmd->cbRequest);
1574
1575 /* Mark request as processed. */
1576 pHeader->fu32Flags |= VBOX_HGCM_REQ_DONE;
1577
1578 /* Second write the flags to mark the request as processed. */
1579 PDMDevHlpPhysWrite(pThis->pDevInsR3, pCmd->GCPhys + RT_UOFFSETOF(VMMDevHGCMRequestHeader, fu32Flags),
1580 &pHeader->fu32Flags, sizeof(pHeader->fu32Flags));
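 /* Note the ordering: the full request is written back before the
    VBOX_HGCM_REQ_DONE flag, so a guest waiting on that flag should never
    observe the request marked done while its data is still stale. */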
1581
1582 /* Now that the command has been removed from the internal list, notify the guest. */
1583 VMMDevNotifyGuest(pThis, VMMDEV_EVENT_HGCM);
1584
1585 RTMemFree(pHeader);
1586 }
1587 else
1588 {
1589 LogRelMax(10, ("VMMDev: Failed to allocate %u bytes for HGCM request completion!!!\n", pCmd->cbRequest));
1590 }
1591 }
1592 /*
1593 * Request was locked:
1594 */
1595 else
1596 {
1597 VMMDevHGCMRequestHeader volatile *pHeader = (VMMDevHGCMRequestHeader volatile *)pCmd->pvReqLocked;
1598
1599 /* Verify the request type. This is the only field which is used from the guest memory. */
1600 const VMMDevRequestType enmRequestType = pHeader->header.requestType;
1601 if ( enmRequestType == pCmd->enmRequestType
1602 || enmRequestType == VMMDevReq_HGCMCancel)
1603 {
1604 RT_UNTRUSTED_VALIDATED_FENCE();
1605
1606 /*
1607 * Update parameters and data buffers.
1608 */
1609 switch (enmRequestType)
1610 {
1611#ifdef VBOX_WITH_64_BITS_GUESTS
1612 case VMMDevReq_HGCMCall64:
1613#endif
1614 case VMMDevReq_HGCMCall32:
1615 {
1616 VMMDevHGCMCall *pHGCMCall = (VMMDevHGCMCall *)pHeader;
1617 rc = vmmdevHGCMCompleteCallRequest(pThis, pCmd, pHGCMCall, (uint8_t *)pHeader);
1618#ifdef VBOX_WITH_DTRACE
1619 idFunction = pCmd->u.call.u32Function;
1620 idClient = pCmd->u.call.u32ClientID;
1621#endif
1622 break;
1623 }
1624
1625 case VMMDevReq_HGCMConnect:
1626 {
1627 /* save the client id in the guest request packet */
1628 VMMDevHGCMConnect *pHGCMConnect = (VMMDevHGCMConnect *)pHeader;
1629 pHGCMConnect->u32ClientID = pCmd->u.connect.u32ClientID;
1630 break;
1631 }
1632
1633 default:
1634 /* make compiler happy */
1635 break;
1636 }
1637 }
1638 else
1639 {
1640 /* Guest has changed the command type. */
1641 LogRelMax(50, ("VMMDEV: Invalid HGCM command: pCmd->enmCmdType = 0x%08X, pHeader->header.requestType = 0x%08X\n",
1642 pCmd->enmCmdType, pHeader->header.requestType));
1643
1644 ASSERT_GUEST_FAILED_STMT(rc = VERR_INVALID_PARAMETER);
1645 }
1646
1647 /* Setup return code for the guest. */
1648 if (RT_SUCCESS(rc))
1649 pHeader->result = result;
1650 else
1651 pHeader->result = rc;
1652
1653 /* Mark request as processed. */
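            /* The request memory is mapped/locked and shared with the guest, so the DONE flag is set
               with an atomic OR rather than a plain read-modify-write. */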
1654 ASMAtomicOrU32(&pHeader->fu32Flags, VBOX_HGCM_REQ_DONE);
1655
1656            /* Now that the command has been removed from the internal list, notify the guest. */
1657 VMMDevNotifyGuest(pThis, VMMDEV_EVENT_HGCM);
1658 }
1659
1660 /* Set the status to success for now, though we might consider passing
1661 along the vmmdevHGCMCompleteCallRequest errors... */
1662 rc = VINF_SUCCESS;
1663 }
1664 else
1665 {
1666 LogFlowFunc(("Cancelled command %p\n", pCmd));
1667 rc = VERR_CANCELLED;
1668 }
1669
1670#ifndef VBOX_WITHOUT_RELEASE_STATISTICS
1671 /* Save for final stats. */
1672 uint64_t const tsArrival = pCmd->tsArrival;
1673 uint64_t const tsComplete = pCmd->tsComplete;
1674#endif
1675
1676 /* Deallocate the command memory. */
1677 VBOXDD_HGCMCALL_COMPLETED_DONE(pCmd, idFunction, idClient, result);
1678 vmmdevHGCMCmdFree(pThis, pCmd);
1679
1680#ifndef VBOX_WITHOUT_RELEASE_STATISTICS
1681 /* Update stats. */
1682 uint64_t tsNow;
1683 STAM_GET_TS(tsNow);
1684 STAM_REL_PROFILE_ADD_PERIOD(&pThis->StatHgcmCmdCompletion, tsNow - tsComplete);
1685 if (tsArrival != 0)
1686 STAM_REL_PROFILE_ADD_PERIOD(&pThis->StatHgcmCmdTotal, tsNow - tsArrival);
1687#endif
1688
1689 return rc;
1690}
1691
1692/**
1693 * HGCM callback for request completion. Forwards to hgcmCompletedWorker.
1694 *
1695 * @returns VINF_SUCCESS or VERR_CANCELLED.
1696 * @param pInterface Pointer to this PDM interface.
1697 * @param result HGCM completion status code (VBox status code).
1698 * @param pCmd Completed command, which contains updated host parameters.
1699 */
1700DECLCALLBACK(int) hgcmCompleted(PPDMIHGCMPORT pInterface, int32_t result, PVBOXHGCMCMD pCmd)
1701{
1702#if 0 /* This seems to be significantly slower. Half of MsgTotal time seems to be spent here. */
1703 PVMMDEV pThis = RT_FROM_MEMBER(pInterface, VMMDevState, IHGCMPort);
1704 STAM_GET_TS(pCmd->tsComplete);
1705
1706 VBOXDD_HGCMCALL_COMPLETED_REQ(pCmd, result);
1707
1708/** @todo no longer necessary to forward to EMT, but it might be more
1709 * efficient...? */
1710 /* Not safe to execute asynchronously; forward to EMT */
1711 int rc = VMR3ReqCallVoidNoWait(PDMDevHlpGetVM(pThis->pDevInsR3), VMCPUID_ANY,
1712 (PFNRT)hgcmCompletedWorker, 3, pInterface, result, pCmd);
1713 AssertRC(rc);
1714 return VINF_SUCCESS; /* cannot tell if canceled or not... */
1715#else
1716 STAM_GET_TS(pCmd->tsComplete);
1717 VBOXDD_HGCMCALL_COMPLETED_REQ(pCmd, result);
1718 return hgcmCompletedWorker(pInterface, result, pCmd);
1719#endif
1720}
1721
1722/**
1723 * @interface_method_impl{PDMIHGCMPORT,pfnIsCmdRestored}
1724 */
1725DECLCALLBACK(bool) hgcmIsCmdRestored(PPDMIHGCMPORT pInterface, PVBOXHGCMCMD pCmd)
1726{
1727 RT_NOREF(pInterface);
1728 return pCmd && pCmd->fRestored;
1729}
1730
1731/**
1732 * @interface_method_impl{PDMIHGCMPORT,pfnIsCmdCancelled}
1733 */
1734DECLCALLBACK(bool) hgcmIsCmdCancelled(PPDMIHGCMPORT pInterface, PVBOXHGCMCMD pCmd)
1735{
1736 RT_NOREF(pInterface);
1737 return pCmd && pCmd->fCancelled;
1738}
1739
1740/**
1741 * @interface_method_impl{PDMIHGCMPORT,pfnGetRequestor}
1742 */
1743DECLCALLBACK(uint32_t) hgcmGetRequestor(PPDMIHGCMPORT pInterface, PVBOXHGCMCMD pCmd)
1744{
1745 PVMMDEV pThis = RT_FROM_MEMBER(pInterface, VMMDevState, IHGCMPort);
1746 AssertPtrReturn(pCmd, VMMDEV_REQUESTOR_LOWEST);
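    /* Only trust the per-command requestor information if the guest reported the requestor feature;
       otherwise report a legacy guest. */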
1747 if (pThis->guestInfo2.fFeatures & VBOXGSTINFO2_F_REQUESTOR_INFO)
1748 return pCmd->fRequestor;
1749 return VMMDEV_REQUESTOR_LEGACY;
1750}
1751
1752/**
1753 * @interface_method_impl{PDMIHGCMPORT,pfnGetVMMDevSessionId}
1754 */
1755DECLCALLBACK(uint64_t) hgcmGetVMMDevSessionId(PPDMIHGCMPORT pInterface)
1756{
1757 PVMMDEV pThis = RT_FROM_MEMBER(pInterface, VMMDevState, IHGCMPort);
1758 return pThis->idSession;
1759}
1760
1761/** Save information about pending HGCM requests from pThis->listHGCMCmd.
1762 *
1763 * @returns VBox status code.
1764 * @param pThis The VMMDev instance data.
1765 * @param pSSM SSM handle for SSM functions.
1766 *
1767 * @thread EMT
1768 */
1769int vmmdevHGCMSaveState(PVMMDEV pThis, PSSMHANDLE pSSM)
1770{
1771 LogFlowFunc(("\n"));
1772
1773 /* Compute how many commands are pending. */
1774 uint32_t cCmds = 0;
1775 PVBOXHGCMCMD pCmd;
1776 RTListForEach(&pThis->listHGCMCmd, pCmd, VBOXHGCMCMD, node)
1777 {
1778 LogFlowFunc(("pCmd %p\n", pCmd));
1779 ++cCmds;
1780 }
1781 LogFlowFunc(("cCmds = %d\n", cCmds));
1782
1783 /* Save number of commands. */
1784 int rc = SSMR3PutU32(pSSM, cCmds);
1785 AssertRCReturn(rc, rc);
1786
1787 if (cCmds > 0)
1788 {
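        /* Layout per saved command: type, cancelled flag, guest physical address, request size,
           request type, parameter count (0 unless it is a call), the type specific payload, and a
           reserved uint32_t. */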
1789 RTListForEach(&pThis->listHGCMCmd, pCmd, VBOXHGCMCMD, node)
1790 {
1791 LogFlowFunc(("Saving %RGp, size %d\n", pCmd->GCPhys, pCmd->cbRequest));
1792
1793 /** @todo Don't save cancelled requests! It serves no purpose. See restore and
1794 * @bugref{4032#c4} for details. */
1795 SSMR3PutU32 (pSSM, (uint32_t)pCmd->enmCmdType);
1796 SSMR3PutBool (pSSM, pCmd->fCancelled);
1797 SSMR3PutGCPhys (pSSM, pCmd->GCPhys);
1798 SSMR3PutU32 (pSSM, pCmd->cbRequest);
1799 SSMR3PutU32 (pSSM, (uint32_t)pCmd->enmRequestType);
1800 const uint32_t cParms = pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL ? pCmd->u.call.cParms : 0;
1801 rc = SSMR3PutU32(pSSM, cParms);
1802 AssertRCReturn(rc, rc);
1803
1804 if (pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL)
1805 {
1806 SSMR3PutU32 (pSSM, pCmd->u.call.u32ClientID);
1807 rc = SSMR3PutU32(pSSM, pCmd->u.call.u32Function);
1808 AssertRCReturn(rc, rc);
1809
1810 /* Guest parameters. */
1811 uint32_t i;
1812 for (i = 0; i < pCmd->u.call.cParms; ++i)
1813 {
1814 VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
1815
1816 rc = SSMR3PutU32(pSSM, (uint32_t)pGuestParm->enmType);
1817 AssertRCReturn(rc, rc);
1818
1819 if ( pGuestParm->enmType == VMMDevHGCMParmType_32bit
1820 || pGuestParm->enmType == VMMDevHGCMParmType_64bit)
1821 {
1822 const VBOXHGCMPARMVAL * const pVal = &pGuestParm->u.val;
1823 SSMR3PutU64 (pSSM, pVal->u64Value);
1824 SSMR3PutU32 (pSSM, pVal->offValue);
1825 rc = SSMR3PutU32(pSSM, pVal->cbValue);
1826 }
1827 else if ( pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_In
1828 || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_Out
1829 || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr
1830 || pGuestParm->enmType == VMMDevHGCMParmType_PageList
1831 || pGuestParm->enmType == VMMDevHGCMParmType_Embedded
1832 || pGuestParm->enmType == VMMDevHGCMParmType_ContiguousPageList)
1833 {
1834 const VBOXHGCMPARMPTR * const pPtr = &pGuestParm->u.ptr;
1835 SSMR3PutU32 (pSSM, pPtr->cbData);
1836 SSMR3PutU32 (pSSM, pPtr->offFirstPage);
1837 SSMR3PutU32 (pSSM, pPtr->cPages);
1838 rc = SSMR3PutU32(pSSM, pPtr->fu32Direction);
1839
1840 uint32_t iPage;
1841 for (iPage = 0; RT_SUCCESS(rc) && iPage < pPtr->cPages; ++iPage)
1842 rc = SSMR3PutGCPhys(pSSM, pPtr->paPages[iPage]);
1843 }
1844 else if (pGuestParm->enmType == VMMDevHGCMParmType_NoBouncePageList)
1845 {
1846 /* We don't have the page addresses here, so it will need to be
1847 restored from guest memory. This isn't an issue as it is only
1848                           used with services which won't survive a save/restore anyway. */
1849 }
1850 else
1851 {
1852 AssertFailedStmt(rc = VERR_INTERNAL_ERROR);
1853 }
1854 AssertRCReturn(rc, rc);
1855 }
1856 }
1857 else if (pCmd->enmCmdType == VBOXHGCMCMDTYPE_CONNECT)
1858 {
1859 SSMR3PutU32(pSSM, pCmd->u.connect.u32ClientID);
1860 SSMR3PutMem(pSSM, pCmd->u.connect.pLoc, sizeof(*pCmd->u.connect.pLoc));
1861 }
1862 else if (pCmd->enmCmdType == VBOXHGCMCMDTYPE_DISCONNECT)
1863 {
1864 SSMR3PutU32(pSSM, pCmd->u.disconnect.u32ClientID);
1865 }
1866 else
1867 {
1868 AssertFailedReturn(VERR_INTERNAL_ERROR);
1869 }
1870
1871            /* A reserved field that allows extending the saved data for a command in the future. */
1872 rc = SSMR3PutU32(pSSM, 0);
1873 AssertRCReturn(rc, rc);
1874 }
1875 }
1876
1877    /* A reserved field that allows extending the saved data for VMMDevHGCM in the future. */
1878 rc = SSMR3PutU32(pSSM, 0);
1879 AssertRCReturn(rc, rc);
1880
1881 return rc;
1882}
1883
1884/** Load information about pending HGCM requests.
1885 *
1886 * Allocate VBOXHGCMCMD commands and add them to pThis->listHGCMCmd temporarily.
1887 * vmmdevHGCMLoadStateDone will process the temporary list. This includes
1888 * loading the correct fRequestor fields.
1889 *
1890 * @returns VBox status code.
1891 * @param pThis The VMMDev instance data.
1892 * @param pSSM SSM handle for SSM functions.
1893 * @param uVersion Saved state version.
1894 *
1895 * @thread EMT
1896 */
1897int vmmdevHGCMLoadState(PVMMDEV pThis, PSSMHANDLE pSSM, uint32_t uVersion)
1898{
1899 LogFlowFunc(("\n"));
1900
1901 pThis->u32SSMVersion = uVersion; /* For vmmdevHGCMLoadStateDone */
1902
1903 /* Read how many commands were pending. */
1904 uint32_t cCmds = 0;
1905 int rc = SSMR3GetU32(pSSM, &cCmds);
1906 AssertRCReturn(rc, rc);
1907
1908 LogFlowFunc(("cCmds = %d\n", cCmds));
1909
1910 if (uVersion >= VMMDEV_SAVED_STATE_VERSION_HGCM_PARAMS)
1911 {
1912 /* Saved information about all HGCM parameters. */
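        /* The fields are read back in exactly the order vmmdevHGCMSaveState wrote them. */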
1913 uint32_t u32;
1914
1915 uint32_t iCmd;
1916 for (iCmd = 0; iCmd < cCmds; ++iCmd)
1917 {
1918 /* Command fields. */
1919 VBOXHGCMCMDTYPE enmCmdType;
1920 bool fCancelled;
1921 RTGCPHYS GCPhys;
1922 uint32_t cbRequest;
1923 VMMDevRequestType enmRequestType;
1924 uint32_t cParms;
1925
1926 SSMR3GetU32 (pSSM, &u32);
1927 enmCmdType = (VBOXHGCMCMDTYPE)u32;
1928 SSMR3GetBool (pSSM, &fCancelled);
1929 SSMR3GetGCPhys (pSSM, &GCPhys);
1930 SSMR3GetU32 (pSSM, &cbRequest);
1931 SSMR3GetU32 (pSSM, &u32);
1932 enmRequestType = (VMMDevRequestType)u32;
1933 rc = SSMR3GetU32(pSSM, &cParms);
1934 AssertRCReturn(rc, rc);
1935
1936 PVBOXHGCMCMD pCmd = vmmdevHGCMCmdAlloc(pThis, enmCmdType, GCPhys, cbRequest, cParms, 0 /*fRequestor*/);
1937 AssertReturn(pCmd, VERR_NO_MEMORY);
1938
1939 pCmd->fCancelled = fCancelled;
1940 pCmd->GCPhys = GCPhys;
1941 pCmd->cbRequest = cbRequest;
1942 pCmd->enmRequestType = enmRequestType;
1943
1944 if (enmCmdType == VBOXHGCMCMDTYPE_CALL)
1945 {
1946 SSMR3GetU32 (pSSM, &pCmd->u.call.u32ClientID);
1947 rc = SSMR3GetU32(pSSM, &pCmd->u.call.u32Function);
1948 AssertRCReturn(rc, rc);
1949
1950 /* Guest parameters. */
1951 uint32_t i;
1952 for (i = 0; i < cParms; ++i)
1953 {
1954 VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
1955
1956 rc = SSMR3GetU32(pSSM, &u32);
1957 AssertRCReturn(rc, rc);
1958 pGuestParm->enmType = (HGCMFunctionParameterType)u32;
1959
1960 if ( pGuestParm->enmType == VMMDevHGCMParmType_32bit
1961 || pGuestParm->enmType == VMMDevHGCMParmType_64bit)
1962 {
1963 VBOXHGCMPARMVAL * const pVal = &pGuestParm->u.val;
1964 SSMR3GetU64 (pSSM, &pVal->u64Value);
1965 SSMR3GetU32 (pSSM, &pVal->offValue);
1966 rc = SSMR3GetU32(pSSM, &pVal->cbValue);
1967 }
1968 else if ( pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_In
1969 || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_Out
1970 || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr
1971 || pGuestParm->enmType == VMMDevHGCMParmType_PageList
1972 || pGuestParm->enmType == VMMDevHGCMParmType_Embedded
1973 || pGuestParm->enmType == VMMDevHGCMParmType_ContiguousPageList)
1974 {
1975 VBOXHGCMPARMPTR * const pPtr = &pGuestParm->u.ptr;
1976 SSMR3GetU32 (pSSM, &pPtr->cbData);
1977 SSMR3GetU32 (pSSM, &pPtr->offFirstPage);
1978 SSMR3GetU32 (pSSM, &pPtr->cPages);
1979 rc = SSMR3GetU32(pSSM, &pPtr->fu32Direction);
1980 if (RT_SUCCESS(rc))
1981 {
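                            /* A single page buffer reuses the embedded GCPhysSinglePage field, so no
                               separate page array needs to be allocated. */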
1982 if (pPtr->cPages == 1)
1983 pPtr->paPages = &pPtr->GCPhysSinglePage;
1984 else
1985 {
1986 AssertReturn( pGuestParm->enmType != VMMDevHGCMParmType_Embedded
1987 && pGuestParm->enmType != VMMDevHGCMParmType_ContiguousPageList, VERR_INTERNAL_ERROR_3);
1988 pPtr->paPages = (RTGCPHYS *)RTMemAlloc(pPtr->cPages * sizeof(RTGCPHYS));
1989 AssertStmt(pPtr->paPages, rc = VERR_NO_MEMORY);
1990 }
1991
1992 if (RT_SUCCESS(rc))
1993 {
1994 uint32_t iPage;
1995 for (iPage = 0; iPage < pPtr->cPages; ++iPage)
1996 rc = SSMR3GetGCPhys(pSSM, &pPtr->paPages[iPage]);
1997 }
1998 }
1999 }
2000 else if (pGuestParm->enmType == VMMDevHGCMParmType_NoBouncePageList)
2001 {
2002                        /* This request type can only be restored from guest memory for now. */
2003 pCmd->fRestoreFromGuestMem = true;
2004 }
2005 else
2006 {
2007 AssertFailedStmt(rc = VERR_INTERNAL_ERROR);
2008 }
2009 AssertRCReturn(rc, rc);
2010 }
2011 }
2012 else if (enmCmdType == VBOXHGCMCMDTYPE_CONNECT)
2013 {
2014 SSMR3GetU32(pSSM, &pCmd->u.connect.u32ClientID);
2015 rc = SSMR3GetMem(pSSM, pCmd->u.connect.pLoc, sizeof(*pCmd->u.connect.pLoc));
2016 AssertRCReturn(rc, rc);
2017 }
2018 else if (enmCmdType == VBOXHGCMCMDTYPE_DISCONNECT)
2019 {
2020 rc = SSMR3GetU32(pSSM, &pCmd->u.disconnect.u32ClientID);
2021 AssertRCReturn(rc, rc);
2022 }
2023 else
2024 {
2025 AssertFailedReturn(VERR_INTERNAL_ERROR);
2026 }
2027
2028            /* A reserved field that allows extending the saved data for a command in the future. */
2029 rc = SSMR3GetU32(pSSM, &u32);
2030 AssertRCReturn(rc, rc);
2031
2032 /*
2033 * Do not restore cancelled calls. Why do we save them to start with?
2034 *
2035 * The guest memory no longer contains a valid request! So, it is not
2036 * possible to restore it. The memory is often reused for a new request
2037 * by now and we will end up trying to complete that more than once if
2038 * we restore a cancelled call. In some cases VERR_HGCM_INVALID_CLIENT_ID
2039 * is returned, though it might just be silent memory corruption.
2040 */
2041 /* See current version above. */
2042 if (!fCancelled)
2043 vmmdevHGCMAddCommand(pThis, pCmd);
2044 else
2045 {
2046                Log(("vmmdevHGCMLoadState: Skipping cancelled request: enmCmdType=%d GCPhys=%RGp LB %#x\n",
2047 enmCmdType, GCPhys, cbRequest));
2048 vmmdevHGCMCmdFree(pThis, pCmd);
2049 }
2050 }
2051
2052        /* A reserved field that allows extending the saved data for VMMDevHGCM in the future. */
2053 rc = SSMR3GetU32(pSSM, &u32);
2054 AssertRCReturn(rc, rc);
2055 }
2056 else if (uVersion >= 9)
2057 {
2058 /* Version 9+: Load information about commands. Pre-rewrite. */
2059 uint32_t u32;
2060
2061 uint32_t iCmd;
2062 for (iCmd = 0; iCmd < cCmds; ++iCmd)
2063 {
2064 VBOXHGCMCMDTYPE enmCmdType;
2065 bool fCancelled;
2066 RTGCPHYS GCPhys;
2067 uint32_t cbRequest;
2068 uint32_t cLinAddrs;
2069
2070 SSMR3GetGCPhys (pSSM, &GCPhys);
2071 rc = SSMR3GetU32(pSSM, &cbRequest);
2072 AssertRCReturn(rc, rc);
2073
2074 LogFlowFunc(("Restoring %RGp size %x bytes\n", GCPhys, cbRequest));
2075
2076            /* For uVersion <= 12, this was the size of the entire command.
2077 * Now the command is reconstructed in vmmdevHGCMLoadStateDone.
2078 */
2079 if (uVersion <= 12)
2080 SSMR3Skip(pSSM, sizeof (uint32_t));
2081
2082 SSMR3GetU32 (pSSM, &u32);
2083 enmCmdType = (VBOXHGCMCMDTYPE)u32;
2084 SSMR3GetBool (pSSM, &fCancelled);
2085 /* How many linear pointers. Always 0 if not a call command. */
2086 rc = SSMR3GetU32(pSSM, &cLinAddrs);
2087 AssertRCReturn(rc, rc);
2088
2089 PVBOXHGCMCMD pCmd = vmmdevHGCMCmdAlloc(pThis, enmCmdType, GCPhys, cbRequest, cLinAddrs, 0 /*fRequestor*/);
2090 AssertReturn(pCmd, VERR_NO_MEMORY);
2091
2092 pCmd->fCancelled = fCancelled;
2093 pCmd->GCPhys = GCPhys;
2094 pCmd->cbRequest = cbRequest;
2095
2096 if (cLinAddrs > 0)
2097 {
2098 /* Skip number of pages for all LinAddrs in this command. */
2099 SSMR3Skip(pSSM, sizeof(uint32_t));
2100
2101 uint32_t i;
2102 for (i = 0; i < cLinAddrs; ++i)
2103 {
2104 VBOXHGCMPARMPTR * const pPtr = &pCmd->u.call.paGuestParms[i].u.ptr;
2105
2106 /* Index of the parameter. Use cbData field to store the index. */
2107 SSMR3GetU32 (pSSM, &pPtr->cbData);
2108 SSMR3GetU32 (pSSM, &pPtr->offFirstPage);
2109 rc = SSMR3GetU32(pSSM, &pPtr->cPages);
2110 AssertRCReturn(rc, rc);
2111
2112 pPtr->paPages = (RTGCPHYS *)RTMemAlloc(pPtr->cPages * sizeof(RTGCPHYS));
2113 AssertReturn(pPtr->paPages, VERR_NO_MEMORY);
2114
2115 uint32_t iPage;
2116 for (iPage = 0; iPage < pPtr->cPages; ++iPage)
2117 rc = SSMR3GetGCPhys(pSSM, &pPtr->paPages[iPage]);
2118 }
2119 }
2120
2121            /* A reserved field that allows extending the saved data for a command in the future. */
2122 rc = SSMR3GetU32(pSSM, &u32);
2123 AssertRCReturn(rc, rc);
2124
2125 /* See current version above. */
2126 if (!fCancelled)
2127 vmmdevHGCMAddCommand(pThis, pCmd);
2128 else
2129 {
2130                Log(("vmmdevHGCMLoadState: Skipping cancelled request: enmCmdType=%d GCPhys=%RGp LB %#x\n",
2131 enmCmdType, GCPhys, cbRequest));
2132 vmmdevHGCMCmdFree(pThis, pCmd);
2133 }
2134 }
2135
2136        /* A reserved field that allows extending the saved data for VMMDevHGCM in the future. */
2137 rc = SSMR3GetU32(pSSM, &u32);
2138 AssertRCReturn(rc, rc);
2139 }
2140 else
2141 {
2142 /* Ancient. Only the guest physical address is saved. */
2143 uint32_t iCmd;
2144 for (iCmd = 0; iCmd < cCmds; ++iCmd)
2145 {
2146 RTGCPHYS GCPhys;
2147 uint32_t cbRequest;
2148
2149 SSMR3GetGCPhys(pSSM, &GCPhys);
2150 rc = SSMR3GetU32(pSSM, &cbRequest);
2151 AssertRCReturn(rc, rc);
2152
2153 LogFlowFunc(("Restoring %RGp size %x bytes\n", GCPhys, cbRequest));
2154
2155 PVBOXHGCMCMD pCmd = vmmdevHGCMCmdAlloc(pThis, VBOXHGCMCMDTYPE_LOADSTATE, GCPhys, cbRequest, 0, 0 /*fRequestor*/);
2156 AssertReturn(pCmd, VERR_NO_MEMORY);
2157
2158 vmmdevHGCMAddCommand(pThis, pCmd);
2159 }
2160 }
2161
2162 return rc;
2163}
2164
2165/** Restore HGCM connect command loaded from old saved state.
2166 *
2167 * @returns VBox status code that the guest should see.
2168 * @param pThis The VMMDev instance data.
2169 * @param u32SSMVersion The saved state version the command has been loaded from.
2170 * @param   pLoadedCmd      Command loaded from saved state; it is incomplete and needs restoration.
2171 * @param pReq The guest request (cached in host memory).
2172 * @param cbReq Size of the guest request.
2173 * @param enmRequestType Type of the HGCM request.
2174 * @param ppRestoredCmd Where to store pointer to newly allocated restored command.
2175 */
2176static int vmmdevHGCMRestoreConnect(PVMMDEV pThis, uint32_t u32SSMVersion, const VBOXHGCMCMD *pLoadedCmd,
2177 VMMDevHGCMConnect *pReq, uint32_t cbReq, VMMDevRequestType enmRequestType,
2178 VBOXHGCMCMD **ppRestoredCmd)
2179{
2180 RT_NOREF(pThis);
2181
2182 int rc = VINF_SUCCESS;
2183
2184 /* Verify the request. */
2185 ASSERT_GUEST_RETURN(cbReq >= sizeof(*pReq), VERR_MISMATCH);
2186 if (u32SSMVersion >= 9)
2187 ASSERT_GUEST_RETURN(pLoadedCmd->enmCmdType == VBOXHGCMCMDTYPE_CONNECT, VERR_MISMATCH);
2188
2189 PVBOXHGCMCMD pCmd = vmmdevHGCMCmdAlloc(pThis, VBOXHGCMCMDTYPE_CONNECT, pLoadedCmd->GCPhys, cbReq, 0,
2190 pReq->header.header.fRequestor);
2191 AssertReturn(pCmd, VERR_NO_MEMORY);
2192
2193 Assert(pLoadedCmd->fCancelled == false);
2194 pCmd->fCancelled = false;
2195 pCmd->fRestored = true;
2196 pCmd->enmRequestType = enmRequestType;
2197
2198 vmmdevHGCMConnectFetch(pReq, pCmd);
2199
2200 if (RT_SUCCESS(rc))
2201 *ppRestoredCmd = pCmd;
2202
2203 return rc;
2204}
2205
2206/** Restore HGCM disconnect command loaded from old saved state.
2207 *
2208 * @returns VBox status code that the guest should see.
2209 * @param pThis The VMMDev instance data.
2210 * @param u32SSMVersion The saved state version the command has been loaded from.
2211 * @param   pLoadedCmd      Command loaded from saved state; it is incomplete and needs restoration.
2212 * @param pReq The guest request (cached in host memory).
2213 * @param cbReq Size of the guest request.
2214 * @param enmRequestType Type of the HGCM request.
2215 * @param ppRestoredCmd Where to store pointer to newly allocated restored command.
2216 */
2217static int vmmdevHGCMRestoreDisconnect(PVMMDEV pThis, uint32_t u32SSMVersion, const VBOXHGCMCMD *pLoadedCmd,
2218 VMMDevHGCMDisconnect *pReq, uint32_t cbReq, VMMDevRequestType enmRequestType,
2219 VBOXHGCMCMD **ppRestoredCmd)
2220{
2221 RT_NOREF(pThis);
2222
2223 int rc = VINF_SUCCESS;
2224
2225 /* Verify the request. */
2226 ASSERT_GUEST_RETURN(cbReq >= sizeof(*pReq), VERR_MISMATCH);
2227 if (u32SSMVersion >= 9)
2228 ASSERT_GUEST_RETURN(pLoadedCmd->enmCmdType == VBOXHGCMCMDTYPE_DISCONNECT, VERR_MISMATCH);
2229
2230 PVBOXHGCMCMD pCmd = vmmdevHGCMCmdAlloc(pThis, VBOXHGCMCMDTYPE_DISCONNECT, pLoadedCmd->GCPhys, cbReq, 0,
2231 pReq->header.header.fRequestor);
2232 AssertReturn(pCmd, VERR_NO_MEMORY);
2233
2234 Assert(pLoadedCmd->fCancelled == false);
2235 pCmd->fCancelled = false;
2236 pCmd->fRestored = true;
2237 pCmd->enmRequestType = enmRequestType;
2238
2239 vmmdevHGCMDisconnectFetch(pReq, pCmd);
2240
2241 if (RT_SUCCESS(rc))
2242 *ppRestoredCmd = pCmd;
2243
2244 return rc;
2245}
2246
2247/** Restore HGCM call command loaded from old saved state.
2248 *
2249 * @returns VBox status code that the guest should see.
2250 * @param pThis The VMMDev instance data.
2251 * @param u32SSMVersion The saved state version the command has been loaded from.
2252 * @param   pLoadedCmd      Command loaded from saved state; it is incomplete and needs restoration.
2253 * @param pReq The guest request (cached in host memory).
2254 * @param cbReq Size of the guest request.
2255 * @param enmRequestType Type of the HGCM request.
2256 * @param ppRestoredCmd Where to store pointer to newly allocated restored command.
2257 */
2258static int vmmdevHGCMRestoreCall(PVMMDEV pThis, uint32_t u32SSMVersion, const VBOXHGCMCMD *pLoadedCmd,
2259 VMMDevHGCMCall *pReq, uint32_t cbReq, VMMDevRequestType enmRequestType,
2260 VBOXHGCMCMD **ppRestoredCmd)
2261{
2262 int rc = VINF_SUCCESS;
2263
2264 /* Verify the request. */
2265 ASSERT_GUEST_RETURN(cbReq >= sizeof(*pReq), VERR_MISMATCH);
2266 if (u32SSMVersion >= 9)
2267 {
2268 ASSERT_GUEST_RETURN(pLoadedCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL, VERR_MISMATCH);
2269 Assert(pLoadedCmd->fCancelled == false);
2270 }
2271
2272 PVBOXHGCMCMD pCmd;
2273 uint32_t cbHGCMParmStruct;
2274 rc = vmmdevHGCMCallAlloc(pThis, pReq, cbReq, pLoadedCmd->GCPhys, enmRequestType, &pCmd, &cbHGCMParmStruct);
2275 if (RT_FAILURE(rc))
2276 return rc;
2277
2278 /* pLoadedCmd is fake, it does not contain actual call parameters. Only pagelists for LinAddr. */
2279 pCmd->fCancelled = false;
2280 pCmd->fRestored = true;
2281 pCmd->enmRequestType = enmRequestType;
2282
2283 rc = vmmdevHGCMCallFetchGuestParms(pThis, pCmd, pReq, cbReq, enmRequestType, cbHGCMParmStruct);
2284 if (RT_SUCCESS(rc))
2285 {
2286 /* Update LinAddr parameters from pLoadedCmd.
2287 * pLoadedCmd->u.call.cParms is actually the number of LinAddrs, see vmmdevHGCMLoadState.
2288 */
2289 uint32_t iLinAddr;
2290 for (iLinAddr = 0; iLinAddr < pLoadedCmd->u.call.cParms; ++iLinAddr)
2291 {
2292 VBOXHGCMGUESTPARM * const pLoadedParm = &pLoadedCmd->u.call.paGuestParms[iLinAddr];
2293 /* pLoadedParm->cbData is actually index of the LinAddr parameter, see vmmdevHGCMLoadState. */
2294 const uint32_t iParm = pLoadedParm->u.ptr.cbData;
2295 ASSERT_GUEST_STMT_BREAK(iParm < pCmd->u.call.cParms, rc = VERR_MISMATCH);
2296
2297 VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[iParm];
2298 ASSERT_GUEST_STMT_BREAK( pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_In
2299 || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_Out
2300 || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr,
2301 rc = VERR_MISMATCH);
2302 ASSERT_GUEST_STMT_BREAK( pLoadedParm->u.ptr.offFirstPage == pGuestParm->u.ptr.offFirstPage
2303 && pLoadedParm->u.ptr.cPages == pGuestParm->u.ptr.cPages,
2304 rc = VERR_MISMATCH);
2305 memcpy(pGuestParm->u.ptr.paPages, pLoadedParm->u.ptr.paPages, pGuestParm->u.ptr.cPages * sizeof(RTGCPHYS));
2306 }
2307 }
2308
2309 if (RT_SUCCESS(rc))
2310 *ppRestoredCmd = pCmd;
2311 else
2312 vmmdevHGCMCmdFree(pThis, pCmd);
2313
2314 return rc;
2315}
2316
2317/** Allocate and initialize an HGCM command using the given request (pReqHdr)
2318 * and the command loaded from saved state (pLoadedCmd).
2319 *
2320 * @returns VBox status code that the guest should see.
2321 * @param pThis The VMMDev instance data.
2322 * @param u32SSMVersion Saved state version.
2323 * @param pLoadedCmd HGCM command which needs restoration.
2324 * @param pReqHdr The request (cached in host memory).
2325 * @param cbReq Size of the entire request (including HGCM parameters).
2326 * @param ppRestoredCmd Where to store pointer to restored command.
2327 */
2328static int vmmdevHGCMRestoreCommand(PVMMDEV pThis, uint32_t u32SSMVersion, const VBOXHGCMCMD *pLoadedCmd,
2329 const VMMDevHGCMRequestHeader *pReqHdr, uint32_t cbReq,
2330 VBOXHGCMCMD **ppRestoredCmd)
2331{
2332 int rc = VINF_SUCCESS;
2333
2334 /* Verify the request. */
2335 ASSERT_GUEST_RETURN(cbReq >= sizeof(VMMDevHGCMRequestHeader), VERR_MISMATCH);
2336 ASSERT_GUEST_RETURN(cbReq == pReqHdr->header.size, VERR_MISMATCH);
2337
2338 const VMMDevRequestType enmRequestType = pReqHdr->header.requestType;
2339 switch (enmRequestType)
2340 {
2341 case VMMDevReq_HGCMConnect:
2342 {
2343 VMMDevHGCMConnect *pReq = (VMMDevHGCMConnect *)pReqHdr;
2344 rc = vmmdevHGCMRestoreConnect(pThis, u32SSMVersion, pLoadedCmd, pReq, cbReq, enmRequestType,
2345 ppRestoredCmd);
2346 break;
2347 }
2348
2349 case VMMDevReq_HGCMDisconnect:
2350 {
2351 VMMDevHGCMDisconnect *pReq = (VMMDevHGCMDisconnect *)pReqHdr;
2352 rc = vmmdevHGCMRestoreDisconnect(pThis, u32SSMVersion, pLoadedCmd, pReq, cbReq, enmRequestType,
2353 ppRestoredCmd);
2354 break;
2355 }
2356
2357#ifdef VBOX_WITH_64_BITS_GUESTS
2358 case VMMDevReq_HGCMCall64:
2359#endif
2360 case VMMDevReq_HGCMCall32:
2361 {
2362 VMMDevHGCMCall *pReq = (VMMDevHGCMCall *)pReqHdr;
2363 rc = vmmdevHGCMRestoreCall(pThis, u32SSMVersion, pLoadedCmd, pReq, cbReq, enmRequestType,
2364 ppRestoredCmd);
2365 break;
2366 }
2367
2368 default:
2369 ASSERT_GUEST_FAILED_RETURN(VERR_MISMATCH);
2370 }
2371
2372 return rc;
2373}
2374
2375/** Resubmit pending HGCM commands which were loaded from saved state.
2376 *
2377 * @returns VBox status code.
2378 * @param pThis The VMMDev instance data.
2379 *
2380 * @thread EMT
2381 */
2382int vmmdevHGCMLoadStateDone(PVMMDEV pThis)
2383{
2384 /*
2385 * Resubmit pending HGCM commands to services.
2386 *
2387     * pThis->listHGCMCmd contains the commands loaded by vmmdevHGCMLoadState.
2388 *
2389 * Legacy saved states (pre VMMDEV_SAVED_STATE_VERSION_HGCM_PARAMS)
2390 * do not have enough information about the command parameters,
2391 * therefore it is necessary to reload at least some data from the
2392 * guest memory to construct commands.
2393 *
2394 * There are two types of legacy saved states which contain:
2395 * 1) the guest physical address and size of request;
2396 * 2) additionally page lists for LinAddr parameters.
2397 *
2398 * Legacy commands have enmCmdType = VBOXHGCMCMDTYPE_LOADSTATE?
2399 */
2400
2401    int rcFunc = VINF_SUCCESS; /* If this becomes a failure status, the whole function fails, i.e. the VM will not start. */
2402
2403 /* Get local copy of the list of loaded commands. */
2404 RTLISTANCHOR listLoadedCommands;
2405 RTListMove(&listLoadedCommands, &pThis->listHGCMCmd);
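    /* Each command is re-added to pThis->listHGCMCmd by vmmdevHGCMAddCommand as it is resubmitted
       below, so iterate over a detached copy of the list. */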
2406
2407 /* Resubmit commands. */
2408 PVBOXHGCMCMD pCmd, pNext;
2409 RTListForEachSafe(&listLoadedCommands, pCmd, pNext, VBOXHGCMCMD, node)
2410 {
2411        int rcCmd = VINF_SUCCESS; /* If this becomes a failure status, the HGCM command is failed for the guest. */
2412
2413 RTListNodeRemove(&pCmd->node);
2414
2415 /*
2416 * Re-read the request from the guest memory.
2417 * It will be used to:
2418 * * reconstruct commands if legacy saved state has been restored;
2419 * * report an error to the guest if resubmit failed.
2420 */
2421 VMMDevHGCMRequestHeader *pReqHdr = (VMMDevHGCMRequestHeader *)RTMemAlloc(pCmd->cbRequest);
2422 AssertBreakStmt(pReqHdr, vmmdevHGCMCmdFree(pThis, pCmd); rcFunc = VERR_NO_MEMORY);
2423
2424 PDMDevHlpPhysRead(pThis->pDevInsR3, pCmd->GCPhys, pReqHdr, pCmd->cbRequest);
2425 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2426
2427 if (pThis->pHGCMDrv)
2428 {
2429 /*
2430 * Reconstruct legacy commands.
2431 */
2432 if (RT_LIKELY( pThis->u32SSMVersion >= VMMDEV_SAVED_STATE_VERSION_HGCM_PARAMS
2433 && !pCmd->fRestoreFromGuestMem))
2434 { /* likely */ }
2435 else
2436 {
2437 PVBOXHGCMCMD pRestoredCmd = NULL;
2438 rcCmd = vmmdevHGCMRestoreCommand(pThis, pThis->u32SSMVersion, pCmd,
2439 pReqHdr, pCmd->cbRequest, &pRestoredCmd);
2440 if (RT_SUCCESS(rcCmd))
2441 {
2442 Assert(pCmd != pRestoredCmd); /* vmmdevHGCMRestoreCommand must allocate restored command. */
2443 vmmdevHGCMCmdFree(pThis, pCmd);
2444 pCmd = pRestoredCmd;
2445 }
2446 }
2447
2448 /* Resubmit commands. */
2449 if (RT_SUCCESS(rcCmd))
2450 {
2451 switch (pCmd->enmCmdType)
2452 {
2453 case VBOXHGCMCMDTYPE_CONNECT:
2454 {
2455 vmmdevHGCMAddCommand(pThis, pCmd);
2456 rcCmd = pThis->pHGCMDrv->pfnConnect(pThis->pHGCMDrv, pCmd, pCmd->u.connect.pLoc,
2457 &pCmd->u.connect.u32ClientID);
2458 if (RT_FAILURE(rcCmd))
2459 vmmdevHGCMRemoveCommand(pThis, pCmd);
2460 break;
2461 }
2462
2463 case VBOXHGCMCMDTYPE_DISCONNECT:
2464 {
2465 vmmdevHGCMAddCommand(pThis, pCmd);
2466 rcCmd = pThis->pHGCMDrv->pfnDisconnect(pThis->pHGCMDrv, pCmd, pCmd->u.disconnect.u32ClientID);
2467 if (RT_FAILURE(rcCmd))
2468 vmmdevHGCMRemoveCommand(pThis, pCmd);
2469 break;
2470 }
2471
2472 case VBOXHGCMCMDTYPE_CALL:
2473 {
2474 rcCmd = vmmdevHGCMInitHostParameters(pThis, pCmd, (uint8_t const *)pReqHdr);
2475 if (RT_SUCCESS(rcCmd))
2476 {
2477 vmmdevHGCMAddCommand(pThis, pCmd);
2478
2479 /* Pass the function call to HGCM connector for actual processing */
2480 uint64_t tsNow;
2481 STAM_GET_TS(tsNow);
2482 rcCmd = pThis->pHGCMDrv->pfnCall(pThis->pHGCMDrv, pCmd,
2483 pCmd->u.call.u32ClientID, pCmd->u.call.u32Function,
2484 pCmd->u.call.cParms, pCmd->u.call.paHostParms, tsNow);
2485 if (RT_FAILURE(rcCmd))
2486 {
2487 LogFunc(("pfnCall rc = %Rrc\n", rcCmd));
2488 vmmdevHGCMRemoveCommand(pThis, pCmd);
2489 }
2490 }
2491 break;
2492 }
2493
2494 default:
2495 AssertFailedStmt(rcCmd = VERR_INTERNAL_ERROR);
2496 }
2497 }
2498 }
2499 else
2500 AssertFailedStmt(rcCmd = VERR_INTERNAL_ERROR);
2501
2502 if (RT_SUCCESS(rcCmd))
2503 { /* likely */ }
2504 else
2505 {
2506 /* Return the error to the guest. Guest may try to repeat the call. */
2507 pReqHdr->result = rcCmd;
2508 pReqHdr->header.rc = rcCmd;
2509 pReqHdr->fu32Flags |= VBOX_HGCM_REQ_DONE;
2510
2511 /* Write back only the header. */
2512 PDMDevHlpPhysWrite(pThis->pDevInsR3, pCmd->GCPhys, pReqHdr, sizeof(*pReqHdr));
2513
2514 VMMDevNotifyGuest(pThis, VMMDEV_EVENT_HGCM);
2515
2516 /* Deallocate the command memory. */
2517 vmmdevHGCMCmdFree(pThis, pCmd);
2518 }
2519
2520 RTMemFree(pReqHdr);
2521 }
2522
2523 if (RT_FAILURE(rcFunc))
2524 {
2525 RTListForEachSafe(&listLoadedCommands, pCmd, pNext, VBOXHGCMCMD, node)
2526 {
2527 RTListNodeRemove(&pCmd->node);
2528 vmmdevHGCMCmdFree(pThis, pCmd);
2529 }
2530 }
2531
2532 return rcFunc;
2533}
2534
2535
2536/**
2537 * Counterpart to vmmdevHGCMInit().
2538 *
2539 * @param pThis The VMMDev instance data.
2540 */
2541void vmmdevHGCMDestroy(PVMMDEV pThis)
2542{
2543 LogFlowFunc(("\n"));
2544
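    /* Only clean up the command list if the HGCM state (critical section) was actually initialized. */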
2545 if (RTCritSectIsInitialized(&pThis->critsectHGCMCmdList))
2546 {
2547 PVBOXHGCMCMD pCmd, pNext;
2548 RTListForEachSafe(&pThis->listHGCMCmd, pCmd, pNext, VBOXHGCMCMD, node)
2549 {
2550 vmmdevHGCMRemoveCommand(pThis, pCmd);
2551 vmmdevHGCMCmdFree(pThis, pCmd);
2552 }
2553
2554 RTCritSectDelete(&pThis->critsectHGCMCmdList);
2555 }
2556
2557 AssertCompile(NIL_RTMEMCACHE == (RTMEMCACHE)0);
2558 if (pThis->hHgcmCmdCache != NIL_RTMEMCACHE)
2559 {
2560 RTMemCacheDestroy(pThis->hHgcmCmdCache);
2561 pThis->hHgcmCmdCache = NIL_RTMEMCACHE;
2562 }
2563}
2564
2565
2566/**
2567 * Initializes the HGCM specific state.
2568 *
2569 * Keeps VBOXHGCMCMDCACHED and friends local.
2570 *
2571 * @returns VBox status code.
2572 * @param pThis The VMMDev instance data.
2573 */
2574int vmmdevHGCMInit(PVMMDEV pThis)
2575{
2576 LogFlowFunc(("\n"));
2577
2578 RTListInit(&pThis->listHGCMCmd);
2579
2580 int rc = RTCritSectInit(&pThis->critsectHGCMCmdList);
2581 AssertLogRelRCReturn(rc, rc);
2582
2583 rc = RTMemCacheCreate(&pThis->hHgcmCmdCache, sizeof(VBOXHGCMCMDCACHED), 64, _1M, NULL, NULL, NULL, 0);
2584 AssertLogRelRCReturn(rc, rc);
2585
2586 pThis->u32HGCMEnabled = 0;
2587
2588 return VINF_SUCCESS;
2589}
2590