VirtualBox

source: vbox/trunk/src/VBox/Devices/VMMDev/VMMDevHGCM.cpp@ 91905

Last change on this file since 91905 was 91887, checked in by vboxsync, 3 years ago

Devices/VMMDev: Access SSM API only through the device helper callbacks, bugref:10074

1/* $Id: VMMDevHGCM.cpp 91887 2021-10-20 12:02:36Z vboxsync $ */
2/** @file
3 * VMMDev - HGCM - Host-Guest Communication Manager Device.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEV_VMM
23#include <iprt/alloc.h>
24#include <iprt/asm.h>
25#include <iprt/assert.h>
26#include <iprt/param.h>
27#include <iprt/string.h>
28
29#include <VBox/AssertGuest.h>
30#include <VBox/err.h>
31#include <VBox/hgcmsvc.h>
32#include <VBox/log.h>
33
34#include "VMMDevHGCM.h"
35
36#ifdef DEBUG
37# define VBOX_STRICT_GUEST
38#endif
39
40#ifdef VBOX_WITH_DTRACE
41# include "dtrace/VBoxDD.h"
42#else
43# define VBOXDD_HGCMCALL_ENTER(a,b,c,d) do { } while (0)
44# define VBOXDD_HGCMCALL_COMPLETED_REQ(a,b) do { } while (0)
45# define VBOXDD_HGCMCALL_COMPLETED_EMT(a,b) do { } while (0)
46# define VBOXDD_HGCMCALL_COMPLETED_DONE(a,b,c,d) do { } while (0)
47#endif
48
49
50/*********************************************************************************************************************************
51* Structures and Typedefs *
52*********************************************************************************************************************************/
53typedef enum VBOXHGCMCMDTYPE
54{
55 VBOXHGCMCMDTYPE_LOADSTATE = 0,
56 VBOXHGCMCMDTYPE_CONNECT,
57 VBOXHGCMCMDTYPE_DISCONNECT,
58 VBOXHGCMCMDTYPE_CALL,
59 VBOXHGCMCMDTYPE_SizeHack = 0x7fffffff
60} VBOXHGCMCMDTYPE;
61
62/**
63 * Information about a 32 or 64 bit parameter.
64 */
65typedef struct VBOXHGCMPARMVAL
66{
67 /** Actual value. Both 32 and 64 bit is saved here. */
68 uint64_t u64Value;
69
70 /** Offset from the start of the request where the value is stored. */
71 uint32_t offValue;
72
73 /** Size of the value: 4 for 32 bit and 8 for 64 bit. */
74 uint32_t cbValue;
75
76} VBOXHGCMPARMVAL;
77
78/**
79 * Information about a pointer parameter.
80 */
81typedef struct VBOXHGCMPARMPTR
82{
83 /** Size of the buffer described by the pointer parameter. */
84 uint32_t cbData;
85
86/** @todo save 8 bytes here by putting offFirstPage, cPages, and fu32Direction
87 * into bitfields like in VBOXHGCMPARMPAGES. */
88 /** Offset in the first physical page of the region. */
89 uint32_t offFirstPage;
90
91 /** How many pages. */
92 uint32_t cPages;
93
94 /** How the buffer should be copied VBOX_HGCM_F_PARM_*. */
95 uint32_t fu32Direction;
96
97 /** Pointer to array of the GC physical addresses for these pages.
98 * It is assumed that the physical address of the locked resident guest page
99 * does not change. */
100 RTGCPHYS *paPages;
101
102 /** For single page requests. */
103 RTGCPHYS GCPhysSinglePage;
104
105} VBOXHGCMPARMPTR;
106
107
108/**
109 * Pages w/o bounce buffering.
110 */
111typedef struct VBOXHGCMPARMPAGES
112{
113 /** The buffer size. */
114 uint32_t cbData;
115 /** Start of buffer offset into the first page. */
116 uint32_t offFirstPage : 12;
117 /** VBOX_HGCM_F_PARM_XXX flags. */
118 uint32_t fFlags : 3;
119 /** Set if we've locked all the pages. */
120 uint32_t fLocked : 1;
121 /** Number of pages. */
122 uint32_t cPages : 16;
123 /** Array of page locks followed by an array of page pointers; the first page
124 * pointer is adjusted by offFirstPage. */
125 PPGMPAGEMAPLOCK paPgLocks;
126} VBOXHGCMPARMPAGES;
127
128/**
129 * Information about a guest HGCM parameter.
130 */
131typedef struct VBOXHGCMGUESTPARM
132{
133 /** The parameter type. */
134 HGCMFunctionParameterType enmType;
135
136 union
137 {
138 VBOXHGCMPARMVAL val;
139 VBOXHGCMPARMPTR ptr;
140 VBOXHGCMPARMPAGES Pages;
141 } u;
142
143} VBOXHGCMGUESTPARM;
144
145typedef struct VBOXHGCMCMD
146{
147 /** Active commands, list is protected by critsectHGCMCmdList. */
148 RTLISTNODE node;
149
150 /** The type of the command (VBOXHGCMCMDTYPE). */
151 uint8_t enmCmdType;
152
153 /** Whether the command was cancelled by the guest. */
154 bool fCancelled;
155
156 /** Set if allocated from the memory cache, clear if heap. */
157 bool fMemCache;
158
159 /** Whether the command was restored from saved state. */
160 bool fRestored : 1;
161 /** Whether this command has a no-bounce page list and needs to be restored
162 * from guest memory the old fashioned way. */
163 bool fRestoreFromGuestMem : 1;
164
165 /** Copy of VMMDevRequestHeader::fRequestor.
166 * @note Only valid if VBOXGSTINFO2_F_REQUESTOR_INFO is set in
167 * VMMDevState.guestInfo2.fFeatures. */
168 uint32_t fRequestor;
169
170 /** GC physical address of the guest request. */
171 RTGCPHYS GCPhys;
172
173 /** Request packet size. */
174 uint32_t cbRequest;
175
176 /** The type of the guest request. */
177 VMMDevRequestType enmRequestType;
178
179 /** Pointer to the locked request, NULL if not locked. */
180 void *pvReqLocked;
181 /** The PGM lock for GCPhys if pvReqLocked is not NULL. */
182 PGMPAGEMAPLOCK ReqMapLock;
183
184 /** The accounting index (into VMMDEVR3::aHgcmAcc). */
185 uint8_t idxHeapAcc;
186 uint8_t abPadding[3];
187 /** The heap cost of this command. */
188 uint32_t cbHeapCost;
189
190 /** The STAM_GET_TS() value when the request arrived. */
191 uint64_t tsArrival;
192 /** The STAM_GET_TS() value when the hgcmR3Completed() is called. */
193 uint64_t tsComplete;
194
195 union
196 {
197 struct
198 {
199 uint32_t u32ClientID;
200 HGCMServiceLocation *pLoc; /**< Allocated after this structure. */
201 } connect;
202
203 struct
204 {
205 uint32_t u32ClientID;
206 } disconnect;
207
208 struct
209 {
210 /* Number of elements in paGuestParms and paHostParms arrays. */
211 uint32_t cParms;
212
213 uint32_t u32ClientID;
214
215 uint32_t u32Function;
216
217 /** Pointer to information about guest parameters in case of a Call request.
218 * Follows this structure in the same memory block.
219 */
220 VBOXHGCMGUESTPARM *paGuestParms;
221
222 /** Pointer to converted host parameters in case of a Call request.
223 * Follows this structure in the same memory block.
224 */
225 VBOXHGCMSVCPARM *paHostParms;
226
227 /* VBOXHGCMGUESTPARM[] */
228 /* VBOXHGCMSVCPARM[] */
229 } call;
230 } u;
231} VBOXHGCMCMD;
232
233
234/**
235 * Version for the memory cache.
236 */
237typedef struct VBOXHGCMCMDCACHED
238{
239 VBOXHGCMCMD Core; /**< 120 */
240 VBOXHGCMGUESTPARM aGuestParms[6]; /**< 40 * 6 = 240 */
241 VBOXHGCMSVCPARM aHostParms[6]; /**< 24 * 6 = 144 */
242} VBOXHGCMCMDCACHED; /**< 120+240+144 = 504 */
243AssertCompile(sizeof(VBOXHGCMCMD) <= 120);
244AssertCompile(sizeof(VBOXHGCMGUESTPARM) <= 40);
245AssertCompile(sizeof(VBOXHGCMSVCPARM) <= 24);
246AssertCompile(sizeof(VBOXHGCMCMDCACHED) <= 512);
247AssertCompile(sizeof(VBOXHGCMCMDCACHED) > sizeof(VBOXHGCMCMD) + sizeof(HGCMServiceLocation));
248
249
250/*********************************************************************************************************************************
251* Internal Functions *
252*********************************************************************************************************************************/
253DECLINLINE(void *) vmmdevR3HgcmCallMemAllocZ(PVMMDEVCC pThisCC, PVBOXHGCMCMD pCmd, size_t cbRequested);
254
255
256
257DECLINLINE(int) vmmdevR3HgcmCmdListLock(PVMMDEVCC pThisCC)
258{
259 int rc = RTCritSectEnter(&pThisCC->critsectHGCMCmdList);
260 AssertRC(rc);
261 return rc;
262}
263
264DECLINLINE(void) vmmdevR3HgcmCmdListUnlock(PVMMDEVCC pThisCC)
265{
266 int rc = RTCritSectLeave(&pThisCC->critsectHGCMCmdList);
267 AssertRC(rc);
268}
269
270/** Allocate and initialize VBOXHGCMCMD structure for HGCM request.
271 *
272 * @returns Pointer to the command on success, NULL otherwise.
273 * @param pThisCC The VMMDev ring-3 instance data.
274 * @param enmCmdType Type of the command.
275 * @param GCPhys The guest physical address of the HGCM request.
276 * @param cbRequest The size of the HGCM request.
277 * @param cParms Number of HGCM parameters for VBOXHGCMCMDTYPE_CALL command.
278 * @param fRequestor The VMMDevRequestHeader::fRequestor value.
279 */
280static PVBOXHGCMCMD vmmdevR3HgcmCmdAlloc(PVMMDEVCC pThisCC, VBOXHGCMCMDTYPE enmCmdType, RTGCPHYS GCPhys,
281 uint32_t cbRequest, uint32_t cParms, uint32_t fRequestor)
282{
283 /*
284 * Pick the heap accounting category.
285 *
286 * Initial idea was to just use what VMMDEV_REQUESTOR_USR_MASK yields directly,
287 * but that would leave several categories unused (DRV, RESERVED1, GUEST). Better
288 * to have fewer categories and more heap available in each.
289 */
290 uintptr_t idxHeapAcc;
291 if (fRequestor != VMMDEV_REQUESTOR_LEGACY)
292 switch (fRequestor & VMMDEV_REQUESTOR_USR_MASK)
293 {
294 case VMMDEV_REQUESTOR_USR_NOT_GIVEN:
295 case VMMDEV_REQUESTOR_USR_DRV:
296 case VMMDEV_REQUESTOR_USR_DRV_OTHER:
297 idxHeapAcc = VMMDEV_HGCM_CATEGORY_KERNEL;
298 break;
299 case VMMDEV_REQUESTOR_USR_ROOT:
300 case VMMDEV_REQUESTOR_USR_SYSTEM:
301 idxHeapAcc = VMMDEV_HGCM_CATEGORY_ROOT;
302 break;
303 default:
304 AssertFailed(); RT_FALL_THRU();
305 case VMMDEV_REQUESTOR_USR_RESERVED1:
306 case VMMDEV_REQUESTOR_USR_USER:
307 case VMMDEV_REQUESTOR_USR_GUEST:
308 idxHeapAcc = VMMDEV_HGCM_CATEGORY_USER;
309 break;
310 }
311 else
312 idxHeapAcc = VMMDEV_HGCM_CATEGORY_KERNEL;
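 /* For example: a request from a kernel-mode Guest Additions driver
    (VMMDEV_REQUESTOR_USR_DRV) is charged against the KERNEL budget, a root or
    system service against the ROOT budget, and an ordinary guest process
    against the USER budget; legacy guests without requestor info count as
    KERNEL. */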
313
314#if 1
315 /*
316 * Try to use the cache.
317 */
318 VBOXHGCMCMDCACHED *pCmdCached;
319 AssertCompile(sizeof(*pCmdCached) >= sizeof(VBOXHGCMCMD) + sizeof(HGCMServiceLocation));
320 if (cParms <= RT_ELEMENTS(pCmdCached->aGuestParms))
321 {
322 if (sizeof(*pCmdCached) <= pThisCC->aHgcmAcc[idxHeapAcc].cbHeapBudget)
323 {
324 int rc = RTMemCacheAllocEx(pThisCC->hHgcmCmdCache, (void **)&pCmdCached);
325 if (RT_SUCCESS(rc))
326 {
327 RT_ZERO(*pCmdCached);
328 pCmdCached->Core.fMemCache = true;
329 pCmdCached->Core.GCPhys = GCPhys;
330 pCmdCached->Core.cbRequest = cbRequest;
331 pCmdCached->Core.enmCmdType = enmCmdType;
332 pCmdCached->Core.fRequestor = fRequestor;
333 pCmdCached->Core.idxHeapAcc = (uint8_t)idxHeapAcc;
334 pCmdCached->Core.cbHeapCost = sizeof(*pCmdCached);
335 Log5Func(("aHgcmAcc[%zu] %#RX64 -= %#zx (%p)\n",
336 idxHeapAcc, pThisCC->aHgcmAcc[idxHeapAcc].cbHeapBudget, sizeof(*pCmdCached), &pCmdCached->Core));
337 pThisCC->aHgcmAcc[idxHeapAcc].cbHeapBudget -= sizeof(*pCmdCached);
338
339 if (enmCmdType == VBOXHGCMCMDTYPE_CALL)
340 {
341 pCmdCached->Core.u.call.cParms = cParms;
342 pCmdCached->Core.u.call.paGuestParms = pCmdCached->aGuestParms;
343 pCmdCached->Core.u.call.paHostParms = pCmdCached->aHostParms;
344 }
345 else if (enmCmdType == VBOXHGCMCMDTYPE_CONNECT)
346 pCmdCached->Core.u.connect.pLoc = (HGCMServiceLocation *)(&pCmdCached->Core + 1);
347
348 Assert(!pCmdCached->Core.pvReqLocked);
349
350 Log3Func(("returns %p (enmCmdType=%d GCPhys=%RGp)\n", &pCmdCached->Core, enmCmdType, GCPhys));
351 return &pCmdCached->Core;
352 }
353 }
354 else
355 LogFunc(("Heap budget overrun: sizeof(*pCmdCached)=%#zx aHgcmAcc[%zu].cbHeapBudget=%#RX64 - enmCmdType=%d\n",
356 sizeof(*pCmdCached), idxHeapAcc, pThisCC->aHgcmAcc[idxHeapAcc].cbHeapBudget, enmCmdType));
357 STAM_REL_COUNTER_INC(&pThisCC->aHgcmAcc[idxHeapAcc].StatBudgetOverruns);
358 return NULL;
359 }
360 STAM_REL_COUNTER_INC(&pThisCC->StatHgcmLargeCmdAllocs);
361
362#else
363 RT_NOREF(pThisCC);
364#endif
365
366 /* Size of required memory buffer. */
367 const uint32_t cbCmd = sizeof(VBOXHGCMCMD) + cParms * (sizeof(VBOXHGCMGUESTPARM) + sizeof(VBOXHGCMSVCPARM))
368 + (enmCmdType == VBOXHGCMCMDTYPE_CONNECT ? sizeof(HGCMServiceLocation) : 0);
369 if (cbCmd <= pThisCC->aHgcmAcc[idxHeapAcc].cbHeapBudget)
370 {
371 PVBOXHGCMCMD pCmd = (PVBOXHGCMCMD)RTMemAllocZ(cbCmd);
372 if (pCmd)
373 {
374 pCmd->enmCmdType = enmCmdType;
375 pCmd->GCPhys = GCPhys;
376 pCmd->cbRequest = cbRequest;
377 pCmd->fRequestor = fRequestor;
378 pCmd->idxHeapAcc = (uint8_t)idxHeapAcc;
379 pCmd->cbHeapCost = cbCmd;
380 Log5Func(("aHgcmAcc[%zu] %#RX64 -= %#x (%p)\n", idxHeapAcc, pThisCC->aHgcmAcc[idxHeapAcc].cbHeapBudget, cbCmd, pCmd));
381 pThisCC->aHgcmAcc[idxHeapAcc].cbHeapBudget -= cbCmd;
382
383 if (enmCmdType == VBOXHGCMCMDTYPE_CALL)
384 {
385 pCmd->u.call.cParms = cParms;
386 if (cParms)
387 {
388 pCmd->u.call.paGuestParms = (VBOXHGCMGUESTPARM *)((uint8_t *)pCmd
389 + sizeof(struct VBOXHGCMCMD));
390 pCmd->u.call.paHostParms = (VBOXHGCMSVCPARM *)((uint8_t *)pCmd->u.call.paGuestParms
391 + cParms * sizeof(VBOXHGCMGUESTPARM));
392 }
393 }
394 else if (enmCmdType == VBOXHGCMCMDTYPE_CONNECT)
395 pCmd->u.connect.pLoc = (HGCMServiceLocation *)(pCmd + 1);
396 }
397 Log3Func(("returns %p (enmCmdType=%d GCPhys=%RGp cbCmd=%#x)\n", pCmd, enmCmdType, GCPhys, cbCmd));
398 return pCmd;
399 }
400 STAM_REL_COUNTER_INC(&pThisCC->aHgcmAcc[idxHeapAcc].StatBudgetOverruns);
401 LogFunc(("Heap budget overrun: cbCmd=%#x aHgcmAcc[%zu].cbHeapBudget=%#RX64 - enmCmdType=%d\n",
402 cbCmd, idxHeapAcc, pThisCC->aHgcmAcc[idxHeapAcc].cbHeapBudget, enmCmdType));
403 return NULL;
404}
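/* Rough layout of the non-cached VBOXHGCMCMDTYPE_CALL allocation made above
 * (a single RTMemAllocZ block; for VBOXHGCMCMDTYPE_CONNECT an
 * HGCMServiceLocation follows the command instead):
 *
 *   +-------------+---------------------------+--------------------------+
 *   | VBOXHGCMCMD | VBOXHGCMGUESTPARM[cParms] | VBOXHGCMSVCPARM[cParms]  |
 *   +-------------+---------------------------+--------------------------+
 *                   ^ u.call.paGuestParms       ^ u.call.paHostParms
 */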
405
406/** Deallocate VBOXHGCMCMD memory.
407 *
408 * @param pDevIns The device instance.
409 * @param pThis The VMMDev shared instance data.
410 * @param pThisCC The VMMDev ring-3 instance data.
411 * @param pCmd Command to deallocate.
412 */
413static void vmmdevR3HgcmCmdFree(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, PVBOXHGCMCMD pCmd)
414{
415 if (pCmd)
416 {
417 Assert( pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL
418 || pCmd->enmCmdType == VBOXHGCMCMDTYPE_CONNECT
419 || pCmd->enmCmdType == VBOXHGCMCMDTYPE_DISCONNECT
420 || pCmd->enmCmdType == VBOXHGCMCMDTYPE_LOADSTATE);
421 if (pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL)
422 {
423 uint32_t i;
424 for (i = 0; i < pCmd->u.call.cParms; ++i)
425 {
426 VBOXHGCMSVCPARM * const pHostParm = &pCmd->u.call.paHostParms[i];
427 VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
428
429 if ( pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_In
430 || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_Out
431 || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr
432 || pGuestParm->enmType == VMMDevHGCMParmType_PageList
433 || pGuestParm->enmType == VMMDevHGCMParmType_ContiguousPageList)
434 {
435 Assert(pHostParm->type == VBOX_HGCM_SVC_PARM_PTR);
436 if (pGuestParm->u.ptr.paPages != &pGuestParm->u.ptr.GCPhysSinglePage)
437 RTMemFree(pGuestParm->u.ptr.paPages);
438 RTMemFreeZ(pHostParm->u.pointer.addr, pGuestParm->u.ptr.cbData);
439 }
440 else if (pGuestParm->enmType == VMMDevHGCMParmType_Embedded)
441 {
442 Assert(pHostParm->type == VBOX_HGCM_SVC_PARM_PTR);
443 RTMemFreeZ(pHostParm->u.pointer.addr, pGuestParm->u.ptr.cbData);
444 }
445 else if (pGuestParm->enmType == VMMDevHGCMParmType_NoBouncePageList)
446 {
447 Assert(pHostParm->type == VBOX_HGCM_SVC_PARM_PAGES);
448 if (pGuestParm->u.Pages.paPgLocks)
449 {
450 if (pGuestParm->u.Pages.fLocked)
451 PDMDevHlpPhysBulkReleasePageMappingLocks(pDevIns, pGuestParm->u.Pages.cPages,
452 pGuestParm->u.Pages.paPgLocks);
453 RTMemFree(pGuestParm->u.Pages.paPgLocks);
454 pGuestParm->u.Pages.paPgLocks = NULL;
455 }
456 }
457 else
458 Assert(pHostParm->type != VBOX_HGCM_SVC_PARM_PTR && pHostParm->type != VBOX_HGCM_SVC_PARM_PAGES);
459 }
460 }
461
462 if (pCmd->pvReqLocked)
463 {
464 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &pCmd->ReqMapLock);
465 pCmd->pvReqLocked = NULL;
466 }
467
468 pCmd->enmCmdType = UINT8_MAX; /* poison */
469
470 /* Update heap budget. Need the critsect to do this safely. */
471 Assert(pCmd->cbHeapCost != 0);
472 uintptr_t idx = pCmd->idxHeapAcc;
473 AssertStmt(idx < RT_ELEMENTS(pThisCC->aHgcmAcc), idx %= RT_ELEMENTS(pThisCC->aHgcmAcc));
474
475 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VERR_IGNORED);
476 PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, &pThis->CritSect, rcLock);
477
478 Log5Func(("aHgcmAcc[%zu] %#RX64 += %#x (%p)\n", idx, pThisCC->aHgcmAcc[idx].cbHeapBudget, pCmd->cbHeapCost, pCmd));
479 pThisCC->aHgcmAcc[idx].cbHeapBudget += pCmd->cbHeapCost;
480 AssertMsg(pThisCC->aHgcmAcc[idx].cbHeapBudget <= pThisCC->aHgcmAcc[idx].cbHeapBudgetConfig,
481 ("idx=%d (%d) fRequestor=%#x pCmd=%p: %#RX64 vs %#RX64 -> %#RX64\n", idx, pCmd->idxHeapAcc, pCmd->fRequestor, pCmd,
482 pThisCC->aHgcmAcc[idx].cbHeapBudget, pThisCC->aHgcmAcc[idx].cbHeapBudgetConfig,
483 pThisCC->aHgcmAcc[idx].cbHeapBudget - pThisCC->aHgcmAcc[idx].cbHeapBudgetConfig));
484 pCmd->cbHeapCost = 0;
485
486#if 1
487 if (pCmd->fMemCache)
488 {
489 RTMemCacheFree(pThisCC->hHgcmCmdCache, pCmd);
 490 PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect); /* releasing it afterwards just to be on the safe side. */
491 }
492 else
493#endif
494 {
495 PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
496 RTMemFree(pCmd);
497 }
498 }
499}
500
501/** Add VBOXHGCMCMD to the list of pending commands.
502 *
503 * @returns VBox status code.
504 * @param pDevIns The device instance.
505 * @param pThis The VMMDev shared instance data.
506 * @param pThisCC The VMMDev ring-3 instance data.
507 * @param pCmd Command to add.
508 */
509static int vmmdevR3HgcmAddCommand(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, PVBOXHGCMCMD pCmd)
510{
511 int rc = vmmdevR3HgcmCmdListLock(pThisCC);
512 AssertRCReturn(rc, rc);
513
514 LogFlowFunc(("%p type %d\n", pCmd, pCmd->enmCmdType));
515
516 RTListPrepend(&pThisCC->listHGCMCmd, &pCmd->node);
517
518 /* stats */
519 uintptr_t idx = pCmd->idxHeapAcc;
520 AssertStmt(idx < RT_ELEMENTS(pThisCC->aHgcmAcc), idx %= RT_ELEMENTS(pThisCC->aHgcmAcc));
521 STAM_REL_PROFILE_ADD_PERIOD(&pThisCC->aHgcmAcc[idx].StateMsgHeapUsage, pCmd->cbHeapCost);
522
523 /* Automatically enable HGCM events, if there are HGCM commands. */
524 if ( pCmd->enmCmdType == VBOXHGCMCMDTYPE_CONNECT
525 || pCmd->enmCmdType == VBOXHGCMCMDTYPE_DISCONNECT
526 || pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL)
527 {
528 LogFunc(("u32HGCMEnabled = %d\n", pThisCC->u32HGCMEnabled));
529 if (ASMAtomicCmpXchgU32(&pThisCC->u32HGCMEnabled, 1, 0))
530 VMMDevCtlSetGuestFilterMask(pDevIns, pThis, pThisCC, VMMDEV_EVENT_HGCM, 0);
531 }
532
533 vmmdevR3HgcmCmdListUnlock(pThisCC);
534 return rc;
535}
536
537/** Remove VBOXHGCMCMD from the list of pending commands.
538 *
539 * @returns VBox status code.
540 * @param pThisCC The VMMDev ring-3 instance data.
541 * @param pCmd Command to remove.
542 */
543static int vmmdevR3HgcmRemoveCommand(PVMMDEVCC pThisCC, PVBOXHGCMCMD pCmd)
544{
545 int rc = vmmdevR3HgcmCmdListLock(pThisCC);
546 AssertRCReturn(rc, rc);
547
548 LogFlowFunc(("%p\n", pCmd));
549
550 RTListNodeRemove(&pCmd->node);
551
552 vmmdevR3HgcmCmdListUnlock(pThisCC);
553 return rc;
554}
555
556/**
557 * Find a HGCM command by its physical address.
558 *
559 * The caller is responsible for taking the command list lock before calling
560 * this function.
561 *
562 * @returns Pointer to the command on success, NULL otherwise.
563 * @param pThisCC The VMMDev ring-3 instance data.
564 * @param GCPhys The physical address of the command we're looking for.
565 */
566DECLINLINE(PVBOXHGCMCMD) vmmdevR3HgcmFindCommandLocked(PVMMDEVCC pThisCC, RTGCPHYS GCPhys)
567{
568 PVBOXHGCMCMD pCmd;
569 RTListForEach(&pThisCC->listHGCMCmd, pCmd, VBOXHGCMCMD, node)
570 {
571 if (pCmd->GCPhys == GCPhys)
572 return pCmd;
573 }
574 return NULL;
575}
576
577/** Copy VMMDevHGCMConnect request data from the guest to VBOXHGCMCMD command.
578 *
579 * @param pHGCMConnect The source guest request (cached in host memory).
580 * @param pCmd Destination command.
581 */
582static void vmmdevR3HgcmConnectFetch(const VMMDevHGCMConnect *pHGCMConnect, PVBOXHGCMCMD pCmd)
583{
584 pCmd->enmRequestType = pHGCMConnect->header.header.requestType;
585 pCmd->u.connect.u32ClientID = pHGCMConnect->u32ClientID;
586 *pCmd->u.connect.pLoc = pHGCMConnect->loc;
587}
588
589/** Handle VMMDevHGCMConnect request.
590 *
591 * @param pDevIns The device instance.
592 * @param pThis The VMMDev shared instance data.
593 * @param pThisCC The VMMDev ring-3 instance data.
594 * @param pHGCMConnect The guest request (cached in host memory).
595 * @param GCPhys The physical address of the request.
596 */
597int vmmdevR3HgcmConnect(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC,
598 const VMMDevHGCMConnect *pHGCMConnect, RTGCPHYS GCPhys)
599{
600 int rc;
601 PVBOXHGCMCMD pCmd = vmmdevR3HgcmCmdAlloc(pThisCC, VBOXHGCMCMDTYPE_CONNECT, GCPhys, pHGCMConnect->header.header.size, 0,
602 pHGCMConnect->header.header.fRequestor);
603 if (pCmd)
604 {
605 vmmdevR3HgcmConnectFetch(pHGCMConnect, pCmd);
606
607 /* Only allow the guest to use existing services! */
608 ASSERT_GUEST(pHGCMConnect->loc.type == VMMDevHGCMLoc_LocalHost_Existing);
609 pCmd->u.connect.pLoc->type = VMMDevHGCMLoc_LocalHost_Existing;
610
611 vmmdevR3HgcmAddCommand(pDevIns, pThis, pThisCC, pCmd);
612 rc = pThisCC->pHGCMDrv->pfnConnect(pThisCC->pHGCMDrv, pCmd, pCmd->u.connect.pLoc, &pCmd->u.connect.u32ClientID);
613 if (RT_FAILURE(rc))
614 vmmdevR3HgcmRemoveCommand(pThisCC, pCmd);
615 }
616 else
617 rc = VERR_NO_MEMORY;
618
619 return rc;
620}
621
622/** Copy VMMDevHGCMDisconnect request data from the guest to VBOXHGCMCMD command.
623 *
624 * @param pHGCMDisconnect The source guest request (cached in host memory).
625 * @param pCmd Destination command.
626 */
627static void vmmdevR3HgcmDisconnectFetch(const VMMDevHGCMDisconnect *pHGCMDisconnect, PVBOXHGCMCMD pCmd)
628{
629 pCmd->enmRequestType = pHGCMDisconnect->header.header.requestType;
630 pCmd->u.disconnect.u32ClientID = pHGCMDisconnect->u32ClientID;
631}
632
633/** Handle VMMDevHGCMDisconnect request.
634 *
635 * @param pDevIns The device instance.
636 * @param pThis The VMMDev shared instance data.
637 * @param pThisCC The VMMDev ring-3 instance data.
638 * @param pHGCMDisconnect The guest request (cached in host memory).
639 * @param GCPhys The physical address of the request.
640 */
641int vmmdevR3HgcmDisconnect(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC,
642 const VMMDevHGCMDisconnect *pHGCMDisconnect, RTGCPHYS GCPhys)
643{
644 int rc;
645 PVBOXHGCMCMD pCmd = vmmdevR3HgcmCmdAlloc(pThisCC, VBOXHGCMCMDTYPE_DISCONNECT, GCPhys, pHGCMDisconnect->header.header.size, 0,
646 pHGCMDisconnect->header.header.fRequestor);
647 if (pCmd)
648 {
649 vmmdevR3HgcmDisconnectFetch(pHGCMDisconnect, pCmd);
650
651 vmmdevR3HgcmAddCommand(pDevIns, pThis, pThisCC, pCmd);
652 rc = pThisCC->pHGCMDrv->pfnDisconnect(pThisCC->pHGCMDrv, pCmd, pCmd->u.disconnect.u32ClientID);
653 if (RT_FAILURE(rc))
654 vmmdevR3HgcmRemoveCommand(pThisCC, pCmd);
655 }
656 else
657 rc = VERR_NO_MEMORY;
658
659 return rc;
660}
661
662/** Translate LinAddr parameter type to the direction of data transfer.
663 *
664 * @returns VBOX_HGCM_F_PARM_DIRECTION_* flags.
665 * @param enmType Type of the LinAddr parameter.
666 */
667static uint32_t vmmdevR3HgcmParmTypeToDirection(HGCMFunctionParameterType enmType)
668{
669 if (enmType == VMMDevHGCMParmType_LinAddr_In) return VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
670 if (enmType == VMMDevHGCMParmType_LinAddr_Out) return VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;
671 return VBOX_HGCM_F_PARM_DIRECTION_BOTH;
672}
673
674/** Check whether the list of pages in an HGCM pointer parameter corresponds to a contiguous buffer.
675 *
676 * @returns true if pages are contiguous, false otherwise.
677 * @param pPtr Information about a pointer HGCM parameter.
678 */
679DECLINLINE(bool) vmmdevR3HgcmGuestBufferIsContiguous(const VBOXHGCMPARMPTR *pPtr)
680{
681 if (pPtr->cPages == 1)
682 return true;
683 RTGCPHYS64 Phys = pPtr->paPages[0] + PAGE_SIZE;
684 if (Phys != pPtr->paPages[1])
685 return false;
686 if (pPtr->cPages > 2)
687 {
688 uint32_t iPage = 2;
689 do
690 {
691 Phys += PAGE_SIZE;
692 if (Phys != pPtr->paPages[iPage])
693 return false;
694 ++iPage;
695 } while (iPage < pPtr->cPages);
696 }
697 return true;
698}
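/* Example: with 4 KiB pages, guest-physical pages 0x10000, 0x11000 and 0x12000
 * form one contiguous buffer, whereas 0x10000 followed by 0x12000 do not. */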
699
700/** Copy data from guest memory to the host buffer.
701 *
702 * @returns VBox status code.
703 * @param pDevIns The device instance for PDMDevHlp.
704 * @param pvDst The destination host buffer.
705 * @param cbDst Size of the destination host buffer.
706 * @param pPtr Description of the source HGCM pointer parameter.
707 */
708static int vmmdevR3HgcmGuestBufferRead(PPDMDEVINSR3 pDevIns, void *pvDst, uint32_t cbDst, const VBOXHGCMPARMPTR *pPtr)
709{
710 /*
711 * Try detect contiguous buffers.
712 */
713 /** @todo We need a flag for indicating this. */
714 if (vmmdevR3HgcmGuestBufferIsContiguous(pPtr))
715 return PDMDevHlpPhysRead(pDevIns, pPtr->paPages[0] | pPtr->offFirstPage, pvDst, cbDst);
716
717 /*
718 * Page by page fallback.
719 */
720 uint8_t *pu8Dst = (uint8_t *)pvDst;
721 uint32_t offPage = pPtr->offFirstPage;
722 uint32_t cbRemaining = cbDst;
723
724 for (uint32_t iPage = 0; iPage < pPtr->cPages && cbRemaining > 0; ++iPage)
725 {
726 uint32_t cbToRead = PAGE_SIZE - offPage;
727 if (cbToRead > cbRemaining)
728 cbToRead = cbRemaining;
729
730 /* Skip invalid pages. */
731 const RTGCPHYS GCPhys = pPtr->paPages[iPage];
732 if (GCPhys != NIL_RTGCPHYS)
733 {
734 int rc = PDMDevHlpPhysRead(pDevIns, GCPhys + offPage, pu8Dst, cbToRead);
735 AssertMsgReturn(RT_SUCCESS(rc), ("rc=%Rrc GCPhys=%RGp offPage=%#x cbToRead=%#x\n", rc, GCPhys, offPage, cbToRead), rc);
736 }
737
 738 offPage = 0; /* Subsequent pages are read from offset 0. */
739 cbRemaining -= cbToRead;
740 pu8Dst += cbToRead;
741 }
742
743 return VINF_SUCCESS;
744}
745
746/** Copy data from the host buffer to guest memory.
747 *
748 * @returns VBox status code.
749 * @param pDevIns The device instance for PDMDevHlp.
750 * @param pPtr Description of the destination HGCM pointer parameter.
751 * @param pvSrc The source host buffer.
752 * @param cbSrc Size of the source host buffer.
753 */
754static int vmmdevR3HgcmGuestBufferWrite(PPDMDEVINSR3 pDevIns, const VBOXHGCMPARMPTR *pPtr, const void *pvSrc, uint32_t cbSrc)
755{
756 int rc = VINF_SUCCESS;
757
758 uint8_t *pu8Src = (uint8_t *)pvSrc;
759 uint32_t offPage = pPtr->offFirstPage;
760 uint32_t cbRemaining = RT_MIN(cbSrc, pPtr->cbData);
761
762 uint32_t iPage;
763 for (iPage = 0; iPage < pPtr->cPages && cbRemaining > 0; ++iPage)
764 {
765 uint32_t cbToWrite = PAGE_SIZE - offPage;
766 if (cbToWrite > cbRemaining)
767 cbToWrite = cbRemaining;
768
769 /* Skip invalid pages. */
770 const RTGCPHYS GCPhys = pPtr->paPages[iPage];
771 if (GCPhys != NIL_RTGCPHYS)
772 {
773 rc = PDMDevHlpPhysWrite(pDevIns, GCPhys + offPage, pu8Src, cbToWrite);
774 AssertRCBreak(rc);
775 }
776
 777 offPage = 0; /* Subsequent pages are written from offset 0. */
778 cbRemaining -= cbToWrite;
779 pu8Src += cbToWrite;
780 }
781
782 return rc;
783}
784
785/** Initializes pCmd->paHostParms from already initialized pCmd->paGuestParms.
786 * Allocates memory for pointer parameters and copies data from the guest.
787 *
788 * @returns VBox status code that the guest should see.
789 * @param pDevIns The device instance.
790 * @param pThisCC The VMMDev ring-3 instance data.
 791 * @param pCmd Command structure whose host parameters need initialization.
792 * @param pbReq The request buffer.
793 */
794static int vmmdevR3HgcmInitHostParameters(PPDMDEVINS pDevIns, PVMMDEVCC pThisCC, PVBOXHGCMCMD pCmd, uint8_t const *pbReq)
795{
796 AssertReturn(pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL, VERR_INTERNAL_ERROR);
797
798 for (uint32_t i = 0; i < pCmd->u.call.cParms; ++i)
799 {
800 VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
801 VBOXHGCMSVCPARM * const pHostParm = &pCmd->u.call.paHostParms[i];
802
803 switch (pGuestParm->enmType)
804 {
805 case VMMDevHGCMParmType_32bit:
806 {
807 pHostParm->type = VBOX_HGCM_SVC_PARM_32BIT;
808 pHostParm->u.uint32 = (uint32_t)pGuestParm->u.val.u64Value;
809
810 break;
811 }
812
813 case VMMDevHGCMParmType_64bit:
814 {
815 pHostParm->type = VBOX_HGCM_SVC_PARM_64BIT;
816 pHostParm->u.uint64 = pGuestParm->u.val.u64Value;
817
818 break;
819 }
820
821 case VMMDevHGCMParmType_PageList:
822 case VMMDevHGCMParmType_LinAddr_In:
823 case VMMDevHGCMParmType_LinAddr_Out:
824 case VMMDevHGCMParmType_LinAddr:
825 case VMMDevHGCMParmType_Embedded:
826 case VMMDevHGCMParmType_ContiguousPageList:
827 {
828 const uint32_t cbData = pGuestParm->u.ptr.cbData;
829
830 pHostParm->type = VBOX_HGCM_SVC_PARM_PTR;
831 pHostParm->u.pointer.size = cbData;
832
833 if (cbData)
834 {
835 /* Zero memory, the buffer content is potentially copied to the guest. */
836 void *pv = vmmdevR3HgcmCallMemAllocZ(pThisCC, pCmd, cbData);
837 AssertReturn(pv, VERR_NO_MEMORY);
838 pHostParm->u.pointer.addr = pv;
839
840 if (pGuestParm->u.ptr.fu32Direction & VBOX_HGCM_F_PARM_DIRECTION_TO_HOST)
841 {
842 if (pGuestParm->enmType != VMMDevHGCMParmType_Embedded)
843 {
844 if (pGuestParm->enmType != VMMDevHGCMParmType_ContiguousPageList)
845 {
846 int rc = vmmdevR3HgcmGuestBufferRead(pDevIns, pv, cbData, &pGuestParm->u.ptr);
847 ASSERT_GUEST_RETURN(RT_SUCCESS(rc), rc);
848 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
849 }
850 else
851 {
852 int rc = PDMDevHlpPhysRead(pDevIns,
853 pGuestParm->u.ptr.paPages[0] | pGuestParm->u.ptr.offFirstPage,
854 pv, cbData);
855 ASSERT_GUEST_RETURN(RT_SUCCESS(rc), rc);
856 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
857 }
858 }
859 else
860 {
861 memcpy(pv, &pbReq[pGuestParm->u.ptr.offFirstPage], cbData);
862 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
863 }
864 }
865 }
866 else
867 {
868 pHostParm->u.pointer.addr = NULL;
869 }
870
871 break;
872 }
873
874 case VMMDevHGCMParmType_NoBouncePageList:
875 {
876 pHostParm->type = VBOX_HGCM_SVC_PARM_PAGES;
877 pHostParm->u.Pages.cb = pGuestParm->u.Pages.cbData;
878 pHostParm->u.Pages.cPages = pGuestParm->u.Pages.cPages;
879 pHostParm->u.Pages.papvPages = (void **)&pGuestParm->u.Pages.paPgLocks[pGuestParm->u.Pages.cPages];
880
881 break;
882 }
883
884 default:
885 ASSERT_GUEST_FAILED_RETURN(VERR_INVALID_PARAMETER);
886 }
887 }
888
889 return VINF_SUCCESS;
890}
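/* Note: only buffers whose direction includes VBOX_HGCM_F_PARM_DIRECTION_TO_HOST
 * are copied in here; data flowing back to the guest is written out on completion
 * by vmmdevR3HgcmCompleteCallRequest(). */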
891
892
893/** Allocate and initialize VBOXHGCMCMD structure for a HGCMCall request.
894 *
895 * @returns VBox status code that the guest should see.
896 * @param pThisCC The VMMDev ring-3 instance data.
897 * @param pHGCMCall The HGCMCall request (cached in host memory).
898 * @param cbHGCMCall Size of the request.
899 * @param GCPhys Guest physical address of the request.
900 * @param enmRequestType The request type. Distinguishes 64 and 32 bit calls.
901 * @param ppCmd Where to store pointer to allocated command.
902 * @param pcbHGCMParmStruct Where to store size of used HGCM parameter structure.
903 */
904static int vmmdevR3HgcmCallAlloc(PVMMDEVCC pThisCC, const VMMDevHGCMCall *pHGCMCall, uint32_t cbHGCMCall, RTGCPHYS GCPhys,
905 VMMDevRequestType enmRequestType, PVBOXHGCMCMD *ppCmd, uint32_t *pcbHGCMParmStruct)
906{
907#ifdef VBOX_WITH_64_BITS_GUESTS
908 const uint32_t cbHGCMParmStruct = enmRequestType == VMMDevReq_HGCMCall64 ? sizeof(HGCMFunctionParameter64)
909 : sizeof(HGCMFunctionParameter32);
910#else
911 const uint32_t cbHGCMParmStruct = sizeof(HGCMFunctionParameter);
912#endif
913
914 const uint32_t cParms = pHGCMCall->cParms;
915
 916 /* Check that there is enough space for the parameters and enforce a sane upper limit. */
917 ASSERT_GUEST_STMT_RETURN( cParms <= (cbHGCMCall - sizeof(VMMDevHGCMCall)) / cbHGCMParmStruct
918 && cParms <= VMMDEV_MAX_HGCM_PARMS,
919 LogRelMax(50, ("VMMDev: request packet with invalid number of HGCM parameters: %d vs %d. Refusing operation.\n",
920 (cbHGCMCall - sizeof(VMMDevHGCMCall)) / cbHGCMParmStruct, cParms)),
921 VERR_INVALID_PARAMETER);
922 RT_UNTRUSTED_VALIDATED_FENCE();
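 /* Note: the parameter count is bounded using a division rather than checking
    cParms * cbHGCMParmStruct directly, so the comparison cannot overflow. */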
923
924 PVBOXHGCMCMD pCmd = vmmdevR3HgcmCmdAlloc(pThisCC, VBOXHGCMCMDTYPE_CALL, GCPhys, cbHGCMCall, cParms,
925 pHGCMCall->header.header.fRequestor);
926 if (pCmd == NULL)
927 return VERR_NO_MEMORY;
928
929 /* Request type has been validated in vmmdevReqDispatcher. */
930 pCmd->enmRequestType = enmRequestType;
931 pCmd->u.call.u32ClientID = pHGCMCall->u32ClientID;
932 pCmd->u.call.u32Function = pHGCMCall->u32Function;
933
934 *ppCmd = pCmd;
935 *pcbHGCMParmStruct = cbHGCMParmStruct;
936 return VINF_SUCCESS;
937}
938
939/**
940 * Heap budget wrapper around RTMemAlloc and RTMemAllocZ.
941 */
942static void *vmmdevR3HgcmCallMemAllocEx(PVMMDEVCC pThisCC, PVBOXHGCMCMD pCmd, size_t cbRequested, bool fZero)
943{
944 uintptr_t idx = pCmd->idxHeapAcc;
945 AssertStmt(idx < RT_ELEMENTS(pThisCC->aHgcmAcc), idx %= RT_ELEMENTS(pThisCC->aHgcmAcc));
946
947 /* Check against max heap costs for this request. */
948 Assert(pCmd->cbHeapCost <= VMMDEV_MAX_HGCM_DATA_SIZE);
949 if (cbRequested <= VMMDEV_MAX_HGCM_DATA_SIZE - pCmd->cbHeapCost)
950 {
951 /* Check heap budget (we're under lock). */
952 if (cbRequested <= pThisCC->aHgcmAcc[idx].cbHeapBudget)
953 {
954 /* Do the actual allocation. */
955 void *pv = fZero ? RTMemAllocZ(cbRequested) : RTMemAlloc(cbRequested);
956 if (pv)
957 {
958 /* Update the request cost and heap budget. */
 959 Log5Func(("aHgcmAcc[%zu] %#RX64 -= %#zx (%p)\n", idx, pThisCC->aHgcmAcc[idx].cbHeapBudget, cbRequested, pCmd));
960 pThisCC->aHgcmAcc[idx].cbHeapBudget -= cbRequested;
961 pCmd->cbHeapCost += (uint32_t)cbRequested;
962 return pv;
963 }
964 LogFunc(("Heap alloc failed: cbRequested=%#zx - enmCmdType=%d\n", cbRequested, pCmd->enmCmdType));
965 }
966 else
967 LogFunc(("Heap budget overrun: cbRequested=%#zx cbHeapCost=%#x aHgcmAcc[%u].cbHeapBudget=%#RX64 - enmCmdType=%d\n",
968 cbRequested, pCmd->cbHeapCost, pCmd->idxHeapAcc, pThisCC->aHgcmAcc[idx].cbHeapBudget, pCmd->enmCmdType));
969 }
970 else
971 LogFunc(("Request too big: cbRequested=%#zx cbHeapCost=%#x - enmCmdType=%d\n",
972 cbRequested, pCmd->cbHeapCost, pCmd->enmCmdType));
973 STAM_REL_COUNTER_INC(&pThisCC->aHgcmAcc[idx].StatBudgetOverruns);
974 return NULL;
975}
976
977/**
978 * Heap budget wrapper around RTMemAlloc.
979 */
980DECLINLINE(void *) vmmdevR3HgcmCallMemAlloc(PVMMDEVCC pThisCC, PVBOXHGCMCMD pCmd, size_t cbRequested)
981{
982 return vmmdevR3HgcmCallMemAllocEx(pThisCC, pCmd, cbRequested, false /*fZero*/);
983}
984
985/**
986 * Heap budget wrapper around RTMemAllocZ.
987 */
988DECLINLINE(void *) vmmdevR3HgcmCallMemAllocZ(PVMMDEVCC pThisCC, PVBOXHGCMCMD pCmd, size_t cbRequested)
989{
990 return vmmdevR3HgcmCallMemAllocEx(pThisCC, pCmd, cbRequested, true /*fZero*/);
991}
992
993/** Copy VMMDevHGCMCall request data from the guest to VBOXHGCMCMD command.
994 *
995 * @returns VBox status code that the guest should see.
996 * @param pDevIns The device instance.
997 * @param pThisCC The VMMDev ring-3 instance data.
998 * @param pCmd The destination command.
999 * @param pHGCMCall The HGCMCall request (cached in host memory).
1000 * @param cbHGCMCall Size of the request.
1001 * @param enmRequestType The request type. Distinguishes 64 and 32 bit calls.
1002 * @param cbHGCMParmStruct Size of used HGCM parameter structure.
1003 */
1004static int vmmdevR3HgcmCallFetchGuestParms(PPDMDEVINS pDevIns, PVMMDEVCC pThisCC, PVBOXHGCMCMD pCmd,
1005 const VMMDevHGCMCall *pHGCMCall, uint32_t cbHGCMCall,
1006 VMMDevRequestType enmRequestType, uint32_t cbHGCMParmStruct)
1007{
1008 /*
1009 * Go over all guest parameters and initialize relevant VBOXHGCMCMD fields.
1010 * VBOXHGCMCMD must contain all information about the request,
 1011 * as the request will not be read from guest memory again.
1012 */
1013#ifdef VBOX_WITH_64_BITS_GUESTS
1014 const bool f64Bits = (enmRequestType == VMMDevReq_HGCMCall64);
1015#endif
1016
1017 const uint32_t cParms = pCmd->u.call.cParms;
1018
1019 /* Offsets in the request buffer to HGCM parameters and additional data. */
1020 const uint32_t offHGCMParms = sizeof(VMMDevHGCMCall);
1021 const uint32_t offExtra = offHGCMParms + cParms * cbHGCMParmStruct;
1022
1023 /* Pointer to the next HGCM parameter of the request. */
1024 const uint8_t *pu8HGCMParm = (uint8_t *)pHGCMCall + offHGCMParms;
1025
1026 for (uint32_t i = 0; i < cParms; ++i, pu8HGCMParm += cbHGCMParmStruct)
1027 {
1028 VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
1029
1030#ifdef VBOX_WITH_64_BITS_GUESTS
1031 AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, type, HGCMFunctionParameter32, type);
1032 pGuestParm->enmType = ((HGCMFunctionParameter64 *)pu8HGCMParm)->type;
1033#else
1034 pGuestParm->enmType = ((HGCMFunctionParameter *)pu8HGCMParm)->type;
1035#endif
1036
1037 switch (pGuestParm->enmType)
1038 {
1039 case VMMDevHGCMParmType_32bit:
1040 {
1041#ifdef VBOX_WITH_64_BITS_GUESTS
1042 AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.value32, HGCMFunctionParameter32, u.value32);
1043 uint32_t *pu32 = &((HGCMFunctionParameter64 *)pu8HGCMParm)->u.value32;
1044#else
1045 uint32_t *pu32 = &((HGCMFunctionParameter *)pu8HGCMParm)->u.value32;
1046#endif
1047 LogFunc(("uint32 guest parameter %RI32\n", *pu32));
1048
1049 pGuestParm->u.val.u64Value = *pu32;
1050 pGuestParm->u.val.offValue = (uint32_t)((uintptr_t)pu32 - (uintptr_t)pHGCMCall);
1051 pGuestParm->u.val.cbValue = sizeof(uint32_t);
1052
1053 break;
1054 }
1055
1056 case VMMDevHGCMParmType_64bit:
1057 {
1058#ifdef VBOX_WITH_64_BITS_GUESTS
1059 AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.value64, HGCMFunctionParameter32, u.value64);
1060 uint64_t *pu64 = (uint64_t *)(uintptr_t)&((HGCMFunctionParameter64 *)pu8HGCMParm)->u.value64; /* MSC detect misalignment, thus casts. */
1061#else
1062 uint64_t *pu64 = &((HGCMFunctionParameter *)pu8HGCMParm)->u.value64;
1063#endif
1064 LogFunc(("uint64 guest parameter %RI64\n", *pu64));
1065
1066 pGuestParm->u.val.u64Value = *pu64;
1067 pGuestParm->u.val.offValue = (uint32_t)((uintptr_t)pu64 - (uintptr_t)pHGCMCall);
1068 pGuestParm->u.val.cbValue = sizeof(uint64_t);
1069
1070 break;
1071 }
1072
1073 case VMMDevHGCMParmType_LinAddr_In: /* In (read) */
1074 case VMMDevHGCMParmType_LinAddr_Out: /* Out (write) */
1075 case VMMDevHGCMParmType_LinAddr: /* In & Out */
1076 {
1077#ifdef VBOX_WITH_64_BITS_GUESTS
1078 uint32_t cbData = f64Bits ? ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.Pointer.size
1079 : ((HGCMFunctionParameter32 *)pu8HGCMParm)->u.Pointer.size;
1080 RTGCPTR GCPtr = f64Bits ? ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.Pointer.u.linearAddr
1081 : ((HGCMFunctionParameter32 *)pu8HGCMParm)->u.Pointer.u.linearAddr;
1082#else
1083 uint32_t cbData = ((HGCMFunctionParameter *)pu8HGCMParm)->u.Pointer.size;
1084 RTGCPTR GCPtr = ((HGCMFunctionParameter *)pu8HGCMParm)->u.Pointer.u.linearAddr;
1085#endif
1086 LogFunc(("LinAddr guest parameter %RGv, cb %u\n", GCPtr, cbData));
1087
1088 ASSERT_GUEST_RETURN(cbData <= VMMDEV_MAX_HGCM_DATA_SIZE, VERR_INVALID_PARAMETER);
1089
1090 const uint32_t offFirstPage = cbData > 0 ? GCPtr & PAGE_OFFSET_MASK : 0;
1091 const uint32_t cPages = cbData > 0 ? (offFirstPage + cbData + PAGE_SIZE - 1) / PAGE_SIZE : 0;
1092
1093 pGuestParm->u.ptr.cbData = cbData;
1094 pGuestParm->u.ptr.offFirstPage = offFirstPage;
1095 pGuestParm->u.ptr.cPages = cPages;
1096 pGuestParm->u.ptr.fu32Direction = vmmdevR3HgcmParmTypeToDirection(pGuestParm->enmType);
1097
1098 if (cbData > 0)
1099 {
1100 if (cPages == 1)
1101 pGuestParm->u.ptr.paPages = &pGuestParm->u.ptr.GCPhysSinglePage;
1102 else
1103 {
1104 /* (Max 262144 bytes with current limits.) */
1105 pGuestParm->u.ptr.paPages = (RTGCPHYS *)vmmdevR3HgcmCallMemAlloc(pThisCC, pCmd,
1106 cPages * sizeof(RTGCPHYS));
1107 AssertReturn(pGuestParm->u.ptr.paPages, VERR_NO_MEMORY);
1108 }
1109
 1110 /* Convert the guest linear page addresses to physical addresses. */
1111 GCPtr &= PAGE_BASE_GC_MASK;
1112 for (uint32_t iPage = 0; iPage < cPages; ++iPage)
1113 {
 1114 /* The guest might specify an invalid GCPtr; just skip such addresses.
 1115 * Also, if the guest parameters are fetched while restoring an old saved state,
 1116 * the GCPtr may have become invalid and no longer have a corresponding GCPhys.
1117 * The command restoration routine will take care of this.
1118 */
1119 RTGCPHYS GCPhys;
1120 int rc2 = PDMDevHlpPhysGCPtr2GCPhys(pDevIns, GCPtr, &GCPhys);
1121 if (RT_FAILURE(rc2))
1122 GCPhys = NIL_RTGCPHYS;
1123 LogFunc(("Page %d: %RGv -> %RGp. %Rrc\n", iPage, GCPtr, GCPhys, rc2));
1124
1125 pGuestParm->u.ptr.paPages[iPage] = GCPhys;
1126 GCPtr += PAGE_SIZE;
1127 }
1128 }
1129
1130 break;
1131 }
1132
1133 case VMMDevHGCMParmType_PageList:
1134 case VMMDevHGCMParmType_ContiguousPageList:
1135 case VMMDevHGCMParmType_NoBouncePageList:
1136 {
1137#ifdef VBOX_WITH_64_BITS_GUESTS
1138 AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.PageList.size, HGCMFunctionParameter32, u.PageList.size);
1139 AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.PageList.offset, HGCMFunctionParameter32, u.PageList.offset);
1140 uint32_t cbData = ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.PageList.size;
1141 uint32_t offPageListInfo = ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.PageList.offset;
1142#else
1143 uint32_t cbData = ((HGCMFunctionParameter *)pu8HGCMParm)->u.PageList.size;
1144 uint32_t offPageListInfo = ((HGCMFunctionParameter *)pu8HGCMParm)->u.PageList.offset;
1145#endif
1146 LogFunc(("PageList guest parameter cb %u, offset %u\n", cbData, offPageListInfo));
1147
1148 ASSERT_GUEST_RETURN(cbData <= VMMDEV_MAX_HGCM_DATA_SIZE, VERR_INVALID_PARAMETER);
1149
1150/** @todo respect zero byte page lists... */
1151 /* Check that the page list info is within the request. */
1152 ASSERT_GUEST_RETURN( offPageListInfo >= offExtra
1153 && cbHGCMCall >= sizeof(HGCMPageListInfo)
1154 && offPageListInfo <= cbHGCMCall - sizeof(HGCMPageListInfo),
1155 VERR_INVALID_PARAMETER);
1156 RT_UNTRUSTED_VALIDATED_FENCE();
1157
1158 /* The HGCMPageListInfo structure is within the request. */
1159 const HGCMPageListInfo *pPageListInfo = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offPageListInfo);
1160
1161 /* Enough space for page pointers? */
1162 const uint32_t cMaxPages = 1 + (cbHGCMCall - offPageListInfo - sizeof(HGCMPageListInfo)) / sizeof(RTGCPHYS);
1163 ASSERT_GUEST_RETURN( pPageListInfo->cPages > 0
1164 && pPageListInfo->cPages <= cMaxPages,
1165 VERR_INVALID_PARAMETER);
1166
1167 /* Flags. */
1168 ASSERT_GUEST_MSG_RETURN(VBOX_HGCM_F_PARM_ARE_VALID(pPageListInfo->flags),
1169 ("%#x\n", pPageListInfo->flags), VERR_INVALID_FLAGS);
1170 /* First page offset. */
1171 ASSERT_GUEST_MSG_RETURN(pPageListInfo->offFirstPage < PAGE_SIZE,
1172 ("%#x\n", pPageListInfo->offFirstPage), VERR_INVALID_PARAMETER);
1173
 1174 /* Contiguous page lists only ever have a single page, and a
 1175 no-bounce page list requires cPages to match the size exactly.
 1176 A plain page list currently does not impose any restrictions on cPages. */
1177 ASSERT_GUEST_MSG_RETURN( pPageListInfo->cPages
1178 == (pGuestParm->enmType == VMMDevHGCMParmType_ContiguousPageList ? 1
1179 : RT_ALIGN_32(pPageListInfo->offFirstPage + cbData, PAGE_SIZE) >> PAGE_SHIFT)
1180 || pGuestParm->enmType == VMMDevHGCMParmType_PageList,
1181 ("offFirstPage=%#x cbData=%#x cPages=%#x enmType=%d\n",
1182 pPageListInfo->offFirstPage, cbData, pPageListInfo->cPages, pGuestParm->enmType),
1183 VERR_INVALID_PARAMETER);
1184
1185 RT_UNTRUSTED_VALIDATED_FENCE();
1186
1187 /*
1188 * Deal with no-bounce buffers first, as
1189 * VMMDevHGCMParmType_PageList is the fallback.
1190 */
1191 if (pGuestParm->enmType == VMMDevHGCMParmType_NoBouncePageList)
1192 {
1193 /* Validate page offsets */
1194 ASSERT_GUEST_MSG_RETURN( !(pPageListInfo->aPages[0] & PAGE_OFFSET_MASK)
1195 || (pPageListInfo->aPages[0] & PAGE_OFFSET_MASK) == pPageListInfo->offFirstPage,
1196 ("%#RX64 offFirstPage=%#x\n", pPageListInfo->aPages[0], pPageListInfo->offFirstPage),
1197 VERR_INVALID_POINTER);
1198 uint32_t const cPages = pPageListInfo->cPages;
1199 for (uint32_t iPage = 1; iPage < cPages; iPage++)
1200 ASSERT_GUEST_MSG_RETURN(!(pPageListInfo->aPages[iPage] & PAGE_OFFSET_MASK),
1201 ("[%#zx]=%#RX64\n", iPage, pPageListInfo->aPages[iPage]), VERR_INVALID_POINTER);
1202 RT_UNTRUSTED_VALIDATED_FENCE();
1203
1204 pGuestParm->u.Pages.cbData = cbData;
1205 pGuestParm->u.Pages.offFirstPage = pPageListInfo->offFirstPage;
1206 pGuestParm->u.Pages.fFlags = pPageListInfo->flags;
1207 pGuestParm->u.Pages.cPages = (uint16_t)cPages;
1208 pGuestParm->u.Pages.fLocked = false;
1209 pGuestParm->u.Pages.paPgLocks = (PPGMPAGEMAPLOCK)vmmdevR3HgcmCallMemAllocZ(pThisCC, pCmd,
1210 ( sizeof(PGMPAGEMAPLOCK)
1211 + sizeof(void *)) * cPages);
1212 AssertReturn(pGuestParm->u.Pages.paPgLocks, VERR_NO_MEMORY);
1213
 1214 /* Try to map and lock the guest pages for direct (no-bounce) access. */
1215 int rc = VINF_SUCCESS;
1216 void **papvPages = (void **)&pGuestParm->u.Pages.paPgLocks[cPages];
1217 if (pPageListInfo->flags & VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST)
1218 rc = PDMDevHlpPhysBulkGCPhys2CCPtr(pDevIns, cPages, pPageListInfo->aPages, 0 /*fFlags*/,
1219 papvPages, pGuestParm->u.Pages.paPgLocks);
1220 else
1221 rc = PDMDevHlpPhysBulkGCPhys2CCPtrReadOnly(pDevIns, cPages, pPageListInfo->aPages, 0 /*fFlags*/,
1222 (void const **)papvPages, pGuestParm->u.Pages.paPgLocks);
1223 if (RT_SUCCESS(rc))
1224 {
1225 papvPages[0] = (void *)((uintptr_t)papvPages[0] | pPageListInfo->offFirstPage);
1226 pGuestParm->u.Pages.fLocked = true;
1227 break;
1228 }
1229
1230 /* Locking failed, bail out. In case of MMIO we fall back on regular page list handling. */
1231 RTMemFree(pGuestParm->u.Pages.paPgLocks);
1232 pGuestParm->u.Pages.paPgLocks = NULL;
1233 STAM_REL_COUNTER_INC(&pThisCC->StatHgcmFailedPageListLocking);
1234 ASSERT_GUEST_MSG_RETURN(rc == VERR_PGM_PHYS_PAGE_RESERVED, ("cPages=%u %Rrc\n", cPages, rc), rc);
1235 pGuestParm->enmType = VMMDevHGCMParmType_PageList;
1236 }
1237
1238 /*
1239 * Regular page list or contiguous page list.
1240 */
1241 pGuestParm->u.ptr.cbData = cbData;
1242 pGuestParm->u.ptr.offFirstPage = pPageListInfo->offFirstPage;
1243 pGuestParm->u.ptr.cPages = pPageListInfo->cPages;
1244 pGuestParm->u.ptr.fu32Direction = pPageListInfo->flags;
1245 if (pPageListInfo->cPages == 1)
1246 {
1247 pGuestParm->u.ptr.paPages = &pGuestParm->u.ptr.GCPhysSinglePage;
1248 pGuestParm->u.ptr.GCPhysSinglePage = pPageListInfo->aPages[0];
1249 }
1250 else
1251 {
1252 pGuestParm->u.ptr.paPages = (RTGCPHYS *)vmmdevR3HgcmCallMemAlloc(pThisCC, pCmd,
1253 pPageListInfo->cPages * sizeof(RTGCPHYS));
1254 AssertReturn(pGuestParm->u.ptr.paPages, VERR_NO_MEMORY);
1255
1256 for (uint32_t iPage = 0; iPage < pGuestParm->u.ptr.cPages; ++iPage)
1257 pGuestParm->u.ptr.paPages[iPage] = pPageListInfo->aPages[iPage];
1258 }
1259 break;
1260 }
1261
1262 case VMMDevHGCMParmType_Embedded:
1263 {
1264#ifdef VBOX_WITH_64_BITS_GUESTS
1265 AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.Embedded.cbData, HGCMFunctionParameter32, u.Embedded.cbData);
1266 uint32_t const cbData = ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.Embedded.cbData;
1267 uint32_t const offData = ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.Embedded.offData;
1268 uint32_t const fFlags = ((HGCMFunctionParameter64 *)pu8HGCMParm)->u.Embedded.fFlags;
1269#else
1270 uint32_t const cbData = ((HGCMFunctionParameter *)pu8HGCMParm)->u.Embedded.cbData;
1271 uint32_t const offData = ((HGCMFunctionParameter *)pu8HGCMParm)->u.Embedded.offData;
1272 uint32_t const fFlags = ((HGCMFunctionParameter *)pu8HGCMParm)->u.Embedded.fFlags;
1273#endif
1274 LogFunc(("Embedded guest parameter cb %u, offset %u, flags %#x\n", cbData, offData, fFlags));
1275
1276 ASSERT_GUEST_RETURN(cbData <= VMMDEV_MAX_HGCM_DATA_SIZE, VERR_INVALID_PARAMETER);
1277
1278 /* Check flags and buffer range. */
1279 ASSERT_GUEST_MSG_RETURN(VBOX_HGCM_F_PARM_ARE_VALID(fFlags), ("%#x\n", fFlags), VERR_INVALID_FLAGS);
1280 ASSERT_GUEST_MSG_RETURN( offData >= offExtra
1281 && offData <= cbHGCMCall
1282 && cbData <= cbHGCMCall - offData,
1283 ("offData=%#x cbData=%#x cbHGCMCall=%#x offExtra=%#x\n", offData, cbData, cbHGCMCall, offExtra),
1284 VERR_INVALID_PARAMETER);
1285 RT_UNTRUSTED_VALIDATED_FENCE();
1286
1287 /* We use part of the ptr member. */
1288 pGuestParm->u.ptr.fu32Direction = fFlags;
1289 pGuestParm->u.ptr.cbData = cbData;
1290 pGuestParm->u.ptr.offFirstPage = offData;
1291 pGuestParm->u.ptr.GCPhysSinglePage = pCmd->GCPhys + offData;
1292 pGuestParm->u.ptr.cPages = 1;
1293 pGuestParm->u.ptr.paPages = &pGuestParm->u.ptr.GCPhysSinglePage;
1294 break;
1295 }
1296
1297 default:
1298 ASSERT_GUEST_FAILED_RETURN(VERR_INVALID_PARAMETER);
1299 }
1300 }
1301
1302 return VINF_SUCCESS;
1303}
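/* Rough layout of the guest HGCMCall request parsed above (offsets from the
 * start of the request; offExtra = sizeof(VMMDevHGCMCall) + cParms * cbHGCMParmStruct):
 *
 *   0                offHGCMParms              offExtra              cbHGCMCall
 *   +----------------+-------------------------+-------------------------+
 *   | VMMDevHGCMCall | HGCMFunctionParameter   | page-list descriptors / |
 *   | header         | [cParms]                | embedded buffer data    |
 *   +----------------+-------------------------+-------------------------+
 */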
1304
1305/**
1306 * Handles VMMDevHGCMCall request.
1307 *
1308 * @returns VBox status code that the guest should see.
1309 * @param pDevIns The device instance.
1310 * @param pThis The VMMDev shared instance data.
1311 * @param pThisCC The VMMDev ring-3 instance data.
1312 * @param pHGCMCall The request to handle (cached in host memory).
1313 * @param cbHGCMCall Size of the entire request (including HGCM parameters).
1314 * @param GCPhys The guest physical address of the request.
1315 * @param enmRequestType The request type. Distinguishes 64 and 32 bit calls.
1316 * @param tsArrival The STAM_GET_TS() value when the request arrived.
1317 * @param ppLock Pointer to the lock info pointer (latter can be
1318 * NULL). Set to NULL if HGCM takes lock ownership.
1319 */
1320int vmmdevR3HgcmCall(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, const VMMDevHGCMCall *pHGCMCall, uint32_t cbHGCMCall,
1321 RTGCPHYS GCPhys, VMMDevRequestType enmRequestType, uint64_t tsArrival, PVMMDEVREQLOCK *ppLock)
1322{
1323 LogFunc(("client id = %d, function = %d, cParms = %d, enmRequestType = %d, fRequestor = %#x\n", pHGCMCall->u32ClientID,
1324 pHGCMCall->u32Function, pHGCMCall->cParms, enmRequestType, pHGCMCall->header.header.fRequestor));
1325
1326 /*
1327 * Validation.
1328 */
1329 ASSERT_GUEST_RETURN(cbHGCMCall >= sizeof(VMMDevHGCMCall), VERR_INVALID_PARAMETER);
1330#ifdef VBOX_WITH_64_BITS_GUESTS
1331 ASSERT_GUEST_RETURN( enmRequestType == VMMDevReq_HGCMCall32
1332 || enmRequestType == VMMDevReq_HGCMCall64, VERR_INVALID_PARAMETER);
1333#else
1334 ASSERT_GUEST_RETURN(enmRequestType == VMMDevReq_HGCMCall32, VERR_INVALID_PARAMETER);
1335#endif
1336 RT_UNTRUSTED_VALIDATED_FENCE();
1337
1338 /*
1339 * Create a command structure.
1340 */
1341 PVBOXHGCMCMD pCmd;
1342 uint32_t cbHGCMParmStruct;
1343 int rc = vmmdevR3HgcmCallAlloc(pThisCC, pHGCMCall, cbHGCMCall, GCPhys, enmRequestType, &pCmd, &cbHGCMParmStruct);
1344 if (RT_SUCCESS(rc))
1345 {
1346 pCmd->tsArrival = tsArrival;
1347 PVMMDEVREQLOCK pLock = *ppLock;
1348 if (pLock)
1349 {
1350 pCmd->ReqMapLock = pLock->Lock;
1351 pCmd->pvReqLocked = pLock->pvReq;
1352 *ppLock = NULL;
1353 }
1354
1355 rc = vmmdevR3HgcmCallFetchGuestParms(pDevIns, pThisCC, pCmd, pHGCMCall, cbHGCMCall, enmRequestType, cbHGCMParmStruct);
1356 if (RT_SUCCESS(rc))
1357 {
1358 /* Copy guest data to host parameters, so HGCM services can use the data. */
1359 rc = vmmdevR3HgcmInitHostParameters(pDevIns, pThisCC, pCmd, (uint8_t const *)pHGCMCall);
1360 if (RT_SUCCESS(rc))
1361 {
1362 /*
1363 * Pass the function call to HGCM connector for actual processing
1364 */
1365 vmmdevR3HgcmAddCommand(pDevIns, pThis, pThisCC, pCmd);
1366
1367#if 0 /* DON'T ENABLE - for performance hacking. */
1368 if ( pCmd->u.call.u32Function == 9
1369 && pCmd->u.call.cParms == 5)
1370 {
1371 vmmdevR3HgcmRemoveCommand(pThisCC, pCmd);
1372
1373 if (pCmd->pvReqLocked)
1374 {
1375 VMMDevHGCMRequestHeader volatile *pHeader = (VMMDevHGCMRequestHeader volatile *)pCmd->pvReqLocked;
1376 pHeader->header.rc = VINF_SUCCESS;
1377 pHeader->result = VINF_SUCCESS;
1378 pHeader->fu32Flags |= VBOX_HGCM_REQ_DONE;
1379 }
1380 else
1381 {
1382 VMMDevHGCMRequestHeader *pHeader = (VMMDevHGCMRequestHeader *)pHGCMCall;
1383 pHeader->header.rc = VINF_SUCCESS;
1384 pHeader->result = VINF_SUCCESS;
1385 pHeader->fu32Flags |= VBOX_HGCM_REQ_DONE;
1386 PDMDevHlpPhysWrite(pDevIns, GCPhys, pHeader, sizeof(*pHeader));
1387 }
 1388 vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd);
1389 return VINF_HGCM_ASYNC_EXECUTE; /* ignored, but avoids assertions. */
1390 }
1391#endif
1392
1393 rc = pThisCC->pHGCMDrv->pfnCall(pThisCC->pHGCMDrv, pCmd,
1394 pCmd->u.call.u32ClientID, pCmd->u.call.u32Function,
1395 pCmd->u.call.cParms, pCmd->u.call.paHostParms, tsArrival);
1396
1397 if (rc == VINF_HGCM_ASYNC_EXECUTE)
1398 {
1399 /*
1400 * Done. Just update statistics and return.
1401 */
1402#ifndef VBOX_WITHOUT_RELEASE_STATISTICS
1403 uint64_t tsNow;
1404 STAM_GET_TS(tsNow);
1405 STAM_REL_PROFILE_ADD_PERIOD(&pThisCC->StatHgcmCmdArrival, tsNow - tsArrival);
1406#endif
1407 return rc;
1408 }
1409
1410 /*
1411 * Failed, bail out.
1412 */
1413 LogFunc(("pfnCall rc = %Rrc\n", rc));
1414 vmmdevR3HgcmRemoveCommand(pThisCC, pCmd);
1415 }
1416 }
1417 vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd);
1418 }
1419 return rc;
1420}
1421
1422/**
1423 * VMMDevReq_HGCMCancel worker.
1424 *
1425 * @returns VBox status code that the guest should see.
1426 * @param pThisCC The VMMDev ring-3 instance data.
1427 * @param pHGCMCancel The request to handle (cached in host memory).
1428 * @param GCPhys The address of the request.
1429 *
1430 * @thread EMT
1431 */
1432int vmmdevR3HgcmCancel(PVMMDEVCC pThisCC, const VMMDevHGCMCancel *pHGCMCancel, RTGCPHYS GCPhys)
1433{
1434 NOREF(pHGCMCancel);
1435 int rc = vmmdevR3HgcmCancel2(pThisCC, GCPhys);
1436 return rc == VERR_NOT_FOUND ? VERR_INVALID_PARAMETER : rc;
1437}
1438
1439/**
1440 * VMMDevReq_HGCMCancel2 worker.
1441 *
1442 * @retval VINF_SUCCESS on success.
1443 * @retval VERR_NOT_FOUND if the request was not found.
1444 * @retval VERR_INVALID_PARAMETER if the request address is invalid.
1445 *
1446 * @param pThisCC The VMMDev ring-3 instance data.
1447 * @param GCPhys The address of the request that should be cancelled.
1448 *
1449 * @thread EMT
1450 */
1451int vmmdevR3HgcmCancel2(PVMMDEVCC pThisCC, RTGCPHYS GCPhys)
1452{
1453 if ( GCPhys == 0
1454 || GCPhys == NIL_RTGCPHYS
1455 || GCPhys == NIL_RTGCPHYS32)
1456 {
1457 Log(("vmmdevR3HgcmCancel2: GCPhys=%#x\n", GCPhys));
1458 return VERR_INVALID_PARAMETER;
1459 }
1460
1461 /*
1462 * Locate the command and cancel it while under the protection of
1463 * the lock. hgcmCompletedWorker makes assumptions about this.
1464 */
1465 int rc = vmmdevR3HgcmCmdListLock(pThisCC);
1466 AssertRCReturn(rc, rc);
1467
1468 PVBOXHGCMCMD pCmd = vmmdevR3HgcmFindCommandLocked(pThisCC, GCPhys);
1469 if (pCmd)
1470 {
1471 pCmd->fCancelled = true;
1472
1473 Log(("vmmdevR3HgcmCancel2: Cancelled pCmd=%p / GCPhys=%#x\n", pCmd, GCPhys));
1474 if (pThisCC->pHGCMDrv)
1475 pThisCC->pHGCMDrv->pfnCancelled(pThisCC->pHGCMDrv, pCmd,
1476 pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL ? pCmd->u.call.u32ClientID
1477 : pCmd->enmCmdType == VBOXHGCMCMDTYPE_CONNECT ? pCmd->u.connect.u32ClientID
1478 : pCmd->enmCmdType == VBOXHGCMCMDTYPE_DISCONNECT ? pCmd->u.disconnect.u32ClientID
1479 : 0);
1480 }
1481 else
1482 rc = VERR_NOT_FOUND;
1483
1484 vmmdevR3HgcmCmdListUnlock(pThisCC);
1485 return rc;
1486}
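/* Note: cancellation only flags the command and notifies the HGCM connector via
 * pfnCancelled; the command memory is reclaimed later through the normal
 * completion path (cf. the hgcmCompletedWorker remark above). */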
1487
1488/** Write HGCM call parameters and buffers back to the guest request and memory.
1489 *
1490 * @returns VBox status code that the guest should see.
1491 * @param pDevIns The device instance.
1492 * @param pCmd Completed call command.
 1493 * @param pHGCMCall The guest request which needs updating (cached in host memory).
1494 * @param pbReq The request copy or locked memory for handling
1495 * embedded buffers.
1496 */
1497static int vmmdevR3HgcmCompleteCallRequest(PPDMDEVINS pDevIns, PVBOXHGCMCMD pCmd, VMMDevHGCMCall *pHGCMCall, uint8_t *pbReq)
1498{
1499 AssertReturn(pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL, VERR_INTERNAL_ERROR);
1500
1501 /*
1502 * Go over parameter descriptions saved in pCmd.
1503 */
1504#ifdef VBOX_WITH_64_BITS_GUESTS
1505 HGCMFunctionParameter64 *pReqParm = (HGCMFunctionParameter64 *)(pbReq + sizeof(VMMDevHGCMCall));
1506 size_t const cbHGCMParmStruct = pCmd->enmRequestType == VMMDevReq_HGCMCall64
1507 ? sizeof(HGCMFunctionParameter64) : sizeof(HGCMFunctionParameter32);
1508#else
1509 HGCMFunctionParameter *pReqParm = (HGCMFunctionParameter *)(pbReq + sizeof(VMMDevHGCMCall));
1510 size_t const cbHGCMParmStruct = sizeof(HGCMFunctionParameter);
1511#endif
1512 for (uint32_t i = 0;
1513 i < pCmd->u.call.cParms;
1514#ifdef VBOX_WITH_64_BITS_GUESTS
1515 ++i, pReqParm = (HGCMFunctionParameter64 *)((uint8_t *)pReqParm + cbHGCMParmStruct)
1516#else
1517 ++i, pReqParm = (HGCMFunctionParameter *)((uint8_t *)pReqParm + cbHGCMParmStruct)
1518#endif
1519 )
1520 {
1521 VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
1522 VBOXHGCMSVCPARM * const pHostParm = &pCmd->u.call.paHostParms[i];
1523
1524 const HGCMFunctionParameterType enmType = pGuestParm->enmType;
1525 switch (enmType)
1526 {
1527 case VMMDevHGCMParmType_32bit:
1528 case VMMDevHGCMParmType_64bit:
1529 {
1530 const VBOXHGCMPARMVAL * const pVal = &pGuestParm->u.val;
1531 const void *pvSrc = enmType == VMMDevHGCMParmType_32bit ? (void *)&pHostParm->u.uint32
1532 : (void *)&pHostParm->u.uint64;
1533/** @todo optimize memcpy away here. */
1534 memcpy((uint8_t *)pHGCMCall + pVal->offValue, pvSrc, pVal->cbValue);
1535 break;
1536 }
1537
1538 case VMMDevHGCMParmType_LinAddr_In:
1539 case VMMDevHGCMParmType_LinAddr_Out:
1540 case VMMDevHGCMParmType_LinAddr:
1541 case VMMDevHGCMParmType_PageList:
1542 {
1543/** @todo Update the return buffer size? */
1544 const VBOXHGCMPARMPTR * const pPtr = &pGuestParm->u.ptr;
1545 if ( pPtr->cbData > 0
1546 && (pPtr->fu32Direction & VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST))
1547 {
1548 const void *pvSrc = pHostParm->u.pointer.addr;
1549 uint32_t cbSrc = pHostParm->u.pointer.size;
1550 int rc = vmmdevR3HgcmGuestBufferWrite(pDevIns, pPtr, pvSrc, cbSrc);
1551 if (RT_FAILURE(rc))
1552 break;
1553 }
1554 break;
1555 }
1556
1557 case VMMDevHGCMParmType_Embedded:
1558 {
1559 const VBOXHGCMPARMPTR * const pPtr = &pGuestParm->u.ptr;
1560
1561 /* Update size. */
1562#ifdef VBOX_WITH_64_BITS_GUESTS
1563 AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.Embedded.cbData, HGCMFunctionParameter32, u.Embedded.cbData);
1564#endif
1565 pReqParm->u.Embedded.cbData = pHostParm->u.pointer.size;
1566
1567 /* Copy out data. */
1568 if ( pPtr->cbData > 0
1569 && (pPtr->fu32Direction & VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST))
1570 {
1571 const void *pvSrc = pHostParm->u.pointer.addr;
1572 uint32_t cbSrc = pHostParm->u.pointer.size;
1573 uint32_t cbToCopy = RT_MIN(cbSrc, pPtr->cbData);
1574 memcpy(pbReq + pPtr->offFirstPage, pvSrc, cbToCopy);
1575 }
1576 break;
1577 }
1578
1579 case VMMDevHGCMParmType_ContiguousPageList:
1580 {
1581 const VBOXHGCMPARMPTR * const pPtr = &pGuestParm->u.ptr;
1582
1583 /* Update size. */
1584#ifdef VBOX_WITH_64_BITS_GUESTS
1585 AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.PageList.size, HGCMFunctionParameter32, u.PageList.size);
1586#endif
1587 pReqParm->u.PageList.size = pHostParm->u.pointer.size;
1588
1589 /* Copy out data. */
1590 if ( pPtr->cbData > 0
1591 && (pPtr->fu32Direction & VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST))
1592 {
1593 const void *pvSrc = pHostParm->u.pointer.addr;
1594 uint32_t cbSrc = pHostParm->u.pointer.size;
1595 uint32_t cbToCopy = RT_MIN(cbSrc, pPtr->cbData);
1596 int rc = PDMDevHlpPhysWrite(pDevIns, pGuestParm->u.ptr.paPages[0] | pGuestParm->u.ptr.offFirstPage,
1597 pvSrc, cbToCopy);
1598 if (RT_FAILURE(rc))
1599 break;
1600 }
1601 break;
1602 }
1603
1604 case VMMDevHGCMParmType_NoBouncePageList:
1605 {
1606 /* Update size. */
1607#ifdef VBOX_WITH_64_BITS_GUESTS
1608 AssertCompileMembersSameSizeAndOffset(HGCMFunctionParameter64, u.PageList.size, HGCMFunctionParameter32, u.PageList.size);
1609#endif
1610 pReqParm->u.PageList.size = pHostParm->u.Pages.cb;
1611
1612 /* unlock early. */
1613 if (pGuestParm->u.Pages.fLocked)
1614 {
1615 PDMDevHlpPhysBulkReleasePageMappingLocks(pDevIns, pGuestParm->u.Pages.cPages,
1616 pGuestParm->u.Pages.paPgLocks);
1617 pGuestParm->u.Pages.fLocked = false;
1618 }
1619 break;
1620 }
1621
1622 default:
1623 break;
1624 }
1625 }
1626
1627 return VINF_SUCCESS;
1628}
1629
1630/** Update HGCM request in the guest memory and mark it as completed.
1631 *
1632 * @returns VINF_SUCCESS or VERR_CANCELLED.
1633 * @param pInterface Pointer to this PDM interface.
1634 * @param result HGCM completion status code (VBox status code).
1635 * @param pCmd Completed command, which contains updated host parameters.
1636 *
1637 * @thread EMT
1638 */
1639static int hgcmCompletedWorker(PPDMIHGCMPORT pInterface, int32_t result, PVBOXHGCMCMD pCmd)
1640{
1641 PVMMDEVCC pThisCC = RT_FROM_MEMBER(pInterface, VMMDEVCC, IHGCMPort);
1642 PPDMDEVINS pDevIns = pThisCC->pDevIns;
1643 PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
1644#ifdef VBOX_WITH_DTRACE
1645 uint32_t idFunction = 0;
1646 uint32_t idClient = 0;
1647#endif
1648
1649 if (result == VINF_HGCM_SAVE_STATE)
1650 {
1651 /* If the completion routine was called while the HGCM service saves its state,
1652         * then there is currently nothing to be done here.  The pCmd stays in the list and will
1653         * be saved later when the VMMDev state is saved, and re-submitted on load.
1654         *
1655         * It is assumed that VMMDev saves its state after the HGCM services (the VMMDev driver is
1656         * attached by the constructor before it registers its SSM state), and, therefore,
1657         * VBOXHGCMCMD structures are not removed from the list by vmmdevR3HgcmSaveState
1658         * while HGCM uses them.
1659 */
1660 LogFlowFunc(("VINF_HGCM_SAVE_STATE for command %p\n", pCmd));
1661 return VINF_SUCCESS;
1662 }
1663
1664 VBOXDD_HGCMCALL_COMPLETED_EMT(pCmd, result);
1665
1666 int rc = VINF_SUCCESS;
1667
1668 /*
1669 * The cancellation protocol requires us to remove the command here
1670 * and then check the flag. Cancelled commands must not be written
1671 * back to guest memory.
1672 */
1673 vmmdevR3HgcmRemoveCommand(pThisCC, pCmd);
1674
1675 if (RT_LIKELY(!pCmd->fCancelled))
1676 {
1677 if (!pCmd->pvReqLocked)
1678 {
1679 /*
1680 * Request is not locked:
1681 */
1682 VMMDevHGCMRequestHeader *pHeader = (VMMDevHGCMRequestHeader *)RTMemAlloc(pCmd->cbRequest);
1683 if (pHeader)
1684 {
1685 /*
1686 * Read the request from the guest memory for updating.
1687             * The request data is not used for anything except checking the request type.
1688 */
1689 PDMDevHlpPhysRead(pDevIns, pCmd->GCPhys, pHeader, pCmd->cbRequest);
1690 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
1691
1692 /* Verify the request type. This is the only field which is used from the guest memory. */
1693 const VMMDevRequestType enmRequestType = pHeader->header.requestType;
1694 if ( enmRequestType == pCmd->enmRequestType
1695 || enmRequestType == VMMDevReq_HGCMCancel)
1696 {
1697 RT_UNTRUSTED_VALIDATED_FENCE();
1698
1699 /*
1700 * Update parameters and data buffers.
1701 */
1702 switch (enmRequestType)
1703 {
1704#ifdef VBOX_WITH_64_BITS_GUESTS
1705 case VMMDevReq_HGCMCall64:
1706#endif
1707 case VMMDevReq_HGCMCall32:
1708 {
1709 VMMDevHGCMCall *pHGCMCall = (VMMDevHGCMCall *)pHeader;
1710 rc = vmmdevR3HgcmCompleteCallRequest(pDevIns, pCmd, pHGCMCall, (uint8_t *)pHeader);
1711#ifdef VBOX_WITH_DTRACE
1712 idFunction = pCmd->u.call.u32Function;
1713 idClient = pCmd->u.call.u32ClientID;
1714#endif
1715 break;
1716 }
1717
1718 case VMMDevReq_HGCMConnect:
1719 {
1720 /* save the client id in the guest request packet */
1721 VMMDevHGCMConnect *pHGCMConnect = (VMMDevHGCMConnect *)pHeader;
1722 pHGCMConnect->u32ClientID = pCmd->u.connect.u32ClientID;
1723 break;
1724 }
1725
1726 default:
1727 /* make compiler happy */
1728 break;
1729 }
1730 }
1731 else
1732 {
1733 /* Guest has changed the command type. */
1734 LogRelMax(50, ("VMMDEV: Invalid HGCM command: pCmd->enmCmdType = 0x%08X, pHeader->header.requestType = 0x%08X\n",
1735 pCmd->enmCmdType, pHeader->header.requestType));
1736
1737 ASSERT_GUEST_FAILED_STMT(rc = VERR_INVALID_PARAMETER);
1738 }
1739
1740 /* Setup return code for the guest. */
1741 if (RT_SUCCESS(rc))
1742 pHeader->result = result;
1743 else
1744 pHeader->result = rc;
1745
1746 /* First write back the request. */
1747 PDMDevHlpPhysWrite(pDevIns, pCmd->GCPhys, pHeader, pCmd->cbRequest);
1748
1749 /* Mark request as processed. */
1750 pHeader->fu32Flags |= VBOX_HGCM_REQ_DONE;
1751
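                /* The DONE flag is set only in the local copy so far; the guest observes it only after the
                   second write below, i.e. never before the updated request body written out above. */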
1752 /* Second write the flags to mark the request as processed. */
1753 PDMDevHlpPhysWrite(pDevIns, pCmd->GCPhys + RT_UOFFSETOF(VMMDevHGCMRequestHeader, fu32Flags),
1754 &pHeader->fu32Flags, sizeof(pHeader->fu32Flags));
1755
1756                /* Now that the command has been removed from the internal list, notify the guest. */
1757 VMMDevNotifyGuest(pDevIns, pThis, pThisCC, VMMDEV_EVENT_HGCM);
1758
1759 RTMemFreeZ(pHeader, pCmd->cbRequest);
1760 }
1761 else
1762 {
1763 LogRelMax(10, ("VMMDev: Failed to allocate %u bytes for HGCM request completion!!!\n", pCmd->cbRequest));
1764 }
1765 }
1766 /*
1767 * Request was locked:
1768 */
1769 else
1770 {
1771 VMMDevHGCMRequestHeader volatile *pHeader = (VMMDevHGCMRequestHeader volatile *)pCmd->pvReqLocked;
1772
1773 /* Verify the request type. This is the only field which is used from the guest memory. */
1774 const VMMDevRequestType enmRequestType = pHeader->header.requestType;
1775 if ( enmRequestType == pCmd->enmRequestType
1776 || enmRequestType == VMMDevReq_HGCMCancel)
1777 {
1778 RT_UNTRUSTED_VALIDATED_FENCE();
1779
1780 /*
1781 * Update parameters and data buffers.
1782 */
1783 switch (enmRequestType)
1784 {
1785#ifdef VBOX_WITH_64_BITS_GUESTS
1786 case VMMDevReq_HGCMCall64:
1787#endif
1788 case VMMDevReq_HGCMCall32:
1789 {
1790 VMMDevHGCMCall *pHGCMCall = (VMMDevHGCMCall *)pHeader;
1791 rc = vmmdevR3HgcmCompleteCallRequest(pDevIns, pCmd, pHGCMCall, (uint8_t *)pHeader);
1792#ifdef VBOX_WITH_DTRACE
1793 idFunction = pCmd->u.call.u32Function;
1794 idClient = pCmd->u.call.u32ClientID;
1795#endif
1796 break;
1797 }
1798
1799 case VMMDevReq_HGCMConnect:
1800 {
1801 /* save the client id in the guest request packet */
1802 VMMDevHGCMConnect *pHGCMConnect = (VMMDevHGCMConnect *)pHeader;
1803 pHGCMConnect->u32ClientID = pCmd->u.connect.u32ClientID;
1804 break;
1805 }
1806
1807 default:
1808 /* make compiler happy */
1809 break;
1810 }
1811 }
1812 else
1813 {
1814 /* Guest has changed the command type. */
1815 LogRelMax(50, ("VMMDEV: Invalid HGCM command: pCmd->enmCmdType = 0x%08X, pHeader->header.requestType = 0x%08X\n",
1816 pCmd->enmCmdType, pHeader->header.requestType));
1817
1818 ASSERT_GUEST_FAILED_STMT(rc = VERR_INVALID_PARAMETER);
1819 }
1820
1821 /* Setup return code for the guest. */
1822 if (RT_SUCCESS(rc))
1823 pHeader->result = result;
1824 else
1825 pHeader->result = rc;
1826
1827 /* Mark request as processed. */
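            /* The request stays mapped in host memory and the guest may poll the flags field
               concurrently, hence the atomic OR below. */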
1828 ASMAtomicOrU32(&pHeader->fu32Flags, VBOX_HGCM_REQ_DONE);
1829
1830            /* Now that the command has been removed from the internal list, notify the guest. */
1831 VMMDevNotifyGuest(pDevIns, pThis, pThisCC, VMMDEV_EVENT_HGCM);
1832 }
1833
1834 /* Set the status to success for now, though we might consider passing
1835 along the vmmdevR3HgcmCompleteCallRequest errors... */
1836 rc = VINF_SUCCESS;
1837 }
1838 else
1839 {
1840 LogFlowFunc(("Cancelled command %p\n", pCmd));
1841 rc = VERR_CANCELLED;
1842 }
1843
1844#ifndef VBOX_WITHOUT_RELEASE_STATISTICS
1845 /* Save for final stats. */
1846 uint64_t const tsArrival = pCmd->tsArrival;
1847 uint64_t const tsComplete = pCmd->tsComplete;
1848#endif
1849
1850    /* Deallocate the command memory. */
1851 VBOXDD_HGCMCALL_COMPLETED_DONE(pCmd, idFunction, idClient, result);
1852 vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd);
1853
1854#ifndef VBOX_WITHOUT_RELEASE_STATISTICS
1855 /* Update stats. */
1856 uint64_t tsNow;
1857 STAM_GET_TS(tsNow);
1858 STAM_REL_PROFILE_ADD_PERIOD(&pThisCC->StatHgcmCmdCompletion, tsNow - tsComplete);
1859 if (tsArrival != 0)
1860 STAM_REL_PROFILE_ADD_PERIOD(&pThisCC->StatHgcmCmdTotal, tsNow - tsArrival);
1861#endif
1862
1863 return rc;
1864}
1865
1866/**
1867 * HGCM callback for request completion. Forwards to hgcmCompletedWorker.
1868 *
1869 * @returns VINF_SUCCESS or VERR_CANCELLED.
1870 * @param pInterface Pointer to this PDM interface.
1871 * @param result HGCM completion status code (VBox status code).
1872 * @param pCmd Completed command, which contains updated host parameters.
1873 */
1874DECLCALLBACK(int) hgcmR3Completed(PPDMIHGCMPORT pInterface, int32_t result, PVBOXHGCMCMD pCmd)
1875{
1876#if 0 /* This seems to be significantly slower.  Half of MsgTotal time seems to be spent here. */
1877 PVMMDEVCC pThisCC = RT_FROM_MEMBER(pInterface, VMMDEVCC, IHGCMPort);
1878 STAM_GET_TS(pCmd->tsComplete);
1879
1880 VBOXDD_HGCMCALL_COMPLETED_REQ(pCmd, result);
1881
1882/** @todo no longer necessary to forward to EMT, but it might be more
1883 * efficient...? */
1884 /* Not safe to execute asynchronously; forward to EMT */
1885 int rc = VMR3ReqCallVoidNoWait(PDMDevHlpGetVM(pDevIns), VMCPUID_ANY,
1886 (PFNRT)hgcmCompletedWorker, 3, pInterface, result, pCmd);
1887 AssertRC(rc);
1888 return VINF_SUCCESS; /* cannot tell if canceled or not... */
1889#else
1890 STAM_GET_TS(pCmd->tsComplete);
1891 VBOXDD_HGCMCALL_COMPLETED_REQ(pCmd, result);
1892 return hgcmCompletedWorker(pInterface, result, pCmd);
1893#endif
1894}
1895
1896/**
1897 * @interface_method_impl{PDMIHGCMPORT,pfnIsCmdRestored}
1898 */
1899DECLCALLBACK(bool) hgcmR3IsCmdRestored(PPDMIHGCMPORT pInterface, PVBOXHGCMCMD pCmd)
1900{
1901 RT_NOREF(pInterface);
1902 return pCmd && pCmd->fRestored;
1903}
1904
1905/**
1906 * @interface_method_impl{PDMIHGCMPORT,pfnIsCmdCancelled}
1907 */
1908DECLCALLBACK(bool) hgcmR3IsCmdCancelled(PPDMIHGCMPORT pInterface, PVBOXHGCMCMD pCmd)
1909{
1910 RT_NOREF(pInterface);
1911 return pCmd && pCmd->fCancelled;
1912}
1913
1914/**
1915 * @interface_method_impl{PDMIHGCMPORT,pfnGetRequestor}
1916 */
1917DECLCALLBACK(uint32_t) hgcmR3GetRequestor(PPDMIHGCMPORT pInterface, PVBOXHGCMCMD pCmd)
1918{
1919 PVMMDEVCC pThisCC = RT_FROM_MEMBER(pInterface, VMMDEVCC, IHGCMPort);
1920 PVMMDEV pThis = PDMDEVINS_2_DATA(pThisCC->pDevIns, PVMMDEV);
1921 AssertPtrReturn(pCmd, VMMDEV_REQUESTOR_LOWEST);
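    /* Guests which have not reported requestor info (VBOXGSTINFO2_F_REQUESTOR_INFO) get the legacy value. */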
1922 if (pThis->guestInfo2.fFeatures & VBOXGSTINFO2_F_REQUESTOR_INFO)
1923 return pCmd->fRequestor;
1924 return VMMDEV_REQUESTOR_LEGACY;
1925}
1926
1927/**
1928 * @interface_method_impl{PDMIHGCMPORT,pfnGetVMMDevSessionId}
1929 */
1930DECLCALLBACK(uint64_t) hgcmR3GetVMMDevSessionId(PPDMIHGCMPORT pInterface)
1931{
1932 PVMMDEVCC pThisCC = RT_FROM_MEMBER(pInterface, VMMDEVCC, IHGCMPort);
1933 PVMMDEV pThis = PDMDEVINS_2_DATA(pThisCC->pDevIns, PVMMDEV);
1934 return pThis->idSession;
1935}
1936
1937/** Save information about pending HGCM requests from pThisCC->listHGCMCmd.
1938 *
1939 * @returns VBox status code that the guest should see.
1940 * @param pThisCC The VMMDev ring-3 instance data.
1941 * @param pSSM SSM handle for SSM functions.
1942 *
1943 * @thread EMT
1944 */
1945int vmmdevR3HgcmSaveState(PVMMDEVCC pThisCC, PSSMHANDLE pSSM)
1946{
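    /* A rough sketch of what is written below: the number of pending commands, then for each command:
     * enmCmdType, fCancelled, GCPhys, cbRequest, enmRequestType, cParms, the type specific data
     * (call parameters, connect location and client ID, or disconnect client ID) and a reserved uint32_t;
     * finally one reserved uint32_t for the whole unit. */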
1947 PCPDMDEVHLPR3 pHlp = pThisCC->pDevIns->pHlpR3;
1948
1949 LogFlowFunc(("\n"));
1950
1951 /* Compute how many commands are pending. */
1952 uint32_t cCmds = 0;
1953 PVBOXHGCMCMD pCmd;
1954 RTListForEach(&pThisCC->listHGCMCmd, pCmd, VBOXHGCMCMD, node)
1955 {
1956 LogFlowFunc(("pCmd %p\n", pCmd));
1957 ++cCmds;
1958 }
1959 LogFlowFunc(("cCmds = %d\n", cCmds));
1960
1961 /* Save number of commands. */
1962 int rc = pHlp->pfnSSMPutU32(pSSM, cCmds);
1963 AssertRCReturn(rc, rc);
1964
1965 if (cCmds > 0)
1966 {
1967 RTListForEach(&pThisCC->listHGCMCmd, pCmd, VBOXHGCMCMD, node)
1968 {
1969 LogFlowFunc(("Saving %RGp, size %d\n", pCmd->GCPhys, pCmd->cbRequest));
1970
1971 /** @todo Don't save cancelled requests! It serves no purpose. See restore and
1972 * @bugref{4032#c4} for details. */
1973 pHlp->pfnSSMPutU32 (pSSM, (uint32_t)pCmd->enmCmdType);
1974 pHlp->pfnSSMPutBool (pSSM, pCmd->fCancelled);
1975 pHlp->pfnSSMPutGCPhys (pSSM, pCmd->GCPhys);
1976 pHlp->pfnSSMPutU32 (pSSM, pCmd->cbRequest);
1977 pHlp->pfnSSMPutU32 (pSSM, (uint32_t)pCmd->enmRequestType);
1978 const uint32_t cParms = pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL ? pCmd->u.call.cParms : 0;
1979 rc = pHlp->pfnSSMPutU32(pSSM, cParms);
1980 AssertRCReturn(rc, rc);
1981
1982 if (pCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL)
1983 {
1984 pHlp->pfnSSMPutU32 (pSSM, pCmd->u.call.u32ClientID);
1985 rc = pHlp->pfnSSMPutU32(pSSM, pCmd->u.call.u32Function);
1986 AssertRCReturn(rc, rc);
1987
1988 /* Guest parameters. */
1989 uint32_t i;
1990 for (i = 0; i < pCmd->u.call.cParms; ++i)
1991 {
1992 VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
1993
1994 rc = pHlp->pfnSSMPutU32(pSSM, (uint32_t)pGuestParm->enmType);
1995 AssertRCReturn(rc, rc);
1996
1997 if ( pGuestParm->enmType == VMMDevHGCMParmType_32bit
1998 || pGuestParm->enmType == VMMDevHGCMParmType_64bit)
1999 {
2000 const VBOXHGCMPARMVAL * const pVal = &pGuestParm->u.val;
2001 pHlp->pfnSSMPutU64 (pSSM, pVal->u64Value);
2002 pHlp->pfnSSMPutU32 (pSSM, pVal->offValue);
2003 rc = pHlp->pfnSSMPutU32(pSSM, pVal->cbValue);
2004 }
2005 else if ( pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_In
2006 || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_Out
2007 || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr
2008 || pGuestParm->enmType == VMMDevHGCMParmType_PageList
2009 || pGuestParm->enmType == VMMDevHGCMParmType_Embedded
2010 || pGuestParm->enmType == VMMDevHGCMParmType_ContiguousPageList)
2011 {
2012 const VBOXHGCMPARMPTR * const pPtr = &pGuestParm->u.ptr;
2013 pHlp->pfnSSMPutU32 (pSSM, pPtr->cbData);
2014 pHlp->pfnSSMPutU32 (pSSM, pPtr->offFirstPage);
2015 pHlp->pfnSSMPutU32 (pSSM, pPtr->cPages);
2016 rc = pHlp->pfnSSMPutU32(pSSM, pPtr->fu32Direction);
2017
2018 uint32_t iPage;
2019 for (iPage = 0; RT_SUCCESS(rc) && iPage < pPtr->cPages; ++iPage)
2020 rc = pHlp->pfnSSMPutGCPhys(pSSM, pPtr->paPages[iPage]);
2021 }
2022 else if (pGuestParm->enmType == VMMDevHGCMParmType_NoBouncePageList)
2023 {
2024 /* We don't have the page addresses here, so it will need to be
2025 restored from guest memory. This isn't an issue as it is only
2026                       used with services which won't survive a save/restore anyway. */
2027 }
2028 else
2029 {
2030 AssertFailedStmt(rc = VERR_INTERNAL_ERROR);
2031 }
2032 AssertRCReturn(rc, rc);
2033 }
2034 }
2035 else if (pCmd->enmCmdType == VBOXHGCMCMDTYPE_CONNECT)
2036 {
2037 pHlp->pfnSSMPutU32(pSSM, pCmd->u.connect.u32ClientID);
2038 pHlp->pfnSSMPutMem(pSSM, pCmd->u.connect.pLoc, sizeof(*pCmd->u.connect.pLoc));
2039 }
2040 else if (pCmd->enmCmdType == VBOXHGCMCMDTYPE_DISCONNECT)
2041 {
2042 pHlp->pfnSSMPutU32(pSSM, pCmd->u.disconnect.u32ClientID);
2043 }
2044 else
2045 {
2046 AssertFailedReturn(VERR_INTERNAL_ERROR);
2047 }
2048
2049            /* A reserved field that allows extending the saved data for a command. */
2050 rc = pHlp->pfnSSMPutU32(pSSM, 0);
2051 AssertRCReturn(rc, rc);
2052 }
2053 }
2054
2055    /* A reserved field that allows extending the saved data for VMMDevHGCM. */
2056 rc = pHlp->pfnSSMPutU32(pSSM, 0);
2057 AssertRCReturn(rc, rc);
2058
2059 return rc;
2060}
2061
2062/** Load information about pending HGCM requests.
2063 *
2064 * Allocate VBOXHGCMCMD commands and add them to pThisCC->listHGCMCmd
2065 * temporarily. vmmdevR3HgcmLoadStateDone will process the temporary list. This
2066 * includes loading the correct fRequestor fields.
2067 *
2068 * @returns VBox status code that the guest should see.
2069 * @param pDevIns The device instance.
2070 * @param pThis The VMMDev shared instance data.
2071 * @param pThisCC The VMMDev ring-3 instance data.
2072 * @param pSSM SSM handle for SSM functions.
2073 * @param uVersion Saved state version.
2074 *
2075 * @thread EMT
2076 */
2077int vmmdevR3HgcmLoadState(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, PSSMHANDLE pSSM, uint32_t uVersion)
2078{
2079 PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
2080
2081 LogFlowFunc(("\n"));
2082
2083 pThisCC->uSavedStateVersion = uVersion; /* For vmmdevR3HgcmLoadStateDone */
2084
2085 /* Read how many commands were pending. */
2086 uint32_t cCmds = 0;
2087 int rc = pHlp->pfnSSMGetU32(pSSM, &cCmds);
2088 AssertRCReturn(rc, rc);
2089
2090 LogFlowFunc(("cCmds = %d\n", cCmds));
2091
2092 if (uVersion >= VMMDEV_SAVED_STATE_VERSION_HGCM_PARAMS)
2093 {
2094 /* Saved information about all HGCM parameters. */
2095 uint32_t u32;
2096
2097 uint32_t iCmd;
2098 for (iCmd = 0; iCmd < cCmds; ++iCmd)
2099 {
2100 /* Command fields. */
2101 VBOXHGCMCMDTYPE enmCmdType;
2102 bool fCancelled;
2103 RTGCPHYS GCPhys;
2104 uint32_t cbRequest;
2105 VMMDevRequestType enmRequestType;
2106 uint32_t cParms;
2107
2108 pHlp->pfnSSMGetU32 (pSSM, &u32);
2109 enmCmdType = (VBOXHGCMCMDTYPE)u32;
2110 pHlp->pfnSSMGetBool (pSSM, &fCancelled);
2111 pHlp->pfnSSMGetGCPhys (pSSM, &GCPhys);
2112 pHlp->pfnSSMGetU32 (pSSM, &cbRequest);
2113 pHlp->pfnSSMGetU32 (pSSM, &u32);
2114 enmRequestType = (VMMDevRequestType)u32;
2115 rc = pHlp->pfnSSMGetU32(pSSM, &cParms);
2116 AssertRCReturn(rc, rc);
2117
2118 PVBOXHGCMCMD pCmd = vmmdevR3HgcmCmdAlloc(pThisCC, enmCmdType, GCPhys, cbRequest, cParms, 0 /*fRequestor*/);
2119 AssertReturn(pCmd, VERR_NO_MEMORY);
2120
2121 pCmd->fCancelled = fCancelled;
2122 pCmd->GCPhys = GCPhys;
2123 pCmd->cbRequest = cbRequest;
2124 pCmd->enmRequestType = enmRequestType;
2125
2126 if (enmCmdType == VBOXHGCMCMDTYPE_CALL)
2127 {
2128 pHlp->pfnSSMGetU32 (pSSM, &pCmd->u.call.u32ClientID);
2129 rc = pHlp->pfnSSMGetU32(pSSM, &pCmd->u.call.u32Function);
2130 AssertRCReturn(rc, rc);
2131
2132 /* Guest parameters. */
2133 uint32_t i;
2134 for (i = 0; i < cParms; ++i)
2135 {
2136 VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[i];
2137
2138 rc = pHlp->pfnSSMGetU32(pSSM, &u32);
2139 AssertRCReturn(rc, rc);
2140 pGuestParm->enmType = (HGCMFunctionParameterType)u32;
2141
2142 if ( pGuestParm->enmType == VMMDevHGCMParmType_32bit
2143 || pGuestParm->enmType == VMMDevHGCMParmType_64bit)
2144 {
2145 VBOXHGCMPARMVAL * const pVal = &pGuestParm->u.val;
2146 pHlp->pfnSSMGetU64 (pSSM, &pVal->u64Value);
2147 pHlp->pfnSSMGetU32 (pSSM, &pVal->offValue);
2148 rc = pHlp->pfnSSMGetU32(pSSM, &pVal->cbValue);
2149 }
2150 else if ( pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_In
2151 || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_Out
2152 || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr
2153 || pGuestParm->enmType == VMMDevHGCMParmType_PageList
2154 || pGuestParm->enmType == VMMDevHGCMParmType_Embedded
2155 || pGuestParm->enmType == VMMDevHGCMParmType_ContiguousPageList)
2156 {
2157 VBOXHGCMPARMPTR * const pPtr = &pGuestParm->u.ptr;
2158 pHlp->pfnSSMGetU32 (pSSM, &pPtr->cbData);
2159 pHlp->pfnSSMGetU32 (pSSM, &pPtr->offFirstPage);
2160 pHlp->pfnSSMGetU32 (pSSM, &pPtr->cPages);
2161 rc = pHlp->pfnSSMGetU32(pSSM, &pPtr->fu32Direction);
2162 if (RT_SUCCESS(rc))
2163 {
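                            /* A single page buffer reuses the embedded GCPhysSinglePage field, avoiding a separate page array allocation. */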
2164 if (pPtr->cPages == 1)
2165 pPtr->paPages = &pPtr->GCPhysSinglePage;
2166 else
2167 {
2168 AssertReturn( pGuestParm->enmType != VMMDevHGCMParmType_Embedded
2169 && pGuestParm->enmType != VMMDevHGCMParmType_ContiguousPageList, VERR_INTERNAL_ERROR_3);
2170 pPtr->paPages = (RTGCPHYS *)vmmdevR3HgcmCallMemAlloc(pThisCC, pCmd,
2171 pPtr->cPages * sizeof(RTGCPHYS));
2172 AssertStmt(pPtr->paPages, rc = VERR_NO_MEMORY);
2173 }
2174
2175 if (RT_SUCCESS(rc))
2176 {
2177 uint32_t iPage;
2178 for (iPage = 0; iPage < pPtr->cPages; ++iPage)
2179 rc = pHlp->pfnSSMGetGCPhys(pSSM, &pPtr->paPages[iPage]);
2180 }
2181 }
2182 }
2183 else if (pGuestParm->enmType == VMMDevHGCMParmType_NoBouncePageList)
2184 {
2185                    /* This request type can only be restored from guest memory for now. */
2186 pCmd->fRestoreFromGuestMem = true;
2187 }
2188 else
2189 {
2190 AssertFailedStmt(rc = VERR_INTERNAL_ERROR);
2191 }
2192 AssertRCReturn(rc, rc);
2193 }
2194 }
2195 else if (enmCmdType == VBOXHGCMCMDTYPE_CONNECT)
2196 {
2197 pHlp->pfnSSMGetU32(pSSM, &pCmd->u.connect.u32ClientID);
2198 rc = pHlp->pfnSSMGetMem(pSSM, pCmd->u.connect.pLoc, sizeof(*pCmd->u.connect.pLoc));
2199 AssertRCReturn(rc, rc);
2200 }
2201 else if (enmCmdType == VBOXHGCMCMDTYPE_DISCONNECT)
2202 {
2203 rc = pHlp->pfnSSMGetU32(pSSM, &pCmd->u.disconnect.u32ClientID);
2204 AssertRCReturn(rc, rc);
2205 }
2206 else
2207 {
2208 AssertFailedReturn(VERR_INTERNAL_ERROR);
2209 }
2210
2211            /* A reserved field that allows extending the saved data for a command. */
2212 rc = pHlp->pfnSSMGetU32(pSSM, &u32);
2213 AssertRCReturn(rc, rc);
2214
2215 /*
2216 * Do not restore cancelled calls. Why do we save them to start with?
2217 *
2218 * The guest memory no longer contains a valid request! So, it is not
2219 * possible to restore it. The memory is often reused for a new request
2220 * by now and we will end up trying to complete that more than once if
2221 * we restore a cancelled call. In some cases VERR_HGCM_INVALID_CLIENT_ID
2222 * is returned, though it might just be silent memory corruption.
2223 */
2224 /* See current version above. */
2225 if (!fCancelled)
2226 vmmdevR3HgcmAddCommand(pDevIns, pThis, pThisCC, pCmd);
2227 else
2228 {
2229 Log(("vmmdevR3HgcmLoadState: Skipping cancelled request: enmCmdType=%d GCPhys=%#RX32 LB %#x\n",
2230 enmCmdType, GCPhys, cbRequest));
2231 vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd);
2232 }
2233 }
2234
2235        /* A reserved field that allows extending the saved data for VMMDevHGCM. */
2236 rc = pHlp->pfnSSMGetU32(pSSM, &u32);
2237 AssertRCReturn(rc, rc);
2238 }
2239 else if (uVersion >= 9)
2240 {
2241 /* Version 9+: Load information about commands. Pre-rewrite. */
2242 uint32_t u32;
2243
2244 uint32_t iCmd;
2245 for (iCmd = 0; iCmd < cCmds; ++iCmd)
2246 {
2247 VBOXHGCMCMDTYPE enmCmdType;
2248 bool fCancelled;
2249 RTGCPHYS GCPhys;
2250 uint32_t cbRequest;
2251 uint32_t cLinAddrs;
2252
2253 pHlp->pfnSSMGetGCPhys (pSSM, &GCPhys);
2254 rc = pHlp->pfnSSMGetU32(pSSM, &cbRequest);
2255 AssertRCReturn(rc, rc);
2256
2257 LogFlowFunc(("Restoring %RGp size %x bytes\n", GCPhys, cbRequest));
2258
2259            /* For uVersion <= 12, this was the size of the entire command.
2260 * Now the command is reconstructed in vmmdevR3HgcmLoadStateDone.
2261 */
2262 if (uVersion <= 12)
2263 pHlp->pfnSSMSkip(pSSM, sizeof (uint32_t));
2264
2265 pHlp->pfnSSMGetU32 (pSSM, &u32);
2266 enmCmdType = (VBOXHGCMCMDTYPE)u32;
2267 pHlp->pfnSSMGetBool (pSSM, &fCancelled);
2268 /* How many linear pointers. Always 0 if not a call command. */
2269 rc = pHlp->pfnSSMGetU32(pSSM, &cLinAddrs);
2270 AssertRCReturn(rc, rc);
2271
2272 PVBOXHGCMCMD pCmd = vmmdevR3HgcmCmdAlloc(pThisCC, enmCmdType, GCPhys, cbRequest, cLinAddrs, 0 /*fRequestor*/);
2273 AssertReturn(pCmd, VERR_NO_MEMORY);
2274
2275 pCmd->fCancelled = fCancelled;
2276 pCmd->GCPhys = GCPhys;
2277 pCmd->cbRequest = cbRequest;
2278
2279 if (cLinAddrs > 0)
2280 {
2281 /* Skip number of pages for all LinAddrs in this command. */
2282 pHlp->pfnSSMSkip(pSSM, sizeof(uint32_t));
2283
2284 uint32_t i;
2285 for (i = 0; i < cLinAddrs; ++i)
2286 {
2287 VBOXHGCMPARMPTR * const pPtr = &pCmd->u.call.paGuestParms[i].u.ptr;
2288
2289 /* Index of the parameter. Use cbData field to store the index. */
2290 pHlp->pfnSSMGetU32 (pSSM, &pPtr->cbData);
2291 pHlp->pfnSSMGetU32 (pSSM, &pPtr->offFirstPage);
2292 rc = pHlp->pfnSSMGetU32(pSSM, &pPtr->cPages);
2293 AssertRCReturn(rc, rc);
2294
2295 pPtr->paPages = (RTGCPHYS *)vmmdevR3HgcmCallMemAlloc(pThisCC, pCmd, pPtr->cPages * sizeof(RTGCPHYS));
2296 AssertReturn(pPtr->paPages, VERR_NO_MEMORY);
2297
2298 uint32_t iPage;
2299 for (iPage = 0; iPage < pPtr->cPages; ++iPage)
2300 rc = pHlp->pfnSSMGetGCPhys(pSSM, &pPtr->paPages[iPage]);
2301 }
2302 }
2303
2304            /* A reserved field that allows extending the saved data for a command. */
2305 rc = pHlp->pfnSSMGetU32(pSSM, &u32);
2306 AssertRCReturn(rc, rc);
2307
2308 /* See current version above. */
2309 if (!fCancelled)
2310 vmmdevR3HgcmAddCommand(pDevIns, pThis, pThisCC, pCmd);
2311 else
2312 {
2313 Log(("vmmdevR3HgcmLoadState: Skipping cancelled request: enmCmdType=%d GCPhys=%#RX32 LB %#x\n",
2314 enmCmdType, GCPhys, cbRequest));
2315 vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd);
2316 }
2317 }
2318
2319        /* A reserved field that allows extending the saved data for VMMDevHGCM. */
2320 rc = pHlp->pfnSSMGetU32(pSSM, &u32);
2321 AssertRCReturn(rc, rc);
2322 }
2323 else
2324 {
2325 /* Ancient. Only the guest physical address is saved. */
2326 uint32_t iCmd;
2327 for (iCmd = 0; iCmd < cCmds; ++iCmd)
2328 {
2329 RTGCPHYS GCPhys;
2330 uint32_t cbRequest;
2331
2332 pHlp->pfnSSMGetGCPhys(pSSM, &GCPhys);
2333 rc = pHlp->pfnSSMGetU32(pSSM, &cbRequest);
2334 AssertRCReturn(rc, rc);
2335
2336 LogFlowFunc(("Restoring %RGp size %x bytes\n", GCPhys, cbRequest));
2337
2338 PVBOXHGCMCMD pCmd = vmmdevR3HgcmCmdAlloc(pThisCC, VBOXHGCMCMDTYPE_LOADSTATE, GCPhys, cbRequest, 0, 0 /*fRequestor*/);
2339 AssertReturn(pCmd, VERR_NO_MEMORY);
2340
2341 vmmdevR3HgcmAddCommand(pDevIns, pThis, pThisCC, pCmd);
2342 }
2343 }
2344
2345 return rc;
2346}
2347
2348/** Restore HGCM connect command loaded from old saved state.
2349 *
2350 * @returns VBox status code that the guest should see.
2351 * @param pThisCC The VMMDev ring-3 instance data.
2352 * @param uSavedStateVersion The saved state version the command has been loaded from.
2353 * @param   pLoadedCmd          Command loaded from saved state; it is incomplete and needs restoration.
2354 * @param pReq The guest request (cached in host memory).
2355 * @param cbReq Size of the guest request.
2356 * @param enmRequestType Type of the HGCM request.
2357 * @param ppRestoredCmd Where to store pointer to newly allocated restored command.
2358 */
2359static int vmmdevR3HgcmRestoreConnect(PVMMDEVCC pThisCC, uint32_t uSavedStateVersion, const VBOXHGCMCMD *pLoadedCmd,
2360 VMMDevHGCMConnect *pReq, uint32_t cbReq, VMMDevRequestType enmRequestType,
2361 VBOXHGCMCMD **ppRestoredCmd)
2362{
2363 /* Verify the request. */
2364 ASSERT_GUEST_RETURN(cbReq >= sizeof(*pReq), VERR_MISMATCH);
2365 if (uSavedStateVersion >= 9)
2366 ASSERT_GUEST_RETURN(pLoadedCmd->enmCmdType == VBOXHGCMCMDTYPE_CONNECT, VERR_MISMATCH);
2367
2368 PVBOXHGCMCMD pCmd = vmmdevR3HgcmCmdAlloc(pThisCC, VBOXHGCMCMDTYPE_CONNECT, pLoadedCmd->GCPhys, cbReq, 0,
2369 pReq->header.header.fRequestor);
2370 AssertReturn(pCmd, VERR_NO_MEMORY);
2371
2372 Assert(pLoadedCmd->fCancelled == false);
2373 pCmd->fCancelled = false;
2374 pCmd->fRestored = true;
2375 pCmd->enmRequestType = enmRequestType;
2376
2377 vmmdevR3HgcmConnectFetch(pReq, pCmd);
2378
2379 *ppRestoredCmd = pCmd;
2380 return VINF_SUCCESS;
2381}
2382
2383/** Restore HGCM disconnect command loaded from old saved state.
2384 *
2385 * @returns VBox status code that the guest should see.
2386 * @param pThisCC The VMMDev ring-3 instance data.
2387 * @param uSavedStateVersion The saved state version the command has been loaded from.
2388 * @param   pLoadedCmd          Command loaded from saved state; it is incomplete and needs restoration.
2389 * @param pReq The guest request (cached in host memory).
2390 * @param cbReq Size of the guest request.
2391 * @param enmRequestType Type of the HGCM request.
2392 * @param ppRestoredCmd Where to store pointer to newly allocated restored command.
2393 */
2394static int vmmdevR3HgcmRestoreDisconnect(PVMMDEVCC pThisCC, uint32_t uSavedStateVersion, const VBOXHGCMCMD *pLoadedCmd,
2395 VMMDevHGCMDisconnect *pReq, uint32_t cbReq, VMMDevRequestType enmRequestType,
2396 VBOXHGCMCMD **ppRestoredCmd)
2397{
2398 /* Verify the request. */
2399 ASSERT_GUEST_RETURN(cbReq >= sizeof(*pReq), VERR_MISMATCH);
2400 if (uSavedStateVersion >= 9)
2401 ASSERT_GUEST_RETURN(pLoadedCmd->enmCmdType == VBOXHGCMCMDTYPE_DISCONNECT, VERR_MISMATCH);
2402
2403 PVBOXHGCMCMD pCmd = vmmdevR3HgcmCmdAlloc(pThisCC, VBOXHGCMCMDTYPE_DISCONNECT, pLoadedCmd->GCPhys, cbReq, 0,
2404 pReq->header.header.fRequestor);
2405 AssertReturn(pCmd, VERR_NO_MEMORY);
2406
2407 Assert(pLoadedCmd->fCancelled == false);
2408 pCmd->fCancelled = false;
2409 pCmd->fRestored = true;
2410 pCmd->enmRequestType = enmRequestType;
2411
2412 vmmdevR3HgcmDisconnectFetch(pReq, pCmd);
2413
2414 *ppRestoredCmd = pCmd;
2415 return VINF_SUCCESS;
2416}
2417
2418/** Restore HGCM call command loaded from old saved state.
2419 *
2420 * @returns VBox status code that the guest should see.
2421 * @param pDevIns The device instance.
2422 * @param pThis The VMMDev shared instance data.
2423 * @param pThisCC The VMMDev ring-3 instance data.
2424 * @param uSavedStateVersion The saved state version the command has been loaded from.
2425 * @param   pLoadedCmd          Command loaded from saved state; it is incomplete and needs restoration.
2426 * @param pReq The guest request (cached in host memory).
2427 * @param cbReq Size of the guest request.
2428 * @param enmRequestType Type of the HGCM request.
2429 * @param ppRestoredCmd Where to store pointer to newly allocated restored command.
2430 */
2431static int vmmdevR3HgcmRestoreCall(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, uint32_t uSavedStateVersion,
2432 const VBOXHGCMCMD *pLoadedCmd, VMMDevHGCMCall *pReq, uint32_t cbReq,
2433 VMMDevRequestType enmRequestType, VBOXHGCMCMD **ppRestoredCmd)
2434{
2435 /* Verify the request. */
2436 ASSERT_GUEST_RETURN(cbReq >= sizeof(*pReq), VERR_MISMATCH);
2437 if (uSavedStateVersion >= 9)
2438 {
2439 ASSERT_GUEST_RETURN(pLoadedCmd->enmCmdType == VBOXHGCMCMDTYPE_CALL, VERR_MISMATCH);
2440 Assert(pLoadedCmd->fCancelled == false);
2441 }
2442
2443 PVBOXHGCMCMD pCmd;
2444 uint32_t cbHGCMParmStruct;
2445 int rc = vmmdevR3HgcmCallAlloc(pThisCC, pReq, cbReq, pLoadedCmd->GCPhys, enmRequestType, &pCmd, &cbHGCMParmStruct);
2446 if (RT_FAILURE(rc))
2447 return rc;
2448
2449 /* pLoadedCmd is fake, it does not contain actual call parameters. Only pagelists for LinAddr. */
2450 pCmd->fCancelled = false;
2451 pCmd->fRestored = true;
2452 pCmd->enmRequestType = enmRequestType;
2453
2454 rc = vmmdevR3HgcmCallFetchGuestParms(pDevIns, pThisCC, pCmd, pReq, cbReq, enmRequestType, cbHGCMParmStruct);
2455 if (RT_SUCCESS(rc))
2456 {
2457 /* Update LinAddr parameters from pLoadedCmd.
2458 * pLoadedCmd->u.call.cParms is actually the number of LinAddrs, see vmmdevR3HgcmLoadState.
2459 */
2460 uint32_t iLinAddr;
2461 for (iLinAddr = 0; iLinAddr < pLoadedCmd->u.call.cParms; ++iLinAddr)
2462 {
2463 VBOXHGCMGUESTPARM * const pLoadedParm = &pLoadedCmd->u.call.paGuestParms[iLinAddr];
2464 /* pLoadedParm->cbData is actually index of the LinAddr parameter, see vmmdevR3HgcmLoadState. */
2465 const uint32_t iParm = pLoadedParm->u.ptr.cbData;
2466 ASSERT_GUEST_STMT_BREAK(iParm < pCmd->u.call.cParms, rc = VERR_MISMATCH);
2467
2468 VBOXHGCMGUESTPARM * const pGuestParm = &pCmd->u.call.paGuestParms[iParm];
2469 ASSERT_GUEST_STMT_BREAK( pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_In
2470 || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr_Out
2471 || pGuestParm->enmType == VMMDevHGCMParmType_LinAddr,
2472 rc = VERR_MISMATCH);
2473 ASSERT_GUEST_STMT_BREAK( pLoadedParm->u.ptr.offFirstPage == pGuestParm->u.ptr.offFirstPage
2474 && pLoadedParm->u.ptr.cPages == pGuestParm->u.ptr.cPages,
2475 rc = VERR_MISMATCH);
2476 memcpy(pGuestParm->u.ptr.paPages, pLoadedParm->u.ptr.paPages, pGuestParm->u.ptr.cPages * sizeof(RTGCPHYS));
2477 }
2478 }
2479
2480 if (RT_SUCCESS(rc))
2481 *ppRestoredCmd = pCmd;
2482 else
2483 vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd);
2484
2485 return rc;
2486}
2487
2488/** Allocate and initialize a HGCM command using the given request (pReqHdr)
2489 * and command loaded from saved state (pLoadedCmd).
2490 *
2491 * @returns VBox status code that the guest should see.
2492 * @param pDevIns The device instance.
2493 * @param pThis The VMMDev shared instance data.
2494 * @param pThisCC The VMMDev ring-3 instance data.
2495 * @param uSavedStateVersion Saved state version.
2496 * @param pLoadedCmd HGCM command which needs restoration.
2497 * @param pReqHdr The request (cached in host memory).
2498 * @param cbReq Size of the entire request (including HGCM parameters).
2499 * @param ppRestoredCmd Where to store pointer to restored command.
2500 */
2501static int vmmdevR3HgcmRestoreCommand(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC, uint32_t uSavedStateVersion,
2502 const VBOXHGCMCMD *pLoadedCmd, const VMMDevHGCMRequestHeader *pReqHdr, uint32_t cbReq,
2503 VBOXHGCMCMD **ppRestoredCmd)
2504{
2505 int rc;
2506
2507 /* Verify the request. */
2508 ASSERT_GUEST_RETURN(cbReq >= sizeof(VMMDevHGCMRequestHeader), VERR_MISMATCH);
2509 ASSERT_GUEST_RETURN(cbReq == pReqHdr->header.size, VERR_MISMATCH);
2510
2511 const VMMDevRequestType enmRequestType = pReqHdr->header.requestType;
2512 switch (enmRequestType)
2513 {
2514 case VMMDevReq_HGCMConnect:
2515 {
2516 VMMDevHGCMConnect *pReq = (VMMDevHGCMConnect *)pReqHdr;
2517 rc = vmmdevR3HgcmRestoreConnect(pThisCC, uSavedStateVersion, pLoadedCmd, pReq, cbReq, enmRequestType, ppRestoredCmd);
2518 break;
2519 }
2520
2521 case VMMDevReq_HGCMDisconnect:
2522 {
2523 VMMDevHGCMDisconnect *pReq = (VMMDevHGCMDisconnect *)pReqHdr;
2524 rc = vmmdevR3HgcmRestoreDisconnect(pThisCC, uSavedStateVersion, pLoadedCmd, pReq, cbReq, enmRequestType, ppRestoredCmd);
2525 break;
2526 }
2527
2528#ifdef VBOX_WITH_64_BITS_GUESTS
2529 case VMMDevReq_HGCMCall64:
2530#endif
2531 case VMMDevReq_HGCMCall32:
2532 {
2533 VMMDevHGCMCall *pReq = (VMMDevHGCMCall *)pReqHdr;
2534 rc = vmmdevR3HgcmRestoreCall(pDevIns, pThis, pThisCC, uSavedStateVersion, pLoadedCmd,
2535 pReq, cbReq, enmRequestType, ppRestoredCmd);
2536 break;
2537 }
2538
2539 default:
2540 ASSERT_GUEST_FAILED_RETURN(VERR_MISMATCH);
2541 }
2542
2543 return rc;
2544}
2545
2546/** Resubmit pending HGCM commands which were loaded from saved state.
2547 *
2548 * @returns VBox status code.
2549 * @param pDevIns The device instance.
2550 * @param pThis The VMMDev shared instance data.
2551 * @param pThisCC The VMMDev ring-3 instance data.
2552 *
2553 * @thread EMT
2554 */
2555int vmmdevR3HgcmLoadStateDone(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC)
2556{
2557 /*
2558 * Resubmit pending HGCM commands to services.
2559 *
2560     * pThisCC->listHGCMCmd contains the commands loaded by vmmdevR3HgcmLoadState.
2561 *
2562 * Legacy saved states (pre VMMDEV_SAVED_STATE_VERSION_HGCM_PARAMS)
2563 * do not have enough information about the command parameters,
2564 * therefore it is necessary to reload at least some data from the
2565 * guest memory to construct commands.
2566 *
2567 * There are two types of legacy saved states which contain:
2568 * 1) the guest physical address and size of request;
2569 * 2) additionally page lists for LinAddr parameters.
2570 *
2571 * Legacy commands have enmCmdType = VBOXHGCMCMDTYPE_LOADSTATE?
2572 */
2573
2574    int rcFunc = VINF_SUCCESS; /* If this ends up as a failure status, the function fails, i.e. the VM will not start. */
2575
2576    /* Move the loaded commands to a local list for processing. */
2577 RTLISTANCHOR listLoadedCommands;
2578 RTListMove(&listLoadedCommands, &pThisCC->listHGCMCmd);
2579
2580 /* Resubmit commands. */
2581 PVBOXHGCMCMD pCmd, pNext;
2582 RTListForEachSafe(&listLoadedCommands, pCmd, pNext, VBOXHGCMCMD, node)
2583 {
2584        int rcCmd = VINF_SUCCESS; /* If this ends up as a failure status, the HGCM command fails for the guest. */
2585
2586 RTListNodeRemove(&pCmd->node);
2587
2588 /*
2589 * Re-read the request from the guest memory.
2590 * It will be used to:
2591 * * reconstruct commands if legacy saved state has been restored;
2592 * * report an error to the guest if resubmit failed.
2593 */
2594 VMMDevHGCMRequestHeader *pReqHdr = (VMMDevHGCMRequestHeader *)RTMemAlloc(pCmd->cbRequest);
2595 AssertBreakStmt(pReqHdr, vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd); rcFunc = VERR_NO_MEMORY);
2596
2597 PDMDevHlpPhysRead(pDevIns, pCmd->GCPhys, pReqHdr, pCmd->cbRequest);
2598 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2599
2600 if (pThisCC->pHGCMDrv)
2601 {
2602 /*
2603 * Reconstruct legacy commands.
2604 */
2605 if (RT_LIKELY( pThisCC->uSavedStateVersion >= VMMDEV_SAVED_STATE_VERSION_HGCM_PARAMS
2606 && !pCmd->fRestoreFromGuestMem))
2607 { /* likely */ }
2608 else
2609 {
2610 PVBOXHGCMCMD pRestoredCmd = NULL;
2611 rcCmd = vmmdevR3HgcmRestoreCommand(pDevIns, pThis, pThisCC, pThisCC->uSavedStateVersion, pCmd,
2612 pReqHdr, pCmd->cbRequest, &pRestoredCmd);
2613 if (RT_SUCCESS(rcCmd))
2614 {
2615 Assert(pCmd != pRestoredCmd); /* vmmdevR3HgcmRestoreCommand must allocate restored command. */
2616 vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd);
2617 pCmd = pRestoredCmd;
2618 }
2619 }
2620
2621 /* Resubmit commands. */
2622 if (RT_SUCCESS(rcCmd))
2623 {
2624 switch (pCmd->enmCmdType)
2625 {
2626 case VBOXHGCMCMDTYPE_CONNECT:
2627 {
2628 vmmdevR3HgcmAddCommand(pDevIns, pThis, pThisCC, pCmd);
2629 rcCmd = pThisCC->pHGCMDrv->pfnConnect(pThisCC->pHGCMDrv, pCmd, pCmd->u.connect.pLoc,
2630 &pCmd->u.connect.u32ClientID);
2631 if (RT_FAILURE(rcCmd))
2632 vmmdevR3HgcmRemoveCommand(pThisCC, pCmd);
2633 break;
2634 }
2635
2636 case VBOXHGCMCMDTYPE_DISCONNECT:
2637 {
2638 vmmdevR3HgcmAddCommand(pDevIns, pThis, pThisCC, pCmd);
2639 rcCmd = pThisCC->pHGCMDrv->pfnDisconnect(pThisCC->pHGCMDrv, pCmd, pCmd->u.disconnect.u32ClientID);
2640 if (RT_FAILURE(rcCmd))
2641 vmmdevR3HgcmRemoveCommand(pThisCC, pCmd);
2642 break;
2643 }
2644
2645 case VBOXHGCMCMDTYPE_CALL:
2646 {
2647 rcCmd = vmmdevR3HgcmInitHostParameters(pDevIns, pThisCC, pCmd, (uint8_t const *)pReqHdr);
2648 if (RT_SUCCESS(rcCmd))
2649 {
2650 vmmdevR3HgcmAddCommand(pDevIns, pThis, pThisCC, pCmd);
2651
2652 /* Pass the function call to HGCM connector for actual processing */
2653 uint64_t tsNow;
2654 STAM_GET_TS(tsNow);
2655 rcCmd = pThisCC->pHGCMDrv->pfnCall(pThisCC->pHGCMDrv, pCmd,
2656 pCmd->u.call.u32ClientID, pCmd->u.call.u32Function,
2657 pCmd->u.call.cParms, pCmd->u.call.paHostParms, tsNow);
2658 if (RT_FAILURE(rcCmd))
2659 {
2660 LogFunc(("pfnCall rc = %Rrc\n", rcCmd));
2661 vmmdevR3HgcmRemoveCommand(pThisCC, pCmd);
2662 }
2663 }
2664 break;
2665 }
2666
2667 default:
2668 AssertFailedStmt(rcCmd = VERR_INTERNAL_ERROR);
2669 }
2670 }
2671 }
2672 else
2673 AssertFailedStmt(rcCmd = VERR_INTERNAL_ERROR);
2674
2675 if (RT_SUCCESS(rcCmd))
2676 { /* likely */ }
2677 else
2678 {
2679 /* Return the error to the guest. Guest may try to repeat the call. */
2680 pReqHdr->result = rcCmd;
2681 pReqHdr->header.rc = rcCmd;
2682 pReqHdr->fu32Flags |= VBOX_HGCM_REQ_DONE;
2683
2684 /* Write back only the header. */
2685 PDMDevHlpPhysWrite(pDevIns, pCmd->GCPhys, pReqHdr, sizeof(*pReqHdr));
2686
2687 VMMDevNotifyGuest(pDevIns, pThis, pThisCC, VMMDEV_EVENT_HGCM);
2688
2689 /* Deallocate the command memory. */
2690 vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd);
2691 }
2692
2693 RTMemFree(pReqHdr);
2694 }
2695
2696 if (RT_FAILURE(rcFunc))
2697 {
2698 RTListForEachSafe(&listLoadedCommands, pCmd, pNext, VBOXHGCMCMD, node)
2699 {
2700 RTListNodeRemove(&pCmd->node);
2701 vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd);
2702 }
2703 }
2704
2705 return rcFunc;
2706}
2707
2708
2709/**
2710 * Counterpart to vmmdevR3HgcmInit().
2711 *
2712 * @param pDevIns The device instance.
2713 * @param pThis The VMMDev shared instance data.
2714 * @param pThisCC The VMMDev ring-3 instance data.
2715 */
2716void vmmdevR3HgcmDestroy(PPDMDEVINS pDevIns, PVMMDEV pThis, PVMMDEVCC pThisCC)
2717{
2718 LogFlowFunc(("\n"));
2719
2720 if (RTCritSectIsInitialized(&pThisCC->critsectHGCMCmdList))
2721 {
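        /* Any commands still pending are removed and freed here; they are dropped rather than completed back to the guest. */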
2722 PVBOXHGCMCMD pCmd, pNext;
2723 RTListForEachSafe(&pThisCC->listHGCMCmd, pCmd, pNext, VBOXHGCMCMD, node)
2724 {
2725 vmmdevR3HgcmRemoveCommand(pThisCC, pCmd);
2726 vmmdevR3HgcmCmdFree(pDevIns, pThis, pThisCC, pCmd);
2727 }
2728
2729 RTCritSectDelete(&pThisCC->critsectHGCMCmdList);
2730 }
2731
2732 AssertCompile(NIL_RTMEMCACHE == (RTMEMCACHE)0);
2733 if (pThisCC->hHgcmCmdCache != NIL_RTMEMCACHE)
2734 {
2735 RTMemCacheDestroy(pThisCC->hHgcmCmdCache);
2736 pThisCC->hHgcmCmdCache = NIL_RTMEMCACHE;
2737 }
2738}
2739
2740
2741/**
2742 * Initializes the HGCM specific state.
2743 *
2744 * Keeps VBOXHGCMCMDCACHED and friends local.
2745 *
2746 * @returns VBox status code.
2747 * @param pThisCC The VMMDev ring-3 instance data.
2748 */
2749int vmmdevR3HgcmInit(PVMMDEVCC pThisCC)
2750{
2751 LogFlowFunc(("\n"));
2752
2753 RTListInit(&pThisCC->listHGCMCmd);
2754
2755 int rc = RTCritSectInit(&pThisCC->critsectHGCMCmdList);
2756 AssertLogRelRCReturn(rc, rc);
2757
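    /* Allocation cache sized for VBOXHGCMCMDCACHED, which presumably covers the common small commands;
       larger commands would have to fall back to regular heap allocations elsewhere. */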
2758 rc = RTMemCacheCreate(&pThisCC->hHgcmCmdCache, sizeof(VBOXHGCMCMDCACHED), 64, _1M, NULL, NULL, NULL, 0);
2759 AssertLogRelRCReturn(rc, rc);
2760
2761 pThisCC->u32HGCMEnabled = 0;
2762
2763 return VINF_SUCCESS;
2764}
2765