VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/lib/VBoxGuestR0LibHGCMInternal.cpp@ 72352

Last change on this file since 72352 was 70873, checked in by vboxsync, 7 years ago

VMMDev,VBoxGuest: Classify who is calling the host (part 1). bugref:9105

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 45.5 KB
Line 
1/* $Id: VBoxGuestR0LibHGCMInternal.cpp 70873 2018-02-05 18:13:55Z vboxsync $ */
2/** @file
3 * VBoxGuestLib - Host-Guest Communication Manager internal functions, implemented by VBoxGuest
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#define LOG_GROUP LOG_GROUP_HGCM
32
33#include "VBoxGuestR0LibInternal.h"
34#include <iprt/alloca.h>
35#include <iprt/asm.h>
36#include <iprt/assert.h>
37#include <iprt/mem.h>
38#include <iprt/memobj.h>
39#include <iprt/string.h>
40#include <iprt/thread.h>
41#include <iprt/time.h>
42
43#ifndef VBGL_VBOXGUEST
44# error "This file should only be part of the VBoxGuestR0LibBase library that is linked into VBoxGuest."
45#endif
46
47
48/*********************************************************************************************************************************
49* Defined Constants And Macros *
50*********************************************************************************************************************************/
51/** The max parameter buffer size for a user request. */
52#define VBGLR0_MAX_HGCM_USER_PARM (24*_1M)
53/** The max parameter buffer size for a kernel request. */
54#define VBGLR0_MAX_HGCM_KERNEL_PARM (16*_1M)
55#if defined(RT_OS_LINUX) || defined(RT_OS_DARWIN)
56/** Linux needs to use bounce buffers since RTR0MemObjLockUser has unwanted
57 * side effects.
58 * Darwin 32bit & 64bit also needs this because of 4GB/4GB user/kernel space. */
59# define USE_BOUNCE_BUFFERS
60#endif
61
62
63/*********************************************************************************************************************************
64* Structures and Typedefs *
65*********************************************************************************************************************************/
66/**
67 * Lock info structure used by VbglR0HGCMInternalCall and its helpers.
68 */
/**
 * Lock info structure used by VbglR0HGCMInternalCall and its helpers.
 */
struct VbglR0ParmInfo
{
    /** Number of aLockBufs entries in use. */
    uint32_t cLockBufs;
    struct
    {
        /** The HGCM parameter index this entry corresponds to. */
        uint32_t iParm;
        /** Memory object covering the locked-down caller buffer, or the
         *  bounce buffer standing in for it. */
        RTR0MEMOBJ hObj;
#ifdef USE_BOUNCE_BUFFERS
        /** Small temporary heap buffer used instead of hObj's pages for
         *  user requests below the size threshold; NULL otherwise. */
        void *pvSmallBuf;
#endif
    } aLockBufs[10];
};
81
82
83
84/* These functions can be only used by VBoxGuest. */
85
86DECLR0VBGL(int) VbglR0HGCMInternalConnect(HGCMServiceLocation const *pLoc, uint32_t fRequestor, HGCMCLIENTID *pidClient,
87 PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
88{
89 int rc;
90 if ( RT_VALID_PTR(pLoc)
91 && RT_VALID_PTR(pidClient)
92 && RT_VALID_PTR(pfnAsyncCallback))
93 {
94 /* Allocate request */
95 VMMDevHGCMConnect *pHGCMConnect = NULL;
96 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pHGCMConnect, sizeof(VMMDevHGCMConnect), VMMDevReq_HGCMConnect);
97 if (RT_SUCCESS(rc))
98 {
99 /* Initialize request memory */
100 pHGCMConnect->header.header.fRequestor = fRequestor;
101
102 pHGCMConnect->header.fu32Flags = 0;
103
104 memcpy(&pHGCMConnect->loc, pLoc, sizeof(pHGCMConnect->loc));
105 pHGCMConnect->u32ClientID = 0;
106
107 /* Issue request */
108 rc = VbglR0GRPerform (&pHGCMConnect->header.header);
109 if (RT_SUCCESS(rc))
110 {
111 /* Check if host decides to process the request asynchronously. */
112 if (rc == VINF_HGCM_ASYNC_EXECUTE)
113 {
114 /* Wait for request completion interrupt notification from host */
115 pfnAsyncCallback(&pHGCMConnect->header, pvAsyncData, u32AsyncData);
116 }
117
118 rc = pHGCMConnect->header.result;
119 if (RT_SUCCESS(rc))
120 *pidClient = pHGCMConnect->u32ClientID;
121 }
122 VbglR0GRFree(&pHGCMConnect->header.header);
123 }
124 }
125 else
126 rc = VERR_INVALID_PARAMETER;
127 return rc;
128}
129
130
131DECLR0VBGL(int) VbglR0HGCMInternalDisconnect(HGCMCLIENTID idClient, uint32_t fRequestor,
132 PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
133{
134 int rc;
135 if ( idClient != 0
136 && pfnAsyncCallback)
137 {
138 /* Allocate request */
139 VMMDevHGCMDisconnect *pHGCMDisconnect = NULL;
140 rc = VbglR0GRAlloc ((VMMDevRequestHeader **)&pHGCMDisconnect, sizeof (VMMDevHGCMDisconnect), VMMDevReq_HGCMDisconnect);
141 if (RT_SUCCESS(rc))
142 {
143 /* Initialize request memory */
144 pHGCMDisconnect->header.header.fRequestor = fRequestor;
145
146 pHGCMDisconnect->header.fu32Flags = 0;
147
148 pHGCMDisconnect->u32ClientID = idClient;
149
150 /* Issue request */
151 rc = VbglR0GRPerform(&pHGCMDisconnect->header.header);
152 if (RT_SUCCESS(rc))
153 {
154 /* Check if host decides to process the request asynchronously. */
155 if (rc == VINF_HGCM_ASYNC_EXECUTE)
156 {
157 /* Wait for request completion interrupt notification from host */
158 pfnAsyncCallback(&pHGCMDisconnect->header, pvAsyncData, u32AsyncData);
159 }
160
161 rc = pHGCMDisconnect->header.result;
162 }
163
164 VbglR0GRFree(&pHGCMDisconnect->header.header);
165 }
166 }
167 else
168 rc = VERR_INVALID_PARAMETER;
169 return rc;
170}
171
172
173/**
174 * Preprocesses the HGCM call, validating and locking/buffering parameters.
175 *
176 * @returns VBox status code.
177 *
178 * @param pCallInfo The call info.
179 * @param cbCallInfo The size of the call info structure.
180 * @param fIsUser Is it a user request or kernel request.
181 * @param pcbExtra Where to return the extra request space needed for
182 * physical page lists.
183 */
184static int vbglR0HGCMInternalPreprocessCall(PCVBGLIOCHGCMCALL pCallInfo, uint32_t cbCallInfo,
185 bool fIsUser, struct VbglR0ParmInfo *pParmInfo, size_t *pcbExtra)
186{
187 HGCMFunctionParameter const *pSrcParm = VBGL_HGCM_GET_CALL_PARMS(pCallInfo);
188 uint32_t const cParms = pCallInfo->cParms;
189 uint32_t iParm;
190 uint32_t cb;
191
192 /*
193 * Lock down the any linear buffers so we can get their addresses
194 * and figure out how much extra storage we need for page lists.
195 *
196 * Note! With kernel mode users we can be assertive. For user mode users
197 * we should just (debug) log it and fail without any fanfare.
198 */
199 *pcbExtra = 0;
200 pParmInfo->cLockBufs = 0;
201 for (iParm = 0; iParm < cParms; iParm++, pSrcParm++)
202 {
203 switch (pSrcParm->type)
204 {
205 case VMMDevHGCMParmType_32bit:
206 Log4(("GstHGCMCall: parm=%u type=32bit: %#010x\n", iParm, pSrcParm->u.value32));
207 break;
208
209 case VMMDevHGCMParmType_64bit:
210 Log4(("GstHGCMCall: parm=%u type=64bit: %#018RX64\n", iParm, pSrcParm->u.value64));
211 break;
212
213 case VMMDevHGCMParmType_PageList:
214 if (fIsUser)
215 return VERR_INVALID_PARAMETER;
216 cb = pSrcParm->u.PageList.size;
217 if (cb)
218 {
219 uint32_t off = pSrcParm->u.PageList.offset;
220 HGCMPageListInfo *pPgLst;
221 uint32_t cPages;
222 uint32_t u32;
223
224 AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
225 VERR_OUT_OF_RANGE);
226 AssertMsgReturn( off >= cParms * sizeof(HGCMFunctionParameter)
227 && off <= cbCallInfo - sizeof(HGCMPageListInfo),
228 ("offset=%#x cParms=%#x cbCallInfo=%#x\n", off, cParms, cbCallInfo),
229 VERR_INVALID_PARAMETER);
230
231 pPgLst = (HGCMPageListInfo *)((uint8_t *)pCallInfo + off);
232 cPages = pPgLst->cPages;
233 u32 = RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]) + off;
234 AssertMsgReturn(u32 <= cbCallInfo,
235 ("u32=%#x (cPages=%#x offset=%#x) cbCallInfo=%#x\n", u32, cPages, off, cbCallInfo),
236 VERR_INVALID_PARAMETER);
237 AssertMsgReturn(pPgLst->offFirstPage < PAGE_SIZE, ("#x\n", pPgLst->offFirstPage), VERR_INVALID_PARAMETER);
238 u32 = RT_ALIGN_32(pPgLst->offFirstPage + cb, PAGE_SIZE) >> PAGE_SHIFT;
239 AssertMsgReturn(cPages == u32, ("cPages=%#x u32=%#x\n", cPages, u32), VERR_INVALID_PARAMETER);
240 AssertMsgReturn(VBOX_HGCM_F_PARM_ARE_VALID(pPgLst->flags), ("%#x\n", pPgLst->flags), VERR_INVALID_PARAMETER);
241 Log4(("GstHGCMCall: parm=%u type=pglst: cb=%#010x cPgs=%u offPg0=%#x flags=%#x\n",
242 iParm, cb, cPages, pPgLst->offFirstPage, pPgLst->flags));
243 u32 = cPages;
244 while (u32-- > 0)
245 {
246 Log4(("GstHGCMCall: pg#%u=%RHp\n", u32, pPgLst->aPages[u32]));
247 AssertMsgReturn(!(pPgLst->aPages[u32] & (PAGE_OFFSET_MASK | UINT64_C(0xfff0000000000000))),
248 ("pg#%u=%RHp\n", u32, pPgLst->aPages[u32]),
249 VERR_INVALID_PARAMETER);
250 }
251
252 *pcbExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[pPgLst->cPages]);
253 }
254 else
255 Log4(("GstHGCMCall: parm=%u type=pglst: cb=0\n", iParm));
256 break;
257
258 case VMMDevHGCMParmType_LinAddr_Locked_In:
259 case VMMDevHGCMParmType_LinAddr_Locked_Out:
260 case VMMDevHGCMParmType_LinAddr_Locked:
261 if (fIsUser)
262 return VERR_INVALID_PARAMETER;
263 if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ true))
264 {
265 cb = pSrcParm->u.Pointer.size;
266 AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
267 VERR_OUT_OF_RANGE);
268 if (cb != 0)
269 Log4(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p\n",
270 iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr));
271 else
272 Log4(("GstHGCMCall: parm=%u type=%#x: cb=0\n", iParm, pSrcParm->type));
273 break;
274 }
275 RT_FALL_THRU();
276
277 case VMMDevHGCMParmType_LinAddr_In:
278 case VMMDevHGCMParmType_LinAddr_Out:
279 case VMMDevHGCMParmType_LinAddr:
280 cb = pSrcParm->u.Pointer.size;
281 if (cb != 0)
282 {
283#ifdef USE_BOUNCE_BUFFERS
284 void *pvSmallBuf = NULL;
285#endif
286 uint32_t iLockBuf = pParmInfo->cLockBufs;
287 RTR0MEMOBJ hObj;
288 int rc;
289 uint32_t fAccess = pSrcParm->type == VMMDevHGCMParmType_LinAddr_In
290 || pSrcParm->type == VMMDevHGCMParmType_LinAddr_Locked_In
291 ? RTMEM_PROT_READ
292 : RTMEM_PROT_READ | RTMEM_PROT_WRITE;
293
294 AssertReturn(iLockBuf < RT_ELEMENTS(pParmInfo->aLockBufs), VERR_INVALID_PARAMETER);
295 if (!fIsUser)
296 {
297 AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
298 VERR_OUT_OF_RANGE);
299 rc = RTR0MemObjLockKernel(&hObj, (void *)pSrcParm->u.Pointer.u.linearAddr, cb, fAccess);
300 if (RT_FAILURE(rc))
301 {
302 Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemObjLockKernel(,%p,%#x) -> %Rrc\n",
303 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr, cb, rc));
304 return rc;
305 }
306 Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p locked kernel -> %p\n",
307 iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
308 }
309 else if (cb > VBGLR0_MAX_HGCM_USER_PARM)
310 {
311 Log(("GstHGCMCall: id=%#x fn=%u parm=%u pv=%p cb=%#x > %#x -> out of range\n",
312 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr,
313 cb, VBGLR0_MAX_HGCM_USER_PARM));
314 return VERR_OUT_OF_RANGE;
315 }
316 else
317 {
318#ifndef USE_BOUNCE_BUFFERS
319 rc = RTR0MemObjLockUser(&hObj, (RTR3PTR)pSrcParm->u.Pointer.u.linearAddr, cb, fAccess, NIL_RTR0PROCESS);
320 if (RT_FAILURE(rc))
321 {
322 Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemObjLockUser(,%p,%#x,nil) -> %Rrc\n",
323 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr, cb, rc));
324 return rc;
325 }
326 Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p locked user -> %p\n",
327 iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
328
329#else /* USE_BOUNCE_BUFFERS */
330 /*
331 * This is a bit massive, but we don't want to waste a
332 * whole page for a 3 byte string buffer (guest props).
333 *
334 * The threshold is ASSUMING sizeof(RTMEMHDR) == 16 and
335 * the system is using some power of two allocator.
336 */
337 /** @todo A more efficient strategy would be to combine buffers. However it
338 * is probably going to be more massive than the current code, so
339 * it can wait till later. */
340 bool fCopyIn = pSrcParm->type != VMMDevHGCMParmType_LinAddr_Out
341 && pSrcParm->type != VMMDevHGCMParmType_LinAddr_Locked_Out;
342 if (cb <= PAGE_SIZE / 2 - 16)
343 {
344 pvSmallBuf = fCopyIn ? RTMemTmpAlloc(cb) : RTMemTmpAllocZ(cb);
345 if (RT_UNLIKELY(!pvSmallBuf))
346 return VERR_NO_MEMORY;
347 if (fCopyIn)
348 {
349 rc = RTR0MemUserCopyFrom(pvSmallBuf, pSrcParm->u.Pointer.u.linearAddr, cb);
350 if (RT_FAILURE(rc))
351 {
352 RTMemTmpFree(pvSmallBuf);
353 Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemUserCopyFrom(,%p,%#x) -> %Rrc\n",
354 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm,
355 pSrcParm->u.Pointer.u.linearAddr, cb, rc));
356 return rc;
357 }
358 }
359 rc = RTR0MemObjLockKernel(&hObj, pvSmallBuf, cb, fAccess);
360 if (RT_FAILURE(rc))
361 {
362 RTMemTmpFree(pvSmallBuf);
363 Log(("GstHGCMCall: RTR0MemObjLockKernel failed for small buffer: rc=%Rrc pvSmallBuf=%p cb=%#x\n",
364 rc, pvSmallBuf, cb));
365 return rc;
366 }
367 Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p small buffer %p -> %p\n",
368 iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, pvSmallBuf, hObj));
369 }
370 else
371 {
372 rc = RTR0MemObjAllocPage(&hObj, cb, false /*fExecutable*/);
373 if (RT_FAILURE(rc))
374 return rc;
375 if (!fCopyIn)
376 memset(RTR0MemObjAddress(hObj), '\0', cb);
377 else
378 {
379 rc = RTR0MemUserCopyFrom(RTR0MemObjAddress(hObj), pSrcParm->u.Pointer.u.linearAddr, cb);
380 if (RT_FAILURE(rc))
381 {
382 RTR0MemObjFree(hObj, false /*fFreeMappings*/);
383 Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemUserCopyFrom(,%p,%#x) -> %Rrc\n",
384 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm,
385 pSrcParm->u.Pointer.u.linearAddr, cb, rc));
386 return rc;
387 }
388 }
389 Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p big buffer -> %p\n",
390 iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
391 }
392#endif /* USE_BOUNCE_BUFFERS */
393 }
394
395 pParmInfo->aLockBufs[iLockBuf].iParm = iParm;
396 pParmInfo->aLockBufs[iLockBuf].hObj = hObj;
397#ifdef USE_BOUNCE_BUFFERS
398 pParmInfo->aLockBufs[iLockBuf].pvSmallBuf = pvSmallBuf;
399#endif
400 pParmInfo->cLockBufs = iLockBuf + 1;
401
402 if (VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ false))
403 {
404 size_t const cPages = RTR0MemObjSize(hObj) >> PAGE_SHIFT;
405 *pcbExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]);
406 }
407 }
408 else
409 Log4(("GstHGCMCall: parm=%u type=%#x: cb=0\n", iParm, pSrcParm->type));
410 break;
411
412 default:
413 return VERR_INVALID_PARAMETER;
414 }
415 }
416
417 return VINF_SUCCESS;
418}
419
420
421/**
422 * Translates locked linear address to the normal type.
423 * The locked types are only for the guest side and not handled by the host.
424 *
425 * @returns normal linear address type.
426 * @param enmType The type.
427 */
428static HGCMFunctionParameterType vbglR0HGCMInternalConvertLinAddrType(HGCMFunctionParameterType enmType)
429{
430 switch (enmType)
431 {
432 case VMMDevHGCMParmType_LinAddr_Locked_In:
433 return VMMDevHGCMParmType_LinAddr_In;
434 case VMMDevHGCMParmType_LinAddr_Locked_Out:
435 return VMMDevHGCMParmType_LinAddr_Out;
436 case VMMDevHGCMParmType_LinAddr_Locked:
437 return VMMDevHGCMParmType_LinAddr;
438 default:
439 return enmType;
440 }
441}
442
443
444/**
445 * Translates linear address types to page list direction flags.
446 *
447 * @returns page list flags.
448 * @param enmType The type.
449 */
450static uint32_t vbglR0HGCMInternalLinAddrTypeToPageListFlags(HGCMFunctionParameterType enmType)
451{
452 switch (enmType)
453 {
454 case VMMDevHGCMParmType_LinAddr_In:
455 case VMMDevHGCMParmType_LinAddr_Locked_In:
456 return VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
457
458 case VMMDevHGCMParmType_LinAddr_Out:
459 case VMMDevHGCMParmType_LinAddr_Locked_Out:
460 return VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;
461
462 default: AssertFailed();
463 case VMMDevHGCMParmType_LinAddr:
464 case VMMDevHGCMParmType_LinAddr_Locked:
465 return VBOX_HGCM_F_PARM_DIRECTION_BOTH;
466 }
467}
468
469
470/**
471 * Initializes the call request that we're sending to the host.
472 *
473 * @returns VBox status code.
474 *
475 * @param pCallInfo The call info.
476 * @param cbCallInfo The size of the call info structure.
477 * @param fRequestor VMMDEV_REQUESTOR_XXX.
478 * @param fIsUser Is it a user request or kernel request.
479 * @param pcbExtra Where to return the extra request space needed for
480 * physical page lists.
481 */
/**
 * Initializes the call request that we're sending to the host.
 *
 * Note: returns nothing; the request buffer is assumed to be large enough
 * (parameter array plus the page-list space calculated by
 * vbglR0HGCMInternalPreprocessCall).
 *
 * @param   pHGCMCall   The request to initialize.
 * @param   pCallInfo   The call info.
 * @param   cbCallInfo  The size of the call info structure (currently unused).
 * @param   fRequestor  VMMDEV_REQUESTOR_XXX.
 * @param   fIsUser     Is it a user request or kernel request.
 * @param   pParmInfo   Parameter locking/buffering info from the preprocess step.
 */
static void vbglR0HGCMInternalInitCall(VMMDevHGCMCall *pHGCMCall, PCVBGLIOCHGCMCALL pCallInfo,
                                       uint32_t cbCallInfo, uint32_t fRequestor, bool fIsUser, struct VbglR0ParmInfo *pParmInfo)
{
    HGCMFunctionParameter const *pSrcParm = VBGL_HGCM_GET_CALL_PARMS(pCallInfo);
    HGCMFunctionParameter       *pDstParm = VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
    uint32_t const               cParms   = pCallInfo->cParms;
    /* Page list data goes after the parameter array; offExtra tracks the
       current append position relative to the start of the request. */
    uint32_t                     offExtra = (uint32_t)((uintptr_t)(pDstParm + cParms) - (uintptr_t)pHGCMCall);
    uint32_t                     iLockBuf = 0;
    uint32_t                     iParm;
    RT_NOREF1(cbCallInfo);
#ifndef USE_BOUNCE_BUFFERS
    RT_NOREF1(fIsUser);
#endif

    /*
     * The call request headers.
     */
    /* Kernel callers pass fRequestor through; user callers without a
       usermode-tagged requestor value get a conservative default. */
    pHGCMCall->header.header.fRequestor = !fIsUser || (fRequestor & VMMDEV_REQUESTOR_USERMODE) ? fRequestor
                                        : VMMDEV_REQUESTOR_USERMODE | VMMDEV_REQUESTOR_USR_NOT_GIVEN
                                        | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN | VMMDEV_REQUESTOR_CON_DONT_KNOW;

    pHGCMCall->header.fu32Flags = 0;
    pHGCMCall->header.result    = VINF_SUCCESS;

    pHGCMCall->u32ClientID = pCallInfo->u32ClientID;
    pHGCMCall->u32Function = pCallInfo->u32Function;
    pHGCMCall->cParms      = cParms;

    /*
     * The parameters.
     */
    for (iParm = 0; iParm < cParms; iParm++, pSrcParm++, pDstParm++)
    {
        switch (pSrcParm->type)
        {
            case VMMDevHGCMParmType_32bit:
            case VMMDevHGCMParmType_64bit:
                *pDstParm = *pSrcParm;
                break;

            case VMMDevHGCMParmType_PageList:
                /* Copy the caller's page list into the request's extra space
                   and point the parameter at that copy. */
                pDstParm->type = VMMDevHGCMParmType_PageList;
                pDstParm->u.PageList.size = pSrcParm->u.PageList.size;
                if (pSrcParm->u.PageList.size)
                {
                    HGCMPageListInfo const *pSrcPgLst = (HGCMPageListInfo *)((uint8_t *)pCallInfo + pSrcParm->u.PageList.offset);
                    HGCMPageListInfo       *pDstPgLst = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offExtra);
                    uint32_t const          cPages    = pSrcPgLst->cPages;
                    uint32_t                iPage;

                    pDstParm->u.PageList.offset = offExtra;
                    pDstPgLst->flags            = pSrcPgLst->flags;
                    pDstPgLst->offFirstPage     = pSrcPgLst->offFirstPage;
                    pDstPgLst->cPages           = cPages;
                    for (iPage = 0; iPage < cPages; iPage++)
                        pDstPgLst->aPages[iPage] = pSrcPgLst->aPages[iPage];

                    offExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]);
                }
                else
                    pDstParm->u.PageList.offset = 0;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ true))
                {
                    /* No page-list support for locked buffers: pass the address
                       through, translated to the corresponding unlocked type. */
                    *pDstParm = *pSrcParm;
                    pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                    break;
                }
                RT_FALL_THRU();

            case VMMDevHGCMParmType_LinAddr_In:
            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
                if (pSrcParm->u.Pointer.size != 0)
                {
#ifdef USE_BOUNCE_BUFFERS
                    void    *pvSmallBuf = pParmInfo->aLockBufs[iLockBuf].pvSmallBuf;
#endif
                    RTR0MEMOBJ hObj = pParmInfo->aLockBufs[iLockBuf].hObj;
                    Assert(iParm == pParmInfo->aLockBufs[iLockBuf].iParm);

                    if (VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ false))
                    {
                        /* Convert the locked-down buffer into a physical page
                           list appended to the request. */
                        HGCMPageListInfo *pDstPgLst = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offExtra);
                        size_t const      cPages    = RTR0MemObjSize(hObj) >> PAGE_SHIFT;
                        size_t            iPage;

                        pDstParm->type = VMMDevHGCMParmType_PageList;
                        pDstParm->u.PageList.size   = pSrcParm->u.Pointer.size;
                        pDstParm->u.PageList.offset = offExtra;
                        pDstPgLst->flags            = vbglR0HGCMInternalLinAddrTypeToPageListFlags(pSrcParm->type);
#ifdef USE_BOUNCE_BUFFERS
                        if (fIsUser)
                            /* Bounce buffer in use; its page offset, not the user address's. */
                            pDstPgLst->offFirstPage = (uintptr_t)pvSmallBuf & PAGE_OFFSET_MASK;
                        else
#endif
                            pDstPgLst->offFirstPage = pSrcParm->u.Pointer.u.linearAddr & PAGE_OFFSET_MASK;
                        pDstPgLst->cPages           = (uint32_t)cPages; Assert(pDstPgLst->cPages == cPages);
                        for (iPage = 0; iPage < cPages; iPage++)
                        {
                            pDstPgLst->aPages[iPage] = RTR0MemObjGetPagePhysAddr(hObj, iPage);
                            Assert(pDstPgLst->aPages[iPage] != NIL_RTHCPHYS);
                        }

                        offExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]);
                    }
                    else
                    {
                        /* Page lists not supported: pass a linear address,
                           substituting the bounce buffer for user requests. */
                        pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                        pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
#ifdef USE_BOUNCE_BUFFERS
                        if (fIsUser)
                            pDstParm->u.Pointer.u.linearAddr = pvSmallBuf
                                                             ? (uintptr_t)pvSmallBuf
                                                             : (uintptr_t)RTR0MemObjAddress(hObj);
                        else
#endif
                            pDstParm->u.Pointer.u.linearAddr = pSrcParm->u.Pointer.u.linearAddr;
                    }
                    iLockBuf++;
                }
                else
                {
                    /* Zero-sized buffer: nothing was locked for it. */
                    pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                    pDstParm->u.Pointer.size = 0;
                    pDstParm->u.Pointer.u.linearAddr = 0;
                }
                break;

            default:
                AssertFailed();
                pDstParm->type = VMMDevHGCMParmType_Invalid;
                break;
        }
    }
}
622
623
624/**
625 * Performs the call and completion wait.
626 *
627 * @returns VBox status code of this operation, not necessarily the call.
628 *
629 * @param pHGCMCall The HGCM call info.
630 * @param pfnAsyncCallback The async callback that will wait for the call
631 * to complete.
632 * @param pvAsyncData Argument for the callback.
633 * @param u32AsyncData Argument for the callback.
634 * @param pfLeakIt Where to return the leak it / free it,
635 * indicator. Cancellation fun.
636 */
/**
 * Performs the call and completion wait.
 *
 * @returns VBox status code of this operation, not necessarily the call.
 *
 * @param   pHGCMCall           The HGCM call info.
 * @param   pfnAsyncCallback    The async callback that will wait for the call
 *                              to complete.
 * @param   pvAsyncData         Argument for the callback.
 * @param   u32AsyncData        Argument for the callback.
 * @param   pfLeakIt            Where to return the leak it / free it,
 *                              indicator. Cancellation fun.
 */
static int vbglR0HGCMInternalDoCall(VMMDevHGCMCall *pHGCMCall, PFNVBGLHGCMCALLBACK pfnAsyncCallback,
                                    void *pvAsyncData, uint32_t u32AsyncData, bool *pfLeakIt)
{
    int rc;

    Log(("calling VbglR0GRPerform\n"));
    rc = VbglR0GRPerform(&pHGCMCall->header.header);
    Log(("VbglR0GRPerform rc = %Rrc (header rc=%d)\n", rc, pHGCMCall->header.result));

    /*
     * If the call failed, but as a result of the request itself, then pretend
     * success. Upper layers will interpret the result code in the packet.
     */
    if (   RT_FAILURE(rc)
        && rc == pHGCMCall->header.result)
    {
        Assert(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE);
        rc = VINF_SUCCESS;
    }

    /*
     * Check if host decides to process the request asynchronously,
     * if so, we wait for it to complete using the caller supplied callback.
     */
    *pfLeakIt = false;
    if (rc == VINF_HGCM_ASYNC_EXECUTE)
    {
        Log(("Processing HGCM call asynchronously\n"));
        /* The callback blocks until the host signals completion, or returns
           early on interruption/timeout (reflected in its status code). */
        rc = pfnAsyncCallback(&pHGCMCall->header, pvAsyncData, u32AsyncData);
        if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
        {
            Assert(!(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_CANCELLED));
            rc = VINF_SUCCESS;
        }
        else
        {
            /*
             * The request didn't complete in time or the call was interrupted,
             * the RC from the callback indicates which. Try cancel the request.
             *
             * This is a bit messy because we're racing request completion. Sorry.
             */
            /** @todo It would be nice if we could use the waiter callback to do further
             * waiting in case of a completion race. If it wasn't for WINNT having its own
             * version of all that stuff, I would've done it already. */
            VMMDevHGCMCancel2 *pCancelReq;
            int rc2 = VbglR0GRAlloc((VMMDevRequestHeader **)&pCancelReq, sizeof(*pCancelReq), VMMDevReq_HGCMCancel2);
            if (RT_SUCCESS(rc2))
            {
                pCancelReq->physReqToCancel = VbglR0PhysHeapGetPhysAddr(pHGCMCall);
                rc2 = VbglR0GRPerform(&pCancelReq->header);
                VbglR0GRFree(&pCancelReq->header);
            }
#if 1 /** @todo ADDVER: Remove this on next minor version change. */
            if (rc2 == VERR_NOT_IMPLEMENTED)
            {
                /* host is too old, or we're out of heap. */
                /* Fall back to the legacy in-place cancel request. */
                pHGCMCall->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;
                pHGCMCall->header.header.requestType = VMMDevReq_HGCMCancel;
                rc2 = VbglR0GRPerform(&pHGCMCall->header.header);
                if (rc2 == VERR_INVALID_PARAMETER)
                    rc2 = VERR_NOT_FOUND;
                else if (RT_SUCCESS(rc))
                    RTThreadSleep(1);
            }
#endif
            if (RT_SUCCESS(rc)) rc = VERR_INTERRUPTED; /** @todo weed this out from the WINNT VBoxGuest code. */
            if (RT_SUCCESS(rc2))
            {
                Log(("vbglR0HGCMInternalDoCall: successfully cancelled\n"));
                pHGCMCall->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;
            }
            else
            {
                /*
                 * Wait for a bit while the host (hopefully) completes it.
                 */
                uint64_t u64Start       = RTTimeSystemMilliTS();
                /* VERR_NOT_FOUND / VERR_SEM_DESTROYED suggest completion is
                   imminent (or impossible), so wait a shorter while. */
                uint32_t cMilliesToWait = rc2 == VERR_NOT_FOUND || rc2 == VERR_SEM_DESTROYED ? 500 : 2000;
                uint64_t cElapsed       = 0;
                if (rc2 != VERR_NOT_FOUND)
                {
                    static unsigned s_cErrors = 0;
                    if (s_cErrors++ < 32)
                        LogRel(("vbglR0HGCMInternalDoCall: Failed to cancel the HGCM call on %Rrc: rc2=%Rrc\n", rc, rc2));
                }
                else
                    Log(("vbglR0HGCMInternalDoCall: Cancel race rc=%Rrc rc2=%Rrc\n", rc, rc2));

                /* Poll the DONE flag the host sets upon completion. */
                do
                {
                    ASMCompilerBarrier(); /* paranoia */
                    if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
                        break;
                    RTThreadSleep(1);
                    cElapsed = RTTimeSystemMilliTS() - u64Start;
                } while (cElapsed < cMilliesToWait);

                ASMCompilerBarrier(); /* paranoia^2 */
                if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
                    rc = VINF_SUCCESS;
                else
                {
                    /* The host may still write to the request buffer, so the
                       caller must leak it rather than free it. */
                    LogRel(("vbglR0HGCMInternalDoCall: Leaking %u bytes. Pending call to %u with %u parms. (rc2=%Rrc)\n",
                            pHGCMCall->header.header.size, pHGCMCall->u32Function, pHGCMCall->cParms, rc2));
                    *pfLeakIt = true;
                }
                Log(("vbglR0HGCMInternalDoCall: Cancel race ended with rc=%Rrc (rc2=%Rrc) after %llu ms\n", rc, rc2, cElapsed));
            }
        }
    }

    Log(("GstHGCMCall: rc=%Rrc result=%Rrc fu32Flags=%#x fLeakIt=%d\n",
         rc, pHGCMCall->header.result, pHGCMCall->header.fu32Flags, *pfLeakIt));
    return rc;
}
753
754
755/**
756 * Copies the result of the call back to the caller info structure and user
757 * buffers (if using bounce buffers).
758 *
759 * @returns rc, unless RTR0MemUserCopyTo fails.
760 * @param pCallInfo Call info structure to update.
761 * @param pHGCMCall HGCM call request.
762 * @param pParmInfo Parameter locking/buffering info.
763 * @param fIsUser Is it a user (true) or kernel request.
764 * @param rc The current result code. Passed along to
765 * preserve informational status codes.
766 */
/**
 * Copies the result of the call back to the caller info structure and user
 * buffers (if using bounce buffers).
 *
 * @returns rc, unless RTR0MemUserCopyTo fails.
 * @param   pCallInfo   Call info structure to update.
 * @param   pHGCMCall   HGCM call request.
 * @param   pParmInfo   Parameter locking/buffering info.
 * @param   fIsUser     Is it a user (true) or kernel request.
 * @param   rc          The current result code. Passed along to
 *                      preserve informational status codes.
 */
static int vbglR0HGCMInternalCopyBackResult(PVBGLIOCHGCMCALL pCallInfo, VMMDevHGCMCall const *pHGCMCall,
                                            struct VbglR0ParmInfo *pParmInfo, bool fIsUser, int rc)
{
    HGCMFunctionParameter const *pSrcParm = VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
    HGCMFunctionParameter       *pDstParm = VBGL_HGCM_GET_CALL_PARMS(pCallInfo);
    uint32_t const               cParms   = pCallInfo->cParms;
#ifdef USE_BOUNCE_BUFFERS
    /* Walks pParmInfo->aLockBufs in step with the parameters that had
       buffers locked/bounced for them. */
    uint32_t                     iLockBuf = 0;
#endif
    uint32_t                     iParm;
    RT_NOREF1(pParmInfo);
#ifndef USE_BOUNCE_BUFFERS
    RT_NOREF1(fIsUser);
#endif

    /*
     * The call result.
     */
    pCallInfo->Hdr.rc = pHGCMCall->header.result;

    /*
     * Copy back parameters.
     */
    /* Note: the switch dispatches on pDstParm->type (the caller's original
       types), since the request's types may have been translated. */
    for (iParm = 0; iParm < cParms; iParm++, pSrcParm++, pDstParm++)
    {
        switch (pDstParm->type)
        {
            case VMMDevHGCMParmType_32bit:
            case VMMDevHGCMParmType_64bit:
                *pDstParm = *pSrcParm;
                break;

            case VMMDevHGCMParmType_PageList:
                pDstParm->u.PageList.size = pSrcParm->u.PageList.size;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_In:
#ifdef USE_BOUNCE_BUFFERS
                /* In-only parameters need no copy-back, but their lock buffer
                   entry must still be stepped over. */
                if (   fIsUser
                    && iLockBuf < pParmInfo->cLockBufs
                    && iParm == pParmInfo->aLockBufs[iLockBuf].iParm)
                    iLockBuf++;
#endif
                pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ true))
                {
                    /* Locked buffers were passed by address; the host wrote
                       directly into them, so only the size needs updating. */
                    pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                    break;
                }
                RT_FALL_THRU();

            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
            {
#ifdef USE_BOUNCE_BUFFERS
                if (fIsUser)
                {
                    /* Copy the bounce buffer contents back to the user buffer. */
                    size_t cbOut = RT_MIN(pSrcParm->u.Pointer.size, pDstParm->u.Pointer.size);
                    if (cbOut)
                    {
                        int rc2;
                        Assert(pParmInfo->aLockBufs[iLockBuf].iParm == iParm);
                        rc2 = RTR0MemUserCopyTo((RTR3PTR)pDstParm->u.Pointer.u.linearAddr,
                                                pParmInfo->aLockBufs[iLockBuf].pvSmallBuf
                                                ? pParmInfo->aLockBufs[iLockBuf].pvSmallBuf
                                                : RTR0MemObjAddress(pParmInfo->aLockBufs[iLockBuf].hObj),
                                                cbOut);
                        if (RT_FAILURE(rc2))
                            return rc2;
                        iLockBuf++;
                    }
                    else if (   iLockBuf < pParmInfo->cLockBufs
                             && iParm == pParmInfo->aLockBufs[iLockBuf].iParm)
                        iLockBuf++;
                }
#endif
                pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                break;
            }

            default:
                AssertFailed();
                rc = VERR_INTERNAL_ERROR_4;
                break;
        }
    }

#ifdef USE_BOUNCE_BUFFERS
    /* All lock buffer entries must have been consumed for user requests. */
    Assert(!fIsUser || pParmInfo->cLockBufs == iLockBuf);
#endif
    return rc;
}
864
865
866DECLR0VBGL(int) VbglR0HGCMInternalCall(PVBGLIOCHGCMCALL pCallInfo, uint32_t cbCallInfo, uint32_t fFlags, uint32_t fRequestor,
867 PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
868{
869 bool fIsUser = (fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_USER;
870 struct VbglR0ParmInfo ParmInfo;
871 size_t cbExtra;
872 int rc;
873
874 /*
875 * Basic validation.
876 */
877 AssertMsgReturn( !pCallInfo
878 || !pfnAsyncCallback
879 || pCallInfo->cParms > VBOX_HGCM_MAX_PARMS
880 || !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
881 ("pCallInfo=%p pfnAsyncCallback=%p fFlags=%#x\n", pCallInfo, pfnAsyncCallback, fFlags),
882 VERR_INVALID_PARAMETER);
883 AssertReturn( cbCallInfo >= sizeof(VBGLIOCHGCMCALL)
884 || cbCallInfo >= pCallInfo->cParms * sizeof(HGCMFunctionParameter),
885 VERR_INVALID_PARAMETER);
886
887 Log(("GstHGCMCall: u32ClientID=%#x u32Function=%u cParms=%u cbCallInfo=%#x fFlags=%#x\n",
888 pCallInfo->u32ClientID, pCallInfo->u32ClientID, pCallInfo->u32Function, pCallInfo->cParms, cbCallInfo, fFlags));
889
890 /*
891 * Validate, lock and buffer the parameters for the call.
892 * This will calculate the amount of extra space for physical page list.
893 */
894 rc = vbglR0HGCMInternalPreprocessCall(pCallInfo, cbCallInfo, fIsUser, &ParmInfo, &cbExtra);
895 if (RT_SUCCESS(rc))
896 {
897 /*
898 * Allocate the request buffer and recreate the call request.
899 */
900 VMMDevHGCMCall *pHGCMCall;
901 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pHGCMCall,
902 sizeof(VMMDevHGCMCall) + pCallInfo->cParms * sizeof(HGCMFunctionParameter) + cbExtra,
903 VMMDevReq_HGCMCall);
904 if (RT_SUCCESS(rc))
905 {
906 bool fLeakIt;
907 vbglR0HGCMInternalInitCall(pHGCMCall, pCallInfo, cbCallInfo, fRequestor, fIsUser, &ParmInfo);
908
909 /*
910 * Perform the call.
911 */
912 rc = vbglR0HGCMInternalDoCall(pHGCMCall, pfnAsyncCallback, pvAsyncData, u32AsyncData, &fLeakIt);
913 if (RT_SUCCESS(rc))
914 {
915 /*
916 * Copy back the result (parameters and buffers that changed).
917 */
918 rc = vbglR0HGCMInternalCopyBackResult(pCallInfo, pHGCMCall, &ParmInfo, fIsUser, rc);
919 }
920 else
921 {
922 if ( rc != VERR_INTERRUPTED
923 && rc != VERR_TIMEOUT)
924 {
925 static unsigned s_cErrors = 0;
926 if (s_cErrors++ < 32)
927 LogRel(("VbglR0HGCMInternalCall: vbglR0HGCMInternalDoCall failed. rc=%Rrc\n", rc));
928 }
929 }
930
931 if (!fLeakIt)
932 VbglR0GRFree(&pHGCMCall->header.header);
933 }
934 }
935 else
936 LogRel(("VbglR0HGCMInternalCall: vbglR0HGCMInternalPreprocessCall failed. rc=%Rrc\n", rc));
937
938 /*
939 * Release locks and free bounce buffers.
940 */
941 if (ParmInfo.cLockBufs)
942 while (ParmInfo.cLockBufs-- > 0)
943 {
944 RTR0MemObjFree(ParmInfo.aLockBufs[ParmInfo.cLockBufs].hObj, false /*fFreeMappings*/);
945#ifdef USE_BOUNCE_BUFFERS
946 RTMemTmpFree(ParmInfo.aLockBufs[ParmInfo.cLockBufs].pvSmallBuf);
947#endif
948 }
949
950 return rc;
951}
952
953
954#if ARCH_BITS == 64
955DECLR0VBGL(int) VbglR0HGCMInternalCall32(PVBGLIOCHGCMCALL pCallInfo, uint32_t cbCallInfo, uint32_t fFlags, uint32_t fRequestor,
956 PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
957{
958 PVBGLIOCHGCMCALL pCallInfo64 = NULL;
959 HGCMFunctionParameter *pParm64 = NULL;
960 HGCMFunctionParameter32 *pParm32 = NULL;
961 uint32_t cParms = 0;
962 uint32_t iParm = 0;
963 int rc = VINF_SUCCESS;
964
965 /*
966 * Input validation.
967 */
968 AssertMsgReturn( !pCallInfo
969 || !pfnAsyncCallback
970 || pCallInfo->cParms > VBOX_HGCM_MAX_PARMS
971 || !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
972 ("pCallInfo=%p pfnAsyncCallback=%p fFlags=%#x\n", pCallInfo, pfnAsyncCallback, fFlags),
973 VERR_INVALID_PARAMETER);
974 AssertReturn( cbCallInfo >= sizeof(VBGLIOCHGCMCALL)
975 || cbCallInfo >= pCallInfo->cParms * sizeof(HGCMFunctionParameter32),
976 VERR_INVALID_PARAMETER);
977
978 /* This Assert does not work on Solaris/Windows 64/32 mixed mode, not sure why, skipping for now */
979#if !defined(RT_OS_SOLARIS) && !defined(RT_OS_WINDOWS)
980 AssertReturn((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_KERNEL, VERR_WRONG_ORDER);
981#endif
982
983 cParms = pCallInfo->cParms;
984 Log(("VbglR0HGCMInternalCall32: cParms=%d, u32Function=%d, fFlags=%#x\n", cParms, pCallInfo->u32Function, fFlags));
985
986 /*
987 * The simple approach, allocate a temporary request and convert the parameters.
988 */
989 pCallInfo64 = (PVBGLIOCHGCMCALL)RTMemTmpAllocZ(sizeof(*pCallInfo64) + cParms * sizeof(HGCMFunctionParameter));
990 if (!pCallInfo64)
991 return VERR_NO_TMP_MEMORY;
992
993 *pCallInfo64 = *pCallInfo;
994 pParm32 = VBGL_HGCM_GET_CALL_PARMS32(pCallInfo);
995 pParm64 = VBGL_HGCM_GET_CALL_PARMS(pCallInfo64);
996 for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++)
997 {
998 switch (pParm32->type)
999 {
1000 case VMMDevHGCMParmType_32bit:
1001 pParm64->type = VMMDevHGCMParmType_32bit;
1002 pParm64->u.value32 = pParm32->u.value32;
1003 break;
1004
1005 case VMMDevHGCMParmType_64bit:
1006 pParm64->type = VMMDevHGCMParmType_64bit;
1007 pParm64->u.value64 = pParm32->u.value64;
1008 break;
1009
1010 case VMMDevHGCMParmType_LinAddr_Out:
1011 case VMMDevHGCMParmType_LinAddr:
1012 case VMMDevHGCMParmType_LinAddr_In:
1013 pParm64->type = pParm32->type;
1014 pParm64->u.Pointer.size = pParm32->u.Pointer.size;
1015 pParm64->u.Pointer.u.linearAddr = pParm32->u.Pointer.u.linearAddr;
1016 break;
1017
1018 default:
1019 rc = VERR_INVALID_PARAMETER;
1020 LogRel(("VbglR0HGCMInternalCall32: pParm32 type %#x invalid.\n", pParm32->type));
1021 break;
1022 }
1023 if (RT_FAILURE(rc))
1024 break;
1025 }
1026 if (RT_SUCCESS(rc))
1027 {
1028 rc = VbglR0HGCMInternalCall(pCallInfo64, sizeof(*pCallInfo64) + cParms * sizeof(HGCMFunctionParameter), fFlags,
1029 fRequestor, pfnAsyncCallback, pvAsyncData, u32AsyncData);
1030
1031 if (RT_SUCCESS(rc))
1032 {
1033 *pCallInfo = *pCallInfo64;
1034
1035 /*
1036 * Copy back.
1037 */
1038 pParm32 = VBGL_HGCM_GET_CALL_PARMS32(pCallInfo);
1039 pParm64 = VBGL_HGCM_GET_CALL_PARMS(pCallInfo64);
1040 for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++)
1041 {
1042 switch (pParm64->type)
1043 {
1044 case VMMDevHGCMParmType_32bit:
1045 pParm32->u.value32 = pParm64->u.value32;
1046 break;
1047
1048 case VMMDevHGCMParmType_64bit:
1049 pParm32->u.value64 = pParm64->u.value64;
1050 break;
1051
1052 case VMMDevHGCMParmType_LinAddr_Out:
1053 case VMMDevHGCMParmType_LinAddr:
1054 case VMMDevHGCMParmType_LinAddr_In:
1055 pParm32->u.Pointer.size = pParm64->u.Pointer.size;
1056 break;
1057
1058 default:
1059 LogRel(("VbglR0HGCMInternalCall32: failed invalid pParm32 type %d\n", pParm32->type));
1060 rc = VERR_INTERNAL_ERROR_3;
1061 break;
1062 }
1063 }
1064 }
1065 else
1066 {
1067 static unsigned s_cErrors = 0;
1068 if (s_cErrors++ < 32)
1069 LogRel(("VbglR0HGCMInternalCall32: VbglR0HGCMInternalCall failed. rc=%Rrc\n", rc));
1070 }
1071 }
1072 else
1073 {
1074 static unsigned s_cErrors = 0;
1075 if (s_cErrors++ < 32)
1076 LogRel(("VbglR0HGCMInternalCall32: failed. rc=%Rrc\n", rc));
1077 }
1078
1079 RTMemTmpFree(pCallInfo64);
1080 return rc;
1081}
1082#endif /* ARCH_BITS == 64 */
1083
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette