VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/lib/VBoxGuestR0LibHGCMInternal.cpp@ 74526

Last change on this file since 74526 was 73097, checked in by vboxsync, 6 years ago

*: Made RT_UOFFSETOF, RT_OFFSETOF, RT_UOFFSETOF_ADD and RT_OFFSETOF_ADD work like builtin_offsetof() and require compile time resolvable requests, adding RT_UOFFSETOF_DYN for the dynamic questions that can only be answered at runtime.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 45.7 KB
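
The distinction described in the change note, illustrated with this file's HGCMPageListInfo type (a sketch; cPages stands for a count only known at runtime):

    size_t offFixed = RT_UOFFSETOF(HGCMPageListInfo, aPages);             /* compile-time constant */
    size_t cbNeeded = RT_UOFFSETOF_DYN(HGCMPageListInfo, aPages[cPages]); /* depends on runtime cPages */
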
/* $Id: VBoxGuestR0LibHGCMInternal.cpp 73097 2018-07-12 21:06:33Z vboxsync $ */
/** @file
 * VBoxGuestLib - Host-Guest Communication Manager internal functions, implemented by VBoxGuest
 */

/*
 * Copyright (C) 2006-2018 Oracle Corporation
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_HGCM

#include "VBoxGuestR0LibInternal.h"
#include <iprt/alloca.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/mem.h>
#include <iprt/memobj.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include <iprt/time.h>

#ifndef VBGL_VBOXGUEST
# error "This file should only be part of the VBoxGuestR0LibBase library that is linked into VBoxGuest."
#endif


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** The max parameter buffer size for a user request. */
#define VBGLR0_MAX_HGCM_USER_PARM       (24*_1M)
/** The max parameter buffer size for a kernel request. */
#define VBGLR0_MAX_HGCM_KERNEL_PARM     (16*_1M)
#if defined(RT_OS_LINUX) || defined(RT_OS_DARWIN)
/** Linux needs to use bounce buffers since RTR0MemObjLockUser has unwanted
 * side effects.
 * Darwin 32bit & 64bit also needs this because of 4GB/4GB user/kernel space. */
# define USE_BOUNCE_BUFFERS
#endif


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * Lock info structure used by VbglR0HGCMInternalCall and its helpers.
 */
struct VbglR0ParmInfo
{
    uint32_t cLockBufs;
    struct
    {
        uint32_t    iParm;
        RTR0MEMOBJ  hObj;
#ifdef USE_BOUNCE_BUFFERS
        void       *pvSmallBuf;
#endif
    } aLockBufs[10];
};



/* These functions can be only used by VBoxGuest. */

DECLR0VBGL(int) VbglR0HGCMInternalConnect(HGCMServiceLocation const *pLoc, uint32_t fRequestor, HGCMCLIENTID *pidClient,
                                          PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
{
    int rc;
    if (   RT_VALID_PTR(pLoc)
        && RT_VALID_PTR(pidClient)
        && RT_VALID_PTR(pfnAsyncCallback))
    {
        /* Allocate request */
        VMMDevHGCMConnect *pHGCMConnect = NULL;
        rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pHGCMConnect, sizeof(VMMDevHGCMConnect), VMMDevReq_HGCMConnect);
        if (RT_SUCCESS(rc))
        {
            /* Initialize request memory */
            pHGCMConnect->header.header.fRequestor = fRequestor;

            pHGCMConnect->header.fu32Flags = 0;

            memcpy(&pHGCMConnect->loc, pLoc, sizeof(pHGCMConnect->loc));
            pHGCMConnect->u32ClientID = 0;

            /* Issue request */
            rc = VbglR0GRPerform(&pHGCMConnect->header.header);
            if (RT_SUCCESS(rc))
            {
                /* Check if host decides to process the request asynchronously. */
                if (rc == VINF_HGCM_ASYNC_EXECUTE)
                {
                    /* Wait for request completion interrupt notification from host */
                    pfnAsyncCallback(&pHGCMConnect->header, pvAsyncData, u32AsyncData);
                }

                rc = pHGCMConnect->header.result;
                if (RT_SUCCESS(rc))
                    *pidClient = pHGCMConnect->u32ClientID;
            }
            VbglR0GRFree(&pHGCMConnect->header.header);
        }
    }
    else
        rc = VERR_INVALID_PARAMETER;
    return rc;
}
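
/*
 * A minimal usage sketch of the connect/disconnect pair, assuming a
 * hypothetical service name "SketchSvc" and a placeholder waiter callback
 * vbglR0SketchCallback (sketched after the disconnect function below);
 * fRequestor is simply left 0 here:
 *
 *      HGCMServiceLocation Loc;
 *      HGCMCLIENTID        idClient = 0;
 *      int                 rc;
 *
 *      Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
 *      RTStrCopy(Loc.u.host.achName, sizeof(Loc.u.host.achName), "SketchSvc");
 *      rc = VbglR0HGCMInternalConnect(&Loc, 0, &idClient, vbglR0SketchCallback, NULL, 0);
 *      if (RT_SUCCESS(rc))
 *          rc = VbglR0HGCMInternalDisconnect(idClient, 0, vbglR0SketchCallback, NULL, 0);
 */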


DECLR0VBGL(int) VbglR0HGCMInternalDisconnect(HGCMCLIENTID idClient, uint32_t fRequestor,
                                             PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
{
    int rc;
    if (   idClient != 0
        && pfnAsyncCallback)
    {
        /* Allocate request */
        VMMDevHGCMDisconnect *pHGCMDisconnect = NULL;
        rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pHGCMDisconnect, sizeof(VMMDevHGCMDisconnect), VMMDevReq_HGCMDisconnect);
        if (RT_SUCCESS(rc))
        {
            /* Initialize request memory */
            pHGCMDisconnect->header.header.fRequestor = fRequestor;

            pHGCMDisconnect->header.fu32Flags = 0;

            pHGCMDisconnect->u32ClientID = idClient;

            /* Issue request */
            rc = VbglR0GRPerform(&pHGCMDisconnect->header.header);
            if (RT_SUCCESS(rc))
            {
                /* Check if host decides to process the request asynchronously. */
                if (rc == VINF_HGCM_ASYNC_EXECUTE)
                {
                    /* Wait for request completion interrupt notification from host */
                    pfnAsyncCallback(&pHGCMDisconnect->header, pvAsyncData, u32AsyncData);
                }

                rc = pHGCMDisconnect->header.result;
            }

            VbglR0GRFree(&pHGCMDisconnect->header.header);
        }
    }
    else
        rc = VERR_INVALID_PARAMETER;
    return rc;
}
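
/*
 * The waiter callback these functions take might, as a sketch, look like the
 * following; a real driver blocks on an event signalled from the VMMDev
 * interrupt handler rather than polling, but the VBOX_HGCM_REQ_DONE test is
 * the essential part (the callback name is a placeholder):
 *
 *      static DECLCALLBACK(int) vbglR0SketchCallback(VMMDevHGCMRequestHeader *pHeader, void *pvData, uint32_t u32Data)
 *      {
 *          RT_NOREF2(pvData, u32Data);
 *          while (!(pHeader->fu32Flags & VBOX_HGCM_REQ_DONE))
 *              RTThreadSleep(1);
 *          return VINF_SUCCESS;
 *      }
 */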


/**
 * Preprocesses the HGCM call, validating and locking/buffering parameters.
 *
 * @returns VBox status code.
 *
 * @param   pCallInfo       The call info.
 * @param   cbCallInfo      The size of the call info structure.
 * @param   fIsUser         Is it a user request or kernel request.
 * @param   pParmInfo       Where to return the lock/bounce buffer info.
 * @param   pcbExtra        Where to return the extra request space needed for
 *                          physical page lists.
 */
static int vbglR0HGCMInternalPreprocessCall(PCVBGLIOCHGCMCALL pCallInfo, uint32_t cbCallInfo,
                                            bool fIsUser, struct VbglR0ParmInfo *pParmInfo, size_t *pcbExtra)
{
    HGCMFunctionParameter const *pSrcParm = VBGL_HGCM_GET_CALL_PARMS(pCallInfo);
    uint32_t const               cParms   = pCallInfo->cParms;
    uint32_t                     iParm;
    uint32_t                     cb;

    /*
     * Lock down any linear buffers so we can get their addresses
     * and figure out how much extra storage we need for page lists.
     *
     * Note! With kernel mode users we can be assertive. For user mode users
     *       we should just (debug) log it and fail without any fanfare.
     */
    *pcbExtra = 0;
    pParmInfo->cLockBufs = 0;
    for (iParm = 0; iParm < cParms; iParm++, pSrcParm++)
    {
        switch (pSrcParm->type)
        {
            case VMMDevHGCMParmType_32bit:
                Log4(("GstHGCMCall: parm=%u type=32bit: %#010x\n", iParm, pSrcParm->u.value32));
                break;

            case VMMDevHGCMParmType_64bit:
                Log4(("GstHGCMCall: parm=%u type=64bit: %#018RX64\n", iParm, pSrcParm->u.value64));
                break;

            case VMMDevHGCMParmType_PageList:
                if (fIsUser)
                    return VERR_INVALID_PARAMETER;
                cb = pSrcParm->u.PageList.size;
                if (cb)
                {
                    uint32_t            off = pSrcParm->u.PageList.offset;
                    HGCMPageListInfo   *pPgLst;
                    uint32_t            cPages;
                    uint32_t            u32;

                    AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
                                    VERR_OUT_OF_RANGE);
                    AssertMsgReturn(   off >= cParms * sizeof(HGCMFunctionParameter)
                                    && off <= cbCallInfo - sizeof(HGCMPageListInfo),
                                    ("offset=%#x cParms=%#x cbCallInfo=%#x\n", off, cParms, cbCallInfo),
                                    VERR_INVALID_PARAMETER);

                    pPgLst = (HGCMPageListInfo *)((uint8_t *)pCallInfo + off);
                    cPages = pPgLst->cPages;
                    u32    = RT_UOFFSETOF_DYN(HGCMPageListInfo, aPages[cPages]) + off;
                    AssertMsgReturn(u32 <= cbCallInfo,
                                    ("u32=%#x (cPages=%#x offset=%#x) cbCallInfo=%#x\n", u32, cPages, off, cbCallInfo),
                                    VERR_INVALID_PARAMETER);
                    AssertMsgReturn(pPgLst->offFirstPage < PAGE_SIZE, ("%#x\n", pPgLst->offFirstPage), VERR_INVALID_PARAMETER);
                    u32 = RT_ALIGN_32(pPgLst->offFirstPage + cb, PAGE_SIZE) >> PAGE_SHIFT;
                    AssertMsgReturn(cPages == u32, ("cPages=%#x u32=%#x\n", cPages, u32), VERR_INVALID_PARAMETER);
                    AssertMsgReturn(VBOX_HGCM_F_PARM_ARE_VALID(pPgLst->flags), ("%#x\n", pPgLst->flags), VERR_INVALID_PARAMETER);
                    Log4(("GstHGCMCall: parm=%u type=pglst: cb=%#010x cPgs=%u offPg0=%#x flags=%#x\n",
                          iParm, cb, cPages, pPgLst->offFirstPage, pPgLst->flags));
                    u32 = cPages;
                    while (u32-- > 0)
                    {
                        Log4(("GstHGCMCall: pg#%u=%RHp\n", u32, pPgLst->aPages[u32]));
                        AssertMsgReturn(!(pPgLst->aPages[u32] & (PAGE_OFFSET_MASK | UINT64_C(0xfff0000000000000))),
                                        ("pg#%u=%RHp\n", u32, pPgLst->aPages[u32]),
                                        VERR_INVALID_PARAMETER);
                    }

                    *pcbExtra += RT_UOFFSETOF_DYN(HGCMPageListInfo, aPages[pPgLst->cPages]);
                }
                else
                    Log4(("GstHGCMCall: parm=%u type=pglst: cb=0\n", iParm));
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                if (fIsUser)
                    return VERR_INVALID_PARAMETER;
                if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ true))
                {
                    cb = pSrcParm->u.Pointer.size;
                    AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
                                    VERR_OUT_OF_RANGE);
                    if (cb != 0)
                        Log4(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p\n",
                              iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr));
                    else
                        Log4(("GstHGCMCall: parm=%u type=%#x: cb=0\n", iParm, pSrcParm->type));
                    break;
                }
                RT_FALL_THRU();

            case VMMDevHGCMParmType_LinAddr_In:
            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
                cb = pSrcParm->u.Pointer.size;
                if (cb != 0)
                {
#ifdef USE_BOUNCE_BUFFERS
                    void       *pvSmallBuf = NULL;
#endif
                    uint32_t    iLockBuf   = pParmInfo->cLockBufs;
                    RTR0MEMOBJ  hObj;
                    int         rc;
                    uint32_t    fAccess    = pSrcParm->type == VMMDevHGCMParmType_LinAddr_In
                                          || pSrcParm->type == VMMDevHGCMParmType_LinAddr_Locked_In
                                           ? RTMEM_PROT_READ
                                           : RTMEM_PROT_READ | RTMEM_PROT_WRITE;

                    AssertReturn(iLockBuf < RT_ELEMENTS(pParmInfo->aLockBufs), VERR_INVALID_PARAMETER);
                    if (!fIsUser)
                    {
                        AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
                                        VERR_OUT_OF_RANGE);
                        rc = RTR0MemObjLockKernel(&hObj, (void *)pSrcParm->u.Pointer.u.linearAddr, cb, fAccess);
                        if (RT_FAILURE(rc))
                        {
                            Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemObjLockKernel(,%p,%#x) -> %Rrc\n",
                                 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                            return rc;
                        }
                        Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p locked kernel -> %p\n",
                              iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
                    }
                    else if (cb > VBGLR0_MAX_HGCM_USER_PARM)
                    {
                        Log(("GstHGCMCall: id=%#x fn=%u parm=%u pv=%p cb=%#x > %#x -> out of range\n",
                             pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr,
                             cb, VBGLR0_MAX_HGCM_USER_PARM));
                        return VERR_OUT_OF_RANGE;
                    }
                    else
                    {
#ifndef USE_BOUNCE_BUFFERS
                        rc = RTR0MemObjLockUser(&hObj, (RTR3PTR)pSrcParm->u.Pointer.u.linearAddr, cb, fAccess, NIL_RTR0PROCESS);
                        if (RT_FAILURE(rc))
                        {
                            Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemObjLockUser(,%p,%#x,nil) -> %Rrc\n",
                                 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                            return rc;
                        }
                        Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p locked user -> %p\n",
                              iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));

#else  /* USE_BOUNCE_BUFFERS */
                        /*
                         * This is a bit massive, but we don't want to waste a
                         * whole page for a 3 byte string buffer (guest props).
                         *
                         * The threshold is ASSUMING sizeof(RTMEMHDR) == 16 and
                         * the system is using some power of two allocator.
                         */
                        /** @todo A more efficient strategy would be to combine buffers. However it
                         *        is probably going to be more massive than the current code, so
                         *        it can wait till later. */
                        bool fCopyIn = pSrcParm->type != VMMDevHGCMParmType_LinAddr_Out
                                    && pSrcParm->type != VMMDevHGCMParmType_LinAddr_Locked_Out;
                        if (cb <= PAGE_SIZE / 2 - 16)
                        {
                            pvSmallBuf = fCopyIn ? RTMemTmpAlloc(cb) : RTMemTmpAllocZ(cb);
                            if (RT_UNLIKELY(!pvSmallBuf))
                                return VERR_NO_MEMORY;
                            if (fCopyIn)
                            {
                                rc = RTR0MemUserCopyFrom(pvSmallBuf, pSrcParm->u.Pointer.u.linearAddr, cb);
                                if (RT_FAILURE(rc))
                                {
                                    RTMemTmpFree(pvSmallBuf);
                                    Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemUserCopyFrom(,%p,%#x) -> %Rrc\n",
                                         pCallInfo->u32ClientID, pCallInfo->u32Function, iParm,
                                         pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                                    return rc;
                                }
                            }
                            rc = RTR0MemObjLockKernel(&hObj, pvSmallBuf, cb, fAccess);
                            if (RT_FAILURE(rc))
                            {
                                RTMemTmpFree(pvSmallBuf);
                                Log(("GstHGCMCall: RTR0MemObjLockKernel failed for small buffer: rc=%Rrc pvSmallBuf=%p cb=%#x\n",
                                     rc, pvSmallBuf, cb));
                                return rc;
                            }
                            Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p small buffer %p -> %p\n",
                                  iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, pvSmallBuf, hObj));
                        }
                        else
                        {
                            rc = RTR0MemObjAllocPage(&hObj, cb, false /*fExecutable*/);
                            if (RT_FAILURE(rc))
                                return rc;
                            if (!fCopyIn)
                                memset(RTR0MemObjAddress(hObj), '\0', cb);
                            else
                            {
                                rc = RTR0MemUserCopyFrom(RTR0MemObjAddress(hObj), pSrcParm->u.Pointer.u.linearAddr, cb);
                                if (RT_FAILURE(rc))
                                {
                                    RTR0MemObjFree(hObj, false /*fFreeMappings*/);
                                    Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemUserCopyFrom(,%p,%#x) -> %Rrc\n",
                                         pCallInfo->u32ClientID, pCallInfo->u32Function, iParm,
                                         pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                                    return rc;
                                }
                            }
                            Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p big buffer -> %p\n",
                                  iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
                        }
#endif /* USE_BOUNCE_BUFFERS */
                    }

                    pParmInfo->aLockBufs[iLockBuf].iParm = iParm;
                    pParmInfo->aLockBufs[iLockBuf].hObj  = hObj;
#ifdef USE_BOUNCE_BUFFERS
                    pParmInfo->aLockBufs[iLockBuf].pvSmallBuf = pvSmallBuf;
#endif
                    pParmInfo->cLockBufs = iLockBuf + 1;

                    if (VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ false))
                    {
                        size_t const cPages = RTR0MemObjSize(hObj) >> PAGE_SHIFT;
                        *pcbExtra += RT_UOFFSETOF_DYN(HGCMPageListInfo, aPages[cPages]);
                    }
                }
                else
                    Log4(("GstHGCMCall: parm=%u type=%#x: cb=0\n", iParm, pSrcParm->type));
                break;

            default:
                return VERR_INVALID_PARAMETER;
        }
    }

    return VINF_SUCCESS;
}
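
/*
 * Worked example of the page list checks above, with the usual 4 KiB pages:
 * a buffer of cb=0x2800 bytes (10 KiB) whose first byte lives at page offset
 * offFirstPage=0xf00 covers
 * RT_ALIGN_32(0xf00 + 0x2800, PAGE_SIZE) >> PAGE_SHIFT = 0x4000 >> 12 = 4
 * pages, so the validation insists on cPages == 4 for such a list.
 */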


/**
 * Translates locked linear address to the normal type.
 * The locked types are only for the guest side and not handled by the host.
 *
 * @returns normal linear address type.
 * @param   enmType     The type.
 */
static HGCMFunctionParameterType vbglR0HGCMInternalConvertLinAddrType(HGCMFunctionParameterType enmType)
{
    switch (enmType)
    {
        case VMMDevHGCMParmType_LinAddr_Locked_In:
            return VMMDevHGCMParmType_LinAddr_In;
        case VMMDevHGCMParmType_LinAddr_Locked_Out:
            return VMMDevHGCMParmType_LinAddr_Out;
        case VMMDevHGCMParmType_LinAddr_Locked:
            return VMMDevHGCMParmType_LinAddr;
        default:
            return enmType;
    }
}


/**
 * Translates linear address types to page list direction flags.
 *
 * @returns page list flags.
 * @param   enmType     The type.
 */
static uint32_t vbglR0HGCMInternalLinAddrTypeToPageListFlags(HGCMFunctionParameterType enmType)
{
    switch (enmType)
    {
        case VMMDevHGCMParmType_LinAddr_In:
        case VMMDevHGCMParmType_LinAddr_Locked_In:
            return VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;

        case VMMDevHGCMParmType_LinAddr_Out:
        case VMMDevHGCMParmType_LinAddr_Locked_Out:
            return VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;

        default: AssertFailed();
        case VMMDevHGCMParmType_LinAddr:
        case VMMDevHGCMParmType_LinAddr_Locked:
            return VBOX_HGCM_F_PARM_DIRECTION_BOTH;
    }
}
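
/*
 * Taken together, the two helpers above mean a locked input buffer is resent
 * to the host either as its plain unlocked type or, when physical page lists
 * are usable, as a to-host-only page list:
 *
 *      Assert(vbglR0HGCMInternalConvertLinAddrType(VMMDevHGCMParmType_LinAddr_Locked_In)
 *             == VMMDevHGCMParmType_LinAddr_In);
 *      Assert(vbglR0HGCMInternalLinAddrTypeToPageListFlags(VMMDevHGCMParmType_LinAddr_Locked_In)
 *             == VBOX_HGCM_F_PARM_DIRECTION_TO_HOST);
 */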


/**
 * Initializes the call request that we're sending to the host.
 *
 * @param   pHGCMCall       The request to initialize.
 * @param   pCallInfo       The call info.
 * @param   cbCallInfo      The size of the call info structure.
 * @param   fRequestor      VMMDEV_REQUESTOR_XXX.
 * @param   fIsUser         Is it a user request or kernel request.
 * @param   pParmInfo       Parameter locking/buffering info.
 */
static void vbglR0HGCMInternalInitCall(VMMDevHGCMCall *pHGCMCall, PCVBGLIOCHGCMCALL pCallInfo,
                                       uint32_t cbCallInfo, uint32_t fRequestor, bool fIsUser, struct VbglR0ParmInfo *pParmInfo)
{
    HGCMFunctionParameter const *pSrcParm = VBGL_HGCM_GET_CALL_PARMS(pCallInfo);
    HGCMFunctionParameter       *pDstParm = VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
    uint32_t const               cParms   = pCallInfo->cParms;
    uint32_t                     offExtra = (uint32_t)((uintptr_t)(pDstParm + cParms) - (uintptr_t)pHGCMCall);
    uint32_t                     iLockBuf = 0;
    uint32_t                     iParm;
    RT_NOREF1(cbCallInfo);
#ifndef USE_BOUNCE_BUFFERS
    RT_NOREF1(fIsUser);
#endif

    /*
     * The call request headers.
     */
    pHGCMCall->header.header.fRequestor = !fIsUser || (fRequestor & VMMDEV_REQUESTOR_USERMODE) ? fRequestor
                                        : VMMDEV_REQUESTOR_USERMODE | VMMDEV_REQUESTOR_USR_NOT_GIVEN
                                        | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN | VMMDEV_REQUESTOR_CON_DONT_KNOW;

    pHGCMCall->header.fu32Flags = 0;
    pHGCMCall->header.result    = VINF_SUCCESS;

    pHGCMCall->u32ClientID = pCallInfo->u32ClientID;
    pHGCMCall->u32Function = pCallInfo->u32Function;
    pHGCMCall->cParms      = cParms;

    /*
     * The parameters.
     */
    for (iParm = 0; iParm < cParms; iParm++, pSrcParm++, pDstParm++)
    {
        switch (pSrcParm->type)
        {
            case VMMDevHGCMParmType_32bit:
            case VMMDevHGCMParmType_64bit:
                *pDstParm = *pSrcParm;
                break;

            case VMMDevHGCMParmType_PageList:
                pDstParm->type = VMMDevHGCMParmType_PageList;
                pDstParm->u.PageList.size = pSrcParm->u.PageList.size;
                if (pSrcParm->u.PageList.size)
                {
                    HGCMPageListInfo const *pSrcPgLst = (HGCMPageListInfo *)((uint8_t *)pCallInfo + pSrcParm->u.PageList.offset);
                    HGCMPageListInfo       *pDstPgLst = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offExtra);
                    uint32_t const          cPages    = pSrcPgLst->cPages;
                    uint32_t                iPage;

                    pDstParm->u.PageList.offset = offExtra;
                    pDstPgLst->flags            = pSrcPgLst->flags;
                    pDstPgLst->offFirstPage     = pSrcPgLst->offFirstPage;
                    pDstPgLst->cPages           = cPages;
                    for (iPage = 0; iPage < cPages; iPage++)
                        pDstPgLst->aPages[iPage] = pSrcPgLst->aPages[iPage];

                    offExtra += RT_UOFFSETOF_DYN(HGCMPageListInfo, aPages[cPages]);
                }
                else
                    pDstParm->u.PageList.offset = 0;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ true))
                {
                    *pDstParm = *pSrcParm;
                    pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                    break;
                }
                RT_FALL_THRU();

            case VMMDevHGCMParmType_LinAddr_In:
            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
                if (pSrcParm->u.Pointer.size != 0)
                {
#ifdef USE_BOUNCE_BUFFERS
                    void      *pvSmallBuf = pParmInfo->aLockBufs[iLockBuf].pvSmallBuf;
#endif
                    RTR0MEMOBJ hObj       = pParmInfo->aLockBufs[iLockBuf].hObj;
                    Assert(iParm == pParmInfo->aLockBufs[iLockBuf].iParm);

                    if (VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ false))
                    {
                        HGCMPageListInfo *pDstPgLst = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offExtra);
                        size_t const      cPages    = RTR0MemObjSize(hObj) >> PAGE_SHIFT;
                        size_t            iPage;

                        pDstParm->type = VMMDevHGCMParmType_PageList;
                        pDstParm->u.PageList.size   = pSrcParm->u.Pointer.size;
                        pDstParm->u.PageList.offset = offExtra;
                        pDstPgLst->flags            = vbglR0HGCMInternalLinAddrTypeToPageListFlags(pSrcParm->type);
#ifdef USE_BOUNCE_BUFFERS
                        if (fIsUser)
                            pDstPgLst->offFirstPage = (uintptr_t)pvSmallBuf & PAGE_OFFSET_MASK;
                        else
#endif
                            pDstPgLst->offFirstPage = pSrcParm->u.Pointer.u.linearAddr & PAGE_OFFSET_MASK;
                        pDstPgLst->cPages           = (uint32_t)cPages; Assert(pDstPgLst->cPages == cPages);
                        for (iPage = 0; iPage < cPages; iPage++)
                        {
                            pDstPgLst->aPages[iPage] = RTR0MemObjGetPagePhysAddr(hObj, iPage);
                            Assert(pDstPgLst->aPages[iPage] != NIL_RTHCPHYS);
                        }

                        offExtra += RT_UOFFSETOF_DYN(HGCMPageListInfo, aPages[cPages]);
                    }
                    else
                    {
                        pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                        pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
#ifdef USE_BOUNCE_BUFFERS
                        if (fIsUser)
                            pDstParm->u.Pointer.u.linearAddr = pvSmallBuf
                                                             ? (uintptr_t)pvSmallBuf
                                                             : (uintptr_t)RTR0MemObjAddress(hObj);
                        else
#endif
                            pDstParm->u.Pointer.u.linearAddr = pSrcParm->u.Pointer.u.linearAddr;
                    }
                    iLockBuf++;
                }
                else
                {
                    pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                    pDstParm->u.Pointer.size         = 0;
                    pDstParm->u.Pointer.u.linearAddr = 0;
                }
                break;

            default:
                AssertFailed();
                pDstParm->type = VMMDevHGCMParmType_Invalid;
                break;
        }
    }
}
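
/*
 * The request this produces lives in one contiguous allocation; for a sketch
 * with two parameters where the second uses a physical page list, the layout
 * is (offExtra starts right after the parameter array, as computed above):
 *
 *      VMMDevHGCMCall            header (u32ClientID, u32Function, cParms)
 *      HGCMFunctionParameter[0]  e.g. type=32bit
 *      HGCMFunctionParameter[1]  type=PageList, u.PageList.offset = offExtra
 *      HGCMPageListInfo          flags, offFirstPage, cPages, aPages[cPages]
 */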


/**
 * Performs the call and completion wait.
 *
 * @returns VBox status code of this operation, not necessarily the call.
 *
 * @param   pHGCMCall           The HGCM call info.
 * @param   pfnAsyncCallback    The async callback that will wait for the call
 *                              to complete.
 * @param   pvAsyncData         Argument for the callback.
 * @param   u32AsyncData        Argument for the callback.
 * @param   pfLeakIt            Where to return the leak-it/free-it indicator.
 *                              Cancellation fun.
 */
static int vbglR0HGCMInternalDoCall(VMMDevHGCMCall *pHGCMCall, PFNVBGLHGCMCALLBACK pfnAsyncCallback,
                                    void *pvAsyncData, uint32_t u32AsyncData, bool *pfLeakIt)
{
    int rc;

    Log(("calling VbglR0GRPerform\n"));
    rc = VbglR0GRPerform(&pHGCMCall->header.header);
    Log(("VbglR0GRPerform rc = %Rrc (header rc=%d)\n", rc, pHGCMCall->header.result));

    /*
     * If the call failed, but as a result of the request itself, then pretend
     * success. Upper layers will interpret the result code in the packet.
     */
    if (   RT_FAILURE(rc)
        && rc == pHGCMCall->header.result)
    {
        Assert(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE);
        rc = VINF_SUCCESS;
    }

    /*
     * Check if host decides to process the request asynchronously,
     * if so, we wait for it to complete using the caller supplied callback.
     */
    *pfLeakIt = false;
    if (rc == VINF_HGCM_ASYNC_EXECUTE)
    {
        Log(("Processing HGCM call asynchronously\n"));
        rc = pfnAsyncCallback(&pHGCMCall->header, pvAsyncData, u32AsyncData);
        if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
        {
            Assert(!(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_CANCELLED));
            rc = VINF_SUCCESS;
        }
        else
        {
            /*
             * The request didn't complete in time or the call was interrupted,
             * the RC from the callback indicates which. Try cancel the request.
             *
             * This is a bit messy because we're racing request completion. Sorry.
             */
            /** @todo It would be nice if we could use the waiter callback to do further
             *        waiting in case of a completion race. If it wasn't for WINNT having its own
             *        version of all that stuff, I would've done it already. */
            VMMDevHGCMCancel2 *pCancelReq;
            int rc2 = VbglR0GRAlloc((VMMDevRequestHeader **)&pCancelReq, sizeof(*pCancelReq), VMMDevReq_HGCMCancel2);
            if (RT_SUCCESS(rc2))
            {
                pCancelReq->physReqToCancel = VbglR0PhysHeapGetPhysAddr(pHGCMCall);
                rc2 = VbglR0GRPerform(&pCancelReq->header);
                VbglR0GRFree(&pCancelReq->header);
            }
#if 1 /** @todo ADDVER: Remove this on next minor version change. */
            if (rc2 == VERR_NOT_IMPLEMENTED)
            {
                /* host is too old, or we're out of heap. */
                pHGCMCall->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;
                pHGCMCall->header.header.requestType = VMMDevReq_HGCMCancel;
                rc2 = VbglR0GRPerform(&pHGCMCall->header.header);
                if (rc2 == VERR_INVALID_PARAMETER)
                    rc2 = VERR_NOT_FOUND;
                else if (RT_SUCCESS(rc))
                    RTThreadSleep(1);
            }
#endif
            if (RT_SUCCESS(rc)) rc = VERR_INTERRUPTED; /** @todo weed this out from the WINNT VBoxGuest code. */
            if (RT_SUCCESS(rc2))
            {
                Log(("vbglR0HGCMInternalDoCall: successfully cancelled\n"));
                pHGCMCall->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;
            }
            else
            {
                /*
                 * Wait for a bit while the host (hopefully) completes it.
                 */
                uint64_t u64Start       = RTTimeSystemMilliTS();
                uint32_t cMilliesToWait = rc2 == VERR_NOT_FOUND || rc2 == VERR_SEM_DESTROYED ? 500 : 2000;
                uint64_t cElapsed       = 0;
                if (rc2 != VERR_NOT_FOUND)
                {
                    static unsigned s_cErrors = 0;
                    if (s_cErrors++ < 32)
                        LogRel(("vbglR0HGCMInternalDoCall: Failed to cancel the HGCM call on %Rrc: rc2=%Rrc\n", rc, rc2));
                }
                else
                    Log(("vbglR0HGCMInternalDoCall: Cancel race rc=%Rrc rc2=%Rrc\n", rc, rc2));

                do
                {
                    ASMCompilerBarrier();       /* paranoia */
                    if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
                        break;
                    RTThreadSleep(1);
                    cElapsed = RTTimeSystemMilliTS() - u64Start;
                } while (cElapsed < cMilliesToWait);

                ASMCompilerBarrier();           /* paranoia^2 */
                if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
                    rc = VINF_SUCCESS;
                else
                {
                    LogRel(("vbglR0HGCMInternalDoCall: Leaking %u bytes. Pending call to %u with %u parms. (rc2=%Rrc)\n",
                            pHGCMCall->header.header.size, pHGCMCall->u32Function, pHGCMCall->cParms, rc2));
                    *pfLeakIt = true;
                }
                Log(("vbglR0HGCMInternalDoCall: Cancel race ended with rc=%Rrc (rc2=%Rrc) after %llu ms\n", rc, rc2, cElapsed));
            }
        }
    }

    Log(("GstHGCMCall: rc=%Rrc result=%Rrc fu32Flags=%#x fLeakIt=%d\n",
         rc, pHGCMCall->header.result, pHGCMCall->header.fu32Flags, *pfLeakIt));
    return rc;
}
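
/*
 * The pfLeakIt output decides who owns the request memory afterwards: the
 * caller frees it only when it is false, exactly as VbglR0HGCMInternalCall
 * does below, because after a failed cancellation the host may still write
 * into the buffer:
 *
 *      bool fLeakIt;
 *      rc = vbglR0HGCMInternalDoCall(pHGCMCall, pfnAsyncCallback, pvAsyncData, u32AsyncData, &fLeakIt);
 *      if (!fLeakIt)
 *          VbglR0GRFree(&pHGCMCall->header.header);
 */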


/**
 * Copies the result of the call back to the caller info structure and user
 * buffers (if using bounce buffers).
 *
 * @returns rc, unless RTR0MemUserCopyTo fails.
 * @param   pCallInfo           Call info structure to update.
 * @param   pHGCMCall           HGCM call request.
 * @param   pParmInfo           Parameter locking/buffering info.
 * @param   fIsUser             Is it a user (true) or kernel request.
 * @param   rc                  The current result code. Passed along to
 *                              preserve informational status codes.
 */
static int vbglR0HGCMInternalCopyBackResult(PVBGLIOCHGCMCALL pCallInfo, VMMDevHGCMCall const *pHGCMCall,
                                            struct VbglR0ParmInfo *pParmInfo, bool fIsUser, int rc)
{
    HGCMFunctionParameter const *pSrcParm = VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
    HGCMFunctionParameter       *pDstParm = VBGL_HGCM_GET_CALL_PARMS(pCallInfo);
    uint32_t const               cParms   = pCallInfo->cParms;
#ifdef USE_BOUNCE_BUFFERS
    uint32_t                     iLockBuf = 0;
#endif
    uint32_t                     iParm;
    RT_NOREF1(pParmInfo);
#ifndef USE_BOUNCE_BUFFERS
    RT_NOREF1(fIsUser);
#endif

    /*
     * The call result.
     */
    pCallInfo->Hdr.rc = pHGCMCall->header.result;

    /*
     * Copy back parameters.
     */
    for (iParm = 0; iParm < cParms; iParm++, pSrcParm++, pDstParm++)
    {
        switch (pDstParm->type)
        {
            case VMMDevHGCMParmType_32bit:
            case VMMDevHGCMParmType_64bit:
                *pDstParm = *pSrcParm;
                break;

            case VMMDevHGCMParmType_PageList:
                pDstParm->u.PageList.size = pSrcParm->u.PageList.size;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_In:
#ifdef USE_BOUNCE_BUFFERS
                if (   fIsUser
                    && iLockBuf < pParmInfo->cLockBufs
                    && iParm == pParmInfo->aLockBufs[iLockBuf].iParm)
                    iLockBuf++;
#endif
                pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ true))
                {
                    pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                    break;
                }
                RT_FALL_THRU();

            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
            {
#ifdef USE_BOUNCE_BUFFERS
                if (fIsUser)
                {
                    size_t cbOut = RT_MIN(pSrcParm->u.Pointer.size, pDstParm->u.Pointer.size);
                    if (cbOut)
                    {
                        int rc2;
                        Assert(pParmInfo->aLockBufs[iLockBuf].iParm == iParm);
                        rc2 = RTR0MemUserCopyTo((RTR3PTR)pDstParm->u.Pointer.u.linearAddr,
                                                pParmInfo->aLockBufs[iLockBuf].pvSmallBuf
                                                ? pParmInfo->aLockBufs[iLockBuf].pvSmallBuf
                                                : RTR0MemObjAddress(pParmInfo->aLockBufs[iLockBuf].hObj),
                                                cbOut);
                        if (RT_FAILURE(rc2))
                            return rc2;
                        iLockBuf++;
                    }
                    else if (   iLockBuf < pParmInfo->cLockBufs
                             && iParm == pParmInfo->aLockBufs[iLockBuf].iParm)
                        iLockBuf++;
                }
#endif
                pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                break;
            }

            default:
                AssertFailed();
                rc = VERR_INTERNAL_ERROR_4;
                break;
        }
    }

#ifdef USE_BOUNCE_BUFFERS
    Assert(!fIsUser || pParmInfo->cLockBufs == iLockBuf);
#endif
    return rc;
}


DECLR0VBGL(int) VbglR0HGCMInternalCall(PVBGLIOCHGCMCALL pCallInfo, uint32_t cbCallInfo, uint32_t fFlags, uint32_t fRequestor,
                                       PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
{
    bool                    fIsUser = (fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_USER;
    struct VbglR0ParmInfo   ParmInfo;
    size_t                  cbExtra;
    int                     rc;

    /*
     * Basic validation.
     */
    AssertMsgReturn(   !pCallInfo
                    || !pfnAsyncCallback
                    || pCallInfo->cParms > VBOX_HGCM_MAX_PARMS
                    || !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
                    ("pCallInfo=%p pfnAsyncCallback=%p fFlags=%#x\n", pCallInfo, pfnAsyncCallback, fFlags),
                    VERR_INVALID_PARAMETER);
    AssertReturn(   cbCallInfo >= sizeof(VBGLIOCHGCMCALL)
                 || cbCallInfo >= pCallInfo->cParms * sizeof(HGCMFunctionParameter),
                 VERR_INVALID_PARAMETER);

    Log(("GstHGCMCall: u32ClientID=%#x u32Function=%u cParms=%u cbCallInfo=%#x fFlags=%#x\n",
         pCallInfo->u32ClientID, pCallInfo->u32Function, pCallInfo->cParms, cbCallInfo, fFlags));

    /*
     * Validate, lock and buffer the parameters for the call.
     * This will calculate the amount of extra space for physical page list.
     */
    rc = vbglR0HGCMInternalPreprocessCall(pCallInfo, cbCallInfo, fIsUser, &ParmInfo, &cbExtra);
    if (RT_SUCCESS(rc))
    {
        /*
         * Allocate the request buffer and recreate the call request.
         */
        VMMDevHGCMCall *pHGCMCall;
        rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pHGCMCall,
                           sizeof(VMMDevHGCMCall) + pCallInfo->cParms * sizeof(HGCMFunctionParameter) + cbExtra,
                           VMMDevReq_HGCMCall);
        if (RT_SUCCESS(rc))
        {
            bool fLeakIt;
            vbglR0HGCMInternalInitCall(pHGCMCall, pCallInfo, cbCallInfo, fRequestor, fIsUser, &ParmInfo);

            /*
             * Perform the call.
             */
            rc = vbglR0HGCMInternalDoCall(pHGCMCall, pfnAsyncCallback, pvAsyncData, u32AsyncData, &fLeakIt);
            if (RT_SUCCESS(rc))
            {
                /*
                 * Copy back the result (parameters and buffers that changed).
                 */
                rc = vbglR0HGCMInternalCopyBackResult(pCallInfo, pHGCMCall, &ParmInfo, fIsUser, rc);
            }
            else
            {
                if (   rc != VERR_INTERRUPTED
                    && rc != VERR_TIMEOUT)
                {
                    static unsigned s_cErrors = 0;
                    if (s_cErrors++ < 32)
                        LogRel(("VbglR0HGCMInternalCall: vbglR0HGCMInternalDoCall failed. rc=%Rrc\n", rc));
                }
            }

            if (!fLeakIt)
                VbglR0GRFree(&pHGCMCall->header.header);
        }
    }
    else
        LogRel(("VbglR0HGCMInternalCall: vbglR0HGCMInternalPreprocessCall failed. rc=%Rrc\n", rc));

    /*
     * Release locks and free bounce buffers.
     */
    if (ParmInfo.cLockBufs)
        while (ParmInfo.cLockBufs-- > 0)
        {
            RTR0MemObjFree(ParmInfo.aLockBufs[ParmInfo.cLockBufs].hObj, false /*fFreeMappings*/);
#ifdef USE_BOUNCE_BUFFERS
            RTMemTmpFree(ParmInfo.aLockBufs[ParmInfo.cLockBufs].pvSmallBuf);
#endif
        }

    return rc;
}
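
/*
 * End-to-end sketch of a kernel-mode call through VbglR0HGCMInternalCall,
 * assuming an already connected idClient, a hypothetical function number 1,
 * and the placeholder callback from earlier; VBGL_HGCM_HDR_INIT is the
 * header-initialization macro from the VBoxGuestLib headers:
 *
 *      struct
 *      {
 *          VBGLIOCHGCMCALL         Hdr;
 *          HGCMFunctionParameter   aParms[2];
 *      } Req;
 *      char szMsg[] = "hello";
 *      int  rc;
 *
 *      VBGL_HGCM_HDR_INIT(&Req.Hdr, idClient, 1, 2);
 *      Req.aParms[0].type                   = VMMDevHGCMParmType_32bit;
 *      Req.aParms[0].u.value32              = 42;
 *      Req.aParms[1].type                   = VMMDevHGCMParmType_LinAddr_In;
 *      Req.aParms[1].u.Pointer.size         = sizeof(szMsg);
 *      Req.aParms[1].u.Pointer.u.linearAddr = (uintptr_t)&szMsg[0];
 *      rc = VbglR0HGCMInternalCall(&Req.Hdr, sizeof(Req), VBGLR0_HGCMCALL_F_KERNEL, 0,
 *                                  vbglR0SketchCallback, NULL, 0);
 */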


#if ARCH_BITS == 64
DECLR0VBGL(int) VbglR0HGCMInternalCall32(PVBGLIOCHGCMCALL pCallInfo, uint32_t cbCallInfo, uint32_t fFlags, uint32_t fRequestor,
                                         PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
{
    PVBGLIOCHGCMCALL         pCallInfo64 = NULL;
    HGCMFunctionParameter   *pParm64     = NULL;
    HGCMFunctionParameter32 *pParm32     = NULL;
    uint32_t                 cParms      = 0;
    uint32_t                 iParm       = 0;
    int                      rc          = VINF_SUCCESS;

    /*
     * Input validation.
     */
    AssertMsgReturn(   !pCallInfo
                    || !pfnAsyncCallback
                    || pCallInfo->cParms > VBOX_HGCM_MAX_PARMS
                    || !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
                    ("pCallInfo=%p pfnAsyncCallback=%p fFlags=%#x\n", pCallInfo, pfnAsyncCallback, fFlags),
                    VERR_INVALID_PARAMETER);
    AssertReturn(   cbCallInfo >= sizeof(VBGLIOCHGCMCALL)
                 || cbCallInfo >= pCallInfo->cParms * sizeof(HGCMFunctionParameter32),
                 VERR_INVALID_PARAMETER);

    /* This Assert does not work on Solaris/Windows 64/32 mixed mode, not sure why, skipping for now */
#if !defined(RT_OS_SOLARIS) && !defined(RT_OS_WINDOWS)
    AssertReturn((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_KERNEL, VERR_WRONG_ORDER);
#endif

    cParms = pCallInfo->cParms;
    Log(("VbglR0HGCMInternalCall32: cParms=%d, u32Function=%d, fFlags=%#x\n", cParms, pCallInfo->u32Function, fFlags));

    /*
     * The simple approach, allocate a temporary request and convert the parameters.
     */
    pCallInfo64 = (PVBGLIOCHGCMCALL)RTMemTmpAllocZ(sizeof(*pCallInfo64) + cParms * sizeof(HGCMFunctionParameter));
    if (!pCallInfo64)
        return VERR_NO_TMP_MEMORY;

    *pCallInfo64 = *pCallInfo;
    pParm32 = VBGL_HGCM_GET_CALL_PARMS32(pCallInfo);
    pParm64 = VBGL_HGCM_GET_CALL_PARMS(pCallInfo64);
    for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++)
    {
        switch (pParm32->type)
        {
            case VMMDevHGCMParmType_32bit:
                pParm64->type      = VMMDevHGCMParmType_32bit;
                pParm64->u.value32 = pParm32->u.value32;
                break;

            case VMMDevHGCMParmType_64bit:
                pParm64->type      = VMMDevHGCMParmType_64bit;
                pParm64->u.value64 = pParm32->u.value64;
                break;

            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
            case VMMDevHGCMParmType_LinAddr_In:
                pParm64->type                   = pParm32->type;
                pParm64->u.Pointer.size         = pParm32->u.Pointer.size;
                pParm64->u.Pointer.u.linearAddr = pParm32->u.Pointer.u.linearAddr;
                break;

            default:
                rc = VERR_INVALID_PARAMETER;
                LogRel(("VbglR0HGCMInternalCall32: pParm32 type %#x invalid.\n", pParm32->type));
                break;
        }
        if (RT_FAILURE(rc))
            break;
    }
    if (RT_SUCCESS(rc))
    {
        rc = VbglR0HGCMInternalCall(pCallInfo64, sizeof(*pCallInfo64) + cParms * sizeof(HGCMFunctionParameter), fFlags,
                                    fRequestor, pfnAsyncCallback, pvAsyncData, u32AsyncData);

        if (RT_SUCCESS(rc))
        {
            *pCallInfo = *pCallInfo64;

            /*
             * Copy back.
             */
            pParm32 = VBGL_HGCM_GET_CALL_PARMS32(pCallInfo);
            pParm64 = VBGL_HGCM_GET_CALL_PARMS(pCallInfo64);
            for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++)
            {
                switch (pParm64->type)
                {
                    case VMMDevHGCMParmType_32bit:
                        pParm32->u.value32 = pParm64->u.value32;
                        break;

                    case VMMDevHGCMParmType_64bit:
                        pParm32->u.value64 = pParm64->u.value64;
                        break;

                    case VMMDevHGCMParmType_LinAddr_Out:
                    case VMMDevHGCMParmType_LinAddr:
                    case VMMDevHGCMParmType_LinAddr_In:
                        pParm32->u.Pointer.size = pParm64->u.Pointer.size;
                        break;

                    default:
                        LogRel(("VbglR0HGCMInternalCall32: failed invalid pParm32 type %d\n", pParm32->type));
                        rc = VERR_INTERNAL_ERROR_3;
                        break;
                }
            }
        }
        else
        {
            static unsigned s_cErrors = 0;
            if (s_cErrors++ < 32)
                LogRel(("VbglR0HGCMInternalCall32: VbglR0HGCMInternalCall failed. rc=%Rrc\n", rc));
        }
    }
    else
    {
        static unsigned s_cErrors = 0;
        if (s_cErrors++ < 32)
            LogRel(("VbglR0HGCMInternalCall32: failed. rc=%Rrc\n", rc));
    }

    RTMemTmpFree(pCallInfo64);
    return rc;
}
#endif /* ARCH_BITS == 64 */