VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/lib/VBoxGuestR0LibHGCMInternal.cpp@ 75548

Last change on this file: revision 75548, checked in by vboxsync, 6 years ago

VMMDev,VBoxGuestR0Lib: Added a new HGCM parameter type that allows embedding small buffers into the request (similar to the page list info structure). Currently only available to kernel code (i.e. shared folders). bugref:9172

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 49.6 KB
Line 
1/* $Id: VBoxGuestR0LibHGCMInternal.cpp 75548 2018-11-18 04:52:14Z vboxsync $ */
2/** @file
3 * VBoxGuestLib - Host-Guest Communication Manager internal functions, implemented by VBoxGuest
4 */
5
6/*
7 * Copyright (C) 2006-2018 Oracle Corporation
8 *
9 * Permission is hereby granted, free of charge, to any person
10 * obtaining a copy of this software and associated documentation
11 * files (the "Software"), to deal in the Software without
12 * restriction, including without limitation the rights to use,
13 * copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the
15 * Software is furnished to do so, subject to the following
16 * conditions:
17 *
18 * The above copyright notice and this permission notice shall be
19 * included in all copies or substantial portions of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
23 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
24 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
25 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
26 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
28 * OTHER DEALINGS IN THE SOFTWARE.
29 */
30
31
32/*********************************************************************************************************************************
33* Header Files *
34*********************************************************************************************************************************/
35#define LOG_GROUP LOG_GROUP_HGCM
36
37#include "VBoxGuestR0LibInternal.h"
38#include <iprt/asm.h>
39#include <iprt/assert.h>
40#include <iprt/mem.h>
41#include <iprt/memobj.h>
42#include <iprt/string.h>
43#include <iprt/thread.h>
44#include <iprt/time.h>
45
46#ifndef VBGL_VBOXGUEST
47# error "This file should only be part of the VBoxGuestR0LibBase library that is linked into VBoxGuest."
48#endif
49
50
51/*********************************************************************************************************************************
52* Defined Constants And Macros *
53*********************************************************************************************************************************/
54/** The max parameter buffer size for a user request. */
55#define VBGLR0_MAX_HGCM_USER_PARM (24*_1M)
56/** The max parameter buffer size for a kernel request. */
57#define VBGLR0_MAX_HGCM_KERNEL_PARM (16*_1M)
58/** The max embedded buffer size. */
59#define VBGLR0_MAX_HGCM_EMBEDDED_BUFFER _64K
60
61#if defined(RT_OS_LINUX) || defined(RT_OS_DARWIN)
62/** Linux needs to use bounce buffers since RTR0MemObjLockUser has unwanted
63 * side effects.
64 * Darwin 32bit & 64bit also needs this because of 4GB/4GB user/kernel space. */
65# define USE_BOUNCE_BUFFERS
66#endif
67
68
69/*********************************************************************************************************************************
70* Structures and Typedefs *
71*********************************************************************************************************************************/
72/**
73 * Lock info structure used by VbglR0HGCMInternalCall and its helpers.
74 */
75struct VbglR0ParmInfo
76{
77 uint32_t cLockBufs;
78 struct
79 {
80 uint32_t iParm;
81 RTR0MEMOBJ hObj;
82#ifdef USE_BOUNCE_BUFFERS
83 void *pvSmallBuf;
84#endif
85 } aLockBufs[10];
86};
87
88
89
90/* These functions can be only used by VBoxGuest. */
91
92DECLR0VBGL(int) VbglR0HGCMInternalConnect(HGCMServiceLocation const *pLoc, uint32_t fRequestor, HGCMCLIENTID *pidClient,
93 PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
94{
95 int rc;
96 if ( RT_VALID_PTR(pLoc)
97 && RT_VALID_PTR(pidClient)
98 && RT_VALID_PTR(pfnAsyncCallback))
99 {
100 /* Allocate request */
101 VMMDevHGCMConnect *pHGCMConnect = NULL;
102 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pHGCMConnect, sizeof(VMMDevHGCMConnect), VMMDevReq_HGCMConnect);
103 if (RT_SUCCESS(rc))
104 {
105 /* Initialize request memory */
106 pHGCMConnect->header.header.fRequestor = fRequestor;
107
108 pHGCMConnect->header.fu32Flags = 0;
109
110 memcpy(&pHGCMConnect->loc, pLoc, sizeof(pHGCMConnect->loc));
111 pHGCMConnect->u32ClientID = 0;
112
113 /* Issue request */
114 rc = VbglR0GRPerform (&pHGCMConnect->header.header);
115 if (RT_SUCCESS(rc))
116 {
117 /* Check if host decides to process the request asynchronously. */
118 if (rc == VINF_HGCM_ASYNC_EXECUTE)
119 {
120 /* Wait for request completion interrupt notification from host */
121 pfnAsyncCallback(&pHGCMConnect->header, pvAsyncData, u32AsyncData);
122 }
123
124 rc = pHGCMConnect->header.result;
125 if (RT_SUCCESS(rc))
126 *pidClient = pHGCMConnect->u32ClientID;
127 }
128 VbglR0GRFree(&pHGCMConnect->header.header);
129 }
130 }
131 else
132 rc = VERR_INVALID_PARAMETER;
133 return rc;
134}
135
136
137DECLR0VBGL(int) VbglR0HGCMInternalDisconnect(HGCMCLIENTID idClient, uint32_t fRequestor,
138 PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
139{
140 int rc;
141 if ( idClient != 0
142 && pfnAsyncCallback)
143 {
144 /* Allocate request */
145 VMMDevHGCMDisconnect *pHGCMDisconnect = NULL;
146 rc = VbglR0GRAlloc ((VMMDevRequestHeader **)&pHGCMDisconnect, sizeof (VMMDevHGCMDisconnect), VMMDevReq_HGCMDisconnect);
147 if (RT_SUCCESS(rc))
148 {
149 /* Initialize request memory */
150 pHGCMDisconnect->header.header.fRequestor = fRequestor;
151
152 pHGCMDisconnect->header.fu32Flags = 0;
153
154 pHGCMDisconnect->u32ClientID = idClient;
155
156 /* Issue request */
157 rc = VbglR0GRPerform(&pHGCMDisconnect->header.header);
158 if (RT_SUCCESS(rc))
159 {
160 /* Check if host decides to process the request asynchronously. */
161 if (rc == VINF_HGCM_ASYNC_EXECUTE)
162 {
163 /* Wait for request completion interrupt notification from host */
164 pfnAsyncCallback(&pHGCMDisconnect->header, pvAsyncData, u32AsyncData);
165 }
166
167 rc = pHGCMDisconnect->header.result;
168 }
169
170 VbglR0GRFree(&pHGCMDisconnect->header.header);
171 }
172 }
173 else
174 rc = VERR_INVALID_PARAMETER;
175 return rc;
176}
177
178
179/**
180 * Preprocesses the HGCM call, validating and locking/buffering parameters.
181 *
182 * @returns VBox status code.
183 *
184 * @param pCallInfo The call info.
185 * @param cbCallInfo The size of the call info structure.
186 * @param fIsUser Is it a user request or kernel request.
187 * @param pcbExtra Where to return the extra request space needed for
188 * physical page lists.
189 */
190static int vbglR0HGCMInternalPreprocessCall(PCVBGLIOCHGCMCALL pCallInfo, uint32_t cbCallInfo,
191 bool fIsUser, struct VbglR0ParmInfo *pParmInfo, size_t *pcbExtra)
192{
193 HGCMFunctionParameter const *pSrcParm = VBGL_HGCM_GET_CALL_PARMS(pCallInfo);
194 uint32_t const cParms = pCallInfo->cParms;
195 uint32_t iParm;
196 uint32_t cb;
197
198 /*
199 * Lock down the any linear buffers so we can get their addresses
200 * and figure out how much extra storage we need for page lists.
201 *
202 * Note! With kernel mode users we can be assertive. For user mode users
203 * we should just (debug) log it and fail without any fanfare.
204 */
205 *pcbExtra = 0;
206 pParmInfo->cLockBufs = 0;
207 for (iParm = 0; iParm < cParms; iParm++, pSrcParm++)
208 {
209 switch (pSrcParm->type)
210 {
211 case VMMDevHGCMParmType_32bit:
212 Log4(("GstHGCMCall: parm=%u type=32bit: %#010x\n", iParm, pSrcParm->u.value32));
213 break;
214
215 case VMMDevHGCMParmType_64bit:
216 Log4(("GstHGCMCall: parm=%u type=64bit: %#018RX64\n", iParm, pSrcParm->u.value64));
217 break;
218
219 case VMMDevHGCMParmType_PageList:
220 if (fIsUser)
221 return VERR_INVALID_PARAMETER;
222 cb = pSrcParm->u.PageList.size;
223 if (cb)
224 {
225 uint32_t off = pSrcParm->u.PageList.offset;
226 HGCMPageListInfo *pPgLst;
227 uint32_t cPages;
228 uint32_t u32;
229
230 AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
231 VERR_OUT_OF_RANGE);
232 AssertMsgReturn( off >= cParms * sizeof(HGCMFunctionParameter)
233 && off <= cbCallInfo - sizeof(HGCMPageListInfo),
234 ("offset=%#x cParms=%#x cbCallInfo=%#x\n", off, cParms, cbCallInfo),
235 VERR_INVALID_PARAMETER);
236
237 pPgLst = (HGCMPageListInfo *)((uint8_t *)pCallInfo + off);
238 cPages = pPgLst->cPages;
239 u32 = RT_UOFFSETOF_DYN(HGCMPageListInfo, aPages[cPages]) + off;
240 AssertMsgReturn(u32 <= cbCallInfo,
241 ("u32=%#x (cPages=%#x offset=%#x) cbCallInfo=%#x\n", u32, cPages, off, cbCallInfo),
242 VERR_INVALID_PARAMETER);
243 AssertMsgReturn(pPgLst->offFirstPage < PAGE_SIZE, ("#x\n", pPgLst->offFirstPage), VERR_INVALID_PARAMETER);
244 u32 = RT_ALIGN_32(pPgLst->offFirstPage + cb, PAGE_SIZE) >> PAGE_SHIFT;
245 AssertMsgReturn(cPages == u32, ("cPages=%#x u32=%#x\n", cPages, u32), VERR_INVALID_PARAMETER);
246 AssertMsgReturn(VBOX_HGCM_F_PARM_ARE_VALID(pPgLst->flags), ("%#x\n", pPgLst->flags), VERR_INVALID_PARAMETER);
247 Log4(("GstHGCMCall: parm=%u type=pglst: cb=%#010x cPgs=%u offPg0=%#x flags=%#x\n",
248 iParm, cb, cPages, pPgLst->offFirstPage, pPgLst->flags));
249 u32 = cPages;
250 while (u32-- > 0)
251 {
252 Log4(("GstHGCMCall: pg#%u=%RHp\n", u32, pPgLst->aPages[u32]));
253 AssertMsgReturn(!(pPgLst->aPages[u32] & (PAGE_OFFSET_MASK | UINT64_C(0xfff0000000000000))),
254 ("pg#%u=%RHp\n", u32, pPgLst->aPages[u32]),
255 VERR_INVALID_PARAMETER);
256 }
257
258 *pcbExtra += RT_UOFFSETOF_DYN(HGCMPageListInfo, aPages[pPgLst->cPages]);
259 }
260 else
261 Log4(("GstHGCMCall: parm=%u type=pglst: cb=0\n", iParm));
262 break;
263
264 case VMMDevHGCMParmType_Embedded:
265 if (fIsUser) /// @todo relax this.
266 return VERR_INVALID_PARAMETER;
267 cb = pSrcParm->u.Embedded.cbData;
268 if (cb)
269 {
270 uint32_t off = pSrcParm->u.Embedded.offData;
271 AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_EMBEDDED_BUFFER, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_EMBEDDED_BUFFER),
272 VERR_INVALID_PARAMETER);
273 AssertMsgReturn(cb <= cbCallInfo - cParms * sizeof(HGCMFunctionParameter),
274 ("cb=%#x cParms=%#x cbCallInfo=%3x\n", cb, cParms, cbCallInfo),
275 VERR_INVALID_PARAMETER);
276 AssertMsgReturn( off >= cParms * sizeof(HGCMFunctionParameter)
277 && off <= cbCallInfo - cb,
278 ("offData=%#x cParms=%#x cbCallInfo=%#x\n", off, cParms, cbCallInfo),
279 VERR_INVALID_PARAMETER);
280 AssertMsgReturn(VBOX_HGCM_F_PARM_ARE_VALID(pSrcParm->u.Embedded.fFlags),
281 ("%#x\n", pSrcParm->u.Embedded.fFlags), VERR_INVALID_PARAMETER);
282
283 *pcbExtra += RT_ALIGN_32(cb, 8);
284 }
285 else
286 Log4(("GstHGCMCall: parm=%u type=embed: cb=0\n", iParm));
287 break;
288
289
290 case VMMDevHGCMParmType_LinAddr_Locked_In:
291 case VMMDevHGCMParmType_LinAddr_Locked_Out:
292 case VMMDevHGCMParmType_LinAddr_Locked:
293 if (fIsUser)
294 return VERR_INVALID_PARAMETER;
295 if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ true))
296 {
297 cb = pSrcParm->u.Pointer.size;
298 AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
299 VERR_OUT_OF_RANGE);
300 if (cb != 0)
301 Log4(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p\n",
302 iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr));
303 else
304 Log4(("GstHGCMCall: parm=%u type=%#x: cb=0\n", iParm, pSrcParm->type));
305 break;
306 }
307 RT_FALL_THRU();
308
309 case VMMDevHGCMParmType_LinAddr_In:
310 case VMMDevHGCMParmType_LinAddr_Out:
311 case VMMDevHGCMParmType_LinAddr:
312 cb = pSrcParm->u.Pointer.size;
313 if (cb != 0)
314 {
315#ifdef USE_BOUNCE_BUFFERS
316 void *pvSmallBuf = NULL;
317#endif
318 uint32_t iLockBuf = pParmInfo->cLockBufs;
319 RTR0MEMOBJ hObj;
320 int rc;
321 uint32_t fAccess = pSrcParm->type == VMMDevHGCMParmType_LinAddr_In
322 || pSrcParm->type == VMMDevHGCMParmType_LinAddr_Locked_In
323 ? RTMEM_PROT_READ
324 : RTMEM_PROT_READ | RTMEM_PROT_WRITE;
325
326 AssertReturn(iLockBuf < RT_ELEMENTS(pParmInfo->aLockBufs), VERR_INVALID_PARAMETER);
327 if (!fIsUser)
328 {
329 AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
330 VERR_OUT_OF_RANGE);
331 rc = RTR0MemObjLockKernel(&hObj, (void *)pSrcParm->u.Pointer.u.linearAddr, cb, fAccess);
332 if (RT_FAILURE(rc))
333 {
334 Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemObjLockKernel(,%p,%#x) -> %Rrc\n",
335 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr, cb, rc));
336 return rc;
337 }
338 Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p locked kernel -> %p\n",
339 iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
340 }
341 else if (cb > VBGLR0_MAX_HGCM_USER_PARM)
342 {
343 Log(("GstHGCMCall: id=%#x fn=%u parm=%u pv=%p cb=%#x > %#x -> out of range\n",
344 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr,
345 cb, VBGLR0_MAX_HGCM_USER_PARM));
346 return VERR_OUT_OF_RANGE;
347 }
348 else
349 {
350#ifndef USE_BOUNCE_BUFFERS
351 rc = RTR0MemObjLockUser(&hObj, (RTR3PTR)pSrcParm->u.Pointer.u.linearAddr, cb, fAccess, NIL_RTR0PROCESS);
352 if (RT_FAILURE(rc))
353 {
354 Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemObjLockUser(,%p,%#x,nil) -> %Rrc\n",
355 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr, cb, rc));
356 return rc;
357 }
358 Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p locked user -> %p\n",
359 iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
360
361#else /* USE_BOUNCE_BUFFERS */
362 /*
363 * This is a bit massive, but we don't want to waste a
364 * whole page for a 3 byte string buffer (guest props).
365 *
366 * The threshold is ASSUMING sizeof(RTMEMHDR) == 16 and
367 * the system is using some power of two allocator.
368 */
369 /** @todo A more efficient strategy would be to combine buffers. However it
370 * is probably going to be more massive than the current code, so
371 * it can wait till later. */
372 bool fCopyIn = pSrcParm->type != VMMDevHGCMParmType_LinAddr_Out
373 && pSrcParm->type != VMMDevHGCMParmType_LinAddr_Locked_Out;
374 if (cb <= PAGE_SIZE / 2 - 16)
375 {
376 pvSmallBuf = fCopyIn ? RTMemTmpAlloc(cb) : RTMemTmpAllocZ(cb);
377 if (RT_UNLIKELY(!pvSmallBuf))
378 return VERR_NO_MEMORY;
379 if (fCopyIn)
380 {
381 rc = RTR0MemUserCopyFrom(pvSmallBuf, pSrcParm->u.Pointer.u.linearAddr, cb);
382 if (RT_FAILURE(rc))
383 {
384 RTMemTmpFree(pvSmallBuf);
385 Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemUserCopyFrom(,%p,%#x) -> %Rrc\n",
386 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm,
387 pSrcParm->u.Pointer.u.linearAddr, cb, rc));
388 return rc;
389 }
390 }
391 rc = RTR0MemObjLockKernel(&hObj, pvSmallBuf, cb, fAccess);
392 if (RT_FAILURE(rc))
393 {
394 RTMemTmpFree(pvSmallBuf);
395 Log(("GstHGCMCall: RTR0MemObjLockKernel failed for small buffer: rc=%Rrc pvSmallBuf=%p cb=%#x\n",
396 rc, pvSmallBuf, cb));
397 return rc;
398 }
399 Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p small buffer %p -> %p\n",
400 iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, pvSmallBuf, hObj));
401 }
402 else
403 {
404 rc = RTR0MemObjAllocPage(&hObj, cb, false /*fExecutable*/);
405 if (RT_FAILURE(rc))
406 return rc;
407 if (!fCopyIn)
408 memset(RTR0MemObjAddress(hObj), '\0', cb);
409 else
410 {
411 rc = RTR0MemUserCopyFrom(RTR0MemObjAddress(hObj), pSrcParm->u.Pointer.u.linearAddr, cb);
412 if (RT_FAILURE(rc))
413 {
414 RTR0MemObjFree(hObj, false /*fFreeMappings*/);
415 Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemUserCopyFrom(,%p,%#x) -> %Rrc\n",
416 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm,
417 pSrcParm->u.Pointer.u.linearAddr, cb, rc));
418 return rc;
419 }
420 }
421 Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p big buffer -> %p\n",
422 iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
423 }
424#endif /* USE_BOUNCE_BUFFERS */
425 }
426
427 pParmInfo->aLockBufs[iLockBuf].iParm = iParm;
428 pParmInfo->aLockBufs[iLockBuf].hObj = hObj;
429#ifdef USE_BOUNCE_BUFFERS
430 pParmInfo->aLockBufs[iLockBuf].pvSmallBuf = pvSmallBuf;
431#endif
432 pParmInfo->cLockBufs = iLockBuf + 1;
433
434 if (VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ false))
435 {
436 size_t const cPages = RTR0MemObjSize(hObj) >> PAGE_SHIFT;
437 *pcbExtra += RT_UOFFSETOF_DYN(HGCMPageListInfo, aPages[cPages]);
438 }
439 }
440 else
441 Log4(("GstHGCMCall: parm=%u type=%#x: cb=0\n", iParm, pSrcParm->type));
442 break;
443
444 default:
445 return VERR_INVALID_PARAMETER;
446 }
447 }
448
449 return VINF_SUCCESS;
450}
451
452
453/**
454 * Translates locked linear address to the normal type.
455 * The locked types are only for the guest side and not handled by the host.
456 *
457 * @returns normal linear address type.
458 * @param enmType The type.
459 */
460static HGCMFunctionParameterType vbglR0HGCMInternalConvertLinAddrType(HGCMFunctionParameterType enmType)
461{
462 switch (enmType)
463 {
464 case VMMDevHGCMParmType_LinAddr_Locked_In:
465 return VMMDevHGCMParmType_LinAddr_In;
466 case VMMDevHGCMParmType_LinAddr_Locked_Out:
467 return VMMDevHGCMParmType_LinAddr_Out;
468 case VMMDevHGCMParmType_LinAddr_Locked:
469 return VMMDevHGCMParmType_LinAddr;
470 default:
471 return enmType;
472 }
473}
474
475
476/**
477 * Translates linear address types to page list direction flags.
478 *
479 * @returns page list flags.
480 * @param enmType The type.
481 */
482static uint32_t vbglR0HGCMInternalLinAddrTypeToPageListFlags(HGCMFunctionParameterType enmType)
483{
484 switch (enmType)
485 {
486 case VMMDevHGCMParmType_LinAddr_In:
487 case VMMDevHGCMParmType_LinAddr_Locked_In:
488 return VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
489
490 case VMMDevHGCMParmType_LinAddr_Out:
491 case VMMDevHGCMParmType_LinAddr_Locked_Out:
492 return VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;
493
494 default: AssertFailed();
495 case VMMDevHGCMParmType_LinAddr:
496 case VMMDevHGCMParmType_LinAddr_Locked:
497 return VBOX_HGCM_F_PARM_DIRECTION_BOTH;
498 }
499}
500
501
502/**
503 * Initializes the call request that we're sending to the host.
504 *
505 * @returns VBox status code.
506 *
507 * @param pCallInfo The call info.
508 * @param cbCallInfo The size of the call info structure.
509 * @param fRequestor VMMDEV_REQUESTOR_XXX.
510 * @param fIsUser Is it a user request or kernel request.
511 * @param pcbExtra Where to return the extra request space needed for
512 * physical page lists.
513 */
514static void vbglR0HGCMInternalInitCall(VMMDevHGCMCall *pHGCMCall, PCVBGLIOCHGCMCALL pCallInfo,
515 uint32_t cbCallInfo, uint32_t fRequestor, bool fIsUser, struct VbglR0ParmInfo *pParmInfo)
516{
517 HGCMFunctionParameter const *pSrcParm = VBGL_HGCM_GET_CALL_PARMS(pCallInfo);
518 HGCMFunctionParameter *pDstParm = VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
519 uint32_t const cParms = pCallInfo->cParms;
520 uint32_t offExtra = (uint32_t)((uintptr_t)(pDstParm + cParms) - (uintptr_t)pHGCMCall);
521 uint32_t iLockBuf = 0;
522 uint32_t iParm;
523 RT_NOREF1(cbCallInfo);
524#ifndef USE_BOUNCE_BUFFERS
525 RT_NOREF1(fIsUser);
526#endif
527
528 /*
529 * The call request headers.
530 */
531 pHGCMCall->header.header.fRequestor = !fIsUser || (fRequestor & VMMDEV_REQUESTOR_USERMODE) ? fRequestor
532 : VMMDEV_REQUESTOR_USERMODE | VMMDEV_REQUESTOR_USR_NOT_GIVEN
533 | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN | VMMDEV_REQUESTOR_CON_DONT_KNOW;
534
535 pHGCMCall->header.fu32Flags = 0;
536 pHGCMCall->header.result = VINF_SUCCESS;
537
538 pHGCMCall->u32ClientID = pCallInfo->u32ClientID;
539 pHGCMCall->u32Function = pCallInfo->u32Function;
540 pHGCMCall->cParms = cParms;
541
542 /*
543 * The parameters.
544 */
545 for (iParm = 0; iParm < cParms; iParm++, pSrcParm++, pDstParm++)
546 {
547 switch (pSrcParm->type)
548 {
549 case VMMDevHGCMParmType_32bit:
550 case VMMDevHGCMParmType_64bit:
551 *pDstParm = *pSrcParm;
552 break;
553
554 case VMMDevHGCMParmType_PageList:
555 pDstParm->type = VMMDevHGCMParmType_PageList;
556 pDstParm->u.PageList.size = pSrcParm->u.PageList.size;
557 if (pSrcParm->u.PageList.size)
558 {
559 HGCMPageListInfo const *pSrcPgLst = (HGCMPageListInfo *)((uint8_t *)pCallInfo + pSrcParm->u.PageList.offset);
560 HGCMPageListInfo *pDstPgLst = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offExtra);
561 uint32_t const cPages = pSrcPgLst->cPages;
562 uint32_t iPage;
563
564 pDstParm->u.PageList.offset = offExtra;
565 pDstPgLst->flags = pSrcPgLst->flags;
566 pDstPgLst->offFirstPage = pSrcPgLst->offFirstPage;
567 pDstPgLst->cPages = (uint16_t)cPages;
568 for (iPage = 0; iPage < cPages; iPage++)
569 pDstPgLst->aPages[iPage] = pSrcPgLst->aPages[iPage];
570
571 offExtra += RT_UOFFSETOF_DYN(HGCMPageListInfo, aPages[cPages]);
572 }
573 else
574 pDstParm->u.PageList.offset = 0; /** @todo will fail on the host side now */
575 break;
576
577 case VMMDevHGCMParmType_Embedded:
578 {
579 uint32_t const cb = pSrcParm->u.Embedded.cbData;
580 pDstParm->type = VMMDevHGCMParmType_Embedded;
581 pDstParm->u.Embedded.cbData = cb;
582 pDstParm->u.Embedded.offData = offExtra;
583 if (cb > 0)
584 {
585 uint8_t *pbDst = (uint8_t *)pHGCMCall + offExtra;
586 if (pSrcParm->u.Embedded.fFlags & VBOX_HGCM_F_PARM_DIRECTION_TO_HOST)
587 {
588 memcpy(pbDst, (uint8_t const *)pCallInfo + pSrcParm->u.Embedded.offData, cb);
589 if (RT_ALIGN(cb, 8) != cb)
590 memset(pbDst + cb, 0, RT_ALIGN(cb, 8) - cb);
591 }
592 else
593 RT_BZERO(pbDst, RT_ALIGN(cb, 8));
594 offExtra += RT_ALIGN(cb, 8);
595 }
596 break;
597 }
598
599 case VMMDevHGCMParmType_LinAddr_Locked_In:
600 case VMMDevHGCMParmType_LinAddr_Locked_Out:
601 case VMMDevHGCMParmType_LinAddr_Locked:
602 if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ true))
603 {
604 *pDstParm = *pSrcParm;
605 pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
606 break;
607 }
608 RT_FALL_THRU();
609
610 case VMMDevHGCMParmType_LinAddr_In:
611 case VMMDevHGCMParmType_LinAddr_Out:
612 case VMMDevHGCMParmType_LinAddr:
613 if (pSrcParm->u.Pointer.size != 0)
614 {
615#ifdef USE_BOUNCE_BUFFERS
616 void *pvSmallBuf = pParmInfo->aLockBufs[iLockBuf].pvSmallBuf;
617#endif
618 RTR0MEMOBJ hObj = pParmInfo->aLockBufs[iLockBuf].hObj;
619 Assert(iParm == pParmInfo->aLockBufs[iLockBuf].iParm);
620
621 if (VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ false))
622 {
623 HGCMPageListInfo *pDstPgLst = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offExtra);
624 size_t const cPages = RTR0MemObjSize(hObj) >> PAGE_SHIFT;
625 size_t iPage;
626
627 pDstParm->type = VMMDevHGCMParmType_PageList;
628 pDstParm->u.PageList.size = pSrcParm->u.Pointer.size;
629 pDstParm->u.PageList.offset = offExtra;
630 pDstPgLst->flags = vbglR0HGCMInternalLinAddrTypeToPageListFlags(pSrcParm->type);
631#ifdef USE_BOUNCE_BUFFERS
632 if (fIsUser)
633 pDstPgLst->offFirstPage = (uintptr_t)pvSmallBuf & PAGE_OFFSET_MASK;
634 else
635#endif
636 pDstPgLst->offFirstPage = (uint16_t)(pSrcParm->u.Pointer.u.linearAddr & PAGE_OFFSET_MASK);
637 pDstPgLst->cPages = (uint16_t)cPages; Assert(pDstPgLst->cPages == cPages);
638 for (iPage = 0; iPage < cPages; iPage++)
639 {
640 pDstPgLst->aPages[iPage] = RTR0MemObjGetPagePhysAddr(hObj, iPage);
641 Assert(pDstPgLst->aPages[iPage] != NIL_RTHCPHYS);
642 }
643
644 offExtra += RT_UOFFSETOF_DYN(HGCMPageListInfo, aPages[cPages]);
645 }
646 else
647 {
648 pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
649 pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
650#ifdef USE_BOUNCE_BUFFERS
651 if (fIsUser)
652 pDstParm->u.Pointer.u.linearAddr = pvSmallBuf
653 ? (uintptr_t)pvSmallBuf
654 : (uintptr_t)RTR0MemObjAddress(hObj);
655 else
656#endif
657 pDstParm->u.Pointer.u.linearAddr = pSrcParm->u.Pointer.u.linearAddr;
658 }
659 iLockBuf++;
660 }
661 else
662 {
663 pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
664 pDstParm->u.Pointer.size = 0;
665 pDstParm->u.Pointer.u.linearAddr = 0;
666 }
667 break;
668
669 default:
670 AssertFailed();
671 pDstParm->type = VMMDevHGCMParmType_Invalid;
672 break;
673 }
674 }
675}
676
677
/**
 * Performs the call and completion wait.
 *
 * @returns VBox status code of this operation, not necessarily the call.
 *
 * @param   pHGCMCall           The HGCM call info.
 * @param   pfnAsyncCallback    The async callback that will wait for the call
 *                              to complete.
 * @param   pvAsyncData         Argument for the callback.
 * @param   u32AsyncData        Argument for the callback.
 * @param   pfLeakIt            Where to return the leak it / free it,
 *                              indicator. Cancellation fun.
 */
static int vbglR0HGCMInternalDoCall(VMMDevHGCMCall *pHGCMCall, PFNVBGLHGCMCALLBACK pfnAsyncCallback,
                                    void *pvAsyncData, uint32_t u32AsyncData, bool *pfLeakIt)
{
    int rc;

    Log(("calling VbglR0GRPerform\n"));
    rc = VbglR0GRPerform(&pHGCMCall->header.header);
    Log(("VbglR0GRPerform rc = %Rrc (header rc=%d)\n", rc, pHGCMCall->header.result));

    /*
     * If the call failed, but as a result of the request itself, then pretend
     * success. Upper layers will interpret the result code in the packet.
     */
    if (   RT_FAILURE(rc)
        && rc == pHGCMCall->header.result)
    {
        Assert(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE);
        rc = VINF_SUCCESS;
    }

    /*
     * Check if host decides to process the request asynchronously,
     * if so, we wait for it to complete using the caller supplied callback.
     */
    *pfLeakIt = false;
    if (rc == VINF_HGCM_ASYNC_EXECUTE)
    {
        Log(("Processing HGCM call asynchronously\n"));
        rc = pfnAsyncCallback(&pHGCMCall->header, pvAsyncData, u32AsyncData);
        if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
        {
            /* Completed while we waited - treat as success regardless of the
               callback's own status. */
            Assert(!(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_CANCELLED));
            rc = VINF_SUCCESS;
        }
        else
        {
            /*
             * The request didn't complete in time or the call was interrupted,
             * the RC from the callback indicates which. Try cancel the request.
             *
             * This is a bit messy because we're racing request completion. Sorry.
             */
            /** @todo It would be nice if we could use the waiter callback to do further
             * waiting in case of a completion race. If it wasn't for WINNT having its own
             * version of all that stuff, I would've done it already. */
            VMMDevHGCMCancel2 *pCancelReq;
            int rc2 = VbglR0GRAlloc((VMMDevRequestHeader **)&pCancelReq, sizeof(*pCancelReq), VMMDevReq_HGCMCancel2);
            if (RT_SUCCESS(rc2))
            {
                /* Cancel by physical address, so the host can match it even if
                   it is still executing the call. */
                pCancelReq->physReqToCancel = VbglR0PhysHeapGetPhysAddr(pHGCMCall);
                rc2 = VbglR0GRPerform(&pCancelReq->header);
                VbglR0GRFree(&pCancelReq->header);
            }
#if 1 /** @todo ADDVER: Remove this on next minor version change. */
            if (rc2 == VERR_NOT_IMPLEMENTED)
            {
                /* host is too old, or we're out of heap. */
                pHGCMCall->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;
                pHGCMCall->header.header.requestType = VMMDevReq_HGCMCancel;
                rc2 = VbglR0GRPerform(&pHGCMCall->header.header);
                if (rc2 == VERR_INVALID_PARAMETER)
                    rc2 = VERR_NOT_FOUND;
                else if (RT_SUCCESS(rc)) /* NOTE(review): tests rc (the wait status), not rc2 - confirm this is intentional. */
                    RTThreadSleep(1);
            }
#endif
            if (RT_SUCCESS(rc)) rc = VERR_INTERRUPTED; /** @todo weed this out from the WINNT VBoxGuest code. */
            if (RT_SUCCESS(rc2))
            {
                Log(("vbglR0HGCMInternalDoCall: successfully cancelled\n"));
                pHGCMCall->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;
            }
            else
            {
                /*
                 * Wait for a bit while the host (hopefully) completes it.
                 */
                uint64_t u64Start       = RTTimeSystemMilliTS();
                /* VERR_NOT_FOUND / VERR_SEM_DESTROYED suggest the host already
                   finished or is tearing down, so wait a shorter while. */
                uint32_t cMilliesToWait = rc2 == VERR_NOT_FOUND || rc2 == VERR_SEM_DESTROYED ? 500 : 2000;
                uint64_t cElapsed       = 0;
                if (rc2 != VERR_NOT_FOUND)
                {
                    static unsigned s_cErrors = 0;
                    if (s_cErrors++ < 32)
                        LogRel(("vbglR0HGCMInternalDoCall: Failed to cancel the HGCM call on %Rrc: rc2=%Rrc\n", rc, rc2));
                }
                else
                    Log(("vbglR0HGCMInternalDoCall: Cancel race rc=%Rrc rc2=%Rrc\n", rc, rc2));

                do
                {
                    ASMCompilerBarrier(); /* paranoia */
                    if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
                        break;
                    RTThreadSleep(1);
                    cElapsed = RTTimeSystemMilliTS() - u64Start;
                } while (cElapsed < cMilliesToWait);

                ASMCompilerBarrier(); /* paranoia^2 */
                if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
                    rc = VINF_SUCCESS;
                else
                {
                    /* Host never completed the request; the caller must not free
                       it (the host may still write to it), so leak on purpose. */
                    LogRel(("vbglR0HGCMInternalDoCall: Leaking %u bytes. Pending call to %u with %u parms. (rc2=%Rrc)\n",
                            pHGCMCall->header.header.size, pHGCMCall->u32Function, pHGCMCall->cParms, rc2));
                    *pfLeakIt = true;
                }
                Log(("vbglR0HGCMInternalDoCall: Cancel race ended with rc=%Rrc (rc2=%Rrc) after %llu ms\n", rc, rc2, cElapsed));
            }
        }
    }

    Log(("GstHGCMCall: rc=%Rrc result=%Rrc fu32Flags=%#x fLeakIt=%d\n",
         rc, pHGCMCall->header.result, pHGCMCall->header.fu32Flags, *pfLeakIt));
    return rc;
}
807
808
809/**
810 * Copies the result of the call back to the caller info structure and user
811 * buffers (if using bounce buffers).
812 *
813 * @returns rc, unless RTR0MemUserCopyTo fails.
814 * @param pCallInfo Call info structure to update.
815 * @param cbCallInfo The size of the client request.
816 * @param pHGCMCall HGCM call request.
817 * @param cbHGCMCall The size of the HGCM call request.
818 * @param pParmInfo Parameter locking/buffering info.
819 * @param fIsUser Is it a user (true) or kernel request.
820 * @param rc The current result code. Passed along to
821 * preserve informational status codes.
822 */
823static int vbglR0HGCMInternalCopyBackResult(PVBGLIOCHGCMCALL pCallInfo, uint32_t cbCallInfo,
824 VMMDevHGCMCall const *pHGCMCall, uint32_t cbHGCMCall,
825 struct VbglR0ParmInfo *pParmInfo, bool fIsUser, int rc)
826{
827 HGCMFunctionParameter const *pSrcParm = VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
828 HGCMFunctionParameter *pDstParm = VBGL_HGCM_GET_CALL_PARMS(pCallInfo);
829 uint32_t const cParms = pCallInfo->cParms;
830#ifdef USE_BOUNCE_BUFFERS
831 uint32_t iLockBuf = 0;
832#endif
833 uint32_t iParm;
834 RT_NOREF1(pParmInfo);
835#ifndef USE_BOUNCE_BUFFERS
836 RT_NOREF1(fIsUser);
837#endif
838
839 /*
840 * The call result.
841 */
842 pCallInfo->Hdr.rc = pHGCMCall->header.result;
843
844 /*
845 * Copy back parameters.
846 */
847 /** @todo This is assuming user data (pDstParm) is buffered. Not true
848 * on OS/2, though I'm not sure we care... */
849 for (iParm = 0; iParm < cParms; iParm++, pSrcParm++, pDstParm++)
850 {
851 switch (pDstParm->type)
852 {
853 case VMMDevHGCMParmType_32bit:
854 case VMMDevHGCMParmType_64bit:
855 *pDstParm = *pSrcParm;
856 break;
857
858 case VMMDevHGCMParmType_PageList:
859 pDstParm->u.PageList.size = pSrcParm->u.PageList.size;
860 break;
861
862 case VMMDevHGCMParmType_Embedded:
863 {
864 uint32_t cb;
865 pDstParm->u.Embedded.cbData = cb = pSrcParm->u.Embedded.cbData;
866 if ( cb > 0
867 && (pDstParm->u.Embedded.fFlags & VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST))
868 {
869 uint32_t const offDst = pDstParm->u.Embedded.offData;
870 uint32_t const offSrc = pDstParm->u.Embedded.offData;
871 AssertReturn(offDst < cbCallInfo, VERR_INTERNAL_ERROR_2);
872 AssertReturn(offDst >= sizeof(*pCallInfo) + cParms * sizeof(*pDstParm), VERR_INTERNAL_ERROR_2);
873 AssertReturn(cb <= cbCallInfo - offDst , VERR_INTERNAL_ERROR_2);
874 AssertReturn(offSrc < cbCallInfo, VERR_INTERNAL_ERROR_2);
875 AssertReturn(offSrc >= sizeof(*pHGCMCall) + cParms * sizeof(*pSrcParm), VERR_INTERNAL_ERROR_2);
876 AssertReturn(cb <= cbHGCMCall - offSrc, VERR_INTERNAL_ERROR_2);
877
878 memcpy((uint8_t *)pCallInfo + offDst, (uint8_t const *)pHGCMCall + offSrc, cb);
879 }
880 break;
881 }
882
883 case VMMDevHGCMParmType_LinAddr_Locked_In:
884 case VMMDevHGCMParmType_LinAddr_In:
885#ifdef USE_BOUNCE_BUFFERS
886 if ( fIsUser
887 && iLockBuf < pParmInfo->cLockBufs
888 && iParm == pParmInfo->aLockBufs[iLockBuf].iParm)
889 iLockBuf++;
890#endif
891 pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
892 break;
893
894 case VMMDevHGCMParmType_LinAddr_Locked_Out:
895 case VMMDevHGCMParmType_LinAddr_Locked:
896 if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ true))
897 {
898 pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
899 break;
900 }
901 RT_FALL_THRU();
902
903 case VMMDevHGCMParmType_LinAddr_Out:
904 case VMMDevHGCMParmType_LinAddr:
905 {
906#ifdef USE_BOUNCE_BUFFERS
907 if (fIsUser)
908 {
909 size_t cbOut = RT_MIN(pSrcParm->u.Pointer.size, pDstParm->u.Pointer.size);
910 if (cbOut)
911 {
912 int rc2;
913 Assert(pParmInfo->aLockBufs[iLockBuf].iParm == iParm);
914 rc2 = RTR0MemUserCopyTo((RTR3PTR)pDstParm->u.Pointer.u.linearAddr,
915 pParmInfo->aLockBufs[iLockBuf].pvSmallBuf
916 ? pParmInfo->aLockBufs[iLockBuf].pvSmallBuf
917 : RTR0MemObjAddress(pParmInfo->aLockBufs[iLockBuf].hObj),
918 cbOut);
919 if (RT_FAILURE(rc2))
920 return rc2;
921 iLockBuf++;
922 }
923 else if ( iLockBuf < pParmInfo->cLockBufs
924 && iParm == pParmInfo->aLockBufs[iLockBuf].iParm)
925 iLockBuf++;
926 }
927#endif
928 pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
929 break;
930 }
931
932 default:
933 AssertFailed();
934 rc = VERR_INTERNAL_ERROR_4;
935 break;
936 }
937 }
938
939#ifdef USE_BOUNCE_BUFFERS
940 Assert(!fIsUser || pParmInfo->cLockBufs == iLockBuf);
941#endif
942 return rc;
943}
944
945
946DECLR0VBGL(int) VbglR0HGCMInternalCall(PVBGLIOCHGCMCALL pCallInfo, uint32_t cbCallInfo, uint32_t fFlags, uint32_t fRequestor,
947 PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
948{
949 bool fIsUser = (fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_USER;
950 struct VbglR0ParmInfo ParmInfo;
951 size_t cbExtra;
952 int rc;
953
954 /*
955 * Basic validation.
956 */
957 AssertMsgReturn( !pCallInfo
958 || !pfnAsyncCallback
959 || pCallInfo->cParms > VBOX_HGCM_MAX_PARMS
960 || !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
961 ("pCallInfo=%p pfnAsyncCallback=%p fFlags=%#x\n", pCallInfo, pfnAsyncCallback, fFlags),
962 VERR_INVALID_PARAMETER);
963 AssertReturn( cbCallInfo >= sizeof(VBGLIOCHGCMCALL)
964 || cbCallInfo >= pCallInfo->cParms * sizeof(HGCMFunctionParameter),
965 VERR_INVALID_PARAMETER);
966
967 Log(("GstHGCMCall: u32ClientID=%#x u32Function=%u cParms=%u cbCallInfo=%#x fFlags=%#x\n",
968 pCallInfo->u32ClientID, pCallInfo->u32ClientID, pCallInfo->u32Function, pCallInfo->cParms, cbCallInfo, fFlags));
969
970 /*
971 * Validate, lock and buffer the parameters for the call.
972 * This will calculate the amount of extra space for physical page list.
973 */
974 rc = vbglR0HGCMInternalPreprocessCall(pCallInfo, cbCallInfo, fIsUser, &ParmInfo, &cbExtra);
975 if (RT_SUCCESS(rc))
976 {
977 /*
978 * Allocate the request buffer and recreate the call request.
979 */
980 VMMDevHGCMCall *pHGCMCall;
981 uint32_t const cbHGCMCall = sizeof(VMMDevHGCMCall) + pCallInfo->cParms * sizeof(HGCMFunctionParameter) + cbExtra;
982 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pHGCMCall, cbHGCMCall, VMMDevReq_HGCMCall);
983 if (RT_SUCCESS(rc))
984 {
985 bool fLeakIt;
986 vbglR0HGCMInternalInitCall(pHGCMCall, pCallInfo, cbCallInfo, fRequestor, fIsUser, &ParmInfo);
987
988 /*
989 * Perform the call.
990 */
991 rc = vbglR0HGCMInternalDoCall(pHGCMCall, pfnAsyncCallback, pvAsyncData, u32AsyncData, &fLeakIt);
992 if (RT_SUCCESS(rc))
993 {
994 /*
995 * Copy back the result (parameters and buffers that changed).
996 */
997 rc = vbglR0HGCMInternalCopyBackResult(pCallInfo, cbCallInfo, pHGCMCall, cbHGCMCall, &ParmInfo, fIsUser, rc);
998 }
999 else
1000 {
1001 if ( rc != VERR_INTERRUPTED
1002 && rc != VERR_TIMEOUT)
1003 {
1004 static unsigned s_cErrors = 0;
1005 if (s_cErrors++ < 32)
1006 LogRel(("VbglR0HGCMInternalCall: vbglR0HGCMInternalDoCall failed. rc=%Rrc\n", rc));
1007 }
1008 }
1009
1010 if (!fLeakIt)
1011 VbglR0GRFree(&pHGCMCall->header.header);
1012 }
1013 }
1014 else
1015 LogRel(("VbglR0HGCMInternalCall: vbglR0HGCMInternalPreprocessCall failed. rc=%Rrc\n", rc));
1016
1017 /*
1018 * Release locks and free bounce buffers.
1019 */
1020 if (ParmInfo.cLockBufs)
1021 while (ParmInfo.cLockBufs-- > 0)
1022 {
1023 RTR0MemObjFree(ParmInfo.aLockBufs[ParmInfo.cLockBufs].hObj, false /*fFreeMappings*/);
1024#ifdef USE_BOUNCE_BUFFERS
1025 RTMemTmpFree(ParmInfo.aLockBufs[ParmInfo.cLockBufs].pvSmallBuf);
1026#endif
1027 }
1028
1029 return rc;
1030}
1031
1032
1033#if ARCH_BITS == 64
1034DECLR0VBGL(int) VbglR0HGCMInternalCall32(PVBGLIOCHGCMCALL pCallInfo, uint32_t cbCallInfo, uint32_t fFlags, uint32_t fRequestor,
1035 PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
1036{
1037 PVBGLIOCHGCMCALL pCallInfo64 = NULL;
1038 HGCMFunctionParameter *pParm64 = NULL;
1039 HGCMFunctionParameter32 *pParm32 = NULL;
1040 uint32_t cParms = 0;
1041 uint32_t iParm = 0;
1042 int rc = VINF_SUCCESS;
1043
1044 /*
1045 * Input validation.
1046 */
1047 AssertMsgReturn( !pCallInfo
1048 || !pfnAsyncCallback
1049 || pCallInfo->cParms > VBOX_HGCM_MAX_PARMS
1050 || !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
1051 ("pCallInfo=%p pfnAsyncCallback=%p fFlags=%#x\n", pCallInfo, pfnAsyncCallback, fFlags),
1052 VERR_INVALID_PARAMETER);
1053 AssertReturn( cbCallInfo >= sizeof(VBGLIOCHGCMCALL)
1054 || cbCallInfo >= pCallInfo->cParms * sizeof(HGCMFunctionParameter32),
1055 VERR_INVALID_PARAMETER);
1056
1057 /* This Assert does not work on Solaris/Windows 64/32 mixed mode, not sure why, skipping for now */
1058#if !defined(RT_OS_SOLARIS) && !defined(RT_OS_WINDOWS)
1059 AssertReturn((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_KERNEL, VERR_WRONG_ORDER);
1060#endif
1061
1062 cParms = pCallInfo->cParms;
1063 Log(("VbglR0HGCMInternalCall32: cParms=%d, u32Function=%d, fFlags=%#x\n", cParms, pCallInfo->u32Function, fFlags));
1064
1065 /*
1066 * The simple approach, allocate a temporary request and convert the parameters.
1067 */
1068 pCallInfo64 = (PVBGLIOCHGCMCALL)RTMemTmpAllocZ(sizeof(*pCallInfo64) + cParms * sizeof(HGCMFunctionParameter));
1069 if (!pCallInfo64)
1070 return VERR_NO_TMP_MEMORY;
1071
1072 *pCallInfo64 = *pCallInfo;
1073 pParm32 = VBGL_HGCM_GET_CALL_PARMS32(pCallInfo);
1074 pParm64 = VBGL_HGCM_GET_CALL_PARMS(pCallInfo64);
1075 for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++)
1076 {
1077 switch (pParm32->type)
1078 {
1079 case VMMDevHGCMParmType_32bit:
1080 pParm64->type = VMMDevHGCMParmType_32bit;
1081 pParm64->u.value32 = pParm32->u.value32;
1082 break;
1083
1084 case VMMDevHGCMParmType_64bit:
1085 pParm64->type = VMMDevHGCMParmType_64bit;
1086 pParm64->u.value64 = pParm32->u.value64;
1087 break;
1088
1089 case VMMDevHGCMParmType_LinAddr_Out:
1090 case VMMDevHGCMParmType_LinAddr:
1091 case VMMDevHGCMParmType_LinAddr_In:
1092 pParm64->type = pParm32->type;
1093 pParm64->u.Pointer.size = pParm32->u.Pointer.size;
1094 pParm64->u.Pointer.u.linearAddr = pParm32->u.Pointer.u.linearAddr;
1095 break;
1096
1097 default:
1098 rc = VERR_INVALID_PARAMETER;
1099 LogRel(("VbglR0HGCMInternalCall32: pParm32 type %#x invalid.\n", pParm32->type));
1100 break;
1101 }
1102 if (RT_FAILURE(rc))
1103 break;
1104 }
1105 if (RT_SUCCESS(rc))
1106 {
1107 rc = VbglR0HGCMInternalCall(pCallInfo64, sizeof(*pCallInfo64) + cParms * sizeof(HGCMFunctionParameter), fFlags,
1108 fRequestor, pfnAsyncCallback, pvAsyncData, u32AsyncData);
1109
1110 if (RT_SUCCESS(rc))
1111 {
1112 *pCallInfo = *pCallInfo64;
1113
1114 /*
1115 * Copy back.
1116 */
1117 pParm32 = VBGL_HGCM_GET_CALL_PARMS32(pCallInfo);
1118 pParm64 = VBGL_HGCM_GET_CALL_PARMS(pCallInfo64);
1119 for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++)
1120 {
1121 switch (pParm64->type)
1122 {
1123 case VMMDevHGCMParmType_32bit:
1124 pParm32->u.value32 = pParm64->u.value32;
1125 break;
1126
1127 case VMMDevHGCMParmType_64bit:
1128 pParm32->u.value64 = pParm64->u.value64;
1129 break;
1130
1131 case VMMDevHGCMParmType_LinAddr_Out:
1132 case VMMDevHGCMParmType_LinAddr:
1133 case VMMDevHGCMParmType_LinAddr_In:
1134 pParm32->u.Pointer.size = pParm64->u.Pointer.size;
1135 break;
1136
1137 default:
1138 LogRel(("VbglR0HGCMInternalCall32: failed invalid pParm32 type %d\n", pParm32->type));
1139 rc = VERR_INTERNAL_ERROR_3;
1140 break;
1141 }
1142 }
1143 }
1144 else
1145 {
1146 static unsigned s_cErrors = 0;
1147 if (s_cErrors++ < 32)
1148 LogRel(("VbglR0HGCMInternalCall32: VbglR0HGCMInternalCall failed. rc=%Rrc\n", rc));
1149 }
1150 }
1151 else
1152 {
1153 static unsigned s_cErrors = 0;
1154 if (s_cErrors++ < 32)
1155 LogRel(("VbglR0HGCMInternalCall32: failed. rc=%Rrc\n", rc));
1156 }
1157
1158 RTMemTmpFree(pCallInfo64);
1159 return rc;
1160}
1161#endif /* ARCH_BITS == 64 */
1162
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette