VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/lib/VBoxGuestR0LibHGCMInternal.cpp@76196

Last change on this file since 76196 was 76196, checked in by vboxsync, 6 years ago

VMMDev/HGCM,VBoxGuest: New page list variant for more effectively passing physical contiguous buffers (VbglR0PhysHeapAlloc, kmalloc, ++). bugref:9172

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 50.2 KB
/* $Id: VBoxGuestR0LibHGCMInternal.cpp 76196 2018-12-12 19:24:05Z vboxsync $ */
/** @file
 * VBoxGuestLib - Host-Guest Communication Manager internal functions, implemented by VBoxGuest
 */

/*
 * Copyright (C) 2006-2018 Oracle Corporation
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_HGCM

#include "VBoxGuestR0LibInternal.h"
#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/mem.h>
#include <iprt/memobj.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include <iprt/time.h>

#ifndef VBGL_VBOXGUEST
# error "This file should only be part of the VBoxGuestR0LibBase library that is linked into VBoxGuest."
#endif


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** The max parameter buffer size for a user request. */
#define VBGLR0_MAX_HGCM_USER_PARM       (24*_1M)
/** The max parameter buffer size for a kernel request. */
#define VBGLR0_MAX_HGCM_KERNEL_PARM     (16*_1M)
/** The max embedded buffer size. */
#define VBGLR0_MAX_HGCM_EMBEDDED_BUFFER _64K

#if defined(RT_OS_LINUX) || defined(RT_OS_DARWIN)
/** Linux needs to use bounce buffers since RTR0MemObjLockUser has unwanted
 * side effects.
 * Darwin 32bit & 64bit also needs this because of 4GB/4GB user/kernel space. */
# define USE_BOUNCE_BUFFERS
#endif
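
/*
 * Illustrative sketch (editor's example, not part of the original file): which
 * of the limits above applies to a request, as enforced by the preprocessing
 * code further down.  The guard define and the helper name are hypothetical.
 */
#ifdef VBGL_HGCM_INTERNAL_EXAMPLES
static uint32_t vbglExampleMaxParmSize(bool fIsUser)
{
    /* User requests get a somewhat larger budget than kernel ones (24MB vs 16MB). */
    return fIsUser ? VBGLR0_MAX_HGCM_USER_PARM : VBGLR0_MAX_HGCM_KERNEL_PARM;
}
#endif /* VBGL_HGCM_INTERNAL_EXAMPLES */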


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * Lock info structure used by VbglR0HGCMInternalCall and its helpers.
 */
struct VbglR0ParmInfo
{
    /** Number of entries in use in aLockBufs. */
    uint32_t cLockBufs;
    struct
    {
        /** The index of the parameter this entry belongs to. */
        uint32_t    iParm;
        /** Memory object locking down the buffer (or the bounce buffer). */
        RTR0MEMOBJ  hObj;
#ifdef USE_BOUNCE_BUFFERS
        /** Small bounce buffer from RTMemTmpAlloc, NULL when hObj holds whole pages. */
        void       *pvSmallBuf;
#endif
    } aLockBufs[10];
};
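
/*
 * Illustrative sketch (editor's example, not part of the original file): how
 * the aLockBufs entries accumulated during preprocessing are torn down again.
 * This mirrors the cleanup loop at the bottom of VbglR0HGCMInternalCall; the
 * guard define and helper name are hypothetical.
 */
#ifdef VBGL_HGCM_INTERNAL_EXAMPLES
static void vbglExampleFreeParmInfo(struct VbglR0ParmInfo *pParmInfo)
{
    uint32_t i = pParmInfo->cLockBufs;
    while (i-- > 0)
    {
        /* Release the lock (and, for big bounce buffers, the pages themselves). */
        RTR0MemObjFree(pParmInfo->aLockBufs[i].hObj, false /*fFreeMappings*/);
# ifdef USE_BOUNCE_BUFFERS
        /* RTMemTmpFree tolerates NULL, so entries without a small buffer are fine. */
        RTMemTmpFree(pParmInfo->aLockBufs[i].pvSmallBuf);
# endif
    }
    pParmInfo->cLockBufs = 0;
}
#endif /* VBGL_HGCM_INTERNAL_EXAMPLES */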



/* These functions can only be used by VBoxGuest. */

DECLR0VBGL(int) VbglR0HGCMInternalConnect(HGCMServiceLocation const *pLoc, uint32_t fRequestor, HGCMCLIENTID *pidClient,
                                          PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
{
    int rc;
    if (   RT_VALID_PTR(pLoc)
        && RT_VALID_PTR(pidClient)
        && RT_VALID_PTR(pfnAsyncCallback))
    {
        /* Allocate request */
        VMMDevHGCMConnect *pHGCMConnect = NULL;
        rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pHGCMConnect, sizeof(VMMDevHGCMConnect), VMMDevReq_HGCMConnect);
        if (RT_SUCCESS(rc))
        {
            /* Initialize request memory */
            pHGCMConnect->header.header.fRequestor = fRequestor;

            pHGCMConnect->header.fu32Flags = 0;

            memcpy(&pHGCMConnect->loc, pLoc, sizeof(pHGCMConnect->loc));
            pHGCMConnect->u32ClientID = 0;

            /* Issue request */
            rc = VbglR0GRPerform(&pHGCMConnect->header.header);
            if (RT_SUCCESS(rc))
            {
                /* Check if host decides to process the request asynchronously. */
                if (rc == VINF_HGCM_ASYNC_EXECUTE)
                {
                    /* Wait for request completion interrupt notification from host */
                    pfnAsyncCallback(&pHGCMConnect->header, pvAsyncData, u32AsyncData);
                }

                rc = pHGCMConnect->header.result;
                if (RT_SUCCESS(rc))
                    *pidClient = pHGCMConnect->u32ClientID;
            }
            VbglR0GRFree(&pHGCMConnect->header.header);
        }
    }
    else
        rc = VERR_INVALID_PARAMETER;
    return rc;
}
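
/*
 * Illustrative sketch (editor's example, not part of the original file): a
 * minimal way VBoxGuest-internal code could connect to an HGCM service.  The
 * busy-wait callback stands in for the real driver's interrupt-signalled
 * event wait; the guard define, helper names, service choice and the use of
 * fRequestor=0 are assumptions for illustration only.
 */
#ifdef VBGL_HGCM_INTERNAL_EXAMPLES
static DECLCALLBACK(int) vbglExampleAsyncWait(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
{
    RT_NOREF(pvUser, u32User);
    /* The real VBoxGuest callback sleeps on an event that the VMMDev interrupt
       handler signals; polling is only acceptable in a sketch. */
    while (!(pHdr->fu32Flags & VBOX_HGCM_REQ_DONE))
        RTThreadSleep(1);
    return VINF_SUCCESS;
}

static int vbglExampleConnect(HGCMCLIENTID *pidClient)
{
    HGCMServiceLocation Loc;
    Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
    RTStrCopy(Loc.u.host.achName, sizeof(Loc.u.host.achName), "VBoxGuestPropSvc");
    return VbglR0HGCMInternalConnect(&Loc, 0 /*fRequestor: kernel driver*/, pidClient,
                                     vbglExampleAsyncWait, NULL /*pvAsyncData*/, 0 /*u32AsyncData*/);
}
#endif /* VBGL_HGCM_INTERNAL_EXAMPLES */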


DECLR0VBGL(int) VbglR0HGCMInternalDisconnect(HGCMCLIENTID idClient, uint32_t fRequestor,
                                             PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
{
    int rc;
    if (   idClient != 0
        && pfnAsyncCallback)
    {
        /* Allocate request */
        VMMDevHGCMDisconnect *pHGCMDisconnect = NULL;
        rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pHGCMDisconnect, sizeof(VMMDevHGCMDisconnect), VMMDevReq_HGCMDisconnect);
        if (RT_SUCCESS(rc))
        {
            /* Initialize request memory */
            pHGCMDisconnect->header.header.fRequestor = fRequestor;

            pHGCMDisconnect->header.fu32Flags = 0;

            pHGCMDisconnect->u32ClientID = idClient;

            /* Issue request */
            rc = VbglR0GRPerform(&pHGCMDisconnect->header.header);
            if (RT_SUCCESS(rc))
            {
                /* Check if host decides to process the request asynchronously. */
                if (rc == VINF_HGCM_ASYNC_EXECUTE)
                {
                    /* Wait for request completion interrupt notification from host */
                    pfnAsyncCallback(&pHGCMDisconnect->header, pvAsyncData, u32AsyncData);
                }

                rc = pHGCMDisconnect->header.result;
            }

            VbglR0GRFree(&pHGCMDisconnect->header.header);
        }
    }
    else
        rc = VERR_INVALID_PARAMETER;
    return rc;
}
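
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * tearing the connection down again, reusing the hypothetical wait callback
 * from the sketch above.
 */
#ifdef VBGL_HGCM_INTERNAL_EXAMPLES
static int vbglExampleDisconnect(HGCMCLIENTID idClient)
{
    return VbglR0HGCMInternalDisconnect(idClient, 0 /*fRequestor: kernel driver*/,
                                        vbglExampleAsyncWait, NULL /*pvAsyncData*/, 0 /*u32AsyncData*/);
}
#endif /* VBGL_HGCM_INTERNAL_EXAMPLES */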


/**
 * Preprocesses the HGCM call, validating and locking/buffering parameters.
 *
 * @returns VBox status code.
 *
 * @param   pCallInfo       The call info.
 * @param   cbCallInfo      The size of the call info structure.
 * @param   fIsUser         Is it a user request or kernel request.
 * @param   pParmInfo       Where to return locking/buffering info for the
 *                          parameters.
 * @param   pcbExtra        Where to return the extra request space needed for
 *                          physical page lists.
 */
static int vbglR0HGCMInternalPreprocessCall(PCVBGLIOCHGCMCALL pCallInfo, uint32_t cbCallInfo,
                                            bool fIsUser, struct VbglR0ParmInfo *pParmInfo, size_t *pcbExtra)
{
    HGCMFunctionParameter const *pSrcParm = VBGL_HGCM_GET_CALL_PARMS(pCallInfo);
    uint32_t const               cParms   = pCallInfo->cParms;
    uint32_t                     iParm;
    uint32_t                     cb;

    /*
     * Lock down any linear buffers so we can get their addresses
     * and figure out how much extra storage we need for page lists.
     *
     * Note! With kernel mode users we can be assertive. For user mode users
     *       we should just (debug) log it and fail without any fanfare.
     */
    *pcbExtra = 0;
    pParmInfo->cLockBufs = 0;
    for (iParm = 0; iParm < cParms; iParm++, pSrcParm++)
    {
        switch (pSrcParm->type)
        {
            case VMMDevHGCMParmType_32bit:
                Log4(("GstHGCMCall: parm=%u type=32bit: %#010x\n", iParm, pSrcParm->u.value32));
                break;

            case VMMDevHGCMParmType_64bit:
                Log4(("GstHGCMCall: parm=%u type=64bit: %#018RX64\n", iParm, pSrcParm->u.value64));
                break;

            case VMMDevHGCMParmType_PageList:
            case VMMDevHGCMParmType_ContiguousPageList:
                if (fIsUser)
                    return VERR_INVALID_PARAMETER;
                cb = pSrcParm->u.PageList.size;
                if (cb)
                {
                    uint32_t          off = pSrcParm->u.PageList.offset;
                    HGCMPageListInfo *pPgLst;
                    uint32_t          cPages;
                    uint32_t          u32;

                    AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
                                    VERR_OUT_OF_RANGE);
                    AssertMsgReturn(   off >= cParms * sizeof(HGCMFunctionParameter)
                                    && off <= cbCallInfo - sizeof(HGCMPageListInfo),
                                    ("offset=%#x cParms=%#x cbCallInfo=%#x\n", off, cParms, cbCallInfo),
                                    VERR_INVALID_PARAMETER);

                    pPgLst = (HGCMPageListInfo *)((uint8_t *)pCallInfo + off);
                    cPages = pPgLst->cPages;
                    u32    = RT_UOFFSETOF_DYN(HGCMPageListInfo, aPages[cPages]) + off;
                    AssertMsgReturn(u32 <= cbCallInfo,
                                    ("u32=%#x (cPages=%#x offset=%#x) cbCallInfo=%#x\n", u32, cPages, off, cbCallInfo),
                                    VERR_INVALID_PARAMETER);
                    AssertMsgReturn(pPgLst->offFirstPage < PAGE_SIZE, ("%#x\n", pPgLst->offFirstPage), VERR_INVALID_PARAMETER);
                    u32 = RT_ALIGN_32(pPgLst->offFirstPage + cb, PAGE_SIZE) >> PAGE_SHIFT;
                    AssertMsgReturn(cPages == u32, ("cPages=%#x u32=%#x\n", cPages, u32), VERR_INVALID_PARAMETER);
                    AssertMsgReturn(VBOX_HGCM_F_PARM_ARE_VALID(pPgLst->flags), ("%#x\n", pPgLst->flags), VERR_INVALID_PARAMETER);
                    Log4(("GstHGCMCall: parm=%u type=pglst: cb=%#010x cPgs=%u offPg0=%#x flags=%#x\n",
                          iParm, cb, cPages, pPgLst->offFirstPage, pPgLst->flags));
                    u32 = cPages;
                    while (u32-- > 0)
                    {
                        Log4(("GstHGCMCall: pg#%u=%RHp\n", u32, pPgLst->aPages[u32]));
                        AssertMsgReturn(!(pPgLst->aPages[u32] & (PAGE_OFFSET_MASK | UINT64_C(0xfff0000000000000))),
                                        ("pg#%u=%RHp\n", u32, pPgLst->aPages[u32]),
                                        VERR_INVALID_PARAMETER);
                    }

                    *pcbExtra += RT_UOFFSETOF_DYN(HGCMPageListInfo, aPages[pPgLst->cPages]);
                }
                else
                    Log4(("GstHGCMCall: parm=%u type=pglst: cb=0\n", iParm));
                break;

            case VMMDevHGCMParmType_Embedded:
                if (fIsUser) /// @todo relax this.
                    return VERR_INVALID_PARAMETER;
                cb = pSrcParm->u.Embedded.cbData;
                if (cb)
                {
                    uint32_t off = pSrcParm->u.Embedded.offData;
                    AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_EMBEDDED_BUFFER, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_EMBEDDED_BUFFER),
                                    VERR_INVALID_PARAMETER);
                    AssertMsgReturn(cb <= cbCallInfo - cParms * sizeof(HGCMFunctionParameter),
                                    ("cb=%#x cParms=%#x cbCallInfo=%#x\n", cb, cParms, cbCallInfo),
                                    VERR_INVALID_PARAMETER);
                    AssertMsgReturn(   off >= cParms * sizeof(HGCMFunctionParameter)
                                    && off <= cbCallInfo - cb,
                                    ("offData=%#x cParms=%#x cbCallInfo=%#x\n", off, cParms, cbCallInfo),
                                    VERR_INVALID_PARAMETER);
                    AssertMsgReturn(VBOX_HGCM_F_PARM_ARE_VALID(pSrcParm->u.Embedded.fFlags),
                                    ("%#x\n", pSrcParm->u.Embedded.fFlags), VERR_INVALID_PARAMETER);

                    *pcbExtra += RT_ALIGN_32(cb, 8);
                }
                else
                    Log4(("GstHGCMCall: parm=%u type=embed: cb=0\n", iParm));
                break;


            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                if (fIsUser)
                    return VERR_INVALID_PARAMETER;
                if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ true))
                {
                    cb = pSrcParm->u.Pointer.size;
                    AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
                                    VERR_OUT_OF_RANGE);
                    if (cb != 0)
                        Log4(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p\n",
                              iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr));
                    else
                        Log4(("GstHGCMCall: parm=%u type=%#x: cb=0\n", iParm, pSrcParm->type));
                    break;
                }
                RT_FALL_THRU();

            case VMMDevHGCMParmType_LinAddr_In:
            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
                cb = pSrcParm->u.Pointer.size;
                if (cb != 0)
                {
#ifdef USE_BOUNCE_BUFFERS
                    void       *pvSmallBuf = NULL;
#endif
                    uint32_t    iLockBuf   = pParmInfo->cLockBufs;
                    RTR0MEMOBJ  hObj;
                    int         rc;
                    uint32_t    fAccess    = pSrcParm->type == VMMDevHGCMParmType_LinAddr_In
                                          || pSrcParm->type == VMMDevHGCMParmType_LinAddr_Locked_In
                                           ? RTMEM_PROT_READ
                                           : RTMEM_PROT_READ | RTMEM_PROT_WRITE;

                    AssertReturn(iLockBuf < RT_ELEMENTS(pParmInfo->aLockBufs), VERR_INVALID_PARAMETER);
                    if (!fIsUser)
                    {
                        AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
                                        VERR_OUT_OF_RANGE);
                        rc = RTR0MemObjLockKernel(&hObj, (void *)pSrcParm->u.Pointer.u.linearAddr, cb, fAccess);
                        if (RT_FAILURE(rc))
                        {
                            Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemObjLockKernel(,%p,%#x) -> %Rrc\n",
                                 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                            return rc;
                        }
                        Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p locked kernel -> %p\n",
                              iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
                    }
                    else if (cb > VBGLR0_MAX_HGCM_USER_PARM)
                    {
                        Log(("GstHGCMCall: id=%#x fn=%u parm=%u pv=%p cb=%#x > %#x -> out of range\n",
                             pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr,
                             cb, VBGLR0_MAX_HGCM_USER_PARM));
                        return VERR_OUT_OF_RANGE;
                    }
                    else
                    {
#ifndef USE_BOUNCE_BUFFERS
                        rc = RTR0MemObjLockUser(&hObj, (RTR3PTR)pSrcParm->u.Pointer.u.linearAddr, cb, fAccess, NIL_RTR0PROCESS);
                        if (RT_FAILURE(rc))
                        {
                            Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemObjLockUser(,%p,%#x,nil) -> %Rrc\n",
                                 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                            return rc;
                        }
                        Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p locked user -> %p\n",
                              iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));

#else  /* USE_BOUNCE_BUFFERS */
                        /*
                         * This is a bit massive, but we don't want to waste a
                         * whole page for a 3 byte string buffer (guest props).
                         *
                         * The threshold is ASSUMING sizeof(RTMEMHDR) == 16 and
                         * the system is using some power of two allocator.
                         */
                        /** @todo A more efficient strategy would be to combine buffers. However it
                         *        is probably going to be more massive than the current code, so
                         *        it can wait till later. */
                        bool fCopyIn = pSrcParm->type != VMMDevHGCMParmType_LinAddr_Out
                                    && pSrcParm->type != VMMDevHGCMParmType_LinAddr_Locked_Out;
                        if (cb <= PAGE_SIZE / 2 - 16)
                        {
                            pvSmallBuf = fCopyIn ? RTMemTmpAlloc(cb) : RTMemTmpAllocZ(cb);
                            if (RT_UNLIKELY(!pvSmallBuf))
                                return VERR_NO_MEMORY;
                            if (fCopyIn)
                            {
                                rc = RTR0MemUserCopyFrom(pvSmallBuf, pSrcParm->u.Pointer.u.linearAddr, cb);
                                if (RT_FAILURE(rc))
                                {
                                    RTMemTmpFree(pvSmallBuf);
                                    Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemUserCopyFrom(,%p,%#x) -> %Rrc\n",
                                         pCallInfo->u32ClientID, pCallInfo->u32Function, iParm,
                                         pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                                    return rc;
                                }
                            }
                            rc = RTR0MemObjLockKernel(&hObj, pvSmallBuf, cb, fAccess);
                            if (RT_FAILURE(rc))
                            {
                                RTMemTmpFree(pvSmallBuf);
                                Log(("GstHGCMCall: RTR0MemObjLockKernel failed for small buffer: rc=%Rrc pvSmallBuf=%p cb=%#x\n",
                                     rc, pvSmallBuf, cb));
                                return rc;
                            }
                            Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p small buffer %p -> %p\n",
                                  iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, pvSmallBuf, hObj));
                        }
                        else
                        {
                            rc = RTR0MemObjAllocPage(&hObj, cb, false /*fExecutable*/);
                            if (RT_FAILURE(rc))
                                return rc;
                            if (!fCopyIn)
                                memset(RTR0MemObjAddress(hObj), '\0', cb);
                            else
                            {
                                rc = RTR0MemUserCopyFrom(RTR0MemObjAddress(hObj), pSrcParm->u.Pointer.u.linearAddr, cb);
                                if (RT_FAILURE(rc))
                                {
                                    RTR0MemObjFree(hObj, false /*fFreeMappings*/);
                                    Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemUserCopyFrom(,%p,%#x) -> %Rrc\n",
                                         pCallInfo->u32ClientID, pCallInfo->u32Function, iParm,
                                         pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                                    return rc;
                                }
                            }
                            Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p big buffer -> %p\n",
                                  iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
                        }
#endif /* USE_BOUNCE_BUFFERS */
                    }

                    pParmInfo->aLockBufs[iLockBuf].iParm = iParm;
                    pParmInfo->aLockBufs[iLockBuf].hObj  = hObj;
#ifdef USE_BOUNCE_BUFFERS
                    pParmInfo->aLockBufs[iLockBuf].pvSmallBuf = pvSmallBuf;
#endif
                    pParmInfo->cLockBufs = iLockBuf + 1;

                    if (VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ false))
                    {
                        size_t const cPages = RTR0MemObjSize(hObj) >> PAGE_SHIFT;
                        *pcbExtra += RT_UOFFSETOF_DYN(HGCMPageListInfo, aPages[cPages]);
                    }
                }
                else
                    Log4(("GstHGCMCall: parm=%u type=%#x: cb=0\n", iParm, pSrcParm->type));
                break;

            default:
                return VERR_INVALID_PARAMETER;
        }
    }

    return VINF_SUCCESS;
}
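
/*
 * Illustrative sketch (editor's example, not part of the original file): the
 * extra request space one page-list parameter costs, using the same
 * arithmetic as the validation above.  E.g. cb=0x3000 starting at
 * offFirstPage=0xe00 spans 4 pages, so the HGCMPageListInfo needs room for 4
 * page addresses.  The guard define and helper name are hypothetical.
 */
#ifdef VBGL_HGCM_INTERNAL_EXAMPLES
static size_t vbglExamplePageListExtra(uint32_t cb, uint32_t offFirstPage)
{
    /* Pages covered by cb bytes that begin offFirstPage into the first page. */
    uint32_t const cPages = RT_ALIGN_32(offFirstPage + cb, PAGE_SIZE) >> PAGE_SHIFT;
    /* HGCMPageListInfo is a variable length structure: header + one entry per page. */
    return RT_UOFFSETOF_DYN(HGCMPageListInfo, aPages[cPages]);
}
#endif /* VBGL_HGCM_INTERNAL_EXAMPLES */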


/**
 * Translates locked linear address to the normal type.
 * The locked types are only for the guest side and not handled by the host.
 *
 * @returns normal linear address type.
 * @param   enmType     The type.
 */
static HGCMFunctionParameterType vbglR0HGCMInternalConvertLinAddrType(HGCMFunctionParameterType enmType)
{
    switch (enmType)
    {
        case VMMDevHGCMParmType_LinAddr_Locked_In:
            return VMMDevHGCMParmType_LinAddr_In;
        case VMMDevHGCMParmType_LinAddr_Locked_Out:
            return VMMDevHGCMParmType_LinAddr_Out;
        case VMMDevHGCMParmType_LinAddr_Locked:
            return VMMDevHGCMParmType_LinAddr;
        default:
            return enmType;
    }
}


/**
 * Translates linear address types to page list direction flags.
 *
 * @returns page list flags.
 * @param   enmType     The type.
 */
static uint32_t vbglR0HGCMInternalLinAddrTypeToPageListFlags(HGCMFunctionParameterType enmType)
{
    switch (enmType)
    {
        case VMMDevHGCMParmType_LinAddr_In:
        case VMMDevHGCMParmType_LinAddr_Locked_In:
            return VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;

        case VMMDevHGCMParmType_LinAddr_Out:
        case VMMDevHGCMParmType_LinAddr_Locked_Out:
            return VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;

        default: AssertFailed();
        case VMMDevHGCMParmType_LinAddr:
        case VMMDevHGCMParmType_LinAddr_Locked:
            return VBOX_HGCM_F_PARM_DIRECTION_BOTH;
    }
}


/**
 * Initializes the call request that we're sending to the host.
 *
 * @param   pHGCMCall       The request to initialize.
 * @param   pCallInfo       The call info.
 * @param   cbCallInfo      The size of the call info structure.
 * @param   fRequestor      VMMDEV_REQUESTOR_XXX.
 * @param   fIsUser         Is it a user request or kernel request.
 * @param   pParmInfo       Parameter locking/buffering info from the
 *                          preprocessing pass.
 */
static void vbglR0HGCMInternalInitCall(VMMDevHGCMCall *pHGCMCall, PCVBGLIOCHGCMCALL pCallInfo,
                                       uint32_t cbCallInfo, uint32_t fRequestor, bool fIsUser, struct VbglR0ParmInfo *pParmInfo)
{
    HGCMFunctionParameter const *pSrcParm = VBGL_HGCM_GET_CALL_PARMS(pCallInfo);
    HGCMFunctionParameter       *pDstParm = VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
    uint32_t const               cParms   = pCallInfo->cParms;
    uint32_t                     offExtra = (uint32_t)((uintptr_t)(pDstParm + cParms) - (uintptr_t)pHGCMCall);
    uint32_t                     iLockBuf = 0;
    uint32_t                     iParm;
    RT_NOREF1(cbCallInfo);
#ifndef USE_BOUNCE_BUFFERS
    RT_NOREF1(fIsUser);
#endif

    /*
     * The call request headers.
     */
    pHGCMCall->header.header.fRequestor = !fIsUser || (fRequestor & VMMDEV_REQUESTOR_USERMODE) ? fRequestor
                                        : VMMDEV_REQUESTOR_USERMODE | VMMDEV_REQUESTOR_USR_NOT_GIVEN
                                        | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN | VMMDEV_REQUESTOR_CON_DONT_KNOW;

    pHGCMCall->header.fu32Flags = 0;
    pHGCMCall->header.result    = VINF_SUCCESS;

    pHGCMCall->u32ClientID = pCallInfo->u32ClientID;
    pHGCMCall->u32Function = pCallInfo->u32Function;
    pHGCMCall->cParms      = cParms;

    /*
     * The parameters.
     */
    for (iParm = 0; iParm < cParms; iParm++, pSrcParm++, pDstParm++)
    {
        switch (pSrcParm->type)
        {
            case VMMDevHGCMParmType_32bit:
            case VMMDevHGCMParmType_64bit:
                *pDstParm = *pSrcParm;
                break;

            case VMMDevHGCMParmType_PageList:
            case VMMDevHGCMParmType_ContiguousPageList:
                pDstParm->type = pSrcParm->type;
                pDstParm->u.PageList.size = pSrcParm->u.PageList.size;
                if (pSrcParm->u.PageList.size)
                {
                    HGCMPageListInfo const *pSrcPgLst = (HGCMPageListInfo *)((uint8_t *)pCallInfo + pSrcParm->u.PageList.offset);
                    HGCMPageListInfo       *pDstPgLst = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offExtra);
                    uint32_t const          cPages    = pSrcPgLst->cPages;
                    uint32_t                iPage;

                    pDstParm->u.PageList.offset = offExtra;
                    pDstPgLst->flags            = pSrcPgLst->flags;
                    pDstPgLst->offFirstPage     = pSrcPgLst->offFirstPage;
                    pDstPgLst->cPages           = (uint16_t)cPages;
                    for (iPage = 0; iPage < cPages; iPage++)
                        pDstPgLst->aPages[iPage] = pSrcPgLst->aPages[iPage];

                    offExtra += RT_UOFFSETOF_DYN(HGCMPageListInfo, aPages[cPages]);
                }
                else
                    pDstParm->u.PageList.offset = 0; /** @todo will fail on the host side now */
                break;

            case VMMDevHGCMParmType_Embedded:
            {
                uint32_t const cb = pSrcParm->u.Embedded.cbData;
                pDstParm->type = VMMDevHGCMParmType_Embedded;
                pDstParm->u.Embedded.cbData  = cb;
                pDstParm->u.Embedded.offData = offExtra;
                if (cb > 0)
                {
                    uint8_t *pbDst = (uint8_t *)pHGCMCall + offExtra;
                    if (pSrcParm->u.Embedded.fFlags & VBOX_HGCM_F_PARM_DIRECTION_TO_HOST)
                    {
                        memcpy(pbDst, (uint8_t const *)pCallInfo + pSrcParm->u.Embedded.offData, cb);
                        if (RT_ALIGN(cb, 8) != cb)
                            memset(pbDst + cb, 0, RT_ALIGN(cb, 8) - cb);
                    }
                    else
                        RT_BZERO(pbDst, RT_ALIGN(cb, 8));
                    offExtra += RT_ALIGN(cb, 8);
                }
                break;
            }

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ true))
                {
                    *pDstParm = *pSrcParm;
                    pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                    break;
                }
                RT_FALL_THRU();

            case VMMDevHGCMParmType_LinAddr_In:
            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
                if (pSrcParm->u.Pointer.size != 0)
                {
#ifdef USE_BOUNCE_BUFFERS
                    void      *pvSmallBuf = pParmInfo->aLockBufs[iLockBuf].pvSmallBuf;
#endif
                    RTR0MEMOBJ hObj       = pParmInfo->aLockBufs[iLockBuf].hObj;
                    Assert(iParm == pParmInfo->aLockBufs[iLockBuf].iParm);

                    if (VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ false))
                    {
                        HGCMPageListInfo *pDstPgLst = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offExtra);
                        size_t const      cPages    = RTR0MemObjSize(hObj) >> PAGE_SHIFT;
                        size_t            iPage;

                        pDstParm->type = VMMDevHGCMParmType_PageList;
                        pDstParm->u.PageList.size   = pSrcParm->u.Pointer.size;
                        pDstParm->u.PageList.offset = offExtra;
                        pDstPgLst->flags            = vbglR0HGCMInternalLinAddrTypeToPageListFlags(pSrcParm->type);
#ifdef USE_BOUNCE_BUFFERS
                        if (fIsUser)
                            pDstPgLst->offFirstPage = (uintptr_t)pvSmallBuf & PAGE_OFFSET_MASK;
                        else
#endif
                            pDstPgLst->offFirstPage = (uint16_t)(pSrcParm->u.Pointer.u.linearAddr & PAGE_OFFSET_MASK);
                        pDstPgLst->cPages           = (uint16_t)cPages; Assert(pDstPgLst->cPages == cPages);
                        for (iPage = 0; iPage < cPages; iPage++)
                        {
                            pDstPgLst->aPages[iPage] = RTR0MemObjGetPagePhysAddr(hObj, iPage);
                            Assert(pDstPgLst->aPages[iPage] != NIL_RTHCPHYS);
                        }

                        offExtra += RT_UOFFSETOF_DYN(HGCMPageListInfo, aPages[cPages]);
                    }
                    else
                    {
                        pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                        pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
#ifdef USE_BOUNCE_BUFFERS
                        if (fIsUser)
                            pDstParm->u.Pointer.u.linearAddr = pvSmallBuf
                                                             ? (uintptr_t)pvSmallBuf
                                                             : (uintptr_t)RTR0MemObjAddress(hObj);
                        else
#endif
                            pDstParm->u.Pointer.u.linearAddr = pSrcParm->u.Pointer.u.linearAddr;
                    }
                    iLockBuf++;
                }
                else
                {
                    pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                    pDstParm->u.Pointer.size         = 0;
                    pDstParm->u.Pointer.u.linearAddr = 0;
                }
                break;

            default:
                AssertFailed();
                pDstParm->type = VMMDevHGCMParmType_Invalid;
                break;
        }
    }
}
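
/*
 * Illustrative sketch (editor's example, not part of the original file): the
 * size of the physical request a call with one embedded buffer ends up with,
 * matching the layout produced above (request header, parameter array, then
 * each embedded payload 8-byte aligned).  The guard define and helper name
 * are hypothetical.
 */
#ifdef VBGL_HGCM_INTERNAL_EXAMPLES
static uint32_t vbglExampleEmbeddedCallSize(uint32_t cParms, uint32_t cbData)
{
    return sizeof(VMMDevHGCMCall)
         + cParms * sizeof(HGCMFunctionParameter)
         + RT_ALIGN_32(cbData, 8); /* the cbExtra contribution of one embedded parameter */
}
#endif /* VBGL_HGCM_INTERNAL_EXAMPLES */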


/**
 * Performs the call and completion wait.
 *
 * @returns VBox status code of this operation, not necessarily the call.
 *
 * @param   pHGCMCall           The HGCM call info.
 * @param   pfnAsyncCallback    The async callback that will wait for the call
 *                              to complete.
 * @param   pvAsyncData         Argument for the callback.
 * @param   u32AsyncData        Argument for the callback.
 * @param   pfLeakIt            Where to return the leak it / free it
 *                              indicator. Cancellation fun.
 */
static int vbglR0HGCMInternalDoCall(VMMDevHGCMCall *pHGCMCall, PFNVBGLHGCMCALLBACK pfnAsyncCallback,
                                    void *pvAsyncData, uint32_t u32AsyncData, bool *pfLeakIt)
{
    int rc;

    Log(("calling VbglR0GRPerform\n"));
    rc = VbglR0GRPerform(&pHGCMCall->header.header);
    Log(("VbglR0GRPerform rc = %Rrc (header rc=%d)\n", rc, pHGCMCall->header.result));

    /*
     * If the call failed, but as a result of the request itself, then pretend
     * success. Upper layers will interpret the result code in the packet.
     */
    if (   RT_FAILURE(rc)
        && rc == pHGCMCall->header.result)
    {
        Assert(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE);
        rc = VINF_SUCCESS;
    }

    /*
     * Check if host decides to process the request asynchronously,
     * if so, we wait for it to complete using the caller supplied callback.
     */
    *pfLeakIt = false;
    if (rc == VINF_HGCM_ASYNC_EXECUTE)
    {
        Log(("Processing HGCM call asynchronously\n"));
        rc = pfnAsyncCallback(&pHGCMCall->header, pvAsyncData, u32AsyncData);
        if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
        {
            Assert(!(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_CANCELLED));
            rc = VINF_SUCCESS;
        }
        else
        {
            /*
             * The request didn't complete in time or the call was interrupted,
             * the RC from the callback indicates which. Try to cancel the request.
             *
             * This is a bit messy because we're racing request completion. Sorry.
             */
            /** @todo It would be nice if we could use the waiter callback to do further
             *        waiting in case of a completion race. If it wasn't for WINNT having its own
             *        version of all that stuff, I would've done it already. */
            VMMDevHGCMCancel2 *pCancelReq;
            int rc2 = VbglR0GRAlloc((VMMDevRequestHeader **)&pCancelReq, sizeof(*pCancelReq), VMMDevReq_HGCMCancel2);
            if (RT_SUCCESS(rc2))
            {
                pCancelReq->physReqToCancel = VbglR0PhysHeapGetPhysAddr(pHGCMCall);
                rc2 = VbglR0GRPerform(&pCancelReq->header);
                VbglR0GRFree(&pCancelReq->header);
            }
#if 1 /** @todo ADDVER: Remove this on next minor version change. */
            if (rc2 == VERR_NOT_IMPLEMENTED)
            {
                /* host is too old, or we're out of heap. */
                pHGCMCall->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;
                pHGCMCall->header.header.requestType = VMMDevReq_HGCMCancel;
                rc2 = VbglR0GRPerform(&pHGCMCall->header.header);
                if (rc2 == VERR_INVALID_PARAMETER)
                    rc2 = VERR_NOT_FOUND;
                else if (RT_SUCCESS(rc))
                    RTThreadSleep(1);
            }
#endif
            if (RT_SUCCESS(rc)) rc = VERR_INTERRUPTED; /** @todo weed this out from the WINNT VBoxGuest code. */
            if (RT_SUCCESS(rc2))
            {
                Log(("vbglR0HGCMInternalDoCall: successfully cancelled\n"));
                pHGCMCall->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;
            }
            else
            {
                /*
                 * Wait for a bit while the host (hopefully) completes it.
                 */
                uint64_t u64Start       = RTTimeSystemMilliTS();
                uint32_t cMilliesToWait = rc2 == VERR_NOT_FOUND || rc2 == VERR_SEM_DESTROYED ? 500 : 2000;
                uint64_t cElapsed       = 0;
                if (rc2 != VERR_NOT_FOUND)
                {
                    static unsigned s_cErrors = 0;
                    if (s_cErrors++ < 32)
                        LogRel(("vbglR0HGCMInternalDoCall: Failed to cancel the HGCM call on %Rrc: rc2=%Rrc\n", rc, rc2));
                }
                else
                    Log(("vbglR0HGCMInternalDoCall: Cancel race rc=%Rrc rc2=%Rrc\n", rc, rc2));

                do
                {
                    ASMCompilerBarrier(); /* paranoia */
                    if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
                        break;
                    RTThreadSleep(1);
                    cElapsed = RTTimeSystemMilliTS() - u64Start;
                } while (cElapsed < cMilliesToWait);

                ASMCompilerBarrier(); /* paranoia^2 */
                if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
                    rc = VINF_SUCCESS;
                else
                {
                    LogRel(("vbglR0HGCMInternalDoCall: Leaking %u bytes. Pending call to %u with %u parms. (rc2=%Rrc)\n",
                            pHGCMCall->header.header.size, pHGCMCall->u32Function, pHGCMCall->cParms, rc2));
                    *pfLeakIt = true;
                }
                Log(("vbglR0HGCMInternalDoCall: Cancel race ended with rc=%Rrc (rc2=%Rrc) after %llu ms\n", rc, rc2, cElapsed));
            }
        }
    }

    Log(("GstHGCMCall: rc=%Rrc result=%Rrc fu32Flags=%#x fLeakIt=%d\n",
         rc, pHGCMCall->header.result, pHGCMCall->header.fu32Flags, *pfLeakIt));
    return rc;
}
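
/*
 * Illustrative sketch (editor's example, not part of the original file): a
 * wait callback that gives up after a while, which is what drives the
 * cancellation path in vbglR0HGCMInternalDoCall above.  The 30 second limit
 * is an arbitrary assumption, and the real VBoxGuest waiters block on an
 * interrupt-signalled event and honour signals instead of polling.
 */
#ifdef VBGL_HGCM_INTERNAL_EXAMPLES
static DECLCALLBACK(int) vbglExampleTimedWait(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
{
    uint64_t const msStart = RTTimeSystemMilliTS();
    RT_NOREF(pvUser, u32User);
    while (!(pHdr->fu32Flags & VBOX_HGCM_REQ_DONE))
    {
        if (RTTimeSystemMilliTS() - msStart > 30000)
            return VERR_TIMEOUT; /* makes the caller attempt VMMDevReq_HGCMCancel2 */
        RTThreadSleep(1);
    }
    return VINF_SUCCESS;
}
#endif /* VBGL_HGCM_INTERNAL_EXAMPLES */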


/**
 * Copies the result of the call back to the caller info structure and user
 * buffers (if using bounce buffers).
 *
 * @returns rc, unless RTR0MemUserCopyTo fails.
 * @param   pCallInfo       Call info structure to update.
 * @param   cbCallInfo      The size of the client request.
 * @param   pHGCMCall       HGCM call request.
 * @param   cbHGCMCall      The size of the HGCM call request.
 * @param   pParmInfo       Parameter locking/buffering info.
 * @param   fIsUser         Is it a user (true) or kernel request.
 * @param   rc              The current result code. Passed along to
 *                          preserve informational status codes.
 */
static int vbglR0HGCMInternalCopyBackResult(PVBGLIOCHGCMCALL pCallInfo, uint32_t cbCallInfo,
                                            VMMDevHGCMCall const *pHGCMCall, uint32_t cbHGCMCall,
                                            struct VbglR0ParmInfo *pParmInfo, bool fIsUser, int rc)
{
    HGCMFunctionParameter const *pSrcParm = VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
    HGCMFunctionParameter       *pDstParm = VBGL_HGCM_GET_CALL_PARMS(pCallInfo);
    uint32_t const               cParms   = pCallInfo->cParms;
#ifdef USE_BOUNCE_BUFFERS
    uint32_t                     iLockBuf = 0;
#endif
    uint32_t                     iParm;
    RT_NOREF1(pParmInfo);
#ifndef USE_BOUNCE_BUFFERS
    RT_NOREF1(fIsUser);
#endif

    /*
     * The call result.
     */
    pCallInfo->Hdr.rc = pHGCMCall->header.result;

    /*
     * Copy back parameters.
     */
    /** @todo This is assuming user data (pDstParm) is buffered. Not true
     *        on OS/2, though I'm not sure we care... */
    for (iParm = 0; iParm < cParms; iParm++, pSrcParm++, pDstParm++)
    {
        switch (pDstParm->type)
        {
            case VMMDevHGCMParmType_32bit:
            case VMMDevHGCMParmType_64bit:
                *pDstParm = *pSrcParm;
                break;

            case VMMDevHGCMParmType_PageList:
            case VMMDevHGCMParmType_ContiguousPageList:
                pDstParm->u.PageList.size = pSrcParm->u.PageList.size;
                break;

            case VMMDevHGCMParmType_Embedded:
            {
                uint32_t const cbDst = pDstParm->u.Embedded.cbData;
                uint32_t       cbSrc;
                pDstParm->u.Embedded.cbData = cbSrc = pSrcParm->u.Embedded.cbData;
                if (   cbSrc > 0
                    && (pDstParm->u.Embedded.fFlags & VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST))
                {
                    uint32_t const offDst = pDstParm->u.Embedded.offData;
                    uint32_t const offSrc = pSrcParm->u.Embedded.offData;

                    AssertReturn(offDst < cbCallInfo, VERR_INTERNAL_ERROR_2);
                    AssertReturn(offDst >= sizeof(*pCallInfo) + cParms * sizeof(*pDstParm), VERR_INTERNAL_ERROR_2);
                    AssertReturn(cbDst <= cbCallInfo - offDst, VERR_INTERNAL_ERROR_2);

                    AssertReturn(offSrc < cbHGCMCall, VERR_INTERNAL_ERROR_2);
                    AssertReturn(offSrc >= sizeof(*pHGCMCall) + cParms * sizeof(*pSrcParm), VERR_INTERNAL_ERROR_2);
                    if (cbSrc <= cbHGCMCall - offSrc)
                    { /* likely */ }
                    else
                    {
                        /* Special case: Buffer overflow w/ correct size given. */
                        AssertReturn(RT_FAILURE_NP(rc), VERR_INTERNAL_ERROR_2);
                        cbSrc = cbHGCMCall - offSrc;
                    }
                    memcpy((uint8_t *)pCallInfo + offDst, (uint8_t const *)pHGCMCall + offSrc, RT_MIN(cbSrc, cbDst));
                }
                break;
            }

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_In:
#ifdef USE_BOUNCE_BUFFERS
                if (   fIsUser
                    && iLockBuf < pParmInfo->cLockBufs
                    && iParm == pParmInfo->aLockBufs[iLockBuf].iParm)
                    iLockBuf++;
#endif
                pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST(/*a_fLocked =*/ true))
                {
                    pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                    break;
                }
                RT_FALL_THRU();

            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
            {
#ifdef USE_BOUNCE_BUFFERS
                if (fIsUser)
                {
                    size_t cbOut = RT_MIN(pSrcParm->u.Pointer.size, pDstParm->u.Pointer.size);
                    if (cbOut)
                    {
                        int rc2;
                        Assert(pParmInfo->aLockBufs[iLockBuf].iParm == iParm);
                        rc2 = RTR0MemUserCopyTo((RTR3PTR)pDstParm->u.Pointer.u.linearAddr,
                                                pParmInfo->aLockBufs[iLockBuf].pvSmallBuf
                                                ? pParmInfo->aLockBufs[iLockBuf].pvSmallBuf
                                                : RTR0MemObjAddress(pParmInfo->aLockBufs[iLockBuf].hObj),
                                                cbOut);
                        if (RT_FAILURE(rc2))
                            return rc2;
                        iLockBuf++;
                    }
                    else if (   iLockBuf < pParmInfo->cLockBufs
                             && iParm == pParmInfo->aLockBufs[iLockBuf].iParm)
                        iLockBuf++;
                }
#endif
                pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                break;
            }

            default:
                AssertFailed();
                rc = VERR_INTERNAL_ERROR_4;
                break;
        }
    }

#ifdef USE_BOUNCE_BUFFERS
    Assert(!fIsUser || pParmInfo->cLockBufs == iLockBuf);
#endif
    return rc;
}


DECLR0VBGL(int) VbglR0HGCMInternalCall(PVBGLIOCHGCMCALL pCallInfo, uint32_t cbCallInfo, uint32_t fFlags, uint32_t fRequestor,
                                       PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
{
    bool                  fIsUser = (fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_USER;
    struct VbglR0ParmInfo ParmInfo;
    size_t                cbExtra;
    int                   rc;

    /*
     * Basic validation.
     */
    AssertMsgReturn(   RT_VALID_PTR(pCallInfo)
                    && RT_VALID_PTR(pfnAsyncCallback)
                    && pCallInfo->cParms <= VBOX_HGCM_MAX_PARMS
                    && !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
                    ("pCallInfo=%p pfnAsyncCallback=%p fFlags=%#x\n", pCallInfo, pfnAsyncCallback, fFlags),
                    VERR_INVALID_PARAMETER);
    AssertReturn(   cbCallInfo >= sizeof(VBGLIOCHGCMCALL)
                 && cbCallInfo >= pCallInfo->cParms * sizeof(HGCMFunctionParameter),
                 VERR_INVALID_PARAMETER);

    Log(("GstHGCMCall: u32ClientID=%#x u32Function=%u cParms=%u cbCallInfo=%#x fFlags=%#x\n",
         pCallInfo->u32ClientID, pCallInfo->u32Function, pCallInfo->cParms, cbCallInfo, fFlags));

    /*
     * Validate, lock and buffer the parameters for the call.
     * This will calculate the amount of extra space for physical page list.
     */
    rc = vbglR0HGCMInternalPreprocessCall(pCallInfo, cbCallInfo, fIsUser, &ParmInfo, &cbExtra);
    if (RT_SUCCESS(rc))
    {
        /*
         * Allocate the request buffer and recreate the call request.
         */
        VMMDevHGCMCall *pHGCMCall;
        uint32_t const  cbHGCMCall = sizeof(VMMDevHGCMCall) + pCallInfo->cParms * sizeof(HGCMFunctionParameter) + (uint32_t)cbExtra;
        rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pHGCMCall, cbHGCMCall, VMMDevReq_HGCMCall);
        if (RT_SUCCESS(rc))
        {
            bool fLeakIt;
            vbglR0HGCMInternalInitCall(pHGCMCall, pCallInfo, cbCallInfo, fRequestor, fIsUser, &ParmInfo);

            /*
             * Perform the call.
             */
            rc = vbglR0HGCMInternalDoCall(pHGCMCall, pfnAsyncCallback, pvAsyncData, u32AsyncData, &fLeakIt);
            if (RT_SUCCESS(rc))
            {
                /*
                 * Copy back the result (parameters and buffers that changed).
                 */
                rc = vbglR0HGCMInternalCopyBackResult(pCallInfo, cbCallInfo, pHGCMCall, cbHGCMCall, &ParmInfo, fIsUser, rc);
            }
            else
            {
                if (   rc != VERR_INTERRUPTED
                    && rc != VERR_TIMEOUT)
                {
                    static unsigned s_cErrors = 0;
                    if (s_cErrors++ < 32)
                        LogRel(("VbglR0HGCMInternalCall: vbglR0HGCMInternalDoCall failed. rc=%Rrc\n", rc));
                }
            }

            if (!fLeakIt)
                VbglR0GRFree(&pHGCMCall->header.header);
        }
    }
    else
        LogRel(("VbglR0HGCMInternalCall: vbglR0HGCMInternalPreprocessCall failed. rc=%Rrc\n", rc));

    /*
     * Release locks and free bounce buffers.
     */
    if (ParmInfo.cLockBufs)
        while (ParmInfo.cLockBufs-- > 0)
        {
            RTR0MemObjFree(ParmInfo.aLockBufs[ParmInfo.cLockBufs].hObj, false /*fFreeMappings*/);
#ifdef USE_BOUNCE_BUFFERS
            RTMemTmpFree(ParmInfo.aLockBufs[ParmInfo.cLockBufs].pvSmallBuf);
#endif
        }

    return rc;
}
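
/*
 * Illustrative sketch (editor's example, not part of the original file): a
 * complete kernel-mode call with a single 32-bit parameter.  The message
 * layout mirrors what VbglR0HGCMInternalCall expects.  Assumptions: the
 * VBGL_HGCM_HDR_INIT initializer macro from VBoxGuestLib.h, the function
 * number (1), and the reuse of the timed-wait callback from the sketch above.
 */
#ifdef VBGL_HGCM_INTERNAL_EXAMPLES
static int vbglExampleCallU32(HGCMCLIENTID idClient, uint32_t uValue)
{
    struct
    {
        VBGLIOCHGCMCALL       Hdr;  /* fixed header */
        HGCMFunctionParameter Parm; /* exactly one parameter follows it */
    } Msg;

    VBGL_HGCM_HDR_INIT(&Msg.Hdr, idClient, 1 /*idFunction, hypothetical*/, 1 /*cParms*/);
    Msg.Parm.type      = VMMDevHGCMParmType_32bit;
    Msg.Parm.u.value32 = uValue;

    return VbglR0HGCMInternalCall(&Msg.Hdr, sizeof(Msg), VBGLR0_HGCMCALL_F_KERNEL, 0 /*fRequestor*/,
                                  vbglExampleTimedWait, NULL /*pvAsyncData*/, 0 /*u32AsyncData*/);
}
#endif /* VBGL_HGCM_INTERNAL_EXAMPLES */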


#if ARCH_BITS == 64
DECLR0VBGL(int) VbglR0HGCMInternalCall32(PVBGLIOCHGCMCALL pCallInfo, uint32_t cbCallInfo, uint32_t fFlags, uint32_t fRequestor,
                                         PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
{
    PVBGLIOCHGCMCALL         pCallInfo64 = NULL;
    HGCMFunctionParameter   *pParm64     = NULL;
    HGCMFunctionParameter32 *pParm32     = NULL;
    uint32_t                 cParms      = 0;
    uint32_t                 iParm       = 0;
    int                      rc          = VINF_SUCCESS;

    /*
     * Input validation.
     */
    AssertMsgReturn(   RT_VALID_PTR(pCallInfo)
                    && RT_VALID_PTR(pfnAsyncCallback)
                    && pCallInfo->cParms <= VBOX_HGCM_MAX_PARMS
                    && !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
                    ("pCallInfo=%p pfnAsyncCallback=%p fFlags=%#x\n", pCallInfo, pfnAsyncCallback, fFlags),
                    VERR_INVALID_PARAMETER);
    AssertReturn(   cbCallInfo >= sizeof(VBGLIOCHGCMCALL)
                 && cbCallInfo >= pCallInfo->cParms * sizeof(HGCMFunctionParameter32),
                 VERR_INVALID_PARAMETER);

    /* This Assert does not work on Solaris/Windows 64/32 mixed mode, not sure why, skipping for now */
#if !defined(RT_OS_SOLARIS) && !defined(RT_OS_WINDOWS)
    AssertReturn((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_KERNEL, VERR_WRONG_ORDER);
#endif

    cParms = pCallInfo->cParms;
    Log(("VbglR0HGCMInternalCall32: cParms=%d, u32Function=%d, fFlags=%#x\n", cParms, pCallInfo->u32Function, fFlags));

    /*
     * The simple approach, allocate a temporary request and convert the parameters.
     */
    pCallInfo64 = (PVBGLIOCHGCMCALL)RTMemTmpAllocZ(sizeof(*pCallInfo64) + cParms * sizeof(HGCMFunctionParameter));
    if (!pCallInfo64)
        return VERR_NO_TMP_MEMORY;

    *pCallInfo64 = *pCallInfo;
    pParm32 = VBGL_HGCM_GET_CALL_PARMS32(pCallInfo);
    pParm64 = VBGL_HGCM_GET_CALL_PARMS(pCallInfo64);
    for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++)
    {
        switch (pParm32->type)
        {
            case VMMDevHGCMParmType_32bit:
                pParm64->type      = VMMDevHGCMParmType_32bit;
                pParm64->u.value32 = pParm32->u.value32;
                break;

            case VMMDevHGCMParmType_64bit:
                pParm64->type      = VMMDevHGCMParmType_64bit;
                pParm64->u.value64 = pParm32->u.value64;
                break;

            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
            case VMMDevHGCMParmType_LinAddr_In:
                pParm64->type                   = pParm32->type;
                pParm64->u.Pointer.size         = pParm32->u.Pointer.size;
                pParm64->u.Pointer.u.linearAddr = pParm32->u.Pointer.u.linearAddr;
                break;

            default:
                rc = VERR_INVALID_PARAMETER;
                LogRel(("VbglR0HGCMInternalCall32: pParm32 type %#x invalid.\n", pParm32->type));
                break;
        }
        if (RT_FAILURE(rc))
            break;
    }
    if (RT_SUCCESS(rc))
    {
        rc = VbglR0HGCMInternalCall(pCallInfo64, sizeof(*pCallInfo64) + cParms * sizeof(HGCMFunctionParameter), fFlags,
                                    fRequestor, pfnAsyncCallback, pvAsyncData, u32AsyncData);

        if (RT_SUCCESS(rc))
        {
            *pCallInfo = *pCallInfo64;

            /*
             * Copy back.
             */
            pParm32 = VBGL_HGCM_GET_CALL_PARMS32(pCallInfo);
            pParm64 = VBGL_HGCM_GET_CALL_PARMS(pCallInfo64);
            for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++)
            {
                switch (pParm64->type)
                {
                    case VMMDevHGCMParmType_32bit:
                        pParm32->u.value32 = pParm64->u.value32;
                        break;

                    case VMMDevHGCMParmType_64bit:
                        pParm32->u.value64 = pParm64->u.value64;
                        break;

                    case VMMDevHGCMParmType_LinAddr_Out:
                    case VMMDevHGCMParmType_LinAddr:
                    case VMMDevHGCMParmType_LinAddr_In:
                        pParm32->u.Pointer.size = pParm64->u.Pointer.size;
                        break;

                    default:
                        LogRel(("VbglR0HGCMInternalCall32: failed invalid pParm32 type %d\n", pParm32->type));
                        rc = VERR_INTERNAL_ERROR_3;
                        break;
                }
            }
        }
        else
        {
            static unsigned s_cErrors = 0;
            if (s_cErrors++ < 32)
                LogRel(("VbglR0HGCMInternalCall32: VbglR0HGCMInternalCall failed. rc=%Rrc\n", rc));
        }
    }
    else
    {
        static unsigned s_cErrors = 0;
        if (s_cErrors++ < 32)
            LogRel(("VbglR0HGCMInternalCall32: failed. rc=%Rrc\n", rc));
    }

    RTMemTmpFree(pCallInfo64);
    return rc;
}
#endif /* ARCH_BITS == 64 */