VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuestLib/HGCMInternal.cpp@21487

Last change on this file since 21487 was 21487, checked in by vboxsync, 15 years ago

VbglR0HGCMInternal.cpp: Less wasteful bounce buffering, some bugfixes, logging.

/* $Revision: 21487 $ */
/** @file
 * VBoxGuestLib - Host-Guest Communication Manager internal functions, implemented by VBoxGuest
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/* Entire file is ifdef'ed with VBGL_VBOXGUEST */
#ifdef VBGL_VBOXGUEST

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "VBGLInternal.h"
#include <iprt/alloca.h>
#include <iprt/assert.h>
#include <iprt/mem.h>
#include <iprt/memobj.h>
#include <iprt/string.h>

/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The max parameter buffer size for a user request. */
#define VBGLR0_MAX_HGCM_USER_PARM       _1M
/** The max parameter buffer size for a kernel request. */
#define VBGLR0_MAX_HGCM_KERNEL_PARM     (16*_1M)
#ifdef RT_OS_LINUX
/** Linux needs to use bounce buffers since RTR0MemObjLockUser has unwanted
 * side effects. */
# define USE_BOUNCE_BUFFERS
#endif

/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * Lock info structure used by VbglR0HGCMInternalCall and its helpers.
 */
struct VbglR0ParmInfo
{
    uint32_t cLockBufs;
    struct
    {
        uint32_t    iParm;
        RTR0MEMOBJ  hObj;
#ifdef USE_BOUNCE_BUFFERS
        void       *pvSmallBuf;
#endif
    } aLockBufs[10];    /* caps the number of locked/buffered parameters per call */
};



/* These functions can only be used by VBoxGuest. */

DECLVBGL(int) VbglR0HGCMInternalConnect (VBoxGuestHGCMConnectInfo *pConnectInfo,
                                         VBGLHGCMCALLBACK *pAsyncCallback, void *pvAsyncData,
                                         uint32_t u32AsyncData)
{
    VMMDevHGCMConnect *pHGCMConnect;
    int rc;

    if (!pConnectInfo || !pAsyncCallback)
        return VERR_INVALID_PARAMETER;

    pHGCMConnect = NULL;

    /* Allocate request */
    rc = VbglGRAlloc ((VMMDevRequestHeader **)&pHGCMConnect, sizeof (VMMDevHGCMConnect), VMMDevReq_HGCMConnect);

    if (RT_SUCCESS(rc))
    {
        /* Initialize request memory */
        pHGCMConnect->header.fu32Flags = 0;

        memcpy (&pHGCMConnect->loc, &pConnectInfo->Loc, sizeof (HGCMServiceLocation));
        pHGCMConnect->u32ClientID = 0;

        /* Issue request */
        rc = VbglGRPerform (&pHGCMConnect->header.header);

        if (RT_SUCCESS(rc))
        {
            /* Check if host decides to process the request asynchronously. */
            if (rc == VINF_HGCM_ASYNC_EXECUTE)
            {
                /* Wait for request completion interrupt notification from host */
                pAsyncCallback (&pHGCMConnect->header, pvAsyncData, u32AsyncData);
            }

            pConnectInfo->result = pHGCMConnect->header.result;

            if (RT_SUCCESS (pConnectInfo->result))
                pConnectInfo->u32ClientID = pHGCMConnect->u32ClientID;
        }

        VbglGRFree (&pHGCMConnect->header.header);
    }

    return rc;
}


DECLR0VBGL(int) VbglR0HGCMInternalDisconnect (VBoxGuestHGCMDisconnectInfo *pDisconnectInfo,
                                              VBGLHGCMCALLBACK *pAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
{
    VMMDevHGCMDisconnect *pHGCMDisconnect;
    int rc;

    if (!pDisconnectInfo || !pAsyncCallback)
        return VERR_INVALID_PARAMETER;

    pHGCMDisconnect = NULL;

    /* Allocate request */
    rc = VbglGRAlloc ((VMMDevRequestHeader **)&pHGCMDisconnect, sizeof (VMMDevHGCMDisconnect), VMMDevReq_HGCMDisconnect);

    if (RT_SUCCESS(rc))
    {
        /* Initialize request memory */
        pHGCMDisconnect->header.fu32Flags = 0;

        pHGCMDisconnect->u32ClientID = pDisconnectInfo->u32ClientID;

        /* Issue request */
        rc = VbglGRPerform (&pHGCMDisconnect->header.header);

        if (RT_SUCCESS(rc))
        {
            /* Check if host decides to process the request asynchronously. */
            if (rc == VINF_HGCM_ASYNC_EXECUTE)
            {
                /* Wait for request completion interrupt notification from host */
                pAsyncCallback (&pHGCMDisconnect->header, pvAsyncData, u32AsyncData);
            }

            pDisconnectInfo->result = pHGCMDisconnect->header.result;
        }

        VbglGRFree (&pHGCMDisconnect->header.header);
    }

    return rc;
}

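/*
 * Illustrative only: a minimal sketch of how a caller inside VBoxGuest might
 * pair the two functions above. The service name, the vboxGuestHgcmPollCallback
 * helper (sketched further down, after vbglR0HGCMInternalDoCall) and the
 * callback context arguments are hypothetical stand-ins, not part of this file.
 *
 * @code
 *  VBoxGuestHGCMConnectInfo Info;
 *  memset(&Info, 0, sizeof(Info));
 *  Info.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
 *  strcpy(Info.Loc.u.host.achName, "VBoxSharedFolders");   // hypothetical choice of service
 *  int rc = VbglR0HGCMInternalConnect(&Info, vboxGuestHgcmPollCallback, NULL /*pvAsyncData*/, 0 /*u32AsyncData*/);
 *  if (RT_SUCCESS(rc) && RT_SUCCESS(Info.result))
 *  {
 *      // ... issue calls using Info.u32ClientID ...
 *
 *      VBoxGuestHGCMDisconnectInfo DiscInfo;
 *      DiscInfo.u32ClientID = Info.u32ClientID;
 *      rc = VbglR0HGCMInternalDisconnect(&DiscInfo, vboxGuestHgcmPollCallback, NULL, 0);
 *  }
 * @endcode
 */
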
#if 0 /* new code using page list and whatnot. */

/**
 * Preprocesses the HGCM call, validating and locking/buffering parameters.
 *
 * @returns VBox status code.
 *
 * @param   pCallInfo       The call info.
 * @param   cbCallInfo      The size of the call info structure.
 * @param   fIsUser         Is it a user request or kernel request.
 * @param   pParmInfo       Where to store info about locked/buffered parameters.
 * @param   pcbExtra        Where to return the extra request space needed for
 *                          physical page lists.
 */
static int vbglR0HGCMInternalPreprocessCall(VBoxGuestHGCMCallInfo const *pCallInfo, uint32_t cbCallInfo,
                                            bool fIsUser, struct VbglR0ParmInfo *pParmInfo, size_t *pcbExtra)
{
    HGCMFunctionParameter const *pSrcParm = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo);
    uint32_t    cParms = pCallInfo->cParms;
    uint32_t    iParm;
    uint32_t    cb;

    /*
     * Lock down any linear buffers so we can get their addresses
     * and figure out how much extra storage we need for page lists.
     *
     * Note! With kernel mode users we can be assertive. For user mode users
     *       we should just (debug) log it and fail without any fanfare.
     */
    *pcbExtra = 0;
    pParmInfo->cLockBufs = 0;
    for (iParm = 0; iParm < cParms; iParm++, pSrcParm++)
    {
        switch (pSrcParm->type)
        {
            case VMMDevHGCMParmType_32bit:
                Log4(("GstHGCMCall: parm=%u type=32bit: %#010x\n", iParm, pSrcParm->u.value32));
                break;

            case VMMDevHGCMParmType_64bit:
                Log4(("GstHGCMCall: parm=%u type=64bit: %#018RX64\n", iParm, pSrcParm->u.value64));
                break;

            case VMMDevHGCMParmType_PageList:
                if (fIsUser)
                    return VERR_INVALID_PARAMETER;
                cb = pSrcParm->u.PageList.size;
                if (cb)
                {
                    uint32_t            off = pSrcParm->u.PageList.offset;
                    HGCMPageListInfo   *pPgLst;
                    uint32_t            cPages;
                    uint32_t            u32;

                    AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
                                    VERR_OUT_OF_RANGE);
                    AssertMsgReturn(   off >= pCallInfo->cParms * sizeof(HGCMFunctionParameter)
                                    && off < cbCallInfo - sizeof(HGCMPageListInfo),
                                    ("offset=%#x cParms=%#x cbCallInfo=%#x\n", off, pCallInfo->cParms, cbCallInfo),
                                    VERR_INVALID_PARAMETER);

                    pPgLst = (HGCMPageListInfo *)((uint8_t *)pCallInfo + off);
                    cPages = pPgLst->cPages;
                    u32    = RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]) + off;
                    AssertMsgReturn(u32 <= cbCallInfo,
                                    ("u32=%#x (cPages=%#x offset=%#x) cbCallInfo=%#x\n", u32, cPages, off, cbCallInfo),
                                    VERR_INVALID_PARAMETER);
                    AssertMsgReturn(pPgLst->offFirstPage < PAGE_SIZE, ("%#x\n", pPgLst->offFirstPage), VERR_INVALID_PARAMETER);
                    u32 = RT_ALIGN_32(pPgLst->offFirstPage + cb, PAGE_SIZE) >> PAGE_SHIFT;
                    AssertMsgReturn(cPages == u32, ("cPages=%#x u32=%#x\n", cPages, u32), VERR_INVALID_PARAMETER);
                    AssertMsgReturn(pPgLst->flags > VBOX_HGCM_F_PARM_DIRECTION_NONE && pPgLst->flags <= VBOX_HGCM_F_PARM_DIRECTION_BOTH,
                                    ("%#x\n", pPgLst->flags),
                                    VERR_INVALID_PARAMETER);
                    Log4(("GstHGCMCall: parm=%u type=pglst: cb=%#010x cPgs=%u offPg0=%#x flags=%#x\n", iParm, cb, cPages, pPgLst->offFirstPage, pPgLst->flags));
                    u32 = cPages;
                    while (u32-- > 0)
                    {
                        Log4(("GstHGCMCall: pg#%u=%RHp\n", u32, pPgLst->aPages[u32]));
                        AssertMsgReturn(!(pPgLst->aPages[u32] & (PAGE_OFFSET_MASK | UINT64_C(0xfff0000000000000))),
                                        ("pg#%u=%RHp\n", u32, pPgLst->aPages[u32]),
                                        VERR_INVALID_PARAMETER);
                    }

                    *pcbExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[pPgLst->cPages]);
                }
                else
                    Log4(("GstHGCMCall: parm=%u type=pglst: cb=0\n", iParm));
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                if (fIsUser)
                    return VERR_INVALID_PARAMETER;
                if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST())
                {
                    cb = pSrcParm->u.Pointer.size;
                    AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
                                    VERR_OUT_OF_RANGE);
                    if (cb != 0)
                        Log4(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p\n", iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr));
                    else
                        Log4(("GstHGCMCall: parm=%u type=%#x: cb=0\n", iParm, pSrcParm->type));
                    break;
                }
                /* fall thru */

            case VMMDevHGCMParmType_LinAddr_In:
            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
                cb = pSrcParm->u.Pointer.size;
                if (cb != 0)
                {
#ifdef USE_BOUNCE_BUFFERS
                    void       *pvSmallBuf = NULL;
#endif
                    uint32_t    iLockBuf   = pParmInfo->cLockBufs;
                    RTR0MEMOBJ  hObj;
                    int         rc;

                    AssertReturn(iLockBuf < RT_ELEMENTS(pParmInfo->aLockBufs), VERR_INVALID_PARAMETER);
                    if (!fIsUser)
                    {
                        AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
                                        VERR_OUT_OF_RANGE);
                        rc = RTR0MemObjLockKernel(&hObj, (void *)pSrcParm->u.Pointer.u.linearAddr, cb);
                        if (RT_FAILURE(rc))
                        {
                            Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemObjLockKernel(,%p,%#x) -> %Rrc\n",
                                 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                            return rc;
                        }
                        Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p locked kernel -> %p\n", iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
                    }
                    else
                    {
                        if (cb > VBGLR0_MAX_HGCM_USER_PARM)
                        {
                            Log(("GstHGCMCall: id=%#x fn=%u parm=%u pv=%p cb=%#x > %#x -> out of range\n",
                                 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr, cb, VBGLR0_MAX_HGCM_USER_PARM));
                            return VERR_OUT_OF_RANGE;
                        }

#ifndef USE_BOUNCE_BUFFERS
                        rc = RTR0MemObjLockUser(&hObj, (RTR3PTR)pSrcParm->u.Pointer.u.linearAddr, cb, NIL_RTR0PROCESS);
                        if (RT_FAILURE(rc))
                        {
                            Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemObjLockUser(,%p,%#x,nil) -> %Rrc\n",
                                 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                            return rc;
                        }
                        Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p locked user -> %p\n", iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));

#else  /* USE_BOUNCE_BUFFERS */
                        /*
                         * This is a bit massive, but we don't want to waste a
                         * whole page for a 3 byte string buffer (guest props).
                         *
                         * The threshold is ASSUMING sizeof(RTMEMHDR) == 16 and
                         * the system is using some power of two allocator.
                         * (With 4KB pages that makes the cut-off 4096/2 - 16 = 2032 bytes.)
                         */
                        /** @todo A more efficient strategy would be to combine buffers. However it
                         *        is probably going to be more massive than the current code, so
                         *        it can wait till later. */
                        bool fCopyIn = pSrcParm->type != VMMDevHGCMParmType_LinAddr_Out
                                    && pSrcParm->type != VMMDevHGCMParmType_LinAddr_Locked_Out;
                        if (cb <= PAGE_SIZE / 2 - 16)
                        {
                            pvSmallBuf = fCopyIn ? RTMemTmpAlloc(cb) : RTMemTmpAllocZ(cb);
                            if (RT_UNLIKELY(!pvSmallBuf))
                                return VERR_NO_MEMORY;
                            if (fCopyIn)
                            {
                                rc = RTR0MemUserCopyFrom(pvSmallBuf, pSrcParm->u.Pointer.u.linearAddr, cb);
                                if (RT_FAILURE(rc))
                                {
                                    RTMemTmpFree(pvSmallBuf);
                                    Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemUserCopyFrom(,%p,%#x) -> %Rrc\n",
                                         pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                                    return rc;
                                }
                            }
                            rc = RTR0MemObjLockKernel(&hObj, pvSmallBuf, cb);
                            if (RT_FAILURE(rc))
                            {
                                RTMemTmpFree(pvSmallBuf);
                                Log(("GstHGCMCall: RTR0MemObjLockKernel failed for small buffer: rc=%Rrc pvSmallBuf=%p cb=%#x\n", rc, pvSmallBuf, cb));
                                return rc;
                            }
                            Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p small buffer %p -> %p\n", iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, pvSmallBuf, hObj));
                        }
                        else
                        {
                            rc = RTR0MemObjAllocPage(&hObj, cb, false /*fExecutable*/);
                            if (RT_FAILURE(rc))
                                return rc;
                            if (!fCopyIn)
                                memset(RTR0MemObjAddress(hObj), '\0', cb);
                            else
                            {
                                rc = RTR0MemUserCopyFrom(RTR0MemObjAddress(hObj), pSrcParm->u.Pointer.u.linearAddr, cb);
                                if (RT_FAILURE(rc))
                                {
                                    RTR0MemObjFree(hObj, false /*fFreeMappings*/);
                                    Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemUserCopyFrom(,%p,%#x) -> %Rrc\n",
                                         pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr, cb, rc));
                                    return rc;
                                }
                            }
                            Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p big buffer -> %p\n", iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
                        }
#endif /* USE_BOUNCE_BUFFERS */
                    }

                    pParmInfo->aLockBufs[iLockBuf].iParm = iParm;
                    pParmInfo->aLockBufs[iLockBuf].hObj  = hObj;
#ifdef USE_BOUNCE_BUFFERS
                    pParmInfo->aLockBufs[iLockBuf].pvSmallBuf = pvSmallBuf;
#endif
                    pParmInfo->cLockBufs = iLockBuf + 1;

                    if (VBGLR0_CAN_USE_PHYS_PAGE_LIST())
                    {
                        size_t const cPages = RTR0MemObjSize(hObj) >> PAGE_SHIFT;
                        *pcbExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]);
                    }
                }
                else
                    Log4(("GstHGCMCall: parm=%u type=%#x: cb=0\n", iParm, pSrcParm->type));
                break;

            default:
                return VERR_INVALID_PARAMETER;
        }
    }

    return VINF_SUCCESS;
}
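
/*
 * A worked example of the page list checks above, with illustrative figures
 * and assuming 4KB pages: for offFirstPage=0xf00 and cb=0x2100 the data
 * occupies bytes 0xf00 through 0x2fff of the region the page list describes,
 * so the expected page count is
 *
 *      RT_ALIGN_32(0xf00 + 0x2100, PAGE_SIZE) >> PAGE_SHIFT
 *    = 0x3000 >> 12
 *    = 3,
 *
 * meaning the caller must supply exactly three aPages[] entries, each with
 * the low 12 bits clear and bits 52..63 clear (that is what the
 * PAGE_OFFSET_MASK | 0xfff0000000000000 check enforces).
 */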

/**
 * Translates locked linear address to the normal type.
 * The locked types are only for the guest side and not handled by the host.
 *
 * @returns normal linear address type.
 * @param   enmType     The type.
 */
static HGCMFunctionParameterType vbglR0HGCMInternalConvertLinAddrType(HGCMFunctionParameterType enmType)
{
    switch (enmType)
    {
        case VMMDevHGCMParmType_LinAddr_Locked_In:
            return VMMDevHGCMParmType_LinAddr_In;
        case VMMDevHGCMParmType_LinAddr_Locked_Out:
            return VMMDevHGCMParmType_LinAddr_Out;
        case VMMDevHGCMParmType_LinAddr_Locked:
            return VMMDevHGCMParmType_LinAddr;
        default:
            return enmType;
    }
}

/**
 * Translates linear address types to page list direction flags.
 *
 * @returns page list flags.
 * @param   enmType     The type.
 */
static uint32_t vbglR0HGCMInternalLinAddrTypeToPageListFlags(HGCMFunctionParameterType enmType)
{
    switch (enmType)
    {
        case VMMDevHGCMParmType_LinAddr_In:
        case VMMDevHGCMParmType_LinAddr_Locked_In:
            return VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;

        case VMMDevHGCMParmType_LinAddr_Out:
        case VMMDevHGCMParmType_LinAddr_Locked_Out:
            return VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;

        default: AssertFailed();
        case VMMDevHGCMParmType_LinAddr:
        case VMMDevHGCMParmType_LinAddr_Locked:
            return VBOX_HGCM_F_PARM_DIRECTION_BOTH;
    }
}


/**
 * Initializes the call request that we're sending to the host.
 *
 * @param   pHGCMCall       The request to initialize.
 * @param   pCallInfo       The call info.
 * @param   cbCallInfo      The size of the call info structure.
 * @param   fIsUser         Is it a user request or kernel request.
 * @param   pParmInfo       Parameter locking/buffering info.
 */
static void vbglR0HGCMInternalInitCall(VMMDevHGCMCall *pHGCMCall, VBoxGuestHGCMCallInfo const *pCallInfo, uint32_t cbCallInfo,
                                       bool fIsUser, struct VbglR0ParmInfo *pParmInfo)
{
    HGCMFunctionParameter const *pSrcParm = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo);
    HGCMFunctionParameter       *pDstParm = VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
    uint32_t    cParms   = pCallInfo->cParms;
    uint32_t    offExtra = (uintptr_t)(pDstParm + cParms) - (uintptr_t)pHGCMCall;
    uint32_t    iLockBuf = 0;
    uint32_t    iParm;


    /*
     * The call request headers.
     */
    pHGCMCall->header.fu32Flags = 0;
    pHGCMCall->header.result    = VINF_SUCCESS;

    pHGCMCall->u32ClientID = pCallInfo->u32ClientID;
    pHGCMCall->u32Function = pCallInfo->u32Function;
    pHGCMCall->cParms      = cParms;

    /*
     * The parameters.
     */
    for (iParm = 0; iParm < pCallInfo->cParms; iParm++, pSrcParm++, pDstParm++)
    {
        switch (pSrcParm->type)
        {
            case VMMDevHGCMParmType_32bit:
            case VMMDevHGCMParmType_64bit:
                *pDstParm = *pSrcParm;
                break;

            case VMMDevHGCMParmType_PageList:
                pDstParm->type = VMMDevHGCMParmType_PageList;
                pDstParm->u.PageList.size = pSrcParm->u.PageList.size;
                if (pSrcParm->u.PageList.size)
                {
                    HGCMPageListInfo const *pSrcPgLst = (HGCMPageListInfo *)((uint8_t *)pCallInfo + pSrcParm->u.PageList.offset);
                    HGCMPageListInfo       *pDstPgLst = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offExtra);
                    uint32_t const          cPages    = pSrcPgLst->cPages;
                    uint32_t                iPage;

                    pDstParm->u.PageList.offset = offExtra;
                    pDstPgLst->flags            = pSrcPgLst->flags;
                    pDstPgLst->offFirstPage     = pSrcPgLst->offFirstPage;
                    pDstPgLst->cPages           = cPages;
                    for (iPage = 0; iPage < cPages; iPage++)
                        pDstPgLst->aPages[iPage] = pSrcPgLst->aPages[iPage];

                    offExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]);
                }
                else
                    pDstParm->u.PageList.offset = 0;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST())
                {
                    *pDstParm = *pSrcParm;
                    break;
                }
                /* fall thru */

            case VMMDevHGCMParmType_LinAddr_In:
            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
                if (pSrcParm->u.Pointer.size != 0)
                {
#ifdef USE_BOUNCE_BUFFERS
                    void      *pvSmallBuf = pParmInfo->aLockBufs[iLockBuf].pvSmallBuf;
#endif
                    RTR0MEMOBJ hObj       = pParmInfo->aLockBufs[iLockBuf].hObj;
                    Assert(iParm == pParmInfo->aLockBufs[iLockBuf].iParm);

                    if (VBGLR0_CAN_USE_PHYS_PAGE_LIST())
                    {
                        HGCMPageListInfo *pDstPgLst = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offExtra);
                        size_t const      cPages    = RTR0MemObjSize(hObj) >> PAGE_SHIFT;
                        size_t            iPage;

                        pDstParm->type = VMMDevHGCMParmType_PageList;
                        pDstParm->u.PageList.size   = pSrcParm->u.Pointer.size;
                        pDstParm->u.PageList.offset = offExtra;
                        pDstPgLst->flags            = vbglR0HGCMInternalLinAddrTypeToPageListFlags(pSrcParm->type);
#ifdef USE_BOUNCE_BUFFERS
                        if (fIsUser)
                            pDstPgLst->offFirstPage = (uintptr_t)pvSmallBuf & PAGE_OFFSET_MASK;
                        else
#endif
                            pDstPgLst->offFirstPage = pSrcParm->u.Pointer.u.linearAddr & PAGE_OFFSET_MASK;
                        pDstPgLst->cPages           = cPages; Assert(pDstPgLst->cPages == cPages);
                        for (iPage = 0; iPage < cPages; iPage++)
                        {
                            pDstPgLst->aPages[iPage] = RTR0MemObjGetPagePhysAddr(hObj, iPage);
                            Assert(pDstPgLst->aPages[iPage] != NIL_RTHCPHYS);
                        }

                        offExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]);
                    }
                    else
                    {
                        pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                        pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
#ifdef USE_BOUNCE_BUFFERS
                        if (fIsUser)
                            pDstParm->u.Pointer.u.linearAddr = pvSmallBuf
                                                             ? (uintptr_t)pvSmallBuf
                                                             : (uintptr_t)RTR0MemObjAddress(hObj);
                        else
#endif
                            pDstParm->u.Pointer.u.linearAddr = pSrcParm->u.Pointer.u.linearAddr;
                    }
                    iLockBuf++;
                }
                else
                {
                    pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                    pDstParm->u.Pointer.size         = 0;
                    pDstParm->u.Pointer.u.linearAddr = 0;
                }
                break;

            default:
                AssertFailed();
                pDstParm->type = VMMDevHGCMParmType_Invalid;
                break;
        }
    }
}
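
/*
 * For orientation, the request initialized above ends up with this layout
 * (a sketch derived from the code, not an authoritative definition):
 *
 *      +--------------------------------+ <- pHGCMCall
 *      | VMMDevHGCMCall header          |
 *      +--------------------------------+ <- VMMDEV_HGCM_CALL_PARMS(pHGCMCall)
 *      | HGCMFunctionParameter[cParms]  |
 *      +--------------------------------+ <- initial offExtra
 *      | HGCMPageListInfo for parm #i   |
 *      | HGCMPageListInfo for parm #j   |
 *      | ...                            |
 *      +--------------------------------+
 *
 * Each page list parameter records the offset of its HGCMPageListInfo in
 * u.PageList.offset, which is why offExtra starts at
 * (uintptr_t)(pDstParm + cParms) - (uintptr_t)pHGCMCall and advances by
 * RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]) for each list appended.
 */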

/**
 * Performs the call and completion wait.
 *
 * @returns VBox status code of this operation, not necessarily the call.
 *
 * @param   pHGCMCall           The HGCM call info.
 * @param   pfnAsyncCallback    The async callback that will wait for the call
 *                              to complete.
 * @param   pvAsyncData         Argument for the callback.
 * @param   u32AsyncData        Argument for the callback.
 */
static int vbglR0HGCMInternalDoCall(VMMDevHGCMCall *pHGCMCall, VBGLHGCMCALLBACK *pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
{
    int rc;

    Log(("calling VbglGRPerform\n"));
    rc = VbglGRPerform(&pHGCMCall->header.header);
    Log(("VbglGRPerform rc = %Rrc (header rc=%d)\n", rc, pHGCMCall->header.result));

    /*
     * If the call failed, but as a result of the request itself, then pretend
     * success. Upper layers will interpret the result code in the packet.
     */
    if (    RT_FAILURE(rc)
        &&  rc == pHGCMCall->header.result)
    {
        Assert(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE);
        rc = VINF_SUCCESS;
    }

    /*
     * Check if the host decided to process the request asynchronously;
     * if so, we wait for it to complete using the caller supplied callback.
     */
    if (    RT_SUCCESS(rc)
        &&  rc == VINF_HGCM_ASYNC_EXECUTE)
    {
        Log(("Processing HGCM call asynchronously\n"));
        /** @todo timeout vs. interrupted. */
        pfnAsyncCallback(&pHGCMCall->header, pvAsyncData, u32AsyncData);

        /*
         * If the request isn't completed by the time the callback returns,
         * we will have to try to cancel it.
         */
        if (!(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE))
        {
            /** @todo use a new request for this! See @bugref{4052}. */
            pHGCMCall->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;
            pHGCMCall->header.header.requestType = VMMDevReq_HGCMCancel;
            VbglGRPerform(&pHGCMCall->header.header);
            rc = VERR_INTERRUPTED;
        }
    }

    Log(("GstHGCMCall: rc=%Rrc result=%Rrc fu32Flags=%#x\n", rc, pHGCMCall->header.result, pHGCMCall->header.fu32Flags));
    return rc;
}
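
/*
 * For illustration, a minimal callback compatible with the way pfnAsyncCallback
 * is invoked above. This is an assumption, not the real VBoxGuest callback:
 * the driver's actual callback blocks on an event that the VMMDev interrupt
 * handler signals when the host marks the request VBOX_HGCM_REQ_DONE, rather
 * than polling like this sketch does.
 *
 * @code
 *  static DECLCALLBACK(void) vboxGuestHgcmPollCallback(VMMDevHGCMRequestHeader *pHeader,
 *                                                      void *pvUser, uint32_t u32User)
 *  {
 *      NOREF(pvUser); NOREF(u32User);
 *      while (!(pHeader->fu32Flags & VBOX_HGCM_REQ_DONE))
 *          RTThreadSleep(1);   // illustrative busy-ish wait only
 *  }
 * @endcode
 */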

/**
 * Copies the result of the call back to the caller info structure and user
 * buffers (if using bounce buffers).
 *
 * @returns rc, unless RTR0MemUserCopyTo fails.
 * @param   pCallInfo       Call info structure to update.
 * @param   pHGCMCall       HGCM call request.
 * @param   pParmInfo       Parameter locking/buffering info.
 * @param   fIsUser         Is it a user (true) or kernel request.
 * @param   rc              The current result code. Passed along to
 *                          preserve informational status codes.
 */
static int vbglR0HGCMInternalCopyBackResult(VBoxGuestHGCMCallInfo *pCallInfo, VMMDevHGCMCall const *pHGCMCall,
                                            struct VbglR0ParmInfo *pParmInfo, bool fIsUser, int rc)
{
    HGCMFunctionParameter const *pSrcParm = VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
    HGCMFunctionParameter       *pDstParm = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo);
    uint32_t    cParms   = pCallInfo->cParms;
#ifdef USE_BOUNCE_BUFFERS
    uint32_t    iLockBuf = 0;
#endif
    uint32_t    iParm;

    /*
     * The call result.
     */
    pCallInfo->result = pHGCMCall->header.result;

    /*
     * Copy back parameters.
     */
    for (iParm = 0; iParm < pCallInfo->cParms; iParm++, pSrcParm++, pDstParm++)
    {
        switch (pDstParm->type)
        {
            case VMMDevHGCMParmType_32bit:
            case VMMDevHGCMParmType_64bit:
                *pDstParm = *pSrcParm;
                break;

            case VMMDevHGCMParmType_PageList:
                pDstParm->u.PageList.size = pSrcParm->u.PageList.size;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_In:
#ifdef USE_BOUNCE_BUFFERS
                if (    fIsUser
                    &&  iLockBuf < pParmInfo->cLockBufs
                    &&  iParm == pParmInfo->aLockBufs[iLockBuf].iParm)
                    iLockBuf++;
#endif
                pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST())
                {
                    pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                    break;
                }
                /* fall thru */

            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
            {
#ifdef USE_BOUNCE_BUFFERS
                if (fIsUser)
                {
                    size_t cbOut = RT_MIN(pSrcParm->u.Pointer.size, pDstParm->u.Pointer.size);
                    if (cbOut)
                    {
                        Assert(pParmInfo->aLockBufs[iLockBuf].iParm == iParm);
                        int rc2 = RTR0MemUserCopyTo((RTR3PTR)pDstParm->u.Pointer.u.linearAddr,
                                                    pParmInfo->aLockBufs[iLockBuf].pvSmallBuf
                                                    ? pParmInfo->aLockBufs[iLockBuf].pvSmallBuf
                                                    : RTR0MemObjAddress(pParmInfo->aLockBufs[iLockBuf].hObj),
                                                    cbOut);
                        if (RT_FAILURE(rc2))
                            return rc2;
                        iLockBuf++;
                    }
                    else if (   iLockBuf < pParmInfo->cLockBufs
                             && iParm == pParmInfo->aLockBufs[iLockBuf].iParm)
                        iLockBuf++;
                }
#endif
                pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                break;
            }

            default:
                AssertFailed();
                rc = VERR_INTERNAL_ERROR_4;
                break;
        }
    }

#ifdef USE_BOUNCE_BUFFERS
    Assert(!fIsUser || pParmInfo->cLockBufs == iLockBuf);
#endif
    return rc;
}

DECLR0VBGL(int) VbglR0HGCMInternalCall(VBoxGuestHGCMCallInfo *pCallInfo, uint32_t cbCallInfo, uint32_t fFlags,
                                       VBGLHGCMCALLBACK *pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
{
    bool                    fIsUser = (fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_USER;
    struct VbglR0ParmInfo   ParmInfo;
    size_t                  cbExtra;
    int                     rc;

    /*
     * Basic validation.
     */
    AssertMsgReturn(   pCallInfo
                    && pfnAsyncCallback
                    && pCallInfo->cParms <= VBOX_HGCM_MAX_PARMS
                    && !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
                    ("pCallInfo=%p pfnAsyncCallback=%p fFlags=%#x\n", pCallInfo, pfnAsyncCallback, fFlags),
                    VERR_INVALID_PARAMETER);
    AssertReturn(cbCallInfo >= sizeof(VBoxGuestHGCMCallInfo) + pCallInfo->cParms * sizeof(HGCMFunctionParameter),
                 VERR_INVALID_PARAMETER);

    Log(("GstHGCMCall: u32ClientID=%#x u32Function=%u cParms=%u cbCallInfo=%#x fFlags=%#x\n",
         pCallInfo->u32ClientID, pCallInfo->u32Function, pCallInfo->cParms, cbCallInfo, fFlags));

    /*
     * Validate, lock and buffer the parameters for the call.
     * This will calculate the amount of extra space needed for physical page lists.
     */
    rc = vbglR0HGCMInternalPreprocessCall(pCallInfo, cbCallInfo, fIsUser, &ParmInfo, &cbExtra);
    if (RT_SUCCESS(rc))
    {
        /*
         * Allocate the request buffer and recreate the call request.
         */
        VMMDevHGCMCall *pHGCMCall;
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pHGCMCall,
                         sizeof(VMMDevHGCMCall) + pCallInfo->cParms * sizeof(HGCMFunctionParameter) + cbExtra,
                         VMMDevReq_HGCMCall);
        if (RT_SUCCESS(rc))
        {
            vbglR0HGCMInternalInitCall(pHGCMCall, pCallInfo, cbCallInfo, fIsUser, &ParmInfo);

            /*
             * Perform the call.
             */
            rc = vbglR0HGCMInternalDoCall(pHGCMCall, pfnAsyncCallback, pvAsyncData, u32AsyncData);
            if (RT_SUCCESS(rc))
            {
                /*
                 * Copy back the result (parameters and buffers that changed).
                 */
                rc = vbglR0HGCMInternalCopyBackResult(pCallInfo, pHGCMCall, &ParmInfo, fIsUser, rc);
            }

            VbglGRFree(&pHGCMCall->header.header);
        }
    }

    /*
     * Release locks and free bounce buffers.
     */
    if (ParmInfo.cLockBufs)
        while (ParmInfo.cLockBufs-- > 0)
        {
            RTR0MemObjFree(ParmInfo.aLockBufs[ParmInfo.cLockBufs].hObj, false /*fFreeMappings*/);
#ifdef USE_BOUNCE_BUFFERS
            RTMemTmpFree(ParmInfo.aLockBufs[ParmInfo.cLockBufs].pvSmallBuf);
#endif
        }

    return rc;
}
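
/*
 * A minimal usage sketch for VbglR0HGCMInternalCall in kernel mode, assuming a
 * hypothetical function number MY_FUNCTION, an already established client id
 * idClient, and the vboxGuestHgcmPollCallback sketch from above:
 *
 * @code
 *  struct
 *  {
 *      VBoxGuestHGCMCallInfo   Hdr;
 *      HGCMFunctionParameter   aParms[2];
 *  } Req;
 *  uint8_t abBuf[256];
 *
 *  Req.Hdr.result      = VERR_WRONG_ORDER;
 *  Req.Hdr.u32ClientID = idClient;                         // from VbglR0HGCMInternalConnect
 *  Req.Hdr.u32Function = MY_FUNCTION;                      // hypothetical
 *  Req.Hdr.cParms      = 2;
 *  Req.aParms[0].type      = VMMDevHGCMParmType_32bit;
 *  Req.aParms[0].u.value32 = 42;
 *  Req.aParms[1].type                   = VMMDevHGCMParmType_LinAddr_In;
 *  Req.aParms[1].u.Pointer.size         = sizeof(abBuf);
 *  Req.aParms[1].u.Pointer.u.linearAddr = (uintptr_t)&abBuf[0];
 *
 *  int rc = VbglR0HGCMInternalCall(&Req.Hdr, sizeof(Req), VBGLR0_HGCMCALL_F_KERNEL,
 *                                  vboxGuestHgcmPollCallback, NULL, 0);
 *  if (RT_SUCCESS(rc))
 *      rc = Req.Hdr.result;    // status from the HGCM service itself
 * @endcode
 */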

# if ARCH_BITS == 64
DECLR0VBGL(int) VbglR0HGCMInternalCall32(VBoxGuestHGCMCallInfo *pCallInfo, uint32_t cbCallInfo, uint32_t fFlags,
                                         VBGLHGCMCALLBACK *pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
{
    VBoxGuestHGCMCallInfo   *pCallInfo64;
    HGCMFunctionParameter   *pParm64;
    HGCMFunctionParameter32 *pParm32;
    uint32_t                 cParms;
    uint32_t                 iParm;
    int                      rc;

    /*
     * Input validation.
     */
    AssertMsgReturn(   pCallInfo
                    && pfnAsyncCallback
                    && pCallInfo->cParms <= VBOX_HGCM_MAX_PARMS
                    && !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
                    ("pCallInfo=%p pAsyncCallback=%p fFlags=%#x\n", pCallInfo, pfnAsyncCallback, fFlags),
                    VERR_INVALID_PARAMETER);
    AssertReturn(cbCallInfo >= sizeof(VBoxGuestHGCMCallInfo) + pCallInfo->cParms * sizeof(HGCMFunctionParameter32),
                 VERR_INVALID_PARAMETER);
    AssertReturn((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_KERNEL, VERR_INVALID_PARAMETER);

    cParms = pCallInfo->cParms;
    Log(("VbglR0HGCMInternalCall32: cParms=%d, u32Function=%d, fFlags=%#x\n", cParms, pCallInfo->u32Function, fFlags));

    /*
     * The simple approach, allocate a temporary request and convert the parameters.
     */
    pCallInfo64 = (VBoxGuestHGCMCallInfo *)RTMemTmpAllocZ(sizeof(*pCallInfo64) + cParms * sizeof(HGCMFunctionParameter));
    if (!pCallInfo64)
        return VERR_NO_TMP_MEMORY;

    *pCallInfo64 = *pCallInfo;
    pParm32 = VBOXGUEST_HGCM_CALL_PARMS32(pCallInfo);
    pParm64 = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo64);
    rc = VINF_SUCCESS;
    for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++)
    {
        switch (pParm32->type)
        {
            case VMMDevHGCMParmType_32bit:
                pParm64->type      = VMMDevHGCMParmType_32bit;
                pParm64->u.value32 = pParm32->u.value32;
                break;

            case VMMDevHGCMParmType_64bit:
                pParm64->type      = VMMDevHGCMParmType_64bit;
                pParm64->u.value64 = pParm32->u.value64;
                break;

            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
            case VMMDevHGCMParmType_LinAddr_In:
                pParm64->type                   = pParm32->type;
                pParm64->u.Pointer.size         = pParm32->u.Pointer.size;
                pParm64->u.Pointer.u.linearAddr = pParm32->u.Pointer.u.linearAddr;
                break;

            default:
                rc = VERR_INVALID_PARAMETER;
                break;
        }
        if (RT_FAILURE(rc))
            break;
    }
    if (RT_SUCCESS(rc))
    {
        rc = VbglR0HGCMInternalCall(pCallInfo64, sizeof(*pCallInfo64) + cParms * sizeof(HGCMFunctionParameter), fFlags,
                                    pfnAsyncCallback, pvAsyncData, u32AsyncData);

        /*
         * Copy back. (Rewind the parameter pointers first; the loop above
         * left them pointing past the end of the arrays.)
         */
        pParm32 = VBOXGUEST_HGCM_CALL_PARMS32(pCallInfo);
        pParm64 = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo64);
        for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++)
        {
            switch (pParm32->type)
            {
                case VMMDevHGCMParmType_32bit:
                    pParm32->u.value32 = pParm64->u.value32;
                    break;

                case VMMDevHGCMParmType_64bit:
                    pParm32->u.value64 = pParm64->u.value64;
                    break;

                case VMMDevHGCMParmType_LinAddr_Out:
                case VMMDevHGCMParmType_LinAddr:
                case VMMDevHGCMParmType_LinAddr_In:
                    pParm32->u.Pointer.size = pParm64->u.Pointer.size;
                    break;

                default:
                    rc = VERR_INTERNAL_ERROR_3;
                    break;
            }
        }
        *pCallInfo = *pCallInfo64;
    }

    RTMemTmpFree(pCallInfo64);
    return rc;
}
# endif /* ARCH_BITS == 64 */

# else  /* old code: */

/** @todo merge with the one below (use a header file). Too lazy now. */
DECLR0VBGL(int) VbglR0HGCMInternalCall (VBoxGuestHGCMCallInfo *pCallInfo, uint32_t cbCallInfo, uint32_t fFlags,
                                        VBGLHGCMCALLBACK *pAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
{
    VMMDevHGCMCall *pHGCMCall;
    uint32_t cbParms;
    HGCMFunctionParameter *pParm;
    unsigned iParm;
    int rc;

    AssertMsgReturn(   pCallInfo
                    && pAsyncCallback
                    && pCallInfo->cParms <= VBOX_HGCM_MAX_PARMS
                    && !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
                    ("pCallInfo=%p pAsyncCallback=%p fFlags=%#x\n", pCallInfo, pAsyncCallback, fFlags),
                    VERR_INVALID_PARAMETER);

    Log (("GstHGCMCall: pCallInfo->cParms = %d, pCallInfo->u32Function = %d, fFlags=%#x\n",
          pCallInfo->cParms, pCallInfo->u32Function, fFlags));

    pHGCMCall = NULL;

    if (cbCallInfo == 0)
    {
        /* Caller did not specify the size (a valid value should be at least sizeof(VBoxGuestHGCMCallInfo)).
         * Compute the size.
         */
        cbParms = pCallInfo->cParms * sizeof (HGCMFunctionParameter);
    }
    else if (cbCallInfo < sizeof (VBoxGuestHGCMCallInfo))
    {
        return VERR_INVALID_PARAMETER;
    }
    else
    {
        cbParms = cbCallInfo - sizeof (VBoxGuestHGCMCallInfo);
    }

    /* Allocate request */
    rc = VbglGRAlloc ((VMMDevRequestHeader **)&pHGCMCall, sizeof (VMMDevHGCMCall) + cbParms, VMMDevReq_HGCMCall);

    Log (("GstHGCMCall: Allocated gr %p, rc = %Rrc, cbParms = %d\n", pHGCMCall, rc, cbParms));

    if (RT_SUCCESS(rc))
    {
        void *apvCtx[VBOX_HGCM_MAX_PARMS];
        memset (apvCtx, 0, sizeof(void *) * pCallInfo->cParms);

        /* Initialize request memory */
        pHGCMCall->header.fu32Flags = 0;
        pHGCMCall->header.result = VINF_SUCCESS;

        pHGCMCall->u32ClientID = pCallInfo->u32ClientID;
        pHGCMCall->u32Function = pCallInfo->u32Function;
        pHGCMCall->cParms = pCallInfo->cParms;

        if (cbParms)
        {
            /* Lock user buffers. */
            pParm = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo);

            for (iParm = 0; iParm < pCallInfo->cParms; iParm++, pParm++)
            {
                switch (pParm->type)
                {
                    case VMMDevHGCMParmType_32bit:
                    case VMMDevHGCMParmType_64bit:
                        break;

                    case VMMDevHGCMParmType_LinAddr_Locked_In:
                        if ((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_USER)
                            rc = VERR_INVALID_PARAMETER;
                        else
                            pParm->type = VMMDevHGCMParmType_LinAddr_In;
                        break;
                    case VMMDevHGCMParmType_LinAddr_Locked_Out:
                        if ((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_USER)
                            rc = VERR_INVALID_PARAMETER;
                        else
                            pParm->type = VMMDevHGCMParmType_LinAddr_Out;
                        break;
                    case VMMDevHGCMParmType_LinAddr_Locked:
                        if ((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_USER)
                            rc = VERR_INVALID_PARAMETER;
                        else
                            pParm->type = VMMDevHGCMParmType_LinAddr;
                        break;

                    case VMMDevHGCMParmType_PageList:
                        if ((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_USER)
                            rc = VERR_INVALID_PARAMETER;
                        break;

                    case VMMDevHGCMParmType_LinAddr_In:
                    case VMMDevHGCMParmType_LinAddr_Out:
                    case VMMDevHGCMParmType_LinAddr:
                        /* PORTME: When porting this to Darwin and other systems where the entire kernel isn't mapped
                           into every process, all linear addresses will have to be converted to physical SG lists at
                           this point. Care must also be taken on these guests to not mix kernel and user addresses
                           in HGCM calls, or we'll end up locking the wrong memory. If VMMDev/HGCM gets a linear address
                           it will assume that it's in the current memory context (i.e. use CR3 to translate it).

                           This kind of problem actually applies to some patched linux kernels too, including older
                           fedora releases. (The patch is the infamous 4G/4G patch, aka 4g4g, by Ingo Molnar.) */
                        rc = vbglLockLinear (&apvCtx[iParm], (void *)pParm->u.Pointer.u.linearAddr, pParm->u.Pointer.size,
                                             (pParm->type == VMMDevHGCMParmType_LinAddr_In) ? false : true /* write access */,
                                             fFlags);
                        break;

                    default:
                        rc = VERR_INVALID_PARAMETER;
                        break;
                }
                if (RT_FAILURE (rc))
                    break;
            }
            memcpy (VMMDEV_HGCM_CALL_PARMS(pHGCMCall), VBOXGUEST_HGCM_CALL_PARMS(pCallInfo), cbParms);
        }

        /* Check that the parameter locking was ok. */
        if (RT_SUCCESS(rc))
        {
            Log (("calling VbglGRPerform\n"));

            /* Issue request */
            rc = VbglGRPerform (&pHGCMCall->header.header);

            Log (("VbglGRPerform rc = %Rrc (header rc=%d)\n", rc, pHGCMCall->header.result));

            /* If the call failed, but as a result of the request itself, then pretend success.
             * Upper layers will interpret the result code in the packet.
             */
            if (RT_FAILURE(rc) && rc == pHGCMCall->header.result)
            {
                Assert(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE);
                rc = VINF_SUCCESS;
            }

            if (RT_SUCCESS(rc))
            {
                /* Check if host decides to process the request asynchronously. */
                if (rc == VINF_HGCM_ASYNC_EXECUTE)
                {
                    /* Wait for request completion interrupt notification from host */
                    Log (("Processing HGCM call asynchronously\n"));
                    pAsyncCallback (&pHGCMCall->header, pvAsyncData, u32AsyncData);
                }

                if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
                {
                    if (cbParms)
                    {
                        memcpy (VBOXGUEST_HGCM_CALL_PARMS(pCallInfo), VMMDEV_HGCM_CALL_PARMS(pHGCMCall), cbParms);
                    }
                    pCallInfo->result = pHGCMCall->header.result;
                }
                else
                {
                    /* The callback returned without completing the request,
                     * which means the wait was interrupted. That can happen
                     * if the request times out, the system reboots or
                     * VBoxService ends abnormally.
                     *
                     * Cancel the request, the host will not write to the
                     * memory related to the cancelled request.
                     */
                    Log (("Cancelling HGCM call\n"));
                    pHGCMCall->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;

                    pHGCMCall->header.header.requestType = VMMDevReq_HGCMCancel;
                    VbglGRPerform (&pHGCMCall->header.header);
                }
            }
        }

        /* Unlock user buffers. */
        pParm = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo);

        for (iParm = 0; iParm < pCallInfo->cParms; iParm++, pParm++)
        {
            if (   pParm->type == VMMDevHGCMParmType_LinAddr_In
                || pParm->type == VMMDevHGCMParmType_LinAddr_Out
                || pParm->type == VMMDevHGCMParmType_LinAddr)
            {
                if (apvCtx[iParm] != NULL)
                {
                    vbglUnlockLinear (apvCtx[iParm], (void *)pParm->u.Pointer.u.linearAddr, pParm->u.Pointer.size);
                }
            }
            else
                Assert(!apvCtx[iParm]);
        }

        if ((pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_CANCELLED) == 0)
            VbglGRFree (&pHGCMCall->header.header);
        else
            rc = VERR_INTERRUPTED;
    }

    return rc;
}

# if ARCH_BITS == 64
/** @todo merge with the one above (use a header file). Too lazy now. */
DECLR0VBGL(int) VbglR0HGCMInternalCall32 (VBoxGuestHGCMCallInfo *pCallInfo, uint32_t cbCallInfo, uint32_t fFlags,
                                          VBGLHGCMCALLBACK *pAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
{
    VMMDevHGCMCall *pHGCMCall;
    uint32_t cbParms;
    HGCMFunctionParameter32 *pParm;
    unsigned iParm;
    int rc;

    AssertMsgReturn(   pCallInfo
                    && pAsyncCallback
                    && pCallInfo->cParms <= VBOX_HGCM_MAX_PARMS
                    && !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
                    ("pCallInfo=%p pAsyncCallback=%p fFlags=%#x\n", pCallInfo, pAsyncCallback, fFlags),
                    VERR_INVALID_PARAMETER);

    Log (("GstHGCMCall32: pCallInfo->cParms = %d, pCallInfo->u32Function = %d, fFlags=%#x\n",
          pCallInfo->cParms, pCallInfo->u32Function, fFlags));

    pHGCMCall = NULL;

    if (cbCallInfo == 0)
    {
        /* Caller did not specify the size (a valid value should be at least sizeof(VBoxGuestHGCMCallInfo)).
         * Compute the size.
         */
        cbParms = pCallInfo->cParms * sizeof (HGCMFunctionParameter32);
    }
    else if (cbCallInfo < sizeof (VBoxGuestHGCMCallInfo))
    {
        return VERR_INVALID_PARAMETER;
    }
    else
    {
        cbParms = cbCallInfo - sizeof (VBoxGuestHGCMCallInfo);
    }

    /* Allocate request */
    rc = VbglGRAlloc ((VMMDevRequestHeader **)&pHGCMCall, sizeof (VMMDevHGCMCall) + cbParms, VMMDevReq_HGCMCall32);

    Log (("GstHGCMCall32: Allocated gr %p, rc = %Rrc, cbParms = %d\n", pHGCMCall, rc, cbParms));

    if (RT_SUCCESS(rc))
    {
        void *apvCtx[VBOX_HGCM_MAX_PARMS];
        memset (apvCtx, 0, sizeof(void *) * pCallInfo->cParms);

        /* Initialize request memory */
        pHGCMCall->header.fu32Flags = 0;
        pHGCMCall->header.result = VINF_SUCCESS;

        pHGCMCall->u32ClientID = pCallInfo->u32ClientID;
        pHGCMCall->u32Function = pCallInfo->u32Function;
        pHGCMCall->cParms = pCallInfo->cParms;

        if (cbParms)
        {
            /* Lock user buffers. */
            pParm = VBOXGUEST_HGCM_CALL_PARMS32(pCallInfo);

            for (iParm = 0; iParm < pCallInfo->cParms; iParm++, pParm++)
            {
                switch (pParm->type)
                {
                    case VMMDevHGCMParmType_32bit:
                    case VMMDevHGCMParmType_64bit:
                        break;

                    case VMMDevHGCMParmType_LinAddr_Locked_In:
                        if ((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_USER)
                            rc = VERR_INVALID_PARAMETER;
                        else
                            pParm->type = VMMDevHGCMParmType_LinAddr_In;
                        break;
                    case VMMDevHGCMParmType_LinAddr_Locked_Out:
                        if ((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_USER)
                            rc = VERR_INVALID_PARAMETER;
                        else
                            pParm->type = VMMDevHGCMParmType_LinAddr_Out;
                        break;
                    case VMMDevHGCMParmType_LinAddr_Locked:
                        if ((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_USER)
                            rc = VERR_INVALID_PARAMETER;
                        else
                            pParm->type = VMMDevHGCMParmType_LinAddr;
                        break;

                    case VMMDevHGCMParmType_PageList:
                        if ((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_USER)
                            rc = VERR_INVALID_PARAMETER;
                        break;

                    case VMMDevHGCMParmType_LinAddr_In:
                    case VMMDevHGCMParmType_LinAddr_Out:
                    case VMMDevHGCMParmType_LinAddr:
                        /* PORTME: See the PORTME note in the 64-bit unaware VbglR0HGCMInternalCall
                           above; the same caveats about converting linear addresses to physical
                           SG lists and not mixing kernel and user addresses apply here. */
                        rc = vbglLockLinear (&apvCtx[iParm], (void *)pParm->u.Pointer.u.linearAddr, pParm->u.Pointer.size,
                                             (pParm->type == VMMDevHGCMParmType_LinAddr_In) ? false : true /* write access */,
                                             fFlags);
                        break;

                    default:
                        rc = VERR_INVALID_PARAMETER;
                        break;
                }
                if (RT_FAILURE (rc))
                    break;
            }
            memcpy (VMMDEV_HGCM_CALL_PARMS32(pHGCMCall), VBOXGUEST_HGCM_CALL_PARMS32(pCallInfo), cbParms);
        }

        /* Check that the parameter locking was ok. */
        if (RT_SUCCESS(rc))
        {
            Log (("calling VbglGRPerform\n"));

            /* Issue request */
            rc = VbglGRPerform (&pHGCMCall->header.header);

            Log (("VbglGRPerform rc = %Rrc (header rc=%d)\n", rc, pHGCMCall->header.result));

            /* If the call failed, but as a result of the request itself, then pretend success.
             * Upper layers will interpret the result code in the packet.
             */
            if (RT_FAILURE(rc) && rc == pHGCMCall->header.result)
            {
                Assert(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE);
                rc = VINF_SUCCESS;
            }

            if (RT_SUCCESS(rc))
            {
                /* Check if host decides to process the request asynchronously. */
                if (rc == VINF_HGCM_ASYNC_EXECUTE)
                {
                    /* Wait for request completion interrupt notification from host */
                    Log (("Processing HGCM call asynchronously\n"));
                    pAsyncCallback (&pHGCMCall->header, pvAsyncData, u32AsyncData);
                }

                if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
                {
                    if (cbParms)
                        memcpy (VBOXGUEST_HGCM_CALL_PARMS32(pCallInfo), VMMDEV_HGCM_CALL_PARMS32(pHGCMCall), cbParms);

                    pCallInfo->result = pHGCMCall->header.result;
                }
                else
                {
                    /* The callback returned without completing the request,
                     * which means the wait was interrupted. That can happen
                     * if the request times out, the system reboots or
                     * VBoxService ends abnormally.
                     *
                     * Cancel the request, the host will not write to the
                     * memory related to the cancelled request.
                     */
                    Log (("Cancelling HGCM call\n"));
                    pHGCMCall->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;

                    pHGCMCall->header.header.requestType = VMMDevReq_HGCMCancel;
                    VbglGRPerform (&pHGCMCall->header.header);
                }
            }
        }

        /* Unlock user buffers. */
        pParm = VBOXGUEST_HGCM_CALL_PARMS32(pCallInfo);

        for (iParm = 0; iParm < pCallInfo->cParms; iParm++, pParm++)
        {
            if (   pParm->type == VMMDevHGCMParmType_LinAddr_In
                || pParm->type == VMMDevHGCMParmType_LinAddr_Out
                || pParm->type == VMMDevHGCMParmType_LinAddr)
            {
                if (apvCtx[iParm] != NULL)
                {
                    vbglUnlockLinear (apvCtx[iParm], (void *)pParm->u.Pointer.u.linearAddr, pParm->u.Pointer.size);
                }
            }
            else
                Assert(!apvCtx[iParm]);
        }

        if ((pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_CANCELLED) == 0)
            VbglGRFree (&pHGCMCall->header.header);
        else
            rc = VERR_INTERRUPTED;
    }

    return rc;
}
# endif /* ARCH_BITS == 64 */

# endif /* old code */

#endif /* VBGL_VBOXGUEST */
