VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuestLib/HGCMInternal.cpp@ 21489

Last change on this file since 21489 was 21489, checked in by vboxsync, 15 years ago

VbglR0HGCMInternal.cpp: Enable the physical page list code everywhere.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 38.1 KB
Line 
1/* $Revision: 21489 $ */
2/** @file
3 * VBoxGuestLib - Host-Guest Communication Manager internal functions, implemented by VBoxGuest
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/* Entire file is ifdef'ed with VBGL_VBOXGUEST */
23#ifdef VBGL_VBOXGUEST
24
25/*******************************************************************************
26* Header Files *
27*******************************************************************************/
28#include "VBGLInternal.h"
29#include <iprt/alloca.h>
30#include <iprt/assert.h>
31#include <iprt/mem.h>
32#include <iprt/memobj.h>
33#include <iprt/string.h>
34
35/*******************************************************************************
36* Defined Constants And Macros *
37*******************************************************************************/
38/** The max parameter buffer size for a user request. */
39#define VBGLR0_MAX_HGCM_USER_PARM _1M
40/** The max parameter buffer size for a kernel request. */
41#define VBGLR0_MAX_HGCM_KERNEL_PARM (16*_1M)
42#ifdef RT_OS_LINUX
43/** Linux needs to use bounce buffers since RTR0MemObjLockUser has unwanted
44 * side effects. */
45# define USE_BOUNCH_BUFFERS
46#endif
47
48/*******************************************************************************
49* Structures and Typedefs *
50*******************************************************************************/
/**
 * Lock info structure used by VbglR0HGCMInternalCall and its helpers.
 *
 * Records the memory objects (and, on bounce-buffer builds, the small
 * temporary buffers) created while preprocessing a call so they can be
 * copied back and released once the call completes.
 */
struct VbglR0ParmInfo
{
    /** Number of entries in use in aLockBufs. */
    uint32_t cLockBufs;
    struct
    {
        /** Index of the call parameter this entry belongs to. */
        uint32_t iParm;
        /** Memory object locking (or, with bounce buffers, backing) the parameter data. */
        RTR0MEMOBJ hObj;
#ifdef USE_BOUNCH_BUFFERS
        /** Small bounce buffer for the parameter data, NULL when hObj holds the data. */
        void *pvSmallBuf;
#endif
    } aLockBufs[10];
};
66
67
68
69/* These functions can be only used by VBoxGuest. */
70
71DECLVBGL(int) VbglR0HGCMInternalConnect (VBoxGuestHGCMConnectInfo *pConnectInfo,
72 VBGLHGCMCALLBACK *pAsyncCallback, void *pvAsyncData,
73 uint32_t u32AsyncData)
74{
75 VMMDevHGCMConnect *pHGCMConnect;
76 int rc;
77
78 if (!pConnectInfo || !pAsyncCallback)
79 return VERR_INVALID_PARAMETER;
80
81 pHGCMConnect = NULL;
82
83 /* Allocate request */
84 rc = VbglGRAlloc ((VMMDevRequestHeader **)&pHGCMConnect, sizeof (VMMDevHGCMConnect), VMMDevReq_HGCMConnect);
85
86 if (RT_SUCCESS(rc))
87 {
88 /* Initialize request memory */
89 pHGCMConnect->header.fu32Flags = 0;
90
91 memcpy (&pHGCMConnect->loc, &pConnectInfo->Loc, sizeof (HGCMServiceLocation));
92 pHGCMConnect->u32ClientID = 0;
93
94 /* Issue request */
95 rc = VbglGRPerform (&pHGCMConnect->header.header);
96
97 if (RT_SUCCESS(rc))
98 {
99 /* Check if host decides to process the request asynchronously. */
100 if (rc == VINF_HGCM_ASYNC_EXECUTE)
101 {
102 /* Wait for request completion interrupt notification from host */
103 pAsyncCallback (&pHGCMConnect->header, pvAsyncData, u32AsyncData);
104 }
105
106 pConnectInfo->result = pHGCMConnect->header.result;
107
108 if (RT_SUCCESS (pConnectInfo->result))
109 pConnectInfo->u32ClientID = pHGCMConnect->u32ClientID;
110 }
111
112 VbglGRFree (&pHGCMConnect->header.header);
113 }
114
115 return rc;
116}
117
118
119DECLR0VBGL(int) VbglR0HGCMInternalDisconnect (VBoxGuestHGCMDisconnectInfo *pDisconnectInfo,
120 VBGLHGCMCALLBACK *pAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
121{
122 VMMDevHGCMDisconnect *pHGCMDisconnect;
123 int rc;
124
125 if (!pDisconnectInfo || !pAsyncCallback)
126 return VERR_INVALID_PARAMETER;
127
128 pHGCMDisconnect = NULL;
129
130 /* Allocate request */
131 rc = VbglGRAlloc ((VMMDevRequestHeader **)&pHGCMDisconnect, sizeof (VMMDevHGCMDisconnect), VMMDevReq_HGCMDisconnect);
132
133 if (RT_SUCCESS(rc))
134 {
135 /* Initialize request memory */
136 pHGCMDisconnect->header.fu32Flags = 0;
137
138 pHGCMDisconnect->u32ClientID = pDisconnectInfo->u32ClientID;
139
140 /* Issue request */
141 rc = VbglGRPerform (&pHGCMDisconnect->header.header);
142
143 if (RT_SUCCESS(rc))
144 {
145 /* Check if host decides to process the request asynchronously. */
146 if (rc == VINF_HGCM_ASYNC_EXECUTE)
147 {
148 /* Wait for request completion interrupt notification from host */
149 pAsyncCallback (&pHGCMDisconnect->header, pvAsyncData, u32AsyncData);
150 }
151
152 pDisconnectInfo->result = pHGCMDisconnect->header.result;
153 }
154
155 VbglGRFree (&pHGCMDisconnect->header.header);
156 }
157
158 return rc;
159}
160
161
162/**
163 * Preprocesses the HGCM call, validating and locking/buffering parameters.
164 *
165 * @returns VBox status code.
166 *
167 * @param pCallInfo The call info.
168 * @param cbCallInfo The size of the call info structure.
169 * @param fIsUser Is it a user request or kernel request.
170 * @param pcbExtra Where to return the extra request space needed for
171 * physical page lists.
172 */
173static int vbglR0HGCMInternalPreprocessCall(VBoxGuestHGCMCallInfo const *pCallInfo, uint32_t cbCallInfo,
174 bool fIsUser, struct VbglR0ParmInfo *pParmInfo, size_t *pcbExtra)
175{
176 HGCMFunctionParameter const *pSrcParm = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo);
177 uint32_t cParms = pCallInfo->cParms;
178 uint32_t iParm;
179 uint32_t cb;
180
181 /*
182 * Lock down the any linear buffers so we can get their addresses
183 * and figure out how much extra storage we need for page lists.
184 *
185 * Note! With kernel mode users we can be assertive. For user mode users
186 * we should just (debug) log it and fail without any fanfare.
187 */
188 *pcbExtra = 0;
189 pParmInfo->cLockBufs = 0;
190 for (iParm = 0; iParm < cParms; iParm++, pSrcParm++)
191 {
192 switch (pSrcParm->type)
193 {
194 case VMMDevHGCMParmType_32bit:
195 Log4(("GstHGCMCall: parm=%u type=32bit: %#010x\n", iParm, pSrcParm->u.value32));
196 break;
197
198 case VMMDevHGCMParmType_64bit:
199 Log4(("GstHGCMCall: parm=%u type=64bit: %#018x\n", iParm, pSrcParm->u.value64));
200 break;
201
202 case VMMDevHGCMParmType_PageList:
203 if (fIsUser)
204 return VERR_INVALID_PARAMETER;
205 cb = pSrcParm->u.PageList.size;
206 if (cb)
207 {
208 uint32_t off = pSrcParm->u.PageList.offset;
209 HGCMPageListInfo *pPgLst;
210 uint32_t cPages;
211 uint32_t u32;
212
213 AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
214 VERR_OUT_OF_RANGE);
215 AssertMsgReturn( off >= pCallInfo->cParms * sizeof(HGCMFunctionParameter)
216 && off < cbCallInfo - sizeof(HGCMPageListInfo),
217 ("offset=%#x cParms=%#x cbCallInfo=%#x\n", off, pCallInfo->cParms, cbCallInfo),
218 VERR_INVALID_PARAMETER);
219
220 pPgLst = (HGCMPageListInfo *)((uint8_t *)pCallInfo + off);
221 cPages = pPgLst->cPages;
222 u32 = RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]) + off;
223 AssertMsgReturn(u32 <= cbCallInfo,
224 ("u32=%#x (cPages=%#x offset=%#x) cbCallInfo=%#x\n", u32, cPages, off, cbCallInfo),
225 VERR_INVALID_PARAMETER);
226 AssertMsgReturn(pPgLst->offFirstPage < PAGE_SIZE, ("#x\n", pPgLst->offFirstPage), VERR_INVALID_PARAMETER);
227 u32 = RT_ALIGN_32(pPgLst->offFirstPage + cb, PAGE_SIZE) >> PAGE_SHIFT;
228 AssertMsgReturn(cPages == u32, ("cPages=%#x u32=%#x\n", cPages, u32), VERR_INVALID_PARAMETER);
229 AssertMsgReturn(VBOX_HGCM_F_PARM_ARE_VALID(pPgLst->flags), ("%#x\n", pPgLst->flags), VERR_INVALID_PARAMETER);
230 Log4(("GstHGCMCall: parm=%u type=pglst: cb=%#010x cPgs=%u offPg0=%#x flags=%#x\n",
231 iParm, cb, cPages, pPgLst->offFirstPage, pPgLst->flags));
232 u32 = cPages;
233 while (u32-- > 0)
234 {
235 Log4(("GstHGCMCall: pg#%u=%RHp\n", u32, pPgLst->aPages[u32]));
236 AssertMsgReturn(!(pPgLst->aPages[u32] & (PAGE_OFFSET_MASK | UINT64_C(0xfff0000000000000))),
237 ("pg#%u=%RHp\n", u32, pPgLst->aPages[u32]),
238 VERR_INVALID_PARAMETER);
239 }
240
241 *pcbExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[pPgLst->cPages]);
242 }
243 else
244 Log4(("GstHGCMCall: parm=%u type=pglst: cb=0\n", iParm));
245 break;
246
247 case VMMDevHGCMParmType_LinAddr_Locked_In:
248 case VMMDevHGCMParmType_LinAddr_Locked_Out:
249 case VMMDevHGCMParmType_LinAddr_Locked:
250 if (fIsUser)
251 return VERR_INVALID_PARAMETER;
252 if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST())
253 {
254 cb = pSrcParm->u.Pointer.size;
255 AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
256 VERR_OUT_OF_RANGE);
257 if (cb != 0)
258 Log4(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p\n",
259 iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr));
260 else
261 Log4(("GstHGCMCall: parm=%u type=%#x: cb=0\n", iParm, pSrcParm->type));
262 break;
263 }
264 /* fall thru */
265
266 case VMMDevHGCMParmType_LinAddr_In:
267 case VMMDevHGCMParmType_LinAddr_Out:
268 case VMMDevHGCMParmType_LinAddr:
269 cb = pSrcParm->u.Pointer.size;
270 if (cb != 0)
271 {
272#ifdef USE_BOUNCH_BUFFERS
273 void *pvSmallBuf = NULL;
274#endif
275 uint32_t iLockBuf = pParmInfo->cLockBufs;
276 RTR0MEMOBJ hObj;
277 int rc;
278
279 AssertReturn(iLockBuf < RT_ELEMENTS(pParmInfo->aLockBufs), VERR_INVALID_PARAMETER);
280 if (!fIsUser)
281 {
282 AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
283 VERR_OUT_OF_RANGE);
284 rc = RTR0MemObjLockKernel(&hObj, (void *)pSrcParm->u.Pointer.u.linearAddr, cb);
285 if (RT_FAILURE(rc))
286 {
287 Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemObjLockKernel(,%p,%#x) -> %Rrc\n",
288 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr, cb, rc));
289 return rc;
290 }
291 Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p locked kernel -> %p\n",
292 iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
293 }
294 else
295 {
296 if (cb > VBGLR0_MAX_HGCM_USER_PARM)
297 {
298 Log(("GstHGCMCall: id=%#x fn=%u parm=%u pv=%p cb=%#x > %#x -> out of range\n",
299 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr,
300 cb, VBGLR0_MAX_HGCM_USER_PARM));
301 return VERR_OUT_OF_RANGE;
302 }
303
304#ifndef USE_BOUNCH_BUFFERS
305 rc = RTR0MemObjLockUser(&hObj, (RTR3PTR)pSrcParm->u.Pointer.u.linearAddr, cb, NIL_RTR0PROCESS);
306 if (RT_FAILURE(rc))
307 {
308 Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemObjLockUser(,%p,%#x,nil) -> %Rrc\n",
309 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr, cb, rc));
310 return rc;
311 }
312 Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p locked user -> %p\n",
313 iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
314
315#else /* USE_BOUNCH_BUFFERS */
316 /*
317 * This is a bit massive, but we don't want to waste a
318 * whole page for a 3 byte string buffer (guest props).
319 *
320 * The threshold is ASSUMING sizeof(RTMEMHDR) == 16 and
321 * the system is using some power of two allocator.
322 */
323 /** @todo A more efficient strategy would be to combine buffers. However it
324 * is probably going to be more massive than the current code, so
325 * it can wait till later. */
326 bool fCopyIn = pSrcParm->type != VMMDevHGCMParmType_LinAddr_Out
327 && pSrcParm->type != VMMDevHGCMParmType_LinAddr_Locked_Out;
328 if (cb <= PAGE_SIZE / 2 - 16)
329 {
330 pvSmallBuf = fCopyIn ? RTMemTmpAlloc(cb) : RTMemTmpAllocZ(cb);
331 if (RT_UNLIKELY(!pvSmallBuf))
332 return VERR_NO_MEMORY;
333 if (fCopyIn)
334 {
335 rc = RTR0MemUserCopyFrom(pvSmallBuf, pSrcParm->u.Pointer.u.linearAddr, cb);
336 if (RT_FAILURE(rc))
337 {
338 RTMemTmpFree(pvSmallBuf);
339 Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemUserCopyFrom(,%p,%#x) -> %Rrc\n",
340 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm,
341 pSrcParm->u.Pointer.u.linearAddr, cb, rc));
342 return rc;
343 }
344 }
345 rc = RTR0MemObjLockKernel(&hObj, pvSmallBuf, cb);
346 if (RT_FAILURE(rc))
347 {
348 RTMemTmpFree(pvSmallBuf);
349 Log(("GstHGCMCall: RTR0MemObjLockKernel failed for small buffer: rc=%Rrc pvSmallBuf=%p cb=%#x\n",
350 rc, pvSmallBuf, cb));
351 return rc;
352 }
353 Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p small buffer %p -> %p\n",
354 iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, pvSmallBuf, hObj));
355 }
356 else
357 {
358 rc = RTR0MemObjAllocPage(&hObj, cb, false /*fExecutable*/);
359 if (RT_FAILURE(rc))
360 return rc;
361 if (!fCopyIn)
362 memset(RTR0MemObjAddress(hObj), '\0', cb);
363 else
364 {
365 rc = RTR0MemUserCopyFrom(RTR0MemObjAddress(hObj), pSrcParm->u.Pointer.u.linearAddr, cb);
366 if (RT_FAILURE(rc))
367 {
368 RTR0MemObjFree(hObj, false /*fFreeMappings*/);
369 Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemUserCopyFrom(,%p,%#x) -> %Rrc\n",
370 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm,
371 pSrcParm->u.Pointer.u.linearAddr, cb, rc));
372 return rc;
373 }
374 }
375 Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p big buffer -> %p\n",
376 iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
377 }
378#endif /* USE_BOUNCH_BUFFERS */
379 }
380
381 pParmInfo->aLockBufs[iLockBuf].iParm = iParm;
382 pParmInfo->aLockBufs[iLockBuf].hObj = hObj;
383#ifdef USE_BOUNCH_BUFFERS
384 pParmInfo->aLockBufs[iLockBuf].pvSmallBuf = pvSmallBuf;
385#endif
386 pParmInfo->cLockBufs = iLockBuf + 1;
387
388 if (VBGLR0_CAN_USE_PHYS_PAGE_LIST())
389 {
390 size_t cPages = RTR0MemObjSize(hObj);
391 *pcbExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]);
392 }
393 }
394 else
395 Log4(("GstHGCMCall: parm=%u type=%#x: cb=0\n", iParm, pSrcParm->type));
396 break;
397
398 default:
399 return VERR_INVALID_PARAMETER;
400 }
401 }
402
403 return VINF_SUCCESS;
404}
405
406
407/**
408 * Translates locked linear address to the normal type.
409 * The locked types are only for the guest side and not handled by the host.
410 *
411 * @returns normal linear address type.
412 * @param enmType The type.
413 */
414static HGCMFunctionParameterType vbglR0HGCMInternalConvertLinAddrType(HGCMFunctionParameterType enmType)
415{
416 switch (enmType)
417 {
418 case VMMDevHGCMParmType_LinAddr_Locked_In:
419 return VMMDevHGCMParmType_LinAddr_In;
420 case VMMDevHGCMParmType_LinAddr_Locked_Out:
421 return VMMDevHGCMParmType_LinAddr_Out;
422 case VMMDevHGCMParmType_LinAddr_Locked:
423 return VMMDevHGCMParmType_LinAddr;
424 default:
425 return enmType;
426 }
427}
428
429
430/**
431 * Translates linear address types to page list direction flags.
432 *
433 * @returns page list flags.
434 * @param enmType The type.
435 */
436static uint32_t vbglR0HGCMInternalLinAddrTypeToPageListFlags(HGCMFunctionParameterType enmType)
437{
438 switch (enmType)
439 {
440 case VMMDevHGCMParmType_LinAddr_In:
441 case VMMDevHGCMParmType_LinAddr_Locked_In:
442 return VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
443
444 case VMMDevHGCMParmType_LinAddr_Out:
445 case VMMDevHGCMParmType_LinAddr_Locked_Out:
446 return VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;
447
448 default: AssertFailed();
449 case VMMDevHGCMParmType_LinAddr:
450 case VMMDevHGCMParmType_LinAddr_Locked:
451 return VBOX_HGCM_F_PARM_DIRECTION_BOTH;
452 }
453}
454
455
/**
 * Initializes the call request that we're sending to the host.
 *
 * Copies the caller's parameters into the request, converting locked and
 * plain linear addresses into physical page lists where supported, using the
 * memory objects / bounce buffers recorded by the preprocessing pass.
 *
 * @param   pHGCMCall       The request to initialize (allocated large enough
 *                          to hold the parameters plus the extra page list
 *                          space computed by the preprocessing pass).
 * @param   pCallInfo       The call info.
 * @param   cbCallInfo      The size of the call info structure (currently
 *                          unused here; validated by the preprocessing pass).
 * @param   fIsUser         Is it a user request or kernel request.
 * @param   pParmInfo       Lock/bounce buffer info from the preprocessing pass.
 */
static void vbglR0HGCMInternalInitCall(VMMDevHGCMCall *pHGCMCall, VBoxGuestHGCMCallInfo const *pCallInfo,
                                       uint32_t cbCallInfo, bool fIsUser, struct VbglR0ParmInfo *pParmInfo)
{
    HGCMFunctionParameter const *pSrcParm = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo);
    HGCMFunctionParameter *pDstParm = VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
    uint32_t cParms = pCallInfo->cParms;
    /* Extra data (page lists) is appended after the destination parameter array. */
    uint32_t offExtra = (uintptr_t)(pDstParm + cParms) - (uintptr_t)pHGCMCall;
    uint32_t iLockBuf = 0;
    uint32_t iParm;


    /*
     * The call request headers.
     */
    pHGCMCall->header.fu32Flags = 0;
    pHGCMCall->header.result = VINF_SUCCESS;

    pHGCMCall->u32ClientID = pCallInfo->u32ClientID;
    pHGCMCall->u32Function = pCallInfo->u32Function;
    pHGCMCall->cParms = cParms;

    /*
     * The parameters.
     */
    for (iParm = 0; iParm < pCallInfo->cParms; iParm++, pSrcParm++, pDstParm++)
    {
        switch (pSrcParm->type)
        {
            case VMMDevHGCMParmType_32bit:
            case VMMDevHGCMParmType_64bit:
                *pDstParm = *pSrcParm;
                break;

            case VMMDevHGCMParmType_PageList:
                /* Caller-supplied page list: copy it into the extra space. */
                pDstParm->type = VMMDevHGCMParmType_PageList;
                pDstParm->u.PageList.size = pSrcParm->u.PageList.size;
                if (pSrcParm->u.PageList.size)
                {
                    HGCMPageListInfo const *pSrcPgLst = (HGCMPageListInfo *)((uint8_t *)pCallInfo + pSrcParm->u.PageList.offset);
                    HGCMPageListInfo *pDstPgLst = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offExtra);
                    uint32_t const cPages = pSrcPgLst->cPages;
                    uint32_t iPage;

                    pDstParm->u.PageList.offset = offExtra;
                    pDstPgLst->flags = pSrcPgLst->flags;
                    pDstPgLst->offFirstPage = pSrcPgLst->offFirstPage;
                    pDstPgLst->cPages = cPages;
                    for (iPage = 0; iPage < cPages; iPage++)
                        pDstPgLst->aPages[iPage] = pSrcPgLst->aPages[iPage];

                    offExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]);
                }
                else
                    pDstParm->u.PageList.offset = 0;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST())
                {
                    /* No page list support: pass the locked address through unchanged. */
                    *pDstParm = *pSrcParm;
                    break;
                }
                /* fall thru */

            case VMMDevHGCMParmType_LinAddr_In:
            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
                if (pSrcParm->u.Pointer.size != 0)
                {
#ifdef USE_BOUNCH_BUFFERS
                    void *pvSmallBuf = pParmInfo->aLockBufs[iLockBuf].pvSmallBuf;
#endif
                    RTR0MEMOBJ hObj = pParmInfo->aLockBufs[iLockBuf].hObj;
                    Assert(iParm == pParmInfo->aLockBufs[iLockBuf].iParm);

                    if (VBGLR0_CAN_USE_PHYS_PAGE_LIST())
                    {
                        /* Convert the locked buffer into a physical page list. */
                        HGCMPageListInfo *pDstPgLst = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offExtra);
                        size_t const cPages = RTR0MemObjSize(hObj) >> PAGE_SHIFT;
                        size_t iPage;

                        pDstParm->type = VMMDevHGCMParmType_PageList;
                        pDstParm->u.PageList.size = pSrcParm->u.Pointer.size;
                        pDstParm->u.PageList.offset = offExtra;
                        pDstPgLst->flags = vbglR0HGCMInternalLinAddrTypeToPageListFlags(pSrcParm->type);
#ifdef USE_BOUNCH_BUFFERS
                        if (fIsUser)
                            /* User data lives in the bounce buffer, not the caller's address. */
                            pDstPgLst->offFirstPage = (uintptr_t)pvSmallBuf & PAGE_OFFSET_MASK;
                        else
#endif
                            pDstPgLst->offFirstPage = pSrcParm->u.Pointer.u.linearAddr & PAGE_OFFSET_MASK;
                        pDstPgLst->cPages = cPages; Assert(pDstPgLst->cPages == cPages);
                        for (iPage = 0; iPage < cPages; iPage++)
                        {
                            pDstPgLst->aPages[iPage] = RTR0MemObjGetPagePhysAddr(hObj, iPage);
                            Assert(pDstPgLst->aPages[iPage] != NIL_RTHCPHYS);
                        }

                        offExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]);
                    }
                    else
                    {
                        /* Plain linear address; with bounce buffers, substitute the kernel copy. */
                        pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                        pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
#ifdef USE_BOUNCH_BUFFERS
                        if (fIsUser)
                            pDstParm->u.Pointer.u.linearAddr = pvSmallBuf
                                                             ? (uintptr_t)pvSmallBuf
                                                             : (uintptr_t)RTR0MemObjAddress(hObj);
                        else
#endif
                            pDstParm->u.Pointer.u.linearAddr = pSrcParm->u.Pointer.u.linearAddr;
                    }
                    iLockBuf++;
                }
                else
                {
                    /* Zero-sized buffers are passed as empty linear addresses. */
                    pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                    pDstParm->u.Pointer.size = 0;
                    pDstParm->u.Pointer.u.linearAddr = 0;
                }
                break;

            default:
                AssertFailed();
                pDstParm->type = VMMDevHGCMParmType_Invalid;
                break;
        }
    }
}
599
600
/**
 * Performs the call and completion wait.
 *
 * @returns VBox status code of this operation, not necessarily the call.
 *
 * @param   pHGCMCall           The HGCM call info.
 * @param   pfnAsyncCallback    The async callback that will wait for the call
 *                              to complete.
 * @param   pvAsyncData         Argument for the callback.
 * @param   u32AsyncData        Argument for the callback.
 */
static int vbglR0HGCMInternalDoCall(VMMDevHGCMCall *pHGCMCall, VBGLHGCMCALLBACK *pfnAsyncCallback,
                                    void *pvAsyncData, uint32_t u32AsyncData)
{
    int rc;

    Log(("calling VbglGRPerform\n"));
    rc = VbglGRPerform(&pHGCMCall->header.header);
    Log(("VbglGRPerform rc = %Rrc (header rc=%d)\n", rc, pHGCMCall->header.result));

    /*
     * If the call failed, but as a result of the request itself, then pretend
     * success. Upper layers will interpret the result code in the packet.
     * (Detected by the transport status and the packet result being the same
     * failure code - the request was delivered and completed with that status.)
     */
    if (   RT_FAILURE(rc)
        && rc == pHGCMCall->header.result)
    {
        Assert(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE);
        rc = VINF_SUCCESS;
    }

    /*
     * Check if host decides to process the request asynchronously,
     * if so, we wait for it to complete using the caller supplied callback.
     */
    if (   RT_SUCCESS(rc)
        && rc == VINF_HGCM_ASYNC_EXECUTE)
    {
        Log(("Processing HGCM call asynchronously\n"));
        /** @todo timeout vs. interrupted. */
        pfnAsyncCallback(&pHGCMCall->header, pvAsyncData, u32AsyncData);

        /*
         * If the request isn't completed by the time the callback returns
         * we will have to try cancel it.
         */
        if (!(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE))
        {
            /** @todo use a new request for this! See @bugref{4052}. */
            pHGCMCall->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;
            pHGCMCall->header.header.requestType = VMMDevReq_HGCMCancel;
            VbglGRPerform(&pHGCMCall->header.header);
            /* Report the wait as interrupted regardless of the cancel outcome. */
            rc = VERR_INTERRUPTED;
        }
    }

    Log(("GstHGCMCall: rc=%Rrc result=%Rrc fu32Flags=%#x\n", rc, pHGCMCall->header.result, pHGCMCall->header.fu32Flags));
    return rc;
}
660
661
/**
 * Copies the result of the call back to the caller info structure and user
 * buffers (if using bounce buffers).
 *
 * @returns rc, unless RTR0MemUserCopyTo fails.
 * @param   pCallInfo           Call info structure to update.
 * @param   pHGCMCall           HGCM call request.
 * @param   pParmInfo           Parameter locking/buffering info.
 * @param   fIsUser             Is it a user (true) or kernel request.
 * @param   rc                  The current result code. Passed along to
 *                              preserve informational status codes.
 */
static int vbglR0HGCMInternalCopyBackResult(VBoxGuestHGCMCallInfo *pCallInfo, VMMDevHGCMCall const *pHGCMCall,
                                            struct VbglR0ParmInfo *pParmInfo, bool fIsUser, int rc)
{
    HGCMFunctionParameter const *pSrcParm = VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
    HGCMFunctionParameter *pDstParm = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo);
    uint32_t cParms = pCallInfo->cParms;
#ifdef USE_BOUNCH_BUFFERS
    /* Walks pParmInfo->aLockBufs in step with the parameters that consumed one. */
    uint32_t iLockBuf = 0;
#endif
    uint32_t iParm;

    /*
     * The call result.
     */
    pCallInfo->result = pHGCMCall->header.result;

    /*
     * Copy back parameters.
     */
    for (iParm = 0; iParm < pCallInfo->cParms; iParm++, pSrcParm++, pDstParm++)
    {
        switch (pDstParm->type)
        {
            case VMMDevHGCMParmType_32bit:
            case VMMDevHGCMParmType_64bit:
                *pDstParm = *pSrcParm;
                break;

            case VMMDevHGCMParmType_PageList:
                /* Only the size can change; the page list itself is input-only. */
                pDstParm->u.PageList.size = pSrcParm->u.PageList.size;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_In:
#ifdef USE_BOUNCH_BUFFERS
                /* Input-only data needs no copy back, but its bounce buffer
                   entry (if it had one) must still be skipped over. */
                if (   fIsUser
                    && iLockBuf < pParmInfo->cLockBufs
                    && iParm == pParmInfo->aLockBufs[iLockBuf].iParm)
                    iLockBuf++;
#endif
                pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST())
                {
                    /* Locked buffers were passed through directly; just mirror the size. */
                    pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                    break;
                }
                /* fall thru */

            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
            {
#ifdef USE_BOUNCH_BUFFERS
                /* Copy the data the host wrote into the bounce buffer back
                   to the caller's user-mode buffer. */
                if (fIsUser)
                {
                    size_t cbOut = RT_MIN(pSrcParm->u.Pointer.size, pDstParm->u.Pointer.size);
                    if (cbOut)
                    {
                        Assert(pParmInfo->aLockBufs[iLockBuf].iParm == iParm);
                        int rc2 = RTR0MemUserCopyTo((RTR3PTR)pDstParm->u.Pointer.u.linearAddr,
                                                    pParmInfo->aLockBufs[iLockBuf].pvSmallBuf
                                                    ? pParmInfo->aLockBufs[iLockBuf].pvSmallBuf
                                                    : RTR0MemObjAddress(pParmInfo->aLockBufs[iLockBuf].hObj),
                                                    cbOut);
                        if (RT_FAILURE(rc2))
                            return rc2;
                        iLockBuf++;
                    }
                    else if (   iLockBuf < pParmInfo->cLockBufs
                             && iParm == pParmInfo->aLockBufs[iLockBuf].iParm)
                        iLockBuf++;
                }
#endif
                pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                break;
            }

            default:
                AssertFailed();
                rc = VERR_INTERNAL_ERROR_4;
                break;
        }
    }

#ifdef USE_BOUNCH_BUFFERS
    Assert(!fIsUser || pParmInfo->cLockBufs == iLockBuf);
#endif
    return rc;
}
766
767
768DECLR0VBGL(int) VbglR0HGCMInternalCall(VBoxGuestHGCMCallInfo *pCallInfo, uint32_t cbCallInfo, uint32_t fFlags,
769 VBGLHGCMCALLBACK *pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
770{
771 bool fIsUser = (fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_USER;
772 struct VbglR0ParmInfo ParmInfo;
773 size_t cbExtra;
774 int rc;
775
776 /*
777 * Basic validation.
778 */
779 AssertMsgReturn( !pCallInfo
780 || !pfnAsyncCallback
781 || pCallInfo->cParms > VBOX_HGCM_MAX_PARMS
782 || !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
783 ("pCallInfo=%p pfnAsyncCallback=%p fFlags=%#x\n", pCallInfo, pfnAsyncCallback, fFlags),
784 VERR_INVALID_PARAMETER);
785 AssertReturn( cbCallInfo >= sizeof(VBoxGuestHGCMCallInfo)
786 || cbCallInfo >= pCallInfo->cParms * sizeof(HGCMFunctionParameter),
787 VERR_INVALID_PARAMETER);
788
789 Log(("GstHGCMCall: u32ClientID=%#x u32Function=%u cParms=%u cbCallInfo=%#x fFlags=%#x\n",
790 pCallInfo->u32ClientID, pCallInfo->u32ClientID, pCallInfo->u32Function, pCallInfo->cParms, cbCallInfo, fFlags));
791
792 /*
793 * Validate, lock and buffer the parameters for the call.
794 * This will calculate the amount of extra space for physical page list.
795 */
796 rc = vbglR0HGCMInternalPreprocessCall(pCallInfo, cbCallInfo, fIsUser, &ParmInfo, &cbExtra);
797 if (RT_SUCCESS(rc))
798 {
799 /*
800 * Allocate the request buffer and recreate the call request.
801 */
802 VMMDevHGCMCall *pHGCMCall;
803 rc = VbglGRAlloc((VMMDevRequestHeader **)&pHGCMCall,
804 sizeof(VMMDevHGCMCall) + pCallInfo->cParms * sizeof(HGCMFunctionParameter) + cbExtra,
805 VMMDevReq_HGCMCall);
806 if (RT_SUCCESS(rc))
807 {
808 vbglR0HGCMInternalInitCall(pHGCMCall, pCallInfo, cbCallInfo, fIsUser, &ParmInfo);
809
810 /*
811 * Perform the call.
812 */
813 rc = vbglR0HGCMInternalDoCall(pHGCMCall, pfnAsyncCallback, pvAsyncData, u32AsyncData);
814 if (RT_SUCCESS(rc))
815 {
816 /*
817 * Copy back the result (parameters and buffers that changed).
818 */
819 rc = vbglR0HGCMInternalCopyBackResult(pCallInfo, pHGCMCall, &ParmInfo, fIsUser, rc);
820 }
821
822 VbglGRFree(&pHGCMCall->header.header);
823 }
824 }
825
826 /*
827 * Release locks and free bounce buffers.
828 */
829 if (ParmInfo.cLockBufs)
830 while (ParmInfo.cLockBufs-- > 0)
831 {
832 RTR0MemObjFree(ParmInfo.aLockBufs[ParmInfo.cLockBufs].hObj, false /*fFreeMappings*/);
833#ifdef USE_BOUNCH_BUFFERS
834 RTMemTmpFree(ParmInfo.aLockBufs[ParmInfo.cLockBufs].pvSmallBuf);
835#endif
836 }
837
838 return rc;
839}
840
841
842#if ARCH_BITS == 64
843DECLR0VBGL(int) VbglR0HGCMInternalCall32(VBoxGuestHGCMCallInfo *pCallInfo, uint32_t cbCallInfo, uint32_t fFlags,
844 VBGLHGCMCALLBACK *pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
845{
846 VBoxGuestHGCMCallInfo *pCallInfo64;
847 HGCMFunctionParameter *pParm64;
848 HGCMFunctionParameter32 *pParm32;
849 uint32_t cParms;
850 uint32_t iParm;
851 int rc;
852
853 /*
854 * Input validation.
855 */
856 AssertMsgReturn( !pCallInfo
857 || !pfnAsyncCallback
858 || pCallInfo->cParms > VBOX_HGCM_MAX_PARMS
859 || !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
860 ("pCallInfo=%p pAsyncCallback=%p fFlags=%#x\n", pCallInfo, pfnAsyncCallback, fFlags),
861 VERR_INVALID_PARAMETER);
862 AssertReturn( cbCallInfo >= sizeof(VBoxGuestHGCMCallInfo)
863 || cbCallInfo >= pCallInfo->cParms * sizeof(HGCMFunctionParameter32),
864 VERR_INVALID_PARAMETER);
865 AssertReturn((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_KERNEL, VERR_INVALID_PARAMETER);
866
867 cParms = pCallInfo->cParms;
868 Log(("VbglR0HGCMInternalCall32: cParms=%d, u32Function=%d, fFlags=%#x\n", cParms, pCallInfo->u32Function, fFlags));
869
870 /*
871 * The simple approach, allocate a temporary request and convert the parameters.
872 */
873 pCallInfo64 = (VBoxGuestHGCMCallInfo *)RTMemTmpAllocZ(sizeof(*pCallInfo64) + cParms * sizeof(HGCMFunctionParameter));
874 if (!pCallInfo64)
875 return VERR_NO_TMP_MEMORY;
876
877 *pCallInfo64 = *pCallInfo;
878 pParm32 = VBOXGUEST_HGCM_CALL_PARMS32(pCallInfo);
879 pParm64 = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo64);
880 for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++)
881 {
882 switch (pParm32->type)
883 {
884 case VMMDevHGCMParmType_32bit:
885 pParm64->type = VMMDevHGCMParmType_32bit;
886 pParm64->u.value32 = pParm32->u.value32;
887 break;
888
889 case VMMDevHGCMParmType_64bit:
890 pParm64->type = VMMDevHGCMParmType_64bit;
891 pParm64->u.value64 = pParm32->u.value64;
892 break;
893
894 case VMMDevHGCMParmType_LinAddr_Out:
895 case VMMDevHGCMParmType_LinAddr:
896 case VMMDevHGCMParmType_LinAddr_In:
897 pParm64->type = pParm32->type;
898 pParm64->u.Pointer.size = pParm32->u.Pointer.size;
899 pParm64->u.Pointer.u.linearAddr = pParm32->u.Pointer.u.linearAddr;
900 break;
901
902 default:
903 rc = VERR_INVALID_PARAMETER;
904 break;
905 }
906 if (RT_FAILURE(rc))
907 break;
908 }
909 if (RT_SUCCESS(rc))
910 {
911 rc = VbglR0HGCMInternalCall(pCallInfo64, sizeof(*pCallInfo64) + cParms * sizeof(HGCMFunctionParameter), fFlags,
912 pfnAsyncCallback, pvAsyncData, u32AsyncData);
913
914 /*
915 * Copy back.
916 */
917 for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++)
918 {
919 switch (pParm32->type)
920 {
921 case VMMDevHGCMParmType_32bit:
922 pParm32->u.value32 = pParm32->u.value32;
923 break;
924
925 case VMMDevHGCMParmType_64bit:
926 pParm32->u.value64 = pParm64->u.value64;
927 break;
928
929 case VMMDevHGCMParmType_LinAddr_Out:
930 case VMMDevHGCMParmType_LinAddr:
931 case VMMDevHGCMParmType_LinAddr_In:
932 pParm32->u.Pointer.size = pParm64->u.Pointer.size;
933 break;
934
935 default:
936 rc = VERR_INTERNAL_ERROR_3;
937 break;
938 }
939 }
940 *pCallInfo = *pCallInfo64;
941 }
942
943 RTMemTmpFree(pCallInfo64);
944 return rc;
945}
946#endif /* ARCH_BITS == 64 */
947
948#endif /* VBGL_VBOXGUEST */
949
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette