VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuestLib/HGCMInternal.cpp@ 21268

Last change on this file since 21268 was 21268, checked in by vboxsync, 16 years ago

HGCMInternal.cpp,SysHlp.cpp: use the kernel/user mode indicator.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 19.5 KB
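The change referenced above threads a kernel/user mode indicator through these functions: callers pass it in the fFlags argument of VbglR0HGCMInternalCall, and the parameter-locking code rejects physical-address and pre-locked buffer types when the caller is in user mode. A minimal caller-side sketch, assuming the VBGLR0_HGCMCALL_F_KERNEL and VBGLR0_HGCMCALL_F_USER constants from VBoxGuestLib.h (the wrapper name is hypothetical):

/* Hypothetical wrapper, not part of HGCMInternal.cpp: picks the mode flag for an HGCM call. */
static int vboxguestSampleCall (VBoxGuestHGCMCallInfo *pCallInfo, bool fUserMode,
                                VBGLHGCMCALLBACK *pfnWait, void *pvWaitCtx)
{
    /* The flag tells VbglR0HGCMInternalCall whether the linear addresses in the
       parameters belong to the calling ring-3 process or to the kernel. */
    uint32_t fFlags = fUserMode ? VBGLR0_HGCMCALL_F_USER : VBGLR0_HGCMCALL_F_KERNEL;
    return VbglR0HGCMInternalCall (pCallInfo, fFlags, pfnWait, pvWaitCtx, 0 /* u32AsyncData */);
}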
/* $Revision: 21268 $ */
/** @file
 * VBoxGuestLib - Host-Guest Communication Manager internal functions, implemented by VBoxGuest
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/* Entire file is ifdef'ed with VBGL_VBOXGUEST */
#ifdef VBGL_VBOXGUEST

#include "VBGLInternal.h"
#include <iprt/string.h>
#include <iprt/assert.h>
#include <iprt/alloca.h>

/* These functions can only be used by VBoxGuest. */

DECLVBGL(int) VbglR0HGCMInternalConnect (VBoxGuestHGCMConnectInfo *pConnectInfo,
                                         VBGLHGCMCALLBACK *pAsyncCallback, void *pvAsyncData,
                                         uint32_t u32AsyncData)
{
    VMMDevHGCMConnect *pHGCMConnect;
    int rc;

    if (!pConnectInfo || !pAsyncCallback)
        return VERR_INVALID_PARAMETER;

    pHGCMConnect = NULL;

    /* Allocate request */
    rc = VbglGRAlloc ((VMMDevRequestHeader **)&pHGCMConnect, sizeof (VMMDevHGCMConnect), VMMDevReq_HGCMConnect);

    if (RT_SUCCESS(rc))
    {
        /* Initialize request memory */
        pHGCMConnect->header.fu32Flags = 0;

        memcpy (&pHGCMConnect->loc, &pConnectInfo->Loc, sizeof (HGCMServiceLocation));
        pHGCMConnect->u32ClientID = 0;

        /* Issue request */
        rc = VbglGRPerform (&pHGCMConnect->header.header);

        if (RT_SUCCESS(rc))
        {
            /* Check if host decides to process the request asynchronously. */
            if (rc == VINF_HGCM_ASYNC_EXECUTE)
            {
                /* Wait for request completion interrupt notification from host */
                pAsyncCallback (&pHGCMConnect->header, pvAsyncData, u32AsyncData);
            }

            pConnectInfo->result = pHGCMConnect->header.result;

            if (RT_SUCCESS (pConnectInfo->result))
                pConnectInfo->u32ClientID = pHGCMConnect->u32ClientID;
        }

        VbglGRFree (&pHGCMConnect->header.header);
    }

    return rc;
}


DECLR0VBGL(int) VbglR0HGCMInternalDisconnect (VBoxGuestHGCMDisconnectInfo *pDisconnectInfo,
                                              VBGLHGCMCALLBACK *pAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
{
    VMMDevHGCMDisconnect *pHGCMDisconnect;
    int rc;

    if (!pDisconnectInfo || !pAsyncCallback)
        return VERR_INVALID_PARAMETER;

    pHGCMDisconnect = NULL;

    /* Allocate request */
    rc = VbglGRAlloc ((VMMDevRequestHeader **)&pHGCMDisconnect, sizeof (VMMDevHGCMDisconnect), VMMDevReq_HGCMDisconnect);

    if (RT_SUCCESS(rc))
    {
        /* Initialize request memory */
        pHGCMDisconnect->header.fu32Flags = 0;

        pHGCMDisconnect->u32ClientID = pDisconnectInfo->u32ClientID;

        /* Issue request */
        rc = VbglGRPerform (&pHGCMDisconnect->header.header);

        if (RT_SUCCESS(rc))
        {
            /* Check if host decides to process the request asynchronously. */
            if (rc == VINF_HGCM_ASYNC_EXECUTE)
            {
                /* Wait for request completion interrupt notification from host */
                pAsyncCallback (&pHGCMDisconnect->header, pvAsyncData, u32AsyncData);
            }

            pDisconnectInfo->result = pHGCMDisconnect->header.result;
        }

        VbglGRFree (&pHGCMDisconnect->header.header);
    }

    return rc;
}


/** @todo merge with the one below (use a header file). Too lazy now. */
DECLR0VBGL(int) VbglR0HGCMInternalCall (VBoxGuestHGCMCallInfo *pCallInfo, uint32_t fFlags,
                                        VBGLHGCMCALLBACK *pAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
{
    VMMDevHGCMCall *pHGCMCall;
    uint32_t cbParms;
    HGCMFunctionParameter *pParm;
    unsigned iParm;
    int rc;

    AssertMsgReturn(   pCallInfo
                    && pAsyncCallback
                    && pCallInfo->cParms <= VBOX_HGCM_MAX_PARMS
                    && !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
                    ("pCallInfo=%p pAsyncCallback=%p fFlags=%#x\n", pCallInfo, pAsyncCallback, fFlags),
                    VERR_INVALID_PARAMETER);

    Log (("VbglR0HGCMInternalCall: pCallInfo->cParms = %d, pCallInfo->u32Function = %d, fFlags=%#x\n",
          pCallInfo->cParms, pCallInfo->u32Function, fFlags));

    pHGCMCall = NULL;

    cbParms = pCallInfo->cParms * sizeof (HGCMFunctionParameter);

    /* Allocate request */
    rc = VbglGRAlloc ((VMMDevRequestHeader **)&pHGCMCall, sizeof (VMMDevHGCMCall) + cbParms, VMMDevReq_HGCMCall);

    Log (("VbglR0HGCMInternalCall: Allocated gr %p, rc = %Rrc, cbParms = %d\n", pHGCMCall, rc, cbParms));

    if (RT_SUCCESS(rc))
    {
        void *apvCtx[VBOX_HGCM_MAX_PARMS];
        memset (apvCtx, 0, sizeof(void *) * pCallInfo->cParms);

        /* Initialize request memory */
        pHGCMCall->header.fu32Flags = 0;
        pHGCMCall->header.result = VINF_SUCCESS;

        pHGCMCall->u32ClientID = pCallInfo->u32ClientID;
        pHGCMCall->u32Function = pCallInfo->u32Function;
        pHGCMCall->cParms = pCallInfo->cParms;

        if (cbParms)
        {
            /* Lock user buffers. */
            pParm = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo);

            for (iParm = 0; iParm < pCallInfo->cParms; iParm++, pParm++)
            {
                switch (pParm->type)
                {
                    case VMMDevHGCMParmType_32bit:
                    case VMMDevHGCMParmType_64bit:
                        break;

                    case VMMDevHGCMParmType_PhysAddr:
                        if ((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_USER)
                            rc = VERR_INVALID_PARAMETER;
                        break;

                    case VMMDevHGCMParmType_LinAddr_Locked_In:
                        if ((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_USER)
                            rc = VERR_INVALID_PARAMETER;
                        else
                            pParm->type = VMMDevHGCMParmType_LinAddr_In;
                        break;
                    case VMMDevHGCMParmType_LinAddr_Locked_Out:
                        if ((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_USER)
                            rc = VERR_INVALID_PARAMETER;
                        else
                            pParm->type = VMMDevHGCMParmType_LinAddr_Out;
                        break;
                    case VMMDevHGCMParmType_LinAddr_Locked:
                        if ((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_USER)
                            rc = VERR_INVALID_PARAMETER;
                        else
                            pParm->type = VMMDevHGCMParmType_LinAddr;
                        break;

                    case VMMDevHGCMParmType_LinAddr_In:
                    case VMMDevHGCMParmType_LinAddr_Out:
                    case VMMDevHGCMParmType_LinAddr:
                        /* PORTME: When porting this to Darwin and other systems where the entire kernel isn't mapped
                           into every process, all linear addresses will have to be converted to physical SG lists at
                           this point. Care must also be taken on these guests to not mix kernel and user addresses
                           in HGCM calls, or we'll end up locking the wrong memory. If VMMDev/HGCM gets a linear address
                           it will assume that it's in the current memory context (i.e. use CR3 to translate it).

                           This kind of problem actually applies to some patched Linux kernels too, including older
                           Fedora releases. (The patch is the infamous 4G/4G patch, aka 4g4g, by Ingo Molnar.) */
                        rc = vbglLockLinear (&apvCtx[iParm], (void *)pParm->u.Pointer.u.linearAddr, pParm->u.Pointer.size,
                                             (pParm->type == VMMDevHGCMParmType_LinAddr_In) ? false : true /* write access */,
                                             fFlags);
                        break;

                    default:
                        rc = VERR_INVALID_PARAMETER;
                        break;
                }
                if (RT_FAILURE (rc))
                    break;
            }
            memcpy (VMMDEV_HGCM_CALL_PARMS(pHGCMCall), VBOXGUEST_HGCM_CALL_PARMS(pCallInfo), cbParms);
        }

        /* Check that the parameter locking was ok. */
        if (RT_SUCCESS(rc))
        {
            Log (("calling VbglGRPerform\n"));

            /* Issue request */
            rc = VbglGRPerform (&pHGCMCall->header.header);

            Log (("VbglGRPerform rc = %Rrc (header rc=%d)\n", rc, pHGCMCall->header.result));

            /* If the call failed, but as a result of the request itself, then pretend success.
             * Upper layers will interpret the result code in the packet.
             */
            if (RT_FAILURE(rc) && rc == pHGCMCall->header.result)
            {
                Assert(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE);
                rc = VINF_SUCCESS;
            }

            if (RT_SUCCESS(rc))
            {
                /* Check if host decides to process the request asynchronously. */
                if (rc == VINF_HGCM_ASYNC_EXECUTE)
                {
                    /* Wait for request completion interrupt notification from host */
                    Log (("Processing HGCM call asynchronously\n"));
                    pAsyncCallback (&pHGCMCall->header, pvAsyncData, u32AsyncData);
                }

                if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
                {
                    if (cbParms)
                    {
                        memcpy (VBOXGUEST_HGCM_CALL_PARMS(pCallInfo), VMMDEV_HGCM_CALL_PARMS(pHGCMCall), cbParms);
                    }
                    pCallInfo->result = pHGCMCall->header.result;
                }
                else
                {
                    /* The callback returned without completing the request,
                     * which means the wait was interrupted. That can happen
                     * if the request times out, the system reboots or
                     * VBoxService ends abnormally.
                     *
                     * Cancel the request; the host will not write to the
                     * memory related to the cancelled request.
                     */
                    Log (("Cancelling HGCM call\n"));
                    pHGCMCall->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;

                    pHGCMCall->header.header.requestType = VMMDevReq_HGCMCancel;
                    VbglGRPerform (&pHGCMCall->header.header);
                }
            }
        }

        /* Unlock user buffers. */
        pParm = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo);

        for (iParm = 0; iParm < pCallInfo->cParms; iParm++, pParm++)
        {
            if (   pParm->type == VMMDevHGCMParmType_LinAddr_In
                || pParm->type == VMMDevHGCMParmType_LinAddr_Out
                || pParm->type == VMMDevHGCMParmType_LinAddr)
            {
                if (apvCtx[iParm] != NULL)
                {
                    vbglUnlockLinear (apvCtx[iParm], (void *)pParm->u.Pointer.u.linearAddr, pParm->u.Pointer.size);
                }
            }
            else
                Assert(!apvCtx[iParm]);
        }

        if ((pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_CANCELLED) == 0)
            VbglGRFree (&pHGCMCall->header.header);
        else
            rc = VERR_INTERRUPTED;
    }

    return rc;
}
# if ARCH_BITS == 64
/** @todo merge with the one above (use a header file). Too lazy now. */
DECLR0VBGL(int) VbglR0HGCMInternalCall32 (VBoxGuestHGCMCallInfo *pCallInfo, uint32_t fFlags,
                                          VBGLHGCMCALLBACK *pAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
{
    VMMDevHGCMCall *pHGCMCall;
    uint32_t cbParms;
    HGCMFunctionParameter32 *pParm;
    unsigned iParm;
    int rc;

    AssertMsgReturn(   pCallInfo
                    && pAsyncCallback
                    && pCallInfo->cParms <= VBOX_HGCM_MAX_PARMS
                    && !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
                    ("pCallInfo=%p pAsyncCallback=%p fFlags=%#x\n", pCallInfo, pAsyncCallback, fFlags),
                    VERR_INVALID_PARAMETER);

    Log (("VbglR0HGCMInternalCall32: pCallInfo->cParms = %d, pCallInfo->u32Function = %d, fFlags=%#x\n",
          pCallInfo->cParms, pCallInfo->u32Function, fFlags));

    pHGCMCall = NULL;

    cbParms = pCallInfo->cParms * sizeof (HGCMFunctionParameter32);

    /* Allocate request */
    rc = VbglGRAlloc ((VMMDevRequestHeader **)&pHGCMCall, sizeof (VMMDevHGCMCall) + cbParms, VMMDevReq_HGCMCall32);

    Log (("VbglR0HGCMInternalCall32: Allocated gr %p, rc = %Rrc, cbParms = %d\n", pHGCMCall, rc, cbParms));

    if (RT_SUCCESS(rc))
    {
        void *apvCtx[VBOX_HGCM_MAX_PARMS];
        memset (apvCtx, 0, sizeof(void *) * pCallInfo->cParms);

        /* Initialize request memory */
        pHGCMCall->header.fu32Flags = 0;
        pHGCMCall->header.result = VINF_SUCCESS;

        pHGCMCall->u32ClientID = pCallInfo->u32ClientID;
        pHGCMCall->u32Function = pCallInfo->u32Function;
        pHGCMCall->cParms = pCallInfo->cParms;

        if (cbParms)
        {
            /* Lock user buffers. */
            pParm = VBOXGUEST_HGCM_CALL_PARMS32(pCallInfo);

            for (iParm = 0; iParm < pCallInfo->cParms; iParm++, pParm++)
            {
                switch (pParm->type)
                {
                    case VMMDevHGCMParmType_32bit:
                    case VMMDevHGCMParmType_64bit:
                        break;

                    case VMMDevHGCMParmType_PhysAddr:
                        if ((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_USER)
                            rc = VERR_INVALID_PARAMETER;
                        break;

                    case VMMDevHGCMParmType_LinAddr_Locked_In:
                        if ((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_USER)
                            rc = VERR_INVALID_PARAMETER;
                        else
                            pParm->type = VMMDevHGCMParmType_LinAddr_In;
                        break;
                    case VMMDevHGCMParmType_LinAddr_Locked_Out:
                        if ((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_USER)
                            rc = VERR_INVALID_PARAMETER;
                        else
                            pParm->type = VMMDevHGCMParmType_LinAddr_Out;
                        break;
                    case VMMDevHGCMParmType_LinAddr_Locked:
                        if ((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_USER)
                            rc = VERR_INVALID_PARAMETER;
                        else
                            pParm->type = VMMDevHGCMParmType_LinAddr;
                        break;

                    case VMMDevHGCMParmType_LinAddr_In:
                    case VMMDevHGCMParmType_LinAddr_Out:
                    case VMMDevHGCMParmType_LinAddr:
                        /* PORTME: When porting this to Darwin and other systems where the entire kernel isn't mapped
                           into every process, all linear addresses will have to be converted to physical SG lists at
                           this point. Care must also be taken on these guests to not mix kernel and user addresses
                           in HGCM calls, or we'll end up locking the wrong memory. If VMMDev/HGCM gets a linear address
                           it will assume that it's in the current memory context (i.e. use CR3 to translate it).

                           This kind of problem actually applies to some patched Linux kernels too, including older
                           Fedora releases. (The patch is the infamous 4G/4G patch, aka 4g4g, by Ingo Molnar.) */
                        rc = vbglLockLinear (&apvCtx[iParm], (void *)pParm->u.Pointer.u.linearAddr, pParm->u.Pointer.size,
                                             (pParm->type == VMMDevHGCMParmType_LinAddr_In) ? false : true /* write access */,
                                             fFlags);
                        break;

                    default:
                        rc = VERR_INVALID_PARAMETER;
                        break;
                }
                if (RT_FAILURE (rc))
                    break;
            }
            memcpy (VMMDEV_HGCM_CALL_PARMS32(pHGCMCall), VBOXGUEST_HGCM_CALL_PARMS32(pCallInfo), cbParms);
        }

        /* Check that the parameter locking was ok. */
        if (RT_SUCCESS(rc))
        {
            Log (("calling VbglGRPerform\n"));

            /* Issue request */
            rc = VbglGRPerform (&pHGCMCall->header.header);

            Log (("VbglGRPerform rc = %Rrc (header rc=%d)\n", rc, pHGCMCall->header.result));

            /* If the call failed, but as a result of the request itself, then pretend success.
             * Upper layers will interpret the result code in the packet.
             */
            if (RT_FAILURE(rc) && rc == pHGCMCall->header.result)
            {
                Assert(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE);
                rc = VINF_SUCCESS;
            }

            if (RT_SUCCESS(rc))
            {
                /* Check if host decides to process the request asynchronously. */
                if (rc == VINF_HGCM_ASYNC_EXECUTE)
                {
                    /* Wait for request completion interrupt notification from host */
                    Log (("Processing HGCM call asynchronously\n"));
                    pAsyncCallback (&pHGCMCall->header, pvAsyncData, u32AsyncData);
                }

                if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
                {
                    if (cbParms)
                        memcpy (VBOXGUEST_HGCM_CALL_PARMS32(pCallInfo), VMMDEV_HGCM_CALL_PARMS32(pHGCMCall), cbParms);

                    pCallInfo->result = pHGCMCall->header.result;
                }
                else
                {
                    /* The callback returned without completing the request,
                     * which means the wait was interrupted. That can happen
                     * if the request times out, the system reboots or
                     * VBoxService ends abnormally.
                     *
                     * Cancel the request; the host will not write to the
                     * memory related to the cancelled request.
                     */
                    Log (("Cancelling HGCM call\n"));
                    pHGCMCall->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;

                    pHGCMCall->header.header.requestType = VMMDevReq_HGCMCancel;
                    VbglGRPerform (&pHGCMCall->header.header);
                }
            }
        }

        /* Unlock user buffers. */
        pParm = VBOXGUEST_HGCM_CALL_PARMS32(pCallInfo);

        for (iParm = 0; iParm < pCallInfo->cParms; iParm++, pParm++)
        {
            if (   pParm->type == VMMDevHGCMParmType_LinAddr_In
                || pParm->type == VMMDevHGCMParmType_LinAddr_Out
                || pParm->type == VMMDevHGCMParmType_LinAddr)
            {
                if (apvCtx[iParm] != NULL)
                {
                    vbglUnlockLinear (apvCtx[iParm], (void *)pParm->u.Pointer.u.linearAddr, pParm->u.Pointer.size);
                }
            }
            else
                Assert(!apvCtx[iParm]);
        }

        if ((pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_CANCELLED) == 0)
            VbglGRFree (&pHGCMCall->header.header);
        else
            rc = VERR_INTERRUPTED;
    }

    return rc;
}
# endif /* ARCH_BITS == 64 */

#endif /* VBGL_VBOXGUEST */

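The three entry points above are normally used by VBoxGuest as a connect / call / disconnect sequence. The sketch below is illustrative only: it assumes the VBoxGuestHGCMConnectInfo, VBoxGuestHGCMCallInfo and VBoxGuestHGCMDisconnectInfo layouts referenced in this file, the VMMDevHGCMLoc_LocalHost_Existing service-location type, and a caller-supplied VBGLHGCMCALLBACK that blocks until VBOX_HGCM_REQ_DONE is set; error handling is trimmed and the service/function names are placeholders.

/* Illustrative sequence only; not part of this file. */
static int vboxguestSampleSession (VBGLHGCMCALLBACK *pfnWait, void *pvWaitCtx)
{
    VBoxGuestHGCMConnectInfo    ConnInfo;
    VBoxGuestHGCMDisconnectInfo DiscInfo;
    VBoxGuestHGCMCallInfo       CallInfo;
    int rc;

    /* Connect to a host service by name (assumed service location fields). */
    memset (&ConnInfo, 0, sizeof (ConnInfo));
    ConnInfo.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
    strcpy (ConnInfo.Loc.u.host.achName, "VBoxSharedFolders");
    rc = VbglR0HGCMInternalConnect (&ConnInfo, pfnWait, pvWaitCtx, 0);
    if (RT_FAILURE(rc) || RT_FAILURE(ConnInfo.result))
        return RT_FAILURE(rc) ? rc : ConnInfo.result;

    /* A parameterless call; real callers append cParms HGCMFunctionParameter entries
       right after the VBoxGuestHGCMCallInfo header (see VBOXGUEST_HGCM_CALL_PARMS). */
    memset (&CallInfo, 0, sizeof (CallInfo));
    CallInfo.u32ClientID = ConnInfo.u32ClientID;
    CallInfo.u32Function = 1;  /* placeholder function number */
    CallInfo.cParms      = 0;
    rc = VbglR0HGCMInternalCall (&CallInfo, VBGLR0_HGCMCALL_F_KERNEL, pfnWait, pvWaitCtx, 0);

    /* Always release the client ID obtained above. */
    memset (&DiscInfo, 0, sizeof (DiscInfo));
    DiscInfo.u32ClientID = ConnInfo.u32ClientID;
    VbglR0HGCMInternalDisconnect (&DiscInfo, pfnWait, pvWaitCtx, 0);
    return rc;
}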