VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuestLib/HGCMInternal.cpp@ 26124

Last change on this file since 26124 was 26124, checked in by vboxsync, 15 years ago

Additions/VBoxGuest,VBoxGuestLib: fixed HGCM 32-bit calls in 64-bit kernel driver. OpenGL works again.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 43.1 KB
Line 
1/* $Revision: 26124 $ */
2/** @file
3 * VBoxGuestLib - Host-Guest Communication Manager internal functions, implemented by VBoxGuest
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/* Entire file is ifdef'ed with VBGL_VBOXGUEST */
23#ifdef VBGL_VBOXGUEST
24
25/*******************************************************************************
26* Header Files *
27*******************************************************************************/
28#include "VBGLInternal.h"
29#include <iprt/alloca.h>
30#include <iprt/asm.h>
31#include <iprt/assert.h>
32#include <iprt/mem.h>
33#include <iprt/memobj.h>
34#include <iprt/string.h>
35#include <iprt/thread.h>
36#include <iprt/time.h>
37
38
39/*******************************************************************************
40* Defined Constants And Macros *
41*******************************************************************************/
42/** The max parameter buffer size for a user request. */
43#define VBGLR0_MAX_HGCM_USER_PARM (16*_1M)
44/** The max parameter buffer size for a kernel request. */
45#define VBGLR0_MAX_HGCM_KERNEL_PARM (16*_1M)
46#ifdef RT_OS_LINUX
47/** Linux needs to use bounce buffers since RTR0MemObjLockUser has unwanted
48 * side effects. */
49# define USE_BOUNCE_BUFFERS
50#endif
51
52
53/*******************************************************************************
54* Structures and Typedefs *
55*******************************************************************************/
/**
 * Lock info structure used by VbglR0HGCMInternalCall and its helpers.
 *
 * Tracks the memory objects (and, with bounce buffers, the small temporary
 * buffers) created while preprocessing a call, so they can be released after
 * the call completes.
 */
struct VbglR0ParmInfo
{
    /* Number of entries in use in aLockBufs. */
    uint32_t cLockBufs;
    struct
    {
        /* The index of the call parameter this entry belongs to. */
        uint32_t iParm;
        /* Memory object locking (or, for bounce buffers, backing) the parameter buffer. */
        RTR0MEMOBJ hObj;
#ifdef USE_BOUNCE_BUFFERS
        /* Small bounce buffer from RTMemTmpAlloc*, or NULL when hObj backs the data. */
        void *pvSmallBuf;
#endif
    } aLockBufs[10];
};
71
72
73
74/* These functions can be only used by VBoxGuest. */
75
76DECLVBGL(int) VbglR0HGCMInternalConnect (VBoxGuestHGCMConnectInfo *pConnectInfo,
77 PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
78{
79 VMMDevHGCMConnect *pHGCMConnect;
80 int rc;
81
82 if (!pConnectInfo || !pfnAsyncCallback)
83 return VERR_INVALID_PARAMETER;
84
85 pHGCMConnect = NULL;
86
87 /* Allocate request */
88 rc = VbglGRAlloc ((VMMDevRequestHeader **)&pHGCMConnect, sizeof (VMMDevHGCMConnect), VMMDevReq_HGCMConnect);
89
90 if (RT_SUCCESS(rc))
91 {
92 /* Initialize request memory */
93 pHGCMConnect->header.fu32Flags = 0;
94
95 memcpy (&pHGCMConnect->loc, &pConnectInfo->Loc, sizeof (HGCMServiceLocation));
96 pHGCMConnect->u32ClientID = 0;
97
98 /* Issue request */
99 rc = VbglGRPerform (&pHGCMConnect->header.header);
100
101 if (RT_SUCCESS(rc))
102 {
103 /* Check if host decides to process the request asynchronously. */
104 if (rc == VINF_HGCM_ASYNC_EXECUTE)
105 {
106 /* Wait for request completion interrupt notification from host */
107 pfnAsyncCallback (&pHGCMConnect->header, pvAsyncData, u32AsyncData);
108 }
109
110 pConnectInfo->result = pHGCMConnect->header.result;
111
112 if (RT_SUCCESS (pConnectInfo->result))
113 pConnectInfo->u32ClientID = pHGCMConnect->u32ClientID;
114 }
115
116 VbglGRFree (&pHGCMConnect->header.header);
117 }
118
119 return rc;
120}
121
122
123DECLR0VBGL(int) VbglR0HGCMInternalDisconnect (VBoxGuestHGCMDisconnectInfo *pDisconnectInfo,
124 PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
125{
126 VMMDevHGCMDisconnect *pHGCMDisconnect;
127 int rc;
128
129 if (!pDisconnectInfo || !pfnAsyncCallback)
130 return VERR_INVALID_PARAMETER;
131
132 pHGCMDisconnect = NULL;
133
134 /* Allocate request */
135 rc = VbglGRAlloc ((VMMDevRequestHeader **)&pHGCMDisconnect, sizeof (VMMDevHGCMDisconnect), VMMDevReq_HGCMDisconnect);
136
137 if (RT_SUCCESS(rc))
138 {
139 /* Initialize request memory */
140 pHGCMDisconnect->header.fu32Flags = 0;
141
142 pHGCMDisconnect->u32ClientID = pDisconnectInfo->u32ClientID;
143
144 /* Issue request */
145 rc = VbglGRPerform (&pHGCMDisconnect->header.header);
146
147 if (RT_SUCCESS(rc))
148 {
149 /* Check if host decides to process the request asynchronously. */
150 if (rc == VINF_HGCM_ASYNC_EXECUTE)
151 {
152 /* Wait for request completion interrupt notification from host */
153 pfnAsyncCallback (&pHGCMDisconnect->header, pvAsyncData, u32AsyncData);
154 }
155
156 pDisconnectInfo->result = pHGCMDisconnect->header.result;
157 }
158
159 VbglGRFree (&pHGCMDisconnect->header.header);
160 }
161
162 return rc;
163}
164
165
166/**
167 * Preprocesses the HGCM call, validating and locking/buffering parameters.
168 *
169 * @returns VBox status code.
170 *
171 * @param pCallInfo The call info.
172 * @param cbCallInfo The size of the call info structure.
173 * @param fIsUser Is it a user request or kernel request.
174 * @param pcbExtra Where to return the extra request space needed for
175 * physical page lists.
176 */
177static int vbglR0HGCMInternalPreprocessCall(VBoxGuestHGCMCallInfo const *pCallInfo, uint32_t cbCallInfo,
178 bool fIsUser, struct VbglR0ParmInfo *pParmInfo, size_t *pcbExtra)
179{
180 HGCMFunctionParameter const *pSrcParm = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo);
181 uint32_t cParms = pCallInfo->cParms;
182 uint32_t iParm;
183 uint32_t cb;
184
185 /*
186 * Lock down the any linear buffers so we can get their addresses
187 * and figure out how much extra storage we need for page lists.
188 *
189 * Note! With kernel mode users we can be assertive. For user mode users
190 * we should just (debug) log it and fail without any fanfare.
191 */
192 *pcbExtra = 0;
193 pParmInfo->cLockBufs = 0;
194 for (iParm = 0; iParm < cParms; iParm++, pSrcParm++)
195 {
196 switch (pSrcParm->type)
197 {
198 case VMMDevHGCMParmType_32bit:
199 Log4(("GstHGCMCall: parm=%u type=32bit: %#010x\n", iParm, pSrcParm->u.value32));
200 break;
201
202 case VMMDevHGCMParmType_64bit:
203 Log4(("GstHGCMCall: parm=%u type=64bit: %#018x\n", iParm, pSrcParm->u.value64));
204 break;
205
206 case VMMDevHGCMParmType_PageList:
207 if (fIsUser)
208 return VERR_INVALID_PARAMETER;
209 cb = pSrcParm->u.PageList.size;
210 if (cb)
211 {
212 uint32_t off = pSrcParm->u.PageList.offset;
213 HGCMPageListInfo *pPgLst;
214 uint32_t cPages;
215 uint32_t u32;
216
217 AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
218 VERR_OUT_OF_RANGE);
219 AssertMsgReturn( off >= pCallInfo->cParms * sizeof(HGCMFunctionParameter)
220 && off <= cbCallInfo - sizeof(HGCMPageListInfo),
221 ("offset=%#x cParms=%#x cbCallInfo=%#x\n", off, pCallInfo->cParms, cbCallInfo),
222 VERR_INVALID_PARAMETER);
223
224 pPgLst = (HGCMPageListInfo *)((uint8_t *)pCallInfo + off);
225 cPages = pPgLst->cPages;
226 u32 = RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]) + off;
227 AssertMsgReturn(u32 <= cbCallInfo,
228 ("u32=%#x (cPages=%#x offset=%#x) cbCallInfo=%#x\n", u32, cPages, off, cbCallInfo),
229 VERR_INVALID_PARAMETER);
230 AssertMsgReturn(pPgLst->offFirstPage < PAGE_SIZE, ("#x\n", pPgLst->offFirstPage), VERR_INVALID_PARAMETER);
231 u32 = RT_ALIGN_32(pPgLst->offFirstPage + cb, PAGE_SIZE) >> PAGE_SHIFT;
232 AssertMsgReturn(cPages == u32, ("cPages=%#x u32=%#x\n", cPages, u32), VERR_INVALID_PARAMETER);
233 AssertMsgReturn(VBOX_HGCM_F_PARM_ARE_VALID(pPgLst->flags), ("%#x\n", pPgLst->flags), VERR_INVALID_PARAMETER);
234 Log4(("GstHGCMCall: parm=%u type=pglst: cb=%#010x cPgs=%u offPg0=%#x flags=%#x\n",
235 iParm, cb, cPages, pPgLst->offFirstPage, pPgLst->flags));
236 u32 = cPages;
237 while (u32-- > 0)
238 {
239 Log4(("GstHGCMCall: pg#%u=%RHp\n", u32, pPgLst->aPages[u32]));
240 AssertMsgReturn(!(pPgLst->aPages[u32] & (PAGE_OFFSET_MASK | UINT64_C(0xfff0000000000000))),
241 ("pg#%u=%RHp\n", u32, pPgLst->aPages[u32]),
242 VERR_INVALID_PARAMETER);
243 }
244
245 *pcbExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[pPgLst->cPages]);
246 }
247 else
248 Log4(("GstHGCMCall: parm=%u type=pglst: cb=0\n", iParm));
249 break;
250
251 case VMMDevHGCMParmType_LinAddr_Locked_In:
252 case VMMDevHGCMParmType_LinAddr_Locked_Out:
253 case VMMDevHGCMParmType_LinAddr_Locked:
254 if (fIsUser)
255 return VERR_INVALID_PARAMETER;
256 if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST())
257 {
258 cb = pSrcParm->u.Pointer.size;
259 AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
260 VERR_OUT_OF_RANGE);
261 if (cb != 0)
262 Log4(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p\n",
263 iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr));
264 else
265 Log4(("GstHGCMCall: parm=%u type=%#x: cb=0\n", iParm, pSrcParm->type));
266 break;
267 }
268 /* fall thru */
269
270 case VMMDevHGCMParmType_LinAddr_In:
271 case VMMDevHGCMParmType_LinAddr_Out:
272 case VMMDevHGCMParmType_LinAddr:
273 cb = pSrcParm->u.Pointer.size;
274 if (cb != 0)
275 {
276#ifdef USE_BOUNCE_BUFFERS
277 void *pvSmallBuf = NULL;
278#endif
279 uint32_t iLockBuf = pParmInfo->cLockBufs;
280 RTR0MEMOBJ hObj;
281 int rc;
282 uint32_t fAccess = pSrcParm->type == VMMDevHGCMParmType_LinAddr_In
283 || pSrcParm->type == VMMDevHGCMParmType_LinAddr_Locked_In
284 ? RTMEM_PROT_READ
285 : RTMEM_PROT_READ | RTMEM_PROT_WRITE;
286
287 AssertReturn(iLockBuf < RT_ELEMENTS(pParmInfo->aLockBufs), VERR_INVALID_PARAMETER);
288 if (!fIsUser)
289 {
290 AssertMsgReturn(cb <= VBGLR0_MAX_HGCM_KERNEL_PARM, ("%#x > %#x\n", cb, VBGLR0_MAX_HGCM_KERNEL_PARM),
291 VERR_OUT_OF_RANGE);
292 rc = RTR0MemObjLockKernel(&hObj, (void *)pSrcParm->u.Pointer.u.linearAddr, cb, fAccess);
293 if (RT_FAILURE(rc))
294 {
295 Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemObjLockKernel(,%p,%#x) -> %Rrc\n",
296 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr, cb, rc));
297 return rc;
298 }
299 Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p locked kernel -> %p\n",
300 iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
301 }
302 else
303 {
304 if (cb > VBGLR0_MAX_HGCM_USER_PARM)
305 {
306 Log(("GstHGCMCall: id=%#x fn=%u parm=%u pv=%p cb=%#x > %#x -> out of range\n",
307 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr,
308 cb, VBGLR0_MAX_HGCM_USER_PARM));
309 return VERR_OUT_OF_RANGE;
310 }
311
312#ifndef USE_BOUNCE_BUFFERS
313 rc = RTR0MemObjLockUser(&hObj, (RTR3PTR)pSrcParm->u.Pointer.u.linearAddr, cb, fAccess, NIL_RTR0PROCESS);
314 if (RT_FAILURE(rc))
315 {
316 Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemObjLockUser(,%p,%#x,nil) -> %Rrc\n",
317 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm, pSrcParm->u.Pointer.u.linearAddr, cb, rc));
318 return rc;
319 }
320 Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p locked user -> %p\n",
321 iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
322
323#else /* USE_BOUNCE_BUFFERS */
324 /*
325 * This is a bit massive, but we don't want to waste a
326 * whole page for a 3 byte string buffer (guest props).
327 *
328 * The threshold is ASSUMING sizeof(RTMEMHDR) == 16 and
329 * the system is using some power of two allocator.
330 */
331 /** @todo A more efficient strategy would be to combine buffers. However it
332 * is probably going to be more massive than the current code, so
333 * it can wait till later. */
334 bool fCopyIn = pSrcParm->type != VMMDevHGCMParmType_LinAddr_Out
335 && pSrcParm->type != VMMDevHGCMParmType_LinAddr_Locked_Out;
336 if (cb <= PAGE_SIZE / 2 - 16)
337 {
338 pvSmallBuf = fCopyIn ? RTMemTmpAlloc(cb) : RTMemTmpAllocZ(cb);
339 if (RT_UNLIKELY(!pvSmallBuf))
340 return VERR_NO_MEMORY;
341 if (fCopyIn)
342 {
343 rc = RTR0MemUserCopyFrom(pvSmallBuf, pSrcParm->u.Pointer.u.linearAddr, cb);
344 if (RT_FAILURE(rc))
345 {
346 RTMemTmpFree(pvSmallBuf);
347 Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemUserCopyFrom(,%p,%#x) -> %Rrc\n",
348 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm,
349 pSrcParm->u.Pointer.u.linearAddr, cb, rc));
350 return rc;
351 }
352 }
353 rc = RTR0MemObjLockKernel(&hObj, pvSmallBuf, cb, fAccess);
354 if (RT_FAILURE(rc))
355 {
356 RTMemTmpFree(pvSmallBuf);
357 Log(("GstHGCMCall: RTR0MemObjLockKernel failed for small buffer: rc=%Rrc pvSmallBuf=%p cb=%#x\n",
358 rc, pvSmallBuf, cb));
359 return rc;
360 }
361 Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p small buffer %p -> %p\n",
362 iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, pvSmallBuf, hObj));
363 }
364 else
365 {
366 rc = RTR0MemObjAllocPage(&hObj, cb, false /*fExecutable*/);
367 if (RT_FAILURE(rc))
368 return rc;
369 if (!fCopyIn)
370 memset(RTR0MemObjAddress(hObj), '\0', cb);
371 else
372 {
373 rc = RTR0MemUserCopyFrom(RTR0MemObjAddress(hObj), pSrcParm->u.Pointer.u.linearAddr, cb);
374 if (RT_FAILURE(rc))
375 {
376 RTR0MemObjFree(hObj, false /*fFreeMappings*/);
377 Log(("GstHGCMCall: id=%#x fn=%u parm=%u RTR0MemUserCopyFrom(,%p,%#x) -> %Rrc\n",
378 pCallInfo->u32ClientID, pCallInfo->u32Function, iParm,
379 pSrcParm->u.Pointer.u.linearAddr, cb, rc));
380 return rc;
381 }
382 }
383 Log3(("GstHGCMCall: parm=%u type=%#x: cb=%#010x pv=%p big buffer -> %p\n",
384 iParm, pSrcParm->type, cb, pSrcParm->u.Pointer.u.linearAddr, hObj));
385 }
386#endif /* USE_BOUNCE_BUFFERS */
387 }
388
389 pParmInfo->aLockBufs[iLockBuf].iParm = iParm;
390 pParmInfo->aLockBufs[iLockBuf].hObj = hObj;
391#ifdef USE_BOUNCE_BUFFERS
392 pParmInfo->aLockBufs[iLockBuf].pvSmallBuf = pvSmallBuf;
393#endif
394 pParmInfo->cLockBufs = iLockBuf + 1;
395
396 if (VBGLR0_CAN_USE_PHYS_PAGE_LIST())
397 {
398 size_t cPages = RTR0MemObjSize(hObj);
399 *pcbExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]);
400 }
401 }
402 else
403 Log4(("GstHGCMCall: parm=%u type=%#x: cb=0\n", iParm, pSrcParm->type));
404 break;
405
406 default:
407 return VERR_INVALID_PARAMETER;
408 }
409 }
410
411 return VINF_SUCCESS;
412}
413
414
415/**
416 * Translates locked linear address to the normal type.
417 * The locked types are only for the guest side and not handled by the host.
418 *
419 * @returns normal linear address type.
420 * @param enmType The type.
421 */
422static HGCMFunctionParameterType vbglR0HGCMInternalConvertLinAddrType(HGCMFunctionParameterType enmType)
423{
424 switch (enmType)
425 {
426 case VMMDevHGCMParmType_LinAddr_Locked_In:
427 return VMMDevHGCMParmType_LinAddr_In;
428 case VMMDevHGCMParmType_LinAddr_Locked_Out:
429 return VMMDevHGCMParmType_LinAddr_Out;
430 case VMMDevHGCMParmType_LinAddr_Locked:
431 return VMMDevHGCMParmType_LinAddr;
432 default:
433 return enmType;
434 }
435}
436
437
438/**
439 * Translates linear address types to page list direction flags.
440 *
441 * @returns page list flags.
442 * @param enmType The type.
443 */
444static uint32_t vbglR0HGCMInternalLinAddrTypeToPageListFlags(HGCMFunctionParameterType enmType)
445{
446 switch (enmType)
447 {
448 case VMMDevHGCMParmType_LinAddr_In:
449 case VMMDevHGCMParmType_LinAddr_Locked_In:
450 return VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
451
452 case VMMDevHGCMParmType_LinAddr_Out:
453 case VMMDevHGCMParmType_LinAddr_Locked_Out:
454 return VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;
455
456 default: AssertFailed();
457 case VMMDevHGCMParmType_LinAddr:
458 case VMMDevHGCMParmType_LinAddr_Locked:
459 return VBOX_HGCM_F_PARM_DIRECTION_BOTH;
460 }
461}
462
463
/**
 * Initializes the call request that we're sending to the host.
 *
 * @param   pHGCMCall   The request to initialize; must be large enough for
 *                      the parameter array plus the extra page-list space
 *                      calculated by vbglR0HGCMInternalPreprocessCall.
 * @param   pCallInfo   The call info.
 * @param   cbCallInfo  The size of the call info structure.
 * @param   fIsUser     Is it a user request or kernel request.
 * @param   pParmInfo   The lock/bounce buffer info produced by the
 *                      preprocessing step.
 */
static void vbglR0HGCMInternalInitCall(VMMDevHGCMCall *pHGCMCall, VBoxGuestHGCMCallInfo const *pCallInfo,
                                       uint32_t cbCallInfo, bool fIsUser, struct VbglR0ParmInfo *pParmInfo)
{
    HGCMFunctionParameter const *pSrcParm = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo);
    HGCMFunctionParameter       *pDstParm = VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
    uint32_t    cParms   = pCallInfo->cParms;
    /* offExtra tracks where the next page list goes: right after the parameter array. */
    uint32_t    offExtra = (uintptr_t)(pDstParm + cParms) - (uintptr_t)pHGCMCall;
    uint32_t    iLockBuf = 0;
    uint32_t    iParm;


    /*
     * The call request headers.
     */
    pHGCMCall->header.fu32Flags = 0;
    pHGCMCall->header.result    = VINF_SUCCESS;

    pHGCMCall->u32ClientID = pCallInfo->u32ClientID;
    pHGCMCall->u32Function = pCallInfo->u32Function;
    pHGCMCall->cParms      = cParms;

    /*
     * The parameters.
     */
    for (iParm = 0; iParm < pCallInfo->cParms; iParm++, pSrcParm++, pDstParm++)
    {
        switch (pSrcParm->type)
        {
            case VMMDevHGCMParmType_32bit:
            case VMMDevHGCMParmType_64bit:
                /* Plain values are copied verbatim. */
                *pDstParm = *pSrcParm;
                break;

            case VMMDevHGCMParmType_PageList:
                /* Copy the caller's page list into the extra space of the request,
                   rebasing the offset so it is relative to the request start. */
                pDstParm->type = VMMDevHGCMParmType_PageList;
                pDstParm->u.PageList.size = pSrcParm->u.PageList.size;
                if (pSrcParm->u.PageList.size)
                {
                    HGCMPageListInfo const *pSrcPgLst = (HGCMPageListInfo *)((uint8_t *)pCallInfo + pSrcParm->u.PageList.offset);
                    HGCMPageListInfo       *pDstPgLst = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offExtra);
                    uint32_t const          cPages    = pSrcPgLst->cPages;
                    uint32_t                iPage;

                    pDstParm->u.PageList.offset = offExtra;
                    pDstPgLst->flags            = pSrcPgLst->flags;
                    pDstPgLst->offFirstPage     = pSrcPgLst->offFirstPage;
                    pDstPgLst->cPages           = cPages;
                    for (iPage = 0; iPage < cPages; iPage++)
                        pDstPgLst->aPages[iPage] = pSrcPgLst->aPages[iPage];

                    offExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]);
                }
                else
                    pDstParm->u.PageList.offset = 0;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST())
                {
                    /* Host can reach the buffer directly; just strip the guest-only
                       "locked" marker from the type. */
                    *pDstParm = *pSrcParm;
                    pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                    break;
                }
                /* fall thru */

            case VMMDevHGCMParmType_LinAddr_In:
            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
                if (pSrcParm->u.Pointer.size != 0)
                {
#ifdef USE_BOUNCE_BUFFERS
                    void       *pvSmallBuf = pParmInfo->aLockBufs[iLockBuf].pvSmallBuf;
#endif
                    RTR0MEMOBJ  hObj       = pParmInfo->aLockBufs[iLockBuf].hObj;
                    /* Preprocessing recorded the lock buffers in parameter order,
                       so the entries must line up with the buffer parameters. */
                    Assert(iParm == pParmInfo->aLockBufs[iLockBuf].iParm);

                    if (VBGLR0_CAN_USE_PHYS_PAGE_LIST())
                    {
                        /* Convert the locked buffer into a physical page list in
                           the extra request space. */
                        HGCMPageListInfo   *pDstPgLst = (HGCMPageListInfo *)((uint8_t *)pHGCMCall + offExtra);
                        size_t const        cPages    = RTR0MemObjSize(hObj) >> PAGE_SHIFT;
                        size_t              iPage;

                        pDstParm->type = VMMDevHGCMParmType_PageList;
                        pDstParm->u.PageList.size   = pSrcParm->u.Pointer.size;
                        pDstParm->u.PageList.offset = offExtra;
                        pDstPgLst->flags            = vbglR0HGCMInternalLinAddrTypeToPageListFlags(pSrcParm->type);
#ifdef USE_BOUNCE_BUFFERS
                        if (fIsUser)
                            /* User data lives in the bounce buffer, not at the caller's address. */
                            pDstPgLst->offFirstPage = (uintptr_t)pvSmallBuf & PAGE_OFFSET_MASK;
                        else
#endif
                            pDstPgLst->offFirstPage = pSrcParm->u.Pointer.u.linearAddr & PAGE_OFFSET_MASK;
                        pDstPgLst->cPages           = cPages; Assert(pDstPgLst->cPages == cPages);
                        for (iPage = 0; iPage < cPages; iPage++)
                        {
                            pDstPgLst->aPages[iPage] = RTR0MemObjGetPagePhysAddr(hObj, iPage);
                            Assert(pDstPgLst->aPages[iPage] != NIL_RTHCPHYS);
                        }

                        offExtra += RT_OFFSETOF(HGCMPageListInfo, aPages[cPages]);
                    }
                    else
                    {
                        pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                        pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
#ifdef USE_BOUNCE_BUFFERS
                        if (fIsUser)
                            /* Point the host at the bounce buffer (small tmp alloc
                               or full page allocation, whichever was made). */
                            pDstParm->u.Pointer.u.linearAddr = pvSmallBuf
                                                             ? (uintptr_t)pvSmallBuf
                                                             : (uintptr_t)RTR0MemObjAddress(hObj);
                        else
#endif
                            pDstParm->u.Pointer.u.linearAddr = pSrcParm->u.Pointer.u.linearAddr;
                    }
                    iLockBuf++;
                }
                else
                {
                    /* Empty buffer: pass a zero-sized, NULL-address parameter. */
                    pDstParm->type = vbglR0HGCMInternalConvertLinAddrType(pSrcParm->type);
                    pDstParm->u.Pointer.size = 0;
                    pDstParm->u.Pointer.u.linearAddr = 0;
                }
                break;

            default:
                /* Preprocessing should have rejected anything else already. */
                AssertFailed();
                pDstParm->type = VMMDevHGCMParmType_Invalid;
                break;
        }
    }
}
608
609
/**
 * Performs the call and completion wait.
 *
 * @returns VBox status code of this operation, not necessarily the call.
 *
 * @param   pHGCMCall           The HGCM call info.
 * @param   pfnAsyncCallback    The async callback that will wait for the call
 *                              to complete.
 * @param   pvAsyncData         Argument for the callback.
 * @param   u32AsyncData        Argument for the callback.
 * @param   pfLeakIt            Where to return the leak it / free it,
 *                              indicator. Cancellation fun.
 */
static int vbglR0HGCMInternalDoCall(VMMDevHGCMCall *pHGCMCall, PFNVBGLHGCMCALLBACK pfnAsyncCallback,
                                    void *pvAsyncData, uint32_t u32AsyncData, bool *pfLeakIt)
{
    int rc;

    Log(("calling VbglGRPerform\n"));
    rc = VbglGRPerform(&pHGCMCall->header.header);
    Log(("VbglGRPerform rc = %Rrc (header rc=%d)\n", rc, pHGCMCall->header.result));

    /*
     * If the call failed, but as a result of the request itself, then pretend
     * success. Upper layers will interpret the result code in the packet.
     */
    if (   RT_FAILURE(rc)
        && rc == pHGCMCall->header.result)
    {
        Assert(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE);
        rc = VINF_SUCCESS;
    }

    /*
     * Check if host decides to process the request asynchronously,
     * if so, we wait for it to complete using the caller supplied callback.
     */
    *pfLeakIt = false;
    if (rc == VINF_HGCM_ASYNC_EXECUTE)
    {
        Log(("Processing HGCM call asynchronously\n"));
        /* The callback blocks until the request completes or the wait is
           interrupted / times out; its return code tells which. */
        rc = pfnAsyncCallback(&pHGCMCall->header, pvAsyncData, u32AsyncData);
        if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
        {
            Assert(!(pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_CANCELLED));
            rc = VINF_SUCCESS;
        }
        else
        {
            /*
             * The request didn't complete in time or the call was interrupted,
             * the RC from the callback indicates which. Try cancel the request.
             *
             * This is a bit messy because we're racing request completion. Sorry.
             */
            /** @todo It would be nice if we could use the waiter callback to do further
             *  waiting in case of a completion race. If it wasn't for WINNT having its own
             *  version of all that stuff, I would've done it already. */
            VMMDevHGCMCancel2 *pCancelReq;
            int rc2 = VbglGRAlloc((VMMDevRequestHeader **)&pCancelReq, sizeof(*pCancelReq), VMMDevReq_HGCMCancel2);
            if (RT_SUCCESS(rc2))
            {
                pCancelReq->physReqToCancel = VbglPhysHeapGetPhysAddr(pHGCMCall);
                rc2 = VbglGRPerform(&pCancelReq->header);
                VbglGRFree(&pCancelReq->header);
            }
#if 1 /** @todo ADDVER: Remove this on next minor version change. */
            if (rc2 == VERR_NOT_IMPLEMENTED)
            {
                /* host is too old, or we're out of heap. */
                pHGCMCall->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;
                pHGCMCall->header.header.requestType = VMMDevReq_HGCMCancel;
                rc2 = VbglGRPerform(&pHGCMCall->header.header);
                if (rc2 == VERR_INVALID_PARAMETER)
                    rc2 = VERR_NOT_FOUND;
                /* NOTE(review): this tests rc (the wait status), not rc2 (the
                   cancel status); looks like it may have been meant to be rc2 —
                   confirm before changing. */
                else if (RT_SUCCESS(rc))
                    RTThreadSleep(1);
            }
#endif
            if (RT_SUCCESS(rc)) rc = VERR_INTERRUPTED; /** @todo weed this out from the WINNT VBoxGuest code. */
            if (RT_SUCCESS(rc2))
            {
                Log(("vbglR0HGCMInternalDoCall: successfully cancelled\n"));
                pHGCMCall->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;
            }
            else
            {
                /*
                 * Wait for a bit while the host (hopefully) completes it.
                 */
                uint64_t u64Start       = RTTimeSystemMilliTS();
                uint32_t cMilliesToWait = rc2 == VERR_NOT_FOUND || rc2 == VERR_SEM_DESTROYED ? 500 : 2000;
                uint64_t cElapsed       = 0;
                if (rc2 != VERR_NOT_FOUND)
                    LogRel(("vbglR0HGCMInternalDoCall: Failed to cancel the HGCM call on %Rrc: rc2=%Rrc\n", rc, rc2));
                else
                    Log(("vbglR0HGCMInternalDoCall: Cancel race rc=%Rrc rc2=%Rrc\n", rc, rc2));

                /* Poll the DONE flag; the host sets it when it finishes the request. */
                do
                {
                    ASMCompilerBarrier();       /* paranoia */
                    if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
                        break;
                    RTThreadSleep(1);
                    cElapsed = RTTimeSystemMilliTS() - u64Start;
                } while (cElapsed < cMilliesToWait);

                ASMCompilerBarrier();           /* paranoia^2 */
                if (pHGCMCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
                    rc = VINF_SUCCESS;
                else
                {
                    /* The host still owns the request buffer; freeing it now would
                       risk it being recycled while the host writes to it. */
                    LogRel(("vbglR0HGCMInternalDoCall: Leaking %u bytes. Pending call to %u with %u parms. (rc2=%Rrc)\n",
                            pHGCMCall->header.header.size, pHGCMCall->u32Function, pHGCMCall->cParms, rc2));
                    *pfLeakIt = true;
                }
                Log(("vbglR0HGCMInternalDoCall: Cancel race ended with rc=%Rrc (rc2=%Rrc) after %llu ms\n", rc, rc2, cElapsed));
            }
        }
    }

    Log(("GstHGCMCall: rc=%Rrc result=%Rrc fu32Flags=%#x fLeakIt=%d\n",
         rc, pHGCMCall->header.result, pHGCMCall->header.fu32Flags, *pfLeakIt));
    return rc;
}
735
736
/**
 * Copies the result of the call back to the caller info structure and user
 * buffers (if using bounce buffers).
 *
 * @returns rc, unless RTR0MemUserCopyTo fails.
 * @param   pCallInfo   Call info structure to update.
 * @param   pHGCMCall   HGCM call request.
 * @param   pParmInfo   Paramter locking/buffering info.
 * @param   fIsUser     Is it a user (true) or kernel request.
 * @param   rc          The current result code. Passed along to
 *                      preserve informational status codes.
 */
static int vbglR0HGCMInternalCopyBackResult(VBoxGuestHGCMCallInfo *pCallInfo, VMMDevHGCMCall const *pHGCMCall,
                                            struct VbglR0ParmInfo *pParmInfo, bool fIsUser, int rc)
{
    HGCMFunctionParameter const *pSrcParm = VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
    HGCMFunctionParameter       *pDstParm = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo);
    uint32_t    cParms = pCallInfo->cParms;
#ifdef USE_BOUNCE_BUFFERS
    uint32_t    iLockBuf = 0;
#endif
    uint32_t    iParm;

    /*
     * The call result.
     */
    pCallInfo->result = pHGCMCall->header.result;

    /*
     * Copy back parameters.
     */
    /* Note: the switch is on the *destination* (caller) parameter type, which
       still holds the type the caller originally passed in, not the possibly
       translated type sent to the host. */
    for (iParm = 0; iParm < pCallInfo->cParms; iParm++, pSrcParm++, pDstParm++)
    {
        switch (pDstParm->type)
        {
            case VMMDevHGCMParmType_32bit:
            case VMMDevHGCMParmType_64bit:
                *pDstParm = *pSrcParm;
                break;

            case VMMDevHGCMParmType_PageList:
                /* Only the size may have been updated by the host. */
                pDstParm->u.PageList.size = pSrcParm->u.PageList.size;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_In:
            case VMMDevHGCMParmType_LinAddr_In:
#ifdef USE_BOUNCE_BUFFERS
                /* Input-only data needs no copy-back, but keep the bounce
                   buffer index in step with the lock-buffer table. */
                if (   fIsUser
                    && iLockBuf < pParmInfo->cLockBufs
                    && iParm == pParmInfo->aLockBufs[iLockBuf].iParm)
                    iLockBuf++;
#endif
                pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                break;

            case VMMDevHGCMParmType_LinAddr_Locked_Out:
            case VMMDevHGCMParmType_LinAddr_Locked:
                if (!VBGLR0_CAN_USE_PHYS_PAGE_LIST())
                {
                    /* Pre-locked kernel buffer: the host wrote into it directly. */
                    pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                    break;
                }
                /* fall thru */

            case VMMDevHGCMParmType_LinAddr_Out:
            case VMMDevHGCMParmType_LinAddr:
            {
#ifdef USE_BOUNCE_BUFFERS
                if (fIsUser)
                {
                    /* Copy the bounce buffer contents back into the user buffer. */
                    size_t cbOut = RT_MIN(pSrcParm->u.Pointer.size, pDstParm->u.Pointer.size);
                    if (cbOut)
                    {
                        Assert(pParmInfo->aLockBufs[iLockBuf].iParm == iParm);
                        int rc2 = RTR0MemUserCopyTo((RTR3PTR)pDstParm->u.Pointer.u.linearAddr,
                                                    pParmInfo->aLockBufs[iLockBuf].pvSmallBuf
                                                    ? pParmInfo->aLockBufs[iLockBuf].pvSmallBuf
                                                    : RTR0MemObjAddress(pParmInfo->aLockBufs[iLockBuf].hObj),
                                                    cbOut);
                        if (RT_FAILURE(rc2))
                            return rc2;
                        iLockBuf++;
                    }
                    else if (   iLockBuf < pParmInfo->cLockBufs
                             && iParm == pParmInfo->aLockBufs[iLockBuf].iParm)
                        iLockBuf++;
                }
#endif
                pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
                break;
            }

            default:
                AssertFailed();
                rc = VERR_INTERNAL_ERROR_4;
                break;
        }
    }

#ifdef USE_BOUNCE_BUFFERS
    /* All recorded lock buffers should have been visited exactly once. */
    Assert(!fIsUser || pParmInfo->cLockBufs == iLockBuf);
#endif
    return rc;
}
841
842
843DECLR0VBGL(int) VbglR0HGCMInternalCall(VBoxGuestHGCMCallInfo *pCallInfo, uint32_t cbCallInfo, uint32_t fFlags,
844 PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
845{
846 bool fIsUser = (fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_USER;
847 struct VbglR0ParmInfo ParmInfo;
848 size_t cbExtra;
849 int rc;
850
851 /*
852 * Basic validation.
853 */
854 AssertMsgReturn( !pCallInfo
855 || !pfnAsyncCallback
856 || pCallInfo->cParms > VBOX_HGCM_MAX_PARMS
857 || !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
858 ("pCallInfo=%p pfnAsyncCallback=%p fFlags=%#x\n", pCallInfo, pfnAsyncCallback, fFlags),
859 VERR_INVALID_PARAMETER);
860 AssertReturn( cbCallInfo >= sizeof(VBoxGuestHGCMCallInfo)
861 || cbCallInfo >= pCallInfo->cParms * sizeof(HGCMFunctionParameter),
862 VERR_INVALID_PARAMETER);
863
864 Log(("GstHGCMCall: u32ClientID=%#x u32Function=%u cParms=%u cbCallInfo=%#x fFlags=%#x\n",
865 pCallInfo->u32ClientID, pCallInfo->u32ClientID, pCallInfo->u32Function, pCallInfo->cParms, cbCallInfo, fFlags));
866
867 /*
868 * Validate, lock and buffer the parameters for the call.
869 * This will calculate the amount of extra space for physical page list.
870 */
871 rc = vbglR0HGCMInternalPreprocessCall(pCallInfo, cbCallInfo, fIsUser, &ParmInfo, &cbExtra);
872 if (RT_SUCCESS(rc))
873 {
874 /*
875 * Allocate the request buffer and recreate the call request.
876 */
877 VMMDevHGCMCall *pHGCMCall;
878 rc = VbglGRAlloc((VMMDevRequestHeader **)&pHGCMCall,
879 sizeof(VMMDevHGCMCall) + pCallInfo->cParms * sizeof(HGCMFunctionParameter) + cbExtra,
880 VMMDevReq_HGCMCall);
881 if (RT_SUCCESS(rc))
882 {
883 bool fLeakIt;
884 vbglR0HGCMInternalInitCall(pHGCMCall, pCallInfo, cbCallInfo, fIsUser, &ParmInfo);
885
886 /*
887 * Perform the call.
888 */
889 rc = vbglR0HGCMInternalDoCall(pHGCMCall, pfnAsyncCallback, pvAsyncData, u32AsyncData, &fLeakIt);
890 if (RT_SUCCESS(rc))
891 {
892 /*
893 * Copy back the result (parameters and buffers that changed).
894 */
895 rc = vbglR0HGCMInternalCopyBackResult(pCallInfo, pHGCMCall, &ParmInfo, fIsUser, rc);
896 }
897 else
898 LogRel(("VbglR0HGCMInternalCall: vbglR0HGCMInternalDoCall failed. rc=%Rrc\n", rc));
899
900 if (!fLeakIt)
901 VbglGRFree(&pHGCMCall->header.header);
902 }
903 }
904 else
905 LogRel(("VbglR0HGCMInternalCall: vbglR0HGCMInternalPreprocessCall failed. rc=%Rrc\n", rc));
906
907 /*
908 * Release locks and free bounce buffers.
909 */
910 if (ParmInfo.cLockBufs)
911 while (ParmInfo.cLockBufs-- > 0)
912 {
913 RTR0MemObjFree(ParmInfo.aLockBufs[ParmInfo.cLockBufs].hObj, false /*fFreeMappings*/);
914#ifdef USE_BOUNCE_BUFFERS
915 RTMemTmpFree(ParmInfo.aLockBufs[ParmInfo.cLockBufs].pvSmallBuf);
916#endif
917 }
918
919 return rc;
920}
921
922
923#if ARCH_BITS == 64
924DECLR0VBGL(int) VbglR0HGCMInternalCall32(VBoxGuestHGCMCallInfo *pCallInfo, uint32_t cbCallInfo, uint32_t fFlags,
925 PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData)
926{
927 VBoxGuestHGCMCallInfo *pCallInfo64 = NULL;
928 HGCMFunctionParameter *pParm64 = NULL;
929 HGCMFunctionParameter32 *pParm32 = NULL;
930 uint32_t cParms = 0;
931 uint32_t iParm = 0;
932 int rc = VINF_SUCCESS;
933
934 /*
935 * Input validation.
936 */
937 AssertMsgReturn( !pCallInfo
938 || !pfnAsyncCallback
939 || pCallInfo->cParms > VBOX_HGCM_MAX_PARMS
940 || !(fFlags & ~VBGLR0_HGCMCALL_F_MODE_MASK),
941 ("pCallInfo=%p pfnAsyncCallback=%p fFlags=%#x\n", pCallInfo, pfnAsyncCallback, fFlags),
942 VERR_INVALID_HANDLE);
943 AssertReturn( cbCallInfo >= sizeof(VBoxGuestHGCMCallInfo)
944 || cbCallInfo >= pCallInfo->cParms * sizeof(HGCMFunctionParameter32),
945 VERR_INVALID_MAGIC);
946
947 /* This Assert does not work on Solaris 64/32 mixed mode, not sure why, skipping for now */
948#ifndef RT_OS_SOLARIS
949 AssertReturn((fFlags & VBGLR0_HGCMCALL_F_MODE_MASK) == VBGLR0_HGCMCALL_F_KERNEL, VERR_WRONG_ORDER);
950#endif
951
952 cParms = pCallInfo->cParms;
953 Log(("VbglR0HGCMInternalCall32: cParms=%d, u32Function=%d, fFlags=%#x\n", cParms, pCallInfo->u32Function, fFlags));
954
955 /*
956 * The simple approach, allocate a temporary request and convert the parameters.
957 */
958 pCallInfo64 = (VBoxGuestHGCMCallInfo *)RTMemTmpAllocZ(sizeof(*pCallInfo64) + cParms * sizeof(HGCMFunctionParameter));
959 if (!pCallInfo64)
960 return VERR_NO_TMP_MEMORY;
961
962 *pCallInfo64 = *pCallInfo;
963 pParm32 = VBOXGUEST_HGCM_CALL_PARMS32(pCallInfo);
964 pParm64 = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo64);
965 for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++)
966 {
967 switch (pParm32->type)
968 {
969 case VMMDevHGCMParmType_32bit:
970 pParm64->type = VMMDevHGCMParmType_32bit;
971 pParm64->u.value32 = pParm32->u.value32;
972 break;
973
974 case VMMDevHGCMParmType_64bit:
975 pParm64->type = VMMDevHGCMParmType_64bit;
976 pParm64->u.value64 = pParm32->u.value64;
977 break;
978
979 case VMMDevHGCMParmType_LinAddr_Out:
980 case VMMDevHGCMParmType_LinAddr:
981 case VMMDevHGCMParmType_LinAddr_In:
982 pParm64->type = pParm32->type;
983 pParm64->u.Pointer.size = pParm32->u.Pointer.size;
984 pParm64->u.Pointer.u.linearAddr = pParm32->u.Pointer.u.linearAddr;
985 break;
986
987 default:
988 rc = VERR_INVALID_PARAMETER;
989 LogRel(("VbglR0HGCMInternalCall32: pParm32 type %#x invalid.\n", pParm32->type));
990 break;
991 }
992 if (RT_FAILURE(rc))
993 break;
994 }
995 if (RT_SUCCESS(rc))
996 {
997 rc = VbglR0HGCMInternalCall(pCallInfo64, sizeof(*pCallInfo64) + cParms * sizeof(HGCMFunctionParameter), fFlags,
998 pfnAsyncCallback, pvAsyncData, u32AsyncData);
999
1000 if (RT_SUCCESS(rc))
1001 {
1002 *pCallInfo = *pCallInfo64;
1003
1004 /*
1005 * Copy back.
1006 */
1007 pParm32 = VBOXGUEST_HGCM_CALL_PARMS32(pCallInfo);
1008 pParm64 = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo64);
1009 for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++)
1010 {
1011 LogRel(("VbglR0HGCMInternalCall32: iParm=%d cParms=%d\n",iParm, cParms));
1012 switch (pParm64->type)
1013 {
1014 case VMMDevHGCMParmType_32bit:
1015 LogRel(("pParm32->u.value32=%d\n", pParm32->u.value32));
1016 pParm32->u.value32 = pParm64->u.value32;
1017 break;
1018
1019 case VMMDevHGCMParmType_64bit:
1020 pParm32->u.value64 = pParm64->u.value64;
1021 break;
1022
1023 case VMMDevHGCMParmType_LinAddr_Out:
1024 case VMMDevHGCMParmType_LinAddr:
1025 case VMMDevHGCMParmType_LinAddr_In:
1026 pParm32->u.Pointer.size = pParm64->u.Pointer.size;
1027 break;
1028
1029 default:
1030 LogRel(("VbglR0HGCMInternalCall32: failed invalid pParm32 type %d\n", pParm32->type));
1031 rc = VERR_INTERNAL_ERROR_3;
1032 break;
1033 }
1034 }
1035 }
1036 else
1037 LogRel(("VbglR0HGCMInternalCall32: VbglR0HGCMInternalCall failed. rc=%Rrc\n", rc));
1038 }
1039 else
1040 LogRel(("VbglR0HGCMInternalCall32: failed. rc=%Rrc\n", rc));
1041
1042 RTMemTmpFree(pCallInfo64);
1043 return rc;
1044}
1045#endif /* ARCH_BITS == 64 */
1046
1047#endif /* VBGL_VBOXGUEST */
1048
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette