VirtualBox

source: vbox/trunk/src/VBox/Devices/Graphics/DevVGA_VDMA.cpp@ 80474

Last change on this file since 80474 was 80474, checked in by vboxsync, 5 years ago

DevVGA_VDMA.cpp: More doxygen fixes

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 131.3 KB
1/* $Id: DevVGA_VDMA.cpp 80474 2019-08-28 11:54:58Z vboxsync $ */
2/** @file
3 * Video DMA (VDMA) support.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEV_VGA
23#include <VBox/VMMDev.h>
24#include <VBox/vmm/pdmdev.h>
25#include <VBox/vmm/pgm.h>
26#include <VBoxVideo.h>
27#include <VBox/AssertGuest.h>
28#include <iprt/semaphore.h>
29#include <iprt/thread.h>
30#include <iprt/mem.h>
31#include <iprt/asm.h>
32#include <iprt/list.h>
33#include <iprt/param.h>
34
35#include "DevVGA.h"
36#include "HGSMI/SHGSMIHost.h"
37
38#include <VBoxVideo3D.h>
39#include <VBoxVideoHost3D.h>
40
41#ifdef DEBUG_misha
42# define VBOXVDBG_MEMCACHE_DISABLE
43#endif
44
45#ifndef VBOXVDBG_MEMCACHE_DISABLE
46# include <iprt/memcache.h>
47#endif
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53#ifdef DEBUG_misha
54# define WARN_BP() do { AssertFailed(); } while (0)
55#else
56# define WARN_BP() do { } while (0)
57#endif
58#define WARN(_msg) do { \
59 LogRel(_msg); \
60 WARN_BP(); \
61 } while (0)
62
63#define VBOXVDMATHREAD_STATE_TERMINATED 0
64#define VBOXVDMATHREAD_STATE_CREATING 1
65#define VBOXVDMATHREAD_STATE_CREATED 3
66#define VBOXVDMATHREAD_STATE_TERMINATING 4
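
/* A sketch of the assumed thread state life cycle, summarized from the
 * VBoxVDMAThread* functions further down in this file:
 *
 *   TERMINATED  --VBoxVDMAThreadCreate()-------------------> CREATING
 *   CREATING    --VBoxVDMAThreadNotifyConstructSucceeded()-> CREATED
 *   CREATED     --VBoxVDMAThreadTerm()---------------------> TERMINATING
 *   TERMINATING --VBoxVDMAThreadCleanup()------------------> TERMINATED
 */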
67
68
69/*********************************************************************************************************************************
70* Structures and Typedefs *
71*********************************************************************************************************************************/
72struct VBOXVDMATHREAD;
73
74typedef DECLCALLBACKPTR(void, PFNVBOXVDMATHREAD_CHANGED)(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvChangeContext);
75
76static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd, VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb);
77
78
79typedef struct VBOXVDMATHREAD
80{
81 RTTHREAD hWorkerThread;
82 RTSEMEVENT hEvent;
83 volatile uint32_t u32State;
84 PFNVBOXVDMATHREAD_CHANGED pfnChanged;
85 void *pvChanged;
86} VBOXVDMATHREAD, *PVBOXVDMATHREAD;
87
88
89/* State transitions:
90 *
91 * submitter | processor
92 *
93 * LISTENING ---> PROCESSING
94 *
95 */
96#define VBVAEXHOSTCONTEXT_STATE_LISTENING 0
97#define VBVAEXHOSTCONTEXT_STATE_PROCESSING 1
98
99#define VBVAEXHOSTCONTEXT_ESTATE_DISABLED -1
100#define VBVAEXHOSTCONTEXT_ESTATE_PAUSED 0
101#define VBVAEXHOSTCONTEXT_ESTATE_ENABLED 1
102
103typedef struct VBVAEXHOSTCONTEXT
104{
105 VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *pVBVA;
106 /** Maximum number of data bytes addressable relative to pVBVA. */
107 uint32_t cbMaxData;
108 volatile int32_t i32State;
109 volatile int32_t i32EnableState;
110 volatile uint32_t u32cCtls;
111 /* critical section for accessing ctl lists */
112 RTCRITSECT CltCritSect;
113 RTLISTANCHOR GuestCtlList;
114 RTLISTANCHOR HostCtlList;
115#ifndef VBOXVDBG_MEMCACHE_DISABLE
116 RTMEMCACHE CtlCache;
117#endif
118} VBVAEXHOSTCONTEXT;
119
120typedef enum
121{
122 VBVAEXHOSTCTL_TYPE_UNDEFINED = 0,
123 VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE,
124 VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME,
125 VBVAEXHOSTCTL_TYPE_HH_SAVESTATE,
126 VBVAEXHOSTCTL_TYPE_HH_LOADSTATE,
127 VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE,
128 VBVAEXHOSTCTL_TYPE_HH_BE_OPAQUE,
129 VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD,
130 VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE,
131 VBVAEXHOSTCTL_TYPE_GHH_ENABLE,
132 VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED,
133 VBVAEXHOSTCTL_TYPE_GHH_DISABLE,
134 VBVAEXHOSTCTL_TYPE_GHH_RESIZE
135} VBVAEXHOSTCTL_TYPE;
136
137struct VBVAEXHOSTCTL;
138
139typedef DECLCALLBACK(void) FNVBVAEXHOSTCTL_COMPLETE(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvComplete);
140typedef FNVBVAEXHOSTCTL_COMPLETE *PFNVBVAEXHOSTCTL_COMPLETE;
141
142typedef struct VBVAEXHOSTCTL
143{
144 RTLISTNODE Node;
145 VBVAEXHOSTCTL_TYPE enmType;
146 union
147 {
148 struct
149 {
150 void RT_UNTRUSTED_VOLATILE_GUEST *pvCmd;
151 uint32_t cbCmd;
152 } cmd;
153
154 struct
155 {
156 PSSMHANDLE pSSM;
157 uint32_t u32Version;
158 } state;
159 } u;
160 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete;
161 void *pvComplete;
162} VBVAEXHOSTCTL;
163
164/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
165 * but can be called together with other VBoxVBVAExS** (submitter) functions, except apparently Init/Start/Term.
166 * They may only be called by the processor, i.e. the entity that acquired the processor state by a direct or indirect call to VBoxVBVAExHSCheckCommands.
167 * See the more detailed comments in the headers for the function definitions. */
168typedef enum
169{
170 VBVAEXHOST_DATA_TYPE_NO_DATA = 0,
171 VBVAEXHOST_DATA_TYPE_CMD,
172 VBVAEXHOST_DATA_TYPE_HOSTCTL,
173 VBVAEXHOST_DATA_TYPE_GUESTCTL
174} VBVAEXHOST_DATA_TYPE;
175
176
177typedef struct VBOXVDMA_SOURCE
178{
179 VBVAINFOSCREEN Screen;
180 VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
181} VBOXVDMA_SOURCE;
182
183
184typedef struct VBOXVDMAHOST
185{
186 PHGSMIINSTANCE pHgsmi; /**< Same as VGASTATE::pHgsmi. */
187 PVGASTATE pVGAState;
188 VBVAEXHOSTCONTEXT CmdVbva;
189 VBOXVDMATHREAD Thread;
190 VBOXCRCMD_SVRINFO CrSrvInfo;
191 VBVAEXHOSTCTL* pCurRemainingHostCtl;
192 RTSEMEVENTMULTI HostCrCtlCompleteEvent;
193 int32_t volatile i32cHostCrCtlCompleted;
194 RTCRITSECT CalloutCritSect;
195// VBOXVDMA_SOURCE aSources[VBOX_VIDEO_MAX_SCREENS];
196#ifdef VBOX_VDMA_WITH_WATCHDOG
197 PTMTIMERR3 WatchDogTimer;
198#endif
199} VBOXVDMAHOST, *PVBOXVDMAHOST;
200
201
202/**
203 * List selector for VBoxVBVAExHCtlSubmit(), vdmaVBVACtlSubmit().
204 */
205typedef enum
206{
207 VBVAEXHOSTCTL_SOURCE_GUEST = 0,
208 VBVAEXHOSTCTL_SOURCE_HOST
209} VBVAEXHOSTCTL_SOURCE;
210
211
212/*********************************************************************************************************************************
213* Internal Functions *
214*********************************************************************************************************************************/
215static int vdmaVBVANotifyDisable(PVGASTATE pVGAState);
216static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd);
217static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc);
218static int VBoxVDMAThreadEventNotify(PVBOXVDMATHREAD pThread);
219static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, VBOXVDMACMD_DMA_BPB_TRANSFER RT_UNTRUSTED_VOLATILE_GUEST *pTransfer,
220 uint32_t cbBuffer);
221static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL *pCtl, VBVAEXHOSTCTL_SOURCE enmSource);
222static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
223 int rc, void *pvContext);
224
225/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
226 * but can be called concurrently with the other VBoxVBVAEx** functions, except apparently Init/Start/Term. */
227
228
229
230/**
231 * Creates a host control command.
232 */
233static VBVAEXHOSTCTL *VBoxVBVAExHCtlCreate(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL_TYPE enmType)
234{
235# ifndef VBOXVDBG_MEMCACHE_DISABLE
236 VBVAEXHOSTCTL *pCtl = (VBVAEXHOSTCTL *)RTMemCacheAlloc(pCmdVbva->CtlCache);
237# else
238 VBVAEXHOSTCTL *pCtl = (VBVAEXHOSTCTL *)RTMemAlloc(sizeof(VBVAEXHOSTCTL));
239# endif
240 if (pCtl)
241 {
242 RT_ZERO(*pCtl);
243 pCtl->enmType = enmType;
244 }
245 else
246 WARN(("VBoxVBVAExHCtlAlloc failed\n"));
247 return pCtl;
248}
249
250/**
251 * Destroys a host control command.
252 */
253static void VBoxVBVAExHCtlFree(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl)
254{
255# ifndef VBOXVDBG_MEMCACHE_DISABLE
256 RTMemCacheFree(pCmdVbva->CtlCache, pCtl);
257# else
258 RTMemFree(pCtl);
259# endif
260}
261
262
263
264/**
265 * Tries to acquire the processor role, switching the state from LISTENING to PROCESSING.
266 */
267static int vboxVBVAExHSProcessorAcquire(struct VBVAEXHOSTCONTEXT *pCmdVbva)
268{
269 Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);
270
271 if (ASMAtomicCmpXchgS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_PROCESSING, VBVAEXHOSTCONTEXT_STATE_LISTENING))
272 return VINF_SUCCESS;
273 return VERR_SEM_BUSY;
274}
275
276/**
277 * Worker for vboxVBVAExHPDataGetInner() and VBoxVBVAExHPCheckHostCtlOnDisable()
278 * that gets the next control command.
279 *
280 * @returns Pointer to command if found, NULL if not.
281 * @param pCmdVbva The VBVA command context.
282 * @param pfHostCtl Where to indicate whether it's a host or guest
283 * control command.
284 * @param fHostOnlyMode Whether to only fetch host commands, or both.
285 */
286static VBVAEXHOSTCTL *vboxVBVAExHPCheckCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, bool *pfHostCtl, bool fHostOnlyMode)
287{
288 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
289
290 if (!fHostOnlyMode && !ASMAtomicUoReadU32(&pCmdVbva->u32cCtls))
291 return NULL;
292
293 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
294 if (RT_SUCCESS(rc))
295 {
296 VBVAEXHOSTCTL *pCtl = RTListGetFirst(&pCmdVbva->HostCtlList, VBVAEXHOSTCTL, Node);
297 if (pCtl)
298 *pfHostCtl = true;
299 else if (!fHostOnlyMode)
300 {
301 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
302 {
303 pCtl = RTListGetFirst(&pCmdVbva->GuestCtlList, VBVAEXHOSTCTL, Node);
304 /* pCtl cannot be NULL here since pCmdVbva->u32cCtls is non-zero
305 * and there are no HostCtl commands. */
306 Assert(pCtl);
307 *pfHostCtl = false;
308 }
309 }
310
311 if (pCtl)
312 {
313 RTListNodeRemove(&pCtl->Node);
314 ASMAtomicDecU32(&pCmdVbva->u32cCtls);
315 }
316
317 RTCritSectLeave(&pCmdVbva->CltCritSect);
318
319 return pCtl;
320 }
321 else
322 WARN(("RTCritSectEnter failed %Rrc\n", rc));
323
324 return NULL;
325}
326
327/**
328 * Worker for vboxVDMACrHgcmHandleEnableRemainingHostCommand().
329 */
330static VBVAEXHOSTCTL *VBoxVBVAExHPCheckHostCtlOnDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
331{
332 bool fHostCtl = false;
333 VBVAEXHOSTCTL *pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostCtl, true);
334 Assert(!pCtl || fHostCtl);
335 return pCtl;
336}
337
338/**
339 * Worker for vboxVBVAExHPCheckProcessCtlInternal() and
340 * vboxVDMACrGuestCtlProcess() / VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED.
341 */
342static int VBoxVBVAExHPPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
343{
344 if (pCmdVbva->i32EnableState < VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
345 {
346 WARN(("Invalid state\n"));
347 return VERR_INVALID_STATE;
348 }
349
350 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
351 return VINF_SUCCESS;
352}
353
354/**
355 * Works the VBVA state in response to VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME.
356 */
357static int VBoxVBVAExHPResume(struct VBVAEXHOSTCONTEXT *pCmdVbva)
358{
359 if (pCmdVbva->i32EnableState != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
360 {
361 WARN(("Invalid state\n"));
362 return VERR_INVALID_STATE;
363 }
364
365 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
366 return VINF_SUCCESS;
367}
368
369/**
370 * Worker for vboxVBVAExHPDataGetInner that processes PAUSE and RESUME requests.
371 *
372 * Unclear why these cannot be handled the normal way.
373 *
374 * @returns true if handled, false if not.
375 * @param pCmdVbva The VBVA context.
376 * @param pCtl The host control command.
377 */
378static bool vboxVBVAExHPCheckProcessCtlInternal(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl)
379{
380 switch (pCtl->enmType)
381 {
382 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE:
383 VBoxVBVAExHPPause(pCmdVbva);
384 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
385 return true;
386
387 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME:
388 VBoxVBVAExHPResume(pCmdVbva);
389 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
390 return true;
391
392 default:
393 return false;
394 }
395}
396
397/**
398 * Releases the processor role, switching the state back from PROCESSING to LISTENING.
399 */
400static void vboxVBVAExHPProcessorRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
401{
402 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
403
404 ASMAtomicWriteS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
405}
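
/* A hedged usage sketch of the processor-ownership pattern built on
 * vboxVBVAExHSProcessorAcquire() and vboxVBVAExHPProcessorRelease(); the
 * caller code below is hypothetical, not taken from this file:
 *
 * @code
 *      if (RT_SUCCESS(vboxVBVAExHSProcessorAcquire(pCmdVbva)))
 *      {
 *          // We are now the one and only processor; fetch and handle commands.
 *          vboxVBVAExHPProcessorRelease(pCmdVbva);
 *      }
 *      // else VERR_SEM_BUSY: another thread is processing and will see the work.
 * @endcode
 */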
406
407/**
408 * Sets the VBVA_F_STATE_PROCESSING flag in the guest visible host event mask.
409 */
410static void vboxVBVAExHPHgEventSet(struct VBVAEXHOSTCONTEXT *pCmdVbva)
411{
412 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
413 if (pCmdVbva->pVBVA)
414 ASMAtomicOrU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, VBVA_F_STATE_PROCESSING);
415}
416
417/**
418 * Clears the VBVA_F_STATE_PROCESSING flag in the guest visible host event mask.
419 */
420static void vboxVBVAExHPHgEventClear(struct VBVAEXHOSTCONTEXT *pCmdVbva)
421{
422 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
423 if (pCmdVbva->pVBVA)
424 ASMAtomicAndU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, ~VBVA_F_STATE_PROCESSING);
425}
426
427/**
428 * Worker for vboxVBVAExHPDataGetInner.
429 *
430 * @retval VINF_SUCCESS
431 * @retval VINF_EOF
432 * @retval VINF_TRY_AGAIN
433 * @retval VERR_INVALID_STATE
434 *
435 * @thread VDMA
436 */
437static int vboxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t RT_UNTRUSTED_VOLATILE_GUEST **ppbCmd, uint32_t *pcbCmd)
438{
439 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
440 Assert(pCmdVbva->i32EnableState > VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
441
442 VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *pVBVA = pCmdVbva->pVBVA; /* This is shared with the guest, so careful! */
443
444 /*
445 * Inspect records.
446 */
447 uint32_t idxRecordFirst = ASMAtomicUoReadU32(&pVBVA->indexRecordFirst);
448 uint32_t idxRecordFree = ASMAtomicReadU32(&pVBVA->indexRecordFree);
449 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
450 Log(("first = %d, free = %d\n", idxRecordFirst, idxRecordFree));
451 if (idxRecordFirst == idxRecordFree)
452 return VINF_EOF; /* No records to process. Return without assigning output variables. */
453 AssertReturn(idxRecordFirst < VBVA_MAX_RECORDS, VERR_INVALID_STATE);
454 RT_UNTRUSTED_VALIDATED_FENCE();
455
456 /*
457 * Read the record size and check that it has been completely recorded.
458 */
459 uint32_t const cbRecordCurrent = ASMAtomicReadU32(&pVBVA->aRecords[idxRecordFirst].cbRecord);
460 uint32_t const cbRecord = cbRecordCurrent & ~VBVA_F_RECORD_PARTIAL;
461 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
462 if ( (cbRecordCurrent & VBVA_F_RECORD_PARTIAL)
463 || !cbRecord)
464 return VINF_TRY_AGAIN; /* The record is being recorded, try again. */
465 Assert(cbRecord);
466
467 /*
468 * Get and validate the data area.
469 */
470 uint32_t const offData = ASMAtomicReadU32(&pVBVA->off32Data);
471 uint32_t cbMaxData = ASMAtomicReadU32(&pVBVA->cbData);
472 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
473 AssertLogRelMsgStmt(cbMaxData <= pCmdVbva->cbMaxData, ("%#x vs %#x\n", cbMaxData, pCmdVbva->cbMaxData),
474 cbMaxData = pCmdVbva->cbMaxData);
475 AssertLogRelMsgReturn( cbRecord <= cbMaxData
476 && offData <= cbMaxData - cbRecord,
477 ("offData=%#x cbRecord=%#x cbMaxData=%#x cbRecord\n", offData, cbRecord, cbMaxData),
478 VERR_INVALID_STATE);
479 RT_UNTRUSTED_VALIDATED_FENCE();
480
481 /*
482 * Just set the return values and we're done.
483 */
484 *ppbCmd = (uint8_t RT_UNTRUSTED_VOLATILE_GUEST *)&pVBVA->au8Data[offData];
485 *pcbCmd = cbRecord;
486 return VINF_SUCCESS;
487}
488
489/**
490 * Completion routine advancing our end of the ring and data buffers forward.
491 *
492 * @param pCmdVbva The VBVA context.
493 * @param cbCmd The size of the data.
494 */
495static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd)
496{
497 VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *pVBVA = pCmdVbva->pVBVA;
498 if (pVBVA)
499 {
500 /* Move data head. */
501 uint32_t const cbData = pVBVA->cbData;
502 uint32_t const offData = pVBVA->off32Data;
503 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
504 if (cbData > 0)
505 ASMAtomicWriteU32(&pVBVA->off32Data, (offData + cbCmd) % cbData);
506 else
507 ASMAtomicWriteU32(&pVBVA->off32Data, 0);
508
509 /* Increment record pointer. */
510 uint32_t const idxRecFirst = pVBVA->indexRecordFirst;
511 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
512 ASMAtomicWriteU32(&pVBVA->indexRecordFirst, (idxRecFirst + 1) % RT_ELEMENTS(pVBVA->aRecords));
513 }
514}
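
/* A hedged sketch (hypothetical caller, not from this file) of how
 * vboxVBVAExHPCmdGet() pairs with VBoxVBVAExHPDataCompleteCmd() to consume a
 * single ring buffer record:
 *
 * @code
 *      uint8_t RT_UNTRUSTED_VOLATILE_GUEST *pbCmd;
 *      uint32_t cbCmd;
 *      if (vboxVBVAExHPCmdGet(pCmdVbva, &pbCmd, &cbCmd) == VINF_SUCCESS)
 *      {
 *          // ... process the cbCmd bytes at pbCmd (copying out and validating first) ...
 *          VBoxVBVAExHPDataCompleteCmd(pCmdVbva, cbCmd); // advances off32Data and indexRecordFirst
 *      }
 * @endcode
 */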
515
516/**
517 * Control command completion routine used by many.
518 */
519static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc)
520{
521 if (pCtl->pfnComplete)
522 pCtl->pfnComplete(pCmdVbva, pCtl, rc, pCtl->pvComplete);
523 else
524 VBoxVBVAExHCtlFree(pCmdVbva, pCtl);
525}
526
527
528/**
529 * Worker for VBoxVBVAExHPDataGet.
530 * @thread VDMA
531 */
532static VBVAEXHOST_DATA_TYPE vboxVBVAExHPDataGetInner(struct VBVAEXHOSTCONTEXT *pCmdVbva,
533 uint8_t RT_UNTRUSTED_VOLATILE_GUEST **ppbCmd, uint32_t *pcbCmd)
534{
535 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
536 VBVAEXHOSTCTL *pCtl;
537 bool fHostCtl;
538
539 for (;;)
540 {
541 pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostCtl, false);
542 if (pCtl)
543 {
544 if (fHostCtl)
545 {
546 if (!vboxVBVAExHPCheckProcessCtlInternal(pCmdVbva, pCtl))
547 {
548 *ppbCmd = (uint8_t RT_UNTRUSTED_VOLATILE_GUEST *)pCtl; /* Note! pCtl is host data, so trusted */
549 *pcbCmd = sizeof (*pCtl);
550 return VBVAEXHOST_DATA_TYPE_HOSTCTL;
551 }
552 continue; /* Processed by vboxVBVAExHPCheckProcessCtlInternal, get next. */
553 }
554 *ppbCmd = (uint8_t RT_UNTRUSTED_VOLATILE_GUEST *)pCtl; /* Note! pCtl is host data, so trusted */
555 *pcbCmd = sizeof (*pCtl);
556 return VBVAEXHOST_DATA_TYPE_GUESTCTL;
557 }
558
559 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) <= VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
560 return VBVAEXHOST_DATA_TYPE_NO_DATA;
561
562 int rc = vboxVBVAExHPCmdGet(pCmdVbva, ppbCmd, pcbCmd);
563 switch (rc)
564 {
565 case VINF_SUCCESS:
566 return VBVAEXHOST_DATA_TYPE_CMD;
567 case VINF_EOF:
568 return VBVAEXHOST_DATA_TYPE_NO_DATA;
569 case VINF_TRY_AGAIN:
570 RTThreadSleep(1);
571 continue;
572 default:
573 /* This is something really unexpected, i.e. most likely the guest has written something incorrect to the VBVA buffer. */
574 WARN(("Warning: vboxVBVAExHPCmdGet returned unexpected status %Rrc\n", rc));
575 return VBVAEXHOST_DATA_TYPE_NO_DATA;
576 }
577 }
578 /* not reached */
579}
580
581/**
582 * Called by vboxVDMAWorkerThread to get the next command to process.
583 * @thread VDMA
584 */
585static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva,
586 uint8_t RT_UNTRUSTED_VOLATILE_GUEST **ppbCmd, uint32_t *pcbCmd)
587{
588 VBVAEXHOST_DATA_TYPE enmType = vboxVBVAExHPDataGetInner(pCmdVbva, ppbCmd, pcbCmd);
589 if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
590 {
591 vboxVBVAExHPHgEventClear(pCmdVbva);
592 vboxVBVAExHPProcessorRelease(pCmdVbva);
593
594 /*
595 * We need to prevent a race between us clearing the flag and the command check/submission thread, i.e.
596 * 1. we check the queue -> and it is empty
597 * 2. submitter adds command to the queue
598 * 3. submitter checks the "processing" -> and it is true , thus it does not submit a notification
599 * 4. we clear the "processing" state
600 * 5. ->here we need to re-check the queue state to ensure we do not leak the notification of the above command
601 * 6. if the queue appears to be non-empty, set the "processing" state back to "true"
602 */
603 int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
604 if (RT_SUCCESS(rc))
605 {
606 /* we are the processor now */
607 enmType = vboxVBVAExHPDataGetInner(pCmdVbva, ppbCmd, pcbCmd);
608 if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
609 {
610 vboxVBVAExHPProcessorRelease(pCmdVbva);
611 return VBVAEXHOST_DATA_TYPE_NO_DATA;
612 }
613
614 vboxVBVAExHPHgEventSet(pCmdVbva);
615 }
616 }
617
618 return enmType;
619}
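
/* A hedged sketch of the dispatch loop a worker could build on top of
 * VBoxVBVAExHPDataGet(); the handling placeholders are hypothetical:
 *
 * @code
 *      uint8_t RT_UNTRUSTED_VOLATILE_GUEST *pbCmd;
 *      uint32_t cbCmd;
 *      switch (VBoxVBVAExHPDataGet(pCmdVbva, &pbCmd, &cbCmd))
 *      {
 *          case VBVAEXHOST_DATA_TYPE_CMD:      // guest VBVA command data at pbCmd
 *          case VBVAEXHOST_DATA_TYPE_HOSTCTL:  // pbCmd actually points to a VBVAEXHOSTCTL
 *          case VBVAEXHOST_DATA_TYPE_GUESTCTL: // ditto, but queued by the guest
 *              // ... handle, then complete via VBoxVBVAExHPDataCompleteCmd/Ctl ...
 *              break;
 *          case VBVAEXHOST_DATA_TYPE_NO_DATA:  // processor released; wait for the next event
 *              break;
 *      }
 * @endcode
 */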
620
621/**
622 * Checks for pending VBVA command or (internal) control command.
623 */
624DECLINLINE(bool) vboxVBVAExHSHasCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
625{
626 VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *pVBVA = pCmdVbva->pVBVA;
627 if (pVBVA)
628 {
629 uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
630 uint32_t indexRecordFree = pVBVA->indexRecordFree;
631 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
632
633 if (indexRecordFirst != indexRecordFree)
634 return true;
635 }
636
637 return ASMAtomicReadU32(&pCmdVbva->u32cCtls) > 0;
638}
639
640/** Checks whether new commands are ready for processing.
641 *
642 * @retval VINF_SUCCESS if there are commands in the queue and the calling thread is now the processor (i.e. typically it would delegate processing to a worker thread).
643 * @retval VINF_EOF if there are no commands in the queue.
644 * @retval VINF_ALREADY_INITIALIZED if another thread is already processing the commands.
645 * @retval VERR_INVALID_STATE if the VBVA is paused or pausing. */
646static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
647{
648 int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
649 if (RT_SUCCESS(rc))
650 {
651 /* we are the processor now */
652 if (vboxVBVAExHSHasCommands(pCmdVbva))
653 {
654 vboxVBVAExHPHgEventSet(pCmdVbva);
655 return VINF_SUCCESS;
656 }
657
658 vboxVBVAExHPProcessorRelease(pCmdVbva);
659 return VINF_EOF;
660 }
661 if (rc == VERR_SEM_BUSY)
662 return VINF_ALREADY_INITIALIZED;
663 return VERR_INVALID_STATE;
664}
665
666/**
667 * Worker for vboxVDMAConstruct() that initializes the given VBVA host context.
668 */
669static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva)
670{
671 RT_ZERO(*pCmdVbva);
672 int rc = RTCritSectInit(&pCmdVbva->CltCritSect);
673 if (RT_SUCCESS(rc))
674 {
675# ifndef VBOXVDBG_MEMCACHE_DISABLE
676 rc = RTMemCacheCreate(&pCmdVbva->CtlCache, sizeof (VBVAEXHOSTCTL),
677 0, /* size_t cbAlignment */
678 UINT32_MAX, /* uint32_t cMaxObjects */
679 NULL, /* PFNMEMCACHECTOR pfnCtor*/
680 NULL, /* PFNMEMCACHEDTOR pfnDtor*/
681 NULL, /* void *pvUser*/
682 0 /* uint32_t fFlags*/
683 );
684 if (RT_SUCCESS(rc))
685# endif
686 {
687 RTListInit(&pCmdVbva->GuestCtlList);
688 RTListInit(&pCmdVbva->HostCtlList);
689 pCmdVbva->i32State = VBVAEXHOSTCONTEXT_STATE_PROCESSING;
690 pCmdVbva->i32EnableState = VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
691 return VINF_SUCCESS;
692 }
693# ifndef VBOXVDBG_MEMCACHE_DISABLE
694 WARN(("RTMemCacheCreate failed %Rrc\n", rc));
695# endif
696 }
697 else
698 WARN(("RTCritSectInit failed %Rrc\n", rc));
699
700 return rc;
701}
702
703/**
704 * Checks if VBVA state is some form of enabled.
705 */
706DECLINLINE(bool) VBoxVBVAExHSIsEnabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
707{
708 return ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) >= VBVAEXHOSTCONTEXT_ESTATE_PAUSED;
709}
710
711/**
712 * Checks if VBVA state is disabled.
713 */
714DECLINLINE(bool) VBoxVBVAExHSIsDisabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
715{
716 return ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) == VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
717}
718
719/**
720 * Worker for vdmaVBVAEnableProcess().
721 *
722 * @thread VDMA
723 */
724static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *pVBVA,
725 uint8_t *pbVRam, uint32_t cbVRam)
726{
727 if (VBoxVBVAExHSIsEnabled(pCmdVbva))
728 {
729 WARN(("VBVAEx is enabled already\n"));
730 return VERR_INVALID_STATE;
731 }
732
733 uintptr_t offVRam = (uintptr_t)pVBVA - (uintptr_t)pbVRam;
734 AssertLogRelMsgReturn(offVRam < cbVRam - sizeof(*pVBVA), ("%#p cbVRam=%#x\n", offVRam, cbVRam), VERR_OUT_OF_RANGE);
735 RT_UNTRUSTED_VALIDATED_FENCE();
736
737 pCmdVbva->pVBVA = pVBVA;
738 pCmdVbva->cbMaxData = cbVRam - offVRam - RT_UOFFSETOF(VBVABUFFER, au8Data);
739 pVBVA->hostFlags.u32HostEvents = 0;
740 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
741 return VINF_SUCCESS;
742}
743
744/**
745 * Sets the enable state to DISABLED.
746 * @thread VDMA, CR, EMT, ...
747 */
748static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
749{
750 if (VBoxVBVAExHSIsDisabled(pCmdVbva))
751 return VINF_SUCCESS;
752
753 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
754 return VINF_SUCCESS;
755}
756
757/**
758 * Worker for vboxVDMADestruct() and vboxVDMAConstruct().
759 */
760static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva)
761{
762 /* ensure the processor is stopped */
763 Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);
764
765 /* ensure no one tries to submit the command */
766 if (pCmdVbva->pVBVA)
767 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
768
769 Assert(RTListIsEmpty(&pCmdVbva->GuestCtlList));
770 Assert(RTListIsEmpty(&pCmdVbva->HostCtlList));
771
772 RTCritSectDelete(&pCmdVbva->CltCritSect);
773
774# ifndef VBOXVDBG_MEMCACHE_DISABLE
775 RTMemCacheDestroy(pCmdVbva->CtlCache);
776# endif
777
778 RT_ZERO(*pCmdVbva);
779}
780
781
782/**
783 * Worker for vboxVBVAExHSSaveStateLocked().
784 * @thread VDMA
785 */
786static int vboxVBVAExHSSaveGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
787{
788 RT_NOREF(pCmdVbva);
789 int rc = SSMR3PutU32(pSSM, pCtl->enmType);
790 AssertRCReturn(rc, rc);
791 rc = SSMR3PutU32(pSSM, pCtl->u.cmd.cbCmd);
792 AssertRCReturn(rc, rc);
793 rc = SSMR3PutU32(pSSM, (uint32_t)((uintptr_t)pCtl->u.cmd.pvCmd - (uintptr_t)pu8VramBase));
794 AssertRCReturn(rc, rc);
795
796 return VINF_SUCCESS;
797}
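
/* Note on the saved state layout (as read from the code above and from
 * vboxVBVAExHSLoadGuestCtl() below): each pending guest control is stored as
 * three u32 values - the control type, the command size, and the command
 * offset relative to the VRAM base - and vboxVBVAExHSSaveStateLocked()
 * terminates the list with a single zero u32. */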
798
799/**
800 * Worker for VBoxVBVAExHSSaveState().
801 * @thread VDMA
802 */
803static int vboxVBVAExHSSaveStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
804{
805 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
806 {
807 WARN(("vbva not paused\n"));
808 return VERR_INVALID_STATE;
809 }
810
811 int rc;
812 VBVAEXHOSTCTL* pCtl;
813 RTListForEach(&pCmdVbva->GuestCtlList, pCtl, VBVAEXHOSTCTL, Node)
814 {
815 rc = vboxVBVAExHSSaveGuestCtl(pCmdVbva, pCtl, pu8VramBase, pSSM);
816 AssertRCReturn(rc, rc);
817 }
818
819 rc = SSMR3PutU32(pSSM, 0);
820 AssertRCReturn(rc, rc);
821
822 return VINF_SUCCESS;
823}
824
825/**
826 * Handles VBVAEXHOSTCTL_TYPE_HH_SAVESTATE for vboxVDMACrHostCtlProcess, saving
827 * state on the VDMA thread.
828 *
829 * @returns Same as VBoxVBVAExHSCheckCommands, or failure if saving the state fails.
830 * @thread VDMA
831 */
832static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
833{
834 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
835 AssertRCReturn(rc, rc);
836
837 rc = vboxVBVAExHSSaveStateLocked(pCmdVbva, pu8VramBase, pSSM);
838 if (RT_FAILURE(rc))
839 WARN(("vboxVBVAExHSSaveStateLocked failed %Rrc\n", rc));
840
841 RTCritSectLeave(&pCmdVbva->CltCritSect);
842 return rc;
843}
844
845
846/**
847 * Worker for vboxVBVAExHSLoadStateLocked.
848 * @retval VINF_EOF if there is nothing more to load.
849 * @thread VDMA
850 */
851static int vboxVBVAExHSLoadGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
852{
853 RT_NOREF(u32Version);
854 uint32_t u32;
855 int rc = SSMR3GetU32(pSSM, &u32);
856 AssertLogRelRCReturn(rc, rc);
857
858 if (!u32)
859 return VINF_EOF;
860
861 VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(pCmdVbva, (VBVAEXHOSTCTL_TYPE)u32);
862 if (!pHCtl)
863 {
864 WARN(("VBoxVBVAExHCtlCreate failed\n"));
865 return VERR_NO_MEMORY;
866 }
867
868 rc = SSMR3GetU32(pSSM, &u32);
869 AssertLogRelRCReturn(rc, rc);
870 pHCtl->u.cmd.cbCmd = u32;
871
872 rc = SSMR3GetU32(pSSM, &u32);
873 AssertLogRelRCReturn(rc, rc);
874 pHCtl->u.cmd.pvCmd = pu8VramBase + u32;
875
876 RTListAppend(&pCmdVbva->GuestCtlList, &pHCtl->Node);
877 ++pCmdVbva->u32cCtls;
878
879 return VINF_SUCCESS;
880}
881
882/**
883 * Worker for VBoxVBVAExHSLoadState.
884 * @thread VDMA
885 */
886static int vboxVBVAExHSLoadStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
887{
888 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
889 {
890 WARN(("vbva not stopped\n"));
891 return VERR_INVALID_STATE;
892 }
893
894 int rc;
895 do
896 {
897 rc = vboxVBVAExHSLoadGuestCtl(pCmdVbva, pu8VramBase, pSSM, u32Version);
898 AssertLogRelRCReturn(rc, rc);
899 } while (rc != VINF_EOF);
900
901 return VINF_SUCCESS;
902}
903
904/**
905 * Handles VBVAEXHOSTCTL_TYPE_HH_LOADSTATE for vboxVDMACrHostCtlProcess(),
906 * loading state on the VDMA thread.
907 *
908 * @returns Same as VBoxVBVAExHSCheckCommands, or failure if loading the state fails.
909 * @thread VDMA
910 */
911static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
912{
913 Assert(VGA_SAVEDSTATE_VERSION_3D <= u32Version);
914 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
915 AssertRCReturn(rc, rc);
916
917 rc = vboxVBVAExHSLoadStateLocked(pCmdVbva, pu8VramBase, pSSM, u32Version);
918 if (RT_FAILURE(rc))
919 WARN(("vboxVBVAExHSSaveStateLocked failed %Rrc\n", rc));
920
921 RTCritSectLeave(&pCmdVbva->CltCritSect);
922 return rc;
923}
924
925
926
927/**
928 * Queues a control command to the VDMA worker thread.
929 *
930 * The @a enmSource argument decides which list (guest/host) it's queued on.
931 *
932 */
933static int VBoxVBVAExHCtlSubmit(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, VBVAEXHOSTCTL_SOURCE enmSource,
934 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
935{
936 int rc;
937 if (VBoxVBVAExHSIsEnabled(pCmdVbva))
938 {
939 pCtl->pfnComplete = pfnComplete;
940 pCtl->pvComplete = pvComplete;
941
942 rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
943 if (RT_SUCCESS(rc))
944 {
945 /* Recheck that we're enabled after we've got the lock. */
946 if (VBoxVBVAExHSIsEnabled(pCmdVbva))
947 {
948 /* Queue it. */
949 if (enmSource > VBVAEXHOSTCTL_SOURCE_GUEST)
950 RTListAppend(&pCmdVbva->HostCtlList, &pCtl->Node);
951 else
952 RTListAppend(&pCmdVbva->GuestCtlList, &pCtl->Node);
953 ASMAtomicIncU32(&pCmdVbva->u32cCtls);
954
955 RTCritSectLeave(&pCmdVbva->CltCritSect);
956
957 /* Check for pending commands and claim the processor role if it is free. */
958 rc = VBoxVBVAExHSCheckCommands(pCmdVbva);
959 }
960 else
961 {
962 RTCritSectLeave(&pCmdVbva->CltCritSect);
963 Log(("cmd vbva not enabled (race)\n"));
964 rc = VERR_INVALID_STATE;
965 }
966 }
967 else
968 AssertRC(rc);
969 }
970 else
971 {
972 Log(("cmd vbva not enabled\n"));
973 rc = VERR_INVALID_STATE;
974 }
975 return rc;
976}
977
978/**
979 * Submits the control command and notifies the VDMA thread.
980 */
981static int vdmaVBVACtlSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL *pCtl, VBVAEXHOSTCTL_SOURCE enmSource,
982 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
983{
984 int rc = VBoxVBVAExHCtlSubmit(&pVdma->CmdVbva, pCtl, enmSource, pfnComplete, pvComplete);
985 if (RT_SUCCESS(rc))
986 {
987 if (rc == VINF_SUCCESS)
988 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
989 Assert(rc == VINF_ALREADY_INITIALIZED);
990 }
991 else
992 Log(("VBoxVBVAExHCtlSubmit failed %Rrc\n", rc));
993
994 return rc;
995}
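
/* A hedged sketch of queuing a host control end to end with
 * VBoxVBVAExHCtlCreate() and vdmaVBVACtlSubmit(); the completion callback and
 * context names are hypothetical:
 *
 * @code
 *      VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_HH_BE_OPAQUE);
 *      if (pHCtl)
 *      {
 *          pHCtl->u.cmd.pvCmd = pvCmd;  // host-side payload
 *          pHCtl->u.cmd.cbCmd = cbCmd;
 *          int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_HOST,
 *                                     myCompletionCb, pvMyContext); // hypothetical names
 *          if (RT_FAILURE(rc))
 *              VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
 *      }
 * @endcode
 */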
996
997
998/**
999 * Call VDMA thread creation notification callback.
1000 */
1001void VBoxVDMAThreadNotifyConstructSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
1002{
1003 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_CREATING);
1004 PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
1005 void *pvChanged = pThread->pvChanged;
1006
1007 pThread->pfnChanged = NULL;
1008 pThread->pvChanged = NULL;
1009
1010 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_CREATED);
1011
1012 if (pfnChanged)
1013 pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
1014}
1015
1016/**
1017 * Call VDMA thread termination notification callback.
1018 */
1019void VBoxVDMAThreadNotifyTerminatingSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
1020{
1021 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_TERMINATING);
1022 PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
1023 void *pvChanged = pThread->pvChanged;
1024
1025 pThread->pfnChanged = NULL;
1026 pThread->pvChanged = NULL;
1027
1028 if (pfnChanged)
1029 pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
1030}
1031
1032/**
1033 * Check if VDMA thread is terminating.
1034 */
1035DECLINLINE(bool) VBoxVDMAThreadIsTerminating(PVBOXVDMATHREAD pThread)
1036{
1037 return ASMAtomicUoReadU32(&pThread->u32State) == VBOXVDMATHREAD_STATE_TERMINATING;
1038}
1039
1040/**
1041 * Init VDMA thread.
1042 */
1043void VBoxVDMAThreadInit(PVBOXVDMATHREAD pThread)
1044{
1045 RT_ZERO(*pThread);
1046 pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
1047}
1048
1049/**
1050 * Clean up VDMA thread.
1051 */
1052int VBoxVDMAThreadCleanup(PVBOXVDMATHREAD pThread)
1053{
1054 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
1055 switch (u32State)
1056 {
1057 case VBOXVDMATHREAD_STATE_TERMINATED:
1058 return VINF_SUCCESS;
1059
1060 case VBOXVDMATHREAD_STATE_TERMINATING:
1061 {
1062 int rc = RTThreadWait(pThread->hWorkerThread, RT_INDEFINITE_WAIT, NULL);
1063 if (RT_SUCCESS(rc))
1064 {
1065 RTSemEventDestroy(pThread->hEvent);
1066 pThread->hEvent = NIL_RTSEMEVENT;
1067 pThread->hWorkerThread = NIL_RTTHREAD;
1068 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATED);
1069 }
1070 else
1071 WARN(("RTThreadWait failed %Rrc\n", rc));
1072 return rc;
1073 }
1074
1075 default:
1076 WARN(("invalid state"));
1077 return VERR_INVALID_STATE;
1078 }
1079}
1080
1081/**
1082 * Start VDMA thread.
1083 */
1084int VBoxVDMAThreadCreate(PVBOXVDMATHREAD pThread, PFNRTTHREAD pfnThread, void *pvThread,
1085 PFNVBOXVDMATHREAD_CHANGED pfnCreated, void *pvCreated)
1086{
1087 int rc = VBoxVDMAThreadCleanup(pThread);
1088 if (RT_SUCCESS(rc))
1089 {
1090 rc = RTSemEventCreate(&pThread->hEvent);
 AssertRCReturn(rc, rc); /* Bail out if the event semaphore could not be created. */
1091 pThread->u32State = VBOXVDMATHREAD_STATE_CREATING;
1092 pThread->pfnChanged = pfnCreated;
1093 pThread->pvChanged = pvCreated;
1094 rc = RTThreadCreate(&pThread->hWorkerThread, pfnThread, pvThread, 0, RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "VDMA");
1095 if (RT_SUCCESS(rc))
1096 return VINF_SUCCESS;
1097
1098 WARN(("RTThreadCreate failed %Rrc\n", rc));
1099 RTSemEventDestroy(pThread->hEvent);
1100 pThread->hEvent = NIL_RTSEMEVENT;
1101 pThread->hWorkerThread = NIL_RTTHREAD;
1102 pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
1103 }
1104 else
1105 WARN(("VBoxVDMAThreadCleanup failed %Rrc\n", rc));
1106 return rc;
1107}
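
/* A hedged sketch of the expected VBoxVDMAThread* call order; myWorkerProc
 * and pvUser are hypothetical, and the worker is assumed to call
 * VBoxVDMAThreadNotifyConstructSucceeded() once it is up and running:
 *
 * @code
 *      VBOXVDMATHREAD Thread;
 *      VBoxVDMAThreadInit(&Thread);                   // state: TERMINATED
 *      int rc = VBoxVDMAThreadCreate(&Thread, myWorkerProc, pvUser, NULL, NULL);
 *      // ... worker runs; later, to shut it down: ...
 *      VBoxVDMAThreadTerm(&Thread, NULL, NULL, true); // fNotify: wake the worker
 *      VBoxVDMAThreadCleanup(&Thread);                // waits; state: TERMINATED
 * @endcode
 */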
1108
1109/**
1110 * Notifies the VDMA thread.
1111 * @thread !VDMA
1112 */
1113static int VBoxVDMAThreadEventNotify(PVBOXVDMATHREAD pThread)
1114{
1115 int rc = RTSemEventSignal(pThread->hEvent);
1116 AssertRC(rc);
1117 return rc;
1118}
1119
1120/**
1121 * State worker for VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD &
1122 * VBVAEXHOSTCTL_TYPE_GHH_DISABLE in vboxVDMACrHostCtlProcess(), and
1123 * VBVAEXHOSTCTL_TYPE_GHH_DISABLE in vboxVDMACrGuestCtlProcess().
1124 *
1125 * @thread VDMA
1126 */
1127static int VBoxVDMAThreadTerm(PVBOXVDMATHREAD pThread, PFNVBOXVDMATHREAD_CHANGED pfnTerminated, void *pvTerminated, bool fNotify)
1128{
1129 for (;;)
1130 {
1131 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
1132 switch (u32State)
1133 {
1134 case VBOXVDMATHREAD_STATE_CREATED:
1135 pThread->pfnChanged = pfnTerminated;
1136 pThread->pvChanged = pvTerminated;
1137 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATING);
1138 if (fNotify)
1139 {
1140 int rc = VBoxVDMAThreadEventNotify(pThread);
1141 AssertRC(rc);
1142 }
1143 return VINF_SUCCESS;
1144
1145 case VBOXVDMATHREAD_STATE_TERMINATING:
1146 case VBOXVDMATHREAD_STATE_TERMINATED:
1147 WARN(("thread is marked to termination or terminated\nn"));
1148 return VERR_INVALID_STATE;
1149
1150 case VBOXVDMATHREAD_STATE_CREATING:
1151 /* wait until the thread creation has completed */
1152 WARN(("concurrent thread create/destroy\n"));
1153 RTThreadYield();
1154 continue;
1155
1156 default:
1157 WARN(("invalid state"));
1158 return VERR_INVALID_STATE;
1159 }
1160 }
1161}
1162
1163
1164
1165/*
1166 *
1167 *
1168 * vboxVDMACrCtlPost / vboxVDMACrCtlPostAsync
1169 * vboxVDMACrCtlPost / vboxVDMACrCtlPostAsync
1170 * vboxVDMACrCtlPost / vboxVDMACrCtlPostAsync
1171 *
1172 *
1173 */
1174
1175/** Completion callback for vboxVDMACrCtlPostAsync(). */
1176typedef DECLCALLBACK(void) FNVBOXVDMACRCTL_CALLBACK(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext);
1177/** Pointer to a vboxVDMACrCtlPostAsync completion callback. */
1178typedef FNVBOXVDMACRCTL_CALLBACK *PFNVBOXVDMACRCTL_CALLBACK;
1179
1180/**
1181 * Private wrapper around VBOXVDMACMD_CHROMIUM_CTL.
1182 */
1183typedef struct VBOXVDMACMD_CHROMIUM_CTL_PRIVATE
1184{
1185 uint32_t uMagic; /**< VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC */
1186 uint32_t cRefs;
1187 int32_t volatile rc;
1188 PFNVBOXVDMACRCTL_CALLBACK pfnCompletion;
1189 void *pvCompletion;
1190 RTSEMEVENT hEvtDone;
1191 VBOXVDMACMD_CHROMIUM_CTL Cmd;
1192} VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, *PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE;
1193/** Magic number for VBOXVDMACMD_CHROMIUM_CTL_PRIVATE (Michael Wolff). */
1194# define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC UINT32_C(0x19530827)
1195
1196/** Converts from a VBOXVDMACMD_CHROMIUM_CTL::Cmd pointer to a pointer to the
1197 * containing structure. */
1198# define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(_p) RT_FROM_MEMBER(_p, VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd)
1199
1200/**
1201 * Creates a VBOXVDMACMD_CHROMIUM_CTL_PRIVATE instance.
1202 */
1203static PVBOXVDMACMD_CHROMIUM_CTL vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE enmCmd, uint32_t cbCmd)
1204{
1205 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr;
1206 pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)RTMemAllocZ(cbCmd + RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd));
1207 if (pHdr)
1208 {
1209 pHdr->uMagic = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC;
1210 pHdr->cRefs = 1;
1211 pHdr->rc = VERR_NOT_IMPLEMENTED;
1212 pHdr->hEvtDone = NIL_RTSEMEVENT;
1213 pHdr->Cmd.enmType = enmCmd;
1214 pHdr->Cmd.cbCmd = cbCmd;
1215 return &pHdr->Cmd;
1216 }
1217 return NULL;
1218}
1219
1220/**
1221 * Releases a reference to a VBOXVDMACMD_CHROMIUM_CTL_PRIVATE instance.
1222 */
1223DECLINLINE(void) vboxVDMACrCtlRelease(PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1224{
1225 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1226 Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);
1227
1228 uint32_t cRefs = ASMAtomicDecU32(&pHdr->cRefs);
1229 if (!cRefs)
1230 {
1231 pHdr->uMagic = ~VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC;
1232 if (pHdr->hEvtDone != NIL_RTSEMEVENT)
1233 {
1234 RTSemEventDestroy(pHdr->hEvtDone);
1235 pHdr->hEvtDone = NIL_RTSEMEVENT;
1236 }
1237 RTMemFree(pHdr);
1238 }
1239}
1240
1241/**
1242 * Retains a reference to a VBOXVDMACMD_CHROMIUM_CTL_PRIVATE instance.
1243 */
1244DECLINLINE(void) vboxVDMACrCtlRetain(PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1245{
1246 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1247 Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);
1248
1249 uint32_t cRefs = ASMAtomicIncU32(&pHdr->cRefs);
1250 Assert(cRefs > 1);
1251 Assert(cRefs < _1K);
1252 RT_NOREF_PV(cRefs);
1253}
1254
1255/**
1256 * Gets the result from our private chromium control command.
1257 *
1258 * @returns status code.
1259 * @param pCmd The command.
1260 */
1261DECLINLINE(int) vboxVDMACrCtlGetRc(PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1262{
1263 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1264 Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);
1265 return pHdr->rc;
1266}
1267
1268/**
1269 * @interface_method_impl{PDMIDISPLAYVBVACALLBACKS,pfnCrHgsmiControlCompleteAsync}
1270 *
1271 * @note Some indirect completion magic, you gotta love this code!
1272 */
1273DECLCALLBACK(int) vboxVDMACrHgsmiControlCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CTL pCmd, int rc)
1274{
1275 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
1276 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1277 Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);
1278
1279 pHdr->rc = rc;
1280 if (pHdr->pfnCompletion)
1281 pHdr->pfnCompletion(pVGAState, pCmd, pHdr->pvCompletion);
1282 return VINF_SUCCESS;
1283}
1284
1285/**
1286 * @callback_method_impl{FNCRCTLCOMPLETION,
1287 * Completion callback for vboxVDMACrCtlPost. }
1288 */
1289static DECLCALLBACK(void) vboxVDMACrCtlCbSetEvent(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void *pvContext)
1290{
1291 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)pvContext;
1292 Assert(pHdr == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd));
1293 Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);
1294 RT_NOREF(pVGAState, pCmd);
1295
1296 int rc = RTSemEventSignal(pHdr->hEvtDone);
1297 AssertRC(rc);
1298
1299 vboxVDMACrCtlRelease(&pHdr->Cmd);
1300}
1301
1302/**
1303 * Worker for vboxVDMACrCtlPost().
1304 */
1305static int vboxVDMACrCtlPostAsync(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd,
1306 PFNVBOXVDMACRCTL_CALLBACK pfnCompletion, void *pvCompletion)
1307{
1308 if ( pVGAState->pDrv
1309 && pVGAState->pDrv->pfnCrHgsmiControlProcess)
1310 {
1311 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1312 pHdr->pfnCompletion = pfnCompletion;
1313 pHdr->pvCompletion = pvCompletion;
1314 pVGAState->pDrv->pfnCrHgsmiControlProcess(pVGAState->pDrv, pCmd, cbCmd);
1315 return VINF_SUCCESS;
1316 }
1317 return VERR_NOT_SUPPORTED;
1318}
1319
1320/**
1321 * Posts a control command and waits for its completion.
1322 */
1323static int vboxVDMACrCtlPost(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd)
1324{
1325 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1326
1327 /* Allocate the semaphore. */
1328 Assert(pHdr->hEvtDone == NIL_RTSEMEVENT);
1329 int rc = RTSemEventCreate(&pHdr->hEvtDone);
1330 AssertRCReturn(rc, rc);
1331
1332 /* Grab a reference for the completion routine. */
1333 vboxVDMACrCtlRetain(&pHdr->Cmd);
1334
1335 /* Submit and wait for it. */
1336 rc = vboxVDMACrCtlPostAsync(pVGAState, pCmd, cbCmd, vboxVDMACrCtlCbSetEvent, pHdr);
1337 if (RT_SUCCESS(rc))
1338 rc = RTSemEventWaitNoResume(pHdr->hEvtDone, RT_INDEFINITE_WAIT);
1339 else
1340 {
1341 if (rc != VERR_NOT_SUPPORTED)
1342 AssertRC(rc);
1343 vboxVDMACrCtlRelease(pCmd);
1344 }
1345 return rc;
1346}
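
/* A hedged sketch of the create/post/inspect/release sequence for the
 * private chromium control wrapper; the control type is only an example and
 * the surrounding context (pVGAState) is assumed:
 *
 * @code
 *      PVBOXVDMACMD_CHROMIUM_CTL pCmd = vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP, sizeof(*pCmd));
 *      if (pCmd)
 *      {
 *          int rc = vboxVDMACrCtlPost(pVGAState, pCmd, pCmd->cbCmd); // blocks until completed
 *          if (RT_SUCCESS(rc))
 *              rc = vboxVDMACrCtlGetRc(pCmd); // result set by the completion path
 *          vboxVDMACrCtlRelease(pCmd);        // drop our reference
 *      }
 * @endcode
 */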
1347
1348
1349/**
1350 * Structure for passing data between vboxVDMACrHgcmSubmitSync() and the
1351 * completion routine vboxVDMACrHgcmSubmitSyncCompletion().
1352 */
1353typedef struct VDMA_VBVA_CTL_CYNC_COMPLETION
1354{
1355 int volatile rc;
1356 RTSEMEVENT hEvent;
1357} VDMA_VBVA_CTL_CYNC_COMPLETION;
1358
1359/**
1360 * @callback_method_impl{FNCRCTLCOMPLETION,
1361 * Completion callback for vboxVDMACrHgcmSubmitSync() that signals the
1362 * waiting thread.}
1363 */
1364static DECLCALLBACK(void) vboxVDMACrHgcmSubmitSyncCompletion(struct VBOXCRCMDCTL *pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
1365{
1366 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvCompletion;
1367 pData->rc = rc;
1368 rc = RTSemEventSignal(pData->hEvent);
1369 AssertLogRelRC(rc);
1370
1371 RT_NOREF(pCmd, cbCmd);
1372}
1373
1374/**
1375 * Worker for vboxVDMACrHgcmHandleEnable() and vdmaVBVAEnableProcess() that
1376 * works pVGAState->pDrv->pfnCrHgcmCtlSubmit.
1377 *
1378 * @thread VDMA
1379 */
1380static int vboxVDMACrHgcmSubmitSync(struct VBOXVDMAHOST *pVdma, VBOXCRCMDCTL* pCtl, uint32_t cbCtl)
1381{
1382 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
1383 Data.rc = VERR_NOT_IMPLEMENTED;
1384 int rc = RTSemEventCreate(&Data.hEvent);
1385 if (!RT_SUCCESS(rc))
1386 {
1387 WARN(("RTSemEventCreate failed %Rrc\n", rc));
1388 return rc;
1389 }
1390
1391 pCtl->CalloutList.List.pNext = NULL;
1392
1393 PVGASTATE pVGAState = pVdma->pVGAState;
1394 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCtl, cbCtl, vboxVDMACrHgcmSubmitSyncCompletion, &Data);
1395 if (RT_SUCCESS(rc))
1396 {
1397 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
1398 if (RT_SUCCESS(rc))
1399 {
1400 rc = Data.rc;
1401 if (!RT_SUCCESS(rc))
1402 {
1403 WARN(("pfnCrHgcmCtlSubmit command failed %Rrc\n", rc));
1404 }
1405
1406 }
1407 else
1408 WARN(("RTSemEventWait failed %Rrc\n", rc));
1409 }
1410 else
1411 WARN(("pfnCrHgcmCtlSubmit failed %Rrc\n", rc));
1412
1413
1414 RTSemEventDestroy(Data.hEvent);
1415
1416 return rc;
1417}
1418
1419
1420/**
1421 * Worker for vboxVDMAReset().
1422 */
1423static int vdmaVBVACtlDisableSync(PVBOXVDMAHOST pVdma)
1424{
1425 VBVAEXHOSTCTL HCtl;
1426 RT_ZERO(HCtl);
1427 HCtl.enmType = VBVAEXHOSTCTL_TYPE_GHH_DISABLE;
1428 int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1429 if (RT_SUCCESS(rc))
1430 vgaUpdateDisplayAll(pVdma->pVGAState, /* fFailOnResize = */ false);
1431 else
1432 Log(("vdmaVBVACtlSubmitSync failed %Rrc\n", rc));
1433 return rc;
1434}
1435
1436
1437/**
1438 * @interface_method_impl{VBOXCRCMDCTL_HGCMENABLE_DATA,pfnRHCmd,
1439 * Used by vboxVDMACrHgcmNotifyTerminatingCb() and called by
1440 * crVBoxServerCrCmdDisablePostProcess() during crServerTearDown() to drain
1441 * command queues or something.}
1442 */
1443static DECLCALLBACK(uint8_t *)
1444vboxVDMACrHgcmHandleEnableRemainingHostCommand(HVBOXCRCMDCTL_REMAINING_HOST_COMMAND hClient, uint32_t *pcbCtl, int prevCmdRc)
1445{
1446 struct VBOXVDMAHOST *pVdma = hClient;
1447
1448 if (!pVdma->pCurRemainingHostCtl)
1449 VBoxVBVAExHSDisable(&pVdma->CmdVbva); /* disable VBVA, all subsequent host commands will go HGCM way */
1450 else
1451 VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pVdma->pCurRemainingHostCtl, prevCmdRc);
1452
1453 pVdma->pCurRemainingHostCtl = VBoxVBVAExHPCheckHostCtlOnDisable(&pVdma->CmdVbva);
1454 if (pVdma->pCurRemainingHostCtl)
1455 {
1456 *pcbCtl = pVdma->pCurRemainingHostCtl->u.cmd.cbCmd;
1457 return (uint8_t *)pVdma->pCurRemainingHostCtl->u.cmd.pvCmd;
1458 }
1459
1460 *pcbCtl = 0;
1461 return NULL;
1462}
1463
1464/**
1465 * @interface_method_impl{VBOXCRCMDCTL_HGCMDISABLE_DATA,pfnNotifyTermDone,
1466 * Called by crServerTearDown().}
1467 */
1468static DECLCALLBACK(void) vboxVDMACrHgcmNotifyTerminatingDoneCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient)
1469{
1470# ifdef VBOX_STRICT
1471 struct VBOXVDMAHOST *pVdma = hClient;
1472 Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
1473 Assert(pVdma->Thread.u32State == VBOXVDMATHREAD_STATE_TERMINATING);
1474# else
1475 RT_NOREF(hClient);
1476# endif
1477}
1478
1479/**
1480 * @interface_method_impl{VBOXCRCMDCTL_HGCMDISABLE_DATA,pfnNotifyTerm,
1481 * Called by crServerTearDown().}
1482 */
1483static DECLCALLBACK(int) vboxVDMACrHgcmNotifyTerminatingCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient,
1484 VBOXCRCMDCTL_HGCMENABLE_DATA *pHgcmEnableData)
1485{
1486 struct VBOXVDMAHOST *pVdma = hClient;
1487
1488 VBVAEXHOSTCTL HCtl;
1489 RT_ZERO(HCtl);
1490 HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD;
1491 int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1492
1493 pHgcmEnableData->hRHCmd = pVdma;
1494 pHgcmEnableData->pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1495
1496 if (rc == VERR_INVALID_STATE)
1497 rc = VINF_SUCCESS;
1498 else if (RT_FAILURE(rc))
1499 WARN(("vdmaVBVACtlSubmitSync failed %Rrc\n", rc));
1500
1501 return rc;
1502}
1503
1504/**
1505 * Worker for vdmaVBVAEnableProcess() and vdmaVBVADisableProcess().
1506 *
1507 * @thread VDMA
1508 */
1509static int vboxVDMACrHgcmHandleEnable(struct VBOXVDMAHOST *pVdma)
1510{
1511 VBOXCRCMDCTL_ENABLE Enable;
1512 RT_ZERO(Enable);
1513 Enable.Hdr.enmType = VBOXCRCMDCTL_TYPE_ENABLE;
1514 Enable.Data.hRHCmd = pVdma;
1515 Enable.Data.pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1516
1517 int rc = vboxVDMACrHgcmSubmitSync(pVdma, &Enable.Hdr, sizeof (Enable));
1518 Assert(!pVdma->pCurRemainingHostCtl);
1519 if (RT_SUCCESS(rc))
1520 {
1521 Assert(!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1522 return VINF_SUCCESS;
1523 }
1524
1525 Assert(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1526 WARN(("vboxVDMACrHgcmSubmitSync failed %Rrc\n", rc));
1527 return rc;
1528}
1529
1530/**
1531 * Handles VBVAEXHOSTCTL_TYPE_GHH_ENABLE and VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED
1532 * for vboxVDMACrGuestCtlProcess().
1533 *
1534 * @thread VDMA
1535 */
1536static int vdmaVBVAEnableProcess(struct VBOXVDMAHOST *pVdma, uint32_t u32Offset)
1537{
1538 if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1539 {
1540 WARN(("vdma VBVA is already enabled\n"));
1541 return VERR_INVALID_STATE;
1542 }
1543
1544 VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *pVBVA
1545 = (VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *)HGSMIOffsetToPointerHost(pVdma->pHgsmi, u32Offset);
1546 if (!pVBVA)
1547 {
1548 WARN(("invalid offset %d (%#x)\n", u32Offset, u32Offset));
1549 return VERR_INVALID_PARAMETER;
1550 }
1551
1552 int rc = VBoxVBVAExHSEnable(&pVdma->CmdVbva, pVBVA, pVdma->pVGAState->vram_ptrR3, pVdma->pVGAState->vram_size);
1553 if (RT_SUCCESS(rc))
1554 {
1555 if (!pVdma->CrSrvInfo.pfnEnable)
1556 {
1557 /* "HGCM-less" mode. All inited. */
1558 return VINF_SUCCESS;
1559 }
1560
1561 VBOXCRCMDCTL_DISABLE Disable;
1562 Disable.Hdr.enmType = VBOXCRCMDCTL_TYPE_DISABLE;
1563 Disable.Data.hNotifyTerm = pVdma;
1564 Disable.Data.pfnNotifyTerm = vboxVDMACrHgcmNotifyTerminatingCb;
1565 Disable.Data.pfnNotifyTermDone = vboxVDMACrHgcmNotifyTerminatingDoneCb;
1566 rc = vboxVDMACrHgcmSubmitSync(pVdma, &Disable.Hdr, sizeof (Disable));
1567 if (RT_SUCCESS(rc))
1568 {
1569 PVGASTATE pVGAState = pVdma->pVGAState;
1570 VBOXCRCMD_SVRENABLE_INFO Info;
1571 Info.hCltScr = pVGAState->pDrv;
1572 Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
1573 Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
1574 Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
1575 rc = pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
1576 if (RT_SUCCESS(rc))
1577 return VINF_SUCCESS;
1578
1579 WARN(("pfnEnable failed %Rrc\n", rc));
1580 vboxVDMACrHgcmHandleEnable(pVdma);
1581 }
1582 else
1583 WARN(("vboxVDMACrHgcmSubmitSync failed %Rrc\n", rc));
1584
1585 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1586 }
1587 else
1588 WARN(("VBoxVBVAExHSEnable failed %Rrc\n", rc));
1589
1590 return rc;
1591}
1592
1593/**
1594 * Worker for several vboxVDMACrHostCtlProcess() commands.
1595 *
1596 * @returns IPRT status code.
1597 * @param pVdma The VDMA channel.
1598 * @param fDoHgcmEnable ???
1599 * @thread VDMA
1600 */
1601static int vdmaVBVADisableProcess(struct VBOXVDMAHOST *pVdma, bool fDoHgcmEnable)
1602{
1603 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1604 {
1605 Log(("vdma VBVA is already disabled\n"));
1606 return VINF_SUCCESS;
1607 }
1608
1609 if (!pVdma->CrSrvInfo.pfnDisable)
1610 {
1611 /* "HGCM-less" mode. Just undo what vdmaVBVAEnableProcess did. */
1612 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1613 return VINF_SUCCESS;
1614 }
1615
1616 int rc = pVdma->CrSrvInfo.pfnDisable(pVdma->CrSrvInfo.hSvr);
1617 if (RT_SUCCESS(rc))
1618 {
1619 if (fDoHgcmEnable)
1620 {
1621 PVGASTATE pVGAState = pVdma->pVGAState;
1622
1623 /* Disable is a bit tricky: we need to ensure the host ctl commands
1624 * do not come out of order and do not come over the HGCM channel
1625 * until after it is enabled. */
1626 rc = vboxVDMACrHgcmHandleEnable(pVdma);
1627 if (RT_SUCCESS(rc))
1628 {
1629 vdmaVBVANotifyDisable(pVGAState);
1630 return VINF_SUCCESS;
1631 }
1632
1633 VBOXCRCMD_SVRENABLE_INFO Info;
1634 Info.hCltScr = pVGAState->pDrv;
1635 Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
1636 Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
1637 Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
1638 pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info); /** @todo ignoring return code */
1639 }
1640 }
1641 else
1642 WARN(("pfnDisable failed %Rrc\n", rc));
1643
1644 return rc;
1645}
1646
1647/**
1648 * Handles VBVAEXHOST_DATA_TYPE_HOSTCTL for vboxVDMAWorkerThread.
1649 *
1650 * @returns VBox status code.
1651 * @param pVdma The VDMA channel.
1652 * @param pCmd The control command to process. Should be
1653 * safe, i.e. not shared with guest.
1654 * @param pfContinue Where to return whether to continue or not.
1655 * @thread VDMA
1656 */
1657static int vboxVDMACrHostCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd, bool *pfContinue)
1658{
1659 *pfContinue = true;
1660
1661 int rc;
1662 switch (pCmd->enmType)
1663 {
1664 /*
1665 * See vdmaVBVACtlOpaqueHostSubmit() and its callers.
1666 */
1667 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1668 if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1669 {
1670 if (pVdma->CrSrvInfo.pfnHostCtl)
1671 return pVdma->CrSrvInfo.pfnHostCtl(pVdma->CrSrvInfo.hSvr, (uint8_t *)pCmd->u.cmd.pvCmd, pCmd->u.cmd.cbCmd);
1672 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1673 }
1674 else
1675 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for HGCM-less mode\n"));
1676 return VERR_INVALID_STATE;
1677
1678 /*
1679 * See vdmaVBVACtlDisableSync().
1680 */
1681 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1682 rc = vdmaVBVADisableProcess(pVdma, true /* fDoHgcmEnable */);
1683 if (RT_SUCCESS(rc))
1684 rc = VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false /* fNotify */ );
1685 else
1686 WARN(("vdmaVBVADisableProcess failed %Rrc\n", rc));
1687 return rc;
1688
1689 /*
1690 * See vboxVDMACrHgcmNotifyTerminatingCb().
1691 */
1692 case VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD:
1693 rc = vdmaVBVADisableProcess(pVdma, false /* fDoHgcmEnable */);
1694 if (RT_SUCCESS(rc))
1695 {
1696 rc = VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, true /* fNotify */);
1697 if (RT_SUCCESS(rc))
1698 *pfContinue = false;
1699 else
1700 WARN(("VBoxVDMAThreadTerm failed %Rrc\n", rc));
1701 }
1702 else
1703 WARN(("vdmaVBVADisableProcess failed %Rrc\n", rc));
1704 return rc;
1705
1706 /*
1707 * See vboxVDMASaveStateExecPerform().
1708 */
1709 case VBVAEXHOSTCTL_TYPE_HH_SAVESTATE:
1710 rc = VBoxVBVAExHSSaveState(&pVdma->CmdVbva, pVdma->pVGAState->vram_ptrR3, pCmd->u.state.pSSM);
1711 if (RT_SUCCESS(rc))
1712 {
1713 VGA_SAVED_STATE_PUT_MARKER(pCmd->u.state.pSSM, 4);
1714 if (pVdma->CrSrvInfo.pfnSaveState)
1715 rc = pVdma->CrSrvInfo.pfnSaveState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM);
1716 }
1717 else
1718 WARN(("VBoxVBVAExHSSaveState failed %Rrc\n", rc));
1719 return rc;
1720
1721 /*
1722 * See vboxVDMASaveLoadExecPerform().
1723 */
1724 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE:
1725 rc = VBoxVBVAExHSLoadState(&pVdma->CmdVbva, pVdma->pVGAState->vram_ptrR3, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1726 if (RT_SUCCESS(rc))
1727 {
1728 VGA_SAVED_STATE_GET_MARKER_RETURN_ON_MISMATCH(pCmd->u.state.pSSM, pCmd->u.state.u32Version, 4);
1729 if (pVdma->CrSrvInfo.pfnLoadState)
1730 {
1731 rc = pVdma->CrSrvInfo.pfnLoadState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1732 if (RT_FAILURE(rc))
1733 WARN(("pfnLoadState failed %Rrc\n", rc));
1734 }
1735 }
1736 else
1737 WARN(("VBoxVBVAExHSLoadState failed %Rrc\n", rc));
1738 return rc;
1739
1740 /*
1741 * See vboxVDMASaveLoadDone().
1742 */
1743 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE:
1744 {
1745 PVGASTATE pVGAState = pVdma->pVGAState;
1746 for (uint32_t i = 0; i < pVGAState->cMonitors; ++i)
1747 {
1748 VBVAINFOSCREEN CurScreen;
1749 VBVAINFOVIEW CurView;
1750 rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
1751 AssertLogRelMsgRCReturn(rc, ("VBVAGetInfoViewAndScreen [screen #%u] -> %#x\n", i, rc), rc);
1752
1753 rc = VBVAInfoScreen(pVGAState, &CurScreen);
1754 AssertLogRelMsgRCReturn(rc, ("VBVAInfoScreen [screen #%u] -> %#x\n", i, rc), rc);
1755 }
1756
1757 return VINF_SUCCESS;
1758 }
1759
1760 default:
1761 WARN(("unexpected host ctl type %d\n", pCmd->enmType));
1762 return VERR_INVALID_PARAMETER;
1763 }
1764}
1765
1766/**
1767 * Worker for vboxVDMACrGuestCtlResizeEntryProcess.
1768 *
1769 * @returns VINF_SUCCESS or VERR_INVALID_PARAMETER.
1770 * @param pVGAState The VGA device state.
1771 * @param pScreen The screen info (safe copy).
1772 */
1773static int vboxVDMASetupScreenInfo(PVGASTATE pVGAState, VBVAINFOSCREEN *pScreen)
1774{
1775 const uint32_t idxView = pScreen->u32ViewIndex;
1776 const uint16_t fFlags = pScreen->u16Flags;
1777
1778 if (fFlags & VBVA_SCREEN_F_DISABLED)
1779 {
1780 if ( idxView < pVGAState->cMonitors
1781 || idxView == UINT32_C(0xFFFFFFFF))
1782 {
1783 RT_UNTRUSTED_VALIDATED_FENCE();
1784
1785 RT_ZERO(*pScreen);
1786 pScreen->u32ViewIndex = idxView;
1787 pScreen->u16Flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED;
1788 return VINF_SUCCESS;
1789 }
1790 }
1791 else
1792 {
1793 if (fFlags & VBVA_SCREEN_F_BLANK2)
1794 {
1795 if ( idxView >= pVGAState->cMonitors
1796 && idxView != UINT32_C(0xFFFFFFFF))
1797 return VERR_INVALID_PARAMETER;
1798 RT_UNTRUSTED_VALIDATED_FENCE();
1799
1800 /* Special case for blanking using current video mode.
1801 * Only the 'u16Flags' and 'u32ViewIndex' fields are relevant.
1802 */
1803 RT_ZERO(*pScreen);
1804 pScreen->u32ViewIndex = idxView;
1805 pScreen->u16Flags = fFlags;
1806 return VINF_SUCCESS;
1807 }
1808
1809 if ( idxView < pVGAState->cMonitors
1810 && pScreen->u16BitsPerPixel <= 32
1811 && pScreen->u32Width <= UINT16_MAX
1812 && pScreen->u32Height <= UINT16_MAX
1813 && pScreen->u32LineSize <= UINT16_MAX * 4)
1814 {
1815 const uint32_t u32BytesPerPixel = (pScreen->u16BitsPerPixel + 7) / 8;
1816 if (pScreen->u32Width <= pScreen->u32LineSize / (u32BytesPerPixel? u32BytesPerPixel: 1))
1817 {
1818 const uint64_t u64ScreenSize = (uint64_t)pScreen->u32LineSize * pScreen->u32Height;
1819 if ( pScreen->u32StartOffset <= pVGAState->vram_size
1820 && u64ScreenSize <= pVGAState->vram_size
1821 && pScreen->u32StartOffset <= pVGAState->vram_size - (uint32_t)u64ScreenSize)
1822 return VINF_SUCCESS;
1823 }
1824 }
1825 }
1826
1827 LogFunc(("Failed\n"));
1828 return VERR_INVALID_PARAMETER;
1829}
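
/* A minimal sketch (with hypothetical names) of the overflow-safe bounds
 * pattern the checks above rely on: "off + cb <= cbTotal" is never computed
 * directly, since the addition could wrap; the subtraction form cannot:
 *
 *     static bool isInsideBuffer(uint64_t off, uint64_t cb, uint64_t cbTotal)
 *     {
 *         return off <= cbTotal
 *             && cb  <= cbTotal
 *             && off <= cbTotal - cb; // cannot underflow: cb <= cbTotal
 *     }
 */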
1830
1831/**
1832 * Handles one entry in a VBVAEXHOSTCTL_TYPE_GHH_RESIZE command.
1833 *
1834 * @returns IPRT status code.
1835 * @param pVdma The VDMA channel
1836 * @param pEntry The entry to handle. Considered volatile.
1837 *
1838 * @thread VDMA
1839 */
1840static int vboxVDMACrGuestCtlResizeEntryProcess(struct VBOXVDMAHOST *pVdma,
1841 VBOXCMDVBVA_RESIZE_ENTRY RT_UNTRUSTED_VOLATILE_GUEST *pEntry)
1842{
1843 PVGASTATE pVGAState = pVdma->pVGAState;
1844
1845 VBVAINFOSCREEN Screen;
1846 RT_COPY_VOLATILE(Screen, pEntry->Screen);
1847 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
1848
1849 /* Verify and cleanup local copy of the input data. */
1850 int rc = vboxVDMASetupScreenInfo(pVGAState, &Screen);
1851 if (RT_FAILURE(rc))
1852 {
1853 WARN(("invalid screen data\n"));
1854 return rc;
1855 }
1856 RT_UNTRUSTED_VALIDATED_FENCE();
1857
1858 VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
1859 RT_BCOPY_VOLATILE(aTargetMap, pEntry->aTargetMap, sizeof(aTargetMap));
1860 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
1861
1862 ASMBitClearRange(aTargetMap, pVGAState->cMonitors, VBOX_VIDEO_MAX_SCREENS);
1863
1864 if (pVdma->CrSrvInfo.pfnResize)
1865 {
1866 /* Also inform the HGCM service, if it is there. */
1867 rc = pVdma->CrSrvInfo.pfnResize(pVdma->CrSrvInfo.hSvr, &Screen, aTargetMap);
1868 if (RT_FAILURE(rc))
1869 {
1870 WARN(("pfnResize failed %Rrc\n", rc));
1871 return rc;
1872 }
1873 }
1874
1875 /* A fake view which contains the current screen for the 2D VBVAInfoView. */
1876 VBVAINFOVIEW View;
1877 View.u32ViewOffset = 0;
1878 View.u32ViewSize = Screen.u32LineSize * Screen.u32Height + Screen.u32StartOffset;
1879 View.u32MaxScreenSize = Screen.u32LineSize * Screen.u32Height;
1880
1881 const bool fDisable = RT_BOOL(Screen.u16Flags & VBVA_SCREEN_F_DISABLED);
1882
1883 for (int i = ASMBitFirstSet(aTargetMap, pVGAState->cMonitors);
1884 i >= 0;
1885 i = ASMBitNextSet(aTargetMap, pVGAState->cMonitors, i))
1886 {
1887 Screen.u32ViewIndex = i;
1888
1889 VBVAINFOSCREEN CurScreen;
1890 VBVAINFOVIEW CurView;
1891
1892 rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
1893 AssertRC(rc);
1894
1895 if (!memcmp(&Screen, &CurScreen, sizeof (CurScreen)))
1896 continue;
1897
1898 /* The view does not change if _BLANK2 is set. */
1899 if ( (!fDisable || !CurView.u32ViewSize)
1900 && !RT_BOOL(Screen.u16Flags & VBVA_SCREEN_F_BLANK2))
1901 {
1902 View.u32ViewIndex = Screen.u32ViewIndex;
1903
1904 rc = VBVAInfoView(pVGAState, &View);
1905 if (RT_FAILURE(rc))
1906 {
1907 WARN(("VBVAInfoView failed %Rrc\n", rc));
1908 break;
1909 }
1910 }
1911
1912 rc = VBVAInfoScreen(pVGAState, &Screen);
1913 if (RT_FAILURE(rc))
1914 {
1915 WARN(("VBVAInfoScreen failed %Rrc\n", rc));
1916 break;
1917 }
1918 }
1919
1920 return rc;
1921}
1922
1923
1924/**
1925 * Processes VBVAEXHOST_DATA_TYPE_GUESTCTL for vboxVDMAWorkerThread and
1926 * vdmaVBVACtlThreadCreatedEnable.
1927 *
1928 * @returns VBox status code.
1929 * @param pVdma The VDMA channel.
1930 * @param pCmd The command to process. May be safe (i.e. not
1931 * shared with the guest).
1932 *
1933 * @thread VDMA
1934 */
1935static int vboxVDMACrGuestCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd)
1936{
1937 VBVAEXHOSTCTL_TYPE enmType = pCmd->enmType;
1938 switch (enmType)
1939 {
1940 /*
1941 * See handling of VBOXCMDVBVACTL_TYPE_3DCTL in vboxCmdVBVACmdCtl().
1942 */
1943 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1944 ASSERT_GUEST_LOGREL_RETURN(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva), VERR_INVALID_STATE);
1945 ASSERT_GUEST_LOGREL_RETURN(pVdma->CrSrvInfo.pfnGuestCtl, VERR_INVALID_STATE);
1946 return pVdma->CrSrvInfo.pfnGuestCtl(pVdma->CrSrvInfo.hSvr,
1947 (uint8_t RT_UNTRUSTED_VOLATILE_GUEST *)pCmd->u.cmd.pvCmd,
1948 pCmd->u.cmd.cbCmd);
1949
1950 /*
1951 * See handling of VBOXCMDVBVACTL_TYPE_RESIZE in vboxCmdVBVACmdCtl().
1952 */
1953 case VBVAEXHOSTCTL_TYPE_GHH_RESIZE:
1954 {
1955 ASSERT_GUEST_RETURN(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva), VERR_INVALID_STATE);
1956 uint32_t cbCmd = pCmd->u.cmd.cbCmd;
1957 ASSERT_GUEST_LOGREL_MSG_RETURN( !(cbCmd % sizeof(VBOXCMDVBVA_RESIZE_ENTRY))
1958 && cbCmd > 0,
1959 ("cbCmd=%#x\n", cbCmd), VERR_INVALID_PARAMETER);
1960
1961 uint32_t const cElements = cbCmd / sizeof(VBOXCMDVBVA_RESIZE_ENTRY);
1962 VBOXCMDVBVA_RESIZE RT_UNTRUSTED_VOLATILE_GUEST *pResize
1963 = (VBOXCMDVBVA_RESIZE RT_UNTRUSTED_VOLATILE_GUEST *)pCmd->u.cmd.pvCmd;
1964 for (uint32_t i = 0; i < cElements; ++i)
1965 {
1966 VBOXCMDVBVA_RESIZE_ENTRY RT_UNTRUSTED_VOLATILE_GUEST *pEntry = &pResize->aEntries[i];
1967 int rc = vboxVDMACrGuestCtlResizeEntryProcess(pVdma, pEntry);
1968 ASSERT_GUEST_LOGREL_MSG_RC_RETURN(rc, ("vboxVDMACrGuestCtlResizeEntryProcess failed for #%u: %Rrc\n", i, rc), rc);
1969 }
1970 return VINF_SUCCESS;
1971 }
1972
1973 /*
1974 * See vdmaVBVACtlEnableSubmitInternal().
1975 */
1976 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE:
1977 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED:
1978 {
1979 ASSERT_GUEST(pCmd->u.cmd.cbCmd == sizeof(VBVAENABLE));
1980
1981 VBVAENABLE RT_UNTRUSTED_VOLATILE_GUEST *pEnable = (VBVAENABLE RT_UNTRUSTED_VOLATILE_GUEST *)pCmd->u.cmd.pvCmd;
1982 uint32_t const u32Offset = pEnable->u32Offset;
1983 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
1984
1985 int rc = vdmaVBVAEnableProcess(pVdma, u32Offset);
1986 ASSERT_GUEST_MSG_RC_RETURN(rc, ("vdmaVBVAEnableProcess -> %Rrc\n", rc), rc);
1987
1988 if (enmType == VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED)
1989 {
1990 rc = VBoxVBVAExHPPause(&pVdma->CmdVbva);
1991 ASSERT_GUEST_MSG_RC_RETURN(rc, ("VBoxVBVAExHPPause -> %Rrc\n", rc), rc);
1992 }
1993 return VINF_SUCCESS;
1994 }
1995
1996 /*
1997 * See vdmaVBVACtlDisableSubmitInternal().
1998 */
1999 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
2000 {
2001 int rc = vdmaVBVADisableProcess(pVdma, true /* fDoHgcmEnable */);
2002 ASSERT_GUEST_MSG_RC_RETURN(rc, ("vdmaVBVADisableProcess -> %Rrc\n", rc), rc);
2003
2004 /* do vgaUpdateDisplayAll right away */
2005 VMR3ReqCallNoWait(PDMDevHlpGetVM(pVdma->pVGAState->pDevInsR3), VMCPUID_ANY,
2006 (PFNRT)vgaUpdateDisplayAll, 2, pVdma->pVGAState, /* fFailOnResize = */ false);
2007
2008 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false /* fNotify */);
2009 }
2010
2011 default:
2012 ASSERT_GUEST_LOGREL_MSG_FAILED(("unexpected ctl type %d\n", enmType));
2013 return VERR_INVALID_PARAMETER;
2014 }
2015}
2016
2017
2018/**
2019 * Copies one page in a VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER command.
2020 *
2021 * @param pDevIns Device instance data.
2022 * @param uPageNo Page frame number.
2023 * @param pbVram Pointer to the VRAM.
2024 * @param fIn Flag whether this is a page-in or page-out operation.
2025 * The direction is relative to VRAM: fIn == true means a
2026 * transfer to VRAM, false a transfer from VRAM.
2027 * @thread VDMA
2028 */
2029static int vboxVDMACrCmdVbvaProcessPagingEl(PPDMDEVINS pDevIns, VBOXCMDVBVAPAGEIDX uPageNo, uint8_t *pbVram, bool fIn)
2030{
2031 RTGCPHYS GCPhysPage = (RTGCPHYS)uPageNo << X86_PAGE_SHIFT;
2032 PGMPAGEMAPLOCK Lock;
2033
2034 if (fIn)
2035 {
2036 const void *pvPage;
2037 int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, GCPhysPage, 0, &pvPage, &Lock);
2038 ASSERT_GUEST_LOGREL_MSG_RC_RETURN(rc, ("PDMDevHlpPhysGCPhys2CCPtrReadOnly %RGp -> %Rrc\n", GCPhysPage, rc), rc);
2039
2040 memcpy(pbVram, pvPage, PAGE_SIZE);
2041 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2042 }
2043 else
2044 {
2045 void *pvPage;
2046 int rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, GCPhysPage, 0, &pvPage, &Lock);
2047 ASSERT_GUEST_LOGREL_MSG_RC_RETURN(rc, ("PDMDevHlpPhysGCPhys2CCPtr %RGp -> %Rrc\n", GCPhysPage, rc), rc);
2048
2049 memcpy(pvPage, pbVram, PAGE_SIZE);
2050 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2051 }
2052
2053 return VINF_SUCCESS;
2054}
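
/* The map/copy/release sequence above is the standard PGM page-lock pattern.
 * A condensed sketch of the read direction (error handling elided):
 *
 *     PGMPAGEMAPLOCK Lock;
 *     const void    *pvPage;
 *     if (RT_SUCCESS(PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, GCPhysPage, 0, &pvPage, &Lock)))
 *     {
 *         memcpy(pbVram, pvPage, PAGE_SIZE);  // mapping stays valid while the lock is held
 *         PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock); // always pair with a release
 *     }
 */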
2055
2056/**
2057 * Handles a VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER command.
2058 *
2059 * @return 0 on success, -1 on failure.
2060 *
2061 * @thread VDMA
2062 */
2063static int8_t vboxVDMACrCmdVbvaPageTransfer(PVGASTATE pVGAState, VBOXCMDVBVA_HDR const RT_UNTRUSTED_VOLATILE_GUEST *pHdr,
2064 uint32_t cbCmd, const VBOXCMDVBVA_PAGING_TRANSFER_DATA RT_UNTRUSTED_VOLATILE_GUEST *pData)
2065{
2066 /*
2067 * Extract and validate information.
2068 */
2069 ASSERT_GUEST_MSG_RETURN(cbCmd >= sizeof(VBOXCMDVBVA_PAGING_TRANSFER), ("%#x\n", cbCmd), -1);
2070
2071 bool const fIn = RT_BOOL(pHdr->u8Flags & VBOXCMDVBVA_OPF_PAGING_TRANSFER_IN);
2072 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2073
2074 uint32_t cbPageNumbers = cbCmd - RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers);
2075 ASSERT_GUEST_MSG_RETURN(!(cbPageNumbers % sizeof(VBOXCMDVBVAPAGEIDX)), ("%#x\n", cbPageNumbers), -1);
2076 VBOXCMDVBVAPAGEIDX const cPages = cbPageNumbers / sizeof(VBOXCMDVBVAPAGEIDX);
2077
2078 VBOXCMDVBVAOFFSET offVRam = pData->Alloc.u.offVRAM;
2079 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2080 ASSERT_GUEST_MSG_RETURN(!(offVRam & X86_PAGE_OFFSET_MASK), ("%#x\n", offVRam), -1);
2081 ASSERT_GUEST_MSG_RETURN(offVRam < pVGAState->vram_size, ("%#x vs %#x\n", offVRam, pVGAState->vram_size), -1);
2082 uint32_t cVRamPages = (pVGAState->vram_size - offVRam) >> X86_PAGE_SHIFT;
2083 ASSERT_GUEST_MSG_RETURN(cPages <= cVRamPages, ("cPages=%#x vs cVRamPages=%#x @ offVRam=%#x\n", cPages, cVRamPages, offVRam), -1);
2084
2085 RT_UNTRUSTED_VALIDATED_FENCE();
2086
2087 /*
2088 * Execute the command.
2089 */
2090 uint8_t *pbVRam = (uint8_t *)pVGAState->vram_ptrR3 + offVRam;
2091 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbVRam += X86_PAGE_SIZE)
2092 {
2093 uint32_t uPageNo = pData->aPageNumbers[iPage];
2094 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2095 int rc = vboxVDMACrCmdVbvaProcessPagingEl(pVGAState->pDevInsR3, uPageNo, pbVRam, fIn);
2096 ASSERT_GUEST_MSG_RETURN(RT_SUCCESS(rc), ("#%#x: uPageNo=%#x rc=%Rrc\n", iPage, uPageNo, rc), -1);
2097 }
2098 return 0;
2099}
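
/* Note the two fence flavours used above: RT_UNTRUSTED_NONVOLATILE_COPY_FENCE()
 * is placed after snapshotting a guest-shared field, so the validation works
 * on the local copy rather than a value the guest could change between reads,
 * while RT_UNTRUSTED_VALIDATED_FENCE() follows the checks so that speculative
 * execution cannot run ahead of the validation. */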
2100
2101
2102/**
2103 * Handles VBOXCMDVBVA_OPTYPE_PAGING_FILL.
2104 *
2105 * @returns 0 on success, -1 on failure.
2106 * @param pVGAState The VGA state.
2107 * @param pFill The fill command (volatile).
2108 *
2109 * @thread VDMA
2110 */
2111static int8_t vboxVDMACrCmdVbvaPagingFill(PVGASTATE pVGAState, VBOXCMDVBVA_PAGING_FILL RT_UNTRUSTED_VOLATILE_GUEST *pFill)
2112{
2113 /*
2114 * Copy and validate input.
2115 */
2116 VBOXCMDVBVA_PAGING_FILL FillSafe;
2117 RT_COPY_VOLATILE(FillSafe, *pFill);
2118 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2119
2120 VBOXCMDVBVAOFFSET offVRAM = FillSafe.offVRAM;
2121 ASSERT_GUEST_MSG_RETURN(!(offVRAM & X86_PAGE_OFFSET_MASK), ("offVRAM=%#x\n", offVRAM), -1);
2122 ASSERT_GUEST_MSG_RETURN(offVRAM <= pVGAState->vram_size, ("offVRAM=%#x\n", offVRAM), -1);
2123
2124 uint32_t cbFill = FillSafe.u32CbFill;
2125 ASSERT_GUEST_STMT(!(cbFill & 3), cbFill &= ~(uint32_t)3);
2126 ASSERT_GUEST_MSG_RETURN( cbFill < pVGAState->vram_size
2127 && offVRAM <= pVGAState->vram_size - cbFill,
2128 ("offVRAM=%#x cbFill=%#x\n", offVRAM, cbFill), -1);
2129
2130 RT_UNTRUSTED_VALIDATED_FENCE();
2131
2132 /*
2133 * Execute.
2134 */
2135 uint32_t *pu32Vram = (uint32_t *)((uint8_t *)pVGAState->vram_ptrR3 + offVRAM);
2136 uint32_t const u32Color = FillSafe.u32Pattern;
2137
2138 uint32_t cLoops = cbFill / 4;
2139 while (cLoops-- > 0)
2140 pu32Vram[cLoops] = u32Color;
2141
2142 return 0;
2143}
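
/* The fill loop above walks the buffer backwards; it is equivalent to the
 * forward form below, shown only to make the dword-fill semantics explicit:
 *
 *     for (uint32_t i = 0; i < cbFill / 4; i++)
 *         pu32Vram[i] = u32Color; // cbFill bytes written as 32-bit pattern stores
 */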
2144
2145/**
2146 * Process command data.
2147 *
2148 * @returns Zero or positive on success, negative on failure.
2149 * @param pVdma The VDMA channel.
2150 * @param pCmd The command data to process. Assume volatile.
2151 * @param cbCmd The amount of command data.
2152 *
2153 * @thread VDMA
2154 */
2155static int8_t vboxVDMACrCmdVbvaProcessCmdData(struct VBOXVDMAHOST *pVdma,
2156 const VBOXCMDVBVA_HDR RT_UNTRUSTED_VOLATILE_GUEST *pCmd, uint32_t cbCmd)
2157{
2158 uint8_t bOpCode = pCmd->u8OpCode;
2159 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2160 switch (bOpCode)
2161 {
2162 case VBOXCMDVBVA_OPTYPE_NOPCMD:
2163 return 0;
2164
2165 case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
2166 return vboxVDMACrCmdVbvaPageTransfer(pVdma->pVGAState, pCmd, cbCmd,
2167 &((VBOXCMDVBVA_PAGING_TRANSFER RT_UNTRUSTED_VOLATILE_GUEST *)pCmd)->Data);
2168
2169 case VBOXCMDVBVA_OPTYPE_PAGING_FILL:
2170 ASSERT_GUEST_RETURN(cbCmd == sizeof(VBOXCMDVBVA_PAGING_FILL), -1);
2171 return vboxVDMACrCmdVbvaPagingFill(pVdma->pVGAState, (VBOXCMDVBVA_PAGING_FILL RT_UNTRUSTED_VOLATILE_GUEST *)pCmd);
2172
2173 default:
2174 ASSERT_GUEST_RETURN(pVdma->CrSrvInfo.pfnCmd != NULL, -1);
2175 return pVdma->CrSrvInfo.pfnCmd(pVdma->CrSrvInfo.hSvr, pCmd, cbCmd);
2176 }
2177}
2178
2179# if 0
2180typedef struct VBOXCMDVBVA_PAGING_TRANSFER
2181{
2182 VBOXCMDVBVA_HDR Hdr;
2183 /* For now this can only contain offVRAM. A paging transfer can NOT be
2184 * initiated for allocations that have a host 3D object (hostID) associated. */
2185 VBOXCMDVBVA_ALLOCINFO Alloc;
2186 uint32_t u32Reserved;
2187 VBOXCMDVBVA_SYSMEMEL aSysMem[1];
2188} VBOXCMDVBVA_PAGING_TRANSFER;
2189# endif
2190
2191AssertCompile(sizeof (VBOXCMDVBVA_HDR) == 8);
2192AssertCompile(sizeof (VBOXCMDVBVA_ALLOCINFO) == 4);
2193AssertCompile(sizeof (VBOXCMDVBVAPAGEIDX) == 4);
2194AssertCompile(!(X86_PAGE_SIZE % sizeof (VBOXCMDVBVAPAGEIDX)));
2195
2196# define VBOXCMDVBVA_NUM_SYSMEMEL_PER_PAGE (X86_PAGE_SIZE / sizeof (VBOXCMDVBVA_SYSMEMEL))
2197
2198/**
2199 * Worker for vboxVDMACrCmdProcess.
2200 *
2201 * @returns 8-bit result: zero or positive on success, negative on failure.
2202 * @param pVdma The VDMA channel.
2203 * @param pCmd The command. Consider volatile!
2204 * @param cbCmd The size of what @a pCmd points to. At least
2205 * sizeof(VBOXCMDVBVA_HDR).
2206 * @param fRecursion Set if recursive call, false if not.
2207 *
2208 * @thread VDMA
2209 */
2210static int8_t vboxVDMACrCmdVbvaProcess(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR RT_UNTRUSTED_VOLATILE_GUEST *pCmd,
2211 uint32_t cbCmd, bool fRecursion)
2212{
2213 int8_t i8Result = 0;
2214 uint8_t const bOpCode = pCmd->u8OpCode;
2215 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2216 LogRelFlow(("VDMA: vboxVDMACrCmdVbvaProcess: ENTER, bOpCode=%u\n", bOpCode));
2217 switch (bOpCode)
2218 {
2219 case VBOXCMDVBVA_OPTYPE_SYSMEMCMD:
2220 {
2221 /*
2222 * Extract the command physical address and size.
2223 */
2224 ASSERT_GUEST_MSG_RETURN(cbCmd >= sizeof(VBOXCMDVBVA_SYSMEMCMD), ("%#x\n", cbCmd), -1);
2225 RTGCPHYS GCPhysCmd = ((VBOXCMDVBVA_SYSMEMCMD RT_UNTRUSTED_VOLATILE_GUEST *)pCmd)->phCmd;
2226 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2227 uint32_t cbCmdPart = X86_PAGE_SIZE - (uint32_t)(GCPhysCmd & X86_PAGE_OFFSET_MASK);
2228
2229 uint32_t cbRealCmd = pCmd->u8Flags;
2230 cbRealCmd |= (uint32_t)pCmd->u.u8PrimaryID << 8;
2231 ASSERT_GUEST_MSG_RETURN(cbRealCmd >= sizeof(VBOXCMDVBVA_HDR), ("%#x\n", cbRealCmd), -1);
2232 ASSERT_GUEST_MSG_RETURN(cbRealCmd <= _1M, ("%#x\n", cbRealCmd), -1);
2233
2234 /*
2235 * Lock down the first page of the memory specified by the command.
2236 */
2237 PGMPAGEMAPLOCK Lock;
2238 PVGASTATE pVGAState = pVdma->pVGAState;
2239 PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
2240 VBOXCMDVBVA_HDR const *pRealCmdHdr = NULL;
2241 int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, GCPhysCmd, 0, (const void **)&pRealCmdHdr, &Lock);
2242 ASSERT_GUEST_LOGREL_MSG_RC_RETURN(rc, ("VDMA: %RGp -> %Rrc\n", GCPhysCmd, rc), -1);
2243 Assert((GCPhysCmd & PAGE_OFFSET_MASK) == (((uintptr_t)pRealCmdHdr) & PAGE_OFFSET_MASK));
2244
2245 /*
2246 * All fits within one page? We can handle that pretty efficiently.
2247 */
2248 if (cbRealCmd <= cbCmdPart)
2249 {
2250 i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pRealCmdHdr, cbRealCmd);
2251 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2252 }
2253 else
2254 {
2255 /*
2256 * To keep things damn simple, just double buffer cross page or
2257 * multipage requests.
2258 */
2259 uint8_t *pbCmdBuf = (uint8_t *)RTMemTmpAllocZ(RT_ALIGN_Z(cbRealCmd, 16));
2260 if (pbCmdBuf)
2261 {
2262 memcpy(pbCmdBuf, pRealCmdHdr, cbCmdPart);
2263 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2264 pRealCmdHdr = NULL;
2265
2266 rc = PDMDevHlpPhysRead(pDevIns, GCPhysCmd + cbCmdPart, &pbCmdBuf[cbCmdPart], cbRealCmd - cbCmdPart);
2267 if (RT_SUCCESS(rc))
2268 i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, (VBOXCMDVBVA_HDR const *)pbCmdBuf, cbRealCmd);
2269 else
2270 LogRelMax(200, ("VDMA: Error reading %#x bytes of guest memory %#RGp!\n", cbRealCmd, GCPhysCmd));
2271 RTMemTmpFree(pbCmdBuf);
2272 }
2273 else
2274 {
2275 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2276 LogRelMax(200, ("VDMA: Out of temporary memory! %#x\n", cbRealCmd));
2277 i8Result = -1;
2278 }
2279 }
2280 return i8Result;
2281 }
2282
2283 case VBOXCMDVBVA_OPTYPE_COMPLEXCMD:
2284 {
2285 Assert(cbCmd >= sizeof(VBOXCMDVBVA_HDR)); /* caller already checked this */
2286 ASSERT_GUEST_RETURN(!fRecursion, -1);
2287
2288 /* Skip current command. */
2289 cbCmd -= sizeof(*pCmd);
2290 pCmd++;
2291
2292 /* Process subcommands. */
2293 while (cbCmd > 0)
2294 {
2295 ASSERT_GUEST_MSG_RETURN(cbCmd >= sizeof(VBOXCMDVBVA_HDR), ("%#x\n", cbCmd), -1);
2296
2297 uint16_t cbCurCmd = pCmd->u2.complexCmdEl.u16CbCmdHost;
2298 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2299 ASSERT_GUEST_MSG_RETURN(cbCurCmd <= cbCmd, ("cbCurCmd=%#x, cbCmd=%#x\n", cbCurCmd, cbCmd), -1);
2300
2301 i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCurCmd, true /*fRecursion*/);
2302 ASSERT_GUEST_MSG_RETURN(i8Result >= 0, ("vboxVDMACrCmdVbvaProcess -> %d\n", i8Result), i8Result);
2303
2304 /* Advance to the next command. */
2305 pCmd = (VBOXCMDVBVA_HDR RT_UNTRUSTED_VOLATILE_GUEST *)((uintptr_t)pCmd + cbCurCmd);
2306 cbCmd -= cbCurCmd;
2307 }
2308 return 0;
2309 }
2310
2311 default:
2312 i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pCmd, cbCmd);
2313 LogRelFlow(("VDMA: vboxVDMACrCmdVbvaProcess: LEAVE, opCode(%i)\n", pCmd->u8OpCode));
2314 return i8Result;
2315 }
2316}
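
/* Outline of how VBOXCMDVBVA_OPTYPE_SYSMEMCMD above fetches the real command
 * (a sketch, not additional code):
 *
 *     if (cbRealCmd fits in the rest of the first guest page)
 *         process it straight from the mapped page (zero copy);
 *     else
 *         copy the first page's tail, PDMDevHlpPhysRead() the remainder into
 *         a temporary buffer, and process the double-buffered copy.
 */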
2317
2318/**
2319 * Worker for vboxVDMAWorkerThread handling VBVAEXHOST_DATA_TYPE_CMD.
2320 *
2321 * @thread VDMA
2322 */
2323static void vboxVDMACrCmdProcess(struct VBOXVDMAHOST *pVdma, uint8_t RT_UNTRUSTED_VOLATILE_GUEST *pbCmd, uint32_t cbCmd)
2324{
2325 if ( cbCmd > 0
2326 && *pbCmd == VBOXCMDVBVA_OPTYPE_NOP)
2327 { /* nop */ }
2328 else
2329 {
2330 ASSERT_GUEST_RETURN_VOID(cbCmd >= sizeof(VBOXCMDVBVA_HDR));
2331 VBOXCMDVBVA_HDR RT_UNTRUSTED_VOLATILE_GUEST *pCmd = (VBOXCMDVBVA_HDR RT_UNTRUSTED_VOLATILE_GUEST *)pbCmd;
2332
2333 /* check if the command is cancelled */
2334 if (ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_IN_PROGRESS, VBOXCMDVBVA_STATE_SUBMITTED))
2335 {
2336 /* Process it. */
2337 pCmd->u.i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCmd, false /*fRecursion*/);
2338 }
2339 else
2340 Assert(pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED);
2341 }
2342
2343}
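
/* The compare-and-swap above doubles as the cancellation handshake: it claims
 * the command by moving it SUBMITTED -> IN_PROGRESS; if that fails, the guest
 * must already have moved it SUBMITTED -> CANCELLED and the command is
 * skipped. Only the winner of the race writes u.i8Result. */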
2344
2345/**
2346 * Worker for vboxVDMAConstruct().
2347 */
2348static int vboxVDMACrCtlHgsmiSetup(struct VBOXVDMAHOST *pVdma)
2349{
2350 PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP pCmd;
2351 pCmd = (PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP)vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP, sizeof(*pCmd));
2352 int rc;
2353 if (pCmd)
2354 {
2355 PVGASTATE pVGAState = pVdma->pVGAState;
2356 pCmd->pvVRamBase = pVGAState->vram_ptrR3;
2357 pCmd->cbVRam = pVGAState->vram_size;
2358 pCmd->pLed = &pVGAState->Led3D;
2359 pCmd->CrClientInfo.hClient = pVdma;
2360 pCmd->CrClientInfo.pfnCallout = vboxCmdVBVACmdCallout;
2361 rc = vboxVDMACrCtlPost(pVGAState, &pCmd->Hdr, sizeof (*pCmd));
2362 if (RT_SUCCESS(rc))
2363 {
2364 rc = vboxVDMACrCtlGetRc(&pCmd->Hdr);
2365 if (RT_SUCCESS(rc))
2366 pVdma->CrSrvInfo = pCmd->CrCmdServerInfo;
2367 else if (rc != VERR_NOT_SUPPORTED)
2368 WARN(("vboxVDMACrCtlGetRc returned %Rrc\n", rc));
2369 }
2370 else
2371 WARN(("vboxVDMACrCtlPost failed %Rrc\n", rc));
2372
2373 vboxVDMACrCtlRelease(&pCmd->Hdr);
2374 }
2375 else
2376 rc = VERR_NO_MEMORY;
2377
2378 if (!RT_SUCCESS(rc))
2379 memset(&pVdma->CrSrvInfo, 0, sizeof (pVdma->CrSrvInfo));
2380
2381 return rc;
2382}
2383
2384/**
2385 * @interface_method_impl{PDMIDISPLAYVBVACALLBACKS,pfnCrHgsmiControlCompleteAsync}
2386 *
2387 * @note Some indirect completion magic, you gotta love this code!
2388 */
2389DECLCALLBACK(int) vboxVDMACrHgsmiCommandCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc)
2390{
2391 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2392 PHGSMIINSTANCE pIns = pVGAState->pHGSMI;
2393 VBOXVDMACMD RT_UNTRUSTED_VOLATILE_GUEST *pDmaHdr = VBOXVDMACMD_FROM_BODY(pCmd);
2394 VBOXVDMACBUF_DR RT_UNTRUSTED_VOLATILE_GUEST *pDr = VBOXVDMACBUF_DR_FROM_TAIL(pDmaHdr);
2395
2396 AssertRC(rc);
2397 pDr->rc = rc;
2398
2399 Assert(pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
2400 rc = VBoxSHGSMICommandComplete(pIns, pDr);
2401 AssertRC(rc);
2402
2403 return rc;
2404}
2405
2406/**
2407 * Worker for vboxVDMACmdExecBlt().
2408 */
2409static int vboxVDMACmdExecBltPerform(PVBOXVDMAHOST pVdma, const VBOXVIDEOOFFSET offDst, const VBOXVIDEOOFFSET offSrc,
2410 const PVBOXVDMA_SURF_DESC pDstDesc, const PVBOXVDMA_SURF_DESC pSrcDesc,
2411 const VBOXVDMA_RECTL *pDstRectl, const VBOXVDMA_RECTL *pSrcRectl)
2412{
2413 /*
2414 * We do not support color conversion.
2415 */
2416 AssertReturn(pDstDesc->format == pSrcDesc->format, VERR_INVALID_FUNCTION);
2417
2418 /* we do not support stretching (checked by caller) */
2419 Assert(pDstRectl->height == pSrcRectl->height);
2420 Assert(pDstRectl->width == pSrcRectl->width);
2421
2422 uint8_t *pbRam = pVdma->pVGAState->vram_ptrR3;
2423 AssertCompileSize(pVdma->pVGAState->vram_size, sizeof(uint32_t));
2424 uint32_t cbVRamSize = pVdma->pVGAState->vram_size;
2425 uint8_t *pbDstSurf = pbRam + offDst;
2426 uint8_t *pbSrcSurf = pbRam + offSrc;
2427
2428 if ( pDstDesc->width == pDstRectl->width
2429 && pSrcDesc->width == pSrcRectl->width
2430 && pSrcDesc->width == pDstDesc->width
2431 && pSrcDesc->pitch == pDstDesc->pitch)
2432 {
2433 Assert(!pDstRectl->left);
2434 Assert(!pSrcRectl->left);
2435 uint32_t offBoth = pDstDesc->pitch * pDstRectl->top;
2436 uint32_t cbToCopy = pDstDesc->pitch * pDstRectl->height;
2437
2438 if ( cbToCopy <= cbVRamSize
2439 && (uintptr_t)(pbDstSurf + offBoth) - (uintptr_t)pbRam <= cbVRamSize - cbToCopy
2440 && (uintptr_t)(pbSrcSurf + offBoth) - (uintptr_t)pbRam <= cbVRamSize - cbToCopy)
2441 {
2442 RT_UNTRUSTED_VALIDATED_FENCE();
2443 memcpy(pbDstSurf + offBoth, pbSrcSurf + offBoth, cbToCopy);
2444 }
2445 else
2446 return VERR_INVALID_PARAMETER;
2447 }
2448 else
2449 {
2450 uint32_t offDstLineStart = pDstRectl->left * pDstDesc->bpp >> 3;
2451 uint32_t offDstLineEnd = ((pDstRectl->left * pDstDesc->bpp + 7) >> 3) + ((pDstDesc->bpp * pDstRectl->width + 7) >> 3);
2452 uint32_t cbDstLine = offDstLineEnd - offDstLineStart;
2453 uint32_t offDstStart = pDstDesc->pitch * pDstRectl->top + offDstLineStart;
2454 Assert(cbDstLine <= pDstDesc->pitch);
2455 uint32_t cbDstSkip = pDstDesc->pitch;
2456 uint8_t *pbDstStart = pbDstSurf + offDstStart;
2457
2458 uint32_t offSrcLineStart = pSrcRectl->left * pSrcDesc->bpp >> 3;
2459# ifdef VBOX_STRICT
2460 uint32_t offSrcLineEnd = ((pSrcRectl->left * pSrcDesc->bpp + 7) >> 3) + ((pSrcDesc->bpp * pSrcRectl->width + 7) >> 3);
2461 uint32_t cbSrcLine = offSrcLineEnd - offSrcLineStart;
2462# endif
2463 uint32_t offSrcStart = pSrcDesc->pitch * pSrcRectl->top + offSrcLineStart;
2464 Assert(cbSrcLine <= pSrcDesc->pitch);
2465 uint32_t cbSrcSkip = pSrcDesc->pitch;
2466 const uint8_t *pbSrcStart = pbSrcSurf + offSrcStart;
2467
2468 Assert(cbDstLine == cbSrcLine);
2469
2470 for (uint32_t i = 0; ; ++i)
2471 {
2472 if ( cbDstLine <= cbVRamSize
2473 && (uintptr_t)pbDstStart - (uintptr_t)pbRam <= cbVRamSize - cbDstLine
2474 && (uintptr_t)pbSrcStart - (uintptr_t)pbRam <= cbVRamSize - cbDstLine)
2475 {
2476 RT_UNTRUSTED_VALIDATED_FENCE(); /** @todo this could potentially be buzzkiller. */
2477 memcpy(pbDstStart, pbSrcStart, cbDstLine);
2478 }
2479 else
2480 return VERR_INVALID_PARAMETER;
2481 if (i == pDstRectl->height)
2482 break;
2483 pbDstStart += cbDstSkip;
2484 pbSrcStart += cbSrcSkip;
2485 }
2486 }
2487 return VINF_SUCCESS;
2488}
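
/* Worked example for the per-line arithmetic above, using the hypothetical
 * values bpp=32, left=10, width=100:
 *
 *     offLineStart = (10 * 32) >> 3                               = 40 bytes
 *     offLineEnd   = ((10 * 32 + 7) >> 3) + ((32 * 100 + 7) >> 3) = 40 + 400 = 440 bytes
 *     cbLine       = 440 - 40                                     = 400 bytes (100 pixels * 4)
 *
 * The "+ 7 >> 3" rounding only makes a difference for sub-byte formats (bpp < 8). */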
2489
2490#if 0 /* unused */
2491static void vboxVDMARectlUnite(VBOXVDMA_RECTL * pRectl1, const VBOXVDMA_RECTL * pRectl2)
2492{
2493 if (!pRectl1->width)
2494 *pRectl1 = *pRectl2;
2495 else
2496 {
2497 int16_t x21 = pRectl1->left + pRectl1->width;
2498 int16_t x22 = pRectl2->left + pRectl2->width;
2499 if (pRectl1->left > pRectl2->left)
2500 {
2501 pRectl1->left = pRectl2->left;
2502 pRectl1->width = x21 < x22 ? x22 - pRectl1->left : x21 - pRectl1->left;
2503 }
2504 else if (x21 < x22)
2505 pRectl1->width = x22 - pRectl1->left;
2506
2507 x21 = pRectl1->top + pRectl1->height;
2508 x22 = pRectl2->top + pRectl2->height;
2509 if (pRectl1->top > pRectl2->top)
2510 {
2511 pRectl1->top = pRectl2->top;
2512 pRectl1->height = x21 < x22 ? x22 - pRectl1->top : x21 - pRectl1->top;
2513 }
2514 else if (x21 < x22)
2515 pRectl1->height = x22 - pRectl1->top;
2516 }
2517}
2518#endif /* unused */
2519
2520/**
2521 * Handles VBOXVDMACMD_TYPE_DMA_PRESENT_BLT for vboxVDMACmdExec().
2522 *
2523 * @returns number of bytes (positive) of the full command on success,
2524 * otherwise a negative error status (VERR_XXX).
2525 *
2526 * @param pVdma The VDMA channel.
2527 * @param pBlt Blit command buffer. This is to be considered
2528 * volatile!
2529 * @param cbBuffer Number of bytes accessible at @a pBlt.
2530 */
2531static int vboxVDMACmdExecBlt(PVBOXVDMAHOST pVdma, const VBOXVDMACMD_DMA_PRESENT_BLT RT_UNTRUSTED_VOLATILE_GUEST *pBlt,
2532 uint32_t cbBuffer)
2533{
2534 /*
2535 * Validate and make a local copy of the blt command up to the rectangle array.
2536 */
2537 AssertReturn(cbBuffer >= RT_UOFFSETOF(VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects), VERR_INVALID_PARAMETER);
2538 VBOXVDMACMD_DMA_PRESENT_BLT BltSafe;
2539 RT_BCOPY_VOLATILE(&BltSafe, (void const *)pBlt, RT_UOFFSETOF(VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects));
2540 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2541
2542 AssertReturn(BltSafe.cDstSubRects < _8M, VERR_INVALID_PARAMETER);
2543 uint32_t const cbBlt = RT_UOFFSETOF_DYN(VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects[BltSafe.cDstSubRects]);
2544 AssertReturn(cbBuffer >= cbBlt, VERR_INVALID_PARAMETER);
2545
2546 /*
2547 * We do not support stretching.
2548 */
2549 AssertReturn(BltSafe.srcRectl.width == BltSafe.dstRectl.width, VERR_INVALID_FUNCTION);
2550 AssertReturn(BltSafe.srcRectl.height == BltSafe.dstRectl.height, VERR_INVALID_FUNCTION);
2551
2552 Assert(BltSafe.cDstSubRects);
2553
2554 RT_UNTRUSTED_VALIDATED_FENCE();
2555
2556 /*
2557 * Do the work.
2558 */
2559 //VBOXVDMA_RECTL updateRectl = {0, 0, 0, 0}; - pointless
2560 if (BltSafe.cDstSubRects)
2561 {
2562 for (uint32_t i = 0; i < BltSafe.cDstSubRects; ++i)
2563 {
2564 VBOXVDMA_RECTL dstSubRectl;
2565 dstSubRectl.left = pBlt->aDstSubRects[i].left;
2566 dstSubRectl.top = pBlt->aDstSubRects[i].top;
2567 dstSubRectl.width = pBlt->aDstSubRects[i].width;
2568 dstSubRectl.height = pBlt->aDstSubRects[i].height;
2569 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2570
2571 VBOXVDMA_RECTL srcSubRectl = dstSubRectl;
2572
2573 dstSubRectl.left += BltSafe.dstRectl.left;
2574 dstSubRectl.top += BltSafe.dstRectl.top;
2575
2576 srcSubRectl.left += BltSafe.srcRectl.left;
2577 srcSubRectl.top += BltSafe.srcRectl.top;
2578
2579 int rc = vboxVDMACmdExecBltPerform(pVdma, BltSafe.offDst, BltSafe.offSrc, &BltSafe.dstDesc, &BltSafe.srcDesc,
2580 &dstSubRectl, &srcSubRectl);
2581 AssertRCReturn(rc, rc);
2582
2583 //vboxVDMARectlUnite(&updateRectl, &dstSubRectl); - pointless
2584 }
2585 }
2586 else
2587 {
2588 int rc = vboxVDMACmdExecBltPerform(pVdma, BltSafe.offDst, BltSafe.offSrc, &BltSafe.dstDesc, &BltSafe.srcDesc,
2589 &BltSafe.dstRectl, &BltSafe.srcRectl);
2590 AssertRCReturn(rc, rc);
2591
2592 //vboxVDMARectlUnite(&updateRectl, &BltSafe.dstRectl); - pointless
2593 }
2594
2595 return cbBlt;
2596}
2597
2598
2599/**
2600 * Handles VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER for vboxVDMACmdCheckCrCmd() and
2601 * vboxVDMACmdExec().
2602 *
2603 * @returns number of bytes (positive) of the full command on success,
2604 * otherwise a negative error status (VERR_XXX).
2605 *
2606 * @param pVdma The VDMA channel.
2607 * @param pTransfer Transfer command buffer. This is to be considered
2608 * volatile!
2609 * @param cbBuffer Number of bytes accessible at @a pTransfer.
2610 */
2611static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, VBOXVDMACMD_DMA_BPB_TRANSFER RT_UNTRUSTED_VOLATILE_GUEST *pTransfer,
2612 uint32_t cbBuffer)
2613{
2614 /*
2615 * Make a copy of the command (it's volatile).
2616 */
2617 AssertReturn(cbBuffer >= sizeof(*pTransfer), VERR_INVALID_PARAMETER);
2618 VBOXVDMACMD_DMA_BPB_TRANSFER TransferSafeCopy;
2619 RT_COPY_VOLATILE(TransferSafeCopy, *pTransfer);
2620 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2621
2622 PVGASTATE pVGAState = pVdma->pVGAState;
2623 PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
2624 uint8_t *pbRam = pVGAState->vram_ptrR3;
2625 uint32_t cbTransfer = TransferSafeCopy.cbTransferSize;
2626
2627 /*
2628 * Validate VRAM offset.
2629 */
2630 if (TransferSafeCopy.fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
2631 AssertReturn( cbTransfer <= pVGAState->vram_size
2632 && TransferSafeCopy.Src.offVramBuf <= pVGAState->vram_size - cbTransfer,
2633 VERR_INVALID_PARAMETER);
2634
2635 if (TransferSafeCopy.fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
2636 AssertReturn( cbTransfer <= pVGAState->vram_size
2637 && TransferSafeCopy.Dst.offVramBuf <= pVGAState->vram_size - cbTransfer,
2638 VERR_INVALID_PARAMETER);
2639 RT_UNTRUSTED_VALIDATED_FENCE();
2640
2641 /*
2642 * Transfer loop.
2643 */
2644 uint32_t cbTransfered = 0;
2645 int rc = VINF_SUCCESS;
2646 do
2647 {
2648 uint32_t cbSubTransfer = cbTransfer;
2649
2650 const void *pvSrc;
2651 bool fSrcLocked = false;
2652 PGMPAGEMAPLOCK SrcLock;
2653 if (TransferSafeCopy.fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
2654 pvSrc = pbRam + TransferSafeCopy.Src.offVramBuf + cbTransfered;
2655 else
2656 {
2657 RTGCPHYS GCPhysSrcPage = TransferSafeCopy.Src.phBuf + cbTransfered;
2658 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, GCPhysSrcPage, 0, &pvSrc, &SrcLock);
2659 AssertRC(rc);
2660 if (RT_SUCCESS(rc))
2661 {
2662 fSrcLocked = true;
2663 cbSubTransfer = RT_MIN(cbSubTransfer, X86_PAGE_SIZE - (uint32_t)(GCPhysSrcPage & X86_PAGE_OFFSET_MASK));
2664 }
2665 else
2666 break;
2667 }
2668
2669 void *pvDst;
2670 PGMPAGEMAPLOCK DstLock;
2671 bool fDstLocked = false;
2672 if (TransferSafeCopy.fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
2673 pvDst = pbRam + TransferSafeCopy.Dst.offVramBuf + cbTransfered;
2674 else
2675 {
2676 RTGCPHYS GCPhysDstPage = TransferSafeCopy.Dst.phBuf + cbTransfered;
2677 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, GCPhysDstPage, 0, &pvDst, &DstLock);
2678 AssertRC(rc);
2679 if (RT_SUCCESS(rc))
2680 {
2681 fDstLocked = true;
2682 cbSubTransfer = RT_MIN(cbSubTransfer, X86_PAGE_SIZE - (uint32_t)(GCPhysDstPage & X86_PAGE_OFFSET_MASK));
2683 }
2684 }
2685
2686 if (RT_SUCCESS(rc))
2687 {
2688 memcpy(pvDst, pvSrc, cbSubTransfer);
2689 cbTransfered += cbSubTransfer;
2690 cbTransfer -= cbSubTransfer;
2691 }
2692 else
2693 cbTransfer = 0; /* force break below */
2694
2695 if (fSrcLocked)
2696 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &SrcLock);
2697 if (fDstLocked)
2698 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &DstLock);
2699 } while (cbTransfer);
2700
2701 if (RT_SUCCESS(rc))
2702 return sizeof(TransferSafeCopy);
2703 return rc;
2704}
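
/* The loop above splits the copy at guest-physical page boundaries; the
 * clamp is the usual "bytes left in this page" idiom:
 *
 *     cbSubTransfer = RT_MIN(cbSubTransfer,
 *                            X86_PAGE_SIZE - (uint32_t)(GCPhys & X86_PAGE_OFFSET_MASK));
 *
 * e.g. GCPhys = 0x1ff0 leaves 0x10 bytes before the 0x2000 page boundary. */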
2705
2706/**
2707 * Worker for vboxVDMACommandProcess().
2708 *
2709 * @param pVdma The VDMA channel.
2710 * @param pbBuffer Command buffer, considered volatile.
2711 * @param cbBuffer The number of bytes at @a pbBuffer.
2712 * @param pCmdDr The command. For setting the async flag on chromium
2713 * requests.
2714 * @param pfAsyncCmd Flag to set on asynchronous command completion of
2715 * chromium requests. The input state is false, so it only
2716 * ever needs to be set to true.
2717 */
2718static int vboxVDMACmdExec(PVBOXVDMAHOST pVdma, uint8_t const RT_UNTRUSTED_VOLATILE_GUEST *pbBuffer, uint32_t cbBuffer,
2719 VBOXVDMACBUF_DR RT_UNTRUSTED_VOLATILE_GUEST *pCmdDr, bool *pfAsyncCmd)
2720{
2721 AssertReturn(pbBuffer, VERR_INVALID_POINTER);
2722
2723 for (;;)
2724 {
2725 AssertReturn(cbBuffer >= VBOXVDMACMD_HEADER_SIZE(), VERR_INVALID_PARAMETER);
2726
2727 VBOXVDMACMD const RT_UNTRUSTED_VOLATILE_GUEST *pCmd = (VBOXVDMACMD const RT_UNTRUSTED_VOLATILE_GUEST *)pbBuffer;
2728 VBOXVDMACMD_TYPE enmCmdType = pCmd->enmType;
2729 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2730
2731 ASSERT_GUEST_MSG_RETURN( enmCmdType == VBOXVDMACMD_TYPE_CHROMIUM_CMD
2732 || enmCmdType == VBOXVDMACMD_TYPE_DMA_PRESENT_BLT
2733 || enmCmdType == VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER
2734 || enmCmdType == VBOXVDMACMD_TYPE_DMA_NOP
2735 || enmCmdType == VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ,
2736 ("enmCmdType=%d\n", enmCmdType),
2737 VERR_INVALID_FUNCTION);
2738 RT_UNTRUSTED_VALIDATED_FENCE();
2739
2740 int cbProcessed;
2741 switch (enmCmdType)
2742 {
2743 case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
2744 {
2745 VBOXVDMACMD_CHROMIUM_CMD RT_UNTRUSTED_VOLATILE_GUEST *pCrCmd = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_CHROMIUM_CMD);
2746 uint32_t const cbBody = VBOXVDMACMD_BODY_SIZE(cbBuffer);
2747 AssertReturn(cbBody >= sizeof(*pCrCmd), VERR_INVALID_PARAMETER);
2748
2749 PVGASTATE pVGAState = pVdma->pVGAState;
2750 AssertReturn(pVGAState->pDrv->pfnCrHgsmiCommandProcess, VERR_NOT_SUPPORTED);
2751
2752 VBoxSHGSMICommandMarkAsynchCompletion(pCmdDr);
2753 pVGAState->pDrv->pfnCrHgsmiCommandProcess(pVGAState->pDrv, pCrCmd, cbBody);
2754 *pfAsyncCmd = true;
2755 return VINF_SUCCESS;
2756 }
2757
2758 case VBOXVDMACMD_TYPE_DMA_PRESENT_BLT:
2759 {
2760 VBOXVDMACMD_DMA_PRESENT_BLT RT_UNTRUSTED_VOLATILE_GUEST *pBlt = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_PRESENT_BLT);
2761 cbProcessed = vboxVDMACmdExecBlt(pVdma, pBlt, cbBuffer - VBOXVDMACMD_HEADER_SIZE());
2762 Assert(cbProcessed >= 0);
2763 break;
2764 }
2765
2766 case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
2767 {
2768 VBOXVDMACMD_DMA_BPB_TRANSFER RT_UNTRUSTED_VOLATILE_GUEST *pTransfer
2769 = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
2770 cbProcessed = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, cbBuffer - VBOXVDMACMD_HEADER_SIZE());
2771 Assert(cbProcessed >= 0);
2772 break;
2773 }
2774
2775 case VBOXVDMACMD_TYPE_DMA_NOP:
2776 return VINF_SUCCESS;
2777
2778 case VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ:
2779 return VINF_SUCCESS;
2780
2781 default:
2782 AssertFailedReturn(VERR_INVALID_FUNCTION);
2783 }
2784
2785 /* Advance buffer or return. */
2786 if (cbProcessed >= 0)
2787 {
2788 Assert(cbProcessed > 0);
2789 cbProcessed += VBOXVDMACMD_HEADER_SIZE();
2790 if ((uint32_t)cbProcessed >= cbBuffer)
2791 {
2792 Assert((uint32_t)cbProcessed == cbBuffer);
2793 return VINF_SUCCESS;
2794 }
2795
2796 cbBuffer -= cbProcessed;
2797 pbBuffer += cbProcessed;
2798 }
2799 else
2800 {
2801 RT_UNTRUSTED_VALIDATED_FENCE();
2802 return cbProcessed; /* error status */
2803 }
2804 }
2805}
2806
2807/**
2808 * VDMA worker thread procedure, see vdmaVBVACtlEnableSubmitInternal().
2809 *
2810 * @thread VDMA
2811 */
2812static DECLCALLBACK(int) vboxVDMAWorkerThread(RTTHREAD hThreadSelf, void *pvUser)
2813{
2814 RT_NOREF(hThreadSelf);
2815 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvUser;
2816 PVGASTATE pVGAState = pVdma->pVGAState;
2817 VBVAEXHOSTCONTEXT *pCmdVbva = &pVdma->CmdVbva;
2818 int rc;
2819
2820 VBoxVDMAThreadNotifyConstructSucceeded(&pVdma->Thread, pvUser);
2821
2822 while (!VBoxVDMAThreadIsTerminating(&pVdma->Thread))
2823 {
2824 uint8_t RT_UNTRUSTED_VOLATILE_GUEST *pbCmd = NULL;
2825 uint32_t cbCmd = 0;
2826 VBVAEXHOST_DATA_TYPE enmType = VBoxVBVAExHPDataGet(pCmdVbva, &pbCmd, &cbCmd);
2827 switch (enmType)
2828 {
2829 case VBVAEXHOST_DATA_TYPE_CMD:
2830 vboxVDMACrCmdProcess(pVdma, pbCmd, cbCmd);
2831 VBoxVBVAExHPDataCompleteCmd(pCmdVbva, cbCmd);
2832 VBVARaiseIrq(pVGAState, 0);
2833 break;
2834
2835 case VBVAEXHOST_DATA_TYPE_GUESTCTL:
2836 rc = vboxVDMACrGuestCtlProcess(pVdma, (VBVAEXHOSTCTL *)pbCmd);
2837 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL *)pbCmd, rc);
2838 break;
2839
2840 case VBVAEXHOST_DATA_TYPE_HOSTCTL:
2841 {
2842 bool fContinue = true;
2843 rc = vboxVDMACrHostCtlProcess(pVdma, (VBVAEXHOSTCTL *)pbCmd, &fContinue);
2844 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL *)pbCmd, rc);
2845 if (fContinue)
2846 break;
2847 }
2848 RT_FALL_THRU();
2849
2850 case VBVAEXHOST_DATA_TYPE_NO_DATA:
2851 rc = RTSemEventWaitNoResume(pVdma->Thread.hEvent, RT_INDEFINITE_WAIT);
2852 AssertMsg(RT_SUCCESS(rc) || rc == VERR_INTERRUPTED, ("%Rrc\n", rc));
2853 break;
2854
2855 default:
2856 WARN(("unexpected type %d\n", enmType));
2857 break;
2858 }
2859 }
2860
2861 VBoxVDMAThreadNotifyTerminatingSucceeded(&pVdma->Thread, pvUser);
2862
2863 return VINF_SUCCESS;
2864}
2865
2866/**
2867 * Worker for vboxVDMACommand.
2868 *
2869 * @returns VBox status code of the operation.
2870 * @param pVdma VDMA instance data.
2871 * @param pCmd The command to process. Consider content volatile.
2872 * @param cbCmd Number of valid bytes at @a pCmd. This is at least
2873 * sizeof(VBOXVDMACBUF_DR).
2874 * @param pfAsyncCmd Flag to set on asynchronous command completion of
2875 * chromium requests. The input state is false, so it only
2876 * ever needs to be set to true.
2877 * @thread EMT
2878 */
2879static int vboxVDMACommandProcess(PVBOXVDMAHOST pVdma, VBOXVDMACBUF_DR RT_UNTRUSTED_VOLATILE_GUEST *pCmd,
2880 uint32_t cbCmd, bool *pfAsyncCmd)
2881{
2882 /*
2883 * Get the command buffer (volatile).
2884 */
2885 uint16_t const cbCmdBuf = pCmd->cbBuf;
2886 uint16_t const fCmdFlags = pCmd->fFlags;
2887 uint64_t const offVramBuf_or_GCPhysBuf = pCmd->Location.offVramBuf;
2888 AssertCompile(sizeof(pCmd->Location.offVramBuf) == sizeof(pCmd->Location.phBuf));
2889 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2890
2891 const uint8_t RT_UNTRUSTED_VOLATILE_GUEST *pbCmdBuf;
2892 PGMPAGEMAPLOCK Lock;
2893 bool fReleaseLocked = false;
2894 if (fCmdFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
2895 {
2896 pbCmdBuf = VBOXVDMACBUF_DR_TAIL(pCmd, const uint8_t);
2897 AssertReturn((uintptr_t)&pbCmdBuf[cbCmdBuf] <= (uintptr_t)&((uint8_t *)pCmd)[cbCmd],
2898 VERR_INVALID_PARAMETER);
2899 RT_UNTRUSTED_VALIDATED_FENCE();
2900 }
2901 else if (fCmdFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
2902 {
2903 AssertReturn( offVramBuf_or_GCPhysBuf <= pVdma->pVGAState->vram_size
2904 && offVramBuf_or_GCPhysBuf + cbCmdBuf <= pVdma->pVGAState->vram_size,
2905 VERR_INVALID_PARAMETER);
2906 RT_UNTRUSTED_VALIDATED_FENCE();
2907
2908 pbCmdBuf = (uint8_t const RT_UNTRUSTED_VOLATILE_GUEST *)pVdma->pVGAState->vram_ptrR3 + offVramBuf_or_GCPhysBuf;
2909 }
2910 else
2911 {
2912 /* Make sure it doesn't cross a page. */
2913 AssertReturn((uint32_t)(offVramBuf_or_GCPhysBuf & X86_PAGE_OFFSET_MASK) + cbCmdBuf <= (uint32_t)X86_PAGE_SIZE,
2914 VERR_INVALID_PARAMETER);
2915 RT_UNTRUSTED_VALIDATED_FENCE();
2916
2917 int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pVdma->pVGAState->pDevInsR3, offVramBuf_or_GCPhysBuf, 0 /*fFlags*/,
2918 (const void **)&pbCmdBuf, &Lock);
2919 AssertRCReturn(rc, rc); /* if (rc == VERR_PGM_PHYS_PAGE_RESERVED) -> fall back on using PGMPhysRead ?? */
2920 fReleaseLocked = true;
2921 }
2922
2923 /*
2924 * Process the command.
2925 */
2926 int rc = vboxVDMACmdExec(pVdma, pbCmdBuf, cbCmdBuf, pCmd, pfAsyncCmd);
2927 AssertRC(rc);
2928
2929 /* Clean up the command buffer. */
2930 if (fReleaseLocked)
2931 PDMDevHlpPhysReleasePageMappingLock(pVdma->pVGAState->pDevInsR3, &Lock);
2932 return rc;
2933}
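
/* Summary of the three command buffer locations handled above, selected by
 * pCmd->fFlags:
 *     VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR    buffer is inline, right after the DR;
 *     VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET   Location.offVramBuf is a VRAM offset;
 *     neither flag                        Location.phBuf is a guest-physical
 *                                         address (must not cross a page). */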
2934
2935# if 0 /** @todo vboxVDMAControlProcess is unused */
2936static void vboxVDMAControlProcess(PVBOXVDMAHOST pVdma, PVBOXVDMA_CTL pCmd)
2937{
2938 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
2939 pCmd->i32Result = VINF_SUCCESS;
2940 int rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
2941 AssertRC(rc);
2942}
2943# endif
2944
2945#ifdef VBOX_VDMA_WITH_WATCHDOG
2946
2947/**
2948 * @callback_method_impl{TMTIMER, VDMA watchdog timer.}
2949 */
2950static DECLCALLBACK(void) vboxVDMAWatchDogTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
2951{
2952 VBOXVDMAHOST *pVdma = (VBOXVDMAHOST *)pvUser;
2953 PVGASTATE pVGAState = pVdma->pVGAState;
2954 VBVARaiseIrq(pVGAState, HGSMIHOSTFLAGS_WATCHDOG);
2955}
2956
2957/**
2958 * Handles VBOXVDMA_CTL_TYPE_WATCHDOG for vboxVDMAControl.
2959 */
2960static int vboxVDMAWatchDogCtl(struct VBOXVDMAHOST *pVdma, uint32_t cMillis)
2961{
2962 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
2963 if (cMillis)
2964 TMTimerSetMillies(pVdma->WatchDogTimer, cMillis);
2965 else
2966 TMTimerStop(pVdma->WatchDogTimer);
2967 return VINF_SUCCESS;
2968}
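
/* Guest-facing semantics: a non-zero cMillis (re)arms the watchdog, while
 * cMillis == 0 stops it; on expiry vboxVDMAWatchDogTimer() above raises an
 * IRQ with HGSMIHOSTFLAGS_WATCHDOG set. */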
2969
2970#endif /* VBOX_VDMA_WITH_WATCHDOG */
2971
2972/**
2973 * Called by vgaR3Construct() to initialize the state.
2974 *
2975 * @returns VBox status code.
2976 */
2977int vboxVDMAConstruct(PVGASTATE pVGAState, uint32_t cPipeElements)
2978{
2979 RT_NOREF(cPipeElements);
2980 int rc;
2981 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)RTMemAllocZ(sizeof(*pVdma));
2982 Assert(pVdma);
2983 if (pVdma)
2984 {
2985 pVdma->pHgsmi = pVGAState->pHGSMI;
2986 pVdma->pVGAState = pVGAState;
2987
2988#ifdef VBOX_VDMA_WITH_WATCHDOG
2989 rc = PDMDevHlpTMTimerCreate(pVGAState->pDevInsR3, TMCLOCK_REAL, vboxVDMAWatchDogTimer,
2990 pVdma, TMTIMER_FLAGS_NO_CRIT_SECT,
2991 "VDMA WatchDog Timer", &pVdma->WatchDogTimer);
2992 AssertRC(rc);
2993#else
2994 rc = VINF_SUCCESS;
2995#endif
2996 if (RT_SUCCESS(rc))
2997 {
2998 VBoxVDMAThreadInit(&pVdma->Thread);
2999
3000 rc = RTSemEventMultiCreate(&pVdma->HostCrCtlCompleteEvent);
3001 if (RT_SUCCESS(rc))
3002 {
3003 rc = VBoxVBVAExHSInit(&pVdma->CmdVbva);
3004 if (RT_SUCCESS(rc))
3005 {
3006 rc = RTCritSectInit(&pVdma->CalloutCritSect);
3007 if (RT_SUCCESS(rc))
3008 {
3009 pVGAState->pVdma = pVdma;
3010
3011 /* No HGCM service if VMSVGA is enabled. */
3012 if (!pVGAState->fVMSVGAEnabled)
3013 {
3014 int rcIgnored = vboxVDMACrCtlHgsmiSetup(pVdma); NOREF(rcIgnored); /** @todo is this ignoring intentional? */
3015 }
3016 return VINF_SUCCESS;
3017 }
3018
3019 WARN(("RTCritSectInit failed %Rrc\n", rc));
3020 VBoxVBVAExHSTerm(&pVdma->CmdVbva);
3021 }
3022 else
3023 WARN(("VBoxVBVAExHSInit failed %Rrc\n", rc));
3024 RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
3025 }
3026 else
3027 WARN(("RTSemEventMultiCreate failed %Rrc\n", rc));
3028
3029 /* the timer is cleaned up automatically */
3030 }
3031 RTMemFree(pVdma);
3032 }
3033 else
3034 rc = VERR_OUT_OF_RESOURCES;
3035 return rc;
3036}
3037
3038/**
3039 * Called by vgaR3Reset() to do reset.
3040 */
3041void vboxVDMAReset(struct VBOXVDMAHOST *pVdma)
3042{
3043 vdmaVBVACtlDisableSync(pVdma);
3044}
3045
3046/**
3047 * Called by vgaR3Destruct() to do cleanup.
3048 */
3049void vboxVDMADestruct(struct VBOXVDMAHOST *pVdma)
3050{
3051 if (!pVdma)
3052 return;
3053
3054 if (pVdma->pVGAState->fVMSVGAEnabled)
3055 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
3056 else
3057 {
3058 /** @todo Remove. It does nothing because pVdma->CmdVbva is already disabled at this point
3059 * as the result of the SharedOpenGL HGCM service unloading.
3060 */
3061 vdmaVBVACtlDisableSync(pVdma);
3062 }
3063 VBoxVDMAThreadCleanup(&pVdma->Thread);
3064 VBoxVBVAExHSTerm(&pVdma->CmdVbva);
3065 RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
3066 RTCritSectDelete(&pVdma->CalloutCritSect);
3067 RTMemFree(pVdma);
3068}
3069
3070/**
3071 * Handle VBVA_VDMA_CTL, see vbvaChannelHandler().
3072 *
3073 * @param pVdma The VDMA channel.
3074 * @param pCmd The control command to handle. Considered volatile.
3075 * @param cbCmd The size of the command. At least sizeof(VBOXVDMA_CTL).
3076 */
3077void vboxVDMAControl(struct VBOXVDMAHOST *pVdma, VBOXVDMA_CTL RT_UNTRUSTED_VOLATILE_GUEST *pCmd, uint32_t cbCmd)
3078{
3079 RT_NOREF(cbCmd);
3080 PHGSMIINSTANCE pIns = pVdma->pHgsmi;
3081
3082 VBOXVDMA_CTL_TYPE enmCtl = pCmd->enmCtl;
3083 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
3084
3085 int rc;
3086 if (enmCtl < VBOXVDMA_CTL_TYPE_END)
3087 {
3088 RT_UNTRUSTED_VALIDATED_FENCE();
3089
3090 switch (enmCtl)
3091 {
3092 case VBOXVDMA_CTL_TYPE_ENABLE:
3093 rc = VINF_SUCCESS;
3094 break;
3095 case VBOXVDMA_CTL_TYPE_DISABLE:
3096 rc = VINF_SUCCESS;
3097 break;
3098 case VBOXVDMA_CTL_TYPE_FLUSH:
3099 rc = VINF_SUCCESS;
3100 break;
3101 case VBOXVDMA_CTL_TYPE_WATCHDOG:
3102#ifdef VBOX_VDMA_WITH_WATCHDOG
3103 rc = vboxVDMAWatchDogCtl(pVdma, pCmd->u32Offset);
3104#else
3105 rc = VERR_NOT_SUPPORTED;
3106#endif
3107 break;
3108 default:
3109 AssertFailedBreakStmt(rc = VERR_IPE_NOT_REACHED_DEFAULT_CASE);
3110 }
3111 }
3112 else
3113 {
3114 RT_UNTRUSTED_VALIDATED_FENCE();
3115 ASSERT_GUEST_FAILED();
3116 rc = VERR_NOT_SUPPORTED;
3117 }
3118
3119 pCmd->i32Result = rc;
3120 rc = VBoxSHGSMICommandComplete(pIns, pCmd);
3121 AssertRC(rc);
3122}
3123
3124/**
3125 * Handle VBVA_VDMA_CMD, see vbvaChannelHandler().
3126 *
3127 * @param pVdma The VDMA channel.
3128 * @param pCmd The command to handle. Considered volatile.
3129 * @param cbCmd The size of the command. At least sizeof(VBOXVDMACBUF_DR).
3130 * @thread EMT
3131 */
3132void vboxVDMACommand(struct VBOXVDMAHOST *pVdma, VBOXVDMACBUF_DR RT_UNTRUSTED_VOLATILE_GUEST *pCmd, uint32_t cbCmd)
3133{
3134 /*
3135 * Process the command.
3136 */
3137 bool fAsyncCmd = false;
3138 int rc = vboxVDMACommandProcess(pVdma, pCmd, cbCmd, &fAsyncCmd);
3139
3140 /*
3141 * Complete the command unless it's asynchronous (e.g. chromium).
3142 */
3143 if (!fAsyncCmd)
3144 {
3145 pCmd->rc = rc;
3146 int rc2 = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCmd);
3147 AssertRC(rc2);
3148 }
3149}
3150
3151
3152/**
3153 * @callback_method_impl{FNVBVAEXHOSTCTL_COMPLETE,
3154 * Used by vdmaVBVACtlGenericGuestSubmit() and vdmaVBVACtlEnableDisableSubmit() }
3155 */
3156static DECLCALLBACK(void) vboxCmdVBVACmdCtlGuestCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
3157 int rc, void *pvContext)
3158{
3159 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvContext;
3160 VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_GUEST *pGCtl
3161 = (VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_GUEST *)((uintptr_t)pCtl->u.cmd.pvCmd - sizeof(VBOXCMDVBVA_CTL));
3162 AssertRC(rc);
3163 pGCtl->i32Result = rc;
3164
3165 Assert(pVdma->pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
3166 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pGCtl);
3167 AssertRC(rc);
3168
3169 VBoxVBVAExHCtlFree(pVbva, pCtl);
3170}
3171
3172/**
3173 * Worker for vdmaVBVACtlGenericGuestSubmit() and vdmaVBVACtlOpaqueHostSubmit().
3174 */
3175static int vdmaVBVACtlGenericSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_SOURCE enmSource, VBVAEXHOSTCTL_TYPE enmType,
3176 uint8_t RT_UNTRUSTED_VOLATILE_GUEST *pbCmd, uint32_t cbCmd,
3177 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3178{
3179 int rc;
3180 VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, enmType);
3181 if (pHCtl)
3182 {
3183 pHCtl->u.cmd.pvCmd = pbCmd;
3184 pHCtl->u.cmd.cbCmd = cbCmd;
3185 rc = vdmaVBVACtlSubmit(pVdma, pHCtl, enmSource, pfnComplete, pvComplete);
3186 if (RT_SUCCESS(rc))
3187 return VINF_SUCCESS;
3188
3189 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3190 Log(("vdmaVBVACtlSubmit failed %Rrc\n", rc));
3191 }
3192 else
3193 {
3194 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3195 rc = VERR_NO_MEMORY;
3196 }
3197 return rc;
3198}
3199
3200/**
3201 * Handler for vboxCmdVBVACmdCtl()/VBOXCMDVBVACTL_TYPE_3DCTL.
3202 */
3203static int vdmaVBVACtlGenericGuestSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_TYPE enmType,
3204 VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_GUEST *pCtl, uint32_t cbCtl)
3205{
3206 Assert(cbCtl >= sizeof(VBOXCMDVBVA_CTL)); /* Checked by caller's caller, vbvaChannelHandler(). */
3207
3208 VBoxSHGSMICommandMarkAsynchCompletion(pCtl);
3209 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_GUEST, enmType,
3210 (uint8_t RT_UNTRUSTED_VOLATILE_GUEST *)(pCtl + 1),
3211 cbCtl - sizeof(VBOXCMDVBVA_CTL),
3212 vboxCmdVBVACmdCtlGuestCompletion, pVdma);
3213 if (RT_SUCCESS(rc))
3214 return VINF_SUCCESS;
3215
3216 WARN(("vdmaVBVACtlGenericSubmit failed %Rrc\n", rc));
3217 pCtl->i32Result = rc;
3218 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
3219 AssertRC(rc);
3220 return VINF_SUCCESS;
3221}
3222
3223/**
3224 * @callback_method_impl{FNVBVAEXHOSTCTL_COMPLETE, Used by vdmaVBVACtlOpaqueHostSubmit()}
3225 */
3226static DECLCALLBACK(void) vboxCmdVBVACmdCtlHostCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
3227 int rc, void *pvCompletion)
3228{
3229 VBOXCRCMDCTL *pVboxCtl = (VBOXCRCMDCTL *)pCtl->u.cmd.pvCmd;
3230 if (pVboxCtl->u.pfnInternal)
3231 ((PFNCRCTLCOMPLETION)pVboxCtl->u.pfnInternal)(pVboxCtl, pCtl->u.cmd.cbCmd, rc, pvCompletion);
3232 VBoxVBVAExHCtlFree(pVbva, pCtl);
3233}
3234
3235/**
3236 * Worker for vboxCmdVBVACmdHostCtl() and vboxCmdVBVACmdHostCtlSync().
3237 */
3238static int vdmaVBVACtlOpaqueHostSubmit(PVBOXVDMAHOST pVdma, struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
3239 PFNCRCTLCOMPLETION pfnCompletion, void *pvCompletion)
3240{
3241 pCmd->u.pfnInternal = (PFNRT)pfnCompletion;
3242 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_HOST, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE,
3243 (uint8_t *)pCmd, cbCmd, vboxCmdVBVACmdCtlHostCompletion, pvCompletion);
3244 if (RT_FAILURE(rc))
3245 {
3246 if (rc == VERR_INVALID_STATE)
3247 {
3248 pCmd->u.pfnInternal = NULL;
3249 PVGASTATE pVGAState = pVdma->pVGAState;
3250 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCmd, cbCmd, pfnCompletion, pvCompletion);
3251 if (!RT_SUCCESS(rc))
3252 WARN(("pfnCrHgcmCtlSubmit failed %Rrc\n", rc));
3253
3254 return rc;
3255 }
3256 WARN(("vdmaVBVACtlGenericSubmit failed %Rrc\n", rc));
3257 return rc;
3258 }
3259
3260 return VINF_SUCCESS;
3261}
3262
3263/**
3264 * Called from vdmaVBVACtlThreadCreatedEnable().
3265 */
3266static int vdmaVBVANotifyEnable(PVGASTATE pVGAState)
3267{
3268 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
3269 {
3270 int rc = pVGAState->pDrv->pfnVBVAEnable (pVGAState->pDrv, i, NULL, true);
3271 if (!RT_SUCCESS(rc))
3272 {
3273 WARN(("pfnVBVAEnable failed %Rrc\n", rc));
3274 for (uint32_t j = 0; j < i; j++)
3275 {
3276 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, j);
3277 }
3278
3279 return rc;
3280 }
3281 }
3282 return VINF_SUCCESS;
3283}
3284
3285/**
3286 * Called from vdmaVBVACtlThreadCreatedEnable() and vdmaVBVADisableProcess().
3287 */
3288static int vdmaVBVANotifyDisable(PVGASTATE pVGAState)
3289{
3290 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
3291 pVGAState->pDrv->pfnVBVADisable(pVGAState->pDrv, i);
3292 return VINF_SUCCESS;
3293}
3294
3295/**
3296 * Hook that is called by vboxVDMAWorkerThread when it starts.
3297 *
3298 * @thread VDMA
3299 */
3300static DECLCALLBACK(void) vdmaVBVACtlThreadCreatedEnable(struct VBOXVDMATHREAD *pThread, int rc,
3301 void *pvThreadContext, void *pvContext)
3302{
3303 RT_NOREF(pThread);
3304 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvThreadContext;
3305 VBVAEXHOSTCTL* pHCtl = (VBVAEXHOSTCTL*)pvContext;
3306
3307 if (RT_SUCCESS(rc))
3308 {
3309 rc = vboxVDMACrGuestCtlProcess(pVdma, pHCtl);
3310 /* rc == VINF_SUCCESS would mean the actual state change has occurred */
3311 if (rc == VINF_SUCCESS)
3312 {
3313 /* We need to inform Main about the VBVA enable/disable.
3314 * Main expects the notifications to be done from the main thread,
3315 * so submit the request there. */
3316 PVGASTATE pVGAState = pVdma->pVGAState;
3317
3318 if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
3319 vdmaVBVANotifyEnable(pVGAState);
3320 else
3321 vdmaVBVANotifyDisable(pVGAState);
3322 }
3323 else if (RT_FAILURE(rc))
3324 WARN(("vboxVDMACrGuestCtlProcess failed %Rrc\n", rc));
3325 }
3326 else
3327 WARN(("vdmaVBVACtlThreadCreatedEnable is passed %Rrc\n", rc));
3328
3329 VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pHCtl, rc);
3330}
3331
3332/**
3333 * Worker for vdmaVBVACtlEnableDisableSubmitInternal() and vdmaVBVACtlEnableSubmitSync().
3334 */
3335static int vdmaVBVACtlEnableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE RT_UNTRUSTED_VOLATILE_GUEST *pEnable, bool fPaused,
3336 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3337{
3338 int rc;
3339 VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva,
3340 fPaused ? VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED : VBVAEXHOSTCTL_TYPE_GHH_ENABLE);
3341 if (pHCtl)
3342 {
3343 pHCtl->u.cmd.pvCmd = pEnable;
3344 pHCtl->u.cmd.cbCmd = sizeof(*pEnable);
3345 pHCtl->pfnComplete = pfnComplete;
3346 pHCtl->pvComplete = pvComplete;
3347
3348 rc = VBoxVDMAThreadCreate(&pVdma->Thread, vboxVDMAWorkerThread, pVdma, vdmaVBVACtlThreadCreatedEnable, pHCtl);
3349 if (RT_SUCCESS(rc))
3350 return VINF_SUCCESS;
3351
3352 WARN(("VBoxVDMAThreadCreate failed %Rrc\n", rc));
3353 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3354 }
3355 else
3356 {
3357 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3358 rc = VERR_NO_MEMORY;
3359 }
3360
3361 return rc;
3362}
3363
3364/**
3365 * Worker for vboxVDMASaveLoadExecPerform().
3366 */
3367static int vdmaVBVACtlEnableSubmitSync(PVBOXVDMAHOST pVdma, uint32_t offVram, bool fPaused)
3368{
3369 VBVAENABLE Enable = {0};
3370 Enable.u32Flags = VBVA_F_ENABLE;
3371 Enable.u32Offset = offVram;
3372
3373 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
3374 Data.rc = VERR_NOT_IMPLEMENTED;
3375 int rc = RTSemEventCreate(&Data.hEvent);
3376 if (!RT_SUCCESS(rc))
3377 {
3378 WARN(("RTSemEventCreate failed %Rrc\n", rc));
3379 return rc;
3380 }
3381
3382 rc = vdmaVBVACtlEnableSubmitInternal(pVdma, &Enable, fPaused, vdmaVBVACtlSubmitSyncCompletion, &Data);
3383 if (RT_SUCCESS(rc))
3384 {
3385 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
3386 if (RT_SUCCESS(rc))
3387 {
3388 rc = Data.rc;
3389 if (!RT_SUCCESS(rc))
3390 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %Rrc\n", rc));
3391 }
3392 else
3393 WARN(("RTSemEventWait failed %Rrc\n", rc));
3394 }
3395 else
3396 WARN(("vdmaVBVACtlSubmit failed %Rrc\n", rc));
3397
3398 RTSemEventDestroy(Data.hEvent);
3399
3400 return rc;
3401}
3402
3403/**
3404 * Worker for vdmaVBVACtlEnableDisableSubmitInternal().
3405 */
3406static int vdmaVBVACtlDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE RT_UNTRUSTED_VOLATILE_GUEST *pEnable,
3407 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3408{
3409 int rc;
3410 if (VBoxVBVAExHSIsDisabled(&pVdma->CmdVbva))
3411 {
3412 WARN(("VBoxVBVAExHSIsDisabled: already disabled\n"));
3413 return VINF_SUCCESS;
3414 }
3415
3416 VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_GHH_DISABLE);
3417 if (!pHCtl)
3418 {
3419 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3420 return VERR_NO_MEMORY;
3421 }
3422
3423 pHCtl->u.cmd.pvCmd = pEnable;
3424 pHCtl->u.cmd.cbCmd = sizeof(*pEnable);
3425 rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_GUEST, pfnComplete, pvComplete);
3426 if (RT_SUCCESS(rc))
3427 return VINF_SUCCESS;
3428
3429 WARN(("vdmaVBVACtlSubmit failed rc %Rrc\n", rc));
3430 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3431 return rc;
3432}

/**
 * Worker for vdmaVBVACtlEnableDisableSubmit().
 */
static int vdmaVBVACtlEnableDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE RT_UNTRUSTED_VOLATILE_GUEST *pEnable,
                                                  PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
{
    bool fEnable = (pEnable->u32Flags & (VBVA_F_ENABLE | VBVA_F_DISABLE)) == VBVA_F_ENABLE;
    if (fEnable)
        return vdmaVBVACtlEnableSubmitInternal(pVdma, pEnable, false, pfnComplete, pvComplete);
    return vdmaVBVACtlDisableSubmitInternal(pVdma, pEnable, pfnComplete, pvComplete);
}

/**
 * Handler for vboxCmdVBVACmdCtl/VBOXCMDVBVACTL_TYPE_ENABLE.
 */
static int vdmaVBVACtlEnableDisableSubmit(PVBOXVDMAHOST pVdma, VBOXCMDVBVA_CTL_ENABLE RT_UNTRUSTED_VOLATILE_GUEST *pEnable)
{
    VBoxSHGSMICommandMarkAsynchCompletion(&pEnable->Hdr);
    int rc = vdmaVBVACtlEnableDisableSubmitInternal(pVdma, &pEnable->Enable, vboxCmdVBVACmdCtlGuestCompletion, pVdma);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    WARN(("vdmaVBVACtlEnableDisableSubmitInternal failed %Rrc\n", rc));
    pEnable->Hdr.i32Result = rc;
    rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, &pEnable->Hdr);
    AssertRC(rc);
    return VINF_SUCCESS;
}

/**
 * @callback_method_impl{FNVBVAEXHOSTCTL_COMPLETE,
 *      Used by vdmaVBVACtlSubmitSync() and vdmaVBVACtlEnableSubmitSync().}
 */
static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
                                                          int rc, void *pvContext)
{
    RT_NOREF(pVbva, pCtl);
    VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION *)pvContext;
    pData->rc = rc;
    rc = RTSemEventSignal(pData->hEvent);
    if (!RT_SUCCESS(rc))
        WARN(("RTSemEventSignal failed %Rrc\n", rc));
}


/**
 * Synchronously submits a control command to the worker thread and waits for
 * the completion callback to fire.
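 *
 * A typical host-side invocation (cf. vdmaVBVAPause() below) looks like this
 * (illustrative sketch only):
 * @code
 *      VBVAEXHOSTCTL Ctl;
 *      Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE;
 *      int rc = vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
 * @endcode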
 */
static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL *pCtl, VBVAEXHOSTCTL_SOURCE enmSource)
{
    VDMA_VBVA_CTL_CYNC_COMPLETION Data;
    Data.rc = VERR_NOT_IMPLEMENTED;
    Data.hEvent = NIL_RTSEMEVENT;
    int rc = RTSemEventCreate(&Data.hEvent);
    if (RT_SUCCESS(rc))
    {
        rc = vdmaVBVACtlSubmit(pVdma, pCtl, enmSource, vdmaVBVACtlSubmitSyncCompletion, &Data);
        if (RT_SUCCESS(rc))
        {
            rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
            if (RT_SUCCESS(rc))
            {
                rc = Data.rc;
                if (!RT_SUCCESS(rc))
                    WARN(("vdmaVBVACtlSubmitSyncCompletion returned %Rrc\n", rc));
            }
            else
                WARN(("RTSemEventWait failed %Rrc\n", rc));
        }
        else
            Log(("vdmaVBVACtlSubmit failed %Rrc\n", rc));

        RTSemEventDestroy(Data.hEvent);
    }
    else
        WARN(("RTSemEventCreate failed %Rrc\n", rc));
    return rc;
}

/**
 * Worker for vboxVDMASaveStateExecPrep().
 */
static int vdmaVBVAPause(PVBOXVDMAHOST pVdma)
{
    VBVAEXHOSTCTL Ctl;
    Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE;
    return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
}

/**
 * Worker for vboxVDMASaveLoadExecPerform() and vboxVDMASaveStateExecDone().
 */
static int vdmaVBVAResume(PVBOXVDMAHOST pVdma)
{
    VBVAEXHOSTCTL Ctl;
    Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME;
    return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
}

/**
 * Worker for vboxCmdVBVACmdSubmit(), vboxCmdVBVACmdFlush() and vboxCmdVBVATimerRefresh().
 */
static int vboxVDMACmdSubmitPerform(struct VBOXVDMAHOST *pVdma)
{
    int rc = VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva);
    switch (rc)
    {
        case VINF_SUCCESS:
            return VBoxVDMAThreadEventNotify(&pVdma->Thread);
        case VINF_ALREADY_INITIALIZED:
        case VINF_EOF:
        case VERR_INVALID_STATE:
            return VINF_SUCCESS;
        default:
            Assert(!RT_FAILURE(rc));
            return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
    }
}


/**
 * @interface_method_impl{PDMIDISPLAYVBVACALLBACKS,pfnCrCtlSubmit}
 */
int vboxCmdVBVACmdHostCtl(PPDMIDISPLAYVBVACALLBACKS pInterface,
                          struct VBOXCRCMDCTL *pCmd,
                          uint32_t cbCmd,
                          PFNCRCTLCOMPLETION pfnCompletion,
                          void *pvCompletion)
{
    PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
    struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
    if (pVdma == NULL)
        return VERR_INVALID_STATE;
    pCmd->CalloutList.List.pNext = NULL;
    return vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, pfnCompletion, pvCompletion);
}

/**
 * Argument package from vboxCmdVBVACmdHostCtlSync to vboxCmdVBVACmdHostCtlSyncCb.
 */
typedef struct VBOXCMDVBVA_CMDHOSTCTL_SYNC
{
    struct VBOXVDMAHOST *pVdma;
    uint32_t fProcessing;
    int rc;
} VBOXCMDVBVA_CMDHOSTCTL_SYNC;

/**
 * @callback_method_impl{FNCRCTLCOMPLETION, Used by vboxCmdVBVACmdHostCtlSync.}
 */
static DECLCALLBACK(void) vboxCmdVBVACmdHostCtlSyncCb(struct VBOXCRCMDCTL *pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
{
    RT_NOREF(pCmd, cbCmd);
    VBOXCMDVBVA_CMDHOSTCTL_SYNC *pData = (VBOXCMDVBVA_CMDHOSTCTL_SYNC *)pvCompletion;

    pData->rc = rc;

    struct VBOXVDMAHOST *pVdma = pData->pVdma;

    ASMAtomicIncS32(&pVdma->i32cHostCrCtlCompleted);

    pData->fProcessing = 0;

    RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
}

/**
 * @callback_method_impl{FNVBOXCRCLIENT_CALLOUT, Worker for vboxVDMACrCtlHgsmiSetup}
 *
 * @note r=bird: not to be confused with the callout function below. sigh.
 */
static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL *pCmd,
                                               VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb)
{
    pEntry->pfnCb = pfnCb;
    int rc = RTCritSectEnter(&pVdma->CalloutCritSect);
    if (RT_SUCCESS(rc))
    {
        RTListAppend(&pCmd->CalloutList.List, &pEntry->Node);
        RTCritSectLeave(&pVdma->CalloutCritSect);

        RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
    }
    else
        WARN(("RTCritSectEnter failed %Rrc\n", rc));

    return rc;
}


/**
 * Worker for vboxCmdVBVACmdHostCtlSync.
 */
static int vboxCmdVBVACmdCalloutProcess(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL *pCmd)
{
    int rc = VINF_SUCCESS;
    for (;;)
    {
        rc = RTCritSectEnter(&pVdma->CalloutCritSect);
        if (RT_SUCCESS(rc))
        {
            VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry = RTListGetFirst(&pCmd->CalloutList.List, VBOXCRCMDCTL_CALLOUT_LISTENTRY, Node);
            if (pEntry)
                RTListNodeRemove(&pEntry->Node);
            RTCritSectLeave(&pVdma->CalloutCritSect);

            if (!pEntry)
                break;

            pEntry->pfnCb(pEntry);
        }
        else
        {
            WARN(("RTCritSectEnter failed %Rrc\n", rc));
            break;
        }
    }

    return rc;
}

/**
 * @interface_method_impl{PDMIDISPLAYVBVACALLBACKS,pfnCrCtlSubmitSync}
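 *
 * @note Blocks the calling thread.  Completion is signalled by
 *       vboxCmdVBVACmdHostCtlSyncCb(), and the wait loop below also services
 *       any callout requests queued via vboxCmdVBVACmdCallout().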
 */
DECLCALLBACK(int) vboxCmdVBVACmdHostCtlSync(PPDMIDISPLAYVBVACALLBACKS pInterface, struct VBOXCRCMDCTL *pCmd, uint32_t cbCmd)
{
    PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
    struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
    if (pVdma == NULL)
        return VERR_INVALID_STATE;

    VBOXCMDVBVA_CMDHOSTCTL_SYNC Data;
    Data.pVdma = pVdma;
    Data.fProcessing = 1;
    Data.rc = VERR_INTERNAL_ERROR;
    RTListInit(&pCmd->CalloutList.List);
    int rc = vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, vboxCmdVBVACmdHostCtlSyncCb, &Data);
    if (!RT_SUCCESS(rc))
    {
        WARN(("vdmaVBVACtlOpaqueHostSubmit failed %Rrc\n", rc));
        return rc;
    }

    while (Data.fProcessing)
    {
        /* Wait with a timeout so no completed message or callout is missed. */
        RTSemEventMultiWait(pVdma->HostCrCtlCompleteEvent, 500);

        vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);

        if (Data.fProcessing)
            RTThreadYield();
    }

    /* Process any callouts that may have been queued after the loop exited. */
    vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);

    /* Our message has been processed, so we should reset the semaphore.
     * It is still possible that another message has been processed and the
     * semaphore has been signalled again in the meantime, so only reset it
     * when no other completed messages are outstanding.
     */
    int32_t c = ASMAtomicDecS32(&pVdma->i32cHostCrCtlCompleted);
    Assert(c >= 0);
    if (!c)
        RTSemEventMultiReset(pVdma->HostCrCtlCompleteEvent);

    rc = Data.rc;
    if (!RT_SUCCESS(rc))
        WARN(("host call failed %Rrc\n", rc));

    return rc;
}

/**
 * Handler for VBVA_CMDVBVA_CTL, see vbvaChannelHandler().
 *
 * @returns VBox status code
 * @param   pVGAState       The VGA state.
 * @param   pCtl            The control command.
 * @param   cbCtl           The size of it.  This is at least
 *                          sizeof(VBOXCMDVBVA_CTL).
 * @thread  EMT
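 *
 * @note pCtl points into guest-shared memory, so each untrusted field is
 *       copied out exactly once, fenced, and then validated before use.  A
 *       minimal sketch of the discipline used below (illustrative only):
 * @code
 *      uint32_t uType = pCtl->u32Type;         // single copy of the untrusted field
 *      RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();  // no re-reads past this point
 *      if (uType == VBOXCMDVBVACTL_TYPE_3DCTL) // validate the local copy
 *      {
 *          RT_UNTRUSTED_VALIDATED_FENCE();     // block speculation past the check
 *          // ... act on the validated value ...
 *      }
 * @endcode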
 */
int vboxCmdVBVACmdCtl(PVGASTATE pVGAState, VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_GUEST *pCtl, uint32_t cbCtl)
{
    struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
    uint32_t uType = pCtl->u32Type;
    RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();

    if (   uType == VBOXCMDVBVACTL_TYPE_3DCTL
        || uType == VBOXCMDVBVACTL_TYPE_RESIZE
        || uType == VBOXCMDVBVACTL_TYPE_ENABLE)
    {
        RT_UNTRUSTED_VALIDATED_FENCE();

        switch (uType)
        {
            case VBOXCMDVBVACTL_TYPE_3DCTL:
                return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, pCtl, cbCtl);

            case VBOXCMDVBVACTL_TYPE_RESIZE:
                return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_RESIZE, pCtl, cbCtl);

            case VBOXCMDVBVACTL_TYPE_ENABLE:
                ASSERT_GUEST_BREAK(cbCtl == sizeof(VBOXCMDVBVA_CTL_ENABLE));
                return vdmaVBVACtlEnableDisableSubmit(pVdma, (VBOXCMDVBVA_CTL_ENABLE RT_UNTRUSTED_VOLATILE_GUEST *)pCtl);

            default:
                AssertFailed();
        }
    }

    pCtl->i32Result = VERR_INVALID_PARAMETER;
    int rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
    AssertRC(rc);
    return VINF_SUCCESS;
}

/**
 * Handler for VBVA_CMDVBVA_SUBMIT, see vbvaChannelHandler().
 *
 * @thread EMT
 */
int vboxCmdVBVACmdSubmit(PVGASTATE pVGAState)
{
    if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
    {
        WARN(("vdma VBVA is disabled\n"));
        return VERR_INVALID_STATE;
    }

    return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
}

/**
 * Handler for VBVA_CMDVBVA_FLUSH, see vbvaChannelHandler().
 *
 * @thread EMT
 */
int vboxCmdVBVACmdFlush(PVGASTATE pVGAState)
{
    WARN(("flush\n"));
    if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
    {
        WARN(("vdma VBVA is disabled\n"));
        return VERR_INVALID_STATE;
    }
    return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
}

/**
 * Called from vgaTimerRefresh().
 */
void vboxCmdVBVATimerRefresh(PVGASTATE pVGAState)
{
    if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
        return;
    vboxVDMACmdSubmitPerform(pVGAState->pVdma);
}

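/**
 * Queries whether the command VBVA interface is enabled.
 */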
bool vboxCmdVBVAIsEnabled(PVGASTATE pVGAState)
{
    return VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva);
}



/*
 *
 *
 * Saved state.
 * Saved state.
 * Saved state.
 *
 *
 */

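/**
 * Prepares for saving the VDMA state: pauses command VBVA processing, or, when
 * command VBVA is not active (VERR_INVALID_STATE), posts a SAVESTATE_BEGIN
 * chromium control instead.
 */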
int vboxVDMASaveStateExecPrep(struct VBOXVDMAHOST *pVdma)
{
    int rc = vdmaVBVAPause(pVdma);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    if (rc != VERR_INVALID_STATE)
    {
        WARN(("vdmaVBVAPause failed %Rrc\n", rc));
        return rc;
    }

# ifdef DEBUG_misha
    WARN(("debug prep"));
# endif

    PVGASTATE pVGAState = pVdma->pVGAState;
    PVBOXVDMACMD_CHROMIUM_CTL pCmd;
    pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_BEGIN, sizeof(*pCmd));
    if (pCmd)
    {
        rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof(*pCmd));
        AssertRC(rc);
        if (RT_SUCCESS(rc))
            rc = vboxVDMACrCtlGetRc(pCmd);
        vboxVDMACrCtlRelease(pCmd);
        return rc;
    }
    return VERR_NO_MEMORY;
}

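/**
 * Counterpart of vboxVDMASaveStateExecPrep(): resumes command VBVA processing
 * after the state has been saved, or posts a SAVESTATE_END chromium control
 * when command VBVA is not active (VERR_INVALID_STATE).
 */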
int vboxVDMASaveStateExecDone(struct VBOXVDMAHOST *pVdma)
{
    int rc = vdmaVBVAResume(pVdma);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    if (rc != VERR_INVALID_STATE)
    {
        WARN(("vdmaVBVAResume failed %Rrc\n", rc));
        return rc;
    }

# ifdef DEBUG_misha
    WARN(("debug done"));
# endif

    PVGASTATE pVGAState = pVdma->pVGAState;
    PVBOXVDMACMD_CHROMIUM_CTL pCmd;
    pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_END, sizeof(*pCmd));
    Assert(pCmd);
    if (pCmd)
    {
        rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof(*pCmd));
        AssertRC(rc);
        if (RT_SUCCESS(rc))
            rc = vboxVDMACrCtlGetRc(pCmd);
        vboxVDMACrCtlRelease(pCmd);
        return rc;
    }
    return VERR_NO_MEMORY;
}

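/**
 * Saves the command VBVA state.
 *
 * Saved unit layout (as implemented below):
 *      - uint32_t: offset of the guest VBVA area relative to the VRAM base,
 *        or UINT32_MAX when command VBVA is disabled (nothing follows then);
 *      - the worker state, written on the worker thread via a
 *        VBVAEXHOSTCTL_TYPE_HH_SAVESTATE control.
 */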
int vboxVDMASaveStateExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
{
    int rc;

    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    {
        rc = SSMR3PutU32(pSSM, UINT32_MAX);
        AssertRCReturn(rc, rc);
        return VINF_SUCCESS;
    }

    PVGASTATE pVGAState = pVdma->pVGAState;
    uint8_t *pu8VramBase = pVGAState->vram_ptrR3;

    rc = SSMR3PutU32(pSSM, (uint32_t)((uintptr_t)pVdma->CmdVbva.pVBVA - (uintptr_t)pu8VramBase));
    AssertRCReturn(rc, rc);

    VBVAEXHOSTCTL HCtl;
    HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_SAVESTATE;
    HCtl.u.state.pSSM = pSSM;
    HCtl.u.state.u32Version = 0;
    return vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
}

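/**
 * Restores the state saved by vboxVDMASaveStateExecPerform(): re-enables
 * command VBVA in paused mode, loads the worker state, and finally resumes
 * processing.
 */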
int vboxVDMASaveLoadExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32;
    int rc = SSMR3GetU32(pSSM, &u32);
    AssertLogRelRCReturn(rc, rc);

    if (u32 != UINT32_MAX)
    {
        rc = vdmaVBVACtlEnableSubmitSync(pVdma, u32, true);
        AssertLogRelRCReturn(rc, rc);

        Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_ESTATE_PAUSED);

        VBVAEXHOSTCTL HCtl;
        HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_LOADSTATE;
        HCtl.u.state.pSSM = pSSM;
        HCtl.u.state.u32Version = u32Version;
        rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
        AssertLogRelRCReturn(rc, rc);

        rc = vdmaVBVAResume(pVdma);
        AssertLogRelRCReturn(rc, rc);

        return VINF_SUCCESS;
    }

    return VINF_SUCCESS;
}

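/**
 * Called once the whole saved state has been loaded; submits a
 * HH_LOADSTATE_DONE control to notify the worker thread of the fact.
 */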
int vboxVDMASaveLoadDone(struct VBOXVDMAHOST *pVdma)
{
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
        return VINF_SUCCESS;

/** @todo r=bird: BTW. would be great if you put in a couple of comments here and there explaining what
 *       the purpose of this code is. */
    VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE);
    if (!pHCtl)
    {
        WARN(("VBoxVBVAExHCtlCreate failed\n"));
        return VERR_NO_MEMORY;
    }

    /* Sanity. */
    pHCtl->u.cmd.pvCmd = NULL;
    pHCtl->u.cmd.cbCmd = 0;

    /* A NULL completion callback means the control is simply freed when done. */
    int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_HOST, NULL, NULL);
    if (RT_FAILURE(rc))
    {
        Log(("vdmaVBVACtlSubmit failed %Rrc\n", rc));
        VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
        return rc;
    }

    return VINF_SUCCESS;
}
