VirtualBox

source: vbox/trunk/src/VBox/Devices/Graphics/DevVGA_VDMA.cpp@ 75111

Last change on this file since 75111 was 73097, checked in by vboxsync, 6 years ago

*: Made RT_UOFFSETOF, RT_OFFSETOF, RT_UOFFSETOF_ADD and RT_OFFSETOF_ADD work like builtin_offsetof() and require compile time resolvable requests, adding RT_UOFFSETOF_DYN for the dynamic questions that can only be answered at runtime.

1/* $Id: DevVGA_VDMA.cpp 73097 2018-07-12 21:06:33Z vboxsync $ */
2/** @file
3 * Video DMA (VDMA) support.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEV_VGA
23#include <VBox/VMMDev.h>
24#include <VBox/vmm/pdmdev.h>
25#include <VBox/vmm/pgm.h>
26#include <VBoxVideo.h>
27#include <VBox/AssertGuest.h>
28#include <iprt/semaphore.h>
29#include <iprt/thread.h>
30#include <iprt/mem.h>
31#include <iprt/asm.h>
32#include <iprt/list.h>
33#include <iprt/param.h>
34
35#include "DevVGA.h"
36#include "HGSMI/SHGSMIHost.h"
37
38#include <VBoxVideo3D.h>
39#include <VBoxVideoHost3D.h>
40
41#ifdef DEBUG_misha
42# define VBOXVDBG_MEMCACHE_DISABLE
43#endif
44
45#ifndef VBOXVDBG_MEMCACHE_DISABLE
46# include <iprt/memcache.h>
47#endif
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53#ifdef DEBUG_misha
54# define WARN_BP() do { AssertFailed(); } while (0)
55#else
56# define WARN_BP() do { } while (0)
57#endif
58#define WARN(_msg) do { \
59 LogRel(_msg); \
60 WARN_BP(); \
61 } while (0)
62
63#define VBOXVDMATHREAD_STATE_TERMINATED 0
64#define VBOXVDMATHREAD_STATE_CREATING 1
65#define VBOXVDMATHREAD_STATE_CREATED 3
66#define VBOXVDMATHREAD_STATE_TERMINATING 4
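/* Thread state lifecycle, as implemented by VBoxVDMAThreadInit/Create/Term/Cleanup below
 * (note that the value 2 is unused):
 *
 *   TERMINATED --VBoxVDMAThreadCreate--> CREATING --worker started--> CREATED
 *   CREATED --VBoxVDMAThreadTerm--> TERMINATING --VBoxVDMAThreadCleanup--> TERMINATED
 */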
67
68
69/*********************************************************************************************************************************
70* Structures and Typedefs *
71*********************************************************************************************************************************/
72struct VBOXVDMATHREAD;
73
74typedef DECLCALLBACKPTR(void, PFNVBOXVDMATHREAD_CHANGED)(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvChangeContext);
75
76#ifdef VBOX_WITH_CRHGSMI
77static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd, VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb);
78#endif
79
80
81typedef struct VBOXVDMATHREAD
82{
83 RTTHREAD hWorkerThread;
84 RTSEMEVENT hEvent;
85 volatile uint32_t u32State;
86 PFNVBOXVDMATHREAD_CHANGED pfnChanged;
87 void *pvChanged;
88} VBOXVDMATHREAD, *PVBOXVDMATHREAD;
89
90
91/* State transitions:
92 *
93 * submitter | processor
94 *
95 * LISTENING ---> PROCESSING
96 *
97 */
98#define VBVAEXHOSTCONTEXT_STATE_LISTENING 0
99#define VBVAEXHOSTCONTEXT_STATE_PROCESSING 1
100
101#define VBVAEXHOSTCONTEXT_ESTATE_DISABLED -1
102#define VBVAEXHOSTCONTEXT_ESTATE_PAUSED 0
103#define VBVAEXHOSTCONTEXT_ESTATE_ENABLED 1
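/* The numeric ordering of the enable states is significant: VBoxVBVAExHSIsEnabled() below
 * treats anything >= PAUSED (i.e. PAUSED or ENABLED) as "enabled", while actual command
 * fetching requires the state to be > PAUSED (i.e. ENABLED). */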
104
105typedef struct VBVAEXHOSTCONTEXT
106{
107 VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *pVBVA;
108 /** Maximum number of data bytes addressable relative to pVBVA. */
109 uint32_t cbMaxData;
110 volatile int32_t i32State;
111 volatile int32_t i32EnableState;
112 volatile uint32_t u32cCtls;
113 /* critical section for accessing ctl lists */
114 RTCRITSECT CltCritSect;
115 RTLISTANCHOR GuestCtlList;
116 RTLISTANCHOR HostCtlList;
117#ifndef VBOXVDBG_MEMCACHE_DISABLE
118 RTMEMCACHE CtlCache;
119#endif
120} VBVAEXHOSTCONTEXT;
121
122typedef enum
123{
124 VBVAEXHOSTCTL_TYPE_UNDEFINED = 0,
125 VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE,
126 VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME,
127 VBVAEXHOSTCTL_TYPE_HH_SAVESTATE,
128 VBVAEXHOSTCTL_TYPE_HH_LOADSTATE,
129 VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE,
130 VBVAEXHOSTCTL_TYPE_HH_BE_OPAQUE,
131 VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD,
132 VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE,
133 VBVAEXHOSTCTL_TYPE_GHH_ENABLE,
134 VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED,
135 VBVAEXHOSTCTL_TYPE_GHH_DISABLE,
136 VBVAEXHOSTCTL_TYPE_GHH_RESIZE
137} VBVAEXHOSTCTL_TYPE;
138
139struct VBVAEXHOSTCTL;
140
141typedef DECLCALLBACK(void) FNVBVAEXHOSTCTL_COMPLETE(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvComplete);
142typedef FNVBVAEXHOSTCTL_COMPLETE *PFNVBVAEXHOSTCTL_COMPLETE;
143
144typedef struct VBVAEXHOSTCTL
145{
146 RTLISTNODE Node;
147 VBVAEXHOSTCTL_TYPE enmType;
148 union
149 {
150 struct
151 {
152 void RT_UNTRUSTED_VOLATILE_GUEST *pvCmd;
153 uint32_t cbCmd;
154 } cmd;
155
156 struct
157 {
158 PSSMHANDLE pSSM;
159 uint32_t u32Version;
160 } state;
161 } u;
162 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete;
163 void *pvComplete;
164} VBVAEXHOSTCTL;
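/* Which union member of VBVAEXHOSTCTL is valid depends on enmType: the HH_SAVESTATE and
 * HH_LOADSTATE controls use u.state, while the other controls generally carry their
 * payload in u.cmd (see vboxVDMACrHostCtlProcess() and vboxVBVAExHSSaveGuestCtl() below). */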
165
166/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
167 * but can be called concurrently with the VBoxVBVAExS** (submitter) functions, except Init/Start/Term.
168 * They may only be called by the processor, i.e. the entity that acquired the processor state by a direct
169 * or indirect call to VBoxVBVAExHSCheckCommands; see the more detailed comments on the function definitions. */
170typedef enum
171{
172 VBVAEXHOST_DATA_TYPE_NO_DATA = 0,
173 VBVAEXHOST_DATA_TYPE_CMD,
174 VBVAEXHOST_DATA_TYPE_HOSTCTL,
175 VBVAEXHOST_DATA_TYPE_GUESTCTL
176} VBVAEXHOST_DATA_TYPE;
177
178
179#ifdef VBOX_WITH_CRHGSMI
180typedef struct VBOXVDMA_SOURCE
181{
182 VBVAINFOSCREEN Screen;
183 VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
184} VBOXVDMA_SOURCE;
185#endif
186
187typedef struct VBOXVDMAHOST
188{
189 PHGSMIINSTANCE pHgsmi; /**< Same as VGASTATE::pHgsmi. */
190 PVGASTATE pVGAState;
191#ifdef VBOX_WITH_CRHGSMI
192 VBVAEXHOSTCONTEXT CmdVbva;
193 VBOXVDMATHREAD Thread;
194 VBOXCRCMD_SVRINFO CrSrvInfo;
195 VBVAEXHOSTCTL* pCurRemainingHostCtl;
196 RTSEMEVENTMULTI HostCrCtlCompleteEvent;
197 int32_t volatile i32cHostCrCtlCompleted;
198 RTCRITSECT CalloutCritSect;
199// VBOXVDMA_SOURCE aSources[VBOX_VIDEO_MAX_SCREENS];
200#endif
201#ifdef VBOX_VDMA_WITH_WATCHDOG
202 PTMTIMERR3 WatchDogTimer;
203#endif
204} VBOXVDMAHOST, *PVBOXVDMAHOST;
205
206
207/**
208 * List selector for VBoxVBVAExHCtlSubmit(), vdmaVBVACtlSubmit().
209 */
210typedef enum
211{
212 VBVAEXHOSTCTL_SOURCE_GUEST = 0,
213 VBVAEXHOSTCTL_SOURCE_HOST
214} VBVAEXHOSTCTL_SOURCE;
215
216
217/*********************************************************************************************************************************
218* Internal Functions *
219*********************************************************************************************************************************/
220#ifdef VBOX_WITH_CRHGSMI
221static int vdmaVBVANotifyDisable(PVGASTATE pVGAState);
222static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd);
223static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc);
224static int VBoxVDMAThreadEventNotify(PVBOXVDMATHREAD pThread);
225static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, VBOXVDMACMD_DMA_BPB_TRANSFER RT_UNTRUSTED_VOLATILE_GUEST *pTransfer,
226 uint32_t cbBuffer);
227static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL *pCtl, VBVAEXHOSTCTL_SOURCE enmSource);
228static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
229 int rc, void *pvContext);
230
231/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
232 * but can be called concurrently with the other VBoxVBVAEx** functions, except Init/Start/Term. */
233#endif /* VBOX_WITH_CRHGSMI */
234
235
236
237#ifdef VBOX_WITH_CRHGSMI
238
239/**
240 * Creates a host control command.
241 */
242static VBVAEXHOSTCTL *VBoxVBVAExHCtlCreate(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL_TYPE enmType)
243{
244# ifndef VBOXVDBG_MEMCACHE_DISABLE
245 VBVAEXHOSTCTL *pCtl = (VBVAEXHOSTCTL *)RTMemCacheAlloc(pCmdVbva->CtlCache);
246# else
247 VBVAEXHOSTCTL *pCtl = (VBVAEXHOSTCTL *)RTMemAlloc(sizeof(VBVAEXHOSTCTL));
248# endif
249 if (pCtl)
250 {
251 RT_ZERO(*pCtl);
252 pCtl->enmType = enmType;
253 }
254 else
255 WARN(("VBoxVBVAExHCtlAlloc failed\n"));
256 return pCtl;
257}
258
259/**
260 * Destroys a host control command.
261 */
262static void VBoxVBVAExHCtlFree(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl)
263{
264# ifndef VBOXVDBG_MEMCACHE_DISABLE
265 RTMemCacheFree(pCmdVbva->CtlCache, pCtl);
266# else
267 RTMemFree(pCtl);
268# endif
269}
270
271
272
273/**
274 * Tries to acquire the processor role, switching the state from LISTENING to PROCESSING.
275 */
276static int vboxVBVAExHSProcessorAcquire(struct VBVAEXHOSTCONTEXT *pCmdVbva)
277{
278 Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);
279
280 if (ASMAtomicCmpXchgS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_PROCESSING, VBVAEXHOSTCONTEXT_STATE_LISTENING))
281 return VINF_SUCCESS;
282 return VERR_SEM_BUSY;
283}
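/* A minimal sketch of the acquire/release protocol (cf. VBoxVBVAExHSCheckCommands() below):
 *
 *     if (RT_SUCCESS(vboxVBVAExHSProcessorAcquire(pCmdVbva)))
 *     {
 *         // we are the one and only processor now; fetch and handle commands
 *         vboxVBVAExHPProcessorRelease(pCmdVbva);
 *     }
 *     // otherwise VERR_SEM_BUSY: somebody else is processing
 */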
284
285/**
286 * Worker for vboxVBVAExHPDataGetInner() and VBoxVBVAExHPCheckHostCtlOnDisable()
287 * that gets the next control command.
288 *
289 * @returns Pointer to command if found, NULL if not.
290 * @param pCmdVbva The VBVA command context.
291 * @param pfHostCtl Where to indicate whether it's a host or guest
292 * control command.
293 * @param fHostOnlyMode Whether to only fetch host commands, or both.
294 */
295static VBVAEXHOSTCTL *vboxVBVAExHPCheckCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, bool *pfHostCtl, bool fHostOnlyMode)
296{
297 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
298
299 if (!fHostOnlyMode && !ASMAtomicUoReadU32(&pCmdVbva->u32cCtls))
300 return NULL;
301
302 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
303 if (RT_SUCCESS(rc))
304 {
305 VBVAEXHOSTCTL *pCtl = RTListGetFirst(&pCmdVbva->HostCtlList, VBVAEXHOSTCTL, Node);
306 if (pCtl)
307 *pfHostCtl = true;
308 else if (!fHostOnlyMode)
309 {
310 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
311 {
312 pCtl = RTListGetFirst(&pCmdVbva->GuestCtlList, VBVAEXHOSTCTL, Node);
313 /* pCtl cannot be NULL here since pCmdVbva->u32cCtls is non-zero
314 * and there are no HostCtl commands. */
315 Assert(pCtl);
316 *pfHostCtl = false;
317 }
318 }
319
320 if (pCtl)
321 {
322 RTListNodeRemove(&pCtl->Node);
323 ASMAtomicDecU32(&pCmdVbva->u32cCtls);
324 }
325
326 RTCritSectLeave(&pCmdVbva->CltCritSect);
327
328 return pCtl;
329 }
330 else
331 WARN(("RTCritSectEnter failed %Rrc\n", rc));
332
333 return NULL;
334}
335
336/**
337 * Worker for vboxVDMACrHgcmHandleEnableRemainingHostCommand().
338 */
339static VBVAEXHOSTCTL *VBoxVBVAExHPCheckHostCtlOnDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
340{
341 bool fHostCtl = false;
342 VBVAEXHOSTCTL *pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostCtl, true);
343 Assert(!pCtl || fHostCtl);
344 return pCtl;
345}
346
347/**
348 * Worker for vboxVBVAExHPCheckProcessCtlInternal() and
349 * vboxVDMACrGuestCtlProcess() / VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED.
350 */
351static int VBoxVBVAExHPPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
352{
353 if (pCmdVbva->i32EnableState < VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
354 {
355 WARN(("Invalid state\n"));
356 return VERR_INVALID_STATE;
357 }
358
359 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
360 return VINF_SUCCESS;
361}
362
363/**
364 * Resumes (re-enables) the VBVA state in response to VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME.
365 */
366static int VBoxVBVAExHPResume(struct VBVAEXHOSTCONTEXT *pCmdVbva)
367{
368 if (pCmdVbva->i32EnableState != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
369 {
370 WARN(("Invalid state\n"));
371 return VERR_INVALID_STATE;
372 }
373
374 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
375 return VINF_SUCCESS;
376}
377
378/**
379 * Worker for vboxVBVAExHPDataGetInner that processes PAUSE and RESUME requests.
380 *
381 * Unclear why these cannot be handled the normal way.
382 *
383 * @returns true if handled, false if not.
384 * @param pCmdVbva The VBVA context.
385 * @param pCtl The host control command.
386 */
387static bool vboxVBVAExHPCheckProcessCtlInternal(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl)
388{
389 switch (pCtl->enmType)
390 {
391 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE:
392 VBoxVBVAExHPPause(pCmdVbva);
393 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
394 return true;
395
396 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME:
397 VBoxVBVAExHPResume(pCmdVbva);
398 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
399 return true;
400
401 default:
402 return false;
403 }
404}
405
406/**
407 * Releases the processor role, switching the state from PROCESSING back to LISTENING.
408 */
409static void vboxVBVAExHPProcessorRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
410{
411 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
412
413 ASMAtomicWriteS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
414}
415
416/**
417 * Sets the VBVA_F_STATE_PROCESSING flag in the shared VBVA buffer's host events.
418 */
419static void vboxVBVAExHPHgEventSet(struct VBVAEXHOSTCONTEXT *pCmdVbva)
420{
421 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
422 if (pCmdVbva->pVBVA)
423 ASMAtomicOrU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, VBVA_F_STATE_PROCESSING);
424}
425
426/**
427 * Clears the VBVA_F_STATE_PROCESSING flag in the shared VBVA buffer's host events.
428 */
429static void vboxVBVAExHPHgEventClear(struct VBVAEXHOSTCONTEXT *pCmdVbva)
430{
431 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
432 if (pCmdVbva->pVBVA)
433 ASMAtomicAndU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, ~VBVA_F_STATE_PROCESSING);
434}
435
436/**
437 * Worker for vboxVBVAExHPDataGetInner.
438 *
439 * @retval VINF_SUCCESS
440 * @retval VINF_EOF
441 * @retval VINF_TRY_AGAIN
442 * @retval VERR_INVALID_STATE
443 *
444 * @thread VDMA
445 */
446static int vboxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t RT_UNTRUSTED_VOLATILE_GUEST **ppbCmd, uint32_t *pcbCmd)
447{
448 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
449 Assert(pCmdVbva->i32EnableState > VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
450
451 VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *pVBVA = pCmdVbva->pVBVA; /* This is shared with the guest, so careful! */
452
453 /*
454 * Inspect records.
455 */
456 uint32_t idxRecordFirst = ASMAtomicUoReadU32(&pVBVA->indexRecordFirst);
457 uint32_t idxRecordFree = ASMAtomicReadU32(&pVBVA->indexRecordFree);
458 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
459 Log(("first = %d, free = %d\n", idxRecordFirst, idxRecordFree));
460 if (idxRecordFirst == idxRecordFree)
461 return VINF_EOF; /* No records to process. Return without assigning output variables. */
462 AssertReturn(idxRecordFirst < VBVA_MAX_RECORDS, VERR_INVALID_STATE);
463 RT_UNTRUSTED_VALIDATED_FENCE();
464
465 /*
466 * Read the record size and check that it has been completely recorded.
467 */
468 uint32_t const cbRecordCurrent = ASMAtomicReadU32(&pVBVA->aRecords[idxRecordFirst].cbRecord);
469 uint32_t const cbRecord = cbRecordCurrent & ~VBVA_F_RECORD_PARTIAL;
470 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
471 if ( (cbRecordCurrent & VBVA_F_RECORD_PARTIAL)
472 || !cbRecord)
473 return VINF_TRY_AGAIN; /* The record is being recorded, try again. */
474 Assert(cbRecord);
475
476 /*
477 * Get and validate the data area.
478 */
479 uint32_t const offData = ASMAtomicReadU32(&pVBVA->off32Data);
480 uint32_t cbMaxData = ASMAtomicReadU32(&pVBVA->cbData);
481 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
482 AssertLogRelMsgStmt(cbMaxData <= pCmdVbva->cbMaxData, ("%#x vs %#x\n", cbMaxData, pCmdVbva->cbMaxData),
483 cbMaxData = pCmdVbva->cbMaxData);
484 AssertLogRelMsgReturn( cbRecord <= cbMaxData
485 && offData <= cbMaxData - cbRecord,
486 ("offData=%#x cbRecord=%#x cbMaxData=%#x cbRecord\n", offData, cbRecord, cbMaxData),
487 VERR_INVALID_STATE);
488 RT_UNTRUSTED_VALIDATED_FENCE();
489
490 /*
491 * Just set the return values and we're done.
492 */
493 *ppbCmd = (uint8_t RT_UNTRUSTED_VOLATILE_GUEST *)&pVBVA->au8Data[offData];
494 *pcbCmd = cbRecord;
495 return VINF_SUCCESS;
496}
497
498/**
499 * Completion routine advancing our end of the ring and data buffers forward.
500 *
501 * @param pCmdVbva The VBVA context.
502 * @param cbCmd The size of the data.
503 */
504static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd)
505{
506 VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *pVBVA = pCmdVbva->pVBVA;
507 if (pVBVA)
508 {
509 /* Move data head. */
510 uint32_t const cbData = pVBVA->cbData;
511 uint32_t const offData = pVBVA->off32Data;
512 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
513 if (cbData > 0)
514 ASMAtomicWriteU32(&pVBVA->off32Data, (offData + cbCmd) % cbData);
515 else
516 ASMAtomicWriteU32(&pVBVA->off32Data, 0);
517
518 /* Increment record pointer. */
519 uint32_t const idxRecFirst = pVBVA->indexRecordFirst;
520 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
521 ASMAtomicWriteU32(&pVBVA->indexRecordFirst, (idxRecFirst + 1) % RT_ELEMENTS(pVBVA->aRecords));
522 }
523}
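/* Illustrative example of the ring advance above (hypothetical numbers): with
 * cbData = 0x10000, off32Data = 0xFF00 and a completed command of cbCmd = 0x200 bytes,
 * the new data offset becomes (0xFF00 + 0x200) % 0x10000 = 0x100, i.e. it wraps
 * around the ring buffer. */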
524
525/**
526 * Control command completion routine used by many.
527 */
528static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc)
529{
530 if (pCtl->pfnComplete)
531 pCtl->pfnComplete(pCmdVbva, pCtl, rc, pCtl->pvComplete);
532 else
533 VBoxVBVAExHCtlFree(pCmdVbva, pCtl);
534}
535
536
537/**
538 * Worker for VBoxVBVAExHPDataGet.
539 * @thread VDMA
540 */
541static VBVAEXHOST_DATA_TYPE vboxVBVAExHPDataGetInner(struct VBVAEXHOSTCONTEXT *pCmdVbva,
542 uint8_t RT_UNTRUSTED_VOLATILE_GUEST **ppbCmd, uint32_t *pcbCmd)
543{
544 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
545 VBVAEXHOSTCTL *pCtl;
546 bool fHostClt;
547
548 for (;;)
549 {
550 pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostClt, false);
551 if (pCtl)
552 {
553 if (fHostClt)
554 {
555 if (!vboxVBVAExHPCheckProcessCtlInternal(pCmdVbva, pCtl))
556 {
557 *ppbCmd = (uint8_t RT_UNTRUSTED_VOLATILE_GUEST *)pCtl; /* Note! pCtl is host data, so trusted */
558 *pcbCmd = sizeof (*pCtl);
559 return VBVAEXHOST_DATA_TYPE_HOSTCTL;
560 }
561 continue; /* Processed by vboxVBVAExHPCheckProcessCtlInternal, get next. */
562 }
563 *ppbCmd = (uint8_t RT_UNTRUSTED_VOLATILE_GUEST *)pCtl; /* Note! pCtl is host data, so trusted */
564 *pcbCmd = sizeof (*pCtl);
565 return VBVAEXHOST_DATA_TYPE_GUESTCTL;
566 }
567
568 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) <= VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
569 return VBVAEXHOST_DATA_TYPE_NO_DATA;
570
571 int rc = vboxVBVAExHPCmdGet(pCmdVbva, ppbCmd, pcbCmd);
572 switch (rc)
573 {
574 case VINF_SUCCESS:
575 return VBVAEXHOST_DATA_TYPE_CMD;
576 case VINF_EOF:
577 return VBVAEXHOST_DATA_TYPE_NO_DATA;
578 case VINF_TRY_AGAIN:
579 RTThreadSleep(1);
580 continue;
581 default:
582 /* This is something really unexpected, i.e. most likely the guest has written something incorrect to the VBVA buffer. */
583 WARN(("Warning: vboxVBVAExHPCmdGet returned unexpected status %Rrc\n", rc));
584 return VBVAEXHOST_DATA_TYPE_NO_DATA;
585 }
586 }
587 /* not reached */
588}
589
590/**
591 * Called by vboxVDMAWorkerThread to get the next command to process.
592 * @thread VDMA
593 */
594static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva,
595 uint8_t RT_UNTRUSTED_VOLATILE_GUEST **ppbCmd, uint32_t *pcbCmd)
596{
597 VBVAEXHOST_DATA_TYPE enmType = vboxVBVAExHPDataGetInner(pCmdVbva, ppbCmd, pcbCmd);
598 if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
599 {
600 vboxVBVAExHPHgEventClear(pCmdVbva);
601 vboxVBVAExHPProcessorRelease(pCmdVbva);
602
603 /*
604 * We need to prevent a race between us clearing the flag and the command check/submission thread, i.e.:
605 * 1. we check the queue -> and it is empty
606 * 2. the submitter adds a command to the queue
607 * 3. the submitter checks "processing" -> and it is true, thus it does not submit a notification
608 * 4. we clear the "processing" state
609 * 5. -> here we need to re-check the queue state to ensure we do not lose the notification for the above command
610 * 6. if the queue appears to be non-empty, set the "processing" state back to "true"
611 */
612 int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
613 if (RT_SUCCESS(rc))
614 {
615 /* we are the processor now */
616 enmType = vboxVBVAExHPDataGetInner(pCmdVbva, ppbCmd, pcbCmd);
617 if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
618 {
619 vboxVBVAExHPProcessorRelease(pCmdVbva);
620 return VBVAEXHOST_DATA_TYPE_NO_DATA;
621 }
622
623 vboxVBVAExHPHgEventSet(pCmdVbva);
624 }
625 }
626
627 return enmType;
628}
629
630/**
631 * Checks for pending VBVA command or (internal) control command.
632 */
633DECLINLINE(bool) vboxVBVAExHSHasCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
634{
635 VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *pVBVA = pCmdVbva->pVBVA;
636 if (pVBVA)
637 {
638 uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
639 uint32_t indexRecordFree = pVBVA->indexRecordFree;
640 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
641
642 if (indexRecordFirst != indexRecordFree)
643 return true;
644 }
645
646 return ASMAtomicReadU32(&pCmdVbva->u32cCtls) > 0;
647}
648
649/** Checks whether new commands are ready for processing.
650 * @retval VINF_SUCCESS there are commands in the queue, and the calling thread is now the processor
651 *         (i.e. typically it would delegate processing to a worker thread).
652 * @retval VINF_EOF no commands in the queue.
653 * @retval VINF_ALREADY_INITIALIZED another thread is already processing the commands.
654 * @retval VERR_INVALID_STATE the VBVA is paused or pausing. */
655static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
656{
657 int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
658 if (RT_SUCCESS(rc))
659 {
660 /* we are the processor now */
661 if (vboxVBVAExHSHasCommands(pCmdVbva))
662 {
663 vboxVBVAExHPHgEventSet(pCmdVbva);
664 return VINF_SUCCESS;
665 }
666
667 vboxVBVAExHPProcessorRelease(pCmdVbva);
668 return VINF_EOF;
669 }
670 if (rc == VERR_SEM_BUSY)
671 return VINF_ALREADY_INITIALIZED;
672 return VERR_INVALID_STATE;
673}
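/* Sketch of how a submitter consumes the VBoxVBVAExHSCheckCommands() status
 * (cf. vdmaVBVACtlSubmit() below): VINF_SUCCESS means we became the processor and must
 * wake the worker thread via VBoxVDMAThreadEventNotify(); VINF_ALREADY_INITIALIZED means
 * another thread is processing and will pick the command up; VINF_EOF means there is
 * nothing to do. */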
674
675/**
676 * Worker for vboxVDMAConstruct() that initializes the given VBVA host context.
677 */
678static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva)
679{
680 RT_ZERO(*pCmdVbva);
681 int rc = RTCritSectInit(&pCmdVbva->CltCritSect);
682 if (RT_SUCCESS(rc))
683 {
684# ifndef VBOXVDBG_MEMCACHE_DISABLE
685 rc = RTMemCacheCreate(&pCmdVbva->CtlCache, sizeof (VBVAEXHOSTCTL),
686 0, /* size_t cbAlignment */
687 UINT32_MAX, /* uint32_t cMaxObjects */
688 NULL, /* PFNMEMCACHECTOR pfnCtor*/
689 NULL, /* PFNMEMCACHEDTOR pfnDtor*/
690 NULL, /* void *pvUser*/
691 0 /* uint32_t fFlags*/
692 );
693 if (RT_SUCCESS(rc))
694# endif
695 {
696 RTListInit(&pCmdVbva->GuestCtlList);
697 RTListInit(&pCmdVbva->HostCtlList);
698 pCmdVbva->i32State = VBVAEXHOSTCONTEXT_STATE_PROCESSING;
699 pCmdVbva->i32EnableState = VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
700 return VINF_SUCCESS;
701 }
702# ifndef VBOXVDBG_MEMCACHE_DISABLE
703 WARN(("RTMemCacheCreate failed %Rrc\n", rc));
704# endif
705 }
706 else
707 WARN(("RTCritSectInit failed %Rrc\n", rc));
708
709 return rc;
710}
711
712/**
713 * Checks if VBVA state is some form of enabled.
714 */
715DECLINLINE(bool) VBoxVBVAExHSIsEnabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
716{
717 return ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) >= VBVAEXHOSTCONTEXT_ESTATE_PAUSED;
718}
719
720/**
721 * Checks if VBVA state is disabled.
722 */
723DECLINLINE(bool) VBoxVBVAExHSIsDisabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
724{
725 return ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) == VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
726}
727
728/**
729 * Worker for vdmaVBVAEnableProcess().
730 *
731 * @thread VDMA
732 */
733static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *pVBVA,
734 uint8_t *pbVRam, uint32_t cbVRam)
735{
736 if (VBoxVBVAExHSIsEnabled(pCmdVbva))
737 {
738 WARN(("VBVAEx is enabled already\n"));
739 return VERR_INVALID_STATE;
740 }
741
742 uintptr_t offVRam = (uintptr_t)pVBVA - (uintptr_t)pbVRam;
743 AssertLogRelMsgReturn(offVRam < cbVRam - sizeof(*pVBVA), ("%#p cbVRam=%#x\n", offVRam, cbVRam), VERR_OUT_OF_RANGE);
744 RT_UNTRUSTED_VALIDATED_FENCE();
745
746 pCmdVbva->pVBVA = pVBVA;
747 pCmdVbva->cbMaxData = cbVRam - offVRam - RT_UOFFSETOF(VBVABUFFER, au8Data);
748 pVBVA->hostFlags.u32HostEvents = 0;
749 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
750 return VINF_SUCCESS;
751}
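/* Worked example for the cbMaxData calculation above (hypothetical numbers): with a 16 MB
 * VRAM (cbVRam = 0x1000000), a VBVABUFFER placed at offVRam = 0xF00000 and au8Data
 * starting, say, 0x70 bytes into VBVABUFFER, commands may address at most
 * 0x1000000 - 0xF00000 - 0x70 = 0xFFF90 bytes relative to au8Data. */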
752
753/**
754 * Works the enable state.
755 * @thread VDMA, CR, EMT, ...
756 */
757static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
758{
759 if (VBoxVBVAExHSIsDisabled(pCmdVbva))
760 return VINF_SUCCESS;
761
762 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
763 return VINF_SUCCESS;
764}
765
766/**
767 * Worker for vboxVDMADestruct() and vboxVDMAConstruct().
768 */
769static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva)
770{
771 /* ensure the processor is stopped */
772 Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);
773
774 /* ensure no one tries to submit the command */
775 if (pCmdVbva->pVBVA)
776 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
777
778 Assert(RTListIsEmpty(&pCmdVbva->GuestCtlList));
779 Assert(RTListIsEmpty(&pCmdVbva->HostCtlList));
780
781 RTCritSectDelete(&pCmdVbva->CltCritSect);
782
783# ifndef VBOXVDBG_MEMCACHE_DISABLE
784 RTMemCacheDestroy(pCmdVbva->CtlCache);
785# endif
786
787 RT_ZERO(*pCmdVbva);
788}
789
790
791/**
792 * Worker for vboxVBVAExHSSaveStateLocked().
793 * @thread VDMA
794 */
795static int vboxVBVAExHSSaveGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
796{
797 RT_NOREF(pCmdVbva);
798 int rc = SSMR3PutU32(pSSM, pCtl->enmType);
799 AssertRCReturn(rc, rc);
800 rc = SSMR3PutU32(pSSM, pCtl->u.cmd.cbCmd);
801 AssertRCReturn(rc, rc);
802 rc = SSMR3PutU32(pSSM, (uint32_t)((uintptr_t)pCtl->u.cmd.pvCmd - (uintptr_t)pu8VramBase));
803 AssertRCReturn(rc, rc);
804
805 return VINF_SUCCESS;
806}
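/* The saved-state format produced above and consumed by vboxVBVAExHSLoadGuestCtl() below
 * is a sequence of records of three uint32_t values each: the control type (enmType), the
 * command size (cbCmd), and the command offset relative to the VRAM base. The sequence is
 * terminated by a zero type, written by vboxVBVAExHSSaveStateLocked(). */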
807
808/**
809 * Worker for VBoxVBVAExHSSaveState().
810 * @thread VDMA
811 */
812static int vboxVBVAExHSSaveStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
813{
814 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
815 {
816 WARN(("vbva not paused\n"));
817 return VERR_INVALID_STATE;
818 }
819
820 int rc;
821 VBVAEXHOSTCTL* pCtl;
822 RTListForEach(&pCmdVbva->GuestCtlList, pCtl, VBVAEXHOSTCTL, Node)
823 {
824 rc = vboxVBVAExHSSaveGuestCtl(pCmdVbva, pCtl, pu8VramBase, pSSM);
825 AssertRCReturn(rc, rc);
826 }
827
828 rc = SSMR3PutU32(pSSM, 0);
829 AssertRCReturn(rc, rc);
830
831 return VINF_SUCCESS;
832}
833
834/**
835 * Handles VBVAEXHOSTCTL_TYPE_HH_SAVESTATE for vboxVDMACrHostCtlProcess, saving
836 * state on the VDMA thread.
837 *
838 * @returns VBox status code, failing if the guest control commands cannot be saved.
839 * @thread VDMA
840 */
841static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
842{
843 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
844 AssertRCReturn(rc, rc);
845
846 rc = vboxVBVAExHSSaveStateLocked(pCmdVbva, pu8VramBase, pSSM);
847 if (RT_FAILURE(rc))
848 WARN(("vboxVBVAExHSSaveStateLocked failed %Rrc\n", rc));
849
850 RTCritSectLeave(&pCmdVbva->CltCritSect);
851 return rc;
852}
853
854
855/**
856 * Worker for vboxVBVAExHSLoadStateLocked.
857 * @retval VINF_EOF when there is nothing more to load.
858 * @thread VDMA
859 */
860static int vboxVBVAExHSLoadGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
861{
862 RT_NOREF(u32Version);
863 uint32_t u32;
864 int rc = SSMR3GetU32(pSSM, &u32);
865 AssertLogRelRCReturn(rc, rc);
866
867 if (!u32)
868 return VINF_EOF;
869
870 VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(pCmdVbva, (VBVAEXHOSTCTL_TYPE)u32);
871 if (!pHCtl)
872 {
873 WARN(("VBoxVBVAExHCtlCreate failed\n"));
874 return VERR_NO_MEMORY;
875 }
876
877 rc = SSMR3GetU32(pSSM, &u32);
878 AssertLogRelRCReturn(rc, rc);
879 pHCtl->u.cmd.cbCmd = u32;
880
881 rc = SSMR3GetU32(pSSM, &u32);
882 AssertLogRelRCReturn(rc, rc);
883 pHCtl->u.cmd.pvCmd = pu8VramBase + u32;
884
885 RTListAppend(&pCmdVbva->GuestCtlList, &pHCtl->Node);
886 ++pCmdVbva->u32cCtls;
887
888 return VINF_SUCCESS;
889}
890
891/**
892 * Worker for VBoxVBVAExHSLoadState.
893 * @thread VDMA
894 */
895static int vboxVBVAExHSLoadStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
896{
897 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
898 {
899 WARN(("vbva not stopped\n"));
900 return VERR_INVALID_STATE;
901 }
902
903 int rc;
904 do
905 {
906 rc = vboxVBVAExHSLoadGuestCtl(pCmdVbva, pu8VramBase, pSSM, u32Version);
907 AssertLogRelRCReturn(rc, rc);
908 } while (rc != VINF_EOF);
909
910 return VINF_SUCCESS;
911}
912
913/**
914 * Handles VBVAEXHOSTCTL_TYPE_HH_LOADSTATE for vboxVDMACrHostCtlProcess(),
915 * loading state on the VDMA thread.
916 *
917 * @returns VBox status code, failing if the saved state cannot be loaded.
918 * @thread VDMA
919 */
920static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
921{
922 Assert(VGA_SAVEDSTATE_VERSION_3D <= u32Version);
923 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
924 AssertRCReturn(rc, rc);
925
926 rc = vboxVBVAExHSLoadStateLocked(pCmdVbva, pu8VramBase, pSSM, u32Version);
927 if (RT_FAILURE(rc))
928 WARN(("vboxVBVAExHSSaveStateLocked failed %Rrc\n", rc));
929
930 RTCritSectLeave(&pCmdVbva->CltCritSect);
931 return rc;
932}
933
934
935
936/**
937 * Queues a control command to the VDMA worker thread.
938 *
939 * The @a enmSource argument decides which list (guest/host) it's queued on.
940 *
941 */
942static int VBoxVBVAExHCtlSubmit(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, VBVAEXHOSTCTL_SOURCE enmSource,
943 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
944{
945 int rc;
946 if (VBoxVBVAExHSIsEnabled(pCmdVbva))
947 {
948 pCtl->pfnComplete = pfnComplete;
949 pCtl->pvComplete = pvComplete;
950
951 rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
952 if (RT_SUCCESS(rc))
953 {
954 /* Recheck that we're enabled after we've got the lock. */
955 if (VBoxVBVAExHSIsEnabled(pCmdVbva))
956 {
957 /* Queue it. */
958 if (enmSource > VBVAEXHOSTCTL_SOURCE_GUEST)
959 RTListAppend(&pCmdVbva->HostCtlList, &pCtl->Node);
960 else
961 RTListAppend(&pCmdVbva->GuestCtlList, &pCtl->Node);
962 ASMAtomicIncU32(&pCmdVbva->u32cCtls);
963
964 RTCritSectLeave(&pCmdVbva->CltCritSect);
965
966 /* Check for pending commands; if we became the processor, the caller gets VINF_SUCCESS and notifies the worker. */
967 rc = VBoxVBVAExHSCheckCommands(pCmdVbva);
968 }
969 else
970 {
971 RTCritSectLeave(&pCmdVbva->CltCritSect);
972 Log(("cmd vbva not enabled (race)\n"));
973 rc = VERR_INVALID_STATE;
974 }
975 }
976 else
977 AssertRC(rc);
978 }
979 else
980 {
981 Log(("cmd vbva not enabled\n"));
982 rc = VERR_INVALID_STATE;
983 }
984 return rc;
985}
986
987/**
988 * Submits the control command and notifies the VDMA thread.
989 */
990static int vdmaVBVACtlSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL *pCtl, VBVAEXHOSTCTL_SOURCE enmSource,
991 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
992{
993 int rc = VBoxVBVAExHCtlSubmit(&pVdma->CmdVbva, pCtl, enmSource, pfnComplete, pvComplete);
994 if (RT_SUCCESS(rc))
995 {
996 if (rc == VINF_SUCCESS)
997 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
998 Assert(rc == VINF_ALREADY_INITIALIZED);
999 }
1000 else
1001 Log(("VBoxVBVAExHCtlSubmit failed %Rrc\n", rc));
1002
1003 return rc;
1004}
1005
1006
1007/**
1008 * Call VDMA thread creation notification callback.
1009 */
1010void VBoxVDMAThreadNotifyConstructSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
1011{
1012 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_CREATING);
1013 PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
1014 void *pvChanged = pThread->pvChanged;
1015
1016 pThread->pfnChanged = NULL;
1017 pThread->pvChanged = NULL;
1018
1019 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_CREATED);
1020
1021 if (pfnChanged)
1022 pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
1023}
1024
1025/**
1026 * Call VDMA thread termination notification callback.
1027 */
1028void VBoxVDMAThreadNotifyTerminatingSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
1029{
1030 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_TERMINATING);
1031 PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
1032 void *pvChanged = pThread->pvChanged;
1033
1034 pThread->pfnChanged = NULL;
1035 pThread->pvChanged = NULL;
1036
1037 if (pfnChanged)
1038 pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
1039}
1040
1041/**
1042 * Check if VDMA thread is terminating.
1043 */
1044DECLINLINE(bool) VBoxVDMAThreadIsTerminating(PVBOXVDMATHREAD pThread)
1045{
1046 return ASMAtomicUoReadU32(&pThread->u32State) == VBOXVDMATHREAD_STATE_TERMINATING;
1047}
1048
1049/**
1050 * Init VDMA thread.
1051 */
1052void VBoxVDMAThreadInit(PVBOXVDMATHREAD pThread)
1053{
1054 RT_ZERO(*pThread);
1055 pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
1056}
1057
1058/**
1059 * Clean up VDMA thread.
1060 */
1061int VBoxVDMAThreadCleanup(PVBOXVDMATHREAD pThread)
1062{
1063 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
1064 switch (u32State)
1065 {
1066 case VBOXVDMATHREAD_STATE_TERMINATED:
1067 return VINF_SUCCESS;
1068
1069 case VBOXVDMATHREAD_STATE_TERMINATING:
1070 {
1071 int rc = RTThreadWait(pThread->hWorkerThread, RT_INDEFINITE_WAIT, NULL);
1072 if (RT_SUCCESS(rc))
1073 {
1074 RTSemEventDestroy(pThread->hEvent);
1075 pThread->hEvent = NIL_RTSEMEVENT;
1076 pThread->hWorkerThread = NIL_RTTHREAD;
1077 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATED);
1078 }
1079 else
1080 WARN(("RTThreadWait failed %Rrc\n", rc));
1081 return rc;
1082 }
1083
1084 default:
1085 WARN(("invalid state"));
1086 return VERR_INVALID_STATE;
1087 }
1088}
1089
1090/**
1091 * Start VDMA thread.
1092 */
1093int VBoxVDMAThreadCreate(PVBOXVDMATHREAD pThread, PFNRTTHREAD pfnThread, void *pvThread,
1094 PFNVBOXVDMATHREAD_CHANGED pfnCreated, void *pvCreated)
1095{
1096 int rc = VBoxVDMAThreadCleanup(pThread);
1097 if (RT_SUCCESS(rc))
1098 {
1099 rc = RTSemEventCreate(&pThread->hEvent);
1100 pThread->u32State = VBOXVDMATHREAD_STATE_CREATING;
1101 pThread->pfnChanged = pfnCreated;
1102 pThread->pvChanged = pvCreated;
1103 rc = RTThreadCreate(&pThread->hWorkerThread, pfnThread, pvThread, 0, RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "VDMA");
1104 if (RT_SUCCESS(rc))
1105 return VINF_SUCCESS;
1106
1107 WARN(("RTThreadCreate failed %Rrc\n", rc));
1108 RTSemEventDestroy(pThread->hEvent);
1109 pThread->hEvent = NIL_RTSEMEVENT;
1110 pThread->hWorkerThread = NIL_RTTHREAD;
1111 pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
1112 }
1113 else
1114 WARN(("VBoxVDMAThreadCleanup failed %Rrc\n", rc));
1115 return rc;
1116}
1117
1118/**
1119 * Notifies the VDMA thread.
1120 * @thread !VDMA
1121 */
1122static int VBoxVDMAThreadEventNotify(PVBOXVDMATHREAD pThread)
1123{
1124 int rc = RTSemEventSignal(pThread->hEvent);
1125 AssertRC(rc);
1126 return rc;
1127}
1128
1129/**
1130 * State worker for VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD &
1131 * VBVAEXHOSTCTL_TYPE_GHH_DISABLE in vboxVDMACrHostCtlProcess(), and
1132 * VBVAEXHOSTCTL_TYPE_GHH_DISABLE in vboxVDMACrGuestCtlProcess().
1133 *
1134 * @thread VDMA
1135 */
1136static int VBoxVDMAThreadTerm(PVBOXVDMATHREAD pThread, PFNVBOXVDMATHREAD_CHANGED pfnTerminated, void *pvTerminated, bool fNotify)
1137{
1138 for (;;)
1139 {
1140 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
1141 switch (u32State)
1142 {
1143 case VBOXVDMATHREAD_STATE_CREATED:
1144 pThread->pfnChanged = pfnTerminated;
1145 pThread->pvChanged = pvTerminated;
1146 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATING);
1147 if (fNotify)
1148 {
1149 int rc = VBoxVDMAThreadEventNotify(pThread);
1150 AssertRC(rc);
1151 }
1152 return VINF_SUCCESS;
1153
1154 case VBOXVDMATHREAD_STATE_TERMINATING:
1155 case VBOXVDMATHREAD_STATE_TERMINATED:
1156 WARN(("thread is marked to termination or terminated\nn"));
1157 return VERR_INVALID_STATE;
1158
1159 case VBOXVDMATHREAD_STATE_CREATING:
1160 /* wait till the thread creation is completed */
1161 WARN(("concurrent thread create/destron\n"));
1162 RTThreadYield();
1163 continue;
1164
1165 default:
1166 WARN(("invalid state"));
1167 return VERR_INVALID_STATE;
1168 }
1169 }
1170}
1171
1172
1173
1174/*
1175 *
1176 *
1177 * vboxVDMACrCtlPost / vboxVDMACrCtlPostAsync
1178 * vboxVDMACrCtlPost / vboxVDMACrCtlPostAsync
1179 * vboxVDMACrCtlPost / vboxVDMACrCtlPostAsync
1180 *
1181 *
1182 */
1183
1184/** Completion callback for vboxVDMACrCtlPostAsync(). */
1185typedef DECLCALLBACK(void) FNVBOXVDMACRCTL_CALLBACK(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext);
1186/** Pointer to a vboxVDMACrCtlPostAsync completion callback. */
1187typedef FNVBOXVDMACRCTL_CALLBACK *PFNVBOXVDMACRCTL_CALLBACK;
1188
1189/**
1190 * Private wrapper around VBOXVDMACMD_CHROMIUM_CTL.
1191 */
1192typedef struct VBOXVDMACMD_CHROMIUM_CTL_PRIVATE
1193{
1194 uint32_t uMagic; /**< VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC */
1195 uint32_t cRefs;
1196 int32_t volatile rc;
1197 PFNVBOXVDMACRCTL_CALLBACK pfnCompletion;
1198 void *pvCompletion;
1199 RTSEMEVENT hEvtDone;
1200 VBOXVDMACMD_CHROMIUM_CTL Cmd;
1201} VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, *PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE;
1202/** Magic number for VBOXVDMACMD_CHROMIUM_CTL_PRIVATE (Michael Wolff). */
1203# define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC UINT32_C(0x19530827)
1204
1205/** Converts from a VBOXVDMACMD_CHROMIUM_CTL::Cmd pointer to a pointer to the
1206 * containing structure. */
1207# define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(_p) RT_FROM_MEMBER(_p, VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd)
1208
1209/**
1210 * Creates a VBOXVDMACMD_CHROMIUM_CTL_PRIVATE instance.
1211 */
1212static PVBOXVDMACMD_CHROMIUM_CTL vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE enmCmd, uint32_t cbCmd)
1213{
1214 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr;
1215 pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)RTMemAllocZ(cbCmd + RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd));
1216 if (pHdr)
1217 {
1218 pHdr->uMagic = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC;
1219 pHdr->cRefs = 1;
1220 pHdr->rc = VERR_NOT_IMPLEMENTED;
1221 pHdr->hEvtDone = NIL_RTSEMEVENT;
1222 pHdr->Cmd.enmType = enmCmd;
1223 pHdr->Cmd.cbCmd = cbCmd;
1224 return &pHdr->Cmd;
1225 }
1226 return NULL;
1227}
1228
1229/**
1230 * Releases a reference to a VBOXVDMACMD_CHROMIUM_CTL_PRIVATE instance.
1231 */
1232DECLINLINE(void) vboxVDMACrCtlRelease(PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1233{
1234 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1235 Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);
1236
1237 uint32_t cRefs = ASMAtomicDecU32(&pHdr->cRefs);
1238 if (!cRefs)
1239 {
1240 pHdr->uMagic = ~VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC;
1241 if (pHdr->hEvtDone != NIL_RTSEMEVENT)
1242 {
1243 RTSemEventDestroy(pHdr->hEvtDone);
1244 pHdr->hEvtDone = NIL_RTSEMEVENT;
1245 }
1246 RTMemFree(pHdr);
1247 }
1248}
1249
1250/**
1251 * Retains a reference to a VBOXVDMACMD_CHROMIUM_CTL_PRIVATE instance.
1252 */
1253DECLINLINE(void) vboxVDMACrCtlRetain(PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1254{
1255 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1256 Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);
1257
1258 uint32_t cRefs = ASMAtomicIncU32(&pHdr->cRefs);
1259 Assert(cRefs > 1);
1260 Assert(cRefs < _1K);
1261 RT_NOREF_PV(cRefs);
1262}
1263
1264/**
1265 * Gets the result from our private chromium control command.
1266 *
1267 * @returns status code.
1268 * @param pCmd The command.
1269 */
1270DECLINLINE(int) vboxVDMACrCtlGetRc(PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1271{
1272 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1273 Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);
1274 return pHdr->rc;
1275}
1276
1277/**
1278 * @interface_method_impl{PDMIDISPLAYVBVACALLBACKS,pfnCrHgsmiControlCompleteAsync,
1279 * Some indirect completion magic, you gotta love this code! }
1280 */
1281DECLCALLBACK(int) vboxVDMACrHgsmiControlCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CTL pCmd, int rc)
1282{
1283 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
1284 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1285 Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);
1286
1287 pHdr->rc = rc;
1288 if (pHdr->pfnCompletion)
1289 pHdr->pfnCompletion(pVGAState, pCmd, pHdr->pvCompletion);
1290 return VINF_SUCCESS;
1291}
1292
1293/**
1294 * @callback_method_impl{FNCRCTLCOMPLETION,
1295 * Completion callback for vboxVDMACrCtlPost. }
1296 */
1297static DECLCALLBACK(void) vboxVDMACrCtlCbSetEvent(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void *pvContext)
1298{
1299 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)pvContext;
1300 Assert(pHdr == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd));
1301 Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);
1302 RT_NOREF(pVGAState, pCmd);
1303
1304 int rc = RTSemEventSignal(pHdr->hEvtDone);
1305 AssertRC(rc);
1306
1307 vboxVDMACrCtlRelease(&pHdr->Cmd);
1308}
1309
1310/**
1311 * Worker for vboxVDMACrCtlPost().
1312 */
1313static int vboxVDMACrCtlPostAsync(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd,
1314 PFNVBOXVDMACRCTL_CALLBACK pfnCompletion, void *pvCompletion)
1315{
1316 if ( pVGAState->pDrv
1317 && pVGAState->pDrv->pfnCrHgsmiControlProcess)
1318 {
1319 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1320 pHdr->pfnCompletion = pfnCompletion;
1321 pHdr->pvCompletion = pvCompletion;
1322 pVGAState->pDrv->pfnCrHgsmiControlProcess(pVGAState->pDrv, pCmd, cbCmd);
1323 return VINF_SUCCESS;
1324 }
1325 return VERR_NOT_SUPPORTED;
1326}
1327
1328/**
1329 * Posts the command and waits for it to complete.
1330 */
1331static int vboxVDMACrCtlPost(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd)
1332{
1333 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1334
1335 /* Allocate the semaphore. */
1336 Assert(pHdr->hEvtDone == NIL_RTSEMEVENT);
1337 int rc = RTSemEventCreate(&pHdr->hEvtDone);
1338 AssertRCReturn(rc, rc);
1339
1340 /* Grab a reference for the completion routine. */
1341 vboxVDMACrCtlRetain(&pHdr->Cmd);
1342
1343 /* Submit and wait for it. */
1344 rc = vboxVDMACrCtlPostAsync(pVGAState, pCmd, cbCmd, vboxVDMACrCtlCbSetEvent, pHdr);
1345 if (RT_SUCCESS(rc))
1346 rc = RTSemEventWaitNoResume(pHdr->hEvtDone, RT_INDEFINITE_WAIT);
1347 else
1348 {
1349 if (rc != VERR_NOT_SUPPORTED)
1350 AssertRC(rc);
1351 vboxVDMACrCtlRelease(pCmd);
1352 }
1353 return rc;
1354}
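/* A minimal caller-side sketch of the command lifecycle (illustrative only):
 *
 *     PVBOXVDMACMD_CHROMIUM_CTL pCmd = vboxVDMACrCtlCreate(enmType, cbCmd);
 *     if (pCmd)
 *     {
 *         int rc = vboxVDMACrCtlPost(pVGAState, pCmd, cbCmd);
 *         if (RT_SUCCESS(rc))
 *             rc = vboxVDMACrCtlGetRc(pCmd); // result set by the completion callback
 *         vboxVDMACrCtlRelease(pCmd);       // drop the creation reference
 *     }
 */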
1355
1356
1357/**
1358 * Structure for passing data between vboxVDMACrHgcmSubmitSync() and the
1359 * completion routine vboxVDMACrHgcmSubmitSyncCompletion().
1360 */
1361typedef struct VDMA_VBVA_CTL_CYNC_COMPLETION
1362{
1363 int volatile rc;
1364 RTSEMEVENT hEvent;
1365} VDMA_VBVA_CTL_CYNC_COMPLETION;
1366
1367/**
1368 * @callback_method_impl{FNCRCTLCOMPLETION,
1369 * Completion callback for vboxVDMACrHgcmSubmitSync() that signals the
1370 * waiting thread.}
1371 */
1372static DECLCALLBACK(void) vboxVDMACrHgcmSubmitSyncCompletion(struct VBOXCRCMDCTL *pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
1373{
1374 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvCompletion;
1375 pData->rc = rc;
1376 rc = RTSemEventSignal(pData->hEvent);
1377 AssertLogRelRC(rc);
1378
1379 RT_NOREF(pCmd, cbCmd);
1380}
1381
1382/**
1383 * Worker for vboxVDMACrHgcmHandleEnable() and vdmaVBVAEnableProcess() that
1384 * works pVGAState->pDrv->pfnCrHgcmCtlSubmit.
1385 *
1386 * @thread VDMA
1387 */
1388static int vboxVDMACrHgcmSubmitSync(struct VBOXVDMAHOST *pVdma, VBOXCRCMDCTL* pCtl, uint32_t cbCtl)
1389{
1390 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
1391 Data.rc = VERR_NOT_IMPLEMENTED;
1392 int rc = RTSemEventCreate(&Data.hEvent);
1393 if (!RT_SUCCESS(rc))
1394 {
1395 WARN(("RTSemEventCreate failed %Rrc\n", rc));
1396 return rc;
1397 }
1398
1399 pCtl->CalloutList.List.pNext = NULL;
1400
1401 PVGASTATE pVGAState = pVdma->pVGAState;
1402 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCtl, cbCtl, vboxVDMACrHgcmSubmitSyncCompletion, &Data);
1403 if (RT_SUCCESS(rc))
1404 {
1405 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
1406 if (RT_SUCCESS(rc))
1407 {
1408 rc = Data.rc;
1409 if (!RT_SUCCESS(rc))
1410 {
1411 WARN(("pfnCrHgcmCtlSubmit command failed %Rrc\n", rc));
1412 }
1413
1414 }
1415 else
1416 WARN(("RTSemEventWait failed %Rrc\n", rc));
1417 }
1418 else
1419 WARN(("pfnCrHgcmCtlSubmit failed %Rrc\n", rc));
1420
1421
1422 RTSemEventDestroy(Data.hEvent);
1423
1424 return rc;
1425}
1426
1427
1428/**
1429 * Worker for vboxVDMAReset().
1430 */
1431static int vdmaVBVACtlDisableSync(PVBOXVDMAHOST pVdma)
1432{
1433 VBVAEXHOSTCTL HCtl;
1434 RT_ZERO(HCtl);
1435 HCtl.enmType = VBVAEXHOSTCTL_TYPE_GHH_DISABLE;
1436 int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1437 if (RT_SUCCESS(rc))
1438 vgaUpdateDisplayAll(pVdma->pVGAState, /* fFailOnResize = */ false);
1439 else
1440 Log(("vdmaVBVACtlSubmitSync failed %Rrc\n", rc));
1441 return rc;
1442}
1443
1444
1445/**
1446 * @interface_method_impl{VBOXCRCMDCTL_HGCMENABLE_DATA,pfnRHCmd,
1447 * Used by vboxVDMACrHgcmNotifyTerminatingCb() and called by
1448 * crVBoxServerCrCmdDisablePostProcess() during crServerTearDown() to drain
1449 * command queues or something.}
1450 */
1451static DECLCALLBACK(uint8_t *)
1452vboxVDMACrHgcmHandleEnableRemainingHostCommand(HVBOXCRCMDCTL_REMAINING_HOST_COMMAND hClient, uint32_t *pcbCtl, int prevCmdRc)
1453{
1454 struct VBOXVDMAHOST *pVdma = hClient;
1455
1456 if (!pVdma->pCurRemainingHostCtl)
1457 VBoxVBVAExHSDisable(&pVdma->CmdVbva); /* disable VBVA, all subsequent host commands will go HGCM way */
1458 else
1459 VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pVdma->pCurRemainingHostCtl, prevCmdRc);
1460
1461 pVdma->pCurRemainingHostCtl = VBoxVBVAExHPCheckHostCtlOnDisable(&pVdma->CmdVbva);
1462 if (pVdma->pCurRemainingHostCtl)
1463 {
1464 *pcbCtl = pVdma->pCurRemainingHostCtl->u.cmd.cbCmd;
1465 return (uint8_t *)pVdma->pCurRemainingHostCtl->u.cmd.pvCmd;
1466 }
1467
1468 *pcbCtl = 0;
1469 return NULL;
1470}
1471
1472/**
1473 * @interface_method_impl{VBOXCRCMDCTL_HGCMDISABLE_DATA,pfnNotifyTermDone,
1474 * Called by crServerTearDown().}
1475 */
1476static DECLCALLBACK(void) vboxVDMACrHgcmNotifyTerminatingDoneCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient)
1477{
1478# ifdef VBOX_STRICT
1479 struct VBOXVDMAHOST *pVdma = hClient;
1480 Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
1481 Assert(pVdma->Thread.u32State == VBOXVDMATHREAD_STATE_TERMINATING);
1482# else
1483 RT_NOREF(hClient);
1484# endif
1485}
1486
1487/**
1488 * @interface_method_impl{VBOXCRCMDCTL_HGCMDISABLE_DATA,pfnNotifyTerm,
1489 * Called by crServerTearDown().}
1490 */
1491static DECLCALLBACK(int) vboxVDMACrHgcmNotifyTerminatingCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient,
1492 VBOXCRCMDCTL_HGCMENABLE_DATA *pHgcmEnableData)
1493{
1494 struct VBOXVDMAHOST *pVdma = hClient;
1495
1496 VBVAEXHOSTCTL HCtl;
1497 RT_ZERO(HCtl);
1498 HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD;
1499 int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1500
1501 pHgcmEnableData->hRHCmd = pVdma;
1502 pHgcmEnableData->pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1503
1504 if (rc == VERR_INVALID_STATE)
1505 rc = VINF_SUCCESS;
1506 else if (RT_FAILURE(rc))
1507 WARN(("vdmaVBVACtlSubmitSync failed %Rrc\n", rc));
1508
1509 return rc;
1510}
1511
1512/**
1513 * Worker for vdmaVBVAEnableProcess() and vdmaVBVADisableProcess().
1514 *
1515 * @thread VDMA
1516 */
1517static int vboxVDMACrHgcmHandleEnable(struct VBOXVDMAHOST *pVdma)
1518{
1519 VBOXCRCMDCTL_ENABLE Enable;
1520 RT_ZERO(Enable);
1521 Enable.Hdr.enmType = VBOXCRCMDCTL_TYPE_ENABLE;
1522 Enable.Data.hRHCmd = pVdma;
1523 Enable.Data.pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1524
1525 int rc = vboxVDMACrHgcmSubmitSync(pVdma, &Enable.Hdr, sizeof (Enable));
1526 Assert(!pVdma->pCurRemainingHostCtl);
1527 if (RT_SUCCESS(rc))
1528 {
1529 Assert(!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1530 return VINF_SUCCESS;
1531 }
1532
1533 Assert(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1534 WARN(("vboxVDMACrHgcmSubmitSync failed %Rrc\n", rc));
1535 return rc;
1536}
1537
1538/**
1539 * Handles VBVAEXHOSTCTL_TYPE_GHH_ENABLE and VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED
1540 * for vboxVDMACrGuestCtlProcess().
1541 *
1542 * @thread VDMA
1543 */
1544static int vdmaVBVAEnableProcess(struct VBOXVDMAHOST *pVdma, uint32_t u32Offset)
1545{
1546 if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1547 {
1548 WARN(("vdma VBVA is already enabled\n"));
1549 return VERR_INVALID_STATE;
1550 }
1551
1552 VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *pVBVA
1553 = (VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *)HGSMIOffsetToPointerHost(pVdma->pHgsmi, u32Offset);
1554 if (!pVBVA)
1555 {
1556 WARN(("invalid offset %d (%#x)\n", u32Offset, u32Offset));
1557 return VERR_INVALID_PARAMETER;
1558 }
1559
1560 int rc = VBoxVBVAExHSEnable(&pVdma->CmdVbva, pVBVA, pVdma->pVGAState->vram_ptrR3, pVdma->pVGAState->vram_size);
1561 if (RT_SUCCESS(rc))
1562 {
1563 if (!pVdma->CrSrvInfo.pfnEnable)
1564 {
1565 /* "HGCM-less" mode. All inited. */
1566 return VINF_SUCCESS;
1567 }
1568
1569 VBOXCRCMDCTL_DISABLE Disable;
1570 Disable.Hdr.enmType = VBOXCRCMDCTL_TYPE_DISABLE;
1571 Disable.Data.hNotifyTerm = pVdma;
1572 Disable.Data.pfnNotifyTerm = vboxVDMACrHgcmNotifyTerminatingCb;
1573 Disable.Data.pfnNotifyTermDone = vboxVDMACrHgcmNotifyTerminatingDoneCb;
1574 rc = vboxVDMACrHgcmSubmitSync(pVdma, &Disable.Hdr, sizeof (Disable));
1575 if (RT_SUCCESS(rc))
1576 {
1577 PVGASTATE pVGAState = pVdma->pVGAState;
1578 VBOXCRCMD_SVRENABLE_INFO Info;
1579 Info.hCltScr = pVGAState->pDrv;
1580 Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
1581 Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
1582 Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
1583 rc = pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
1584 if (RT_SUCCESS(rc))
1585 return VINF_SUCCESS;
1586
1587 WARN(("pfnEnable failed %Rrc\n", rc));
1588 vboxVDMACrHgcmHandleEnable(pVdma);
1589 }
1590 else
1591 WARN(("vboxVDMACrHgcmSubmitSync failed %Rrc\n", rc));
1592
1593 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1594 }
1595 else
1596 WARN(("VBoxVBVAExHSEnable failed %Rrc\n", rc));
1597
1598 return rc;
1599}
1600
1601/**
1602 * Worker for several vboxVDMACrHostCtlProcess() commands.
1603 *
1604 * @returns IPRT status code.
1605 * @param pVdma The VDMA channel.
1606 * @param fDoHgcmEnable ???
1607 * @thread VDMA
1608 */
1609static int vdmaVBVADisableProcess(struct VBOXVDMAHOST *pVdma, bool fDoHgcmEnable)
1610{
1611 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1612 {
1613 Log(("vdma VBVA is already disabled\n"));
1614 return VINF_SUCCESS;
1615 }
1616
1617 if (!pVdma->CrSrvInfo.pfnDisable)
1618 {
1619 /* "HGCM-less" mode. Just undo what vdmaVBVAEnableProcess did. */
1620 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1621 return VINF_SUCCESS;
1622 }
1623
1624 int rc = pVdma->CrSrvInfo.pfnDisable(pVdma->CrSrvInfo.hSvr);
1625 if (RT_SUCCESS(rc))
1626 {
1627 if (fDoHgcmEnable)
1628 {
1629 PVGASTATE pVGAState = pVdma->pVGAState;
1630
1631 /* Disabling is a bit tricky:
1632 * we need to ensure the host ctl commands do not come out of order,
1633 * and do not come over the HGCM channel until after it is enabled. */
1634 rc = vboxVDMACrHgcmHandleEnable(pVdma);
1635 if (RT_SUCCESS(rc))
1636 {
1637 vdmaVBVANotifyDisable(pVGAState);
1638 return VINF_SUCCESS;
1639 }
1640
1641 VBOXCRCMD_SVRENABLE_INFO Info;
1642 Info.hCltScr = pVGAState->pDrv;
1643 Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
1644 Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
1645 Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
1646 pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info); /** @todo ignoring return code */
1647 }
1648 }
1649 else
1650 WARN(("pfnDisable failed %Rrc\n", rc));
1651
1652 return rc;
1653}
1654
1655/**
1656 * Handles VBVAEXHOST_DATA_TYPE_HOSTCTL for vboxVDMAWorkerThread.
1657 *
1658 * @returns VBox status code.
1659 * @param pVdma The VDMA channel.
1660 * @param pCmd The control command to process. Should be
1661 * safe, i.e. not shared with guest.
1662 * @param pfContinue Where to return whether to continue or not.
1663 * @thread VDMA
1664 */
1665static int vboxVDMACrHostCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd, bool *pfContinue)
1666{
1667 *pfContinue = true;
1668
1669 int rc;
1670 switch (pCmd->enmType)
1671 {
1672 /*
1673 * See vdmaVBVACtlOpaqueHostSubmit() and its callers.
1674 */
1675 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1676 if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1677 {
1678 if (pVdma->CrSrvInfo.pfnHostCtl)
1679 return pVdma->CrSrvInfo.pfnHostCtl(pVdma->CrSrvInfo.hSvr, (uint8_t *)pCmd->u.cmd.pvCmd, pCmd->u.cmd.cbCmd);
1680 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1681 }
1682 else
1683 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for HGCM-less mode\n"));
1684 return VERR_INVALID_STATE;
1685
1686 /*
1687 * See vdmaVBVACtlDisableSync().
1688 */
1689 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1690 rc = vdmaVBVADisableProcess(pVdma, true /* fDoHgcmEnable */);
1691 if (RT_SUCCESS(rc))
1692 rc = VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false /* fNotify */ );
1693 else
1694 WARN(("vdmaVBVADisableProcess failed %Rrc\n", rc));
1695 return rc;
1696
1697 /*
1698 * See vboxVDMACrHgcmNotifyTerminatingCb().
1699 */
1700 case VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD:
1701 rc = vdmaVBVADisableProcess(pVdma, false /* fDoHgcmEnable */);
1702 if (RT_SUCCESS(rc))
1703 {
1704 rc = VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, true /* fNotify */);
1705 if (RT_SUCCESS(rc))
1706 *pfContinue = false;
1707 else
1708 WARN(("VBoxVDMAThreadTerm failed %Rrc\n", rc));
1709 }
1710 else
1711 WARN(("vdmaVBVADisableProcess failed %Rrc\n", rc));
1712 return rc;
1713
1714 /*
1715 * See vboxVDMASaveStateExecPerform().
1716 */
1717 case VBVAEXHOSTCTL_TYPE_HH_SAVESTATE:
1718 rc = VBoxVBVAExHSSaveState(&pVdma->CmdVbva, pVdma->pVGAState->vram_ptrR3, pCmd->u.state.pSSM);
1719 if (RT_SUCCESS(rc))
1720 {
1721 VGA_SAVED_STATE_PUT_MARKER(pCmd->u.state.pSSM, 4);
1722 if (pVdma->CrSrvInfo.pfnSaveState)
1723 rc = pVdma->CrSrvInfo.pfnSaveState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM);
1724 }
1725 else
1726 WARN(("VBoxVBVAExHSSaveState failed %Rrc\n", rc));
1727 return rc;
1728
1729 /*
1730 * See vboxVDMASaveLoadExecPerform().
1731 */
1732 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE:
1733 rc = VBoxVBVAExHSLoadState(&pVdma->CmdVbva, pVdma->pVGAState->vram_ptrR3, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1734 if (RT_SUCCESS(rc))
1735 {
1736 VGA_SAVED_STATE_GET_MARKER_RETURN_ON_MISMATCH(pCmd->u.state.pSSM, pCmd->u.state.u32Version, 4);
1737 if (pVdma->CrSrvInfo.pfnLoadState)
1738 {
1739 rc = pVdma->CrSrvInfo.pfnLoadState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1740 if (RT_FAILURE(rc))
1741 WARN(("pfnLoadState failed %Rrc\n", rc));
1742 }
1743 }
1744 else
1745 WARN(("VBoxVBVAExHSLoadState failed %Rrc\n", rc));
1746 return rc;
1747
1748 /*
1749 * See vboxVDMASaveLoadDone().
1750 */
1751 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE:
1752 {
1753 PVGASTATE pVGAState = pVdma->pVGAState;
1754 for (uint32_t i = 0; i < pVGAState->cMonitors; ++i)
1755 {
1756 VBVAINFOSCREEN CurScreen;
1757 VBVAINFOVIEW CurView;
1758 rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
1759 AssertLogRelMsgRCReturn(rc, ("VBVAGetInfoViewAndScreen [screen #%u] -> %#x\n", i, rc), rc);
1760
1761 rc = VBVAInfoScreen(pVGAState, &CurScreen);
1762 AssertLogRelMsgRCReturn(rc, ("VBVAInfoScreen [screen #%u] -> %#x\n", i, rc), rc);
1763 }
1764
1765 return VINF_SUCCESS;
1766 }
1767
1768 default:
1769 WARN(("unexpected host ctl type %d\n", pCmd->enmType));
1770 return VERR_INVALID_PARAMETER;
1771 }
1772}
1773
1774/**
1775 * Worker for vboxVDMACrGuestCtlResizeEntryProcess.
1776 *
1777 * @returns VINF_SUCCESS or VERR_INVALID_PARAMETER.
1778 * @param pVGAState The VGA device state.
1779 * @param pScreen The screen info (safe copy).
1780 */
1781static int vboxVDMASetupScreenInfo(PVGASTATE pVGAState, VBVAINFOSCREEN *pScreen)
1782{
1783 const uint32_t idxView = pScreen->u32ViewIndex;
1784 const uint16_t fFlags = pScreen->u16Flags;
1785
1786 if (fFlags & VBVA_SCREEN_F_DISABLED)
1787 {
1788 if ( idxView < pVGAState->cMonitors
1789 || idxView == UINT32_C(0xFFFFFFFF))
1790 {
1791 RT_UNTRUSTED_VALIDATED_FENCE();
1792
1793 RT_ZERO(*pScreen);
1794 pScreen->u32ViewIndex = idxView;
1795 pScreen->u16Flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED;
1796 return VINF_SUCCESS;
1797 }
1798 }
1799 else
1800 {
1801 if (fFlags & VBVA_SCREEN_F_BLANK2)
1802 {
1803 if ( idxView >= pVGAState->cMonitors
1804 && idxView != UINT32_C(0xFFFFFFFF))
1805 return VERR_INVALID_PARAMETER;
1806 RT_UNTRUSTED_VALIDATED_FENCE();
1807
            /* Special case for blanking using the current video mode.
             * Only the 'u16Flags' and 'u32ViewIndex' fields are relevant. */
1811 RT_ZERO(*pScreen);
1812 pScreen->u32ViewIndex = idxView;
1813 pScreen->u16Flags = fFlags;
1814 return VINF_SUCCESS;
1815 }
1816
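        /* Example of what the checks below enforce (hypothetical numbers):
         * a 1920x1080 mode at 32bpp needs u32LineSize >= 1920 * 4 = 7680, and
         * the whole surface (u32StartOffset + 7680 * 1080 bytes) must lie
         * within the VRAM aperture. */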
1817 if ( idxView < pVGAState->cMonitors
1818 && pScreen->u16BitsPerPixel <= 32
1819 && pScreen->u32Width <= UINT16_MAX
1820 && pScreen->u32Height <= UINT16_MAX
1821 && pScreen->u32LineSize <= UINT16_MAX * 4)
1822 {
1823 const uint32_t u32BytesPerPixel = (pScreen->u16BitsPerPixel + 7) / 8;
            if (pScreen->u32Width <= pScreen->u32LineSize / (u32BytesPerPixel ? u32BytesPerPixel : 1))
1825 {
1826 const uint64_t u64ScreenSize = (uint64_t)pScreen->u32LineSize * pScreen->u32Height;
1827 if ( pScreen->u32StartOffset <= pVGAState->vram_size
1828 && u64ScreenSize <= pVGAState->vram_size
1829 && pScreen->u32StartOffset <= pVGAState->vram_size - (uint32_t)u64ScreenSize)
1830 return VINF_SUCCESS;
1831 }
1832 }
1833 }
1834
1835 LogFunc(("Failed\n"));
1836 return VERR_INVALID_PARAMETER;
1837}
1838
1839/**
 * Handles one entry in a VBVAEXHOSTCTL_TYPE_GHH_RESIZE command.
1841 *
1842 * @returns IPRT status code.
1843 * @param pVdma The VDMA channel
1844 * @param pEntry The entry to handle. Considered volatile.
1845 *
1846 * @thread VDMA
1847 */
1848static int vboxVDMACrGuestCtlResizeEntryProcess(struct VBOXVDMAHOST *pVdma,
1849 VBOXCMDVBVA_RESIZE_ENTRY RT_UNTRUSTED_VOLATILE_GUEST *pEntry)
1850{
1851 PVGASTATE pVGAState = pVdma->pVGAState;
1852
1853 VBVAINFOSCREEN Screen;
1854 RT_COPY_VOLATILE(Screen, pEntry->Screen);
1855 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
1856
1857 /* Verify and cleanup local copy of the input data. */
1858 int rc = vboxVDMASetupScreenInfo(pVGAState, &Screen);
1859 if (RT_FAILURE(rc))
1860 {
1861 WARN(("invalid screen data\n"));
1862 return rc;
1863 }
1864 RT_UNTRUSTED_VALIDATED_FENCE();
1865
1866 VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
1867 RT_BCOPY_VOLATILE(aTargetMap, pEntry->aTargetMap, sizeof(aTargetMap));
1868 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
1869
1870 ASMBitClearRange(aTargetMap, pVGAState->cMonitors, VBOX_VIDEO_MAX_SCREENS);
1871
1872 if (pVdma->CrSrvInfo.pfnResize)
1873 {
1874 /* Also inform the HGCM service, if it is there. */
1875 rc = pVdma->CrSrvInfo.pfnResize(pVdma->CrSrvInfo.hSvr, &Screen, aTargetMap);
1876 if (RT_FAILURE(rc))
1877 {
1878 WARN(("pfnResize failed %Rrc\n", rc));
1879 return rc;
1880 }
1881 }
1882
1883 /* A fake view which contains the current screen for the 2D VBVAInfoView. */
1884 VBVAINFOVIEW View;
1885 View.u32ViewOffset = 0;
1886 View.u32ViewSize = Screen.u32LineSize * Screen.u32Height + Screen.u32StartOffset;
1887 View.u32MaxScreenSize = Screen.u32LineSize * Screen.u32Height;
1888
1889 const bool fDisable = RT_BOOL(Screen.u16Flags & VBVA_SCREEN_F_DISABLED);
1890
1891 for (int i = ASMBitFirstSet(aTargetMap, pVGAState->cMonitors);
1892 i >= 0;
1893 i = ASMBitNextSet(aTargetMap, pVGAState->cMonitors, i))
1894 {
1895 Screen.u32ViewIndex = i;
1896
1897 VBVAINFOSCREEN CurScreen;
1898 VBVAINFOVIEW CurView;
1899
1900 rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
1901 AssertRC(rc);
1902
1903 if (!memcmp(&Screen, &CurScreen, sizeof (CurScreen)))
1904 continue;
1905
1906 /* The view does not change if _BLANK2 is set. */
1907 if ( (!fDisable || !CurView.u32ViewSize)
1908 && !RT_BOOL(Screen.u16Flags & VBVA_SCREEN_F_BLANK2))
1909 {
1910 View.u32ViewIndex = Screen.u32ViewIndex;
1911
1912 rc = VBVAInfoView(pVGAState, &View);
1913 if (RT_FAILURE(rc))
1914 {
1915 WARN(("VBVAInfoView failed %Rrc\n", rc));
1916 break;
1917 }
1918 }
1919
1920 rc = VBVAInfoScreen(pVGAState, &Screen);
1921 if (RT_FAILURE(rc))
1922 {
1923 WARN(("VBVAInfoScreen failed %Rrc\n", rc));
1924 break;
1925 }
1926 }
1927
1928 return rc;
1929}
1930
1931
1932/**
1933 * Processes VBVAEXHOST_DATA_TYPE_GUESTCTL for vboxVDMAWorkerThread and
1934 * vdmaVBVACtlThreadCreatedEnable.
1935 *
1936 * @returns VBox status code.
1937 * @param pVdma The VDMA channel.
 * @param pCmd The command to process. May be safe, i.e. not shared
 * with the guest.
1940 *
1941 * @thread VDMA
1942 */
1943static int vboxVDMACrGuestCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd)
1944{
1945 VBVAEXHOSTCTL_TYPE enmType = pCmd->enmType;
1946 switch (enmType)
1947 {
1948 /*
1949 * See handling of VBOXCMDVBVACTL_TYPE_3DCTL in vboxCmdVBVACmdCtl().
1950 */
1951 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1952 ASSERT_GUEST_LOGREL_RETURN(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva), VERR_INVALID_STATE);
1953 ASSERT_GUEST_LOGREL_RETURN(pVdma->CrSrvInfo.pfnGuestCtl, VERR_INVALID_STATE);
1954 return pVdma->CrSrvInfo.pfnGuestCtl(pVdma->CrSrvInfo.hSvr,
1955 (uint8_t RT_UNTRUSTED_VOLATILE_GUEST *)pCmd->u.cmd.pvCmd,
1956 pCmd->u.cmd.cbCmd);
1957
1958 /*
1959 * See handling of VBOXCMDVBVACTL_TYPE_RESIZE in vboxCmdVBVACmdCtl().
1960 */
1961 case VBVAEXHOSTCTL_TYPE_GHH_RESIZE:
1962 {
1963 ASSERT_GUEST_RETURN(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva), VERR_INVALID_STATE);
1964 uint32_t cbCmd = pCmd->u.cmd.cbCmd;
1965 ASSERT_GUEST_LOGREL_MSG_RETURN( !(cbCmd % sizeof(VBOXCMDVBVA_RESIZE_ENTRY))
1966 && cbCmd > 0,
1967 ("cbCmd=%#x\n", cbCmd), VERR_INVALID_PARAMETER);
1968
1969 uint32_t const cElements = cbCmd / sizeof(VBOXCMDVBVA_RESIZE_ENTRY);
1970 VBOXCMDVBVA_RESIZE RT_UNTRUSTED_VOLATILE_GUEST *pResize
1971 = (VBOXCMDVBVA_RESIZE RT_UNTRUSTED_VOLATILE_GUEST *)pCmd->u.cmd.pvCmd;
1972 for (uint32_t i = 0; i < cElements; ++i)
1973 {
1974 VBOXCMDVBVA_RESIZE_ENTRY RT_UNTRUSTED_VOLATILE_GUEST *pEntry = &pResize->aEntries[i];
1975 int rc = vboxVDMACrGuestCtlResizeEntryProcess(pVdma, pEntry);
1976 ASSERT_GUEST_LOGREL_MSG_RC_RETURN(rc, ("vboxVDMACrGuestCtlResizeEntryProcess failed for #%u: %Rrc\n", i, rc), rc);
1977 }
1978 return VINF_SUCCESS;
1979 }
1980
1981 /*
1982 * See vdmaVBVACtlEnableSubmitInternal().
1983 */
1984 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE:
1985 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED:
1986 {
1987 ASSERT_GUEST(pCmd->u.cmd.cbCmd == sizeof(VBVAENABLE));
1988
1989 VBVAENABLE RT_UNTRUSTED_VOLATILE_GUEST *pEnable = (VBVAENABLE RT_UNTRUSTED_VOLATILE_GUEST *)pCmd->u.cmd.pvCmd;
1990 uint32_t const u32Offset = pEnable->u32Offset;
1991 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
1992
1993 int rc = vdmaVBVAEnableProcess(pVdma, u32Offset);
1994 ASSERT_GUEST_MSG_RC_RETURN(rc, ("vdmaVBVAEnableProcess -> %Rrc\n", rc), rc);
1995
1996 if (enmType == VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED)
1997 {
1998 rc = VBoxVBVAExHPPause(&pVdma->CmdVbva);
1999 ASSERT_GUEST_MSG_RC_RETURN(rc, ("VBoxVBVAExHPPause -> %Rrc\n", rc), rc);
2000 }
2001 return VINF_SUCCESS;
2002 }
2003
2004 /*
2005 * See vdmaVBVACtlDisableSubmitInternal().
2006 */
2007 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
2008 {
2009 int rc = vdmaVBVADisableProcess(pVdma, true /* fDoHgcmEnable */);
2010 ASSERT_GUEST_MSG_RC_RETURN(rc, ("vdmaVBVADisableProcess -> %Rrc\n", rc), rc);
2011
2012 /* do vgaUpdateDisplayAll right away */
2013 VMR3ReqCallNoWait(PDMDevHlpGetVM(pVdma->pVGAState->pDevInsR3), VMCPUID_ANY,
2014 (PFNRT)vgaUpdateDisplayAll, 2, pVdma->pVGAState, /* fFailOnResize = */ false);
2015
2016 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false /* fNotify */);
2017 }
2018
2019 default:
2020 ASSERT_GUEST_LOGREL_MSG_FAILED(("unexpected ctl type %d\n", enmType));
2021 return VERR_INVALID_PARAMETER;
2022 }
2023}
2024
2025
2026/**
2027 * Copies one page in a VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER command.
2028 *
 * @returns VBox status code.
 * @param pDevIns The device instance.
 * @param uPageNo The guest page number to transfer.
 * @param pbVram Pointer into VRAM for this page.
 * @param fIn The direction is VRAM-relative: true means transfer into
 * VRAM (page in), false means transfer out of VRAM (page out).
 * @thread VDMA
2033 */
2034static int vboxVDMACrCmdVbvaProcessPagingEl(PPDMDEVINS pDevIns, VBOXCMDVBVAPAGEIDX uPageNo, uint8_t *pbVram, bool fIn)
2035{
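    /* The guest hands us a page index rather than an address; with 4 KiB
     * pages, e.g. index 0x1234 corresponds to guest physical 0x1234000. */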
2036 RTGCPHYS GCPhysPage = (RTGCPHYS)uPageNo << X86_PAGE_SHIFT;
2037 PGMPAGEMAPLOCK Lock;
2038
2039 if (fIn)
2040 {
2041 const void *pvPage;
2042 int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, GCPhysPage, 0, &pvPage, &Lock);
2043 ASSERT_GUEST_LOGREL_MSG_RC_RETURN(rc, ("PDMDevHlpPhysGCPhys2CCPtrReadOnly %RGp -> %Rrc\n", GCPhysPage, rc), rc);
2044
2045 memcpy(pbVram, pvPage, PAGE_SIZE);
2046 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2047 }
2048 else
2049 {
2050 void *pvPage;
2051 int rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, GCPhysPage, 0, &pvPage, &Lock);
2052 ASSERT_GUEST_LOGREL_MSG_RC_RETURN(rc, ("PDMDevHlpPhysGCPhys2CCPtr %RGp -> %Rrc\n", GCPhysPage, rc), rc);
2053
2054 memcpy(pvPage, pbVram, PAGE_SIZE);
2055 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2056 }
2057
2058 return VINF_SUCCESS;
2059}
2060
2061/**
2062 * Handles a VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER command.
2063 *
2064 * @return 0 on success, -1 on failure.
2065 *
2066 * @thread VDMA
2067 */
2068static int8_t vboxVDMACrCmdVbvaPageTransfer(PVGASTATE pVGAState, VBOXCMDVBVA_HDR const RT_UNTRUSTED_VOLATILE_GUEST *pHdr,
2069 uint32_t cbCmd, const VBOXCMDVBVA_PAGING_TRANSFER_DATA RT_UNTRUSTED_VOLATILE_GUEST *pData)
2070{
2071 /*
2072 * Extract and validate information.
2073 */
2074 ASSERT_GUEST_MSG_RETURN(cbCmd >= sizeof(VBOXCMDVBVA_PAGING_TRANSFER), ("%#x\n", cbCmd), -1);
2075
2076 bool const fIn = RT_BOOL(pHdr->u8Flags & VBOXCMDVBVA_OPF_PAGING_TRANSFER_IN);
2077 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2078
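    /* Layout: a fixed VBOXCMDVBVA_PAGING_TRANSFER header followed by a
     * variable-length array of page indices; everything past aPageNumbers
     * is that array. */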
2079 uint32_t cbPageNumbers = cbCmd - RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers);
2080 ASSERT_GUEST_MSG_RETURN(!(cbPageNumbers % sizeof(VBOXCMDVBVAPAGEIDX)), ("%#x\n", cbPageNumbers), -1);
2081 VBOXCMDVBVAPAGEIDX const cPages = cbPageNumbers / sizeof(VBOXCMDVBVAPAGEIDX);
2082
2083 VBOXCMDVBVAOFFSET offVRam = pData->Alloc.u.offVRAM;
2084 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2085 ASSERT_GUEST_MSG_RETURN(!(offVRam & X86_PAGE_OFFSET_MASK), ("%#x\n", offVRam), -1);
2086 ASSERT_GUEST_MSG_RETURN(offVRam < pVGAState->vram_size, ("%#x vs %#x\n", offVRam, pVGAState->vram_size), -1);
2087 uint32_t cVRamPages = (pVGAState->vram_size - offVRam) >> X86_PAGE_SHIFT;
2088 ASSERT_GUEST_MSG_RETURN(cPages <= cVRamPages, ("cPages=%#x vs cVRamPages=%#x @ offVRam=%#x\n", cPages, cVRamPages, offVRam), -1);
2089
2090 RT_UNTRUSTED_VALIDATED_FENCE();
2091
2092 /*
2093 * Execute the command.
2094 */
2095 uint8_t *pbVRam = (uint8_t *)pVGAState->vram_ptrR3 + offVRam;
2096 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbVRam += X86_PAGE_SIZE)
2097 {
2098 uint32_t uPageNo = pData->aPageNumbers[iPage];
2099 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2100 int rc = vboxVDMACrCmdVbvaProcessPagingEl(pVGAState->pDevInsR3, uPageNo, pbVRam, fIn);
2101 ASSERT_GUEST_MSG_RETURN(RT_SUCCESS(rc), ("#%#x: uPageNo=%#x rc=%Rrc\n", iPage, uPageNo, rc), -1);
2102 }
2103 return 0;
2104}
2105
2106
2107/**
2108 * Handles VBOXCMDVBVA_OPTYPE_PAGING_FILL.
2109 *
2110 * @returns 0 on success, -1 on failure.
2111 * @param pVGAState The VGA state.
2112 * @param pFill The fill command (volatile).
2113 *
2114 * @thread VDMA
2115 */
2116static int8_t vboxVDMACrCmdVbvaPagingFill(PVGASTATE pVGAState, VBOXCMDVBVA_PAGING_FILL RT_UNTRUSTED_VOLATILE_GUEST *pFill)
2117{
2118 /*
2119 * Copy and validate input.
2120 */
2121 VBOXCMDVBVA_PAGING_FILL FillSafe;
2122 RT_COPY_VOLATILE(FillSafe, *pFill);
2123 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2124
2125 VBOXCMDVBVAOFFSET offVRAM = FillSafe.offVRAM;
2126 ASSERT_GUEST_MSG_RETURN(!(offVRAM & X86_PAGE_OFFSET_MASK), ("offVRAM=%#x\n", offVRAM), -1);
2127 ASSERT_GUEST_MSG_RETURN(offVRAM <= pVGAState->vram_size, ("offVRAM=%#x\n", offVRAM), -1);
2128
2129 uint32_t cbFill = FillSafe.u32CbFill;
2130 ASSERT_GUEST_STMT(!(cbFill & 3), cbFill &= ~(uint32_t)3);
2131 ASSERT_GUEST_MSG_RETURN( cbFill < pVGAState->vram_size
2132 && offVRAM <= pVGAState->vram_size - cbFill,
2133 ("offVRAM=%#x cbFill=%#x\n", offVRAM, cbFill), -1);
2134
2135 RT_UNTRUSTED_VALIDATED_FENCE();
2136
2137 /*
2138 * Execute.
2139 */
2140 uint32_t *pu32Vram = (uint32_t *)((uint8_t *)pVGAState->vram_ptrR3 + offVRAM);
2141 uint32_t const u32Color = FillSafe.u32Pattern;
2142
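    /* Store the 32-bit pattern into every dword of the target area; the loop
     * runs backwards, which is equivalent since the stores are independent. */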
2143 uint32_t cLoops = cbFill / 4;
2144 while (cLoops-- > 0)
2145 pu32Vram[cLoops] = u32Color;
2146
2147 return 0;
2148}
2149
2150/**
2151 * Process command data.
2152 *
 * @returns Zero or positive on success, negative on failure.
2154 * @param pVdma The VDMA channel.
2155 * @param pCmd The command data to process. Assume volatile.
2156 * @param cbCmd The amount of command data.
2157 *
2158 * @thread VDMA
2159 */
2160static int8_t vboxVDMACrCmdVbvaProcessCmdData(struct VBOXVDMAHOST *pVdma,
2161 const VBOXCMDVBVA_HDR RT_UNTRUSTED_VOLATILE_GUEST *pCmd, uint32_t cbCmd)
2162{
2163 uint8_t bOpCode = pCmd->u8OpCode;
2164 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2165 switch (bOpCode)
2166 {
2167 case VBOXCMDVBVA_OPTYPE_NOPCMD:
2168 return 0;
2169
2170 case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
2171 return vboxVDMACrCmdVbvaPageTransfer(pVdma->pVGAState, pCmd, cbCmd,
2172 &((VBOXCMDVBVA_PAGING_TRANSFER RT_UNTRUSTED_VOLATILE_GUEST *)pCmd)->Data);
2173
2174 case VBOXCMDVBVA_OPTYPE_PAGING_FILL:
2175 ASSERT_GUEST_RETURN(cbCmd == sizeof(VBOXCMDVBVA_PAGING_FILL), -1);
2176 return vboxVDMACrCmdVbvaPagingFill(pVdma->pVGAState, (VBOXCMDVBVA_PAGING_FILL RT_UNTRUSTED_VOLATILE_GUEST *)pCmd);
2177
2178 default:
2179 ASSERT_GUEST_RETURN(pVdma->CrSrvInfo.pfnCmd != NULL, -1);
2180 return pVdma->CrSrvInfo.pfnCmd(pVdma->CrSrvInfo.hSvr, pCmd, cbCmd);
2181 }
2182}
2183
2184# if 0
2185typedef struct VBOXCMDVBVA_PAGING_TRANSFER
2186{
2187 VBOXCMDVBVA_HDR Hdr;
2188 /* for now can only contain offVRAM.
2189 * paging transfer can NOT be initiated for allocations having host 3D object (hostID) associated */
2190 VBOXCMDVBVA_ALLOCINFO Alloc;
2191 uint32_t u32Reserved;
2192 VBOXCMDVBVA_SYSMEMEL aSysMem[1];
2193} VBOXCMDVBVA_PAGING_TRANSFER;
2194# endif
2195
2196AssertCompile(sizeof (VBOXCMDVBVA_HDR) == 8);
2197AssertCompile(sizeof (VBOXCMDVBVA_ALLOCINFO) == 4);
2198AssertCompile(sizeof (VBOXCMDVBVAPAGEIDX) == 4);
2199AssertCompile(!(X86_PAGE_SIZE % sizeof (VBOXCMDVBVAPAGEIDX)));
2200
2201# define VBOXCMDVBVA_NUM_SYSMEMEL_PER_PAGE (X86_PAGE_SIZE / sizeof (VBOXCMDVBVA_SYSMEMEL))
2202
2203/**
2204 * Worker for vboxVDMACrCmdProcess.
2205 *
2206 * @returns 8-bit result.
2207 * @param pVdma The VDMA channel.
2208 * @param pCmd The command. Consider volatile!
2209 * @param cbCmd The size of what @a pCmd points to. At least
2210 * sizeof(VBOXCMDVBVA_HDR).
 * @param fRecursion Set if this is a recursive call, false if not.
2212 *
2213 * @thread VDMA
2214 */
2215static int8_t vboxVDMACrCmdVbvaProcess(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR RT_UNTRUSTED_VOLATILE_GUEST *pCmd,
2216 uint32_t cbCmd, bool fRecursion)
2217{
2218 int8_t i8Result = 0;
2219 uint8_t const bOpCode = pCmd->u8OpCode;
2220 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2221 LogRelFlow(("VDMA: vboxVDMACrCmdVbvaProcess: ENTER, bOpCode=%u\n", bOpCode));
2222 switch (bOpCode)
2223 {
2224 case VBOXCMDVBVA_OPTYPE_SYSMEMCMD:
2225 {
2226 /*
2227 * Extract the command physical address and size.
2228 */
2229 ASSERT_GUEST_MSG_RETURN(cbCmd >= sizeof(VBOXCMDVBVA_SYSMEMCMD), ("%#x\n", cbCmd), -1);
2230 RTGCPHYS GCPhysCmd = ((VBOXCMDVBVA_SYSMEMCMD RT_UNTRUSTED_VOLATILE_GUEST *)pCmd)->phCmd;
2231 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2232 uint32_t cbCmdPart = X86_PAGE_SIZE - (uint32_t)(GCPhysCmd & X86_PAGE_OFFSET_MASK);
2233
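            /* The real command size is a 16-bit value split across two 8-bit
             * header fields: low byte in u8Flags, high byte in u.u8PrimaryID. */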
2234 uint32_t cbRealCmd = pCmd->u8Flags;
2235 cbRealCmd |= (uint32_t)pCmd->u.u8PrimaryID << 8;
2236 ASSERT_GUEST_MSG_RETURN(cbRealCmd >= sizeof(VBOXCMDVBVA_HDR), ("%#x\n", cbRealCmd), -1);
2237 ASSERT_GUEST_MSG_RETURN(cbRealCmd <= _1M, ("%#x\n", cbRealCmd), -1);
2238
2239 /*
2240 * Lock down the first page of the memory specified by the command.
2241 */
2242 PGMPAGEMAPLOCK Lock;
2243 PVGASTATE pVGAState = pVdma->pVGAState;
2244 PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
2245 VBOXCMDVBVA_HDR const *pRealCmdHdr = NULL;
2246 int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, GCPhysCmd, 0, (const void **)&pRealCmdHdr, &Lock);
2247 ASSERT_GUEST_LOGREL_MSG_RC_RETURN(rc, ("VDMA: %RGp -> %Rrc\n", GCPhysCmd, rc), -1);
2248 Assert((GCPhysCmd & PAGE_OFFSET_MASK) == (((uintptr_t)pRealCmdHdr) & PAGE_OFFSET_MASK));
2249
2250 /*
2251 * All fits within one page? We can handle that pretty efficiently.
2252 */
2253 if (cbRealCmd <= cbCmdPart)
2254 {
2255 i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pRealCmdHdr, cbRealCmd);
2256 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2257 }
2258 else
2259 {
2260 /*
2261 * To keep things damn simple, just double buffer cross page or
2262 * multipage requests.
2263 */
2264 uint8_t *pbCmdBuf = (uint8_t *)RTMemTmpAllocZ(RT_ALIGN_Z(cbRealCmd, 16));
2265 if (pbCmdBuf)
2266 {
2267 memcpy(pbCmdBuf, pRealCmdHdr, cbCmdPart);
2268 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2269 pRealCmdHdr = NULL;
2270
2271 rc = PDMDevHlpPhysRead(pDevIns, GCPhysCmd + cbCmdPart, &pbCmdBuf[cbCmdPart], cbRealCmd - cbCmdPart);
2272 if (RT_SUCCESS(rc))
2273 i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, (VBOXCMDVBVA_HDR const *)pbCmdBuf, cbRealCmd);
2274 else
2275 LogRelMax(200, ("VDMA: Error reading %#x bytes of guest memory %#RGp!\n", cbRealCmd, GCPhysCmd));
2276 RTMemTmpFree(pbCmdBuf);
2277 }
2278 else
2279 {
2280 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2281 LogRelMax(200, ("VDMA: Out of temporary memory! %#x\n", cbRealCmd));
2282 i8Result = -1;
2283 }
2284 }
2285 return i8Result;
2286 }
2287
2288 case VBOXCMDVBVA_OPTYPE_COMPLEXCMD:
2289 {
2290 Assert(cbCmd >= sizeof(VBOXCMDVBVA_HDR)); /* caller already checked this */
2291 ASSERT_GUEST_RETURN(!fRecursion, -1);
2292
2293 /* Skip current command. */
2294 cbCmd -= sizeof(*pCmd);
2295 pCmd++;
2296
2297 /* Process subcommands. */
2298 while (cbCmd > 0)
2299 {
2300 ASSERT_GUEST_MSG_RETURN(cbCmd >= sizeof(VBOXCMDVBVA_HDR), ("%#x\n", cbCmd), -1);
2301
2302 uint16_t cbCurCmd = pCmd->u2.complexCmdEl.u16CbCmdHost;
2303 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2304 ASSERT_GUEST_MSG_RETURN(cbCurCmd <= cbCmd, ("cbCurCmd=%#x, cbCmd=%#x\n", cbCurCmd, cbCmd), -1);
2305
                i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCurCmd, true /*fRecursion*/);
2307 ASSERT_GUEST_MSG_RETURN(i8Result >= 0, ("vboxVDMACrCmdVbvaProcess -> %d\n", i8Result), i8Result);
2308
2309 /* Advance to the next command. */
2310 pCmd = (VBOXCMDVBVA_HDR RT_UNTRUSTED_VOLATILE_GUEST *)((uintptr_t)pCmd + cbCurCmd);
2311 cbCmd -= cbCurCmd;
2312 }
2313 return 0;
2314 }
2315
2316 default:
2317 i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pCmd, cbCmd);
2318 LogRelFlow(("VDMA: vboxVDMACrCmdVbvaProcess: LEAVE, opCode(%i)\n", pCmd->u8OpCode));
2319 return i8Result;
2320 }
2321}
2322
2323/**
2324 * Worker for vboxVDMAWorkerThread handling VBVAEXHOST_DATA_TYPE_CMD.
2325 *
2326 * @thread VDMA
2327 */
2328static void vboxVDMACrCmdProcess(struct VBOXVDMAHOST *pVdma, uint8_t RT_UNTRUSTED_VOLATILE_GUEST *pbCmd, uint32_t cbCmd)
2329{
2330 if ( cbCmd > 0
2331 && *pbCmd == VBOXCMDVBVA_OPTYPE_NOP)
2332 { /* nop */ }
2333 else
2334 {
2335 ASSERT_GUEST_RETURN_VOID(cbCmd >= sizeof(VBOXCMDVBVA_HDR));
2336 VBOXCMDVBVA_HDR RT_UNTRUSTED_VOLATILE_GUEST *pCmd = (VBOXCMDVBVA_HDR RT_UNTRUSTED_VOLATILE_GUEST *)pbCmd;
2337
        /* Atomically flip the command from SUBMITTED to IN_PROGRESS; if this
         * fails, the guest has cancelled the command in the meantime. */
2339 if (ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_IN_PROGRESS, VBOXCMDVBVA_STATE_SUBMITTED))
2340 {
2341 /* Process it. */
2342 pCmd->u.i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCmd, false /*fRecursion*/);
2343 }
2344 else
2345 Assert(pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED);
2346 }
2347
2348}
2349
2350/**
2351 * Worker for vboxVDMAConstruct().
2352 */
2353static int vboxVDMACrCtlHgsmiSetup(struct VBOXVDMAHOST *pVdma)
2354{
2355 PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP pCmd;
2356 pCmd = (PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP)vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP, sizeof(*pCmd));
2357 int rc;
2358 if (pCmd)
2359 {
2360 PVGASTATE pVGAState = pVdma->pVGAState;
2361 pCmd->pvVRamBase = pVGAState->vram_ptrR3;
2362 pCmd->cbVRam = pVGAState->vram_size;
2363 pCmd->pLed = &pVGAState->Led3D;
2364 pCmd->CrClientInfo.hClient = pVdma;
2365 pCmd->CrClientInfo.pfnCallout = vboxCmdVBVACmdCallout;
2366 rc = vboxVDMACrCtlPost(pVGAState, &pCmd->Hdr, sizeof (*pCmd));
2367 if (RT_SUCCESS(rc))
2368 {
2369 rc = vboxVDMACrCtlGetRc(&pCmd->Hdr);
2370 if (RT_SUCCESS(rc))
2371 pVdma->CrSrvInfo = pCmd->CrCmdServerInfo;
2372 else if (rc != VERR_NOT_SUPPORTED)
2373 WARN(("vboxVDMACrCtlGetRc returned %Rrc\n", rc));
2374 }
2375 else
2376 WARN(("vboxVDMACrCtlPost failed %Rrc\n", rc));
2377
2378 vboxVDMACrCtlRelease(&pCmd->Hdr);
2379 }
2380 else
2381 rc = VERR_NO_MEMORY;
2382
2383 if (!RT_SUCCESS(rc))
2384 memset(&pVdma->CrSrvInfo, 0, sizeof (pVdma->CrSrvInfo));
2385
2386 return rc;
2387}
2388
2389/**
2390 * @interface_method_impl{PDMIDISPLAYVBVACALLBACKS,pfnCrHgsmiControlCompleteAsync,
2391 * Some indirect completion magic, you gotta love this code! }
2392 */
2393DECLCALLBACK(int) vboxVDMACrHgsmiCommandCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc)
2394{
2395 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2396 PHGSMIINSTANCE pIns = pVGAState->pHGSMI;
2397 VBOXVDMACMD RT_UNTRUSTED_VOLATILE_GUEST *pDmaHdr = VBOXVDMACMD_FROM_BODY(pCmd);
2398 VBOXVDMACBUF_DR RT_UNTRUSTED_VOLATILE_GUEST *pDr = VBOXVDMACBUF_DR_FROM_TAIL(pDmaHdr);
2399
2400 AssertRC(rc);
2401 pDr->rc = rc;
2402
2403 Assert(pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
2404 rc = VBoxSHGSMICommandComplete(pIns, pDr);
2405 AssertRC(rc);
2406
2407 return rc;
2408}
2409
2410/**
2411 * Worker for vboxVDMACmdExecBlt().
2412 */
2413static int vboxVDMACmdExecBltPerform(PVBOXVDMAHOST pVdma, const VBOXVIDEOOFFSET offDst, const VBOXVIDEOOFFSET offSrc,
2414 const PVBOXVDMA_SURF_DESC pDstDesc, const PVBOXVDMA_SURF_DESC pSrcDesc,
2415 const VBOXVDMA_RECTL *pDstRectl, const VBOXVDMA_RECTL *pSrcRectl)
2416{
2417 /*
2418 * We do not support color conversion.
2419 */
2420 AssertReturn(pDstDesc->format == pSrcDesc->format, VERR_INVALID_FUNCTION);
2421
2422 /* we do not support stretching (checked by caller) */
2423 Assert(pDstRectl->height == pSrcRectl->height);
2424 Assert(pDstRectl->width == pSrcRectl->width);
2425
2426 uint8_t *pbRam = pVdma->pVGAState->vram_ptrR3;
2427 AssertCompileSize(pVdma->pVGAState->vram_size, sizeof(uint32_t));
2428 uint32_t cbVRamSize = pVdma->pVGAState->vram_size;
2429 uint8_t *pbDstSurf = pbRam + offDst;
2430 uint8_t *pbSrcSurf = pbRam + offSrc;
2431
2432 if ( pDstDesc->width == pDstRectl->width
2433 && pSrcDesc->width == pSrcRectl->width
2434 && pSrcDesc->width == pDstDesc->width
2435 && pSrcDesc->pitch == pDstDesc->pitch)
2436 {
2437 Assert(!pDstRectl->left);
2438 Assert(!pSrcRectl->left);
2439 uint32_t offBoth = pDstDesc->pitch * pDstRectl->top;
2440 uint32_t cbToCopy = pDstDesc->pitch * pDstRectl->height;
2441
2442 if ( cbToCopy <= cbVRamSize
2443 && (uintptr_t)(pbDstSurf + offBoth) - (uintptr_t)pbRam <= cbVRamSize - cbToCopy
2444 && (uintptr_t)(pbSrcSurf + offBoth) - (uintptr_t)pbRam <= cbVRamSize - cbToCopy)
2445 {
2446 RT_UNTRUSTED_VALIDATED_FENCE();
2447 memcpy(pbDstSurf + offBoth, pbSrcSurf + offBoth, cbToCopy);
2448 }
2449 else
2450 return VERR_INVALID_PARAMETER;
2451 }
2452 else
2453 {
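        /* Derive byte offsets from bit positions, e.g. with bpp=32 and
         * left=10 the copy starts 10 * 32 / 8 = 40 bytes into the scanline;
         * the '+ 7' terms round up for sub-byte bit depths. */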
2454 uint32_t offDstLineStart = pDstRectl->left * pDstDesc->bpp >> 3;
2455 uint32_t offDstLineEnd = ((pDstRectl->left * pDstDesc->bpp + 7) >> 3) + ((pDstDesc->bpp * pDstRectl->width + 7) >> 3);
2456 uint32_t cbDstLine = offDstLineEnd - offDstLineStart;
2457 uint32_t offDstStart = pDstDesc->pitch * pDstRectl->top + offDstLineStart;
2458 Assert(cbDstLine <= pDstDesc->pitch);
2459 uint32_t cbDstSkip = pDstDesc->pitch;
2460 uint8_t *pbDstStart = pbDstSurf + offDstStart;
2461
2462 uint32_t offSrcLineStart = pSrcRectl->left * pSrcDesc->bpp >> 3;
2463# ifdef VBOX_STRICT
2464 uint32_t offSrcLineEnd = ((pSrcRectl->left * pSrcDesc->bpp + 7) >> 3) + ((pSrcDesc->bpp * pSrcRectl->width + 7) >> 3);
2465 uint32_t cbSrcLine = offSrcLineEnd - offSrcLineStart;
2466# endif
2467 uint32_t offSrcStart = pSrcDesc->pitch * pSrcRectl->top + offSrcLineStart;
2468 Assert(cbSrcLine <= pSrcDesc->pitch);
2469 uint32_t cbSrcSkip = pSrcDesc->pitch;
2470 const uint8_t *pbSrcStart = pbSrcSurf + offSrcStart;
2471
2472 Assert(cbDstLine == cbSrcLine);
2473
2474 for (uint32_t i = 0; ; ++i)
2475 {
2476 if ( cbDstLine <= cbVRamSize
2477 && (uintptr_t)pbDstStart - (uintptr_t)pbRam <= cbVRamSize - cbDstLine
2478 && (uintptr_t)pbSrcStart - (uintptr_t)pbRam <= cbVRamSize - cbDstLine)
2479 {
                RT_UNTRUSTED_VALIDATED_FENCE(); /** @todo this could potentially be a buzzkiller. */
2481 memcpy(pbDstStart, pbSrcStart, cbDstLine);
2482 }
2483 else
2484 return VERR_INVALID_PARAMETER;
2485 if (i == pDstRectl->height)
2486 break;
2487 pbDstStart += cbDstSkip;
2488 pbSrcStart += cbSrcSkip;
2489 }
2490 }
2491 return VINF_SUCCESS;
2492}
2493
2494#if 0 /* unused */
2495static void vboxVDMARectlUnite(VBOXVDMA_RECTL * pRectl1, const VBOXVDMA_RECTL * pRectl2)
2496{
2497 if (!pRectl1->width)
2498 *pRectl1 = *pRectl2;
2499 else
2500 {
2501 int16_t x21 = pRectl1->left + pRectl1->width;
2502 int16_t x22 = pRectl2->left + pRectl2->width;
2503 if (pRectl1->left > pRectl2->left)
2504 {
2505 pRectl1->left = pRectl2->left;
2506 pRectl1->width = x21 < x22 ? x22 - pRectl1->left : x21 - pRectl1->left;
2507 }
2508 else if (x21 < x22)
2509 pRectl1->width = x22 - pRectl1->left;
2510
2511 x21 = pRectl1->top + pRectl1->height;
2512 x22 = pRectl2->top + pRectl2->height;
2513 if (pRectl1->top > pRectl2->top)
2514 {
2515 pRectl1->top = pRectl2->top;
2516 pRectl1->height = x21 < x22 ? x22 - pRectl1->top : x21 - pRectl1->top;
2517 }
2518 else if (x21 < x22)
2519 pRectl1->height = x22 - pRectl1->top;
2520 }
2521}
2522#endif /* unused */
2523
2524/**
2525 * Handles VBOXVDMACMD_TYPE_DMA_PRESENT_BLT for vboxVDMACmdExec().
2526 *
2527 * @returns number of bytes (positive) of the full command on success,
2528 * otherwise a negative error status (VERR_XXX).
2529 *
2530 * @param pVdma The VDMA channel.
2531 * @param pBlt Blit command buffer. This is to be considered
2532 * volatile!
 * @param cbBuffer Number of bytes accessible at @a pBlt.
2534 */
2535static int vboxVDMACmdExecBlt(PVBOXVDMAHOST pVdma, const VBOXVDMACMD_DMA_PRESENT_BLT RT_UNTRUSTED_VOLATILE_GUEST *pBlt,
2536 uint32_t cbBuffer)
2537{
2538 /*
2539 * Validate and make a local copy of the blt command up to the rectangle array.
2540 */
2541 AssertReturn(cbBuffer >= RT_UOFFSETOF(VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects), VERR_INVALID_PARAMETER);
2542 VBOXVDMACMD_DMA_PRESENT_BLT BltSafe;
2543 RT_BCOPY_VOLATILE(&BltSafe, (void const *)pBlt, RT_UOFFSETOF(VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects));
2544 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2545
2546 AssertReturn(BltSafe.cDstSubRects < _8M, VERR_INVALID_PARAMETER);
2547 uint32_t const cbBlt = RT_UOFFSETOF_DYN(VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects[BltSafe.cDstSubRects]);
2548 AssertReturn(cbBuffer >= cbBlt, VERR_INVALID_PARAMETER);
2549
2550 /*
2551 * We do not support stretching.
2552 */
2553 AssertReturn(BltSafe.srcRectl.width == BltSafe.dstRectl.width, VERR_INVALID_FUNCTION);
2554 AssertReturn(BltSafe.srcRectl.height == BltSafe.dstRectl.height, VERR_INVALID_FUNCTION);
2555
2556 Assert(BltSafe.cDstSubRects);
2557
2558 RT_UNTRUSTED_VALIDATED_FENCE();
2559
2560 /*
2561 * Do the work.
2562 */
2563 //VBOXVDMA_RECTL updateRectl = {0, 0, 0, 0}; - pointless
2564 if (BltSafe.cDstSubRects)
2565 {
2566 for (uint32_t i = 0; i < BltSafe.cDstSubRects; ++i)
2567 {
2568 VBOXVDMA_RECTL dstSubRectl;
2569 dstSubRectl.left = pBlt->aDstSubRects[i].left;
2570 dstSubRectl.top = pBlt->aDstSubRects[i].top;
2571 dstSubRectl.width = pBlt->aDstSubRects[i].width;
2572 dstSubRectl.height = pBlt->aDstSubRects[i].height;
2573 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2574
2575 VBOXVDMA_RECTL srcSubRectl = dstSubRectl;
2576
2577 dstSubRectl.left += BltSafe.dstRectl.left;
2578 dstSubRectl.top += BltSafe.dstRectl.top;
2579
2580 srcSubRectl.left += BltSafe.srcRectl.left;
2581 srcSubRectl.top += BltSafe.srcRectl.top;
2582
2583 int rc = vboxVDMACmdExecBltPerform(pVdma, BltSafe.offDst, BltSafe.offSrc, &BltSafe.dstDesc, &BltSafe.srcDesc,
2584 &dstSubRectl, &srcSubRectl);
2585 AssertRCReturn(rc, rc);
2586
2587 //vboxVDMARectlUnite(&updateRectl, &dstSubRectl); - pointless
2588 }
2589 }
2590 else
2591 {
2592 int rc = vboxVDMACmdExecBltPerform(pVdma, BltSafe.offDst, BltSafe.offSrc, &BltSafe.dstDesc, &BltSafe.srcDesc,
2593 &BltSafe.dstRectl, &BltSafe.srcRectl);
2594 AssertRCReturn(rc, rc);
2595
2596 //vboxVDMARectlUnite(&updateRectl, &BltSafe.dstRectl); - pointless
2597 }
2598
2599 return cbBlt;
2600}
2601
2602
2603/**
2604 * Handles VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER for vboxVDMACmdCheckCrCmd() and
2605 * vboxVDMACmdExec().
2606 *
2607 * @returns number of bytes (positive) of the full command on success,
2608 * otherwise a negative error status (VERR_XXX).
2609 *
2610 * @param pVdma The VDMA channel.
2611 * @param pTransfer Transfer command buffer. This is to be considered
2612 * volatile!
2613 * @param cbBuffer Number of bytes accessible at @a pTransfer.
2614 */
2615static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, VBOXVDMACMD_DMA_BPB_TRANSFER RT_UNTRUSTED_VOLATILE_GUEST *pTransfer,
2616 uint32_t cbBuffer)
2617{
2618 /*
2619 * Make a copy of the command (it's volatile).
2620 */
2621 AssertReturn(cbBuffer >= sizeof(*pTransfer), VERR_INVALID_PARAMETER);
2622 VBOXVDMACMD_DMA_BPB_TRANSFER TransferSafeCopy;
2623 RT_COPY_VOLATILE(TransferSafeCopy, *pTransfer);
2624 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2625
2626 PVGASTATE pVGAState = pVdma->pVGAState;
2627 PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
2628 uint8_t *pbRam = pVGAState->vram_ptrR3;
2629 uint32_t cbTransfer = TransferSafeCopy.cbTransferSize;
2630
2631 /*
2632 * Validate VRAM offset.
2633 */
2634 if (TransferSafeCopy.fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
2635 AssertReturn( cbTransfer <= pVGAState->vram_size
2636 && TransferSafeCopy.Src.offVramBuf <= pVGAState->vram_size - cbTransfer,
2637 VERR_INVALID_PARAMETER);
2638
2639 if (TransferSafeCopy.fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
2640 AssertReturn( cbTransfer <= pVGAState->vram_size
2641 && TransferSafeCopy.Dst.offVramBuf <= pVGAState->vram_size - cbTransfer,
2642 VERR_INVALID_PARAMETER);
2643 RT_UNTRUSTED_VALIDATED_FENCE();
2644
2645 /*
2646 * Transfer loop.
2647 */
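    /* Copy in chunks: a VRAM-offset side can address the whole buffer, while
     * a guest-physical side is mapped one page at a time, so each iteration
     * is clamped to the smaller remaining window. */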
2648 uint32_t cbTransfered = 0;
2649 int rc = VINF_SUCCESS;
2650 do
2651 {
2652 uint32_t cbSubTransfer = cbTransfer;
2653
2654 const void *pvSrc;
2655 bool fSrcLocked = false;
2656 PGMPAGEMAPLOCK SrcLock;
2657 if (TransferSafeCopy.fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
2658 pvSrc = pbRam + TransferSafeCopy.Src.offVramBuf + cbTransfered;
2659 else
2660 {
2661 RTGCPHYS GCPhysSrcPage = TransferSafeCopy.Src.phBuf + cbTransfered;
2662 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, GCPhysSrcPage, 0, &pvSrc, &SrcLock);
2663 AssertRC(rc);
2664 if (RT_SUCCESS(rc))
2665 {
2666 fSrcLocked = true;
2667 cbSubTransfer = RT_MIN(cbSubTransfer, X86_PAGE_SIZE - (uint32_t)(GCPhysSrcPage & X86_PAGE_OFFSET_MASK));
2668 }
2669 else
2670 break;
2671 }
2672
2673 void *pvDst;
2674 PGMPAGEMAPLOCK DstLock;
2675 bool fDstLocked = false;
2676 if (TransferSafeCopy.fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
2677 pvDst = pbRam + TransferSafeCopy.Dst.offVramBuf + cbTransfered;
2678 else
2679 {
2680 RTGCPHYS GCPhysDstPage = TransferSafeCopy.Dst.phBuf + cbTransfered;
2681 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, GCPhysDstPage, 0, &pvDst, &DstLock);
2682 AssertRC(rc);
2683 if (RT_SUCCESS(rc))
2684 {
2685 fDstLocked = true;
2686 cbSubTransfer = RT_MIN(cbSubTransfer, X86_PAGE_SIZE - (uint32_t)(GCPhysDstPage & X86_PAGE_OFFSET_MASK));
2687 }
2688 }
2689
2690 if (RT_SUCCESS(rc))
2691 {
2692 memcpy(pvDst, pvSrc, cbSubTransfer);
2693 cbTransfered += cbSubTransfer;
2694 cbTransfer -= cbSubTransfer;
2695 }
2696 else
2697 cbTransfer = 0; /* force break below */
2698
2699 if (fSrcLocked)
2700 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &SrcLock);
2701 if (fDstLocked)
2702 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &DstLock);
2703 } while (cbTransfer);
2704
2705 if (RT_SUCCESS(rc))
2706 return sizeof(TransferSafeCopy);
2707 return rc;
2708}
2709
2710/**
2711 * Worker for vboxVDMACommandProcess().
2712 *
 * @param pVdma The VDMA channel.
2714 * @param pbBuffer Command buffer, considered volatile.
2715 * @param cbBuffer The number of bytes at @a pbBuffer.
2716 * @param pCmdDr The command. For setting the async flag on chromium
2717 * requests.
 * @param pfAsyncCmd Flag to set for asynchronous command completion on
 * chromium requests. The input state is false, so it only
 * ever needs to be set to true.
2721 */
2722static int vboxVDMACmdExec(PVBOXVDMAHOST pVdma, uint8_t const RT_UNTRUSTED_VOLATILE_GUEST *pbBuffer, uint32_t cbBuffer,
2723 VBOXVDMACBUF_DR RT_UNTRUSTED_VOLATILE_GUEST *pCmdDr, bool *pfAsyncCmd)
2724{
2725 AssertReturn(pbBuffer, VERR_INVALID_POINTER);
2726
2727 for (;;)
2728 {
2729 AssertReturn(cbBuffer >= VBOXVDMACMD_HEADER_SIZE(), VERR_INVALID_PARAMETER);
2730
2731 VBOXVDMACMD const RT_UNTRUSTED_VOLATILE_GUEST *pCmd = (VBOXVDMACMD const RT_UNTRUSTED_VOLATILE_GUEST *)pbBuffer;
2732 VBOXVDMACMD_TYPE enmCmdType = pCmd->enmType;
2733 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2734
2735 ASSERT_GUEST_MSG_RETURN( enmCmdType == VBOXVDMACMD_TYPE_CHROMIUM_CMD
2736 || enmCmdType == VBOXVDMACMD_TYPE_DMA_PRESENT_BLT
2737 || enmCmdType == VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER
2738 || enmCmdType == VBOXVDMACMD_TYPE_DMA_NOP
2739 || enmCmdType == VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ,
2740 ("enmCmdType=%d\n", enmCmdType),
2741 VERR_INVALID_FUNCTION);
2742 RT_UNTRUSTED_VALIDATED_FENCE();
2743
2744 int cbProcessed;
2745 switch (enmCmdType)
2746 {
2747 case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
2748 {
2749 VBOXVDMACMD_CHROMIUM_CMD RT_UNTRUSTED_VOLATILE_GUEST *pCrCmd = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_CHROMIUM_CMD);
2750 uint32_t const cbBody = VBOXVDMACMD_BODY_SIZE(cbBuffer);
2751 AssertReturn(cbBody >= sizeof(*pCrCmd), VERR_INVALID_PARAMETER);
2752
2753 PVGASTATE pVGAState = pVdma->pVGAState;
2754 AssertReturn(pVGAState->pDrv->pfnCrHgsmiCommandProcess, VERR_NOT_SUPPORTED);
2755
2756 VBoxSHGSMICommandMarkAsynchCompletion(pCmdDr);
2757 pVGAState->pDrv->pfnCrHgsmiCommandProcess(pVGAState->pDrv, pCrCmd, cbBody);
2758 *pfAsyncCmd = true;
2759 return VINF_SUCCESS;
2760 }
2761
2762 case VBOXVDMACMD_TYPE_DMA_PRESENT_BLT:
2763 {
2764 VBOXVDMACMD_DMA_PRESENT_BLT RT_UNTRUSTED_VOLATILE_GUEST *pBlt = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_PRESENT_BLT);
2765 cbProcessed = vboxVDMACmdExecBlt(pVdma, pBlt, cbBuffer - VBOXVDMACMD_HEADER_SIZE());
2766 Assert(cbProcessed >= 0);
2767 break;
2768 }
2769
2770 case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
2771 {
2772 VBOXVDMACMD_DMA_BPB_TRANSFER RT_UNTRUSTED_VOLATILE_GUEST *pTransfer
2773 = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
2774 cbProcessed = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, cbBuffer - VBOXVDMACMD_HEADER_SIZE());
2775 Assert(cbProcessed >= 0);
2776 break;
2777 }
2778
2779 case VBOXVDMACMD_TYPE_DMA_NOP:
2780 return VINF_SUCCESS;
2781
2782 case VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ:
2783 return VINF_SUCCESS;
2784
2785 default:
2786 AssertFailedReturn(VERR_INVALID_FUNCTION);
2787 }
2788
2789 /* Advance buffer or return. */
2790 if (cbProcessed >= 0)
2791 {
2792 Assert(cbProcessed > 0);
2793 cbProcessed += VBOXVDMACMD_HEADER_SIZE();
2794 if ((uint32_t)cbProcessed >= cbBuffer)
2795 {
2796 Assert((uint32_t)cbProcessed == cbBuffer);
2797 return VINF_SUCCESS;
2798 }
2799
2800 cbBuffer -= cbProcessed;
2801 pbBuffer += cbProcessed;
2802 }
2803 else
2804 {
2805 RT_UNTRUSTED_VALIDATED_FENCE();
2806 return cbProcessed; /* error status */
2807 }
2808 }
2809}
2810
2811/**
2812 * VDMA worker thread procedure, see vdmaVBVACtlEnableSubmitInternal().
2813 *
2814 * @thread VDMA
2815 */
2816static DECLCALLBACK(int) vboxVDMAWorkerThread(RTTHREAD hThreadSelf, void *pvUser)
2817{
2818 RT_NOREF(hThreadSelf);
2819 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvUser;
2820 PVGASTATE pVGAState = pVdma->pVGAState;
2821 VBVAEXHOSTCONTEXT *pCmdVbva = &pVdma->CmdVbva;
2822 int rc;
2823
2824 VBoxVDMAThreadNotifyConstructSucceeded(&pVdma->Thread, pvUser);
2825
2826 while (!VBoxVDMAThreadIsTerminating(&pVdma->Thread))
2827 {
2828 uint8_t RT_UNTRUSTED_VOLATILE_GUEST *pbCmd = NULL;
2829 uint32_t cbCmd = 0;
2830 VBVAEXHOST_DATA_TYPE enmType = VBoxVBVAExHPDataGet(pCmdVbva, &pbCmd, &cbCmd);
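        /* Dispatch on what the shared VBVA ring handed back: a guest command,
         * a guest or host control request, or nothing, in which case we block
         * until the thread event is signalled. */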
2831 switch (enmType)
2832 {
2833 case VBVAEXHOST_DATA_TYPE_CMD:
2834 vboxVDMACrCmdProcess(pVdma, pbCmd, cbCmd);
2835 VBoxVBVAExHPDataCompleteCmd(pCmdVbva, cbCmd);
2836 VBVARaiseIrq(pVGAState, 0);
2837 break;
2838
2839 case VBVAEXHOST_DATA_TYPE_GUESTCTL:
2840 rc = vboxVDMACrGuestCtlProcess(pVdma, (VBVAEXHOSTCTL *)pbCmd);
2841 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL *)pbCmd, rc);
2842 break;
2843
2844 case VBVAEXHOST_DATA_TYPE_HOSTCTL:
2845 {
2846 bool fContinue = true;
2847 rc = vboxVDMACrHostCtlProcess(pVdma, (VBVAEXHOSTCTL *)pbCmd, &fContinue);
2848 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL *)pbCmd, rc);
2849 if (fContinue)
2850 break;
2851 }
2852 RT_FALL_THRU();
2853
2854 case VBVAEXHOST_DATA_TYPE_NO_DATA:
2855 rc = RTSemEventWaitNoResume(pVdma->Thread.hEvent, RT_INDEFINITE_WAIT);
2856 AssertMsg(RT_SUCCESS(rc) || rc == VERR_INTERRUPTED, ("%Rrc\n", rc));
2857 break;
2858
2859 default:
2860 WARN(("unexpected type %d\n", enmType));
2861 break;
2862 }
2863 }
2864
2865 VBoxVDMAThreadNotifyTerminatingSucceeded(&pVdma->Thread, pvUser);
2866
2867 return VINF_SUCCESS;
2868}
2869
2870/**
 * Worker for vboxVDMACommand().
2872 *
2873 * @returns VBox status code of the operation.
2874 * @param pVdma VDMA instance data.
2875 * @param pCmd The command to process. Consider content volatile.
2876 * @param cbCmd Number of valid bytes at @a pCmd. This is at least
2877 * sizeof(VBOXVDMACBUF_DR).
 * @param pfAsyncCmd Flag to set for asynchronous command completion on
 * chromium requests. The input state is false, so it only
 * ever needs to be set to true.
2881 * @thread EMT
2882 */
2883static int vboxVDMACommandProcess(PVBOXVDMAHOST pVdma, VBOXVDMACBUF_DR RT_UNTRUSTED_VOLATILE_GUEST *pCmd,
2884 uint32_t cbCmd, bool *pfAsyncCmd)
2885{
2886 /*
2887 * Get the command buffer (volatile).
2888 */
2889 uint16_t const cbCmdBuf = pCmd->cbBuf;
2890 uint16_t const fCmdFlags = pCmd->fFlags;
2891 uint64_t const offVramBuf_or_GCPhysBuf = pCmd->Location.offVramBuf;
2892 AssertCompile(sizeof(pCmd->Location.offVramBuf) == sizeof(pCmd->Location.phBuf));
2893 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2894
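    /* The command buffer may be inline (immediately following the DR), at a
     * VRAM offset, or in guest physical memory (single page only); locate it
     * accordingly. */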
2895 const uint8_t RT_UNTRUSTED_VOLATILE_GUEST *pbCmdBuf;
2896 PGMPAGEMAPLOCK Lock;
2897 bool fReleaseLocked = false;
2898 if (fCmdFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
2899 {
2900 pbCmdBuf = VBOXVDMACBUF_DR_TAIL(pCmd, const uint8_t);
2901 AssertReturn((uintptr_t)&pbCmdBuf[cbCmdBuf] <= (uintptr_t)&((uint8_t *)pCmd)[cbCmd],
2902 VERR_INVALID_PARAMETER);
2903 RT_UNTRUSTED_VALIDATED_FENCE();
2904 }
2905 else if (fCmdFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
2906 {
2907 AssertReturn( offVramBuf_or_GCPhysBuf <= pVdma->pVGAState->vram_size
2908 && offVramBuf_or_GCPhysBuf + cbCmdBuf <= pVdma->pVGAState->vram_size,
2909 VERR_INVALID_PARAMETER);
2910 RT_UNTRUSTED_VALIDATED_FENCE();
2911
2912 pbCmdBuf = (uint8_t const RT_UNTRUSTED_VOLATILE_GUEST *)pVdma->pVGAState->vram_ptrR3 + offVramBuf_or_GCPhysBuf;
2913 }
2914 else
2915 {
2916 /* Make sure it doesn't cross a page. */
2917 AssertReturn((uint32_t)(offVramBuf_or_GCPhysBuf & X86_PAGE_OFFSET_MASK) + cbCmdBuf <= (uint32_t)X86_PAGE_SIZE,
2918 VERR_INVALID_PARAMETER);
2919 RT_UNTRUSTED_VALIDATED_FENCE();
2920
2921 int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pVdma->pVGAState->pDevInsR3, offVramBuf_or_GCPhysBuf, 0 /*fFlags*/,
2922 (const void **)&pbCmdBuf, &Lock);
2923 AssertRCReturn(rc, rc); /* if (rc == VERR_PGM_PHYS_PAGE_RESERVED) -> fall back on using PGMPhysRead ?? */
2924 fReleaseLocked = true;
2925 }
2926
2927 /*
2928 * Process the command.
2929 */
2930 int rc = vboxVDMACmdExec(pVdma, pbCmdBuf, cbCmdBuf, pCmd, pfAsyncCmd);
2931 AssertRC(rc);
2932
    /* Clean up the command buffer. */
2934 if (fReleaseLocked)
2935 PDMDevHlpPhysReleasePageMappingLock(pVdma->pVGAState->pDevInsR3, &Lock);
2936 return rc;
2937}
2938
2939# if 0 /** @todo vboxVDMAControlProcess is unused */
2940static void vboxVDMAControlProcess(PVBOXVDMAHOST pVdma, PVBOXVDMA_CTL pCmd)
2941{
2942 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
2943 pCmd->i32Result = VINF_SUCCESS;
2944 int rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
2945 AssertRC(rc);
2946}
2947# endif
2948
2949#endif /* VBOX_WITH_CRHGSMI */
2950#ifdef VBOX_VDMA_WITH_WATCHDOG
2951
2952/**
2953 * @callback_method_impl{TMTIMER, VDMA watchdog timer.}
2954 */
2955static DECLCALLBACK(void) vboxVDMAWatchDogTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
2956{
2957 VBOXVDMAHOST *pVdma = (VBOXVDMAHOST *)pvUser;
2958 PVGASTATE pVGAState = pVdma->pVGAState;
2959 VBVARaiseIrq(pVGAState, HGSMIHOSTFLAGS_WATCHDOG);
2960}
2961
2962/**
2963 * Handles VBOXVDMA_CTL_TYPE_WATCHDOG for vboxVDMAControl.
2964 */
2965static int vboxVDMAWatchDogCtl(struct VBOXVDMAHOST *pVdma, uint32_t cMillis)
2966{
2967 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
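    /* A zero interval disarms the watchdog; any other value (re)arms it. */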
2968 if (cMillis)
2969 TMTimerSetMillies(pVdma->WatchDogTimer, cMillis);
2970 else
2971 TMTimerStop(pVdma->WatchDogTimer);
2972 return VINF_SUCCESS;
2973}
2974
2975#endif /* VBOX_VDMA_WITH_WATCHDOG */
2976
2977/**
2978 * Called by vgaR3Construct() to initialize the state.
2979 *
2980 * @returns VBox status code.
2981 */
2982int vboxVDMAConstruct(PVGASTATE pVGAState, uint32_t cPipeElements)
2983{
2984 RT_NOREF(cPipeElements);
2985 int rc;
2986 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)RTMemAllocZ(sizeof(*pVdma));
2987 Assert(pVdma);
2988 if (pVdma)
2989 {
2990 pVdma->pHgsmi = pVGAState->pHGSMI;
2991 pVdma->pVGAState = pVGAState;
2992
2993#ifdef VBOX_VDMA_WITH_WATCHDOG
2994 rc = PDMDevHlpTMTimerCreate(pVGAState->pDevInsR3, TMCLOCK_REAL, vboxVDMAWatchDogTimer,
2995 pVdma, TMTIMER_FLAGS_NO_CRIT_SECT,
2996 "VDMA WatchDog Timer", &pVdma->WatchDogTimer);
2997 AssertRC(rc);
2998#else
2999 rc = VINF_SUCCESS;
3000#endif
3001 if (RT_SUCCESS(rc))
3002 {
3003#ifdef VBOX_WITH_CRHGSMI
3004 VBoxVDMAThreadInit(&pVdma->Thread);
3005
3006 rc = RTSemEventMultiCreate(&pVdma->HostCrCtlCompleteEvent);
3007 if (RT_SUCCESS(rc))
3008 {
3009 rc = VBoxVBVAExHSInit(&pVdma->CmdVbva);
3010 if (RT_SUCCESS(rc))
3011 {
3012 rc = RTCritSectInit(&pVdma->CalloutCritSect);
3013 if (RT_SUCCESS(rc))
3014 {
3015#endif
3016 pVGAState->pVdma = pVdma;
3017
3018#ifdef VBOX_WITH_CRHGSMI
3019 /* No HGCM service if VMSVGA is enabled. */
3020 if (!pVGAState->fVMSVGAEnabled)
3021 {
3022 int rcIgnored = vboxVDMACrCtlHgsmiSetup(pVdma); NOREF(rcIgnored); /** @todo is this ignoring intentional? */
3023 }
3024#endif
3025 return VINF_SUCCESS;
3026
3027#ifdef VBOX_WITH_CRHGSMI
3028 }
3029
3030 WARN(("RTCritSectInit failed %Rrc\n", rc));
3031 VBoxVBVAExHSTerm(&pVdma->CmdVbva);
3032 }
3033 else
3034 WARN(("VBoxVBVAExHSInit failed %Rrc\n", rc));
3035 RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
3036 }
3037 else
3038 WARN(("RTSemEventMultiCreate failed %Rrc\n", rc));
3039#endif
3040 /* the timer is cleaned up automatically */
3041 }
3042 RTMemFree(pVdma);
3043 }
3044 else
3045 rc = VERR_OUT_OF_RESOURCES;
3046 return rc;
3047}
3048
3049/**
3050 * Called by vgaR3Reset() to do reset.
3051 */
3052void vboxVDMAReset(struct VBOXVDMAHOST *pVdma)
3053{
3054#ifdef VBOX_WITH_CRHGSMI
3055 vdmaVBVACtlDisableSync(pVdma);
3056#else
3057 RT_NOREF(pVdma);
3058#endif
3059}
3060
3061/**
3062 * Called by vgaR3Destruct() to do cleanup.
3063 */
3064void vboxVDMADestruct(struct VBOXVDMAHOST *pVdma)
3065{
3066 if (!pVdma)
3067 return;
3068#ifdef VBOX_WITH_CRHGSMI
3069 if (pVdma->pVGAState->fVMSVGAEnabled)
3070 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
3071 else
3072 {
3073 /** @todo Remove. It does nothing because pVdma->CmdVbva is already disabled at this point
3074 * as the result of the SharedOpenGL HGCM service unloading.
3075 */
3076 vdmaVBVACtlDisableSync(pVdma);
3077 }
3078 VBoxVDMAThreadCleanup(&pVdma->Thread);
3079 VBoxVBVAExHSTerm(&pVdma->CmdVbva);
3080 RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
3081 RTCritSectDelete(&pVdma->CalloutCritSect);
3082#endif
3083 RTMemFree(pVdma);
3084}
3085
3086/**
 * Handle VBVA_VDMA_CTL, see vbvaChannelHandler().
3088 *
3089 * @param pVdma The VDMA channel.
3090 * @param pCmd The control command to handle. Considered volatile.
3091 * @param cbCmd The size of the command. At least sizeof(VBOXVDMA_CTL).
3092 */
3093void vboxVDMAControl(struct VBOXVDMAHOST *pVdma, VBOXVDMA_CTL RT_UNTRUSTED_VOLATILE_GUEST *pCmd, uint32_t cbCmd)
3094{
3095 RT_NOREF(cbCmd);
3096 PHGSMIINSTANCE pIns = pVdma->pHgsmi;
3097
3098 VBOXVDMA_CTL_TYPE enmCtl = pCmd->enmCtl;
3099 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
3100
3101 int rc;
3102 if (enmCtl < VBOXVDMA_CTL_TYPE_END)
3103 {
3104 RT_UNTRUSTED_VALIDATED_FENCE();
3105
3106 switch (enmCtl)
3107 {
3108 case VBOXVDMA_CTL_TYPE_ENABLE:
3109 rc = VINF_SUCCESS;
3110 break;
3111 case VBOXVDMA_CTL_TYPE_DISABLE:
3112 rc = VINF_SUCCESS;
3113 break;
3114 case VBOXVDMA_CTL_TYPE_FLUSH:
3115 rc = VINF_SUCCESS;
3116 break;
3117 case VBOXVDMA_CTL_TYPE_WATCHDOG:
3118#ifdef VBOX_VDMA_WITH_WATCHDOG
3119 rc = vboxVDMAWatchDogCtl(pVdma, pCmd->u32Offset);
3120#else
3121 rc = VERR_NOT_SUPPORTED;
3122#endif
3123 break;
3124 default:
3125 AssertFailedBreakStmt(rc = VERR_IPE_NOT_REACHED_DEFAULT_CASE);
3126 }
3127 }
3128 else
3129 {
3130 RT_UNTRUSTED_VALIDATED_FENCE();
3131 ASSERT_GUEST_FAILED();
3132 rc = VERR_NOT_SUPPORTED;
3133 }
3134
3135 pCmd->i32Result = rc;
3136 rc = VBoxSHGSMICommandComplete(pIns, pCmd);
3137 AssertRC(rc);
3138}
3139
3140/**
3141 * Handle VBVA_VDMA_CMD, see vbvaChannelHandler().
3142 *
3143 * @param pVdma The VDMA channel.
3144 * @param pCmd The command to handle. Considered volatile.
3145 * @param cbCmd The size of the command. At least sizeof(VBOXVDMACBUF_DR).
3146 * @thread EMT
3147 */
3148void vboxVDMACommand(struct VBOXVDMAHOST *pVdma, VBOXVDMACBUF_DR RT_UNTRUSTED_VOLATILE_GUEST *pCmd, uint32_t cbCmd)
3149{
3150 /*
3151 * Process the command.
3152 */
3153 bool fAsyncCmd = false;
3154#ifdef VBOX_WITH_CRHGSMI
3155 int rc = vboxVDMACommandProcess(pVdma, pCmd, cbCmd, &fAsyncCmd);
3156#else
3157 RT_NOREF(cbCmd);
3158 int rc = VERR_NOT_IMPLEMENTED;
3159#endif
3160
3161 /*
3162 * Complete the command unless it's asynchronous (e.g. chromium).
3163 */
3164 if (!fAsyncCmd)
3165 {
3166 pCmd->rc = rc;
3167 int rc2 = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCmd);
3168 AssertRC(rc2);
3169 }
3170}
3171
3172#ifdef VBOX_WITH_CRHGSMI
3173
3174/**
3175 * @callback_method_impl{FNVBVAEXHOSTCTL_COMPLETE,
 * Used by vdmaVBVACtlGenericGuestSubmit() and vdmaVBVACtlEnableDisableSubmit() }
3177 */
3178static DECLCALLBACK(void) vboxCmdVBVACmdCtlGuestCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
3179 int rc, void *pvContext)
3180{
3181 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvContext;
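    /* The guest control body was submitted immediately after its
     * VBOXCMDVBVA_CTL header (see vdmaVBVACtlGenericGuestSubmit), so stepping
     * back one header length recovers the original guest structure. */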
3182 VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_GUEST *pGCtl
3183 = (VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_GUEST *)((uintptr_t)pCtl->u.cmd.pvCmd - sizeof(VBOXCMDVBVA_CTL));
3184 AssertRC(rc);
3185 pGCtl->i32Result = rc;
3186
3187 Assert(pVdma->pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
3188 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pGCtl);
3189 AssertRC(rc);
3190
3191 VBoxVBVAExHCtlFree(pVbva, pCtl);
3192}
3193
3194/**
3195 * Worker for vdmaVBVACtlGenericGuestSubmit() and vdmaVBVACtlOpaqueHostSubmit().
3196 */
3197static int vdmaVBVACtlGenericSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_SOURCE enmSource, VBVAEXHOSTCTL_TYPE enmType,
3198 uint8_t RT_UNTRUSTED_VOLATILE_GUEST *pbCmd, uint32_t cbCmd,
3199 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3200{
3201 int rc;
3202 VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, enmType);
3203 if (pHCtl)
3204 {
3205 pHCtl->u.cmd.pvCmd = pbCmd;
3206 pHCtl->u.cmd.cbCmd = cbCmd;
3207 rc = vdmaVBVACtlSubmit(pVdma, pHCtl, enmSource, pfnComplete, pvComplete);
3208 if (RT_SUCCESS(rc))
3209 return VINF_SUCCESS;
3210
3211 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3212 Log(("vdmaVBVACtlSubmit failed %Rrc\n", rc));
3213 }
3214 else
3215 {
3216 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3217 rc = VERR_NO_MEMORY;
3218 }
3219 return rc;
3220}
3221
3222/**
3223 * Handler for vboxCmdVBVACmdCtl()/VBOXCMDVBVACTL_TYPE_3DCTL.
3224 */
3225static int vdmaVBVACtlGenericGuestSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_TYPE enmType,
3226 VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_GUEST *pCtl, uint32_t cbCtl)
3227{
    Assert(cbCtl >= sizeof(VBOXCMDVBVA_CTL)); /* Checked by the caller's caller, vbvaChannelHandler(). */
3229
3230 VBoxSHGSMICommandMarkAsynchCompletion(pCtl);
3231 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_GUEST, enmType,
3232 (uint8_t RT_UNTRUSTED_VOLATILE_GUEST *)(pCtl + 1),
3233 cbCtl - sizeof(VBOXCMDVBVA_CTL),
3234 vboxCmdVBVACmdCtlGuestCompletion, pVdma);
3235 if (RT_SUCCESS(rc))
3236 return VINF_SUCCESS;
3237
3238 WARN(("vdmaVBVACtlGenericSubmit failed %Rrc\n", rc));
3239 pCtl->i32Result = rc;
3240 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
3241 AssertRC(rc);
3242 return VINF_SUCCESS;
3243}
3244
3245/**
3246 * @callback_method_impl{FNVBVAEXHOSTCTL_COMPLETE, Used by vdmaVBVACtlOpaqueHostSubmit()}
3247 */
3248static DECLCALLBACK(void) vboxCmdVBVACmdCtlHostCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
3249 int rc, void *pvCompletion)
3250{
3251 VBOXCRCMDCTL *pVboxCtl = (VBOXCRCMDCTL *)pCtl->u.cmd.pvCmd;
3252 if (pVboxCtl->u.pfnInternal)
3253 ((PFNCRCTLCOMPLETION)pVboxCtl->u.pfnInternal)(pVboxCtl, pCtl->u.cmd.cbCmd, rc, pvCompletion);
3254 VBoxVBVAExHCtlFree(pVbva, pCtl);
3255}
3256
3257/**
3258 * Worker for vboxCmdVBVACmdHostCtl() and vboxCmdVBVACmdHostCtlSync().
3259 */
3260static int vdmaVBVACtlOpaqueHostSubmit(PVBOXVDMAHOST pVdma, struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
3261 PFNCRCTLCOMPLETION pfnCompletion, void *pvCompletion)
3262{
3263 pCmd->u.pfnInternal = (PFNRT)pfnCompletion;
3264 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_HOST, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE,
3265 (uint8_t *)pCmd, cbCmd, vboxCmdVBVACmdCtlHostCompletion, pvCompletion);
3266 if (RT_FAILURE(rc))
3267 {
3268 if (rc == VERR_INVALID_STATE)
3269 {
3270 pCmd->u.pfnInternal = NULL;
3271 PVGASTATE pVGAState = pVdma->pVGAState;
3272 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCmd, cbCmd, pfnCompletion, pvCompletion);
3273 if (!RT_SUCCESS(rc))
                WARN(("pfnCrHgcmCtlSubmit failed %Rrc\n", rc));
3275
3276 return rc;
3277 }
3278 WARN(("vdmaVBVACtlGenericSubmit failed %Rrc\n", rc));
3279 return rc;
3280 }
3281
3282 return VINF_SUCCESS;
3283}
3284
3285/**
3286 * Called from vdmaVBVACtlThreadCreatedEnable().
3287 */
3288static int vdmaVBVANotifyEnable(PVGASTATE pVGAState)
3289{
3290 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
3291 {
        int rc = pVGAState->pDrv->pfnVBVAEnable(pVGAState->pDrv, i, NULL, true);
3293 if (!RT_SUCCESS(rc))
3294 {
3295 WARN(("pfnVBVAEnable failed %Rrc\n", rc));
3296 for (uint32_t j = 0; j < i; j++)
3297 {
                pVGAState->pDrv->pfnVBVADisable(pVGAState->pDrv, j);
3299 }
3300
3301 return rc;
3302 }
3303 }
3304 return VINF_SUCCESS;
3305}
3306
3307/**
3308 * Called from vdmaVBVACtlThreadCreatedEnable() and vdmaVBVADisableProcess().
3309 */
3310static int vdmaVBVANotifyDisable(PVGASTATE pVGAState)
3311{
3312 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
3313 pVGAState->pDrv->pfnVBVADisable(pVGAState->pDrv, i);
3314 return VINF_SUCCESS;
3315}
3316
3317/**
3318 * Hook that is called by vboxVDMAWorkerThread when it starts.
3319 *
3320 * @thread VDMA
3321 */
3322static DECLCALLBACK(void) vdmaVBVACtlThreadCreatedEnable(struct VBOXVDMATHREAD *pThread, int rc,
3323 void *pvThreadContext, void *pvContext)
3324{
3325 RT_NOREF(pThread);
3326 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvThreadContext;
3327 VBVAEXHOSTCTL* pHCtl = (VBVAEXHOSTCTL*)pvContext;
3328
3329 if (RT_SUCCESS(rc))
3330 {
3331 rc = vboxVDMACrGuestCtlProcess(pVdma, pHCtl);
        /* rc == VINF_SUCCESS would mean the actual state change has occurred. */
3333 if (rc == VINF_SUCCESS)
3334 {
            /* We need to inform Main about the VBVA enable/disable. Main
             * expects notifications to be done from the main thread, so
             * submit the request there. */
3338 PVGASTATE pVGAState = pVdma->pVGAState;
3339
3340 if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
3341 vdmaVBVANotifyEnable(pVGAState);
3342 else
3343 vdmaVBVANotifyDisable(pVGAState);
3344 }
3345 else if (RT_FAILURE(rc))
3346 WARN(("vboxVDMACrGuestCtlProcess failed %Rrc\n", rc));
3347 }
3348 else
        WARN(("vdmaVBVACtlThreadCreatedEnable was passed %Rrc\n", rc));
3350
3351 VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pHCtl, rc);
3352}
3353
3354/**
3355 * Worker for vdmaVBVACtlEnableDisableSubmitInternal() and vdmaVBVACtlEnableSubmitSync().
3356 */
3357static int vdmaVBVACtlEnableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE RT_UNTRUSTED_VOLATILE_GUEST *pEnable, bool fPaused,
3358 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3359{
3360 int rc;
3361 VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva,
3362 fPaused ? VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED : VBVAEXHOSTCTL_TYPE_GHH_ENABLE);
3363 if (pHCtl)
3364 {
3365 pHCtl->u.cmd.pvCmd = pEnable;
3366 pHCtl->u.cmd.cbCmd = sizeof(*pEnable);
3367 pHCtl->pfnComplete = pfnComplete;
3368 pHCtl->pvComplete = pvComplete;
3369
3370 rc = VBoxVDMAThreadCreate(&pVdma->Thread, vboxVDMAWorkerThread, pVdma, vdmaVBVACtlThreadCreatedEnable, pHCtl);
3371 if (RT_SUCCESS(rc))
3372 return VINF_SUCCESS;
3373
3374 WARN(("VBoxVDMAThreadCreate failed %Rrc\n", rc));
3375 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3376 }
3377 else
3378 {
3379 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3380 rc = VERR_NO_MEMORY;
3381 }
3382
3383 return rc;
3384}
3385
3386/**
3387 * Worker for vboxVDMASaveLoadExecPerform().
3388 */
3389static int vdmaVBVACtlEnableSubmitSync(PVBOXVDMAHOST pVdma, uint32_t offVram, bool fPaused)
3390{
3391 VBVAENABLE Enable = {0};
3392 Enable.u32Flags = VBVA_F_ENABLE;
3393 Enable.u32Offset = offVram;
3394
3395 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
3396 Data.rc = VERR_NOT_IMPLEMENTED;
3397 int rc = RTSemEventCreate(&Data.hEvent);
3398 if (!RT_SUCCESS(rc))
3399 {
3400 WARN(("RTSemEventCreate failed %Rrc\n", rc));
3401 return rc;
3402 }
3403
3404 rc = vdmaVBVACtlEnableSubmitInternal(pVdma, &Enable, fPaused, vdmaVBVACtlSubmitSyncCompletion, &Data);
3405 if (RT_SUCCESS(rc))
3406 {
3407 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
3408 if (RT_SUCCESS(rc))
3409 {
3410 rc = Data.rc;
3411 if (!RT_SUCCESS(rc))
3412 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %Rrc\n", rc));
3413 }
3414 else
3415 WARN(("RTSemEventWait failed %Rrc\n", rc));
3416 }
3417 else
3418 WARN(("vdmaVBVACtlSubmit failed %Rrc\n", rc));
3419
3420 RTSemEventDestroy(Data.hEvent);
3421
3422 return rc;
3423}
3424
3425/**
3426 * Worker for vdmaVBVACtlEnableDisableSubmitInternal().
3427 */
3428static int vdmaVBVACtlDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE RT_UNTRUSTED_VOLATILE_GUEST *pEnable,
3429 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3430{
3431 int rc;
3432 if (VBoxVBVAExHSIsDisabled(&pVdma->CmdVbva))
3433 {
        WARN(("VBoxVBVAExHSIsDisabled: already disabled\n"));
3435 return VINF_SUCCESS;
3436 }
3437
3438 VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_GHH_DISABLE);
3439 if (!pHCtl)
3440 {
3441 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3442 return VERR_NO_MEMORY;
3443 }
3444
3445 pHCtl->u.cmd.pvCmd = pEnable;
3446 pHCtl->u.cmd.cbCmd = sizeof(*pEnable);
3447 rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_GUEST, pfnComplete, pvComplete);
3448 if (RT_SUCCESS(rc))
3449 return VINF_SUCCESS;
3450
3451 WARN(("vdmaVBVACtlSubmit failed rc %Rrc\n", rc));
3452 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3453 return rc;
3454}

/**
 * Worker for vdmaVBVACtlEnableDisableSubmit().
 */
static int vdmaVBVACtlEnableDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE RT_UNTRUSTED_VOLATILE_GUEST *pEnable,
                                                  PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
{
    bool fEnable = (pEnable->u32Flags & (VBVA_F_ENABLE | VBVA_F_DISABLE)) == VBVA_F_ENABLE;
    if (fEnable)
        return vdmaVBVACtlEnableSubmitInternal(pVdma, pEnable, false, pfnComplete, pvComplete);
    return vdmaVBVACtlDisableSubmitInternal(pVdma, pEnable, pfnComplete, pvComplete);
}

/**
 * Handler for vboxCmdVBVACmdCtl()/VBOXCMDVBVACTL_TYPE_ENABLE.
 */
static int vdmaVBVACtlEnableDisableSubmit(PVBOXVDMAHOST pVdma, VBOXCMDVBVA_CTL_ENABLE RT_UNTRUSTED_VOLATILE_GUEST *pEnable)
{
    VBoxSHGSMICommandMarkAsynchCompletion(&pEnable->Hdr);
    int rc = vdmaVBVACtlEnableDisableSubmitInternal(pVdma, &pEnable->Enable, vboxCmdVBVACmdCtlGuestCompletion, pVdma);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    WARN(("vdmaVBVACtlEnableDisableSubmitInternal failed %Rrc\n", rc));
    pEnable->Hdr.i32Result = rc;
    rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, &pEnable->Hdr);
    AssertRC(rc);
    return VINF_SUCCESS;
}

/**
 * @callback_method_impl{FNVBVAEXHOSTCTL_COMPLETE,
 *      Used by vdmaVBVACtlSubmitSync() and vdmaVBVACtlEnableSubmitSync().}
 */
static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
                                                          int rc, void *pvContext)
{
    RT_NOREF(pVbva, pCtl);
    VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION *)pvContext;
    pData->rc = rc;
    rc = RTSemEventSignal(pData->hEvent);
    if (!RT_SUCCESS(rc))
        WARN(("RTSemEventSignal failed %Rrc\n", rc));
}


/**
 * Submits the control command to the worker thread and waits for it to
 * complete, returning the completion status.
 */
static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL *pCtl, VBVAEXHOSTCTL_SOURCE enmSource)
{
    VDMA_VBVA_CTL_CYNC_COMPLETION Data;
    Data.rc = VERR_NOT_IMPLEMENTED;
    Data.hEvent = NIL_RTSEMEVENT;
    int rc = RTSemEventCreate(&Data.hEvent);
    if (RT_SUCCESS(rc))
    {
        rc = vdmaVBVACtlSubmit(pVdma, pCtl, enmSource, vdmaVBVACtlSubmitSyncCompletion, &Data);
        if (RT_SUCCESS(rc))
        {
            rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
            if (RT_SUCCESS(rc))
            {
                rc = Data.rc;
                if (!RT_SUCCESS(rc))
                    WARN(("vdmaVBVACtlSubmitSyncCompletion returned %Rrc\n", rc));
            }
            else
                WARN(("RTSemEventWait failed %Rrc\n", rc));
        }
        else
            Log(("vdmaVBVACtlSubmit failed %Rrc\n", rc));

        RTSemEventDestroy(Data.hEvent);
    }
    else
        WARN(("RTSemEventCreate failed %Rrc\n", rc));
    return rc;
}

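/**
 * @remarks The synchronous wrappers above (vdmaVBVACtlSubmitSync() and
 *          vdmaVBVACtlEnableSubmitSync()) share one pattern: the completion
 *          callback publishes the status in a stack-allocated context
 *          structure and then signals an event semaphore the submitter is
 *          waiting on.  A minimal sketch of that pattern using only IPRT
 *          primitives follows; submitExampleAsync() and its simplified
 *          callback signature are hypothetical stand-ins for the real
 *          submission routines:
 * @code
 *  static DECLCALLBACK(void) exampleCompletion(int rc, void *pvContext)
 *  {
 *      VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION *)pvContext;
 *      pData->rc = rc;                   // publish the status first,
 *      RTSemEventSignal(pData->hEvent);  // then wake the waiting submitter
 *  }
 *
 *  static int exampleSubmitSync(void)
 *  {
 *      VDMA_VBVA_CTL_CYNC_COMPLETION Data;
 *      Data.rc     = VERR_NOT_IMPLEMENTED; // overwritten by the callback
 *      Data.hEvent = NIL_RTSEMEVENT;
 *      int rc = RTSemEventCreate(&Data.hEvent);
 *      if (RT_SUCCESS(rc))
 *      {
 *          rc = submitExampleAsync(exampleCompletion, &Data); // hypothetical
 *          if (RT_SUCCESS(rc))
 *          {
 *              rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
 *              if (RT_SUCCESS(rc))
 *                  rc = Data.rc;         // status set by exampleCompletion()
 *          }
 *          RTSemEventDestroy(Data.hEvent);
 *      }
 *      return rc;
 *  }
 * @endcode
 */
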
/**
 * Worker for vboxVDMASaveStateExecPrep().
 */
static int vdmaVBVAPause(PVBOXVDMAHOST pVdma)
{
    VBVAEXHOSTCTL Ctl;
    Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE;
    return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
}

/**
 * Worker for vboxVDMASaveLoadExecPerform() and vboxVDMASaveStateExecDone().
 */
static int vdmaVBVAResume(PVBOXVDMAHOST pVdma)
{
    VBVAEXHOSTCTL Ctl;
    Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME;
    return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
}

/**
 * Worker for vboxCmdVBVACmdSubmit(), vboxCmdVBVACmdFlush() and vboxCmdVBVATimerRefresh().
 */
static int vboxVDMACmdSubmitPerform(struct VBOXVDMAHOST *pVdma)
{
    int rc = VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva);
    switch (rc)
    {
        case VINF_SUCCESS:
            return VBoxVDMAThreadEventNotify(&pVdma->Thread);
        case VINF_ALREADY_INITIALIZED:
        case VINF_EOF:
        case VERR_INVALID_STATE:
            return VINF_SUCCESS;
        default:
            Assert(!RT_FAILURE(rc));
            return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
    }
}

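/* Note: in vboxVDMACmdSubmitPerform() above, VINF_SUCCESS from
 * VBoxVBVAExHSCheckCommands() is taken to mean that commands are pending and
 * the worker thread needs waking, while the other informational statuses
 * appear to mean there is nothing to process right now; any unexpected
 * status is turned into VERR_INTERNAL_ERROR. */
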

/**
 * @interface_method_impl{PDMIDISPLAYVBVACALLBACKS,pfnCrCtlSubmit}
 */
int vboxCmdVBVACmdHostCtl(PPDMIDISPLAYVBVACALLBACKS pInterface,
                          struct VBOXCRCMDCTL *pCmd,
                          uint32_t cbCmd,
                          PFNCRCTLCOMPLETION pfnCompletion,
                          void *pvCompletion)
{
    PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
    struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
    if (pVdma == NULL)
        return VERR_INVALID_STATE;
    pCmd->CalloutList.List.pNext = NULL;
    return vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, pfnCompletion, pvCompletion);
}

/**
 * Argument package from vboxCmdVBVACmdHostCtlSync to vboxCmdVBVACmdHostCtlSyncCb.
 */
typedef struct VBOXCMDVBVA_CMDHOSTCTL_SYNC
{
    struct VBOXVDMAHOST *pVdma;
    uint32_t fProcessing;
    int rc;
} VBOXCMDVBVA_CMDHOSTCTL_SYNC;

/**
 * @interface_method_impl{FNCRCTLCOMPLETION, Used by vboxCmdVBVACmdHostCtlSync.}
 */
static DECLCALLBACK(void) vboxCmdVBVACmdHostCtlSyncCb(struct VBOXCRCMDCTL *pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
{
    RT_NOREF(pCmd, cbCmd);
    VBOXCMDVBVA_CMDHOSTCTL_SYNC *pData = (VBOXCMDVBVA_CMDHOSTCTL_SYNC *)pvCompletion;

    pData->rc = rc;

    struct VBOXVDMAHOST *pVdma = pData->pVdma;

    ASMAtomicIncS32(&pVdma->i32cHostCrCtlCompleted);

    pData->fProcessing = 0;

    RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
}

/**
 * @callback_method_impl{FNVBOXCRCLIENT_CALLOUT, Worker for vboxVDMACrCtlHgsmiSetup }
 *
 * @note r=bird: not to be confused with the callout function below. sigh.
 */
static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd,
                                               VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb)
{
    pEntry->pfnCb = pfnCb;
    int rc = RTCritSectEnter(&pVdma->CalloutCritSect);
    if (RT_SUCCESS(rc))
    {
        RTListAppend(&pCmd->CalloutList.List, &pEntry->Node);
        RTCritSectLeave(&pVdma->CalloutCritSect);

        RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
    }
    else
        WARN(("RTCritSectEnter failed %Rrc\n", rc));

    return rc;
}


/**
 * Worker for vboxCmdVBVACmdHostCtlSync.
 */
static int vboxCmdVBVACmdCalloutProcess(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd)
{
    int rc = VINF_SUCCESS;
    for (;;)
    {
        rc = RTCritSectEnter(&pVdma->CalloutCritSect);
        if (RT_SUCCESS(rc))
        {
            VBOXCRCMDCTL_CALLOUT_LISTENTRY* pEntry = RTListGetFirst(&pCmd->CalloutList.List, VBOXCRCMDCTL_CALLOUT_LISTENTRY, Node);
            if (pEntry)
                RTListNodeRemove(&pEntry->Node);
            RTCritSectLeave(&pVdma->CalloutCritSect);

            if (!pEntry)
                break;

            pEntry->pfnCb(pEntry);
        }
        else
        {
            WARN(("RTCritSectEnter failed %Rrc\n", rc));
            break;
        }
    }

    return rc;
}

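/* Note: vboxCmdVBVACmdCalloutProcess() above pops each list entry while
 * holding CalloutCritSect but invokes pfnCb outside of it, presumably so a
 * callout callback can queue further entries via vboxCmdVBVACmdCallout()
 * without deadlocking on the critical section. */
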
/**
 * @interface_method_impl{PDMIDISPLAYVBVACALLBACKS,pfnCrCtlSubmitSync}
 */
DECLCALLBACK(int) vboxCmdVBVACmdHostCtlSync(PPDMIDISPLAYVBVACALLBACKS pInterface, struct VBOXCRCMDCTL *pCmd, uint32_t cbCmd)
{
    PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
    struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
    if (pVdma == NULL)
        return VERR_INVALID_STATE;

    VBOXCMDVBVA_CMDHOSTCTL_SYNC Data;
    Data.pVdma = pVdma;
    Data.fProcessing = 1;
    Data.rc = VERR_INTERNAL_ERROR;
    RTListInit(&pCmd->CalloutList.List);
    int rc = vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, vboxCmdVBVACmdHostCtlSyncCb, &Data);
    if (!RT_SUCCESS(rc))
    {
        WARN(("vdmaVBVACtlOpaqueHostSubmit failed %Rrc", rc));
        return rc;
    }

    while (Data.fProcessing)
    {
        /* Poll infrequently to make sure no completed message has been missed. */
        RTSemEventMultiWait(pVdma->HostCrCtlCompleteEvent, 500);

        vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);

        if (Data.fProcessing)
            RTThreadYield();
    }

    /* Process any callouts that may have arrived after the completion was signalled. */
    vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);

    /* 'Our' message has been processed, so we should reset the semaphore.
     * It is still possible that another message has been processed and the
     * semaphore has been signalled again; reset it only if no other messages
     * have completed. */
    int32_t c = ASMAtomicDecS32(&pVdma->i32cHostCrCtlCompleted);
    Assert(c >= 0);
    if (!c)
        RTSemEventMultiReset(pVdma->HostCrCtlCompleteEvent);

    rc = Data.rc;
    if (!RT_SUCCESS(rc))
        WARN(("host call failed %Rrc", rc));

    return rc;
}

/**
 * Handler for VBVA_CMDVBVA_CTL, see vbvaChannelHandler().
 *
 * @returns VBox status code
 * @param   pVGAState   The VGA state.
 * @param   pCtl        The control command.
 * @param   cbCtl       The size of it.  This is at least
 *                      sizeof(VBOXCMDVBVA_CTL).
 * @thread  EMT
 */
int vboxCmdVBVACmdCtl(PVGASTATE pVGAState, VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_GUEST *pCtl, uint32_t cbCtl)
{
    struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
    uint32_t uType = pCtl->u32Type;
    RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();

    if (   uType == VBOXCMDVBVACTL_TYPE_3DCTL
        || uType == VBOXCMDVBVACTL_TYPE_RESIZE
        || uType == VBOXCMDVBVACTL_TYPE_ENABLE)
    {
        RT_UNTRUSTED_VALIDATED_FENCE();

        switch (uType)
        {
            case VBOXCMDVBVACTL_TYPE_3DCTL:
                return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, pCtl, cbCtl);

            case VBOXCMDVBVACTL_TYPE_RESIZE:
                return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_RESIZE, pCtl, cbCtl);

            case VBOXCMDVBVACTL_TYPE_ENABLE:
                ASSERT_GUEST_BREAK(cbCtl == sizeof(VBOXCMDVBVA_CTL_ENABLE));
                return vdmaVBVACtlEnableDisableSubmit(pVdma, (VBOXCMDVBVA_CTL_ENABLE RT_UNTRUSTED_VOLATILE_GUEST *)pCtl);

            default:
                AssertFailed();
        }
    }

    pCtl->i32Result = VERR_INVALID_PARAMETER;
    int rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
    AssertRC(rc);
    return VINF_SUCCESS;
}

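/**
 * @remarks vboxCmdVBVACmdCtl() above follows the usual pattern for handling
 *          untrusted guest input: copy the field out of the shared, guest
 *          writable buffer, fence, validate, fence again, and only then act
 *          on it.  A minimal sketch of the same pattern; the
 *          EXAMPLECTL_TYPE_FOO constant is hypothetical:
 * @code
 *  uint32_t uType = pCtl->u32Type;          // single copy from guest memory
 *  RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();   // no re-reads of pCtl->u32Type below
 *  if (uType == EXAMPLECTL_TYPE_FOO)        // validate the copied value
 *  {
 *      RT_UNTRUSTED_VALIDATED_FENCE();      // block speculation past the check
 *      // ... safe to dispatch on uType here ...
 *  }
 * @endcode
 */
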
/**
 * Handler for VBVA_CMDVBVA_SUBMIT, see vbvaChannelHandler().
 *
 * @thread EMT
 */
int vboxCmdVBVACmdSubmit(PVGASTATE pVGAState)
{
    if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
    {
        WARN(("vdma VBVA is disabled\n"));
        return VERR_INVALID_STATE;
    }

    return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
}

/**
 * Handler for VBVA_CMDVBVA_FLUSH, see vbvaChannelHandler().
 *
 * @thread EMT
 */
int vboxCmdVBVACmdFlush(PVGASTATE pVGAState)
{
    WARN(("flush\n"));
    if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
    {
        WARN(("vdma VBVA is disabled\n"));
        return VERR_INVALID_STATE;
    }
    return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
}

/**
 * Called from vgaTimerRefresh().
 */
void vboxCmdVBVATimerRefresh(PVGASTATE pVGAState)
{
    if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
        return;
    vboxVDMACmdSubmitPerform(pVGAState->pVdma);
}

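/**
 * Checks whether the command VBVA (CmdVbva) interface is enabled.
 */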
bool vboxCmdVBVAIsEnabled(PVGASTATE pVGAState)
{
    return VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva);
}

#endif /* VBOX_WITH_CRHGSMI */


/*
 *
 *
 * Saved state.
 * Saved state.
 * Saved state.
 *
 *
 */

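/**
 * Prepares for a state save: pauses command VBVA processing, falling back to
 * a SAVESTATE_BEGIN chromium control when pausing reports VERR_INVALID_STATE
 * (i.e. the command VBVA is not active).
 */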
int vboxVDMASaveStateExecPrep(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    int rc = vdmaVBVAPause(pVdma);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    if (rc != VERR_INVALID_STATE)
    {
        WARN(("vdmaVBVAPause failed %Rrc\n", rc));
        return rc;
    }

# ifdef DEBUG_misha
    WARN(("debug prep"));
# endif

    PVGASTATE pVGAState = pVdma->pVGAState;
    PVBOXVDMACMD_CHROMIUM_CTL pCmd;
    pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_BEGIN, sizeof(*pCmd));
    if (pCmd)
    {
        rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof(*pCmd));
        AssertRC(rc);
        if (RT_SUCCESS(rc))
            rc = vboxVDMACrCtlGetRc(pCmd);
        vboxVDMACrCtlRelease(pCmd);
        return rc;
    }
    return VERR_NO_MEMORY;
#else
    RT_NOREF(pVdma);
    return VINF_SUCCESS;
#endif
}

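/**
 * Counterpart to vboxVDMASaveStateExecPrep(): resumes command VBVA
 * processing, falling back to a SAVESTATE_END chromium control when resuming
 * reports VERR_INVALID_STATE.
 */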
int vboxVDMASaveStateExecDone(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    int rc = vdmaVBVAResume(pVdma);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    if (rc != VERR_INVALID_STATE)
    {
        WARN(("vdmaVBVAResume failed %Rrc\n", rc));
        return rc;
    }

# ifdef DEBUG_misha
    WARN(("debug done"));
# endif

    PVGASTATE pVGAState = pVdma->pVGAState;
    PVBOXVDMACMD_CHROMIUM_CTL pCmd;
    pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_END, sizeof(*pCmd));
    Assert(pCmd);
    if (pCmd)
    {
        rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof(*pCmd));
        AssertRC(rc);
        if (RT_SUCCESS(rc))
            rc = vboxVDMACrCtlGetRc(pCmd);
        vboxVDMACrCtlRelease(pCmd);
        return rc;
    }
    return VERR_NO_MEMORY;
#else
    RT_NOREF(pVdma);
    return VINF_SUCCESS;
#endif
}

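/**
 * Saves the VDMA state: writes the offset of the VBVA buffer into VRAM (or
 * UINT32_MAX when the command VBVA is disabled), then has the worker thread
 * save the rest via a HH_SAVESTATE control.
 */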
int vboxVDMASaveStateExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
{
    int rc;
#ifndef VBOX_WITH_CRHGSMI
    RT_NOREF(pVdma, pSSM);

#else
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
#endif
    {
        rc = SSMR3PutU32(pSSM, UINT32_MAX);
        AssertRCReturn(rc, rc);
        return VINF_SUCCESS;
    }

#ifdef VBOX_WITH_CRHGSMI
    PVGASTATE pVGAState = pVdma->pVGAState;
    uint8_t *pu8VramBase = pVGAState->vram_ptrR3;

    rc = SSMR3PutU32(pSSM, (uint32_t)((uintptr_t)pVdma->CmdVbva.pVBVA - (uintptr_t)pu8VramBase));
    AssertRCReturn(rc, rc);

    VBVAEXHOSTCTL HCtl;
    HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_SAVESTATE;
    HCtl.u.state.pSSM = pSSM;
    HCtl.u.state.u32Version = 0;
    return vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
#endif
}

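/**
 * Loads the state written by vboxVDMASaveStateExecPerform(): if a valid VBVA
 * offset was saved, re-enables the command VBVA in paused mode at that
 * offset, hands the rest to the worker thread via a HH_LOADSTATE control,
 * and then resumes.
 */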
int vboxVDMASaveLoadExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32;
    int rc = SSMR3GetU32(pSSM, &u32);
    AssertLogRelRCReturn(rc, rc);

    if (u32 != UINT32_MAX)
    {
#ifdef VBOX_WITH_CRHGSMI
        rc = vdmaVBVACtlEnableSubmitSync(pVdma, u32, true);
        AssertLogRelRCReturn(rc, rc);

        Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_ESTATE_PAUSED);

        VBVAEXHOSTCTL HCtl;
        HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_LOADSTATE;
        HCtl.u.state.pSSM = pSSM;
        HCtl.u.state.u32Version = u32Version;
        rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
        AssertLogRelRCReturn(rc, rc);

        rc = vdmaVBVAResume(pVdma);
        AssertLogRelRCReturn(rc, rc);

        return VINF_SUCCESS;
#else
        RT_NOREF(pVdma, u32Version);
        WARN(("Unsupported VBVACtl info!\n"));
        return VERR_VERSION_MISMATCH;
#endif
    }

    return VINF_SUCCESS;
}

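/**
 * Called once the saved state has been fully loaded; notifies the worker
 * thread via a HH_LOADSTATE_DONE control (submitted with no completion
 * callback, so the control is simply freed once processed).
 */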
int vboxVDMASaveLoadDone(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
        return VINF_SUCCESS;

/** @todo r=bird: BTW. would be great if you put in a couple of comments here and there explaining what
 *       the purpose of this code is. */
    VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE);
    if (!pHCtl)
    {
        WARN(("VBoxVBVAExHCtlCreate failed\n"));
        return VERR_NO_MEMORY;
    }

    /* sanity */
    pHCtl->u.cmd.pvCmd = NULL;
    pHCtl->u.cmd.cbCmd = 0;

    /* NULL completion will just free the ctl up */
    int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_HOST, NULL, NULL);
    if (RT_FAILURE(rc))
    {
        Log(("vdmaVBVACtlSubmit failed %Rrc\n", rc));
        VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
        return rc;
    }
#else
    RT_NOREF(pVdma);
#endif
    return VINF_SUCCESS;
}