VirtualBox

source: vbox/trunk/src/VBox/Devices/Graphics/DevVGA_VDMA.cpp@ 69848

Last change on this file since 69848 was 69848, checked in by vboxsync, 7 years ago

Devices/Graphics: VDMA: ability to work when SharedOpenGL HGCM service is disabled (second try).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 117.2 KB
Line 
1/* $Id: DevVGA_VDMA.cpp 69848 2017-11-27 18:08:01Z vboxsync $ */
2/** @file
3 * Video DMA (VDMA) support.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEV_VGA
23#include <VBox/VMMDev.h>
24#include <VBox/vmm/pdmdev.h>
25#include <VBox/vmm/pgm.h>
26#include <VBoxVideo.h>
27#include <iprt/semaphore.h>
28#include <iprt/thread.h>
29#include <iprt/mem.h>
30#include <iprt/asm.h>
31#include <iprt/list.h>
32#include <iprt/param.h>
33
34#include "DevVGA.h"
35#include "HGSMI/SHGSMIHost.h"
36
37#include <VBoxVideo3D.h>
38#include <VBoxVideoHost3D.h>
39
40#ifdef DEBUG_misha
41# define VBOXVDBG_MEMCACHE_DISABLE
42#endif
43
44#ifndef VBOXVDBG_MEMCACHE_DISABLE
45# include <iprt/memcache.h>
46#endif
47
48
49/*********************************************************************************************************************************
50* Defined Constants And Macros *
51*********************************************************************************************************************************/
52#ifdef DEBUG_misha
53# define WARN_BP() do { AssertFailed(); } while (0)
54#else
55# define WARN_BP() do { } while (0)
56#endif
57#define WARN(_msg) do { \
58 LogRel(_msg); \
59 WARN_BP(); \
60 } while (0)
61
62#define VBOXVDMATHREAD_STATE_TERMINATED 0
63#define VBOXVDMATHREAD_STATE_CREATING 1
64#define VBOXVDMATHREAD_STATE_CREATED 3
65#define VBOXVDMATHREAD_STATE_TERMINATING 4
66
67
68/*********************************************************************************************************************************
69* Structures and Typedefs *
70*********************************************************************************************************************************/
71struct VBOXVDMATHREAD;
72
73typedef DECLCALLBACKPTR(void, PFNVBOXVDMATHREAD_CHANGED)(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvChangeContext);
74
75#ifdef VBOX_WITH_CRHGSMI
76static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd, VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb);
77#endif
78
79
80typedef struct VBOXVDMATHREAD
81{
82 RTTHREAD hWorkerThread;
83 RTSEMEVENT hEvent;
84 volatile uint32_t u32State;
85 PFNVBOXVDMATHREAD_CHANGED pfnChanged;
86 void *pvChanged;
87} VBOXVDMATHREAD, *PVBOXVDMATHREAD;
88
89
90/* state transformations:
91 *
92 * submitter | processor
93 *
94 * LISTENING ---> PROCESSING
95 *
96 * */
97#define VBVAEXHOSTCONTEXT_STATE_LISTENING 0
98#define VBVAEXHOSTCONTEXT_STATE_PROCESSING 1
99
100#define VBVAEXHOSTCONTEXT_ESTATE_DISABLED -1
101#define VBVAEXHOSTCONTEXT_ESTATE_PAUSED 0
102#define VBVAEXHOSTCONTEXT_ESTATE_ENABLED 1
103
104typedef struct VBVAEXHOSTCONTEXT
105{
106 VBVABUFFER *pVBVA;
107 volatile int32_t i32State;
108 volatile int32_t i32EnableState;
109 volatile uint32_t u32cCtls;
110 /* critical section for accessing ctl lists */
111 RTCRITSECT CltCritSect;
112 RTLISTANCHOR GuestCtlList;
113 RTLISTANCHOR HostCtlList;
114#ifndef VBOXVDBG_MEMCACHE_DISABLE
115 RTMEMCACHE CtlCache;
116#endif
117} VBVAEXHOSTCONTEXT;
118
/** Control command types; HH_* originate from the host, GHH_* from the guest. */
typedef enum
{
    VBVAEXHOSTCTL_TYPE_UNDEFINED = 0,
    VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE,
    VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME,
    VBVAEXHOSTCTL_TYPE_HH_SAVESTATE,
    VBVAEXHOSTCTL_TYPE_HH_LOADSTATE,
    VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE,
    VBVAEXHOSTCTL_TYPE_HH_BE_OPAQUE,
    VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD,
    VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE,
    VBVAEXHOSTCTL_TYPE_GHH_ENABLE,
    VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED,
    VBVAEXHOSTCTL_TYPE_GHH_DISABLE,
    VBVAEXHOSTCTL_TYPE_GHH_RESIZE
} VBVAEXHOSTCTL_TYPE;
135
136struct VBVAEXHOSTCTL;
137
138typedef DECLCALLBACKPTR(void, PFNVBVAEXHOSTCTL_COMPLETE)(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvComplete);
139
140typedef struct VBVAEXHOSTCTL
141{
142 RTLISTNODE Node;
143 VBVAEXHOSTCTL_TYPE enmType;
144 union
145 {
146 struct
147 {
148 uint8_t * pu8Cmd;
149 uint32_t cbCmd;
150 } cmd;
151
152 struct
153 {
154 PSSMHANDLE pSSM;
155 uint32_t u32Version;
156 } state;
157 } u;
158 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete;
159 void *pvComplete;
160} VBVAEXHOSTCTL;
161
/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
 * but can be called together with other VBoxVBVAExS** (submitter) functions, except Init/Start/Term apparently.
 * They can only be called by the processor, i.e. the entity that acquired the processor state by a direct or indirect call to VBoxVBVAExHSCheckCommands;
 * see more detailed comments in the headers for the function definitions. */
/** What vboxVBVAExHPDataGet/VBoxVBVAExHPDataGet returned in *ppCmd. */
typedef enum
{
    VBVAEXHOST_DATA_TYPE_NO_DATA = 0,   /**< Nothing available; processor state released. */
    VBVAEXHOST_DATA_TYPE_CMD,           /**< Raw command data from the VBVA ring. */
    VBVAEXHOST_DATA_TYPE_HOSTCTL,       /**< A host-submitted VBVAEXHOSTCTL. */
    VBVAEXHOST_DATA_TYPE_GUESTCTL       /**< A guest-submitted VBVAEXHOSTCTL. */
} VBVAEXHOST_DATA_TYPE;
173
174
#ifdef VBOX_WITH_CRHGSMI
/** Per-source screen info plus the target-screen bitmap. */
typedef struct VBOXVDMA_SOURCE
{
    VBVAINFOSCREEN Screen;
    VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
} VBOXVDMA_SOURCE;
#endif
182
183typedef struct VBOXVDMAHOST
184{
185 PHGSMIINSTANCE pHgsmi;
186 PVGASTATE pVGAState;
187#ifdef VBOX_WITH_CRHGSMI
188 VBVAEXHOSTCONTEXT CmdVbva;
189 VBOXVDMATHREAD Thread;
190 VBOXCRCMD_SVRINFO CrSrvInfo;
191 VBVAEXHOSTCTL* pCurRemainingHostCtl;
192 RTSEMEVENTMULTI HostCrCtlCompleteEvent;
193 int32_t volatile i32cHostCrCtlCompleted;
194 RTCRITSECT CalloutCritSect;
195// VBOXVDMA_SOURCE aSources[VBOX_VIDEO_MAX_SCREENS];
196#endif
197#ifdef VBOX_VDMA_WITH_WATCHDOG
198 PTMTIMERR3 WatchDogTimer;
199#endif
200} VBOXVDMAHOST, *PVBOXVDMAHOST;
201
202
203/*********************************************************************************************************************************
204* Internal Functions *
205*********************************************************************************************************************************/
206#ifdef VBOX_WITH_CRHGSMI
207static DECLCALLBACK(int) vdmaVBVANotifyDisable(PVGASTATE pVGAState);
208static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd);
209
210static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd);
211static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc);
212
/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
 * but can be called concurrently with itself as well as with other VBoxVBVAEx** functions, except Init/Start/Term apparently. */
215static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva);
216
217static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva);
218static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA);
219static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva);
220static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva);
221static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM);
222static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version);
223
224#endif /* VBOX_WITH_CRHGSMI */
225
226
227
228#ifdef VBOX_WITH_CRHGSMI
229
230static VBVAEXHOSTCTL* VBoxVBVAExHCtlAlloc(VBVAEXHOSTCONTEXT *pCmdVbva)
231{
232# ifndef VBOXVDBG_MEMCACHE_DISABLE
233 return (VBVAEXHOSTCTL*)RTMemCacheAlloc(pCmdVbva->CtlCache);
234# else
235 return (VBVAEXHOSTCTL*)RTMemAlloc(sizeof (VBVAEXHOSTCTL));
236# endif
237}
238
239static void VBoxVBVAExHCtlFree(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl)
240{
241# ifndef VBOXVDBG_MEMCACHE_DISABLE
242 RTMemCacheFree(pCmdVbva->CtlCache, pCtl);
243# else
244 RTMemFree(pCtl);
245# endif
246}
247
248static VBVAEXHOSTCTL *VBoxVBVAExHCtlCreate(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL_TYPE enmType)
249{
250 VBVAEXHOSTCTL* pCtl = VBoxVBVAExHCtlAlloc(pCmdVbva);
251 if (!pCtl)
252 {
253 WARN(("VBoxVBVAExHCtlAlloc failed\n"));
254 return NULL;
255 }
256
257 pCtl->enmType = enmType;
258 return pCtl;
259}
260
261static int vboxVBVAExHSProcessorAcquire(struct VBVAEXHOSTCONTEXT *pCmdVbva)
262{
263 Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);
264
265 if (ASMAtomicCmpXchgS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_PROCESSING, VBVAEXHOSTCONTEXT_STATE_LISTENING))
266 return VINF_SUCCESS;
267 return VERR_SEM_BUSY;
268}
269
270static VBVAEXHOSTCTL* vboxVBVAExHPCheckCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, bool *pfHostCtl, bool fHostOnlyMode)
271{
272 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
273
274 if (!fHostOnlyMode && !ASMAtomicUoReadU32(&pCmdVbva->u32cCtls))
275 return NULL;
276
277 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
278 if (RT_SUCCESS(rc))
279 {
280 VBVAEXHOSTCTL* pCtl = RTListGetFirst(&pCmdVbva->HostCtlList, VBVAEXHOSTCTL, Node);
281 if (pCtl)
282 *pfHostCtl = true;
283 else if (!fHostOnlyMode)
284 {
285 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
286 {
287 pCtl = RTListGetFirst(&pCmdVbva->GuestCtlList, VBVAEXHOSTCTL, Node);
288 /* pCtl can not be null here since pCmdVbva->u32cCtls is not null,
289 * and there are no HostCtl commands*/
290 Assert(pCtl);
291 *pfHostCtl = false;
292 }
293 }
294
295 if (pCtl)
296 {
297 RTListNodeRemove(&pCtl->Node);
298 ASMAtomicDecU32(&pCmdVbva->u32cCtls);
299 }
300
301 RTCritSectLeave(&pCmdVbva->CltCritSect);
302
303 return pCtl;
304 }
305 else
306 WARN(("RTCritSectEnter failed %d\n", rc));
307
308 return NULL;
309}
310
311static VBVAEXHOSTCTL* VBoxVBVAExHPCheckHostCtlOnDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
312{
313 bool fHostCtl = false;
314 VBVAEXHOSTCTL* pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostCtl, true);
315 Assert(!pCtl || fHostCtl);
316 return pCtl;
317}
318
319static int VBoxVBVAExHPPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
320{
321 if (pCmdVbva->i32EnableState < VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
322 {
323 WARN(("Invalid state\n"));
324 return VERR_INVALID_STATE;
325 }
326
327 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
328 return VINF_SUCCESS;
329}
330
331static int VBoxVBVAExHPResume(struct VBVAEXHOSTCONTEXT *pCmdVbva)
332{
333 if (pCmdVbva->i32EnableState != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
334 {
335 WARN(("Invalid state\n"));
336 return VERR_INVALID_STATE;
337 }
338
339 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
340 return VINF_SUCCESS;
341}
342
343static bool vboxVBVAExHPCheckProcessCtlInternal(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl)
344{
345 switch (pCtl->enmType)
346 {
347 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE:
348 {
349 VBoxVBVAExHPPause(pCmdVbva);
350 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
351 return true;
352 }
353 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME:
354 {
355 VBoxVBVAExHPResume(pCmdVbva);
356 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
357 return true;
358 }
359 default:
360 return false;
361 }
362}
363
364static void vboxVBVAExHPProcessorRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
365{
366 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
367
368 ASMAtomicWriteS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
369}
370
371static void vboxVBVAExHPHgEventSet(struct VBVAEXHOSTCONTEXT *pCmdVbva)
372{
373 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
374 if (pCmdVbva->pVBVA)
375 ASMAtomicOrU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, VBVA_F_STATE_PROCESSING);
376}
377
378static void vboxVBVAExHPHgEventClear(struct VBVAEXHOSTCONTEXT *pCmdVbva)
379{
380 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
381 if (pCmdVbva->pVBVA)
382 ASMAtomicAndU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, ~VBVA_F_STATE_PROCESSING);
383}
384
385static int vboxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
386{
387 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
388 Assert(pCmdVbva->i32EnableState > VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
389
390 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
391
392 uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
393 uint32_t indexRecordFree = pVBVA->indexRecordFree;
394
395 Log(("first = %d, free = %d\n",
396 indexRecordFirst, indexRecordFree));
397
398 if (indexRecordFirst == indexRecordFree)
399 {
400 /* No records to process. Return without assigning output variables. */
401 return VINF_EOF;
402 }
403
404 uint32_t cbRecordCurrent = ASMAtomicReadU32(&pVBVA->aRecords[indexRecordFirst].cbRecord);
405
406 /* A new record need to be processed. */
407 if (cbRecordCurrent & VBVA_F_RECORD_PARTIAL)
408 {
409 /* the record is being recorded, try again */
410 return VINF_TRY_AGAIN;
411 }
412
413 uint32_t cbRecord = cbRecordCurrent & ~VBVA_F_RECORD_PARTIAL;
414
415 if (!cbRecord)
416 {
417 /* the record is being recorded, try again */
418 return VINF_TRY_AGAIN;
419 }
420
421 /* we should not get partial commands here actually */
422 Assert(cbRecord);
423
424 /* The size of largest contiguous chunk in the ring biffer. */
425 uint32_t u32BytesTillBoundary = pVBVA->cbData - pVBVA->off32Data;
426
427 /* The pointer to data in the ring buffer. */
428 uint8_t *pSrc = &pVBVA->au8Data[pVBVA->off32Data];
429
430 /* Fetch or point the data. */
431 if (u32BytesTillBoundary >= cbRecord)
432 {
433 /* The command does not cross buffer boundary. Return address in the buffer. */
434 *ppCmd = pSrc;
435 *pcbCmd = cbRecord;
436 return VINF_SUCCESS;
437 }
438
439 LogRel(("CmdVbva: cross-bound writes unsupported\n"));
440 return VERR_INVALID_STATE;
441}
442
443static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd)
444{
445 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
446 pVBVA->off32Data = (pVBVA->off32Data + cbCmd) % pVBVA->cbData;
447
448 pVBVA->indexRecordFirst = (pVBVA->indexRecordFirst + 1) % RT_ELEMENTS(pVBVA->aRecords);
449}
450
451static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc)
452{
453 if (pCtl->pfnComplete)
454 pCtl->pfnComplete(pCmdVbva, pCtl, rc, pCtl->pvComplete);
455 else
456 VBoxVBVAExHCtlFree(pCmdVbva, pCtl);
457}
458
459
460static VBVAEXHOST_DATA_TYPE vboxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
461{
462 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
463 VBVAEXHOSTCTL*pCtl;
464 bool fHostClt;
465
466 for (;;)
467 {
468 pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostClt, false);
469 if (pCtl)
470 {
471 if (fHostClt)
472 {
473 if (!vboxVBVAExHPCheckProcessCtlInternal(pCmdVbva, pCtl))
474 {
475 *ppCmd = (uint8_t*)pCtl;
476 *pcbCmd = sizeof (*pCtl);
477 return VBVAEXHOST_DATA_TYPE_HOSTCTL;
478 }
479 continue;
480 }
481 else
482 {
483 *ppCmd = (uint8_t*)pCtl;
484 *pcbCmd = sizeof (*pCtl);
485 return VBVAEXHOST_DATA_TYPE_GUESTCTL;
486 }
487 }
488
489 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) <= VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
490 return VBVAEXHOST_DATA_TYPE_NO_DATA;
491
492 int rc = vboxVBVAExHPCmdGet(pCmdVbva, ppCmd, pcbCmd);
493 switch (rc)
494 {
495 case VINF_SUCCESS:
496 return VBVAEXHOST_DATA_TYPE_CMD;
497 case VINF_EOF:
498 return VBVAEXHOST_DATA_TYPE_NO_DATA;
499 case VINF_TRY_AGAIN:
500 RTThreadSleep(1);
501 continue;
502 default:
503 /* this is something really unexpected, i.e. most likely guest has written something incorrect to the VBVA buffer */
504 WARN(("Warning: vboxVBVAExHCmdGet returned unexpected status %d\n", rc));
505 return VBVAEXHOST_DATA_TYPE_NO_DATA;
506 }
507 }
508 /* not reached */
509}
510
511static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
512{
513 VBVAEXHOST_DATA_TYPE enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
514 if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
515 {
516 vboxVBVAExHPHgEventClear(pCmdVbva);
517 vboxVBVAExHPProcessorRelease(pCmdVbva);
518 /* we need to prevent racing between us clearing the flag and command check/submission thread, i.e.
519 * 1. we check the queue -> and it is empty
520 * 2. submitter adds command to the queue
521 * 3. submitter checks the "processing" -> and it is true , thus it does not submit a notification
522 * 4. we clear the "processing" state
523 * 5. ->here we need to re-check the queue state to ensure we do not leak the notification of the above command
524 * 6. if the queue appears to be not-empty set the "processing" state back to "true"
525 **/
526 int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
527 if (RT_SUCCESS(rc))
528 {
529 /* we are the processor now */
530 enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
531 if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
532 {
533 vboxVBVAExHPProcessorRelease(pCmdVbva);
534 return VBVAEXHOST_DATA_TYPE_NO_DATA;
535 }
536
537 vboxVBVAExHPHgEventSet(pCmdVbva);
538 }
539 }
540
541 return enmType;
542}
543
544DECLINLINE(bool) vboxVBVAExHSHasCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
545{
546 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
547
548 if (pVBVA)
549 {
550 uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
551 uint32_t indexRecordFree = pVBVA->indexRecordFree;
552
553 if (indexRecordFirst != indexRecordFree)
554 return true;
555 }
556
557 return !!ASMAtomicReadU32(&pCmdVbva->u32cCtls);
558}
559
560/** Checks whether the new commands are ready for processing
561 * @returns
562 * VINF_SUCCESS - there are commands are in a queue, and the given thread is now the processor (i.e. typically it would delegate processing to a worker thread)
563 * VINF_EOF - no commands in a queue
564 * VINF_ALREADY_INITIALIZED - another thread already processing the commands
565 * VERR_INVALID_STATE - the VBVA is paused or pausing */
566static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
567{
568 int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
569 if (RT_SUCCESS(rc))
570 {
571 /* we are the processor now */
572 if (vboxVBVAExHSHasCommands(pCmdVbva))
573 {
574 vboxVBVAExHPHgEventSet(pCmdVbva);
575 return VINF_SUCCESS;
576 }
577
578 vboxVBVAExHPProcessorRelease(pCmdVbva);
579 return VINF_EOF;
580 }
581 if (rc == VERR_SEM_BUSY)
582 return VINF_ALREADY_INITIALIZED;
583 return VERR_INVALID_STATE;
584}
585
586static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva)
587{
588 memset(pCmdVbva, 0, sizeof (*pCmdVbva));
589 int rc = RTCritSectInit(&pCmdVbva->CltCritSect);
590 if (RT_SUCCESS(rc))
591 {
592# ifndef VBOXVDBG_MEMCACHE_DISABLE
593 rc = RTMemCacheCreate(&pCmdVbva->CtlCache, sizeof (VBVAEXHOSTCTL),
594 0, /* size_t cbAlignment */
595 UINT32_MAX, /* uint32_t cMaxObjects */
596 NULL, /* PFNMEMCACHECTOR pfnCtor*/
597 NULL, /* PFNMEMCACHEDTOR pfnDtor*/
598 NULL, /* void *pvUser*/
599 0 /* uint32_t fFlags*/
600 );
601 if (RT_SUCCESS(rc))
602# endif
603 {
604 RTListInit(&pCmdVbva->GuestCtlList);
605 RTListInit(&pCmdVbva->HostCtlList);
606 pCmdVbva->i32State = VBVAEXHOSTCONTEXT_STATE_PROCESSING;
607 pCmdVbva->i32EnableState = VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
608 return VINF_SUCCESS;
609 }
610# ifndef VBOXVDBG_MEMCACHE_DISABLE
611 else
612 WARN(("RTMemCacheCreate failed %d\n", rc));
613# endif
614 }
615 else
616 WARN(("RTCritSectInit failed %d\n", rc));
617
618 return rc;
619}
620
621DECLINLINE(bool) VBoxVBVAExHSIsEnabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
622{
623 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) >= VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
624}
625
626DECLINLINE(bool) VBoxVBVAExHSIsDisabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
627{
628 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) == VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
629}
630
631static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA)
632{
633 if (VBoxVBVAExHSIsEnabled(pCmdVbva))
634 {
635 WARN(("VBVAEx is enabled already\n"));
636 return VERR_INVALID_STATE;
637 }
638
639 pCmdVbva->pVBVA = pVBVA;
640 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
641 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
642 return VINF_SUCCESS;
643}
644
645static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
646{
647 if (VBoxVBVAExHSIsDisabled(pCmdVbva))
648 return VINF_SUCCESS;
649
650 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
651 return VINF_SUCCESS;
652}
653
654static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva)
655{
656 /* ensure the processor is stopped */
657 Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);
658
659 /* ensure no one tries to submit the command */
660 if (pCmdVbva->pVBVA)
661 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
662
663 Assert(RTListIsEmpty(&pCmdVbva->GuestCtlList));
664 Assert(RTListIsEmpty(&pCmdVbva->HostCtlList));
665
666 RTCritSectDelete(&pCmdVbva->CltCritSect);
667
668# ifndef VBOXVDBG_MEMCACHE_DISABLE
669 RTMemCacheDestroy(pCmdVbva->CtlCache);
670# endif
671
672 memset(pCmdVbva, 0, sizeof (*pCmdVbva));
673}
674
675static int vboxVBVAExHSSaveGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
676{
677 RT_NOREF(pCmdVbva);
678 int rc = SSMR3PutU32(pSSM, pCtl->enmType);
679 AssertRCReturn(rc, rc);
680 rc = SSMR3PutU32(pSSM, pCtl->u.cmd.cbCmd);
681 AssertRCReturn(rc, rc);
682 rc = SSMR3PutU32(pSSM, (uint32_t)(pCtl->u.cmd.pu8Cmd - pu8VramBase));
683 AssertRCReturn(rc, rc);
684
685 return VINF_SUCCESS;
686}
687
688static int vboxVBVAExHSSaveStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
689{
690 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
691 {
692 WARN(("vbva not paused\n"));
693 return VERR_INVALID_STATE;
694 }
695
696 int rc;
697 VBVAEXHOSTCTL* pCtl;
698 RTListForEach(&pCmdVbva->GuestCtlList, pCtl, VBVAEXHOSTCTL, Node)
699 {
700 rc = vboxVBVAExHSSaveGuestCtl(pCmdVbva, pCtl, pu8VramBase, pSSM);
701 AssertRCReturn(rc, rc);
702 }
703
704 rc = SSMR3PutU32(pSSM, 0);
705 AssertRCReturn(rc, rc);
706
707 return VINF_SUCCESS;
708}
709
710
711/** Saves state
712 * @returns - same as VBoxVBVAExHSCheckCommands, or failure on load state fail
713 */
714static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
715{
716 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
717 if (RT_FAILURE(rc))
718 {
719 WARN(("RTCritSectEnter failed %d\n", rc));
720 return rc;
721 }
722
723 rc = vboxVBVAExHSSaveStateLocked(pCmdVbva, pu8VramBase, pSSM);
724 if (RT_FAILURE(rc))
725 WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
726
727 RTCritSectLeave(&pCmdVbva->CltCritSect);
728
729 return rc;
730}
731
732static int vboxVBVAExHSLoadGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
733{
734 RT_NOREF(u32Version);
735 uint32_t u32;
736 int rc = SSMR3GetU32(pSSM, &u32);
737 AssertLogRelRCReturn(rc, rc);
738
739 if (!u32)
740 return VINF_EOF;
741
742 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(pCmdVbva, (VBVAEXHOSTCTL_TYPE)u32);
743 if (!pHCtl)
744 {
745 WARN(("VBoxVBVAExHCtlCreate failed\n"));
746 return VERR_NO_MEMORY;
747 }
748
749 rc = SSMR3GetU32(pSSM, &u32);
750 AssertLogRelRCReturn(rc, rc);
751 pHCtl->u.cmd.cbCmd = u32;
752
753 rc = SSMR3GetU32(pSSM, &u32);
754 AssertLogRelRCReturn(rc, rc);
755 pHCtl->u.cmd.pu8Cmd = pu8VramBase + u32;
756
757 RTListAppend(&pCmdVbva->GuestCtlList, &pHCtl->Node);
758 ++pCmdVbva->u32cCtls;
759
760 return VINF_SUCCESS;
761}
762
763
764static int vboxVBVAExHSLoadStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
765{
766 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
767 {
768 WARN(("vbva not stopped\n"));
769 return VERR_INVALID_STATE;
770 }
771
772 int rc;
773
774 do {
775 rc = vboxVBVAExHSLoadGuestCtl(pCmdVbva, pu8VramBase, pSSM, u32Version);
776 AssertLogRelRCReturn(rc, rc);
777 } while (VINF_EOF != rc);
778
779 return VINF_SUCCESS;
780}
781
782/** Loads state
783 * @returns - same as VBoxVBVAExHSCheckCommands, or failure on load state fail
784 */
785static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
786{
787 Assert(VGA_SAVEDSTATE_VERSION_3D <= u32Version);
788 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
789 if (RT_FAILURE(rc))
790 {
791 WARN(("RTCritSectEnter failed %d\n", rc));
792 return rc;
793 }
794
795 rc = vboxVBVAExHSLoadStateLocked(pCmdVbva, pu8VramBase, pSSM, u32Version);
796 if (RT_FAILURE(rc))
797 WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
798
799 RTCritSectLeave(&pCmdVbva->CltCritSect);
800
801 return rc;
802}
803
/** Who submitted a control: the guest driver or the host. */
typedef enum
{
    VBVAEXHOSTCTL_SOURCE_GUEST = 0,
    VBVAEXHOSTCTL_SOURCE_HOST
} VBVAEXHOSTCTL_SOURCE;
809
810
811static int VBoxVBVAExHCtlSubmit(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
812{
813 if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
814 {
815 Log(("cmd vbva not enabled\n"));
816 return VERR_INVALID_STATE;
817 }
818
819 pCtl->pfnComplete = pfnComplete;
820 pCtl->pvComplete = pvComplete;
821
822 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
823 if (RT_SUCCESS(rc))
824 {
825 if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
826 {
827 Log(("cmd vbva not enabled\n"));
828 RTCritSectLeave(&pCmdVbva->CltCritSect);
829 return VERR_INVALID_STATE;
830 }
831
832 if (enmSource > VBVAEXHOSTCTL_SOURCE_GUEST)
833 {
834 RTListAppend(&pCmdVbva->HostCtlList, &pCtl->Node);
835 }
836 else
837 RTListAppend(&pCmdVbva->GuestCtlList, &pCtl->Node);
838
839 ASMAtomicIncU32(&pCmdVbva->u32cCtls);
840
841 RTCritSectLeave(&pCmdVbva->CltCritSect);
842
843 rc = VBoxVBVAExHSCheckCommands(pCmdVbva);
844 }
845 else
846 WARN(("RTCritSectEnter failed %d\n", rc));
847
848 return rc;
849}
850
851void VBoxVDMAThreadNotifyConstructSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
852{
853 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_CREATING);
854 PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
855 void *pvChanged = pThread->pvChanged;
856
857 pThread->pfnChanged = NULL;
858 pThread->pvChanged = NULL;
859
860 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_CREATED);
861
862 if (pfnChanged)
863 pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
864}
865
866void VBoxVDMAThreadNotifyTerminatingSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
867{
868 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_TERMINATING);
869 PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
870 void *pvChanged = pThread->pvChanged;
871
872 pThread->pfnChanged = NULL;
873 pThread->pvChanged = NULL;
874
875 if (pfnChanged)
876 pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
877}
878
879DECLINLINE(bool) VBoxVDMAThreadIsTerminating(PVBOXVDMATHREAD pThread)
880{
881 return ASMAtomicUoReadU32(&pThread->u32State) == VBOXVDMATHREAD_STATE_TERMINATING;
882}
883
884void VBoxVDMAThreadInit(PVBOXVDMATHREAD pThread)
885{
886 memset(pThread, 0, sizeof (*pThread));
887 pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
888}
889
890int VBoxVDMAThreadCleanup(PVBOXVDMATHREAD pThread)
891{
892 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
893 switch (u32State)
894 {
895 case VBOXVDMATHREAD_STATE_TERMINATED:
896 return VINF_SUCCESS;
897 case VBOXVDMATHREAD_STATE_TERMINATING:
898 {
899 int rc = RTThreadWait(pThread->hWorkerThread, RT_INDEFINITE_WAIT, NULL);
900 if (!RT_SUCCESS(rc))
901 {
902 WARN(("RTThreadWait failed %d\n", rc));
903 return rc;
904 }
905
906 RTSemEventDestroy(pThread->hEvent);
907
908 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATED);
909 return VINF_SUCCESS;
910 }
911 default:
912 WARN(("invalid state"));
913 return VERR_INVALID_STATE;
914 }
915}
916
917int VBoxVDMAThreadCreate(PVBOXVDMATHREAD pThread, PFNRTTHREAD pfnThread, void *pvThread, PFNVBOXVDMATHREAD_CHANGED pfnCreated, void*pvCreated)
918{
919 int rc = VBoxVDMAThreadCleanup(pThread);
920 if (RT_FAILURE(rc))
921 {
922 WARN(("VBoxVDMAThreadCleanup failed %d\n", rc));
923 return rc;
924 }
925
926 rc = RTSemEventCreate(&pThread->hEvent);
927 if (RT_SUCCESS(rc))
928 {
929 pThread->u32State = VBOXVDMATHREAD_STATE_CREATING;
930 pThread->pfnChanged = pfnCreated;
931 pThread->pvChanged = pvCreated;
932 rc = RTThreadCreate(&pThread->hWorkerThread, pfnThread, pvThread, 0, RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "VDMA");
933 if (RT_SUCCESS(rc))
934 return VINF_SUCCESS;
935 else
936 WARN(("RTThreadCreate failed %d\n", rc));
937
938 RTSemEventDestroy(pThread->hEvent);
939 }
940 else
941 WARN(("RTSemEventCreate failed %d\n", rc));
942
943 pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
944
945 return rc;
946}
947
948DECLINLINE(int) VBoxVDMAThreadEventNotify(PVBOXVDMATHREAD pThread)
949{
950 int rc = RTSemEventSignal(pThread->hEvent);
951 AssertRC(rc);
952 return rc;
953}
954
955DECLINLINE(int) VBoxVDMAThreadEventWait(PVBOXVDMATHREAD pThread, RTMSINTERVAL cMillies)
956{
957 int rc = RTSemEventWait(pThread->hEvent, cMillies);
958 AssertRC(rc);
959 return rc;
960}
961
962int VBoxVDMAThreadTerm(PVBOXVDMATHREAD pThread, PFNVBOXVDMATHREAD_CHANGED pfnTerminated, void*pvTerminated, bool fNotify)
963{
964 int rc;
965 do
966 {
967 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
968 switch (u32State)
969 {
970 case VBOXVDMATHREAD_STATE_CREATED:
971 pThread->pfnChanged = pfnTerminated;
972 pThread->pvChanged = pvTerminated;
973 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATING);
974 if (fNotify)
975 {
976 rc = VBoxVDMAThreadEventNotify(pThread);
977 AssertRC(rc);
978 }
979 return VINF_SUCCESS;
980 case VBOXVDMATHREAD_STATE_TERMINATING:
981 case VBOXVDMATHREAD_STATE_TERMINATED:
982 {
983 WARN(("thread is marked to termination or terminated\nn"));
984 return VERR_INVALID_STATE;
985 }
986 case VBOXVDMATHREAD_STATE_CREATING:
987 {
988 /* wait till the thread creation is completed */
989 WARN(("concurrent thread create/destron\n"));
990 RTThreadYield();
991 continue;
992 }
993 default:
994 WARN(("invalid state"));
995 return VERR_INVALID_STATE;
996 }
997 } while (1);
998
999 WARN(("should never be here\n"));
1000 return VERR_INTERNAL_ERROR;
1001}
1002
1003static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource);
1004
1005typedef DECLCALLBACK(void) FNVBOXVDMACRCTL_CALLBACK(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext);
1006typedef FNVBOXVDMACRCTL_CALLBACK *PFNVBOXVDMACRCTL_CALLBACK;
1007
1008typedef struct VBOXVDMACMD_CHROMIUM_CTL_PRIVATE
1009{
1010 uint32_t cRefs;
1011 int32_t rc;
1012 PFNVBOXVDMACRCTL_CALLBACK pfnCompletion;
1013 void *pvCompletion;
1014 VBOXVDMACMD_CHROMIUM_CTL Cmd;
1015} VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, *PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE;
1016
1017# define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(_p) ((PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)(((uint8_t*)(_p)) - RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd)))
1018
1019static PVBOXVDMACMD_CHROMIUM_CTL vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE enmCmd, uint32_t cbCmd)
1020{
1021 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)RTMemAllocZ(cbCmd + RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd));
1022 Assert(pHdr);
1023 if (pHdr)
1024 {
1025 pHdr->cRefs = 1;
1026 pHdr->rc = VERR_NOT_IMPLEMENTED;
1027 pHdr->Cmd.enmType = enmCmd;
1028 pHdr->Cmd.cbCmd = cbCmd;
1029 return &pHdr->Cmd;
1030 }
1031
1032 return NULL;
1033}
1034
1035DECLINLINE(void) vboxVDMACrCtlRelease (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1036{
1037 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1038 uint32_t cRefs = ASMAtomicDecU32(&pHdr->cRefs);
1039 if (!cRefs)
1040 RTMemFree(pHdr);
1041}
1042
#if 0 /* unused */
/* Retains an extra reference on a chromium control command (counterpart of
 * vboxVDMACrCtlRelease).  Currently compiled out because nothing takes extra
 * references. */
DECLINLINE(void) vboxVDMACrCtlRetain(PVBOXVDMACMD_CHROMIUM_CTL pCmd)
{
    PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
    ASMAtomicIncU32(&pHdr->cRefs);
}
#endif /* unused */
1050
1051DECLINLINE(int) vboxVDMACrCtlGetRc (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1052{
1053 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1054 return pHdr->rc;
1055}
1056
/**
 * Completion callback that signals the event semaphore passed in pvContext.
 * Used by vboxVDMACrCtlPost to turn an async submission into a synchronous wait.
 */
static DECLCALLBACK(void) vboxVDMACrCtlCbSetEvent(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
{
    RT_NOREF(pVGAState, pCmd);
    RTSemEventSignal((RTSEMEVENT)pvContext);
}
1062
# if 0 /** @todo vboxVDMACrCtlCbReleaseCmd is unused */
/* Completion callback variant that simply drops the command reference.
 * Compiled out — no current submission path uses fire-and-forget semantics. */
static DECLCALLBACK(void) vboxVDMACrCtlCbReleaseCmd(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
{
    RT_NOREF(pVGAState, pvContext);
    vboxVDMACrCtlRelease(pCmd);
}
# endif
1070
1071static int vboxVDMACrCtlPostAsync (PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd, PFNVBOXVDMACRCTL_CALLBACK pfnCompletion, void *pvCompletion)
1072{
1073 if ( pVGAState->pDrv
1074 && pVGAState->pDrv->pfnCrHgsmiControlProcess)
1075 {
1076 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1077 pHdr->pfnCompletion = pfnCompletion;
1078 pHdr->pvCompletion = pvCompletion;
1079 pVGAState->pDrv->pfnCrHgsmiControlProcess(pVGAState->pDrv, pCmd, cbCmd);
1080 return VINF_SUCCESS;
1081 }
1082# ifdef DEBUG_misha
1083 Assert(0);
1084# endif
1085 return VERR_NOT_SUPPORTED;
1086}
1087
/**
 * Submits a chromium control command and waits for it to complete.
 *
 * @returns VBox status code (creation, submission or wait failure).
 * @param   pVGAState   The VGA device state.
 * @param   pCmd        The command to submit.
 * @param   cbCmd       Size of the command.
 */
static int vboxVDMACrCtlPost(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd)
{
    RTSEMEVENT hComplEvent;
    int rc = RTSemEventCreate(&hComplEvent);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        /* The callback signals hComplEvent when the command completes. */
        rc = vboxVDMACrCtlPostAsync(pVGAState, pCmd, cbCmd, vboxVDMACrCtlCbSetEvent, (void*)hComplEvent);
# ifdef DEBUG_misha
        AssertRC(rc);
# endif
        if (RT_SUCCESS(rc))
        {
            rc = RTSemEventWaitNoResume(hComplEvent, RT_INDEFINITE_WAIT);
            AssertRC(rc);
            if (RT_SUCCESS(rc))
            {
                RTSemEventDestroy(hComplEvent);
            }
            /* NOTE(review): if the wait fails (e.g. interrupted), the event is
             * intentionally NOT destroyed — the completion callback may still
             * signal it later; destroying here would be a use-after-free.  The
             * handle is leaked instead. */
        }
        else
        {
            /* the command is completed */
            RTSemEventDestroy(hComplEvent);
        }
    }
    return rc;
}
1116
/** Completion context for synchronous HGCM control submission (note: "CYNC" is
 *  a historical typo for "SYNC" kept for compatibility with existing uses). */
typedef struct VDMA_VBVA_CTL_CYNC_COMPLETION
{
    int rc;             /**< Status reported by the completion callback. */
    RTSEMEVENT hEvent;  /**< Signalled when the command completes. */
} VDMA_VBVA_CTL_CYNC_COMPLETION;
1122
1123static DECLCALLBACK(void) vboxVDMACrHgcmSubmitSyncCompletion(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
1124{
1125 RT_NOREF(pCmd, cbCmd);
1126 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvCompletion;
1127 pData->rc = rc;
1128 rc = RTSemEventSignal(pData->hEvent);
1129 if (!RT_SUCCESS(rc))
1130 WARN(("RTSemEventSignal failed %d\n", rc));
1131}
1132
1133static int vboxVDMACrHgcmSubmitSync(struct VBOXVDMAHOST *pVdma, VBOXCRCMDCTL* pCtl, uint32_t cbCtl)
1134{
1135 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
1136 Data.rc = VERR_NOT_IMPLEMENTED;
1137 int rc = RTSemEventCreate(&Data.hEvent);
1138 if (!RT_SUCCESS(rc))
1139 {
1140 WARN(("RTSemEventCreate failed %d\n", rc));
1141 return rc;
1142 }
1143
1144 pCtl->CalloutList.List.pNext = NULL;
1145
1146 PVGASTATE pVGAState = pVdma->pVGAState;
1147 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCtl, cbCtl, vboxVDMACrHgcmSubmitSyncCompletion, &Data);
1148 if (RT_SUCCESS(rc))
1149 {
1150 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
1151 if (RT_SUCCESS(rc))
1152 {
1153 rc = Data.rc;
1154 if (!RT_SUCCESS(rc))
1155 {
1156 WARN(("pfnCrHgcmCtlSubmit command failed %d\n", rc));
1157 }
1158
1159 }
1160 else
1161 WARN(("RTSemEventWait failed %d\n", rc));
1162 }
1163 else
1164 WARN(("pfnCrHgcmCtlSubmit failed %d\n", rc));
1165
1166
1167 RTSemEventDestroy(Data.hEvent);
1168
1169 return rc;
1170}
1171
1172static int vdmaVBVACtlDisableSync(PVBOXVDMAHOST pVdma)
1173{
1174 VBVAEXHOSTCTL HCtl;
1175 HCtl.enmType = VBVAEXHOSTCTL_TYPE_GHH_DISABLE;
1176 int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1177 if (RT_FAILURE(rc))
1178 {
1179 Log(("vdmaVBVACtlSubmitSync failed %d\n", rc));
1180 return rc;
1181 }
1182
1183 vgaUpdateDisplayAll(pVdma->pVGAState, /* fFailOnResize = */ false);
1184
1185 return VINF_SUCCESS;
1186}
1187
1188static DECLCALLBACK(uint8_t*) vboxVDMACrHgcmHandleEnableRemainingHostCommand(HVBOXCRCMDCTL_REMAINING_HOST_COMMAND hClient, uint32_t *pcbCtl, int prevCmdRc)
1189{
1190 struct VBOXVDMAHOST *pVdma = hClient;
1191 if (!pVdma->pCurRemainingHostCtl)
1192 {
1193 /* disable VBVA, all subsequent host commands will go HGCM way */
1194 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1195 }
1196 else
1197 {
1198 VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pVdma->pCurRemainingHostCtl, prevCmdRc);
1199 }
1200
1201 pVdma->pCurRemainingHostCtl = VBoxVBVAExHPCheckHostCtlOnDisable(&pVdma->CmdVbva);
1202 if (pVdma->pCurRemainingHostCtl)
1203 {
1204 *pcbCtl = pVdma->pCurRemainingHostCtl->u.cmd.cbCmd;
1205 return pVdma->pCurRemainingHostCtl->u.cmd.pu8Cmd;
1206 }
1207
1208 *pcbCtl = 0;
1209 return NULL;
1210}
1211
/**
 * Callback invoked when the HGCM service has finished its terminating
 * notification.  Strict builds verify the expected VBVA/thread states;
 * otherwise this is a no-op.
 */
static DECLCALLBACK(void) vboxVDMACrHgcmNotifyTerminatingDoneCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient)
{
# ifdef VBOX_STRICT
    struct VBOXVDMAHOST *pVdma = hClient;
    Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pVdma->Thread.u32State == VBOXVDMATHREAD_STATE_TERMINATING);
# else
    RT_NOREF(hClient);
# endif
}
1222
1223static DECLCALLBACK(int) vboxVDMACrHgcmNotifyTerminatingCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient, VBOXCRCMDCTL_HGCMENABLE_DATA *pHgcmEnableData)
1224{
1225 struct VBOXVDMAHOST *pVdma = hClient;
1226 VBVAEXHOSTCTL HCtl;
1227 HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD;
1228 int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1229
1230 pHgcmEnableData->hRHCmd = pVdma;
1231 pHgcmEnableData->pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1232
1233 if (RT_FAILURE(rc))
1234 {
1235 if (rc == VERR_INVALID_STATE)
1236 rc = VINF_SUCCESS;
1237 else
1238 WARN(("vdmaVBVACtlSubmitSync failed %d\n", rc));
1239 }
1240
1241 return rc;
1242}
1243
1244static int vboxVDMACrHgcmHandleEnable(struct VBOXVDMAHOST *pVdma)
1245{
1246 VBOXCRCMDCTL_ENABLE Enable;
1247 Enable.Hdr.enmType = VBOXCRCMDCTL_TYPE_ENABLE;
1248 Enable.Data.hRHCmd = pVdma;
1249 Enable.Data.pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1250
1251 int rc = vboxVDMACrHgcmSubmitSync(pVdma, &Enable.Hdr, sizeof (Enable));
1252 Assert(!pVdma->pCurRemainingHostCtl);
1253 if (RT_SUCCESS(rc))
1254 {
1255 Assert(!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1256 return VINF_SUCCESS;
1257 }
1258
1259 Assert(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1260 WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));
1261
1262 return rc;
1263}
1264
/**
 * Enables command VBVA processing.
 *
 * @returns VBox status code.
 * @param   pVdma       The VDMA host state.
 * @param   u32Offset   Guest-supplied HGSMI offset of the VBVABUFFER.
 */
static int vdmaVBVAEnableProcess(struct VBOXVDMAHOST *pVdma, uint32_t u32Offset)
{
    if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    {
        WARN(("vdma VBVA is already enabled\n"));
        return VERR_INVALID_STATE;
    }

    /* Translate the guest offset to a host pointer; NULL means out of range. */
    VBVABUFFER *pVBVA = (VBVABUFFER *)HGSMIOffsetToPointerHost(pVdma->pHgsmi, u32Offset);
    if (!pVBVA)
    {
        WARN(("invalid offset %d\n", u32Offset));
        return VERR_INVALID_PARAMETER;
    }

    int rc = VBoxVBVAExHSEnable(&pVdma->CmdVbva, pVBVA);
    if (RT_SUCCESS(rc))
    {
        if (!pVdma->CrSrvInfo.pfnEnable)
        {
            /* "HGCM-less" mode. All inited. */
            return VINF_SUCCESS;
        }

        /* Tell the HGCM service to stop taking commands over HGCM; host
         * controls will come through VBVA from now on. */
        VBOXCRCMDCTL_DISABLE Disable;
        Disable.Hdr.enmType = VBOXCRCMDCTL_TYPE_DISABLE;
        Disable.Data.hNotifyTerm = pVdma;
        Disable.Data.pfnNotifyTerm = vboxVDMACrHgcmNotifyTerminatingCb;
        Disable.Data.pfnNotifyTermDone = vboxVDMACrHgcmNotifyTerminatingDoneCb;
        rc = vboxVDMACrHgcmSubmitSync(pVdma, &Disable.Hdr, sizeof (Disable));
        if (RT_SUCCESS(rc))
        {
            /* Hand the service the screen-update entry points and enable it. */
            PVGASTATE pVGAState = pVdma->pVGAState;
            VBOXCRCMD_SVRENABLE_INFO Info;
            Info.hCltScr = pVGAState->pDrv;
            Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
            Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
            Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
            rc = pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
            if (RT_SUCCESS(rc))
                return VINF_SUCCESS;
            else
                WARN(("pfnEnable failed %d\n", rc));

            /* Roll back: re-enable the HGCM path before disabling VBVA below. */
            vboxVDMACrHgcmHandleEnable(pVdma);
        }
        else
            WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));

        VBoxVBVAExHSDisable(&pVdma->CmdVbva);
    }
    else
        WARN(("VBoxVBVAExHSEnable failed %d\n", rc));

    return rc;
}
1321
/**
 * Disables command VBVA processing.
 *
 * @returns VBox status code.
 * @param   pVdma           The VDMA host state.
 * @param   fDoHgcmEnable   Whether to switch host controls back to the HGCM
 *                          channel (false during HGCM unload, where the
 *                          channel is going away anyway).
 */
static int vdmaVBVADisableProcess(struct VBOXVDMAHOST *pVdma, bool fDoHgcmEnable)
{
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    {
        Log(("vdma VBVA is already disabled\n"));
        return VINF_SUCCESS;
    }

    if (!pVdma->CrSrvInfo.pfnDisable)
    {
        /* "HGCM-less" mode. Just undo what vdmaVBVAEnableProcess did. */
        VBoxVBVAExHSDisable(&pVdma->CmdVbva);
        return VINF_SUCCESS;
    }

    int rc = pVdma->CrSrvInfo.pfnDisable(pVdma->CrSrvInfo.hSvr);
    if (RT_SUCCESS(rc))
    {
        if (fDoHgcmEnable)
        {
            PVGASTATE pVGAState = pVdma->pVGAState;

            /* disable is a bit tricky
             * we need to ensure the host ctl commands do not come out of order
             * and do not come over HGCM channel until after it is enabled */
            rc = vboxVDMACrHgcmHandleEnable(pVdma);
            if (RT_SUCCESS(rc))
            {
                vdmaVBVANotifyDisable(pVGAState);
                return VINF_SUCCESS;
            }

            /* Roll back: re-enable the 3D service since the HGCM hand-over failed. */
            VBOXCRCMD_SVRENABLE_INFO Info;
            Info.hCltScr = pVGAState->pDrv;
            Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
            Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
            Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
            pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
        }
    }
    else
        WARN(("pfnDisable failed %d\n", rc));

    return rc;
}
1367
1368static int vboxVDMACrHostCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd, bool *pfContinue)
1369{
1370 *pfContinue = true;
1371
1372 switch (pCmd->enmType)
1373 {
1374 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1375 {
1376 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1377 {
1378 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1379 return VERR_INVALID_STATE;
1380 }
1381 if (!pVdma->CrSrvInfo.pfnHostCtl)
1382 {
1383 /* Should not be. */
1384 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for HGCM-less mode\n"));
1385 return VERR_INVALID_STATE;
1386 }
1387 return pVdma->CrSrvInfo.pfnHostCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1388 }
1389 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1390 {
1391 int rc = vdmaVBVADisableProcess(pVdma, true);
1392 if (RT_FAILURE(rc))
1393 {
1394 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1395 return rc;
1396 }
1397
1398 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
1399 }
1400 case VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD:
1401 {
1402 int rc = vdmaVBVADisableProcess(pVdma, false);
1403 if (RT_FAILURE(rc))
1404 {
1405 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1406 return rc;
1407 }
1408
1409 rc = VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, true);
1410 if (RT_FAILURE(rc))
1411 {
1412 WARN(("VBoxVDMAThreadTerm failed %d\n", rc));
1413 return rc;
1414 }
1415
1416 *pfContinue = false;
1417 return VINF_SUCCESS;
1418 }
1419 case VBVAEXHOSTCTL_TYPE_HH_SAVESTATE:
1420 {
1421 PVGASTATE pVGAState = pVdma->pVGAState;
1422 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1423 int rc = VBoxVBVAExHSSaveState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM);
1424 if (RT_FAILURE(rc))
1425 {
1426 WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
1427 return rc;
1428 }
1429 VGA_SAVED_STATE_PUT_MARKER(pCmd->u.state.pSSM, 4);
1430
1431 if (!pVdma->CrSrvInfo.pfnSaveState)
1432 {
1433 /* Done. */
1434 return VINF_SUCCESS;
1435 }
1436
1437 return pVdma->CrSrvInfo.pfnSaveState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM);
1438 }
1439 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE:
1440 {
1441 PVGASTATE pVGAState = pVdma->pVGAState;
1442 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1443
1444 int rc = VBoxVBVAExHSLoadState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1445 if (RT_FAILURE(rc))
1446 {
1447 WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
1448 return rc;
1449 }
1450
1451 VGA_SAVED_STATE_GET_MARKER_RETURN_ON_MISMATCH(pCmd->u.state.pSSM, pCmd->u.state.u32Version, 4);
1452 if (!pVdma->CrSrvInfo.pfnLoadState)
1453 {
1454 /* Done. */
1455 return VINF_SUCCESS;
1456 }
1457
1458 rc = pVdma->CrSrvInfo.pfnLoadState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1459 if (RT_FAILURE(rc))
1460 {
1461 WARN(("pfnLoadState failed %d\n", rc));
1462 return rc;
1463 }
1464
1465 return VINF_SUCCESS;
1466 }
1467 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE:
1468 {
1469 PVGASTATE pVGAState = pVdma->pVGAState;
1470
1471 for (uint32_t i = 0; i < pVGAState->cMonitors; ++i)
1472 {
1473 VBVAINFOSCREEN CurScreen;
1474 VBVAINFOVIEW CurView;
1475
1476 int rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
1477 if (RT_FAILURE(rc))
1478 {
1479 WARN(("VBVAGetInfoViewAndScreen failed %d\n", rc));
1480 return rc;
1481 }
1482
1483 rc = VBVAInfoScreen(pVGAState, &CurScreen);
1484 if (RT_FAILURE(rc))
1485 {
1486 WARN(("VBVAInfoScreen failed %d\n", rc));
1487 return rc;
1488 }
1489 }
1490
1491 return VINF_SUCCESS;
1492 }
1493 default:
1494 WARN(("unexpected host ctl type %d\n", pCmd->enmType));
1495 return VERR_INVALID_PARAMETER;
1496 }
1497}
1498
/**
 * Validates and normalizes a guest-supplied VBVAINFOSCREEN in place.
 *
 * @returns VINF_SUCCESS if the screen data is acceptable (possibly rewritten
 *          for the disable/blank special cases), VERR_INVALID_PARAMETER otherwise.
 * @param   pVGAState   The VGA device state (monitor count, VRAM size).
 * @param   pScreen     The screen info to validate; may be rewritten.
 */
static int vboxVDMASetupScreenInfo(PVGASTATE pVGAState, VBVAINFOSCREEN *pScreen)
{
    const uint32_t u32ViewIndex = pScreen->u32ViewIndex;
    const uint16_t u16Flags = pScreen->u16Flags;

    if (u16Flags & VBVA_SCREEN_F_DISABLED)
    {
        /* 0xFFFFFFFF means "all screens". */
        if (   u32ViewIndex < pVGAState->cMonitors
            || u32ViewIndex == UINT32_C(0xFFFFFFFF))
        {
            /* For disable, only the index matters; scrub everything else. */
            RT_ZERO(*pScreen);
            pScreen->u32ViewIndex = u32ViewIndex;
            pScreen->u16Flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED;
            return VINF_SUCCESS;
        }
    }
    else
    {
        if (u16Flags & VBVA_SCREEN_F_BLANK2)
        {
            if (   u32ViewIndex >= pVGAState->cMonitors
                && u32ViewIndex != UINT32_C(0xFFFFFFFF))
            {
                return VERR_INVALID_PARAMETER;
            }

            /* Special case for blanking using current video mode.
             * Only 'u16Flags' and 'u32ViewIndex' field are relevant.
             */
            RT_ZERO(*pScreen);
            pScreen->u32ViewIndex = u32ViewIndex;
            pScreen->u16Flags = u16Flags;
            return VINF_SUCCESS;
        }

        /* Regular mode change: sanity-limit the geometry... */
        if (   u32ViewIndex < pVGAState->cMonitors
            && pScreen->u16BitsPerPixel <= 32
            && pScreen->u32Width <= UINT16_MAX
            && pScreen->u32Height <= UINT16_MAX
            && pScreen->u32LineSize <= UINT16_MAX * 4)
        {
            /* ...make sure a scanline can hold the requested width... */
            const uint32_t u32BytesPerPixel = (pScreen->u16BitsPerPixel + 7) / 8;
            if (pScreen->u32Width <= pScreen->u32LineSize / (u32BytesPerPixel? u32BytesPerPixel: 1))
            {
                /* ...and that the whole framebuffer fits in VRAM (64-bit math
                 * so the size product cannot wrap). */
                const uint64_t u64ScreenSize = (uint64_t)pScreen->u32LineSize * pScreen->u32Height;
                if (   pScreen->u32StartOffset <= pVGAState->vram_size
                    && u64ScreenSize <= pVGAState->vram_size
                    && pScreen->u32StartOffset <= pVGAState->vram_size - (uint32_t)u64ScreenSize)
                {
                    return VINF_SUCCESS;
                }
            }
        }
    }

    return VERR_INVALID_PARAMETER;
}
1556
/**
 * Applies one resize entry from a guest GHH_RESIZE control to every screen in
 * its target map.
 *
 * @returns VBox status code.
 * @param   pVdma   The VDMA host state.
 * @param   pEntry  The resize entry (screen info + target screen bitmap).
 */
static int vboxVDMACrGuestCtlResizeEntryProcess(struct VBOXVDMAHOST *pVdma, VBOXCMDVBVA_RESIZE_ENTRY *pEntry)
{
    PVGASTATE pVGAState = pVdma->pVGAState;
    VBVAINFOSCREEN Screen = pEntry->Screen;

    /* Verify and cleanup local copy of the input data. */
    int rc = vboxVDMASetupScreenInfo(pVGAState, &Screen);
    if (RT_FAILURE(rc))
    {
        WARN(("invalid screen data\n"));
        return rc;
    }

    /* Copy the target map and clear bits beyond the actual monitor count. */
    VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
    memcpy(aTargetMap, pEntry->aTargetMap, sizeof(aTargetMap));
    ASMBitClearRange(aTargetMap, pVGAState->cMonitors, VBOX_VIDEO_MAX_SCREENS);

    if (pVdma->CrSrvInfo.pfnResize)
    {
        /* Also inform the HGCM service, if it is there. */
        rc = pVdma->CrSrvInfo.pfnResize(pVdma->CrSrvInfo.hSvr, &Screen, aTargetMap);
        if (RT_FAILURE(rc))
        {
            WARN(("pfnResize failed %d\n", rc));
            return rc;
        }
    }

    /* A fake view which contains the current screen for the 2D VBVAInfoView. */
    VBVAINFOVIEW View;
    View.u32ViewOffset = 0;
    View.u32ViewSize = Screen.u32LineSize * Screen.u32Height + Screen.u32StartOffset;
    View.u32MaxScreenSize = Screen.u32LineSize * Screen.u32Height;

    const bool fDisable = RT_BOOL(Screen.u16Flags & VBVA_SCREEN_F_DISABLED);

    /* Walk every screen selected by the (already clipped) target bitmap. */
    for (int i = ASMBitFirstSet(aTargetMap, pVGAState->cMonitors);
            i >= 0;
            i = ASMBitNextSet(aTargetMap, pVGAState->cMonitors, i))
    {
        Screen.u32ViewIndex = i;

        VBVAINFOSCREEN CurScreen;
        VBVAINFOVIEW CurView;

        rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
        AssertRC(rc);

        /* Skip screens already in the requested configuration. */
        if (!memcmp(&Screen, &CurScreen, sizeof (CurScreen)))
            continue;

        /* The view does not change if _BLANK2 is set. */
        if (   (!fDisable || !CurView.u32ViewSize)
            && !RT_BOOL(Screen.u16Flags & VBVA_SCREEN_F_BLANK2))
        {
            View.u32ViewIndex = Screen.u32ViewIndex;

            rc = VBVAInfoView(pVGAState, &View);
            if (RT_FAILURE(rc))
            {
                WARN(("VBVAInfoView failed %d\n", rc));
                break;
            }
        }

        rc = VBVAInfoScreen(pVGAState, &Screen);
        if (RT_FAILURE(rc))
        {
            WARN(("VBVAInfoScreen failed %d\n", rc));
            break;
        }
    }

    return rc;
}
1632
1633static int vboxVDMACrGuestCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd)
1634{
1635 VBVAEXHOSTCTL_TYPE enmType = pCmd->enmType;
1636 switch (enmType)
1637 {
1638 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1639 {
1640 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1641 {
1642 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1643 return VERR_INVALID_STATE;
1644 }
1645 if (!pVdma->CrSrvInfo.pfnGuestCtl)
1646 {
1647 /* Unexpected. */
1648 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE in HGCM-less mode\n"));
1649 return VERR_INVALID_STATE;
1650 }
1651 return pVdma->CrSrvInfo.pfnGuestCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1652 }
1653 case VBVAEXHOSTCTL_TYPE_GHH_RESIZE:
1654 {
1655 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1656 {
1657 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1658 return VERR_INVALID_STATE;
1659 }
1660
1661 uint32_t cbCmd = pCmd->u.cmd.cbCmd;
1662
1663 if (cbCmd % sizeof (VBOXCMDVBVA_RESIZE_ENTRY))
1664 {
1665 WARN(("invalid buffer size\n"));
1666 return VERR_INVALID_PARAMETER;
1667 }
1668
1669 uint32_t cElements = cbCmd / sizeof (VBOXCMDVBVA_RESIZE_ENTRY);
1670 if (!cElements)
1671 {
1672 WARN(("invalid buffer size\n"));
1673 return VERR_INVALID_PARAMETER;
1674 }
1675
1676 VBOXCMDVBVA_RESIZE *pResize = (VBOXCMDVBVA_RESIZE*)pCmd->u.cmd.pu8Cmd;
1677
1678 int rc = VINF_SUCCESS;
1679
1680 for (uint32_t i = 0; i < cElements; ++i)
1681 {
1682 VBOXCMDVBVA_RESIZE_ENTRY *pEntry = &pResize->aEntries[i];
1683 rc = vboxVDMACrGuestCtlResizeEntryProcess(pVdma, pEntry);
1684 if (RT_FAILURE(rc))
1685 {
1686 WARN(("vboxVDMACrGuestCtlResizeEntryProcess failed %d\n", rc));
1687 break;
1688 }
1689 }
1690 return rc;
1691 }
1692 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE:
1693 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED:
1694 {
1695 VBVAENABLE *pEnable = (VBVAENABLE *)pCmd->u.cmd.pu8Cmd;
1696 Assert(pCmd->u.cmd.cbCmd == sizeof (VBVAENABLE));
1697 uint32_t u32Offset = pEnable->u32Offset;
1698 int rc = vdmaVBVAEnableProcess(pVdma, u32Offset);
1699 if (!RT_SUCCESS(rc))
1700 {
1701 WARN(("vdmaVBVAEnableProcess failed %d\n", rc));
1702 return rc;
1703 }
1704
1705 if (enmType == VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED)
1706 {
1707 rc = VBoxVBVAExHPPause(&pVdma->CmdVbva);
1708 if (!RT_SUCCESS(rc))
1709 {
1710 WARN(("VBoxVBVAExHPPause failed %d\n", rc));
1711 return rc;
1712 }
1713 }
1714
1715 return VINF_SUCCESS;
1716 }
1717 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1718 {
1719 int rc = vdmaVBVADisableProcess(pVdma, true);
1720 if (RT_FAILURE(rc))
1721 {
1722 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1723 return rc;
1724 }
1725
1726 /* do vgaUpdateDisplayAll right away */
1727 VMR3ReqCallNoWait(PDMDevHlpGetVM(pVdma->pVGAState->pDevInsR3), VMCPUID_ANY,
1728 (PFNRT)vgaUpdateDisplayAll, 2, pVdma->pVGAState, /* fFailOnResize = */ false);
1729
1730 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
1731 }
1732 default:
1733 WARN(("unexpected ctl type %d\n", pCmd->enmType));
1734 return VERR_INVALID_PARAMETER;
1735 }
1736}
1737
1738/**
1739 * @param fIn - whether this is a page in or out op.
1740 * the direction is VRA#M - related, so fIn == true - transfer to VRAM); false - transfer from VRAM
1741 */
1742static int vboxVDMACrCmdVbvaProcessPagingEl(PPDMDEVINS pDevIns, VBOXCMDVBVAPAGEIDX iPage, uint8_t *pu8Vram, bool fIn)
1743{
1744 RTGCPHYS phPage = (RTGCPHYS)iPage << PAGE_SHIFT;
1745 PGMPAGEMAPLOCK Lock;
1746 int rc;
1747
1748 if (fIn)
1749 {
1750 const void * pvPage;
1751 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPage, &Lock);
1752 if (!RT_SUCCESS(rc))
1753 {
1754 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d", rc));
1755 return rc;
1756 }
1757
1758 memcpy(pu8Vram, pvPage, PAGE_SIZE);
1759
1760 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1761 }
1762 else
1763 {
1764 void * pvPage;
1765 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvPage, &Lock);
1766 if (!RT_SUCCESS(rc))
1767 {
1768 WARN(("PDMDevHlpPhysGCPhys2CCPtr failed %d", rc));
1769 return rc;
1770 }
1771
1772 memcpy(pvPage, pu8Vram, PAGE_SIZE);
1773
1774 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1775 }
1776
1777 return VINF_SUCCESS;
1778}
1779
1780static int vboxVDMACrCmdVbvaProcessPagingEls(PPDMDEVINS pDevIns, const VBOXCMDVBVAPAGEIDX *piPages, uint32_t cPages, uint8_t *pu8Vram, bool fIn)
1781{
1782 for (uint32_t i = 0; i < cPages; ++i, pu8Vram += PAGE_SIZE)
1783 {
1784 int rc = vboxVDMACrCmdVbvaProcessPagingEl(pDevIns, piPages[i], pu8Vram, fIn);
1785 if (!RT_SUCCESS(rc))
1786 {
1787 WARN(("vboxVDMACrCmdVbvaProcessPagingEl failed %d", rc));
1788 return rc;
1789 }
1790 }
1791
1792 return VINF_SUCCESS;
1793}
1794
/**
 * Validates a guest paging-transfer command and extracts its parameters.
 *
 * @returns 0 on success, -1 on any validation failure (VBVA-style int8 result).
 * @param   pVGAState   The VGA device state (VRAM base and size).
 * @param   pHdr        The command header (direction flag).
 * @param   pData       The transfer payload (VRAM offset + page index array).
 * @param   cbCmd       Total size of the command as submitted by the guest.
 * @param   ppPages     Where to return the page index array pointer.
 * @param   pcPages     Where to return the page count.
 * @param   ppu8Vram    Where to return the validated VRAM address.
 * @param   pfIn        Where to return the transfer direction.
 */
static int8_t vboxVDMACrCmdVbvaPagingDataInit(PVGASTATE pVGAState, const VBOXCMDVBVA_HDR *pHdr, const VBOXCMDVBVA_PAGING_TRANSFER_DATA *pData, uint32_t cbCmd,
                    const VBOXCMDVBVAPAGEIDX **ppPages, VBOXCMDVBVAPAGEIDX *pcPages,
                    uint8_t **ppu8Vram, bool *pfIn)
{
    if (cbCmd < sizeof (VBOXCMDVBVA_PAGING_TRANSFER))
    {
        WARN(("cmd too small"));
        return -1;
    }

    /* Everything past the fixed part must be whole page-index entries. */
    VBOXCMDVBVAPAGEIDX cPages = cbCmd - RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers);
    if (cPages % sizeof (VBOXCMDVBVAPAGEIDX))
    {
        WARN(("invalid cmd size"));
        return -1;
    }
    cPages /= sizeof (VBOXCMDVBVAPAGEIDX);

    VBOXCMDVBVAOFFSET offVRAM = pData->Alloc.u.offVRAM;
    if (offVRAM & PAGE_OFFSET_MASK)
    {
        WARN(("offVRAM address is not on page boundary\n"));
        return -1;
    }
    const VBOXCMDVBVAPAGEIDX *pPages = pData->aPageNumbers;

    uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
    if (offVRAM >= pVGAState->vram_size)
    {
        WARN(("invalid vram offset"));
        return -1;
    }

    /* Reject counts whose byte size would overflow when shifted by PAGE_SHIFT. */
    if (~(~(VBOXCMDVBVAPAGEIDX)0 >> PAGE_SHIFT) & cPages)
    {
        WARN(("invalid cPages %d", cPages));
        return -1;
    }

    /* NOTE(review): '>=' also rejects a transfer ending exactly at the VRAM
     * end; conservative, kept as-is. */
    if (offVRAM + ((VBOXCMDVBVAOFFSET)cPages << PAGE_SHIFT) >= pVGAState->vram_size)
    {
        WARN(("invalid cPages %d, exceeding vram size", cPages));
        return -1;
    }

    uint8_t *pu8Vram = pu8VramBase + offVRAM;
    bool fIn = !!(pHdr->u8Flags & VBOXCMDVBVA_OPF_PAGING_TRANSFER_IN);

    *ppPages = pPages;
    *pcPages = cPages;
    *ppu8Vram = pu8Vram;
    *pfIn = fIn;
    return 0;
}
1849
1850static int8_t vboxVDMACrCmdVbvaPagingFill(PVGASTATE pVGAState, VBOXCMDVBVA_PAGING_FILL *pFill)
1851{
1852 VBOXCMDVBVAOFFSET offVRAM = pFill->offVRAM;
1853 if (offVRAM & PAGE_OFFSET_MASK)
1854 {
1855 WARN(("offVRAM address is not on page boundary\n"));
1856 return -1;
1857 }
1858
1859 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1860 if (offVRAM >= pVGAState->vram_size)
1861 {
1862 WARN(("invalid vram offset"));
1863 return -1;
1864 }
1865
1866 uint32_t cbFill = pFill->u32CbFill;
1867
1868 if (offVRAM + cbFill >= pVGAState->vram_size)
1869 {
1870 WARN(("invalid cPages"));
1871 return -1;
1872 }
1873
1874 uint32_t *pu32Vram = (uint32_t*)(pu8VramBase + offVRAM);
1875 uint32_t u32Color = pFill->u32Pattern;
1876
1877 Assert(!(cbFill % 4));
1878 for (uint32_t i = 0; i < cbFill / 4; ++i)
1879 {
1880 pu32Vram[i] = u32Color;
1881 }
1882
1883 return 0;
1884}
1885
1886static int8_t vboxVDMACrCmdVbvaProcessCmdData(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
1887{
1888 switch (pCmd->u8OpCode)
1889 {
1890 case VBOXCMDVBVA_OPTYPE_NOPCMD:
1891 return 0;
1892 case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
1893 {
1894 PVGASTATE pVGAState = pVdma->pVGAState;
1895 const VBOXCMDVBVAPAGEIDX *pPages;
1896 uint32_t cPages;
1897 uint8_t *pu8Vram;
1898 bool fIn;
1899 int8_t i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pCmd, &((VBOXCMDVBVA_PAGING_TRANSFER*)pCmd)->Data, cbCmd,
1900 &pPages, &cPages,
1901 &pu8Vram, &fIn);
1902 if (i8Result < 0)
1903 {
1904 WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
1905 return i8Result;
1906 }
1907
1908 PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
1909 int rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pPages, cPages, pu8Vram, fIn);
1910 if (!RT_SUCCESS(rc))
1911 {
1912 WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
1913 return -1;
1914 }
1915
1916 return 0;
1917 }
1918 case VBOXCMDVBVA_OPTYPE_PAGING_FILL:
1919 {
1920 PVGASTATE pVGAState = pVdma->pVGAState;
1921 if (cbCmd != sizeof (VBOXCMDVBVA_PAGING_FILL))
1922 {
1923 WARN(("cmd too small"));
1924 return -1;
1925 }
1926
1927 return vboxVDMACrCmdVbvaPagingFill(pVGAState, (VBOXCMDVBVA_PAGING_FILL*)pCmd);
1928 }
1929 default:
1930 if (!pVdma->CrSrvInfo.pfnCmd)
1931 {
1932 /* Unexpected. */
1933 WARN(("no HGCM"));
1934 return -1;
1935 }
1936 return pVdma->CrSrvInfo.pfnCmd(pVdma->CrSrvInfo.hSvr, pCmd, cbCmd);
1937 }
1938}
1939
# if 0
/* Historical layout of the paging-transfer command, kept for reference only. */
typedef struct VBOXCMDVBVA_PAGING_TRANSFER
{
    VBOXCMDVBVA_HDR Hdr;
    /* for now can only contain offVRAM.
     * paging transfer can NOT be initiated for allocations having host 3D object (hostID) associated */
    VBOXCMDVBVA_ALLOCINFO Alloc;
    uint32_t u32Reserved;
    VBOXCMDVBVA_SYSMEMEL aSysMem[1];
} VBOXCMDVBVA_PAGING_TRANSFER;
# endif

/* Layout assumptions the page-crossing command parser below depends on. */
AssertCompile(sizeof (VBOXCMDVBVA_HDR) == 8);
AssertCompile(sizeof (VBOXCMDVBVA_ALLOCINFO) == 4);
AssertCompile(sizeof (VBOXCMDVBVAPAGEIDX) == 4);
AssertCompile(!(PAGE_SIZE % sizeof (VBOXCMDVBVAPAGEIDX)));

/** Number of system-memory elements that fit in one guest page. */
# define VBOXCMDVBVA_NUM_SYSMEMEL_PER_PAGE (PAGE_SIZE / sizeof (VBOXCMDVBVA_SYSMEMEL))
1958
/**
 * Top-level processor for a guest VBVA command.
 *
 * Handles SYSMEMCMD indirection (the real command lives in guest system memory
 * and may straddle page boundaries), COMPLEXCMD containers (a sequence of
 * sub-commands processed recursively), and forwards everything else to
 * vboxVDMACrCmdVbvaProcessCmdData.
 *
 * @returns 0 on success, negative VBVA-style int8 result on failure.
 * @param   pVdma   The VDMA host state.
 * @param   pCmd    The command header as placed in the VBVA ring.
 * @param   cbCmd   Size of the command in the ring.
 */
static int8_t vboxVDMACrCmdVbvaProcess(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
{
    LogRelFlow(("VDMA: vboxVDMACrCmdVbvaProcess: ENTER, opCode(%i)\n", pCmd->u8OpCode));
    int8_t i8Result = 0;

    switch (pCmd->u8OpCode)
    {
        case VBOXCMDVBVA_OPTYPE_SYSMEMCMD:
        {
            if (cbCmd < sizeof (VBOXCMDVBVA_SYSMEMCMD))
            {
                WARN(("invalid command size"));
                return -1;
            }
            VBOXCMDVBVA_SYSMEMCMD *pSysmemCmd = (VBOXCMDVBVA_SYSMEMCMD*)pCmd;
            const VBOXCMDVBVA_HDR *pRealCmdHdr;
            /* The real command size is encoded in u8Flags (low byte) and
             * u.u8PrimaryID (high byte) of the ring header. */
            uint32_t cbRealCmd = pCmd->u8Flags;
            cbRealCmd |= (uint32_t)pCmd->u.u8PrimaryID << 8;
            if (cbRealCmd < sizeof (VBOXCMDVBVA_HDR))
            {
                WARN(("invalid sysmem cmd size"));
                return -1;
            }

            RTGCPHYS phCmd = (RTGCPHYS)pSysmemCmd->phCmd;

            PGMPAGEMAPLOCK Lock;
            PVGASTATE pVGAState = pVdma->pVGAState;
            PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
            const void * pvCmd;
            int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
            if (!RT_SUCCESS(rc))
            {
                WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
                return -1;
            }

            Assert((phCmd & PAGE_OFFSET_MASK) == (((uintptr_t)pvCmd) & PAGE_OFFSET_MASK));

            /* Bytes of the real command available in the first mapped page. */
            uint32_t cbCmdPart = PAGE_SIZE - (((uintptr_t)pvCmd) & PAGE_OFFSET_MASK);

            if (cbRealCmd <= cbCmdPart)
            {
                /* Fast path: the whole command fits in one page. */
                pRealCmdHdr = (const VBOXCMDVBVA_HDR *)pvCmd;
                i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pRealCmdHdr, cbRealCmd);
                PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
                return i8Result;
            }

            /* The command crosses a page boundary.  If even the header is
             * split, reassemble it into Hdr from two mappings. */
            VBOXCMDVBVA_HDR Hdr;
            const void *pvCurCmdTail;
            uint32_t cbCurCmdTail;
            if (cbCmdPart >= sizeof (*pRealCmdHdr))
            {
                pRealCmdHdr = (const VBOXCMDVBVA_HDR *)pvCmd;
                pvCurCmdTail = (const void*)(pRealCmdHdr + 1);
                cbCurCmdTail = cbCmdPart - sizeof (*pRealCmdHdr);
            }
            else
            {
                memcpy(&Hdr, pvCmd, cbCmdPart);
                PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
                phCmd += cbCmdPart;
                Assert(!(phCmd & PAGE_OFFSET_MASK));
                rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
                if (!RT_SUCCESS(rc))
                {
                    WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
                    return -1;
                }

                cbCmdPart = sizeof (*pRealCmdHdr) - cbCmdPart;
                memcpy(((uint8_t*)(&Hdr)) + cbCmdPart, pvCmd, cbCmdPart);
                pRealCmdHdr = &Hdr;
                pvCurCmdTail = (const void*)(((uint8_t*)pvCmd) + cbCmdPart);
                cbCurCmdTail = PAGE_SIZE - cbCmdPart;
            }

            if (cbCurCmdTail > cbRealCmd - sizeof (*pRealCmdHdr))
                cbCurCmdTail = cbRealCmd - sizeof (*pRealCmdHdr);

            switch (pRealCmdHdr->u8OpCode)
            {
                case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
                {
                    const uint32_t *pPages;
                    uint32_t cPages;
                    uint8_t *pu8Vram;
                    bool fIn;
                    i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pRealCmdHdr, (const VBOXCMDVBVA_PAGING_TRANSFER_DATA*)pvCurCmdTail, cbRealCmd,
                                                                &pPages, &cPages,
                                                                &pu8Vram, &fIn);
                    if (i8Result < 0)
                    {
                        WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
                        /* we need to break, not return, to ensure currently locked page is released */
                        break;
                    }

                    if (cbCurCmdTail & 3)
                    {
                        WARN(("command is not alligned properly %d", cbCurCmdTail));
                        i8Result = -1;
                        /* we need to break, not return, to ensure currently locked page is released */
                        break;
                    }

                    uint32_t cCurPages = cbCurCmdTail / sizeof (VBOXCMDVBVAPAGEIDX);
                    Assert(cCurPages < cPages);

                    /* Process the page-index array one guest page at a time,
                     * remapping as it continues into further pages. */
                    do
                    {
                        rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pPages, cCurPages, pu8Vram, fIn);
                        if (!RT_SUCCESS(rc))
                        {
                            WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
                            i8Result = -1;
                            /* we need to break, not return, to ensure currently locked page is released */
                            break;
                        }

                        Assert(cPages >= cCurPages);
                        cPages -= cCurPages;

                        if (!cPages)
                            break;

                        PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);

                        Assert(!(phCmd & PAGE_OFFSET_MASK));

                        phCmd += PAGE_SIZE;
                        pu8Vram += (VBOXCMDVBVAOFFSET)cCurPages << PAGE_SHIFT;

                        rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
                        if (!RT_SUCCESS(rc))
                        {
                            WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
                            /* the page is not locked, return */
                            return -1;
                        }

                        /* NOTE(review): pPages is not updated to point into the
                         * newly mapped page (pvCmd) here — it appears the next
                         * iteration reads via the stale pointer.  Verify
                         * against upstream before relying on multi-page
                         * transfers through this path. */
                        cCurPages = PAGE_SIZE / sizeof (VBOXCMDVBVAPAGEIDX);
                        if (cCurPages > cPages)
                            cCurPages = cPages;
                    } while (1);
                    break;
                }
                default:
                    WARN(("command can not be splitted"));
                    i8Result = -1;
                    break;
            }

            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
            return i8Result;
        }
        case VBOXCMDVBVA_OPTYPE_COMPLEXCMD:
        {
            Assert(cbCmd >= sizeof (VBOXCMDVBVA_HDR));
            /* Skip the container header and walk the embedded sub-commands. */
            ++pCmd;
            cbCmd -= sizeof (*pCmd);
            uint32_t cbCurCmd = 0;
            for ( ; cbCmd; cbCmd -= cbCurCmd, pCmd = (VBOXCMDVBVA_HDR*)(((uint8_t*)pCmd) + cbCurCmd))
            {
                if (cbCmd < sizeof (VBOXCMDVBVA_HDR))
                {
                    WARN(("invalid command size"));
                    return -1;
                }

                cbCurCmd = pCmd->u2.complexCmdEl.u16CbCmdHost;
                if (cbCmd < cbCurCmd)
                {
                    WARN(("invalid command size"));
                    return -1;
                }

                i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCurCmd);
                if (i8Result < 0)
                {
                    WARN(("vboxVDMACrCmdVbvaProcess failed"));
                    return i8Result;
                }
            }
            return 0;
        }
        default:
            i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pCmd, cbCmd);
            LogRelFlow(("VDMA: vboxVDMACrCmdVbvaProcess: LEAVE, opCode(%i)\n", pCmd->u8OpCode));
            return i8Result;
    }
}
2152
2153static void vboxVDMACrCmdProcess(struct VBOXVDMAHOST *pVdma, uint8_t* pu8Cmd, uint32_t cbCmd)
2154{
2155 if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
2156 return;
2157
2158 if (cbCmd < sizeof (VBOXCMDVBVA_HDR))
2159 {
2160 WARN(("invalid command size"));
2161 return;
2162 }
2163
2164 PVBOXCMDVBVA_HDR pCmd = (PVBOXCMDVBVA_HDR)pu8Cmd;
2165
2166 /* check if the command is cancelled */
2167 if (!ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_IN_PROGRESS, VBOXCMDVBVA_STATE_SUBMITTED))
2168 {
2169 Assert(pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED);
2170 return;
2171 }
2172
2173 pCmd->u.i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCmd);
2174}
2175
2176static int vboxVDMACrCtlHgsmiSetup(struct VBOXVDMAHOST *pVdma)
2177{
2178 PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP pCmd = (PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP)
2179 vboxVDMACrCtlCreate (VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP, sizeof (*pCmd));
2180 int rc = VERR_NO_MEMORY;
2181 if (pCmd)
2182 {
2183 PVGASTATE pVGAState = pVdma->pVGAState;
2184 pCmd->pvVRamBase = pVGAState->vram_ptrR3;
2185 pCmd->cbVRam = pVGAState->vram_size;
2186 pCmd->pLed = &pVGAState->Led3D;
2187 pCmd->CrClientInfo.hClient = pVdma;
2188 pCmd->CrClientInfo.pfnCallout = vboxCmdVBVACmdCallout;
2189 rc = vboxVDMACrCtlPost(pVGAState, &pCmd->Hdr, sizeof (*pCmd));
2190 if (RT_SUCCESS(rc))
2191 {
2192 rc = vboxVDMACrCtlGetRc(&pCmd->Hdr);
2193 if (RT_SUCCESS(rc))
2194 pVdma->CrSrvInfo = pCmd->CrCmdServerInfo;
2195 else if (rc != VERR_NOT_SUPPORTED)
2196 WARN(("vboxVDMACrCtlGetRc returned %d\n", rc));
2197 }
2198 else
2199 WARN(("vboxVDMACrCtlPost failed %d\n", rc));
2200
2201 vboxVDMACrCtlRelease(&pCmd->Hdr);
2202 }
2203
2204 if (!RT_SUCCESS(rc))
2205 memset(&pVdma->CrSrvInfo, 0, sizeof (pVdma->CrSrvInfo));
2206
2207 return rc;
2208}
2209
2210static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer);
2211
/**
 * Checks whether the descriptor is an "external" command that must be handed
 * to the chromium backend (or executed inline, for BPB transfers) instead of
 * going through the generic VDMA command pipeline.
 *
 * @returns VINF_SUCCESS if the command was fully handled (or its completion
 *          scheduled) here, VINF_NOT_SUPPORTED if the caller should process
 *          it through the normal pipeline, or VERR_xxx on malformed input.
 * @param   pVdma   The VDMA instance.
 * @param   pCmdDr  The command descriptor received from the guest.
 * @param   cbCmdDr Size of the descriptor buffer in bytes.
 */
static int vboxVDMACmdCheckCrCmd(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmdDr, uint32_t cbCmdDr)
{
    PVBOXVDMACMD pDmaCmd = NULL;
    uint32_t cbDmaCmd = 0;
    uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
    int rc = VINF_NOT_SUPPORTED;

    cbDmaCmd = pCmdDr->cbBuf;

    if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
    {
        /* The command is embedded immediately after the descriptor. */
        if (cbCmdDr < sizeof (*pCmdDr) + VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        /* NOTE(review): this check compares cbBuf against the descriptor
         * remainder; the direction looks inverted (one would expect the
         * embedded command to be required to FIT inside cbCmdDr) -- confirm
         * against the guest-side contract before changing. */
        if (cbDmaCmd < cbCmdDr - sizeof (*pCmdDr) - VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid command buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        pDmaCmd = VBOXVDMACBUF_DR_TAIL(pCmdDr, VBOXVDMACMD);
    }
    else if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
    {
        /* The command lives inside VRAM at the given offset; bounds-check. */
        VBOXVIDEOOFFSET offBuf = pCmdDr->Location.offVramBuf;
        if (offBuf + cbDmaCmd > pVdma->pVGAState->vram_size)
        {
            AssertMsgFailed(("invalid command buffer data from offset!"));
            return VERR_INVALID_PARAMETER;
        }
        pDmaCmd = (VBOXVDMACMD*)(pvRam + offBuf);
    }

    if (pDmaCmd)
    {
        Assert(cbDmaCmd >= VBOXVDMACMD_HEADER_SIZE());
        uint32_t cbBody = VBOXVDMACMD_BODY_SIZE(cbDmaCmd);

        switch (pDmaCmd->enmType)
        {
            case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
            {
                PVBOXVDMACMD_CHROMIUM_CMD pCrCmd = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_CHROMIUM_CMD);
                if (cbBody < sizeof (*pCrCmd))
                {
                    AssertMsgFailed(("invalid chromium command buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }
                PVGASTATE pVGAState = pVdma->pVGAState;
                rc = VINF_SUCCESS;
                if (pVGAState->pDrv->pfnCrHgsmiCommandProcess)
                {
                    /* Completion happens asynchronously from the HGCM thread. */
                    VBoxSHGSMICommandMarkAsynchCompletion(pCmdDr);
                    pVGAState->pDrv->pfnCrHgsmiCommandProcess(pVGAState->pDrv, pCrCmd, cbBody);
                    break;
                }
                else
                {
                    Assert(0);
                }

                /* No chromium backend available: complete the command now. */
                int tmpRc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                AssertRC(tmpRc);
                break;
            }
            case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
            {
                PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
                if (cbBody < sizeof (*pTransfer))
                {
                    AssertMsgFailed(("invalid bpb transfer buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }

                /* Executed synchronously right here; complete on success. */
                rc = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, sizeof (*pTransfer));
                AssertRC(rc);
                if (RT_SUCCESS(rc))
                {
                    pCmdDr->rc = VINF_SUCCESS;
                    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                    AssertRC(rc);
                    rc = VINF_SUCCESS;
                }
                break;
            }
            default:
                /* Not an external command; let the caller handle it. */
                break;
        }
    }
    return rc;
}
2307
2308int vboxVDMACrHgsmiCommandCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc)
2309{
2310 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2311 PHGSMIINSTANCE pIns = pVGAState->pHGSMI;
2312 VBOXVDMACMD *pDmaHdr = VBOXVDMACMD_FROM_BODY(pCmd);
2313 VBOXVDMACBUF_DR *pDr = VBOXVDMACBUF_DR_FROM_TAIL(pDmaHdr);
2314 AssertRC(rc);
2315 pDr->rc = rc;
2316
2317 Assert(pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
2318 rc = VBoxSHGSMICommandComplete(pIns, pDr);
2319 AssertRC(rc);
2320 return rc;
2321}
2322
2323int vboxVDMACrHgsmiControlCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CTL pCmd, int rc)
2324{
2325 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2326 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pCmdPrivate = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
2327 pCmdPrivate->rc = rc;
2328 if (pCmdPrivate->pfnCompletion)
2329 {
2330 pCmdPrivate->pfnCompletion(pVGAState, pCmd, pCmdPrivate->pvCompletion);
2331 }
2332 return VINF_SUCCESS;
2333}
2334
2335static int vboxVDMACmdExecBltPerform(PVBOXVDMAHOST pVdma, uint8_t *pvDstSurf, const uint8_t *pvSrcSurf,
2336 const PVBOXVDMA_SURF_DESC pDstDesc, const PVBOXVDMA_SURF_DESC pSrcDesc,
2337 const VBOXVDMA_RECTL * pDstRectl, const VBOXVDMA_RECTL * pSrcRectl)
2338{
2339 RT_NOREF(pVdma);
2340 /* we do not support color conversion */
2341 Assert(pDstDesc->format == pSrcDesc->format);
2342 /* we do not support stretching */
2343 Assert(pDstRectl->height == pSrcRectl->height);
2344 Assert(pDstRectl->width == pSrcRectl->width);
2345 if (pDstDesc->format != pSrcDesc->format)
2346 return VERR_INVALID_FUNCTION;
2347 if (pDstDesc->width == pDstRectl->width
2348 && pSrcDesc->width == pSrcRectl->width
2349 && pSrcDesc->width == pDstDesc->width)
2350 {
2351 Assert(!pDstRectl->left);
2352 Assert(!pSrcRectl->left);
2353 uint32_t cbOff = pDstDesc->pitch * pDstRectl->top;
2354 uint32_t cbSize = pDstDesc->pitch * pDstRectl->height;
2355 memcpy(pvDstSurf + cbOff, pvSrcSurf + cbOff, cbSize);
2356 }
2357 else
2358 {
2359 uint32_t offDstLineStart = pDstRectl->left * pDstDesc->bpp >> 3;
2360 uint32_t offDstLineEnd = ((pDstRectl->left * pDstDesc->bpp + 7) >> 3) + ((pDstDesc->bpp * pDstRectl->width + 7) >> 3);
2361 uint32_t cbDstLine = offDstLineEnd - offDstLineStart;
2362 uint32_t offDstStart = pDstDesc->pitch * pDstRectl->top + offDstLineStart;
2363 Assert(cbDstLine <= pDstDesc->pitch);
2364 uint32_t cbDstSkip = pDstDesc->pitch;
2365 uint8_t * pvDstStart = pvDstSurf + offDstStart;
2366
2367 uint32_t offSrcLineStart = pSrcRectl->left * pSrcDesc->bpp >> 3;
2368# ifdef VBOX_STRICT
2369 uint32_t offSrcLineEnd = ((pSrcRectl->left * pSrcDesc->bpp + 7) >> 3) + ((pSrcDesc->bpp * pSrcRectl->width + 7) >> 3);
2370 uint32_t cbSrcLine = offSrcLineEnd - offSrcLineStart;
2371# endif
2372 uint32_t offSrcStart = pSrcDesc->pitch * pSrcRectl->top + offSrcLineStart;
2373 Assert(cbSrcLine <= pSrcDesc->pitch);
2374 uint32_t cbSrcSkip = pSrcDesc->pitch;
2375 const uint8_t * pvSrcStart = pvSrcSurf + offSrcStart;
2376
2377 Assert(cbDstLine == cbSrcLine);
2378
2379 for (uint32_t i = 0; ; ++i)
2380 {
2381 memcpy (pvDstStart, pvSrcStart, cbDstLine);
2382 if (i == pDstRectl->height)
2383 break;
2384 pvDstStart += cbDstSkip;
2385 pvSrcStart += cbSrcSkip;
2386 }
2387 }
2388 return VINF_SUCCESS;
2389}
2390
2391static void vboxVDMARectlUnite(VBOXVDMA_RECTL * pRectl1, const VBOXVDMA_RECTL * pRectl2)
2392{
2393 if (!pRectl1->width)
2394 *pRectl1 = *pRectl2;
2395 else
2396 {
2397 int16_t x21 = pRectl1->left + pRectl1->width;
2398 int16_t x22 = pRectl2->left + pRectl2->width;
2399 if (pRectl1->left > pRectl2->left)
2400 {
2401 pRectl1->left = pRectl2->left;
2402 pRectl1->width = x21 < x22 ? x22 - pRectl1->left : x21 - pRectl1->left;
2403 }
2404 else if (x21 < x22)
2405 pRectl1->width = x22 - pRectl1->left;
2406
2407 x21 = pRectl1->top + pRectl1->height;
2408 x22 = pRectl2->top + pRectl2->height;
2409 if (pRectl1->top > pRectl2->top)
2410 {
2411 pRectl1->top = pRectl2->top;
2412 pRectl1->height = x21 < x22 ? x22 - pRectl1->top : x21 - pRectl1->top;
2413 }
2414 else if (x21 < x22)
2415 pRectl1->height = x22 - pRectl1->top;
2416 }
2417}
2418
2419/*
2420 * @return on success the number of bytes the command contained, otherwise - VERR_xxx error code
2421 */
2422static int vboxVDMACmdExecBlt(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt, uint32_t cbBuffer)
2423{
2424 const uint32_t cbBlt = VBOXVDMACMD_BODY_FIELD_OFFSET(uint32_t, VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects[pBlt->cDstSubRects]);
2425 Assert(cbBlt <= cbBuffer);
2426 if (cbBuffer < cbBlt)
2427 return VERR_INVALID_FUNCTION;
2428
2429 /* we do not support stretching for now */
2430 Assert(pBlt->srcRectl.width == pBlt->dstRectl.width);
2431 Assert(pBlt->srcRectl.height == pBlt->dstRectl.height);
2432 if (pBlt->srcRectl.width != pBlt->dstRectl.width)
2433 return VERR_INVALID_FUNCTION;
2434 if (pBlt->srcRectl.height != pBlt->dstRectl.height)
2435 return VERR_INVALID_FUNCTION;
2436 Assert(pBlt->cDstSubRects);
2437
2438 uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
2439 VBOXVDMA_RECTL updateRectl = {0, 0, 0, 0};
2440
2441 if (pBlt->cDstSubRects)
2442 {
2443 VBOXVDMA_RECTL dstRectl, srcRectl;
2444 const VBOXVDMA_RECTL *pDstRectl, *pSrcRectl;
2445 for (uint32_t i = 0; i < pBlt->cDstSubRects; ++i)
2446 {
2447 pDstRectl = &pBlt->aDstSubRects[i];
2448 if (pBlt->dstRectl.left || pBlt->dstRectl.top)
2449 {
2450 dstRectl.left = pDstRectl->left + pBlt->dstRectl.left;
2451 dstRectl.top = pDstRectl->top + pBlt->dstRectl.top;
2452 dstRectl.width = pDstRectl->width;
2453 dstRectl.height = pDstRectl->height;
2454 pDstRectl = &dstRectl;
2455 }
2456
2457 pSrcRectl = &pBlt->aDstSubRects[i];
2458 if (pBlt->srcRectl.left || pBlt->srcRectl.top)
2459 {
2460 srcRectl.left = pSrcRectl->left + pBlt->srcRectl.left;
2461 srcRectl.top = pSrcRectl->top + pBlt->srcRectl.top;
2462 srcRectl.width = pSrcRectl->width;
2463 srcRectl.height = pSrcRectl->height;
2464 pSrcRectl = &srcRectl;
2465 }
2466
2467 int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
2468 &pBlt->dstDesc, &pBlt->srcDesc,
2469 pDstRectl,
2470 pSrcRectl);
2471 AssertRC(rc);
2472 if (!RT_SUCCESS(rc))
2473 return rc;
2474
2475 vboxVDMARectlUnite(&updateRectl, pDstRectl);
2476 }
2477 }
2478 else
2479 {
2480 int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
2481 &pBlt->dstDesc, &pBlt->srcDesc,
2482 &pBlt->dstRectl,
2483 &pBlt->srcRectl);
2484 AssertRC(rc);
2485 if (!RT_SUCCESS(rc))
2486 return rc;
2487
2488 vboxVDMARectlUnite(&updateRectl, &pBlt->dstRectl);
2489 }
2490
2491 return cbBlt;
2492}
2493
2494static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer)
2495{
2496 if (cbBuffer < sizeof (*pTransfer))
2497 return VERR_INVALID_PARAMETER;
2498
2499 PVGASTATE pVGAState = pVdma->pVGAState;
2500 uint8_t * pvRam = pVGAState->vram_ptrR3;
2501 PGMPAGEMAPLOCK SrcLock;
2502 PGMPAGEMAPLOCK DstLock;
2503 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
2504 const void * pvSrc;
2505 void * pvDst;
2506 int rc = VINF_SUCCESS;
2507 uint32_t cbTransfer = pTransfer->cbTransferSize;
2508 uint32_t cbTransfered = 0;
2509 bool bSrcLocked = false;
2510 bool bDstLocked = false;
2511 do
2512 {
2513 uint32_t cbSubTransfer = cbTransfer;
2514 if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
2515 {
2516 pvSrc = pvRam + pTransfer->Src.offVramBuf + cbTransfered;
2517 }
2518 else
2519 {
2520 RTGCPHYS phPage = pTransfer->Src.phBuf;
2521 phPage += cbTransfered;
2522 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvSrc, &SrcLock);
2523 AssertRC(rc);
2524 if (RT_SUCCESS(rc))
2525 {
2526 bSrcLocked = true;
2527 cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
2528 }
2529 else
2530 {
2531 break;
2532 }
2533 }
2534
2535 if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
2536 {
2537 pvDst = pvRam + pTransfer->Dst.offVramBuf + cbTransfered;
2538 }
2539 else
2540 {
2541 RTGCPHYS phPage = pTransfer->Dst.phBuf;
2542 phPage += cbTransfered;
2543 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvDst, &DstLock);
2544 AssertRC(rc);
2545 if (RT_SUCCESS(rc))
2546 {
2547 bDstLocked = true;
2548 cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
2549 }
2550 else
2551 {
2552 break;
2553 }
2554 }
2555
2556 if (RT_SUCCESS(rc))
2557 {
2558 memcpy(pvDst, pvSrc, cbSubTransfer);
2559 cbTransfer -= cbSubTransfer;
2560 cbTransfered += cbSubTransfer;
2561 }
2562 else
2563 {
2564 cbTransfer = 0; /* to break */
2565 }
2566
2567 if (bSrcLocked)
2568 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &SrcLock);
2569 if (bDstLocked)
2570 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &DstLock);
2571 } while (cbTransfer);
2572
2573 if (RT_SUCCESS(rc))
2574 return sizeof (*pTransfer);
2575 return rc;
2576}
2577
2578static int vboxVDMACmdExec(PVBOXVDMAHOST pVdma, const uint8_t *pvBuffer, uint32_t cbBuffer)
2579{
2580 do
2581 {
2582 Assert(pvBuffer);
2583 Assert(cbBuffer >= VBOXVDMACMD_HEADER_SIZE());
2584
2585 if (!pvBuffer)
2586 return VERR_INVALID_PARAMETER;
2587 if (cbBuffer < VBOXVDMACMD_HEADER_SIZE())
2588 return VERR_INVALID_PARAMETER;
2589
2590 PVBOXVDMACMD pCmd = (PVBOXVDMACMD)pvBuffer;
2591 switch (pCmd->enmType)
2592 {
2593 case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
2594 {
2595# ifdef VBOXWDDM_TEST_UHGSMI
2596 static int count = 0;
2597 static uint64_t start, end;
2598 if (count==0)
2599 {
2600 start = RTTimeNanoTS();
2601 }
2602 ++count;
2603 if (count==100000)
2604 {
2605 end = RTTimeNanoTS();
2606 float ems = (end-start)/1000000.f;
2607 LogRel(("100000 calls took %i ms, %i cps\n", (int)ems, (int)(100000.f*1000.f/ems) ));
2608 }
2609# endif
2610 /** @todo post the buffer to chromium */
2611 return VINF_SUCCESS;
2612 }
2613 case VBOXVDMACMD_TYPE_DMA_PRESENT_BLT:
2614 {
2615 const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_PRESENT_BLT);
2616 int cbBlt = vboxVDMACmdExecBlt(pVdma, pBlt, cbBuffer);
2617 Assert(cbBlt >= 0);
2618 Assert((uint32_t)cbBlt <= cbBuffer);
2619 if (cbBlt >= 0)
2620 {
2621 if ((uint32_t)cbBlt == cbBuffer)
2622 return VINF_SUCCESS;
2623 else
2624 {
2625 cbBuffer -= (uint32_t)cbBlt;
2626 pvBuffer -= cbBlt;
2627 }
2628 }
2629 else
2630 return cbBlt; /* error */
2631 break;
2632 }
2633 case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
2634 {
2635 const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
2636 int cbTransfer = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, cbBuffer);
2637 Assert(cbTransfer >= 0);
2638 Assert((uint32_t)cbTransfer <= cbBuffer);
2639 if (cbTransfer >= 0)
2640 {
2641 if ((uint32_t)cbTransfer == cbBuffer)
2642 return VINF_SUCCESS;
2643 else
2644 {
2645 cbBuffer -= (uint32_t)cbTransfer;
2646 pvBuffer -= cbTransfer;
2647 }
2648 }
2649 else
2650 return cbTransfer; /* error */
2651 break;
2652 }
2653 case VBOXVDMACMD_TYPE_DMA_NOP:
2654 return VINF_SUCCESS;
2655 case VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ:
2656 return VINF_SUCCESS;
2657 default:
2658 AssertBreakpoint();
2659 return VERR_INVALID_FUNCTION;
2660 }
2661 } while (1);
2662
2663 /* we should not be here */
2664 AssertBreakpoint();
2665 return VERR_INVALID_STATE;
2666}
2667
/**
 * The VDMA worker thread loop: pulls commands and controls off the VBVA
 * host context and dispatches them until asked to terminate.
 *
 * @returns VINF_SUCCESS.
 * @param   hThreadSelf The thread handle (unused).
 * @param   pvUser      Pointer to the VBOXVDMAHOST instance.
 */
static DECLCALLBACK(int) vboxVDMAWorkerThread(RTTHREAD hThreadSelf, void *pvUser)
{
    RT_NOREF(hThreadSelf);
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvUser;
    PVGASTATE pVGAState = pVdma->pVGAState;
    VBVAEXHOSTCONTEXT *pCmdVbva = &pVdma->CmdVbva;
    uint8_t *pCmd;
    uint32_t cbCmd;
    int rc;

    /* Signal the creator that the thread is up and running. */
    VBoxVDMAThreadNotifyConstructSucceeded(&pVdma->Thread, pvUser);

    while (!VBoxVDMAThreadIsTerminating(&pVdma->Thread))
    {
        VBVAEXHOST_DATA_TYPE enmType = VBoxVBVAExHPDataGet(pCmdVbva, &pCmd, &cbCmd);
        switch (enmType)
        {
            case VBVAEXHOST_DATA_TYPE_CMD:
                /* Guest command: process, complete, raise the completion IRQ. */
                vboxVDMACrCmdProcess(pVdma, pCmd, cbCmd);
                VBoxVBVAExHPDataCompleteCmd(pCmdVbva, cbCmd);
                VBVARaiseIrq(pVGAState, 0);
                break;
            case VBVAEXHOST_DATA_TYPE_GUESTCTL:
                /* Guest control request. */
                rc = vboxVDMACrGuestCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd);
                VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
                break;
            case VBVAEXHOST_DATA_TYPE_HOSTCTL:
            {
                /* Host control request; it may ask the loop to pause
                 * (fContinue == false), in which case we fall through to the
                 * NO_DATA case and block on the thread event. */
                bool fContinue = true;
                rc = vboxVDMACrHostCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd, &fContinue);
                VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
                if (fContinue)
                    break;
            }
            RT_FALL_THRU();
            case VBVAEXHOST_DATA_TYPE_NO_DATA:
                /* Nothing queued: block until the thread event is signalled. */
                rc = VBoxVDMAThreadEventWait(&pVdma->Thread, RT_INDEFINITE_WAIT);
                AssertRC(rc);
                break;
            default:
                WARN(("unexpected type %d\n", enmType));
                break;
        }
    }

    /* Acknowledge the termination request. */
    VBoxVDMAThreadNotifyTerminatingSucceeded(&pVdma->Thread, pvUser);

    return VINF_SUCCESS;
}
2717
/**
 * Executes a generic VDMA command buffer and completes the descriptor back
 * to the guest via SHGSMI.
 *
 * The payload may follow the descriptor, live at a VRAM offset, or reside in
 * a guest physical page (limited to a single 4K page in the latter case).
 */
static void vboxVDMACommandProcess(PVBOXVDMAHOST pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    RT_NOREF(cbCmd);
    PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
    const uint8_t * pvBuf;
    PGMPAGEMAPLOCK Lock;
    int rc;
    bool bReleaseLocked = false;

    do
    {
        PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;

        if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
            pvBuf = VBOXVDMACBUF_DR_TAIL(pCmd, const uint8_t);
        else if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
        {
            uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
            pvBuf = pvRam + pCmd->Location.offVramBuf;
        }
        else
        {
            /* Guest physical address: split into page base and in-page offset. */
            RTGCPHYS phPage = pCmd->Location.phBuf & ~0xfffULL;
            uint32_t offset = pCmd->Location.phBuf & 0xfff;
            Assert(offset + pCmd->cbBuf <= 0x1000);
            if (offset + pCmd->cbBuf > 0x1000)
            {
                /** @todo more advanced mechanism of command buffer proc is actually needed */
                rc = VERR_INVALID_PARAMETER;
                break;
            }

            const void * pvPageBuf;
            rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPageBuf, &Lock);
            AssertRC(rc);
            if (!RT_SUCCESS(rc))
            {
                /** @todo if (rc == VERR_PGM_PHYS_PAGE_RESERVED) -> fall back on using PGMPhysRead ?? */
                break;
            }

            pvBuf = (const uint8_t *)pvPageBuf;
            pvBuf += offset;

            /* Remember to drop the page mapping lock after execution. */
            bReleaseLocked = true;
        }

        rc = vboxVDMACmdExec(pVdma, pvBuf, pCmd->cbBuf);
        AssertRC(rc);

        if (bReleaseLocked)
            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
    } while (0);

    /* Report the result and complete the descriptor to the guest. */
    pCmd->rc = rc;

    rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
    AssertRC(rc);
}
2777
# if 0 /** @todo vboxVDMAControlProcess is unused */
/* Disabled code kept for reference: would acknowledge a VDMA control request
 * with VINF_SUCCESS and complete it over SHGSMI. */
static void vboxVDMAControlProcess(PVBOXVDMAHOST pVdma, PVBOXVDMA_CTL pCmd)
{
    PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
    pCmd->i32Result = VINF_SUCCESS;
    int rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
    AssertRC(rc);
}
# endif
2787
2788#endif /* VBOX_WITH_CRHGSMI */
2789#ifdef VBOX_VDMA_WITH_WATCHDOG
2790
2791static DECLCALLBACK(void) vboxVDMAWatchDogTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
2792{
2793 VBOXVDMAHOST *pVdma = (VBOXVDMAHOST *)pvUser;
2794 PVGASTATE pVGAState = pVdma->pVGAState;
2795 VBVARaiseIrq(pVGAState, HGSMIHOSTFLAGS_WATCHDOG);
2796}
2797
2798static int vboxVDMAWatchDogCtl(struct VBOXVDMAHOST *pVdma, uint32_t cMillis)
2799{
2800 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
2801 if (cMillis)
2802 TMTimerSetMillies(pVdma->WatchDogTimer, cMillis);
2803 else
2804 TMTimerStop(pVdma->WatchDogTimer);
2805 return VINF_SUCCESS;
2806}
2807
2808#endif /* VBOX_VDMA_WITH_WATCHDOG */
2809
/**
 * Constructs the VDMA host state and attaches it to the VGA device state.
 *
 * With VBOX_WITH_CRHGSMI this also initializes the worker thread bookkeeping,
 * the command VBVA context and the callout critical section, and (unless
 * VMSVGA is active) pushes the CRHGSMI setup control to the HGCM service.
 *
 * @returns VBox status code.
 * @param   pVGAState       The VGA device state to attach to.
 * @param   cPipeElements   Unused.
 */
int vboxVDMAConstruct(PVGASTATE pVGAState, uint32_t cPipeElements)
{
    RT_NOREF(cPipeElements);
    int rc;
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)RTMemAllocZ(sizeof(*pVdma));
    Assert(pVdma);
    if (pVdma)
    {
        pVdma->pHgsmi = pVGAState->pHGSMI;
        pVdma->pVGAState = pVGAState;

#ifdef VBOX_VDMA_WITH_WATCHDOG
        rc = PDMDevHlpTMTimerCreate(pVGAState->pDevInsR3, TMCLOCK_REAL, vboxVDMAWatchDogTimer,
                                    pVdma, TMTIMER_FLAGS_NO_CRIT_SECT,
                                    "VDMA WatchDog Timer", &pVdma->WatchDogTimer);
        AssertRC(rc);
#endif

#ifdef VBOX_WITH_CRHGSMI
        VBoxVDMAThreadInit(&pVdma->Thread);

        rc = RTSemEventMultiCreate(&pVdma->HostCrCtlCompleteEvent);
        if (RT_SUCCESS(rc))
        {
            rc = VBoxVBVAExHSInit(&pVdma->CmdVbva);
            if (RT_SUCCESS(rc))
            {
                rc = RTCritSectInit(&pVdma->CalloutCritSect);
                if (RT_SUCCESS(rc))
                {
                    pVGAState->pVdma = pVdma;

                    /* No HGCM service if VMSVGA is enabled. */
                    if (!pVGAState->fVMSVGAEnabled)
                    {
                        int rcIgnored = vboxVDMACrCtlHgsmiSetup(pVdma); NOREF(rcIgnored); /** @todo is this ignoring intentional? */
                    }
                    return VINF_SUCCESS;
                }
                WARN(("RTCritSectInit failed %d\n", rc));

                /* Unwind in reverse order of initialization. */
                VBoxVBVAExHSTerm(&pVdma->CmdVbva);
            }
            else
                WARN(("VBoxVBVAExHSInit failed %d\n", rc));

            RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
        }
        else
            WARN(("RTSemEventMultiCreate failed %d\n", rc));


        RTMemFree(pVdma);
#else
        pVGAState->pVdma = pVdma;
        return VINF_SUCCESS;
#endif
    }
    else
        rc = VERR_OUT_OF_RESOURCES;

    return rc;
}
2873
/**
 * Resets the VDMA state on VM reset by synchronously disabling command VBVA
 * processing (no-op without VBOX_WITH_CRHGSMI).
 *
 * @returns VINF_SUCCESS.
 */
int vboxVDMAReset(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    vdmaVBVACtlDisableSync(pVdma);
#else
    RT_NOREF(pVdma);
#endif
    return VINF_SUCCESS;
}
2883
/**
 * Tears down the VDMA host state: disables command VBVA, cleans up the
 * worker thread and synchronization objects, and frees the instance.
 *
 * @returns VINF_SUCCESS.
 * @param   pVdma   The VDMA instance; NULL is tolerated.
 */
int vboxVDMADestruct(struct VBOXVDMAHOST *pVdma)
{
    if (!pVdma)
        return VINF_SUCCESS;
#ifdef VBOX_WITH_CRHGSMI
    if (pVdma->pVGAState->fVMSVGAEnabled)
        VBoxVBVAExHSDisable(&pVdma->CmdVbva);
    else
    {
        /** @todo Remove. It does nothing because pVdma->CmdVbva is already disabled at this point
         * as the result of the SharedOpenGL HGCM service unloading.
         */
        vdmaVBVACtlDisableSync(pVdma);
    }
    VBoxVDMAThreadCleanup(&pVdma->Thread);
    VBoxVBVAExHSTerm(&pVdma->CmdVbva);
    RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
    RTCritSectDelete(&pVdma->CalloutCritSect);
#endif
    RTMemFree(pVdma);
    return VINF_SUCCESS;
}
2906
2907void vboxVDMAControl(struct VBOXVDMAHOST *pVdma, PVBOXVDMA_CTL pCmd, uint32_t cbCmd)
2908{
2909 RT_NOREF(cbCmd);
2910 PHGSMIINSTANCE pIns = pVdma->pHgsmi;
2911
2912 switch (pCmd->enmCtl)
2913 {
2914 case VBOXVDMA_CTL_TYPE_ENABLE:
2915 pCmd->i32Result = VINF_SUCCESS;
2916 break;
2917 case VBOXVDMA_CTL_TYPE_DISABLE:
2918 pCmd->i32Result = VINF_SUCCESS;
2919 break;
2920 case VBOXVDMA_CTL_TYPE_FLUSH:
2921 pCmd->i32Result = VINF_SUCCESS;
2922 break;
2923#ifdef VBOX_VDMA_WITH_WATCHDOG
2924 case VBOXVDMA_CTL_TYPE_WATCHDOG:
2925 pCmd->i32Result = vboxVDMAWatchDogCtl(pVdma, pCmd->u32Offset);
2926 break;
2927#endif
2928 default:
2929 WARN(("cmd not supported"));
2930 pCmd->i32Result = VERR_NOT_SUPPORTED;
2931 }
2932
2933 int rc = VBoxSHGSMICommandComplete (pIns, pCmd);
2934 AssertRC(rc);
2935}
2936
/**
 * HGSMI entry point for VDMA command buffers (VBOXVDMACBUF_DR).
 *
 * Chromium commands are detected first and routed to the HGCM backend;
 * everything else goes through the generic command processor.  The
 * descriptor is always completed, either here or asynchronously.
 */
void vboxVDMACommand(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    int rc = VERR_NOT_IMPLEMENTED;

#ifdef VBOX_WITH_CRHGSMI
    /* chromium commands are processed by crhomium hgcm thread independently from our internal cmd processing pipeline
     * this is why we process them specially */
    rc = vboxVDMACmdCheckCrCmd(pVdma, pCmd, cbCmd);
    if (rc == VINF_SUCCESS)
        return;

    if (RT_FAILURE(rc))
    {
        pCmd->rc = rc;
        rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
        AssertRC(rc);
        return;
    }

    /* VINF_NOT_SUPPORTED: not a chromium command, process it here. */
    vboxVDMACommandProcess(pVdma, pCmd, cbCmd);
#else
    RT_NOREF(cbCmd);
    pCmd->rc = rc;
    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
    AssertRC(rc);
#endif
}
2964
2965#ifdef VBOX_WITH_CRHGSMI
2966
2967static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext);
2968
2969static int vdmaVBVACtlSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2970{
2971 int rc = VBoxVBVAExHCtlSubmit(&pVdma->CmdVbva, pCtl, enmSource, pfnComplete, pvComplete);
2972 if (RT_SUCCESS(rc))
2973 {
2974 if (rc == VINF_SUCCESS)
2975 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
2976 else
2977 Assert(rc == VINF_ALREADY_INITIALIZED);
2978 }
2979 else
2980 Log(("VBoxVBVAExHCtlSubmit failed %d\n", rc));
2981
2982 return rc;
2983}
2984
2985static DECLCALLBACK(void) vboxCmdVBVACmdCtlGuestCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext)
2986{
2987 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvContext;
2988 VBOXCMDVBVA_CTL *pGCtl = (VBOXCMDVBVA_CTL*)(pCtl->u.cmd.pu8Cmd - sizeof (VBOXCMDVBVA_CTL));
2989 AssertRC(rc);
2990 pGCtl->i32Result = rc;
2991
2992 Assert(pVdma->pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
2993 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pGCtl);
2994 AssertRC(rc);
2995
2996 VBoxVBVAExHCtlFree(pVbva, pCtl);
2997}
2998
2999static int vdmaVBVACtlGenericSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_SOURCE enmSource, VBVAEXHOSTCTL_TYPE enmType, uint8_t* pu8Cmd, uint32_t cbCmd, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3000{
3001 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, enmType);
3002 if (!pHCtl)
3003 {
3004 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3005 return VERR_NO_MEMORY;
3006 }
3007
3008 pHCtl->u.cmd.pu8Cmd = pu8Cmd;
3009 pHCtl->u.cmd.cbCmd = cbCmd;
3010 int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, enmSource, pfnComplete, pvComplete);
3011 if (RT_FAILURE(rc))
3012 {
3013 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3014 Log(("vdmaVBVACtlSubmit failed rc %d\n", rc));
3015 return rc;;
3016 }
3017 return VINF_SUCCESS;
3018}
3019
3020static int vdmaVBVACtlGenericGuestSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_TYPE enmType, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
3021{
3022 Assert(cbCtl >= sizeof (VBOXCMDVBVA_CTL));
3023 VBoxSHGSMICommandMarkAsynchCompletion(pCtl);
3024 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_GUEST, enmType, (uint8_t*)(pCtl+1), cbCtl - sizeof (VBOXCMDVBVA_CTL), vboxCmdVBVACmdCtlGuestCompletion, pVdma);
3025 if (RT_SUCCESS(rc))
3026 return VINF_SUCCESS;
3027
3028 WARN(("vdmaVBVACtlGenericSubmit failed %d\n", rc));
3029 pCtl->i32Result = rc;
3030 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
3031 AssertRC(rc);
3032 return VINF_SUCCESS;
3033}
3034
3035static DECLCALLBACK(void) vboxCmdVBVACmdCtlHostCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvCompletion)
3036{
3037 VBOXCRCMDCTL* pVboxCtl = (VBOXCRCMDCTL*)pCtl->u.cmd.pu8Cmd;
3038 if (pVboxCtl->u.pfnInternal)
3039 ((PFNCRCTLCOMPLETION)pVboxCtl->u.pfnInternal)(pVboxCtl, pCtl->u.cmd.cbCmd, rc, pvCompletion);
3040 VBoxVBVAExHCtlFree(pVbva, pCtl);
3041}
3042
3043static int vdmaVBVACtlOpaqueHostSubmit(PVBOXVDMAHOST pVdma, struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
3044 PFNCRCTLCOMPLETION pfnCompletion,
3045 void *pvCompletion)
3046{
3047 pCmd->u.pfnInternal = (void(*)())pfnCompletion;
3048 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_HOST, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, (uint8_t*)pCmd, cbCmd, vboxCmdVBVACmdCtlHostCompletion, pvCompletion);
3049 if (RT_FAILURE(rc))
3050 {
3051 if (rc == VERR_INVALID_STATE)
3052 {
3053 pCmd->u.pfnInternal = NULL;
3054 PVGASTATE pVGAState = pVdma->pVGAState;
3055 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCmd, cbCmd, pfnCompletion, pvCompletion);
3056 if (!RT_SUCCESS(rc))
3057 WARN(("pfnCrHgsmiControlProcess failed %d\n", rc));
3058
3059 return rc;
3060 }
3061 WARN(("vdmaVBVACtlGenericSubmit failed %d\n", rc));
3062 return rc;
3063 }
3064
3065 return VINF_SUCCESS;
3066}
3067
3068static DECLCALLBACK(int) vdmaVBVANotifyEnable(PVGASTATE pVGAState)
3069{
3070 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
3071 {
3072 int rc = pVGAState->pDrv->pfnVBVAEnable (pVGAState->pDrv, i, NULL, true);
3073 if (!RT_SUCCESS(rc))
3074 {
3075 WARN(("pfnVBVAEnable failed %d\n", rc));
3076 for (uint32_t j = 0; j < i; j++)
3077 {
3078 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, j);
3079 }
3080
3081 return rc;
3082 }
3083 }
3084 return VINF_SUCCESS;
3085}
3086
3087static DECLCALLBACK(int) vdmaVBVANotifyDisable(PVGASTATE pVGAState)
3088{
3089 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
3090 {
3091 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, i);
3092 }
3093 return VINF_SUCCESS;
3094}
3095
/**
 * VBoxVDMAThreadCreate completion callback for the enable path: executes the
 * pending enable/disable control in the context of the freshly created
 * worker thread and notifies Main about the resulting VBVA state.
 *
 * @param   pThread         The worker thread (unused).
 * @param   rc              Thread creation status.
 * @param   pvThreadContext The VBOXVDMAHOST instance.
 * @param   pvContext       The VBVAEXHOSTCTL to process.
 */
static DECLCALLBACK(void) vdmaVBVACtlThreadCreatedEnable(struct VBOXVDMATHREAD *pThread, int rc,
                                                         void *pvThreadContext, void *pvContext)
{
    RT_NOREF(pThread);
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvThreadContext;
    VBVAEXHOSTCTL* pHCtl = (VBVAEXHOSTCTL*)pvContext;

    if (RT_SUCCESS(rc))
    {
        rc = vboxVDMACrGuestCtlProcess(pVdma, pHCtl);
        /* rc == VINF_SUCCESS would mean the actual state change has occcured */
        if (rc == VINF_SUCCESS)
        {
            /* we need to inform Main about VBVA enable/disable
             * main expects notifications to be done from the main thread
             * submit it there */
            PVGASTATE pVGAState = pVdma->pVGAState;

            if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
                vdmaVBVANotifyEnable(pVGAState);
            else
                vdmaVBVANotifyDisable(pVGAState);
        }
        else if (RT_FAILURE(rc))
            WARN(("vboxVDMACrGuestCtlProcess failed %d\n", rc));
    }
    else
        WARN(("vdmaVBVACtlThreadCreatedEnable is passed %d\n", rc));

    /* Complete the control with the final status. */
    VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pHCtl, rc);
}
3127
3128static int vdmaVBVACtlEnableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, bool fPaused, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3129{
3130 int rc;
3131 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, fPaused ? VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED : VBVAEXHOSTCTL_TYPE_GHH_ENABLE);
3132 if (pHCtl)
3133 {
3134 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
3135 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
3136 pHCtl->pfnComplete = pfnComplete;
3137 pHCtl->pvComplete = pvComplete;
3138
3139 rc = VBoxVDMAThreadCreate(&pVdma->Thread, vboxVDMAWorkerThread, pVdma, vdmaVBVACtlThreadCreatedEnable, pHCtl);
3140 if (RT_SUCCESS(rc))
3141 return VINF_SUCCESS;
3142 else
3143 WARN(("VBoxVDMAThreadCreate failed %d\n", rc));
3144
3145 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3146 }
3147 else
3148 {
3149 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3150 rc = VERR_NO_MEMORY;
3151 }
3152
3153 return rc;
3154}
3155
3156static int vdmaVBVACtlEnableSubmitSync(PVBOXVDMAHOST pVdma, uint32_t offVram, bool fPaused)
3157{
3158 VBVAENABLE Enable = {0};
3159 Enable.u32Flags = VBVA_F_ENABLE;
3160 Enable.u32Offset = offVram;
3161
3162 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
3163 Data.rc = VERR_NOT_IMPLEMENTED;
3164 int rc = RTSemEventCreate(&Data.hEvent);
3165 if (!RT_SUCCESS(rc))
3166 {
3167 WARN(("RTSemEventCreate failed %d\n", rc));
3168 return rc;
3169 }
3170
3171 rc = vdmaVBVACtlEnableSubmitInternal(pVdma, &Enable, fPaused, vdmaVBVACtlSubmitSyncCompletion, &Data);
3172 if (RT_SUCCESS(rc))
3173 {
3174 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
3175 if (RT_SUCCESS(rc))
3176 {
3177 rc = Data.rc;
3178 if (!RT_SUCCESS(rc))
3179 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
3180 }
3181 else
3182 WARN(("RTSemEventWait failed %d\n", rc));
3183 }
3184 else
3185 WARN(("vdmaVBVACtlSubmit failed %d\n", rc));
3186
3187 RTSemEventDestroy(Data.hEvent);
3188
3189 return rc;
3190}
3191
3192static int vdmaVBVACtlDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3193{
3194 int rc;
3195 VBVAEXHOSTCTL* pHCtl;
3196 if (VBoxVBVAExHSIsDisabled(&pVdma->CmdVbva))
3197 {
3198 WARN(("VBoxVBVAExHSIsDisabled: disabled"));
3199 return VINF_SUCCESS;
3200 }
3201
3202 pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_GHH_DISABLE);
3203 if (!pHCtl)
3204 {
3205 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3206 return VERR_NO_MEMORY;
3207 }
3208
3209 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
3210 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
3211 rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_GUEST, pfnComplete, pvComplete);
3212 if (RT_SUCCESS(rc))
3213 return VINF_SUCCESS;
3214
3215 WARN(("vdmaVBVACtlSubmit failed rc %d\n", rc));
3216 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3217 return rc;
3218}
3219
3220static int vdmaVBVACtlEnableDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3221{
3222 bool fEnable = ((pEnable->u32Flags & (VBVA_F_ENABLE | VBVA_F_DISABLE)) == VBVA_F_ENABLE);
3223 if (fEnable)
3224 return vdmaVBVACtlEnableSubmitInternal(pVdma, pEnable, false, pfnComplete, pvComplete);
3225 return vdmaVBVACtlDisableSubmitInternal(pVdma, pEnable, pfnComplete, pvComplete);
3226}
3227
/**
 * Handles a guest VBOXCMDVBVA_CTL_ENABLE command delivered over SHGSMI.
 *
 * Marks the SHGSMI command for asynchronous completion and submits the
 * enable/disable request.  If submission fails, the command is completed
 * synchronously here with the failure status.
 *
 * @returns VINF_SUCCESS always; the actual outcome is reported through the
 *          SHGSMI command's i32Result.
 * @param   pVdma    The VDMA host instance.
 * @param   pEnable  The guest enable/disable command.
 */
static int vdmaVBVACtlEnableDisableSubmit(PVBOXVDMAHOST pVdma, VBOXCMDVBVA_CTL_ENABLE *pEnable)
{
    VBoxSHGSMICommandMarkAsynchCompletion(&pEnable->Hdr);
    int rc = vdmaVBVACtlEnableDisableSubmitInternal(pVdma, &pEnable->Enable, vboxCmdVBVACmdCtlGuestCompletion, pVdma);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    /* Submission failed: complete the SHGSMI command right away with the error. */
    WARN(("vdmaVBVACtlEnableDisableSubmitInternal failed %d\n", rc));
    pEnable->Hdr.i32Result = rc;
    rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, &pEnable->Hdr);
    AssertRC(rc);
    return VINF_SUCCESS;
}
3241
3242static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
3243 int rc, void *pvContext)
3244{
3245 RT_NOREF(pVbva, pCtl);
3246 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvContext;
3247 pData->rc = rc;
3248 rc = RTSemEventSignal(pData->hEvent);
3249 if (!RT_SUCCESS(rc))
3250 WARN(("RTSemEventSignal failed %d\n", rc));
3251}
3252
/**
 * Submits a host control and blocks until its completion callback has run.
 *
 * @returns VBox status code: the submission status, the wait status, or the
 *          completion status reported by the worker.
 * @param   pVdma      The VDMA host instance.
 * @param   pCtl       The control to submit (typically stack allocated; must
 *                     stay valid until completion, which this function
 *                     guarantees by waiting).
 * @param   enmSource  Whether the control originates from the host or guest.
 */
static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource)
{
    VDMA_VBVA_CTL_CYNC_COMPLETION Data;
    Data.rc = VERR_NOT_IMPLEMENTED;
    int rc = RTSemEventCreate(&Data.hEvent);
    if (!RT_SUCCESS(rc))
    {
        WARN(("RTSemEventCreate failed %d\n", rc));
        return rc;
    }

    rc = vdmaVBVACtlSubmit(pVdma, pCtl, enmSource, vdmaVBVACtlSubmitSyncCompletion, &Data);
    if (RT_SUCCESS(rc))
    {
        /* Block until vdmaVBVACtlSubmitSyncCompletion stores Data.rc and signals. */
        rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
        if (RT_SUCCESS(rc))
        {
            rc = Data.rc;
            if (!RT_SUCCESS(rc))
                WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
        }
        else
            WARN(("RTSemEventWait failed %d\n", rc));
    }
    else
        Log(("vdmaVBVACtlSubmit failed %d\n", rc));

    RTSemEventDestroy(Data.hEvent);

    return rc;
}
3284
3285static int vdmaVBVAPause(PVBOXVDMAHOST pVdma)
3286{
3287 VBVAEXHOSTCTL Ctl;
3288 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE;
3289 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
3290}
3291
3292static int vdmaVBVAResume(PVBOXVDMAHOST pVdma)
3293{
3294 VBVAEXHOSTCTL Ctl;
3295 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME;
3296 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
3297}
3298
3299static int vboxVDMACmdSubmitPerform(struct VBOXVDMAHOST *pVdma)
3300{
3301 int rc = VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva);
3302 switch (rc)
3303 {
3304 case VINF_SUCCESS:
3305 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
3306 case VINF_ALREADY_INITIALIZED:
3307 case VINF_EOF:
3308 case VERR_INVALID_STATE:
3309 return VINF_SUCCESS;
3310 default:
3311 Assert(!RT_FAILURE(rc));
3312 return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
3313 }
3314}
3315
3316
3317int vboxCmdVBVACmdHostCtl(PPDMIDISPLAYVBVACALLBACKS pInterface,
3318 struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
3319 PFNCRCTLCOMPLETION pfnCompletion,
3320 void *pvCompletion)
3321{
3322 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
3323 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3324 if (pVdma == NULL)
3325 return VERR_INVALID_STATE;
3326 pCmd->CalloutList.List.pNext = NULL;
3327 return vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, pfnCompletion, pvCompletion);
3328}
3329
/** Shared state between vboxCmdVBVACmdHostCtlSync and its completion callback. */
typedef struct VBOXCMDVBVA_CMDHOSTCTL_SYNC
{
    /** The VDMA host instance (for the completion counter and event). */
    struct VBOXVDMAHOST *pVdma;
    /** Set to 1 by the waiter, cleared to 0 by the completion callback. */
    uint32_t fProcessing;
    /** Completion status written by the callback before fProcessing is cleared. */
    int rc;
} VBOXCMDVBVA_CMDHOSTCTL_SYNC;
3336
/**
 * Completion callback for vboxCmdVBVACmdHostCtlSync.
 *
 * Stores the status, bumps the completed-command counter (so the waiter knows
 * whether it may reset the multi event), clears the processing flag and wakes
 * the waiter.  The ordering of these steps matters: the status must be in
 * place before fProcessing is cleared, which in turn precedes the signal.
 *
 * @param   pCmd          The completed command (unused).
 * @param   cbCmd         Size of the command (unused).
 * @param   rc            Completion status.
 * @param   pvCompletion  The waiter's VBOXCMDVBVA_CMDHOSTCTL_SYNC.
 */
static DECLCALLBACK(void) vboxCmdVBVACmdHostCtlSyncCb(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
{
    RT_NOREF(pCmd, cbCmd);
    VBOXCMDVBVA_CMDHOSTCTL_SYNC *pData = (VBOXCMDVBVA_CMDHOSTCTL_SYNC*)pvCompletion;

    pData->rc = rc;

    struct VBOXVDMAHOST *pVdma = pData->pVdma;

    ASMAtomicIncS32(&pVdma->i32cHostCrCtlCompleted);

    pData->fProcessing = 0;

    RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
}
3352
3353static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd, VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb)
3354{
3355 pEntry->pfnCb = pfnCb;
3356 int rc = RTCritSectEnter(&pVdma->CalloutCritSect);
3357 if (RT_SUCCESS(rc))
3358 {
3359 RTListAppend(&pCmd->CalloutList.List, &pEntry->Node);
3360 RTCritSectLeave(&pVdma->CalloutCritSect);
3361
3362 RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
3363 }
3364 else
3365 WARN(("RTCritSectEnter failed %d\n", rc));
3366
3367 return rc;
3368}
3369
3370
3371static int vboxCmdVBVACmdCalloutProcess(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd)
3372{
3373 int rc = VINF_SUCCESS;
3374 for (;;)
3375 {
3376 rc = RTCritSectEnter(&pVdma->CalloutCritSect);
3377 if (RT_SUCCESS(rc))
3378 {
3379 VBOXCRCMDCTL_CALLOUT_LISTENTRY* pEntry = RTListGetFirst(&pCmd->CalloutList.List, VBOXCRCMDCTL_CALLOUT_LISTENTRY, Node);
3380 if (pEntry)
3381 RTListNodeRemove(&pEntry->Node);
3382 RTCritSectLeave(&pVdma->CalloutCritSect);
3383
3384 if (!pEntry)
3385 break;
3386
3387 pEntry->pfnCb(pEntry);
3388 }
3389 else
3390 {
3391 WARN(("RTCritSectEnter failed %d\n", rc));
3392 break;
3393 }
3394 }
3395
3396 return rc;
3397}
3398
/**
 * Submits a host control command and waits synchronously for its completion,
 * servicing callout requests (vboxCmdVBVACmdCallout) from this thread while
 * waiting.
 *
 * @returns VBox status code: submission failure, or the command's completion
 *          status reported via vboxCmdVBVACmdHostCtlSyncCb.
 * @param   pInterface  The VBVA callbacks interface of the VGA device.
 * @param   pCmd        The host control command.
 * @param   cbCmd       Size of the command in bytes.
 */
DECLCALLBACK(int) vboxCmdVBVACmdHostCtlSync(PPDMIDISPLAYVBVACALLBACKS pInterface,
                                            struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd)
{
    PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
    struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
    if (pVdma == NULL)
        return VERR_INVALID_STATE;
    VBOXCMDVBVA_CMDHOSTCTL_SYNC Data;
    Data.pVdma = pVdma;
    Data.fProcessing = 1;
    Data.rc = VERR_INTERNAL_ERROR;
    RTListInit(&pCmd->CalloutList.List);
    int rc = vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, vboxCmdVBVACmdHostCtlSyncCb, &Data);
    if (!RT_SUCCESS(rc))
    {
        WARN(("vdmaVBVACtlOpaqueHostSubmit failed %d", rc));
        return rc;
    }

    /* Wait for the completion callback to clear fProcessing, running any
     * queued callouts in between.  The wait uses a timeout so a missed or
     * consumed signal cannot hang us indefinitely. */
    while (Data.fProcessing)
    {
        /* Poll infrequently to make sure no completed message has been missed. */
        RTSemEventMultiWait(pVdma->HostCrCtlCompleteEvent, 500);

        vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);

        if (Data.fProcessing)
            RTThreadYield();
    }

    /* Process any callouts that raced with the completion. */
    vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);

    /* 'Our' message has been processed, so we should reset the semaphore.
     * It is still possible that another message has been completed since,
     * signalling the semaphore again.
     * Reset only if no other completed messages are outstanding.
     */
    int32_t c = ASMAtomicDecS32(&pVdma->i32cHostCrCtlCompleted);
    Assert(c >= 0);
    if (!c)
        RTSemEventMultiReset(pVdma->HostCrCtlCompleteEvent);

    rc = Data.rc;
    if (!RT_SUCCESS(rc))
        WARN(("host call failed %d", rc));

    return rc;
}
3448
/**
 * Dispatches a guest VBOXCMDVBVA_CTL command received over HGSMI.
 *
 * 3DCTL, RESIZE and well-formed ENABLE commands are handed off to their
 * submit helpers, which take over completion.  Malformed or unknown commands
 * fall through to the synchronous completion at the bottom.
 *
 * @returns VINF_SUCCESS always (or the helper's status); the command outcome
 *          is reported via i32Result / the SHGSMI completion.
 * @param   pVGAState  The VGA device state.
 * @param   pCtl       The control command.
 * @param   cbCtl      Size of the command in bytes.
 */
int vboxCmdVBVACmdCtl(PVGASTATE pVGAState, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
{
    struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
    int rc = VINF_SUCCESS;
    switch (pCtl->u32Type)
    {
        case VBOXCMDVBVACTL_TYPE_3DCTL:
            return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, pCtl, cbCtl);
        case VBOXCMDVBVACTL_TYPE_RESIZE:
            return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_RESIZE, pCtl, cbCtl);
        case VBOXCMDVBVACTL_TYPE_ENABLE:
            if (cbCtl != sizeof (VBOXCMDVBVA_CTL_ENABLE))
            {
                WARN(("incorrect enable size\n"));
                rc = VERR_INVALID_PARAMETER;
                break;
            }
            return vdmaVBVACtlEnableDisableSubmit(pVdma, (VBOXCMDVBVA_CTL_ENABLE*)pCtl);
        default:
            WARN(("unsupported type\n"));
            rc = VERR_INVALID_PARAMETER;
            break;
    }

    /* Error path: complete the command synchronously with the failure status. */
    pCtl->i32Result = rc;
    rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
    AssertRC(rc);
    return VINF_SUCCESS;
}
3478
3479int vboxCmdVBVACmdSubmit(PVGASTATE pVGAState)
3480{
3481 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3482 {
3483 WARN(("vdma VBVA is disabled\n"));
3484 return VERR_INVALID_STATE;
3485 }
3486
3487 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3488}
3489
3490int vboxCmdVBVACmdFlush(PVGASTATE pVGAState)
3491{
3492 WARN(("flush\n"));
3493 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3494 {
3495 WARN(("vdma VBVA is disabled\n"));
3496 return VERR_INVALID_STATE;
3497 }
3498 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3499}
3500
3501void vboxCmdVBVACmdTimer(PVGASTATE pVGAState)
3502{
3503 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3504 return;
3505 vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3506}
3507
3508bool vboxCmdVBVAIsEnabled(PVGASTATE pVGAState)
3509{
3510 return VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva);
3511}
3512
3513#endif /* VBOX_WITH_CRHGSMI */
3514
/**
 * Prepares VDMA for saved-state execution.
 *
 * With CRHGSMI: first tries to pause the VBVA worker; if VBVA is not active
 * (VERR_INVALID_STATE), instead posts a SAVESTATE_BEGIN chromium control to
 * the 3D backend and returns its result.
 *
 * @returns VBox status code.
 * @param   pVdma   The VDMA host instance (unused without CRHGSMI).
 */
int vboxVDMASaveStateExecPrep(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    int rc = vdmaVBVAPause(pVdma);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    /* VERR_INVALID_STATE means VBVA is not active; fall back to the chromium path. */
    if (rc != VERR_INVALID_STATE)
    {
        WARN(("vdmaVBVAPause failed %d\n", rc));
        return rc;
    }

# ifdef DEBUG_misha
    WARN(("debug prep"));
# endif

    PVGASTATE pVGAState = pVdma->pVGAState;
    PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
                                         VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_BEGIN, sizeof (*pCmd));
    Assert(pCmd);
    if (pCmd)
    {
        rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
        AssertRC(rc);
        if (RT_SUCCESS(rc))
        {
            rc = vboxVDMACrCtlGetRc(pCmd);
        }
        vboxVDMACrCtlRelease(pCmd);
        return rc;
    }
    return VERR_NO_MEMORY;
#else
    RT_NOREF(pVdma);
    return VINF_SUCCESS;
#endif
}
3553
/**
 * Finalizes VDMA saved-state execution (counterpart of
 * vboxVDMASaveStateExecPrep).
 *
 * With CRHGSMI: first tries to resume the VBVA worker; if VBVA is not active
 * (VERR_INVALID_STATE), instead posts a SAVESTATE_END chromium control to the
 * 3D backend and returns its result.
 *
 * @returns VBox status code.
 * @param   pVdma   The VDMA host instance (unused without CRHGSMI).
 */
int vboxVDMASaveStateExecDone(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    int rc = vdmaVBVAResume(pVdma);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    /* VERR_INVALID_STATE means VBVA is not active; fall back to the chromium path. */
    if (rc != VERR_INVALID_STATE)
    {
        WARN(("vdmaVBVAResume failed %d\n", rc));
        return rc;
    }

# ifdef DEBUG_misha
    WARN(("debug done"));
# endif

    PVGASTATE pVGAState = pVdma->pVGAState;
    PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
                                         VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_END, sizeof (*pCmd));
    Assert(pCmd);
    if (pCmd)
    {
        rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
        AssertRC(rc);
        if (RT_SUCCESS(rc))
        {
            rc = vboxVDMACrCtlGetRc(pCmd);
        }
        vboxVDMACrCtlRelease(pCmd);
        return rc;
    }
    return VERR_NO_MEMORY;
#else
    RT_NOREF(pVdma);
    return VINF_SUCCESS;
#endif
}
3592
/**
 * Writes the VDMA VBVA state to the saved-state stream.
 *
 * Writes UINT32_MAX when command VBVA is not enabled (or CRHGSMI is compiled
 * out); otherwise writes the VBVA buffer's VRAM offset followed by the state
 * saved via a synchronous HH_SAVESTATE control.
 *
 * Note the preprocessor trick: without CRHGSMI the braced block below runs
 * unconditionally; with CRHGSMI it is guarded by the if statement.
 *
 * @returns VBox status code.
 * @param   pVdma   The VDMA host instance.
 * @param   pSSM    The saved-state handle.
 */
int vboxVDMASaveStateExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
{
    int rc;
#ifndef VBOX_WITH_CRHGSMI
    RT_NOREF(pVdma, pSSM);

#else
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
#endif
    {
        /* Marker for "no VBVA state saved". */
        rc = SSMR3PutU32(pSSM, UINT32_MAX);
        AssertRCReturn(rc, rc);
        return VINF_SUCCESS;
    }

#ifdef VBOX_WITH_CRHGSMI
    PVGASTATE pVGAState = pVdma->pVGAState;
    uint8_t * pu8VramBase = pVGAState->vram_ptrR3;

    /* Save the VBVA buffer location as an offset into VRAM. */
    rc = SSMR3PutU32(pSSM, (uint32_t)(((uint8_t*)pVdma->CmdVbva.pVBVA) - pu8VramBase));
    AssertRCReturn(rc, rc);

    VBVAEXHOSTCTL HCtl;
    HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_SAVESTATE;
    HCtl.u.state.pSSM = pSSM;
    HCtl.u.state.u32Version = 0;
    return vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
#endif
}
3622
/**
 * Loads the VDMA VBVA state from the saved-state stream.
 *
 * A stored value of UINT32_MAX means no VBVA state was saved.  Otherwise the
 * value is the VBVA buffer's VRAM offset: VBVA is re-enabled in the paused
 * state, the saved state is restored via an HH_LOADSTATE control, and
 * processing is resumed.
 *
 * @returns VBox status code; VERR_VERSION_MISMATCH when VBVA state is present
 *          but CRHGSMI support is compiled out.
 * @param   pVdma       The VDMA host instance.
 * @param   pSSM        The saved-state handle.
 * @param   u32Version  The saved-state unit version.
 */
int vboxVDMASaveLoadExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32;
    int rc = SSMR3GetU32(pSSM, &u32);
    AssertLogRelRCReturn(rc, rc);

    if (u32 != UINT32_MAX)
    {
#ifdef VBOX_WITH_CRHGSMI
        /* Re-enable VBVA at the saved VRAM offset, paused until resume below. */
        rc = vdmaVBVACtlEnableSubmitSync(pVdma, u32, true);
        AssertLogRelRCReturn(rc, rc);

        Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_ESTATE_PAUSED);

        VBVAEXHOSTCTL HCtl;
        HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_LOADSTATE;
        HCtl.u.state.pSSM = pSSM;
        HCtl.u.state.u32Version = u32Version;
        rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
        AssertLogRelRCReturn(rc, rc);

        rc = vdmaVBVAResume(pVdma);
        AssertLogRelRCReturn(rc, rc);

        return VINF_SUCCESS;
#else
        RT_NOREF(pVdma, u32Version);
        WARN(("Unsupported VBVACtl info!\n"));
        return VERR_VERSION_MISMATCH;
#endif
    }

    return VINF_SUCCESS;
}
3657
/**
 * Post-load completion: if command VBVA is enabled, submits an asynchronous
 * HH_LOADSTATE_DONE control so the worker can finish load-time processing.
 *
 * @returns VBox status code.
 * @param   pVdma   The VDMA host instance (unused without CRHGSMI).
 */
int vboxVDMASaveLoadDone(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
        return VINF_SUCCESS;

/** @todo r=bird: BTW. would be great if you put in a couple of comments here and there explaining what
 * the purpose of this code is. */
    VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE);
    if (!pHCtl)
    {
        WARN(("VBoxVBVAExHCtlCreate failed\n"));
        return VERR_NO_MEMORY;
    }

    /* sanity */
    pHCtl->u.cmd.pu8Cmd = NULL;
    pHCtl->u.cmd.cbCmd = 0;

    /* NULL completion will just free the ctl up */
    int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_HOST, NULL, NULL);
    if (RT_FAILURE(rc))
    {
        Log(("vdmaVBVACtlSubmit failed rc %d\n", rc));
        VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
        return rc;
    }
#else
    RT_NOREF(pVdma);
#endif
    return VINF_SUCCESS;
}
3690
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette