VirtualBox

source: vbox/trunk/src/VBox/Devices/Graphics/DevVGA_VDMA.cpp@ 63699

Last change on this file since 63699 was 63562, checked in by vboxsync, 9 years ago

scm: cleaning up todos

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 114.6 KB
Line 
1/* $Id: DevVGA_VDMA.cpp 63562 2016-08-16 14:04:03Z vboxsync $ */
2/** @file
3 * Video DMA (VDMA) support.
4 */
5
6/*
7 * Copyright (C) 2006-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#include <VBox/VMMDev.h>
23#include <VBox/vmm/pdmdev.h>
24#include <VBox/vmm/pgm.h>
25#include <VBox/VBoxVideo.h>
26#include <iprt/semaphore.h>
27#include <iprt/thread.h>
28#include <iprt/mem.h>
29#include <iprt/asm.h>
30#include <iprt/list.h>
31#include <iprt/param.h>
32
33#include "DevVGA.h"
34#include "HGSMI/SHGSMIHost.h"
35
36#include <VBox/VBoxVideo3D.h>
37#include <VBox/VBoxVideoHost3D.h>
38
39#ifdef DEBUG_misha
40# define VBOXVDBG_MEMCACHE_DISABLE
41#endif
42
43#ifndef VBOXVDBG_MEMCACHE_DISABLE
44# include <iprt/memcache.h>
45#endif
46
47
48/*********************************************************************************************************************************
49* Defined Constants And Macros *
50*********************************************************************************************************************************/
51#ifdef DEBUG_misha
52# define WARN_BP() do { AssertFailed(); } while (0)
53#else
54# define WARN_BP() do { } while (0)
55#endif
56#define WARN(_msg) do { \
57 LogRel(_msg); \
58 WARN_BP(); \
59 } while (0)
60
61#define VBOXVDMATHREAD_STATE_TERMINATED 0
62#define VBOXVDMATHREAD_STATE_CREATING 1
63#define VBOXVDMATHREAD_STATE_CREATED 3
64#define VBOXVDMATHREAD_STATE_TERMINATING 4
65
66
67/*********************************************************************************************************************************
68* Structures and Typedefs *
69*********************************************************************************************************************************/
70struct VBOXVDMATHREAD;
71
72typedef DECLCALLBACKPTR(void, PFNVBOXVDMATHREAD_CHANGED)(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvChangeContext);
73
74#ifdef VBOX_WITH_CRHGSMI
75static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd, VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb);
76#endif
77
78
79typedef struct VBOXVDMATHREAD
80{
81 RTTHREAD hWorkerThread;
82 RTSEMEVENT hEvent;
83 volatile uint32_t u32State;
84 PFNVBOXVDMATHREAD_CHANGED pfnChanged;
85 void *pvChanged;
86} VBOXVDMATHREAD, *PVBOXVDMATHREAD;
87
88
89/* state transformations:
90 *
91 * submitter | processor
92 *
93 * LISTENING ---> PROCESSING
94 *
95 * */
96#define VBVAEXHOSTCONTEXT_STATE_LISTENING 0
97#define VBVAEXHOSTCONTEXT_STATE_PROCESSING 1
98
99#define VBVAEXHOSTCONTEXT_ESTATE_DISABLED -1
100#define VBVAEXHOSTCONTEXT_ESTATE_PAUSED 0
101#define VBVAEXHOSTCONTEXT_ESTATE_ENABLED 1
102
103typedef struct VBVAEXHOSTCONTEXT
104{
105 VBVABUFFER *pVBVA;
106 volatile int32_t i32State;
107 volatile int32_t i32EnableState;
108 volatile uint32_t u32cCtls;
109 /* critical section for accessing ctl lists */
110 RTCRITSECT CltCritSect;
111 RTLISTANCHOR GuestCtlList;
112 RTLISTANCHOR HostCtlList;
113#ifndef VBOXVDBG_MEMCACHE_DISABLE
114 RTMEMCACHE CtlCache;
115#endif
116} VBVAEXHOSTCONTEXT;
117
118typedef enum
119{
120 VBVAEXHOSTCTL_TYPE_UNDEFINED = 0,
121 VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE,
122 VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME,
123 VBVAEXHOSTCTL_TYPE_HH_SAVESTATE,
124 VBVAEXHOSTCTL_TYPE_HH_LOADSTATE,
125 VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE,
126 VBVAEXHOSTCTL_TYPE_HH_BE_OPAQUE,
127 VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD,
128 VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE,
129 VBVAEXHOSTCTL_TYPE_GHH_ENABLE,
130 VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED,
131 VBVAEXHOSTCTL_TYPE_GHH_DISABLE,
132 VBVAEXHOSTCTL_TYPE_GHH_RESIZE
133} VBVAEXHOSTCTL_TYPE;
134
135struct VBVAEXHOSTCTL;
136
137typedef DECLCALLBACKPTR(void, PFNVBVAEXHOSTCTL_COMPLETE)(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvComplete);
138
139typedef struct VBVAEXHOSTCTL
140{
141 RTLISTNODE Node;
142 VBVAEXHOSTCTL_TYPE enmType;
143 union
144 {
145 struct
146 {
147 uint8_t * pu8Cmd;
148 uint32_t cbCmd;
149 } cmd;
150
151 struct
152 {
153 PSSMHANDLE pSSM;
154 uint32_t u32Version;
155 } state;
156 } u;
157 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete;
158 void *pvComplete;
159} VBVAEXHOSTCTL;
160
161/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
162 * but can be called with other VBoxVBVAExS** (submitter) functions except Init/Start/Term aparently.
163 * Can only be called be the processor, i.e. the entity that acquired the processor state by direct or indirect call to the VBoxVBVAExHSCheckCommands
164 * see mor edetailed comments in headers for function definitions */
165typedef enum
166{
167 VBVAEXHOST_DATA_TYPE_NO_DATA = 0,
168 VBVAEXHOST_DATA_TYPE_CMD,
169 VBVAEXHOST_DATA_TYPE_HOSTCTL,
170 VBVAEXHOST_DATA_TYPE_GUESTCTL
171} VBVAEXHOST_DATA_TYPE;
172
173
174#ifdef VBOX_WITH_CRHGSMI
175typedef struct VBOXVDMA_SOURCE
176{
177 VBVAINFOSCREEN Screen;
178 VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
179} VBOXVDMA_SOURCE;
180#endif
181
182typedef struct VBOXVDMAHOST
183{
184 PHGSMIINSTANCE pHgsmi;
185 PVGASTATE pVGAState;
186#ifdef VBOX_WITH_CRHGSMI
187 VBVAEXHOSTCONTEXT CmdVbva;
188 VBOXVDMATHREAD Thread;
189 VBOXCRCMD_SVRINFO CrSrvInfo;
190 VBVAEXHOSTCTL* pCurRemainingHostCtl;
191 RTSEMEVENTMULTI HostCrCtlCompleteEvent;
192 int32_t volatile i32cHostCrCtlCompleted;
193 RTCRITSECT CalloutCritSect;
194// VBOXVDMA_SOURCE aSources[VBOX_VIDEO_MAX_SCREENS];
195#endif
196#ifdef VBOX_VDMA_WITH_WATCHDOG
197 PTMTIMERR3 WatchDogTimer;
198#endif
199} VBOXVDMAHOST, *PVBOXVDMAHOST;
200
201
202/*********************************************************************************************************************************
203* Internal Functions *
204*********************************************************************************************************************************/
205#ifdef VBOX_WITH_CRHGSMI
206static DECLCALLBACK(int) vdmaVBVANotifyDisable(PVGASTATE pVGAState);
207static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd);
208
209static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd);
210static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc);
211
212/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
213 * can be called concurrently with istelf as well as with other VBoxVBVAEx** functions except Init/Start/Term aparently */
214static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva);
215
216static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva);
217static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA);
218static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva);
219static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva);
220static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM);
221static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version);
222
223#endif /* VBOX_WITH_CRHGSMI */
224
225
226
227#ifdef VBOX_WITH_CRHGSMI
228
229static VBVAEXHOSTCTL* VBoxVBVAExHCtlAlloc(VBVAEXHOSTCONTEXT *pCmdVbva)
230{
231# ifndef VBOXVDBG_MEMCACHE_DISABLE
232 return (VBVAEXHOSTCTL*)RTMemCacheAlloc(pCmdVbva->CtlCache);
233# else
234 return (VBVAEXHOSTCTL*)RTMemAlloc(sizeof (VBVAEXHOSTCTL));
235# endif
236}
237
238static void VBoxVBVAExHCtlFree(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl)
239{
240# ifndef VBOXVDBG_MEMCACHE_DISABLE
241 RTMemCacheFree(pCmdVbva->CtlCache, pCtl);
242# else
243 RTMemFree(pCtl);
244# endif
245}
246
247static VBVAEXHOSTCTL *VBoxVBVAExHCtlCreate(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL_TYPE enmType)
248{
249 VBVAEXHOSTCTL* pCtl = VBoxVBVAExHCtlAlloc(pCmdVbva);
250 if (!pCtl)
251 {
252 WARN(("VBoxVBVAExHCtlAlloc failed\n"));
253 return NULL;
254 }
255
256 pCtl->enmType = enmType;
257 return pCtl;
258}
259
260static int vboxVBVAExHSProcessorAcquire(struct VBVAEXHOSTCONTEXT *pCmdVbva)
261{
262 Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);
263
264 if (ASMAtomicCmpXchgS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_PROCESSING, VBVAEXHOSTCONTEXT_STATE_LISTENING))
265 return VINF_SUCCESS;
266 return VERR_SEM_BUSY;
267}
268
269static VBVAEXHOSTCTL* vboxVBVAExHPCheckCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, bool *pfHostCtl, bool fHostOnlyMode)
270{
271 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
272
273 if (!fHostOnlyMode && !ASMAtomicUoReadU32(&pCmdVbva->u32cCtls))
274 return NULL;
275
276 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
277 if (RT_SUCCESS(rc))
278 {
279 VBVAEXHOSTCTL* pCtl = RTListGetFirst(&pCmdVbva->HostCtlList, VBVAEXHOSTCTL, Node);
280 if (pCtl)
281 *pfHostCtl = true;
282 else if (!fHostOnlyMode)
283 {
284 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
285 {
286 pCtl = RTListGetFirst(&pCmdVbva->GuestCtlList, VBVAEXHOSTCTL, Node);
287 /* pCtl can not be null here since pCmdVbva->u32cCtls is not null,
288 * and there are no HostCtl commands*/
289 Assert(pCtl);
290 *pfHostCtl = false;
291 }
292 }
293
294 if (pCtl)
295 {
296 RTListNodeRemove(&pCtl->Node);
297 ASMAtomicDecU32(&pCmdVbva->u32cCtls);
298 }
299
300 RTCritSectLeave(&pCmdVbva->CltCritSect);
301
302 return pCtl;
303 }
304 else
305 WARN(("RTCritSectEnter failed %d\n", rc));
306
307 return NULL;
308}
309
310static VBVAEXHOSTCTL* VBoxVBVAExHPCheckHostCtlOnDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
311{
312 bool fHostCtl = false;
313 VBVAEXHOSTCTL* pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostCtl, true);
314 Assert(!pCtl || fHostCtl);
315 return pCtl;
316}
317
318static int VBoxVBVAExHPPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
319{
320 if (pCmdVbva->i32EnableState < VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
321 {
322 WARN(("Invalid state\n"));
323 return VERR_INVALID_STATE;
324 }
325
326 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
327 return VINF_SUCCESS;
328}
329
330static int VBoxVBVAExHPResume(struct VBVAEXHOSTCONTEXT *pCmdVbva)
331{
332 if (pCmdVbva->i32EnableState != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
333 {
334 WARN(("Invalid state\n"));
335 return VERR_INVALID_STATE;
336 }
337
338 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
339 return VINF_SUCCESS;
340}
341
342static bool vboxVBVAExHPCheckProcessCtlInternal(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl)
343{
344 switch (pCtl->enmType)
345 {
346 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE:
347 {
348 VBoxVBVAExHPPause(pCmdVbva);
349 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
350 return true;
351 }
352 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME:
353 {
354 VBoxVBVAExHPResume(pCmdVbva);
355 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
356 return true;
357 }
358 default:
359 return false;
360 }
361}
362
363static void vboxVBVAExHPProcessorRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
364{
365 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
366
367 ASMAtomicWriteS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
368}
369
370static void vboxVBVAExHPHgEventSet(struct VBVAEXHOSTCONTEXT *pCmdVbva)
371{
372 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
373 if (pCmdVbva->pVBVA)
374 ASMAtomicOrU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, VBVA_F_STATE_PROCESSING);
375}
376
377static void vboxVBVAExHPHgEventClear(struct VBVAEXHOSTCONTEXT *pCmdVbva)
378{
379 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
380 if (pCmdVbva->pVBVA)
381 ASMAtomicAndU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, ~VBVA_F_STATE_PROCESSING);
382}
383
384static int vboxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
385{
386 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
387 Assert(pCmdVbva->i32EnableState > VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
388
389 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
390
391 uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
392 uint32_t indexRecordFree = pVBVA->indexRecordFree;
393
394 Log(("first = %d, free = %d\n",
395 indexRecordFirst, indexRecordFree));
396
397 if (indexRecordFirst == indexRecordFree)
398 {
399 /* No records to process. Return without assigning output variables. */
400 return VINF_EOF;
401 }
402
403 uint32_t cbRecordCurrent = ASMAtomicReadU32(&pVBVA->aRecords[indexRecordFirst].cbRecord);
404
405 /* A new record need to be processed. */
406 if (cbRecordCurrent & VBVA_F_RECORD_PARTIAL)
407 {
408 /* the record is being recorded, try again */
409 return VINF_TRY_AGAIN;
410 }
411
412 uint32_t cbRecord = cbRecordCurrent & ~VBVA_F_RECORD_PARTIAL;
413
414 if (!cbRecord)
415 {
416 /* the record is being recorded, try again */
417 return VINF_TRY_AGAIN;
418 }
419
420 /* we should not get partial commands here actually */
421 Assert(cbRecord);
422
423 /* The size of largest contiguous chunk in the ring biffer. */
424 uint32_t u32BytesTillBoundary = pVBVA->cbData - pVBVA->off32Data;
425
426 /* The pointer to data in the ring buffer. */
427 uint8_t *pSrc = &pVBVA->au8Data[pVBVA->off32Data];
428
429 /* Fetch or point the data. */
430 if (u32BytesTillBoundary >= cbRecord)
431 {
432 /* The command does not cross buffer boundary. Return address in the buffer. */
433 *ppCmd = pSrc;
434 *pcbCmd = cbRecord;
435 return VINF_SUCCESS;
436 }
437
438 LogRel(("CmdVbva: cross-bound writes unsupported\n"));
439 return VERR_INVALID_STATE;
440}
441
442static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd)
443{
444 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
445 pVBVA->off32Data = (pVBVA->off32Data + cbCmd) % pVBVA->cbData;
446
447 pVBVA->indexRecordFirst = (pVBVA->indexRecordFirst + 1) % RT_ELEMENTS(pVBVA->aRecords);
448}
449
450static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc)
451{
452 if (pCtl->pfnComplete)
453 pCtl->pfnComplete(pCmdVbva, pCtl, rc, pCtl->pvComplete);
454 else
455 VBoxVBVAExHCtlFree(pCmdVbva, pCtl);
456}
457
458
459static VBVAEXHOST_DATA_TYPE vboxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
460{
461 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
462 VBVAEXHOSTCTL*pCtl;
463 bool fHostClt;
464
465 for (;;)
466 {
467 pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostClt, false);
468 if (pCtl)
469 {
470 if (fHostClt)
471 {
472 if (!vboxVBVAExHPCheckProcessCtlInternal(pCmdVbva, pCtl))
473 {
474 *ppCmd = (uint8_t*)pCtl;
475 *pcbCmd = sizeof (*pCtl);
476 return VBVAEXHOST_DATA_TYPE_HOSTCTL;
477 }
478 continue;
479 }
480 else
481 {
482 *ppCmd = (uint8_t*)pCtl;
483 *pcbCmd = sizeof (*pCtl);
484 return VBVAEXHOST_DATA_TYPE_GUESTCTL;
485 }
486 }
487
488 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) <= VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
489 return VBVAEXHOST_DATA_TYPE_NO_DATA;
490
491 int rc = vboxVBVAExHPCmdGet(pCmdVbva, ppCmd, pcbCmd);
492 switch (rc)
493 {
494 case VINF_SUCCESS:
495 return VBVAEXHOST_DATA_TYPE_CMD;
496 case VINF_EOF:
497 return VBVAEXHOST_DATA_TYPE_NO_DATA;
498 case VINF_TRY_AGAIN:
499 RTThreadSleep(1);
500 continue;
501 default:
502 /* this is something really unexpected, i.e. most likely guest has written something incorrect to the VBVA buffer */
503 WARN(("Warning: vboxVBVAExHCmdGet returned unexpected status %d\n", rc));
504 return VBVAEXHOST_DATA_TYPE_NO_DATA;
505 }
506 }
507 /* not reached */
508}
509
510static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
511{
512 VBVAEXHOST_DATA_TYPE enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
513 if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
514 {
515 vboxVBVAExHPHgEventClear(pCmdVbva);
516 vboxVBVAExHPProcessorRelease(pCmdVbva);
517 /* we need to prevent racing between us clearing the flag and command check/submission thread, i.e.
518 * 1. we check the queue -> and it is empty
519 * 2. submitter adds command to the queue
520 * 3. submitter checks the "processing" -> and it is true , thus it does not submit a notification
521 * 4. we clear the "processing" state
522 * 5. ->here we need to re-check the queue state to ensure we do not leak the notification of the above command
523 * 6. if the queue appears to be not-empty set the "processing" state back to "true"
524 **/
525 int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
526 if (RT_SUCCESS(rc))
527 {
528 /* we are the processor now */
529 enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
530 if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
531 {
532 vboxVBVAExHPProcessorRelease(pCmdVbva);
533 return VBVAEXHOST_DATA_TYPE_NO_DATA;
534 }
535
536 vboxVBVAExHPHgEventSet(pCmdVbva);
537 }
538 }
539
540 return enmType;
541}
542
543DECLINLINE(bool) vboxVBVAExHSHasCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
544{
545 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
546
547 if (pVBVA)
548 {
549 uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
550 uint32_t indexRecordFree = pVBVA->indexRecordFree;
551
552 if (indexRecordFirst != indexRecordFree)
553 return true;
554 }
555
556 return !!ASMAtomicReadU32(&pCmdVbva->u32cCtls);
557}
558
559/** Checks whether the new commands are ready for processing
560 * @returns
561 * VINF_SUCCESS - there are commands are in a queue, and the given thread is now the processor (i.e. typically it would delegate processing to a worker thread)
562 * VINF_EOF - no commands in a queue
563 * VINF_ALREADY_INITIALIZED - another thread already processing the commands
564 * VERR_INVALID_STATE - the VBVA is paused or pausing */
565static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
566{
567 int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
568 if (RT_SUCCESS(rc))
569 {
570 /* we are the processor now */
571 if (vboxVBVAExHSHasCommands(pCmdVbva))
572 {
573 vboxVBVAExHPHgEventSet(pCmdVbva);
574 return VINF_SUCCESS;
575 }
576
577 vboxVBVAExHPProcessorRelease(pCmdVbva);
578 return VINF_EOF;
579 }
580 if (rc == VERR_SEM_BUSY)
581 return VINF_ALREADY_INITIALIZED;
582 return VERR_INVALID_STATE;
583}
584
585static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva)
586{
587 memset(pCmdVbva, 0, sizeof (*pCmdVbva));
588 int rc = RTCritSectInit(&pCmdVbva->CltCritSect);
589 if (RT_SUCCESS(rc))
590 {
591# ifndef VBOXVDBG_MEMCACHE_DISABLE
592 rc = RTMemCacheCreate(&pCmdVbva->CtlCache, sizeof (VBVAEXHOSTCTL),
593 0, /* size_t cbAlignment */
594 UINT32_MAX, /* uint32_t cMaxObjects */
595 NULL, /* PFNMEMCACHECTOR pfnCtor*/
596 NULL, /* PFNMEMCACHEDTOR pfnDtor*/
597 NULL, /* void *pvUser*/
598 0 /* uint32_t fFlags*/
599 );
600 if (RT_SUCCESS(rc))
601# endif
602 {
603 RTListInit(&pCmdVbva->GuestCtlList);
604 RTListInit(&pCmdVbva->HostCtlList);
605 pCmdVbva->i32State = VBVAEXHOSTCONTEXT_STATE_PROCESSING;
606 pCmdVbva->i32EnableState = VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
607 return VINF_SUCCESS;
608 }
609# ifndef VBOXVDBG_MEMCACHE_DISABLE
610 else
611 WARN(("RTMemCacheCreate failed %d\n", rc));
612# endif
613 }
614 else
615 WARN(("RTCritSectInit failed %d\n", rc));
616
617 return rc;
618}
619
620DECLINLINE(bool) VBoxVBVAExHSIsEnabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
621{
622 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) >= VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
623}
624
625DECLINLINE(bool) VBoxVBVAExHSIsDisabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
626{
627 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) == VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
628}
629
630static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA)
631{
632 if (VBoxVBVAExHSIsEnabled(pCmdVbva))
633 {
634 WARN(("VBVAEx is enabled already\n"));
635 return VERR_INVALID_STATE;
636 }
637
638 pCmdVbva->pVBVA = pVBVA;
639 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
640 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
641 return VINF_SUCCESS;
642}
643
644static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
645{
646 if (VBoxVBVAExHSIsDisabled(pCmdVbva))
647 return VINF_SUCCESS;
648
649 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
650 return VINF_SUCCESS;
651}
652
653static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva)
654{
655 /* ensure the processor is stopped */
656 Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);
657
658 /* ensure no one tries to submit the command */
659 if (pCmdVbva->pVBVA)
660 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
661
662 Assert(RTListIsEmpty(&pCmdVbva->GuestCtlList));
663 Assert(RTListIsEmpty(&pCmdVbva->HostCtlList));
664
665 RTCritSectDelete(&pCmdVbva->CltCritSect);
666
667# ifndef VBOXVDBG_MEMCACHE_DISABLE
668 RTMemCacheDestroy(pCmdVbva->CtlCache);
669# endif
670
671 memset(pCmdVbva, 0, sizeof (*pCmdVbva));
672}
673
674static int vboxVBVAExHSSaveGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
675{
676 RT_NOREF(pCmdVbva);
677 int rc = SSMR3PutU32(pSSM, pCtl->enmType);
678 AssertRCReturn(rc, rc);
679 rc = SSMR3PutU32(pSSM, pCtl->u.cmd.cbCmd);
680 AssertRCReturn(rc, rc);
681 rc = SSMR3PutU32(pSSM, (uint32_t)(pCtl->u.cmd.pu8Cmd - pu8VramBase));
682 AssertRCReturn(rc, rc);
683
684 return VINF_SUCCESS;
685}
686
687static int vboxVBVAExHSSaveStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
688{
689 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
690 {
691 WARN(("vbva not paused\n"));
692 return VERR_INVALID_STATE;
693 }
694
695 VBVAEXHOSTCTL* pCtl;
696 int rc;
697 RTListForEach(&pCmdVbva->GuestCtlList, pCtl, VBVAEXHOSTCTL, Node)
698 {
699 rc = vboxVBVAExHSSaveGuestCtl(pCmdVbva, pCtl, pu8VramBase, pSSM);
700 AssertRCReturn(rc, rc);
701 }
702
703 rc = SSMR3PutU32(pSSM, 0);
704 AssertRCReturn(rc, rc);
705
706 return VINF_SUCCESS;
707}
708
709
710/** Saves state
711 * @returns - same as VBoxVBVAExHSCheckCommands, or failure on load state fail
712 */
713static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
714{
715 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
716 if (RT_FAILURE(rc))
717 {
718 WARN(("RTCritSectEnter failed %d\n", rc));
719 return rc;
720 }
721
722 rc = vboxVBVAExHSSaveStateLocked(pCmdVbva, pu8VramBase, pSSM);
723 if (RT_FAILURE(rc))
724 WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
725
726 RTCritSectLeave(&pCmdVbva->CltCritSect);
727
728 return rc;
729}
730
731static int vboxVBVAExHSLoadGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
732{
733 RT_NOREF(u32Version);
734 uint32_t u32;
735 int rc = SSMR3GetU32(pSSM, &u32);
736 AssertLogRelRCReturn(rc, rc);
737
738 if (!u32)
739 return VINF_EOF;
740
741 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(pCmdVbva, (VBVAEXHOSTCTL_TYPE)u32);
742 if (!pHCtl)
743 {
744 WARN(("VBoxVBVAExHCtlCreate failed\n"));
745 return VERR_NO_MEMORY;
746 }
747
748 rc = SSMR3GetU32(pSSM, &u32);
749 AssertLogRelRCReturn(rc, rc);
750 pHCtl->u.cmd.cbCmd = u32;
751
752 rc = SSMR3GetU32(pSSM, &u32);
753 AssertLogRelRCReturn(rc, rc);
754 pHCtl->u.cmd.pu8Cmd = pu8VramBase + u32;
755
756 RTListAppend(&pCmdVbva->GuestCtlList, &pHCtl->Node);
757 ++pCmdVbva->u32cCtls;
758
759 return VINF_SUCCESS;
760}
761
762
763static int vboxVBVAExHSLoadStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
764{
765 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
766 {
767 WARN(("vbva not stopped\n"));
768 return VERR_INVALID_STATE;
769 }
770
771 int rc;
772
773 do {
774 rc = vboxVBVAExHSLoadGuestCtl(pCmdVbva, pu8VramBase, pSSM, u32Version);
775 AssertLogRelRCReturn(rc, rc);
776 } while (VINF_EOF != rc);
777
778 return VINF_SUCCESS;
779}
780
781/** Loads state
782 * @returns - same as VBoxVBVAExHSCheckCommands, or failure on load state fail
783 */
784static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
785{
786 Assert(VGA_SAVEDSTATE_VERSION_3D <= u32Version);
787 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
788 if (RT_FAILURE(rc))
789 {
790 WARN(("RTCritSectEnter failed %d\n", rc));
791 return rc;
792 }
793
794 rc = vboxVBVAExHSLoadStateLocked(pCmdVbva, pu8VramBase, pSSM, u32Version);
795 if (RT_FAILURE(rc))
796 WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
797
798 RTCritSectLeave(&pCmdVbva->CltCritSect);
799
800 return rc;
801}
802
803typedef enum
804{
805 VBVAEXHOSTCTL_SOURCE_GUEST = 0,
806 VBVAEXHOSTCTL_SOURCE_HOST
807} VBVAEXHOSTCTL_SOURCE;
808
809
810static int VBoxVBVAExHCtlSubmit(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
811{
812 if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
813 {
814 Log(("cmd vbva not enabled\n"));
815 return VERR_INVALID_STATE;
816 }
817
818 pCtl->pfnComplete = pfnComplete;
819 pCtl->pvComplete = pvComplete;
820
821 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
822 if (RT_SUCCESS(rc))
823 {
824 if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
825 {
826 Log(("cmd vbva not enabled\n"));
827 RTCritSectLeave(&pCmdVbva->CltCritSect);
828 return VERR_INVALID_STATE;
829 }
830
831 if (enmSource > VBVAEXHOSTCTL_SOURCE_GUEST)
832 {
833 RTListAppend(&pCmdVbva->HostCtlList, &pCtl->Node);
834 }
835 else
836 RTListAppend(&pCmdVbva->GuestCtlList, &pCtl->Node);
837
838 ASMAtomicIncU32(&pCmdVbva->u32cCtls);
839
840 RTCritSectLeave(&pCmdVbva->CltCritSect);
841
842 rc = VBoxVBVAExHSCheckCommands(pCmdVbva);
843 }
844 else
845 WARN(("RTCritSectEnter failed %d\n", rc));
846
847 return rc;
848}
849
850void VBoxVDMAThreadNotifyConstructSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
851{
852 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_CREATING);
853 PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
854 void *pvChanged = pThread->pvChanged;
855
856 pThread->pfnChanged = NULL;
857 pThread->pvChanged = NULL;
858
859 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_CREATED);
860
861 if (pfnChanged)
862 pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
863}
864
865void VBoxVDMAThreadNotifyTerminatingSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
866{
867 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_TERMINATING);
868 PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
869 void *pvChanged = pThread->pvChanged;
870
871 pThread->pfnChanged = NULL;
872 pThread->pvChanged = NULL;
873
874 if (pfnChanged)
875 pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
876}
877
878DECLINLINE(bool) VBoxVDMAThreadIsTerminating(PVBOXVDMATHREAD pThread)
879{
880 return ASMAtomicUoReadU32(&pThread->u32State) == VBOXVDMATHREAD_STATE_TERMINATING;
881}
882
883void VBoxVDMAThreadInit(PVBOXVDMATHREAD pThread)
884{
885 memset(pThread, 0, sizeof (*pThread));
886 pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
887}
888
889int VBoxVDMAThreadCleanup(PVBOXVDMATHREAD pThread)
890{
891 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
892 switch (u32State)
893 {
894 case VBOXVDMATHREAD_STATE_TERMINATED:
895 return VINF_SUCCESS;
896 case VBOXVDMATHREAD_STATE_TERMINATING:
897 {
898 int rc = RTThreadWait(pThread->hWorkerThread, RT_INDEFINITE_WAIT, NULL);
899 if (!RT_SUCCESS(rc))
900 {
901 WARN(("RTThreadWait failed %d\n", rc));
902 return rc;
903 }
904
905 RTSemEventDestroy(pThread->hEvent);
906
907 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATED);
908 return VINF_SUCCESS;
909 }
910 default:
911 WARN(("invalid state"));
912 return VERR_INVALID_STATE;
913 }
914}
915
916int VBoxVDMAThreadCreate(PVBOXVDMATHREAD pThread, PFNRTTHREAD pfnThread, void *pvThread, PFNVBOXVDMATHREAD_CHANGED pfnCreated, void*pvCreated)
917{
918 int rc = VBoxVDMAThreadCleanup(pThread);
919 if (RT_FAILURE(rc))
920 {
921 WARN(("VBoxVDMAThreadCleanup failed %d\n", rc));
922 return rc;
923 }
924
925 rc = RTSemEventCreate(&pThread->hEvent);
926 if (RT_SUCCESS(rc))
927 {
928 pThread->u32State = VBOXVDMATHREAD_STATE_CREATING;
929 pThread->pfnChanged = pfnCreated;
930 pThread->pvChanged = pvCreated;
931 rc = RTThreadCreate(&pThread->hWorkerThread, pfnThread, pvThread, 0, RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "VDMA");
932 if (RT_SUCCESS(rc))
933 return VINF_SUCCESS;
934 else
935 WARN(("RTThreadCreate failed %d\n", rc));
936
937 RTSemEventDestroy(pThread->hEvent);
938 }
939 else
940 WARN(("RTSemEventCreate failed %d\n", rc));
941
942 pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
943
944 return rc;
945}
946
947DECLINLINE(int) VBoxVDMAThreadEventNotify(PVBOXVDMATHREAD pThread)
948{
949 int rc = RTSemEventSignal(pThread->hEvent);
950 AssertRC(rc);
951 return rc;
952}
953
954DECLINLINE(int) VBoxVDMAThreadEventWait(PVBOXVDMATHREAD pThread, RTMSINTERVAL cMillies)
955{
956 int rc = RTSemEventWait(pThread->hEvent, cMillies);
957 AssertRC(rc);
958 return rc;
959}
960
961int VBoxVDMAThreadTerm(PVBOXVDMATHREAD pThread, PFNVBOXVDMATHREAD_CHANGED pfnTerminated, void*pvTerminated, bool fNotify)
962{
963 int rc;
964 do
965 {
966 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
967 switch (u32State)
968 {
969 case VBOXVDMATHREAD_STATE_CREATED:
970 pThread->pfnChanged = pfnTerminated;
971 pThread->pvChanged = pvTerminated;
972 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATING);
973 if (fNotify)
974 {
975 rc = VBoxVDMAThreadEventNotify(pThread);
976 AssertRC(rc);
977 }
978 return VINF_SUCCESS;
979 case VBOXVDMATHREAD_STATE_TERMINATING:
980 case VBOXVDMATHREAD_STATE_TERMINATED:
981 {
982 WARN(("thread is marked to termination or terminated\nn"));
983 return VERR_INVALID_STATE;
984 }
985 case VBOXVDMATHREAD_STATE_CREATING:
986 {
987 /* wait till the thread creation is completed */
988 WARN(("concurrent thread create/destron\n"));
989 RTThreadYield();
990 continue;
991 }
992 default:
993 WARN(("invalid state"));
994 return VERR_INVALID_STATE;
995 }
996 } while (1);
997
998 WARN(("should never be here\n"));
999 return VERR_INTERNAL_ERROR;
1000}
1001
1002static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource);
1003
1004typedef DECLCALLBACK(void) FNVBOXVDMACRCTL_CALLBACK(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext);
1005typedef FNVBOXVDMACRCTL_CALLBACK *PFNVBOXVDMACRCTL_CALLBACK;
1006
1007typedef struct VBOXVDMACMD_CHROMIUM_CTL_PRIVATE
1008{
1009 uint32_t cRefs;
1010 int32_t rc;
1011 PFNVBOXVDMACRCTL_CALLBACK pfnCompletion;
1012 void *pvCompletion;
1013 VBOXVDMACMD_CHROMIUM_CTL Cmd;
1014} VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, *PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE;
1015
1016# define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(_p) ((PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)(((uint8_t*)(_p)) - RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd)))
1017
1018static PVBOXVDMACMD_CHROMIUM_CTL vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE enmCmd, uint32_t cbCmd)
1019{
1020 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)RTMemAllocZ(cbCmd + RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd));
1021 Assert(pHdr);
1022 if (pHdr)
1023 {
1024 pHdr->cRefs = 1;
1025 pHdr->rc = VERR_NOT_IMPLEMENTED;
1026 pHdr->Cmd.enmType = enmCmd;
1027 pHdr->Cmd.cbCmd = cbCmd;
1028 return &pHdr->Cmd;
1029 }
1030
1031 return NULL;
1032}
1033
1034DECLINLINE(void) vboxVDMACrCtlRelease (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1035{
1036 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1037 uint32_t cRefs = ASMAtomicDecU32(&pHdr->cRefs);
1038 if (!cRefs)
1039 RTMemFree(pHdr);
1040}
1041
#if 0 /* unused */
/** Adds a reference to a chromium control command (counterpart of vboxVDMACrCtlRelease). */
DECLINLINE(void) vboxVDMACrCtlRetain(PVBOXVDMACMD_CHROMIUM_CTL pCmd)
{
    PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
    ASMAtomicIncU32(&pHdr->cRefs);
}
#endif /* unused */
1049
1050DECLINLINE(int) vboxVDMACrCtlGetRc (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1051{
1052 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1053 return pHdr->rc;
1054}
1055
1056static DECLCALLBACK(void) vboxVDMACrCtlCbSetEvent(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
1057{
1058 RT_NOREF(pVGAState, pCmd);
1059 RTSemEventSignal((RTSEMEVENT)pvContext);
1060}
1061
# if 0 /** @todo vboxVDMACrCtlCbReleaseCmd is unused */
/** Completion callback that simply drops the command reference. */
static DECLCALLBACK(void) vboxVDMACrCtlCbReleaseCmd(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
{
    RT_NOREF(pVGAState, pvContext);
    vboxVDMACrCtlRelease(pCmd);
}
# endif
1069
1070static int vboxVDMACrCtlPostAsync (PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd, PFNVBOXVDMACRCTL_CALLBACK pfnCompletion, void *pvCompletion)
1071{
1072 if ( pVGAState->pDrv
1073 && pVGAState->pDrv->pfnCrHgsmiControlProcess)
1074 {
1075 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1076 pHdr->pfnCompletion = pfnCompletion;
1077 pHdr->pvCompletion = pvCompletion;
1078 pVGAState->pDrv->pfnCrHgsmiControlProcess(pVGAState->pDrv, pCmd, cbCmd);
1079 return VINF_SUCCESS;
1080 }
1081# ifdef DEBUG_misha
1082 Assert(0);
1083# endif
1084 return VERR_NOT_SUPPORTED;
1085}
1086
/**
 * Posts a chromium control command and blocks until it completes.
 *
 * @returns IPRT status: semaphore creation / posting / waiting result.
 * @param   pVGAState   The VGA device state.
 * @param   pCmd        The command to post.
 * @param   cbCmd       Size of the command.
 */
static int vboxVDMACrCtlPost(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd)
{
    RTSEMEVENT hComplEvent;
    int rc = RTSemEventCreate(&hComplEvent);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        /* The callback signals hComplEvent when the backend completes the command. */
        rc = vboxVDMACrCtlPostAsync(pVGAState, pCmd, cbCmd, vboxVDMACrCtlCbSetEvent, (void*)hComplEvent);
# ifdef DEBUG_misha
        AssertRC(rc);
# endif
        if (RT_SUCCESS(rc))
        {
            rc = RTSemEventWaitNoResume(hComplEvent, RT_INDEFINITE_WAIT);
            AssertRC(rc);
            if (RT_SUCCESS(rc))
            {
                RTSemEventDestroy(hComplEvent);
            }
            /* NOTE(review): if the wait fails (e.g. interrupted), the semaphore is
             * deliberately NOT destroyed - the completion callback may still signal
             * it later and destroying it here would risk a use-after-free; the
             * handle leaks instead.  Confirm this is the intended trade-off. */
        }
        else
        {
            /* the command is completed */
            RTSemEventDestroy(hComplEvent);
        }
    }
    return rc;
}
1115
/** Context for synchronous HGCM control submission.
 *  (Note: 'CYNC' is a historical typo for 'SYNC'; the name is kept because
 *  other code in this file references it.) */
typedef struct VDMA_VBVA_CTL_CYNC_COMPLETION
{
    int rc;            /**< Completion status set by the async callback. */
    RTSEMEVENT hEvent; /**< Signalled by vboxVDMACrHgcmSubmitSyncCompletion. */
} VDMA_VBVA_CTL_CYNC_COMPLETION;
1121
1122static DECLCALLBACK(void) vboxVDMACrHgcmSubmitSyncCompletion(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
1123{
1124 RT_NOREF(pCmd, cbCmd);
1125 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvCompletion;
1126 pData->rc = rc;
1127 rc = RTSemEventSignal(pData->hEvent);
1128 if (!RT_SUCCESS(rc))
1129 WARN(("RTSemEventSignal failed %d\n", rc));
1130}
1131
1132static int vboxVDMACrHgcmSubmitSync(struct VBOXVDMAHOST *pVdma, VBOXCRCMDCTL* pCtl, uint32_t cbCtl)
1133{
1134 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
1135 Data.rc = VERR_NOT_IMPLEMENTED;
1136 int rc = RTSemEventCreate(&Data.hEvent);
1137 if (!RT_SUCCESS(rc))
1138 {
1139 WARN(("RTSemEventCreate failed %d\n", rc));
1140 return rc;
1141 }
1142
1143 pCtl->CalloutList.List.pNext = NULL;
1144
1145 PVGASTATE pVGAState = pVdma->pVGAState;
1146 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCtl, cbCtl, vboxVDMACrHgcmSubmitSyncCompletion, &Data);
1147 if (RT_SUCCESS(rc))
1148 {
1149 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
1150 if (RT_SUCCESS(rc))
1151 {
1152 rc = Data.rc;
1153 if (!RT_SUCCESS(rc))
1154 {
1155 WARN(("pfnCrHgcmCtlSubmit command failed %d\n", rc));
1156 }
1157
1158 }
1159 else
1160 WARN(("RTSemEventWait failed %d\n", rc));
1161 }
1162 else
1163 WARN(("pfnCrHgcmCtlSubmit failed %d\n", rc));
1164
1165
1166 RTSemEventDestroy(Data.hEvent);
1167
1168 return rc;
1169}
1170
1171static int vdmaVBVACtlDisableSync(PVBOXVDMAHOST pVdma)
1172{
1173 VBVAEXHOSTCTL HCtl;
1174 HCtl.enmType = VBVAEXHOSTCTL_TYPE_GHH_DISABLE;
1175 int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1176 if (RT_FAILURE(rc))
1177 {
1178 Log(("vdmaVBVACtlSubmitSync failed %d\n", rc));
1179 return rc;
1180 }
1181
1182 vgaUpdateDisplayAll(pVdma->pVGAState, /* fFailOnResize = */ false);
1183
1184 return VINF_SUCCESS;
1185}
1186
1187static DECLCALLBACK(uint8_t*) vboxVDMACrHgcmHandleEnableRemainingHostCommand(HVBOXCRCMDCTL_REMAINING_HOST_COMMAND hClient, uint32_t *pcbCtl, int prevCmdRc)
1188{
1189 struct VBOXVDMAHOST *pVdma = hClient;
1190 if (!pVdma->pCurRemainingHostCtl)
1191 {
1192 /* disable VBVA, all subsequent host commands will go HGCM way */
1193 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1194 }
1195 else
1196 {
1197 VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pVdma->pCurRemainingHostCtl, prevCmdRc);
1198 }
1199
1200 pVdma->pCurRemainingHostCtl = VBoxVBVAExHPCheckHostCtlOnDisable(&pVdma->CmdVbva);
1201 if (pVdma->pCurRemainingHostCtl)
1202 {
1203 *pcbCtl = pVdma->pCurRemainingHostCtl->u.cmd.cbCmd;
1204 return pVdma->pCurRemainingHostCtl->u.cmd.pu8Cmd;
1205 }
1206
1207 *pcbCtl = 0;
1208 return NULL;
1209}
1210
/**
 * "Terminating done" notification from the HGCM service.
 * In strict builds this sanity-checks the expected VBVA/thread states;
 * otherwise it is a no-op.
 */
static DECLCALLBACK(void) vboxVDMACrHgcmNotifyTerminatingDoneCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient)
{
# ifdef VBOX_STRICT
    struct VBOXVDMAHOST *pVdma = hClient;
    /* Expected to arrive while the worker is processing and already marked for termination. */
    Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pVdma->Thread.u32State == VBOXVDMATHREAD_STATE_TERMINATING);
# else
    RT_NOREF(hClient);
# endif
}
1221
1222static DECLCALLBACK(int) vboxVDMACrHgcmNotifyTerminatingCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient, VBOXCRCMDCTL_HGCMENABLE_DATA *pHgcmEnableData)
1223{
1224 struct VBOXVDMAHOST *pVdma = hClient;
1225 VBVAEXHOSTCTL HCtl;
1226 HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD;
1227 int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1228
1229 pHgcmEnableData->hRHCmd = pVdma;
1230 pHgcmEnableData->pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1231
1232 if (RT_FAILURE(rc))
1233 {
1234 if (rc == VERR_INVALID_STATE)
1235 rc = VINF_SUCCESS;
1236 else
1237 WARN(("vdmaVBVACtlSubmitSync failed %d\n", rc));
1238 }
1239
1240 return rc;
1241}
1242
1243static int vboxVDMACrHgcmHandleEnable(struct VBOXVDMAHOST *pVdma)
1244{
1245 VBOXCRCMDCTL_ENABLE Enable;
1246 Enable.Hdr.enmType = VBOXCRCMDCTL_TYPE_ENABLE;
1247 Enable.Data.hRHCmd = pVdma;
1248 Enable.Data.pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1249
1250 int rc = vboxVDMACrHgcmSubmitSync(pVdma, &Enable.Hdr, sizeof (Enable));
1251 Assert(!pVdma->pCurRemainingHostCtl);
1252 if (RT_SUCCESS(rc))
1253 {
1254 Assert(!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1255 return VINF_SUCCESS;
1256 }
1257
1258 Assert(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1259 WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));
1260
1261 return rc;
1262}
1263
1264static int vdmaVBVAEnableProcess(struct VBOXVDMAHOST *pVdma, uint32_t u32Offset)
1265{
1266 if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1267 {
1268 WARN(("vdma VBVA is already enabled\n"));
1269 return VERR_INVALID_STATE;
1270 }
1271
1272 VBVABUFFER *pVBVA = (VBVABUFFER *)HGSMIOffsetToPointerHost(pVdma->pHgsmi, u32Offset);
1273 if (!pVBVA)
1274 {
1275 WARN(("invalid offset %d\n", u32Offset));
1276 return VERR_INVALID_PARAMETER;
1277 }
1278
1279 if (!pVdma->CrSrvInfo.pfnEnable)
1280 {
1281# ifdef DEBUG_misha
1282 WARN(("pfnEnable is NULL\n"));
1283 return VERR_NOT_SUPPORTED;
1284# endif
1285 }
1286
1287 int rc = VBoxVBVAExHSEnable(&pVdma->CmdVbva, pVBVA);
1288 if (RT_SUCCESS(rc))
1289 {
1290 VBOXCRCMDCTL_DISABLE Disable;
1291 Disable.Hdr.enmType = VBOXCRCMDCTL_TYPE_DISABLE;
1292 Disable.Data.hNotifyTerm = pVdma;
1293 Disable.Data.pfnNotifyTerm = vboxVDMACrHgcmNotifyTerminatingCb;
1294 Disable.Data.pfnNotifyTermDone = vboxVDMACrHgcmNotifyTerminatingDoneCb;
1295 rc = vboxVDMACrHgcmSubmitSync(pVdma, &Disable.Hdr, sizeof (Disable));
1296 if (RT_SUCCESS(rc))
1297 {
1298 PVGASTATE pVGAState = pVdma->pVGAState;
1299 VBOXCRCMD_SVRENABLE_INFO Info;
1300 Info.hCltScr = pVGAState->pDrv;
1301 Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
1302 Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
1303 Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
1304 rc = pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
1305 if (RT_SUCCESS(rc))
1306 return VINF_SUCCESS;
1307 else
1308 WARN(("pfnEnable failed %d\n", rc));
1309
1310 vboxVDMACrHgcmHandleEnable(pVdma);
1311 }
1312 else
1313 WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));
1314
1315 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1316 }
1317 else
1318 WARN(("VBoxVBVAExHSEnable failed %d\n", rc));
1319
1320 return rc;
1321}
1322
1323static int vdmaVBVADisableProcess(struct VBOXVDMAHOST *pVdma, bool fDoHgcmEnable)
1324{
1325 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1326 {
1327 Log(("vdma VBVA is already disabled\n"));
1328 return VINF_SUCCESS;
1329 }
1330
1331 int rc = pVdma->CrSrvInfo.pfnDisable(pVdma->CrSrvInfo.hSvr);
1332 if (RT_SUCCESS(rc))
1333 {
1334 if (fDoHgcmEnable)
1335 {
1336 PVGASTATE pVGAState = pVdma->pVGAState;
1337
1338 /* disable is a bit tricky
1339 * we need to ensure the host ctl commands do not come out of order
1340 * and do not come over HGCM channel until after it is enabled */
1341 rc = vboxVDMACrHgcmHandleEnable(pVdma);
1342 if (RT_SUCCESS(rc))
1343 {
1344 vdmaVBVANotifyDisable(pVGAState);
1345 return VINF_SUCCESS;
1346 }
1347
1348 VBOXCRCMD_SVRENABLE_INFO Info;
1349 Info.hCltScr = pVGAState->pDrv;
1350 Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
1351 Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
1352 Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
1353 pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
1354 }
1355 }
1356 else
1357 WARN(("pfnDisable failed %d\n", rc));
1358
1359 return rc;
1360}
1361
1362static int vboxVDMACrHostCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd, bool *pfContinue)
1363{
1364 *pfContinue = true;
1365
1366 switch (pCmd->enmType)
1367 {
1368 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1369 {
1370 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1371 {
1372 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1373 return VERR_INVALID_STATE;
1374 }
1375 return pVdma->CrSrvInfo.pfnHostCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1376 }
1377 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1378 {
1379 int rc = vdmaVBVADisableProcess(pVdma, true);
1380 if (RT_FAILURE(rc))
1381 {
1382 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1383 return rc;
1384 }
1385
1386 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
1387 }
1388 case VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD:
1389 {
1390 int rc = vdmaVBVADisableProcess(pVdma, false);
1391 if (RT_FAILURE(rc))
1392 {
1393 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1394 return rc;
1395 }
1396
1397 rc = VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, true);
1398 if (RT_FAILURE(rc))
1399 {
1400 WARN(("VBoxVDMAThreadTerm failed %d\n", rc));
1401 return rc;
1402 }
1403
1404 *pfContinue = false;
1405 return VINF_SUCCESS;
1406 }
1407 case VBVAEXHOSTCTL_TYPE_HH_SAVESTATE:
1408 {
1409 PVGASTATE pVGAState = pVdma->pVGAState;
1410 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1411 int rc = VBoxVBVAExHSSaveState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM);
1412 if (RT_FAILURE(rc))
1413 {
1414 WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
1415 return rc;
1416 }
1417 VGA_SAVED_STATE_PUT_MARKER(pCmd->u.state.pSSM, 4);
1418
1419 return pVdma->CrSrvInfo.pfnSaveState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM);
1420 }
1421 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE:
1422 {
1423 PVGASTATE pVGAState = pVdma->pVGAState;
1424 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1425
1426 int rc = VBoxVBVAExHSLoadState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1427 if (RT_FAILURE(rc))
1428 {
1429 WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
1430 return rc;
1431 }
1432
1433 VGA_SAVED_STATE_GET_MARKER_RETURN_ON_MISMATCH(pCmd->u.state.pSSM, pCmd->u.state.u32Version, 4);
1434 rc = pVdma->CrSrvInfo.pfnLoadState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1435 if (RT_FAILURE(rc))
1436 {
1437 WARN(("pfnLoadState failed %d\n", rc));
1438 return rc;
1439 }
1440
1441 return VINF_SUCCESS;
1442 }
1443 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE:
1444 {
1445 PVGASTATE pVGAState = pVdma->pVGAState;
1446
1447 for (uint32_t i = 0; i < pVGAState->cMonitors; ++i)
1448 {
1449 VBVAINFOSCREEN CurScreen;
1450 VBVAINFOVIEW CurView;
1451
1452 int rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
1453 if (RT_FAILURE(rc))
1454 {
1455 WARN(("VBVAGetInfoViewAndScreen failed %d\n", rc));
1456 return rc;
1457 }
1458
1459 rc = VBVAInfoScreen(pVGAState, &CurScreen);
1460 if (RT_FAILURE(rc))
1461 {
1462 WARN(("VBVAInfoScreen failed %d\n", rc));
1463 return rc;
1464 }
1465 }
1466
1467 return VINF_SUCCESS;
1468 }
1469 default:
1470 WARN(("unexpected host ctl type %d\n", pCmd->enmType));
1471 return VERR_INVALID_PARAMETER;
1472 }
1473}
1474
1475static int vboxVDMASetupScreenInfo(PVGASTATE pVGAState, VBVAINFOSCREEN *pScreen)
1476{
1477 const uint32_t u32ViewIndex = pScreen->u32ViewIndex;
1478 const bool fDisabled = RT_BOOL(pScreen->u16Flags & VBVA_SCREEN_F_DISABLED);
1479
1480 if (fDisabled)
1481 {
1482 if ( u32ViewIndex < pVGAState->cMonitors
1483 || u32ViewIndex == UINT32_C(0xFFFFFFFF))
1484 {
1485 RT_ZERO(*pScreen);
1486 pScreen->u32ViewIndex = u32ViewIndex;
1487 pScreen->u16Flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED;
1488 return VINF_SUCCESS;
1489 }
1490 }
1491 else
1492 {
1493 if ( u32ViewIndex < pVGAState->cMonitors
1494 && pScreen->u16BitsPerPixel <= 32
1495 && pScreen->u32Width <= UINT16_MAX
1496 && pScreen->u32Height <= UINT16_MAX
1497 && pScreen->u32LineSize <= UINT16_MAX * 4)
1498 {
1499 const uint32_t u32BytesPerPixel = (pScreen->u16BitsPerPixel + 7) / 8;
1500 if (pScreen->u32Width <= pScreen->u32LineSize / (u32BytesPerPixel? u32BytesPerPixel: 1))
1501 {
1502 const uint64_t u64ScreenSize = (uint64_t)pScreen->u32LineSize * pScreen->u32Height;
1503 if ( pScreen->u32StartOffset <= pVGAState->vram_size
1504 && u64ScreenSize <= pVGAState->vram_size
1505 && pScreen->u32StartOffset <= pVGAState->vram_size - (uint32_t)u64ScreenSize)
1506 {
1507 return VINF_SUCCESS;
1508 }
1509 }
1510 }
1511 }
1512
1513 return VERR_INVALID_PARAMETER;
1514}
1515
/**
 * Applies one guest resize entry: validates the screen info, resizes the
 * chromium server, and updates the 2D view/screen info for every monitor
 * selected in the entry's target bitmap.
 *
 * @returns VBox status code (first failure wins).
 * @param   pVdma   The VDMA host state.
 * @param   pEntry  The resize entry (guest data; copied and validated locally).
 */
static int vboxVDMACrGuestCtlResizeEntryProcess(struct VBOXVDMAHOST *pVdma, VBOXCMDVBVA_RESIZE_ENTRY *pEntry)
{
    PVGASTATE pVGAState = pVdma->pVGAState;
    VBVAINFOSCREEN Screen = pEntry->Screen;

    /* Verify and cleanup local copy of the input data. */
    int rc = vboxVDMASetupScreenInfo(pVGAState, &Screen);
    if (RT_FAILURE(rc))
    {
        WARN(("invalid screen data\n"));
        return rc;
    }

    /* Copy the target bitmap and clear bits beyond the configured monitor count. */
    VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
    memcpy(aTargetMap, pEntry->aTargetMap, sizeof(aTargetMap));
    ASMBitClearRange(aTargetMap, pVGAState->cMonitors, VBOX_VIDEO_MAX_SCREENS);

    rc = pVdma->CrSrvInfo.pfnResize(pVdma->CrSrvInfo.hSvr, &Screen, aTargetMap);
    if (RT_FAILURE(rc))
    {
        WARN(("pfnResize failed %d\n", rc));
        return rc;
    }

    /* A fake view which contains the current screen for the 2D VBVAInfoView. */
    VBVAINFOVIEW View;
    View.u32ViewOffset = 0;
    View.u32ViewSize = Screen.u32LineSize * Screen.u32Height + Screen.u32StartOffset;
    View.u32MaxScreenSize = Screen.u32LineSize * Screen.u32Height;

    const bool fDisable = RT_BOOL(Screen.u16Flags & VBVA_SCREEN_F_DISABLED);

    /* Walk every monitor index set in the (already clamped) target bitmap. */
    for (int i = ASMBitFirstSet(aTargetMap, pVGAState->cMonitors);
            i >= 0;
            i = ASMBitNextSet(aTargetMap, pVGAState->cMonitors, i))
    {
        Screen.u32ViewIndex = i;

        VBVAINFOSCREEN CurScreen;
        VBVAINFOVIEW CurView;

        rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
        AssertRC(rc);

        /* Skip monitors that already have exactly this configuration. */
        if (!memcmp(&Screen, &CurScreen, sizeof (CurScreen)))
            continue;

        /* Update the view unless we are disabling a screen that already has a view. */
        if (!fDisable || !CurView.u32ViewSize)
        {
            View.u32ViewIndex = Screen.u32ViewIndex;

            rc = VBVAInfoView(pVGAState, &View);
            if (RT_FAILURE(rc))
            {
                WARN(("VBVAInfoView failed %d\n", rc));
                break;
            }
        }

        rc = VBVAInfoScreen(pVGAState, &Screen);
        if (RT_FAILURE(rc))
        {
            WARN(("VBVAInfoScreen failed %d\n", rc));
            break;
        }
    }

    return rc;
}
1585
1586static int vboxVDMACrGuestCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd)
1587{
1588 VBVAEXHOSTCTL_TYPE enmType = pCmd->enmType;
1589 switch (enmType)
1590 {
1591 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1592 {
1593 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1594 {
1595 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1596 return VERR_INVALID_STATE;
1597 }
1598 return pVdma->CrSrvInfo.pfnGuestCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1599 }
1600 case VBVAEXHOSTCTL_TYPE_GHH_RESIZE:
1601 {
1602 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1603 {
1604 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1605 return VERR_INVALID_STATE;
1606 }
1607
1608 uint32_t cbCmd = pCmd->u.cmd.cbCmd;
1609
1610 if (cbCmd % sizeof (VBOXCMDVBVA_RESIZE_ENTRY))
1611 {
1612 WARN(("invalid buffer size\n"));
1613 return VERR_INVALID_PARAMETER;
1614 }
1615
1616 uint32_t cElements = cbCmd / sizeof (VBOXCMDVBVA_RESIZE_ENTRY);
1617 if (!cElements)
1618 {
1619 WARN(("invalid buffer size\n"));
1620 return VERR_INVALID_PARAMETER;
1621 }
1622
1623 VBOXCMDVBVA_RESIZE *pResize = (VBOXCMDVBVA_RESIZE*)pCmd->u.cmd.pu8Cmd;
1624
1625 int rc = VINF_SUCCESS;
1626
1627 for (uint32_t i = 0; i < cElements; ++i)
1628 {
1629 VBOXCMDVBVA_RESIZE_ENTRY *pEntry = &pResize->aEntries[i];
1630 rc = vboxVDMACrGuestCtlResizeEntryProcess(pVdma, pEntry);
1631 if (RT_FAILURE(rc))
1632 {
1633 WARN(("vboxVDMACrGuestCtlResizeEntryProcess failed %d\n", rc));
1634 break;
1635 }
1636 }
1637 return rc;
1638 }
1639 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE:
1640 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED:
1641 {
1642 VBVAENABLE *pEnable = (VBVAENABLE *)pCmd->u.cmd.pu8Cmd;
1643 Assert(pCmd->u.cmd.cbCmd == sizeof (VBVAENABLE));
1644 uint32_t u32Offset = pEnable->u32Offset;
1645 int rc = vdmaVBVAEnableProcess(pVdma, u32Offset);
1646 if (!RT_SUCCESS(rc))
1647 {
1648 WARN(("vdmaVBVAEnableProcess failed %d\n", rc));
1649 return rc;
1650 }
1651
1652 if (enmType == VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED)
1653 {
1654 rc = VBoxVBVAExHPPause(&pVdma->CmdVbva);
1655 if (!RT_SUCCESS(rc))
1656 {
1657 WARN(("VBoxVBVAExHPPause failed %d\n", rc));
1658 return rc;
1659 }
1660 }
1661
1662 return VINF_SUCCESS;
1663 }
1664 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1665 {
1666 int rc = vdmaVBVADisableProcess(pVdma, true);
1667 if (RT_FAILURE(rc))
1668 {
1669 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1670 return rc;
1671 }
1672
1673 /* do vgaUpdateDisplayAll right away */
1674 VMR3ReqCallNoWait(PDMDevHlpGetVM(pVdma->pVGAState->pDevInsR3), VMCPUID_ANY,
1675 (PFNRT)vgaUpdateDisplayAll, 2, pVdma->pVGAState, /* fFailOnResize = */ false);
1676
1677 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
1678 }
1679 default:
1680 WARN(("unexpected ctl type %d\n", pCmd->enmType));
1681 return VERR_INVALID_PARAMETER;
1682 }
1683}
1684
1685/**
1686 * @param fIn - whether this is a page in or out op.
1687 * the direction is VRA#M - related, so fIn == true - transfer to VRAM); false - transfer from VRAM
1688 */
1689static int vboxVDMACrCmdVbvaProcessPagingEl(PPDMDEVINS pDevIns, VBOXCMDVBVAPAGEIDX iPage, uint8_t *pu8Vram, bool fIn)
1690{
1691 RTGCPHYS phPage = (RTGCPHYS)iPage << PAGE_SHIFT;
1692 PGMPAGEMAPLOCK Lock;
1693 int rc;
1694
1695 if (fIn)
1696 {
1697 const void * pvPage;
1698 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPage, &Lock);
1699 if (!RT_SUCCESS(rc))
1700 {
1701 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d", rc));
1702 return rc;
1703 }
1704
1705 memcpy(pu8Vram, pvPage, PAGE_SIZE);
1706
1707 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1708 }
1709 else
1710 {
1711 void * pvPage;
1712 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvPage, &Lock);
1713 if (!RT_SUCCESS(rc))
1714 {
1715 WARN(("PDMDevHlpPhysGCPhys2CCPtr failed %d", rc));
1716 return rc;
1717 }
1718
1719 memcpy(pvPage, pu8Vram, PAGE_SIZE);
1720
1721 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1722 }
1723
1724 return VINF_SUCCESS;
1725}
1726
1727static int vboxVDMACrCmdVbvaProcessPagingEls(PPDMDEVINS pDevIns, const VBOXCMDVBVAPAGEIDX *piPages, uint32_t cPages, uint8_t *pu8Vram, bool fIn)
1728{
1729 for (uint32_t i = 0; i < cPages; ++i, pu8Vram += PAGE_SIZE)
1730 {
1731 int rc = vboxVDMACrCmdVbvaProcessPagingEl(pDevIns, piPages[i], pu8Vram, fIn);
1732 if (!RT_SUCCESS(rc))
1733 {
1734 WARN(("vboxVDMACrCmdVbvaProcessPagingEl failed %d", rc));
1735 return rc;
1736 }
1737 }
1738
1739 return VINF_SUCCESS;
1740}
1741
/**
 * Validates a paging-transfer command and extracts its parameters.
 *
 * @returns 0 on success, -1 on any validation failure (guest-visible int8
 *          result convention).
 * @param   pVGAState   The VGA device state (for VRAM bounds).
 * @param   pHdr        The command header (direction flag).
 * @param   pData       The transfer data (VRAM offset + page index array).
 * @param   cbCmd       Total command size, bounds the page index array.
 * @param   ppPages     Where to return the page index array pointer.
 * @param   pcPages     Where to return the page count.
 * @param   ppu8Vram    Where to return the VRAM start address.
 * @param   pfIn        Where to return the transfer direction (see
 *                      vboxVDMACrCmdVbvaProcessPagingEl).
 */
static int8_t vboxVDMACrCmdVbvaPagingDataInit(PVGASTATE pVGAState, const VBOXCMDVBVA_HDR *pHdr, const VBOXCMDVBVA_PAGING_TRANSFER_DATA *pData, uint32_t cbCmd,
                            const VBOXCMDVBVAPAGEIDX **ppPages, VBOXCMDVBVAPAGEIDX *pcPages,
                            uint8_t **ppu8Vram, bool *pfIn)
{
    if (cbCmd < sizeof (VBOXCMDVBVA_PAGING_TRANSFER))
    {
        WARN(("cmd too small"));
        return -1;
    }

    /* Everything after the fixed header is the page index array; it must be
     * a whole number of page indices. */
    VBOXCMDVBVAPAGEIDX cPages = cbCmd - RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers);
    if (cPages % sizeof (VBOXCMDVBVAPAGEIDX))
    {
        WARN(("invalid cmd size"));
        return -1;
    }
    cPages /= sizeof (VBOXCMDVBVAPAGEIDX);

    VBOXCMDVBVAOFFSET offVRAM = pData->Alloc.u.offVRAM;
    if (offVRAM & PAGE_OFFSET_MASK)
    {
        WARN(("offVRAM address is not on page boundary\n"));
        return -1;
    }
    const VBOXCMDVBVAPAGEIDX *pPages = pData->aPageNumbers;

    uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
    if (offVRAM >= pVGAState->vram_size)
    {
        WARN(("invalid vram offset"));
        return -1;
    }

    /* Reject page counts whose byte size would overflow when shifted. */
    if (~(~(VBOXCMDVBVAPAGEIDX)0 >> PAGE_SHIFT) & cPages)
    {
        WARN(("invalid cPages %d", cPages));
        return -1;
    }

    /* NOTE(review): this uses '>=', which also rejects a transfer ending exactly
     * at the end of VRAM; presumably intentional (conservative) - confirm. */
    if (offVRAM + ((VBOXCMDVBVAOFFSET)cPages << PAGE_SHIFT) >= pVGAState->vram_size)
    {
        WARN(("invalid cPages %d, exceeding vram size", cPages));
        return -1;
    }

    uint8_t *pu8Vram = pu8VramBase + offVRAM;
    bool fIn = !!(pHdr->u8Flags & VBOXCMDVBVA_OPF_PAGING_TRANSFER_IN);

    *ppPages = pPages;
    *pcPages = cPages;
    *ppu8Vram = pu8Vram;
    *pfIn = fIn;
    return 0;
}
1796
/**
 * Fills a VRAM range with a 32-bit pattern (guest "paging fill" command).
 *
 * @returns 0 on success, -1 on validation failure.
 * @param   pVGAState   The VGA device state (for VRAM bounds).
 * @param   pFill       The fill command (guest data - validated here).
 */
static int8_t vboxVDMACrCmdVbvaPagingFill(PVGASTATE pVGAState, VBOXCMDVBVA_PAGING_FILL *pFill)
{
    VBOXCMDVBVAOFFSET offVRAM = pFill->offVRAM;
    if (offVRAM & PAGE_OFFSET_MASK)
    {
        WARN(("offVRAM address is not on page boundary\n"));
        return -1;
    }

    uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
    if (offVRAM >= pVGAState->vram_size)
    {
        WARN(("invalid vram offset"));
        return -1;
    }

    uint32_t cbFill = pFill->u32CbFill;

    /* NOTE(review): '>=' also rejects a fill ending exactly at the VRAM end;
     * and if VBOXCMDVBVAOFFSET is 32-bit this sum could wrap for huge cbFill -
     * confirm the type width against the header. */
    if (offVRAM + cbFill >= pVGAState->vram_size)
    {
        WARN(("invalid cPages"));
        return -1;
    }

    uint32_t *pu32Vram = (uint32_t*)(pu8VramBase + offVRAM);
    uint32_t u32Color = pFill->u32Pattern;

    /* cbFill is expected to be a multiple of 4; any tail bytes are ignored. */
    Assert(!(cbFill % 4));
    for (uint32_t i = 0; i < cbFill / 4; ++i)
    {
        pu32Vram[i] = u32Color;
    }

    return 0;
}
1832
1833static int8_t vboxVDMACrCmdVbvaProcessCmdData(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
1834{
1835 switch (pCmd->u8OpCode)
1836 {
1837 case VBOXCMDVBVA_OPTYPE_NOPCMD:
1838 return 0;
1839 case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
1840 {
1841 PVGASTATE pVGAState = pVdma->pVGAState;
1842 const VBOXCMDVBVAPAGEIDX *pPages;
1843 uint32_t cPages;
1844 uint8_t *pu8Vram;
1845 bool fIn;
1846 int8_t i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pCmd, &((VBOXCMDVBVA_PAGING_TRANSFER*)pCmd)->Data, cbCmd,
1847 &pPages, &cPages,
1848 &pu8Vram, &fIn);
1849 if (i8Result < 0)
1850 {
1851 WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
1852 return i8Result;
1853 }
1854
1855 PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
1856 int rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pPages, cPages, pu8Vram, fIn);
1857 if (!RT_SUCCESS(rc))
1858 {
1859 WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
1860 return -1;
1861 }
1862
1863 return 0;
1864 }
1865 case VBOXCMDVBVA_OPTYPE_PAGING_FILL:
1866 {
1867 PVGASTATE pVGAState = pVdma->pVGAState;
1868 if (cbCmd != sizeof (VBOXCMDVBVA_PAGING_FILL))
1869 {
1870 WARN(("cmd too small"));
1871 return -1;
1872 }
1873
1874 return vboxVDMACrCmdVbvaPagingFill(pVGAState, (VBOXCMDVBVA_PAGING_FILL*)pCmd);
1875 }
1876 default:
1877 return pVdma->CrSrvInfo.pfnCmd(pVdma->CrSrvInfo.hSvr, pCmd, cbCmd);
1878 }
1879}
1880
# if 0
/* Historical layout of VBOXCMDVBVA_PAGING_TRANSFER, kept for reference only. */
typedef struct VBOXCMDVBVA_PAGING_TRANSFER
{
    VBOXCMDVBVA_HDR Hdr;
    /* for now can only contain offVRAM.
     * paging transfer can NOT be initiated for allocations having host 3D object (hostID) associated */
    VBOXCMDVBVA_ALLOCINFO Alloc;
    uint32_t u32Reserved;
    VBOXCMDVBVA_SYSMEMEL aSysMem[1];
} VBOXCMDVBVA_PAGING_TRANSFER;
# endif

/* Layout assumptions the paging code above and below relies on. */
AssertCompile(sizeof (VBOXCMDVBVA_HDR) == 8);
AssertCompile(sizeof (VBOXCMDVBVA_ALLOCINFO) == 4);
AssertCompile(sizeof (VBOXCMDVBVAPAGEIDX) == 4);
AssertCompile(!(PAGE_SIZE % sizeof (VBOXCMDVBVAPAGEIDX)));

/** Number of sysmem elements that fit into one guest page. */
# define VBOXCMDVBVA_NUM_SYSMEMEL_PER_PAGE (PAGE_SIZE / sizeof (VBOXCMDVBVA_SYSMEMEL))
1899
1900static int8_t vboxVDMACrCmdVbvaProcess(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
1901{
1902 switch (pCmd->u8OpCode)
1903 {
1904 case VBOXCMDVBVA_OPTYPE_SYSMEMCMD:
1905 {
1906 if (cbCmd < sizeof (VBOXCMDVBVA_SYSMEMCMD))
1907 {
1908 WARN(("invalid command size"));
1909 return -1;
1910 }
1911 VBOXCMDVBVA_SYSMEMCMD *pSysmemCmd = (VBOXCMDVBVA_SYSMEMCMD*)pCmd;
1912 const VBOXCMDVBVA_HDR *pRealCmdHdr;
1913 uint32_t cbRealCmd = pCmd->u8Flags;
1914 cbRealCmd |= (uint32_t)pCmd->u.u8PrimaryID << 8;
1915 if (cbRealCmd < sizeof (VBOXCMDVBVA_HDR))
1916 {
1917 WARN(("invalid sysmem cmd size"));
1918 return -1;
1919 }
1920
1921 RTGCPHYS phCmd = (RTGCPHYS)pSysmemCmd->phCmd;
1922
1923 PGMPAGEMAPLOCK Lock;
1924 PVGASTATE pVGAState = pVdma->pVGAState;
1925 PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
1926 const void * pvCmd;
1927 int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
1928 if (!RT_SUCCESS(rc))
1929 {
1930 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
1931 return -1;
1932 }
1933
1934 Assert((phCmd & PAGE_OFFSET_MASK) == (((uintptr_t)pvCmd) & PAGE_OFFSET_MASK));
1935
1936 uint32_t cbCmdPart = PAGE_SIZE - (((uintptr_t)pvCmd) & PAGE_OFFSET_MASK);
1937
1938 if (cbRealCmd <= cbCmdPart)
1939 {
1940 pRealCmdHdr = (const VBOXCMDVBVA_HDR *)pvCmd;
1941 uint8_t i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pRealCmdHdr, cbRealCmd);
1942 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1943 return i8Result;
1944 }
1945
1946 VBOXCMDVBVA_HDR Hdr;
1947 const void *pvCurCmdTail;
1948 uint32_t cbCurCmdTail;
1949 if (cbCmdPart >= sizeof (*pRealCmdHdr))
1950 {
1951 pRealCmdHdr = (const VBOXCMDVBVA_HDR *)pvCmd;
1952 pvCurCmdTail = (const void*)(pRealCmdHdr + 1);
1953 cbCurCmdTail = cbCmdPart - sizeof (*pRealCmdHdr);
1954 }
1955 else
1956 {
1957 memcpy(&Hdr, pvCmd, cbCmdPart);
1958 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1959 phCmd += cbCmdPart;
1960 Assert(!(phCmd & PAGE_OFFSET_MASK));
1961 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
1962 if (!RT_SUCCESS(rc))
1963 {
1964 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
1965 return -1;
1966 }
1967
1968 cbCmdPart = sizeof (*pRealCmdHdr) - cbCmdPart;
1969 memcpy(((uint8_t*)(&Hdr)) + cbCmdPart, pvCmd, cbCmdPart);
1970 pRealCmdHdr = &Hdr;
1971 pvCurCmdTail = (const void*)(((uint8_t*)pvCmd) + cbCmdPart);
1972 cbCurCmdTail = PAGE_SIZE - cbCmdPart;
1973 }
1974
1975 if (cbCurCmdTail > cbRealCmd - sizeof (*pRealCmdHdr))
1976 cbCurCmdTail = cbRealCmd - sizeof (*pRealCmdHdr);
1977
1978 int8_t i8Result = 0;
1979
1980 switch (pRealCmdHdr->u8OpCode)
1981 {
1982 case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
1983 {
1984 const uint32_t *pPages;
1985 uint32_t cPages;
1986 uint8_t *pu8Vram;
1987 bool fIn;
1988 i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pRealCmdHdr, (const VBOXCMDVBVA_PAGING_TRANSFER_DATA*)pvCurCmdTail, cbRealCmd,
1989 &pPages, &cPages,
1990 &pu8Vram, &fIn);
1991 if (i8Result < 0)
1992 {
1993 WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
1994 /* we need to break, not return, to ensure currently locked page is released */
1995 break;
1996 }
1997
1998 if (cbCurCmdTail & 3)
1999 {
2000 WARN(("command is not alligned properly %d", cbCurCmdTail));
2001 i8Result = -1;
2002 /* we need to break, not return, to ensure currently locked page is released */
2003 break;
2004 }
2005
2006 uint32_t cCurPages = cbCurCmdTail / sizeof (VBOXCMDVBVAPAGEIDX);
2007 Assert(cCurPages < cPages);
2008
2009 do
2010 {
2011 rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pPages, cCurPages, pu8Vram, fIn);
2012 if (!RT_SUCCESS(rc))
2013 {
2014 WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
2015 i8Result = -1;
2016 /* we need to break, not return, to ensure currently locked page is released */
2017 break;
2018 }
2019
2020 Assert(cPages >= cCurPages);
2021 cPages -= cCurPages;
2022
2023 if (!cPages)
2024 break;
2025
2026 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2027
2028 Assert(!(phCmd & PAGE_OFFSET_MASK));
2029
2030 phCmd += PAGE_SIZE;
2031 pu8Vram += (VBOXCMDVBVAOFFSET)cCurPages << PAGE_SHIFT;
2032
2033 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
2034 if (!RT_SUCCESS(rc))
2035 {
2036 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
2037 /* the page is not locked, return */
2038 return -1;
2039 }
2040
2041 cCurPages = PAGE_SIZE / sizeof (VBOXCMDVBVAPAGEIDX);
2042 if (cCurPages > cPages)
2043 cCurPages = cPages;
2044 } while (1);
2045 break;
2046 }
2047 default:
2048 WARN(("command can not be splitted"));
2049 i8Result = -1;
2050 break;
2051 }
2052
2053 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2054 return i8Result;
2055 }
2056 case VBOXCMDVBVA_OPTYPE_COMPLEXCMD:
2057 {
2058 Assert(cbCmd >= sizeof (VBOXCMDVBVA_HDR));
2059 ++pCmd;
2060 cbCmd -= sizeof (*pCmd);
2061 uint32_t cbCurCmd = 0;
2062 for ( ; cbCmd; cbCmd -= cbCurCmd, pCmd = (VBOXCMDVBVA_HDR*)(((uint8_t*)pCmd) + cbCurCmd))
2063 {
2064 if (cbCmd < sizeof (VBOXCMDVBVA_HDR))
2065 {
2066 WARN(("invalid command size"));
2067 return -1;
2068 }
2069
2070 cbCurCmd = pCmd->u2.complexCmdEl.u16CbCmdHost;
2071 if (cbCmd < cbCurCmd)
2072 {
2073 WARN(("invalid command size"));
2074 return -1;
2075 }
2076
2077 int8_t i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCurCmd);
2078 if (i8Result < 0)
2079 {
2080 WARN(("vboxVDMACrCmdVbvaProcess failed"));
2081 return i8Result;
2082 }
2083 }
2084 return 0;
2085 }
2086 default:
2087 return vboxVDMACrCmdVbvaProcessCmdData(pVdma, pCmd, cbCmd);
2088 }
2089}
2090
2091static void vboxVDMACrCmdProcess(struct VBOXVDMAHOST *pVdma, uint8_t* pu8Cmd, uint32_t cbCmd)
2092{
2093 if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
2094 return;
2095
2096 if (cbCmd < sizeof (VBOXCMDVBVA_HDR))
2097 {
2098 WARN(("invalid command size"));
2099 return;
2100 }
2101
2102 PVBOXCMDVBVA_HDR pCmd = (PVBOXCMDVBVA_HDR)pu8Cmd;
2103
2104 /* check if the command is cancelled */
2105 if (!ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_IN_PROGRESS, VBOXCMDVBVA_STATE_SUBMITTED))
2106 {
2107 Assert(pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED);
2108 return;
2109 }
2110
2111 pCmd->u.i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCmd);
2112}
2113
2114static int vboxVDMACrCtlHgsmiSetup(struct VBOXVDMAHOST *pVdma)
2115{
2116 PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP pCmd = (PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP)
2117 vboxVDMACrCtlCreate (VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP, sizeof (*pCmd));
2118 int rc = VERR_NO_MEMORY;
2119 if (pCmd)
2120 {
2121 PVGASTATE pVGAState = pVdma->pVGAState;
2122 pCmd->pvVRamBase = pVGAState->vram_ptrR3;
2123 pCmd->cbVRam = pVGAState->vram_size;
2124 pCmd->pLed = &pVGAState->Led3D;
2125 pCmd->CrClientInfo.hClient = pVdma;
2126 pCmd->CrClientInfo.pfnCallout = vboxCmdVBVACmdCallout;
2127 rc = vboxVDMACrCtlPost(pVGAState, &pCmd->Hdr, sizeof (*pCmd));
2128 if (RT_SUCCESS(rc))
2129 {
2130 rc = vboxVDMACrCtlGetRc(&pCmd->Hdr);
2131 if (RT_SUCCESS(rc))
2132 pVdma->CrSrvInfo = pCmd->CrCmdServerInfo;
2133 else if (rc != VERR_NOT_SUPPORTED)
2134 WARN(("vboxVDMACrCtlGetRc returned %d\n", rc));
2135 }
2136 else
2137 WARN(("vboxVDMACrCtlPost failed %d\n", rc));
2138
2139 vboxVDMACrCtlRelease(&pCmd->Hdr);
2140 }
2141
2142 if (!RT_SUCCESS(rc))
2143 memset(&pVdma->CrSrvInfo, 0, sizeof (pVdma->CrSrvInfo));
2144
2145 return rc;
2146}
2147
2148static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer);
2149
/**
 * Checks whether the descriptor carries an "external" command that must be
 * handed to the chromium backend (or handled inline here) instead of going
 * through the regular internal VDMA command pipeline.
 *
 * @returns VINF_SUCCESS if the command was fully taken care of here (the
 *          caller must not process or complete it further),
 *          VINF_NOT_SUPPORTED if it is not a command handled here, or a
 *          VERR_xxx status on malformed input.
 * @param   pVdma       The VDMA host state.
 * @param   pCmdDr      The command descriptor.
 * @param   cbCmdDr     Size of the descriptor buffer in bytes.
 */
static int vboxVDMACmdCheckCrCmd(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmdDr, uint32_t cbCmdDr)
{
    PVBOXVDMACMD pDmaCmd = NULL;
    uint32_t cbDmaCmd = 0;
    uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
    int rc = VINF_NOT_SUPPORTED; /* default: not a command handled here */

    cbDmaCmd = pCmdDr->cbBuf;

    if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
    {
        /* The DMA command is embedded right after the descriptor. */
        if (cbCmdDr < sizeof (*pCmdDr) + VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        /* NOTE(review): this compares cbDmaCmd against the descriptor tail
         * size with '<', which looks inverted vs. the usual "buffer large
         * enough to hold cbDmaCmd" check -- confirm against the guest-side
         * writer before touching. */
        if (cbDmaCmd < cbCmdDr - sizeof (*pCmdDr) - VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid command buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        pDmaCmd = VBOXVDMACBUF_DR_TAIL(pCmdDr, VBOXVDMACMD);
    }
    else if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
    {
        /* The DMA command lives in VRAM at the given offset; bounds-check it. */
        VBOXVIDEOOFFSET offBuf = pCmdDr->Location.offVramBuf;
        if (offBuf + cbDmaCmd > pVdma->pVGAState->vram_size)
        {
            AssertMsgFailed(("invalid command buffer data from offset!"));
            return VERR_INVALID_PARAMETER;
        }
        pDmaCmd = (VBOXVDMACMD*)(pvRam + offBuf);
    }

    if (pDmaCmd)
    {
        Assert(cbDmaCmd >= VBOXVDMACMD_HEADER_SIZE());
        uint32_t cbBody = VBOXVDMACMD_BODY_SIZE(cbDmaCmd);

        switch (pDmaCmd->enmType)
        {
            case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
            {
                PVBOXVDMACMD_CHROMIUM_CMD pCrCmd = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_CHROMIUM_CMD);
                if (cbBody < sizeof (*pCrCmd))
                {
                    AssertMsgFailed(("invalid chromium command buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }
                PVGASTATE pVGAState = pVdma->pVGAState;
                rc = VINF_SUCCESS;
                if (pVGAState->pDrv->pfnCrHgsmiCommandProcess)
                {
                    /* Completion is signalled asynchronously by the backend. */
                    VBoxSHGSMICommandMarkAsynchCompletion(pCmdDr);
                    pVGAState->pDrv->pfnCrHgsmiCommandProcess(pVGAState->pDrv, pCrCmd, cbBody);
                    break;
                }
                else
                {
                    Assert(0);
                }

                /* No backend attached: complete the command right away. */
                int tmpRc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                AssertRC(tmpRc);
                break;
            }
            case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
            {
                PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
                if (cbBody < sizeof (*pTransfer))
                {
                    AssertMsgFailed(("invalid bpb transfer buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }

                /* Execute the transfer synchronously and complete it here. */
                rc = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, sizeof (*pTransfer));
                AssertRC(rc);
                if (RT_SUCCESS(rc))
                {
                    pCmdDr->rc = VINF_SUCCESS;
                    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                    AssertRC(rc);
                    rc = VINF_SUCCESS;
                }
                break;
            }
            default:
                break;
        }
    }
    return rc;
}
2245
2246int vboxVDMACrHgsmiCommandCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc)
2247{
2248 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2249 PHGSMIINSTANCE pIns = pVGAState->pHGSMI;
2250 VBOXVDMACMD *pDmaHdr = VBOXVDMACMD_FROM_BODY(pCmd);
2251 VBOXVDMACBUF_DR *pDr = VBOXVDMACBUF_DR_FROM_TAIL(pDmaHdr);
2252 AssertRC(rc);
2253 pDr->rc = rc;
2254
2255 Assert(pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
2256 rc = VBoxSHGSMICommandComplete(pIns, pDr);
2257 AssertRC(rc);
2258 return rc;
2259}
2260
2261int vboxVDMACrHgsmiControlCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CTL pCmd, int rc)
2262{
2263 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2264 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pCmdPrivate = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
2265 pCmdPrivate->rc = rc;
2266 if (pCmdPrivate->pfnCompletion)
2267 {
2268 pCmdPrivate->pfnCompletion(pVGAState, pCmd, pCmdPrivate->pvCompletion);
2269 }
2270 return VINF_SUCCESS;
2271}
2272
/**
 * Copies a rectangle from the source surface to the destination surface.
 *
 * Neither color conversion nor stretching is supported: the formats must
 * match (checked) and the rectangle dimensions must match (asserted only).
 * When the copy spans full surface lines, a single memcpy is used; otherwise
 * it is done line by line.
 *
 * @returns VINF_SUCCESS, or VERR_INVALID_FUNCTION on a format mismatch.
 */
static int vboxVDMACmdExecBltPerform(PVBOXVDMAHOST pVdma, uint8_t *pvDstSurf, const uint8_t *pvSrcSurf,
                                     const PVBOXVDMA_SURF_DESC pDstDesc, const PVBOXVDMA_SURF_DESC pSrcDesc,
                                     const VBOXVDMA_RECTL * pDstRectl, const VBOXVDMA_RECTL * pSrcRectl)
{
    RT_NOREF(pVdma);
    /* we do not support color conversion */
    Assert(pDstDesc->format == pSrcDesc->format);
    /* we do not support stretching */
    Assert(pDstRectl->height == pSrcRectl->height);
    Assert(pDstRectl->width == pSrcRectl->width);
    if (pDstDesc->format != pSrcDesc->format)
        return VERR_INVALID_FUNCTION;
    if (pDstDesc->width == pDstRectl->width
            && pSrcDesc->width == pSrcRectl->width
            && pSrcDesc->width == pDstDesc->width)
    {
        /* Full-width copy: both rects cover complete lines, so the whole
         * region is contiguous and can be copied in one go. */
        Assert(!pDstRectl->left);
        Assert(!pSrcRectl->left);
        uint32_t cbOff = pDstDesc->pitch * pDstRectl->top;
        uint32_t cbSize = pDstDesc->pitch * pDstRectl->height;
        memcpy(pvDstSurf + cbOff, pvSrcSurf + cbOff, cbSize);
    }
    else
    {
        /* Partial-width copy: compute byte offsets of the sub-rect within
         * each line (bpp may be sub-byte, hence the +7 rounding). */
        uint32_t offDstLineStart = pDstRectl->left * pDstDesc->bpp >> 3;
        uint32_t offDstLineEnd = ((pDstRectl->left * pDstDesc->bpp + 7) >> 3) + ((pDstDesc->bpp * pDstRectl->width + 7) >> 3);
        uint32_t cbDstLine = offDstLineEnd - offDstLineStart;
        uint32_t offDstStart = pDstDesc->pitch * pDstRectl->top + offDstLineStart;
        Assert(cbDstLine <= pDstDesc->pitch);
        uint32_t cbDstSkip = pDstDesc->pitch;
        uint8_t * pvDstStart = pvDstSurf + offDstStart;

        uint32_t offSrcLineStart = pSrcRectl->left * pSrcDesc->bpp >> 3;
        /* cbSrcLine is only referenced from Asserts, which compile away in
         * non-strict builds, hence the VBOX_STRICT guard. */
# ifdef VBOX_STRICT
        uint32_t offSrcLineEnd = ((pSrcRectl->left * pSrcDesc->bpp + 7) >> 3) + ((pSrcDesc->bpp * pSrcRectl->width + 7) >> 3);
        uint32_t cbSrcLine = offSrcLineEnd - offSrcLineStart;
# endif
        uint32_t offSrcStart = pSrcDesc->pitch * pSrcRectl->top + offSrcLineStart;
        Assert(cbSrcLine <= pSrcDesc->pitch);
        uint32_t cbSrcSkip = pSrcDesc->pitch;
        const uint8_t * pvSrcStart = pvSrcSurf + offSrcStart;

        Assert(cbDstLine == cbSrcLine);

        /* NOTE(review): the copy happens before the i == height check, so the
         * loop body runs height+1 times -- looks like the rect height is
         * treated as inclusive here; confirm against the guest-side writer
         * before changing. */
        for (uint32_t i = 0; ; ++i)
        {
            memcpy (pvDstStart, pvSrcStart, cbDstLine);
            if (i == pDstRectl->height)
                break;
            pvDstStart += cbDstSkip;
            pvSrcStart += cbSrcSkip;
        }
    }
    return VINF_SUCCESS;
}
2328
2329static void vboxVDMARectlUnite(VBOXVDMA_RECTL * pRectl1, const VBOXVDMA_RECTL * pRectl2)
2330{
2331 if (!pRectl1->width)
2332 *pRectl1 = *pRectl2;
2333 else
2334 {
2335 int16_t x21 = pRectl1->left + pRectl1->width;
2336 int16_t x22 = pRectl2->left + pRectl2->width;
2337 if (pRectl1->left > pRectl2->left)
2338 {
2339 pRectl1->left = pRectl2->left;
2340 pRectl1->width = x21 < x22 ? x22 - pRectl1->left : x21 - pRectl1->left;
2341 }
2342 else if (x21 < x22)
2343 pRectl1->width = x22 - pRectl1->left;
2344
2345 x21 = pRectl1->top + pRectl1->height;
2346 x22 = pRectl2->top + pRectl2->height;
2347 if (pRectl1->top > pRectl2->top)
2348 {
2349 pRectl1->top = pRectl2->top;
2350 pRectl1->height = x21 < x22 ? x22 - pRectl1->top : x21 - pRectl1->top;
2351 }
2352 else if (x21 < x22)
2353 pRectl1->height = x22 - pRectl1->top;
2354 }
2355}
2356
/**
 * Executes a present-blt command, blitting either each destination sub-rect
 * or the whole destination rectangle from the source surface.
 *
 * @returns On success the number of bytes the command contained, otherwise a
 *          VERR_xxx error code.
 */
static int vboxVDMACmdExecBlt(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt, uint32_t cbBuffer)
{
    /* The command is variable-sized: header plus cDstSubRects rectangles. */
    const uint32_t cbBlt = VBOXVDMACMD_BODY_FIELD_OFFSET(uint32_t, VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects[pBlt->cDstSubRects]);
    Assert(cbBlt <= cbBuffer);
    if (cbBuffer < cbBlt)
        return VERR_INVALID_FUNCTION;

    /* we do not support stretching for now */
    Assert(pBlt->srcRectl.width == pBlt->dstRectl.width);
    Assert(pBlt->srcRectl.height == pBlt->dstRectl.height);
    if (pBlt->srcRectl.width != pBlt->dstRectl.width)
        return VERR_INVALID_FUNCTION;
    if (pBlt->srcRectl.height != pBlt->dstRectl.height)
        return VERR_INVALID_FUNCTION;
    Assert(pBlt->cDstSubRects);

    uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
    /* Accumulates the union of all blitted destination rects (currently only
     * computed, not consumed further down in this function). */
    VBOXVDMA_RECTL updateRectl = {0, 0, 0, 0};

    if (pBlt->cDstSubRects)
    {
        VBOXVDMA_RECTL dstRectl, srcRectl;
        const VBOXVDMA_RECTL *pDstRectl, *pSrcRectl;
        for (uint32_t i = 0; i < pBlt->cDstSubRects; ++i)
        {
            /* Sub-rects are relative to dstRectl/srcRectl; translate them to
             * absolute surface coordinates when the outer rect has an offset. */
            pDstRectl = &pBlt->aDstSubRects[i];
            if (pBlt->dstRectl.left || pBlt->dstRectl.top)
            {
                dstRectl.left = pDstRectl->left + pBlt->dstRectl.left;
                dstRectl.top = pDstRectl->top + pBlt->dstRectl.top;
                dstRectl.width = pDstRectl->width;
                dstRectl.height = pDstRectl->height;
                pDstRectl = &dstRectl;
            }

            /* NOTE(review): the source sub-rect is also taken from
             * aDstSubRects -- presumably valid because stretching is excluded
             * above, so src and dst sub-rects coincide; confirm. */
            pSrcRectl = &pBlt->aDstSubRects[i];
            if (pBlt->srcRectl.left || pBlt->srcRectl.top)
            {
                srcRectl.left = pSrcRectl->left + pBlt->srcRectl.left;
                srcRectl.top = pSrcRectl->top + pBlt->srcRectl.top;
                srcRectl.width = pSrcRectl->width;
                srcRectl.height = pSrcRectl->height;
                pSrcRectl = &srcRectl;
            }

            int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
                    &pBlt->dstDesc, &pBlt->srcDesc,
                    pDstRectl,
                    pSrcRectl);
            AssertRC(rc);
            if (!RT_SUCCESS(rc))
                return rc;

            vboxVDMARectlUnite(&updateRectl, pDstRectl);
        }
    }
    else
    {
        /* No sub-rects: blit the whole destination rectangle at once.
         * (Unreachable in strict builds, see the Assert above.) */
        int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
                &pBlt->dstDesc, &pBlt->srcDesc,
                &pBlt->dstRectl,
                &pBlt->srcRectl);
        AssertRC(rc);
        if (!RT_SUCCESS(rc))
            return rc;

        vboxVDMARectlUnite(&updateRectl, &pBlt->dstRectl);
    }

    return cbBlt;
}
2431
2432static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer)
2433{
2434 if (cbBuffer < sizeof (*pTransfer))
2435 return VERR_INVALID_PARAMETER;
2436
2437 PVGASTATE pVGAState = pVdma->pVGAState;
2438 uint8_t * pvRam = pVGAState->vram_ptrR3;
2439 PGMPAGEMAPLOCK SrcLock;
2440 PGMPAGEMAPLOCK DstLock;
2441 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
2442 const void * pvSrc;
2443 void * pvDst;
2444 int rc = VINF_SUCCESS;
2445 uint32_t cbTransfer = pTransfer->cbTransferSize;
2446 uint32_t cbTransfered = 0;
2447 bool bSrcLocked = false;
2448 bool bDstLocked = false;
2449 do
2450 {
2451 uint32_t cbSubTransfer = cbTransfer;
2452 if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
2453 {
2454 pvSrc = pvRam + pTransfer->Src.offVramBuf + cbTransfered;
2455 }
2456 else
2457 {
2458 RTGCPHYS phPage = pTransfer->Src.phBuf;
2459 phPage += cbTransfered;
2460 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvSrc, &SrcLock);
2461 AssertRC(rc);
2462 if (RT_SUCCESS(rc))
2463 {
2464 bSrcLocked = true;
2465 cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
2466 }
2467 else
2468 {
2469 break;
2470 }
2471 }
2472
2473 if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
2474 {
2475 pvDst = pvRam + pTransfer->Dst.offVramBuf + cbTransfered;
2476 }
2477 else
2478 {
2479 RTGCPHYS phPage = pTransfer->Dst.phBuf;
2480 phPage += cbTransfered;
2481 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvDst, &DstLock);
2482 AssertRC(rc);
2483 if (RT_SUCCESS(rc))
2484 {
2485 bDstLocked = true;
2486 cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
2487 }
2488 else
2489 {
2490 break;
2491 }
2492 }
2493
2494 if (RT_SUCCESS(rc))
2495 {
2496 memcpy(pvDst, pvSrc, cbSubTransfer);
2497 cbTransfer -= cbSubTransfer;
2498 cbTransfered += cbSubTransfer;
2499 }
2500 else
2501 {
2502 cbTransfer = 0; /* to break */
2503 }
2504
2505 if (bSrcLocked)
2506 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &SrcLock);
2507 if (bDstLocked)
2508 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &DstLock);
2509 } while (cbTransfer);
2510
2511 if (RT_SUCCESS(rc))
2512 return sizeof (*pTransfer);
2513 return rc;
2514}
2515
2516static int vboxVDMACmdExec(PVBOXVDMAHOST pVdma, const uint8_t *pvBuffer, uint32_t cbBuffer)
2517{
2518 do
2519 {
2520 Assert(pvBuffer);
2521 Assert(cbBuffer >= VBOXVDMACMD_HEADER_SIZE());
2522
2523 if (!pvBuffer)
2524 return VERR_INVALID_PARAMETER;
2525 if (cbBuffer < VBOXVDMACMD_HEADER_SIZE())
2526 return VERR_INVALID_PARAMETER;
2527
2528 PVBOXVDMACMD pCmd = (PVBOXVDMACMD)pvBuffer;
2529 switch (pCmd->enmType)
2530 {
2531 case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
2532 {
2533# ifdef VBOXWDDM_TEST_UHGSMI
2534 static int count = 0;
2535 static uint64_t start, end;
2536 if (count==0)
2537 {
2538 start = RTTimeNanoTS();
2539 }
2540 ++count;
2541 if (count==100000)
2542 {
2543 end = RTTimeNanoTS();
2544 float ems = (end-start)/1000000.f;
2545 LogRel(("100000 calls took %i ms, %i cps\n", (int)ems, (int)(100000.f*1000.f/ems) ));
2546 }
2547# endif
2548 /** @todo post the buffer to chromium */
2549 return VINF_SUCCESS;
2550 }
2551 case VBOXVDMACMD_TYPE_DMA_PRESENT_BLT:
2552 {
2553 const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_PRESENT_BLT);
2554 int cbBlt = vboxVDMACmdExecBlt(pVdma, pBlt, cbBuffer);
2555 Assert(cbBlt >= 0);
2556 Assert((uint32_t)cbBlt <= cbBuffer);
2557 if (cbBlt >= 0)
2558 {
2559 if ((uint32_t)cbBlt == cbBuffer)
2560 return VINF_SUCCESS;
2561 else
2562 {
2563 cbBuffer -= (uint32_t)cbBlt;
2564 pvBuffer -= cbBlt;
2565 }
2566 }
2567 else
2568 return cbBlt; /* error */
2569 break;
2570 }
2571 case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
2572 {
2573 const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
2574 int cbTransfer = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, cbBuffer);
2575 Assert(cbTransfer >= 0);
2576 Assert((uint32_t)cbTransfer <= cbBuffer);
2577 if (cbTransfer >= 0)
2578 {
2579 if ((uint32_t)cbTransfer == cbBuffer)
2580 return VINF_SUCCESS;
2581 else
2582 {
2583 cbBuffer -= (uint32_t)cbTransfer;
2584 pvBuffer -= cbTransfer;
2585 }
2586 }
2587 else
2588 return cbTransfer; /* error */
2589 break;
2590 }
2591 case VBOXVDMACMD_TYPE_DMA_NOP:
2592 return VINF_SUCCESS;
2593 case VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ:
2594 return VINF_SUCCESS;
2595 default:
2596 AssertBreakpoint();
2597 return VERR_INVALID_FUNCTION;
2598 }
2599 } while (1);
2600
2601 /* we should not be here */
2602 AssertBreakpoint();
2603 return VERR_INVALID_STATE;
2604}
2605
/**
 * The VDMA worker thread: pulls commands and controls off the VBVA host
 * context and processes them until termination is requested.
 *
 * @returns VINF_SUCCESS.
 * @param   hThreadSelf     The thread handle (unused).
 * @param   pvUser          Pointer to the VBOXVDMAHOST instance.
 */
static DECLCALLBACK(int) vboxVDMAWorkerThread(RTTHREAD hThreadSelf, void *pvUser)
{
    RT_NOREF(hThreadSelf);
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvUser;
    PVGASTATE pVGAState = pVdma->pVGAState;
    VBVAEXHOSTCONTEXT *pCmdVbva = &pVdma->CmdVbva;
    uint8_t *pCmd;
    uint32_t cbCmd;
    int rc;

    /* Unblock the creator waiting for thread construction to finish. */
    VBoxVDMAThreadNotifyConstructSucceeded(&pVdma->Thread, pvUser);

    while (!VBoxVDMAThreadIsTerminating(&pVdma->Thread))
    {
        VBVAEXHOST_DATA_TYPE enmType = VBoxVBVAExHPDataGet(pCmdVbva, &pCmd, &cbCmd);
        switch (enmType)
        {
            case VBVAEXHOST_DATA_TYPE_CMD:
                /* Regular guest command: process, complete, signal the guest. */
                vboxVDMACrCmdProcess(pVdma, pCmd, cbCmd);
                VBoxVBVAExHPDataCompleteCmd(pCmdVbva, cbCmd);
                VBVARaiseIrq(pVGAState, 0);
                break;
            case VBVAEXHOST_DATA_TYPE_GUESTCTL:
                rc = vboxVDMACrGuestCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd);
                VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
                break;
            case VBVAEXHOST_DATA_TYPE_HOSTCTL:
            {
                bool fContinue = true;
                rc = vboxVDMACrHostCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd, &fContinue);
                VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
                if (fContinue)
                    break;
            }
            /* fall thru - the host control asked us to stop processing, so
             * block for the next event just like in the no-data case. */
            case VBVAEXHOST_DATA_TYPE_NO_DATA:
                rc = VBoxVDMAThreadEventWait(&pVdma->Thread, RT_INDEFINITE_WAIT);
                AssertRC(rc);
                break;
            default:
                WARN(("unexpected type %d\n", enmType));
                break;
        }
    }

    /* Acknowledge termination to whoever requested it. */
    VBoxVDMAThreadNotifyTerminatingSucceeded(&pVdma->Thread, pvUser);

    return VINF_SUCCESS;
}
2654
/**
 * Resolves the location of a VDMA command buffer, executes it and completes
 * the HGSMI command with the resulting status.
 *
 * The buffer may follow the descriptor, live at a VRAM offset, or sit in a
 * single guest physical page; buffers crossing a page boundary are rejected.
 */
static void vboxVDMACommandProcess(PVBOXVDMAHOST pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    RT_NOREF(cbCmd);
    PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
    const uint8_t * pvBuf;
    PGMPAGEMAPLOCK Lock;
    int rc;
    bool bReleaseLocked = false; /* set when pvBuf points into a locked guest page */

    do
    {
        PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;

        if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
            pvBuf = VBOXVDMACBUF_DR_TAIL(pCmd, const uint8_t);
        else if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
        {
            uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
            pvBuf = pvRam + pCmd->Location.offVramBuf;
        }
        else
        {
            /* Buffer is in guest physical memory; it must fit within one page. */
            RTGCPHYS phPage = pCmd->Location.phBuf & ~0xfffULL;
            uint32_t offset = pCmd->Location.phBuf & 0xfff;
            Assert(offset + pCmd->cbBuf <= 0x1000);
            if (offset + pCmd->cbBuf > 0x1000)
            {
                /** @todo more advanced mechanism of command buffer proc is actually needed */
                rc = VERR_INVALID_PARAMETER;
                break;
            }

            const void * pvPageBuf;
            rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPageBuf, &Lock);
            AssertRC(rc);
            if (!RT_SUCCESS(rc))
            {
                /** @todo if (rc == VERR_PGM_PHYS_PAGE_RESERVED) -> fall back on using PGMPhysRead ?? */
                break;
            }

            pvBuf = (const uint8_t *)pvPageBuf;
            pvBuf += offset;

            bReleaseLocked = true;
        }

        rc = vboxVDMACmdExec(pVdma, pvBuf, pCmd->cbBuf);
        AssertRC(rc);

        /* Only the guest-physical-page path holds a mapping lock. */
        if (bReleaseLocked)
            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
    } while (0);

    /* Store the status and complete the HGSMI command towards the guest. */
    pCmd->rc = rc;

    rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
    AssertRC(rc);
}
2714
2715# if 0 /** @todo vboxVDMAControlProcess is unused */
2716static void vboxVDMAControlProcess(PVBOXVDMAHOST pVdma, PVBOXVDMA_CTL pCmd)
2717{
2718 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
2719 pCmd->i32Result = VINF_SUCCESS;
2720 int rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
2721 AssertRC(rc);
2722}
2723# endif
2724
2725#endif /* VBOX_WITH_CRHGSMI */
2726#ifdef VBOX_VDMA_WITH_WATCHDOG
2727
2728static DECLCALLBACK(void) vboxVDMAWatchDogTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
2729{
2730 VBOXVDMAHOST *pVdma = (VBOXVDMAHOST *)pvUser;
2731 PVGASTATE pVGAState = pVdma->pVGAState;
2732 VBVARaiseIrq(pVGAState, HGSMIHOSTFLAGS_WATCHDOG);
2733}
2734
2735static int vboxVDMAWatchDogCtl(struct VBOXVDMAHOST *pVdma, uint32_t cMillis)
2736{
2737 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
2738 if (cMillis)
2739 TMTimerSetMillies(pVdma->WatchDogTimer, cMillis);
2740 else
2741 TMTimerStop(pVdma->WatchDogTimer);
2742 return VINF_SUCCESS;
2743}
2744
2745#endif /* VBOX_VDMA_WITH_WATCHDOG */
2746
/**
 * Constructs the VDMA host state and attaches it to the VGA state.
 *
 * With VBOX_WITH_CRHGSMI this also sets up the worker-thread machinery, the
 * VBVA host context and the callout critical section, and performs the
 * CRHGSMI setup handshake with the 3D backend.  On any failure everything
 * initialized so far is torn down again.
 *
 * @returns VBox status code.
 * @param   pVGAState       The VGA state.
 * @param   cPipeElements   Unused.
 */
int vboxVDMAConstruct(PVGASTATE pVGAState, uint32_t cPipeElements)
{
    RT_NOREF(cPipeElements);
    int rc;
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)RTMemAllocZ(sizeof(*pVdma));
    Assert(pVdma);
    if (pVdma)
    {
        pVdma->pHgsmi = pVGAState->pHGSMI;
        pVdma->pVGAState = pVGAState;

#ifdef VBOX_VDMA_WITH_WATCHDOG
        rc = PDMDevHlpTMTimerCreate(pVGAState->pDevInsR3, TMCLOCK_REAL, vboxVDMAWatchDogTimer,
                                        pVdma, TMTIMER_FLAGS_NO_CRIT_SECT,
                                        "VDMA WatchDog Timer", &pVdma->WatchDogTimer);
        AssertRC(rc);
#endif

#ifdef VBOX_WITH_CRHGSMI
        VBoxVDMAThreadInit(&pVdma->Thread);

        rc = RTSemEventMultiCreate(&pVdma->HostCrCtlCompleteEvent);
        if (RT_SUCCESS(rc))
        {
            rc = VBoxVBVAExHSInit(&pVdma->CmdVbva);
            if (RT_SUCCESS(rc))
            {
                rc = RTCritSectInit(&pVdma->CalloutCritSect);
                if (RT_SUCCESS(rc))
                {
                    /* Fully initialized: publish and kick off the 3D handshake. */
                    pVGAState->pVdma = pVdma;
                    int rcIgnored = vboxVDMACrCtlHgsmiSetup(pVdma); NOREF(rcIgnored); /** @todo is this ignoring intentional? */
                    return VINF_SUCCESS;
                }
                WARN(("RTCritSectInit failed %d\n", rc));

                VBoxVBVAExHSTerm(&pVdma->CmdVbva);
            }
            else
                WARN(("VBoxVBVAExHSInit failed %d\n", rc));

            RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
        }
        else
            WARN(("RTSemEventMultiCreate failed %d\n", rc));

        /* Failure: undo the allocation (the watchdog timer, if created, is
         * owned by PDM and cleaned up with the device instance). */
        RTMemFree(pVdma);
#else
        pVGAState->pVdma = pVdma;
        return VINF_SUCCESS;
#endif
    }
    else
        rc = VERR_OUT_OF_RESOURCES;

    return rc;
}
2805
/**
 * Resets the VDMA state on VM reset by synchronously disabling command VBVA.
 *
 * @returns VINF_SUCCESS.
 */
int vboxVDMAReset(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    vdmaVBVACtlDisableSync(pVdma);
#else
    RT_NOREF(pVdma);
#endif
    return VINF_SUCCESS;
}
2815
/**
 * Destroys the VDMA host state: disables command VBVA, tears down the worker
 * thread and associated resources, and frees the instance.  NULL is allowed.
 *
 * @returns VINF_SUCCESS.
 */
int vboxVDMADestruct(struct VBOXVDMAHOST *pVdma)
{
    if (!pVdma)
        return VINF_SUCCESS;
#ifdef VBOX_WITH_CRHGSMI
    vdmaVBVACtlDisableSync(pVdma);
    VBoxVDMAThreadCleanup(&pVdma->Thread);
    VBoxVBVAExHSTerm(&pVdma->CmdVbva);
    RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
    RTCritSectDelete(&pVdma->CalloutCritSect);
#endif
    RTMemFree(pVdma);
    return VINF_SUCCESS;
}
2830
/**
 * Handles a VDMA control request from the guest and completes it via SHGSMI.
 *
 * Enable/disable/flush are acknowledged without further action; the watchdog
 * control (when compiled in) re-arms or stops the watchdog timer.
 */
void vboxVDMAControl(struct VBOXVDMAHOST *pVdma, PVBOXVDMA_CTL pCmd, uint32_t cbCmd)
{
    RT_NOREF(cbCmd);
    PHGSMIINSTANCE pIns = pVdma->pHgsmi;

    switch (pCmd->enmCtl)
    {
        case VBOXVDMA_CTL_TYPE_ENABLE:
            pCmd->i32Result = VINF_SUCCESS;
            break;
        case VBOXVDMA_CTL_TYPE_DISABLE:
            pCmd->i32Result = VINF_SUCCESS;
            break;
        case VBOXVDMA_CTL_TYPE_FLUSH:
            pCmd->i32Result = VINF_SUCCESS;
            break;
#ifdef VBOX_VDMA_WITH_WATCHDOG
        case VBOXVDMA_CTL_TYPE_WATCHDOG:
            /* u32Offset doubles as the watchdog timeout in milliseconds here. */
            pCmd->i32Result = vboxVDMAWatchDogCtl(pVdma, pCmd->u32Offset);
            break;
#endif
        default:
            WARN(("cmd not supported"));
            pCmd->i32Result = VERR_NOT_SUPPORTED;
    }

    /* Report the stored result back to the guest. */
    int rc = VBoxSHGSMICommandComplete (pIns, pCmd);
    AssertRC(rc);
}
2860
/**
 * Entry point for VDMA commands arriving via HGSMI.
 *
 * Chromium commands are dispatched to the chromium HGCM thread (see
 * vboxVDMACmdCheckCrCmd); everything else is processed inline.  On failure,
 * and in builds without CRHGSMI, the command is completed immediately with
 * the error status.
 */
void vboxVDMACommand(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    int rc = VERR_NOT_IMPLEMENTED;

#ifdef VBOX_WITH_CRHGSMI
    /* chromium commands are processed by crhomium hgcm thread independently from our internal cmd processing pipeline
     * this is why we process them specially */
    rc = vboxVDMACmdCheckCrCmd(pVdma, pCmd, cbCmd);
    if (rc == VINF_SUCCESS)
        return; /* handled (and completed) by the chromium path */

    if (RT_FAILURE(rc))
    {
        pCmd->rc = rc;
        rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
        AssertRC(rc);
        return;
    }

    /* VINF_NOT_SUPPORTED: not a chromium command, run the normal pipeline. */
    vboxVDMACommandProcess(pVdma, pCmd, cbCmd);
#else
    RT_NOREF(cbCmd);
    pCmd->rc = rc;
    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
    AssertRC(rc);
#endif
}
2888
2889#ifdef VBOX_WITH_CRHGSMI
2890
2891static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext);
2892
2893static int vdmaVBVACtlSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2894{
2895 int rc = VBoxVBVAExHCtlSubmit(&pVdma->CmdVbva, pCtl, enmSource, pfnComplete, pvComplete);
2896 if (RT_SUCCESS(rc))
2897 {
2898 if (rc == VINF_SUCCESS)
2899 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
2900 else
2901 Assert(rc == VINF_ALREADY_INITIALIZED);
2902 }
2903 else
2904 Log(("VBoxVBVAExHCtlSubmit failed %d\n", rc));
2905
2906 return rc;
2907}
2908
2909static DECLCALLBACK(void) vboxCmdVBVACmdCtlGuestCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext)
2910{
2911 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvContext;
2912 VBOXCMDVBVA_CTL *pGCtl = (VBOXCMDVBVA_CTL*)(pCtl->u.cmd.pu8Cmd - sizeof (VBOXCMDVBVA_CTL));
2913 AssertRC(rc);
2914 pGCtl->i32Result = rc;
2915
2916 Assert(pVdma->pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
2917 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pGCtl);
2918 AssertRC(rc);
2919
2920 VBoxVBVAExHCtlFree(pVbva, pCtl);
2921}
2922
2923static int vdmaVBVACtlGenericSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_SOURCE enmSource, VBVAEXHOSTCTL_TYPE enmType, uint8_t* pu8Cmd, uint32_t cbCmd, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2924{
2925 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, enmType);
2926 if (!pHCtl)
2927 {
2928 WARN(("VBoxVBVAExHCtlCreate failed\n"));
2929 return VERR_NO_MEMORY;
2930 }
2931
2932 pHCtl->u.cmd.pu8Cmd = pu8Cmd;
2933 pHCtl->u.cmd.cbCmd = cbCmd;
2934 int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, enmSource, pfnComplete, pvComplete);
2935 if (RT_FAILURE(rc))
2936 {
2937 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
2938 Log(("vdmaVBVACtlSubmit failed rc %d\n", rc));
2939 return rc;;
2940 }
2941 return VINF_SUCCESS;
2942}
2943
2944static int vdmaVBVACtlGenericGuestSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_TYPE enmType, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
2945{
2946 Assert(cbCtl >= sizeof (VBOXCMDVBVA_CTL));
2947 VBoxSHGSMICommandMarkAsynchCompletion(pCtl);
2948 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_GUEST, enmType, (uint8_t*)(pCtl+1), cbCtl - sizeof (VBOXCMDVBVA_CTL), vboxCmdVBVACmdCtlGuestCompletion, pVdma);
2949 if (RT_SUCCESS(rc))
2950 return VINF_SUCCESS;
2951
2952 WARN(("vdmaVBVACtlGenericSubmit failed %d\n", rc));
2953 pCtl->i32Result = rc;
2954 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
2955 AssertRC(rc);
2956 return VINF_SUCCESS;
2957}
2958
2959static DECLCALLBACK(void) vboxCmdVBVACmdCtlHostCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvCompletion)
2960{
2961 VBOXCRCMDCTL* pVboxCtl = (VBOXCRCMDCTL*)pCtl->u.cmd.pu8Cmd;
2962 if (pVboxCtl->u.pfnInternal)
2963 ((PFNCRCTLCOMPLETION)pVboxCtl->u.pfnInternal)(pVboxCtl, pCtl->u.cmd.cbCmd, rc, pvCompletion);
2964 VBoxVBVAExHCtlFree(pVbva, pCtl);
2965}
2966
2967static int vdmaVBVACtlOpaqueHostSubmit(PVBOXVDMAHOST pVdma, struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
2968 PFNCRCTLCOMPLETION pfnCompletion,
2969 void *pvCompletion)
2970{
2971 pCmd->u.pfnInternal = (void(*)())pfnCompletion;
2972 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_HOST, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, (uint8_t*)pCmd, cbCmd, vboxCmdVBVACmdCtlHostCompletion, pvCompletion);
2973 if (RT_FAILURE(rc))
2974 {
2975 if (rc == VERR_INVALID_STATE)
2976 {
2977 pCmd->u.pfnInternal = NULL;
2978 PVGASTATE pVGAState = pVdma->pVGAState;
2979 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCmd, cbCmd, pfnCompletion, pvCompletion);
2980 if (!RT_SUCCESS(rc))
2981 WARN(("pfnCrHgsmiControlProcess failed %d\n", rc));
2982
2983 return rc;
2984 }
2985 WARN(("vdmaVBVACtlGenericSubmit failed %d\n", rc));
2986 return rc;
2987 }
2988
2989 return VINF_SUCCESS;
2990}
2991
2992static DECLCALLBACK(int) vdmaVBVANotifyEnable(PVGASTATE pVGAState)
2993{
2994 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
2995 {
2996 int rc = pVGAState->pDrv->pfnVBVAEnable (pVGAState->pDrv, i, NULL, true);
2997 if (!RT_SUCCESS(rc))
2998 {
2999 WARN(("pfnVBVAEnable failed %d\n", rc));
3000 for (uint32_t j = 0; j < i; j++)
3001 {
3002 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, j);
3003 }
3004
3005 return rc;
3006 }
3007 }
3008 return VINF_SUCCESS;
3009}
3010
3011static DECLCALLBACK(int) vdmaVBVANotifyDisable(PVGASTATE pVGAState)
3012{
3013 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
3014 {
3015 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, i);
3016 }
3017 return VINF_SUCCESS;
3018}
3019
/**
 * Worker-thread-creation callback used by the enable path: once the thread
 * exists, the pending enable/disable control is processed and Main is
 * notified about the VBVA state change; the control is completed in all
 * cases.
 *
 * @param   pThread         The worker thread (unused).
 * @param   rc              Thread creation status.
 * @param   pvThreadContext The VBOXVDMAHOST instance.
 * @param   pvContext       The pending VBVAEXHOSTCTL.
 */
static DECLCALLBACK(void) vdmaVBVACtlThreadCreatedEnable(struct VBOXVDMATHREAD *pThread, int rc,
                                                         void *pvThreadContext, void *pvContext)
{
    RT_NOREF(pThread);
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvThreadContext;
    VBVAEXHOSTCTL* pHCtl = (VBVAEXHOSTCTL*)pvContext;

    if (RT_SUCCESS(rc))
    {
        rc = vboxVDMACrGuestCtlProcess(pVdma, pHCtl);
        /* rc == VINF_SUCCESS would mean the actual state change has occcured */
        if (rc == VINF_SUCCESS)
        {
            /* we need to inform Main about VBVA enable/disable
             * main expects notifications to be done from the main thread
             * submit it there */
            PVGASTATE pVGAState = pVdma->pVGAState;

            if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
                vdmaVBVANotifyEnable(pVGAState);
            else
                vdmaVBVANotifyDisable(pVGAState);
        }
        else if (RT_FAILURE(rc))
            WARN(("vboxVDMACrGuestCtlProcess failed %d\n", rc));
    }
    else
        WARN(("vdmaVBVACtlThreadCreatedEnable is passed %d\n", rc));

    /* Always complete the control, propagating the final status. */
    VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pHCtl, rc);
}
3051
3052static int vdmaVBVACtlEnableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, bool fPaused, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3053{
3054 int rc;
3055 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, fPaused ? VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED : VBVAEXHOSTCTL_TYPE_GHH_ENABLE);
3056 if (pHCtl)
3057 {
3058 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
3059 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
3060 pHCtl->pfnComplete = pfnComplete;
3061 pHCtl->pvComplete = pvComplete;
3062
3063 rc = VBoxVDMAThreadCreate(&pVdma->Thread, vboxVDMAWorkerThread, pVdma, vdmaVBVACtlThreadCreatedEnable, pHCtl);
3064 if (RT_SUCCESS(rc))
3065 return VINF_SUCCESS;
3066 else
3067 WARN(("VBoxVDMAThreadCreate failed %d\n", rc));
3068
3069 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3070 }
3071 else
3072 {
3073 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3074 rc = VERR_NO_MEMORY;
3075 }
3076
3077 return rc;
3078}
3079
3080static int vdmaVBVACtlEnableSubmitSync(PVBOXVDMAHOST pVdma, uint32_t offVram, bool fPaused)
3081{
3082 VBVAENABLE Enable = {0};
3083 Enable.u32Flags = VBVA_F_ENABLE;
3084 Enable.u32Offset = offVram;
3085
3086 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
3087 Data.rc = VERR_NOT_IMPLEMENTED;
3088 int rc = RTSemEventCreate(&Data.hEvent);
3089 if (!RT_SUCCESS(rc))
3090 {
3091 WARN(("RTSemEventCreate failed %d\n", rc));
3092 return rc;
3093 }
3094
3095 rc = vdmaVBVACtlEnableSubmitInternal(pVdma, &Enable, fPaused, vdmaVBVACtlSubmitSyncCompletion, &Data);
3096 if (RT_SUCCESS(rc))
3097 {
3098 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
3099 if (RT_SUCCESS(rc))
3100 {
3101 rc = Data.rc;
3102 if (!RT_SUCCESS(rc))
3103 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
3104 }
3105 else
3106 WARN(("RTSemEventWait failed %d\n", rc));
3107 }
3108 else
3109 WARN(("vdmaVBVACtlSubmit failed %d\n", rc));
3110
3111 RTSemEventDestroy(Data.hEvent);
3112
3113 return rc;
3114}
3115
3116static int vdmaVBVACtlDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3117{
3118 int rc;
3119 VBVAEXHOSTCTL* pHCtl;
3120 if (VBoxVBVAExHSIsDisabled(&pVdma->CmdVbva))
3121 {
3122 WARN(("VBoxVBVAExHSIsDisabled: disabled"));
3123 return VINF_SUCCESS;
3124 }
3125
3126 pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_GHH_DISABLE);
3127 if (!pHCtl)
3128 {
3129 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3130 return VERR_NO_MEMORY;
3131 }
3132
3133 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
3134 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
3135 rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_GUEST, pfnComplete, pvComplete);
3136 if (RT_SUCCESS(rc))
3137 return VINF_SUCCESS;
3138
3139 WARN(("vdmaVBVACtlSubmit failed rc %d\n", rc));
3140 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3141 return rc;
3142}
3143
3144static int vdmaVBVACtlEnableDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3145{
3146 bool fEnable = ((pEnable->u32Flags & (VBVA_F_ENABLE | VBVA_F_DISABLE)) == VBVA_F_ENABLE);
3147 if (fEnable)
3148 return vdmaVBVACtlEnableSubmitInternal(pVdma, pEnable, false, pfnComplete, pvComplete);
3149 return vdmaVBVACtlDisableSubmitInternal(pVdma, pEnable, pfnComplete, pvComplete);
3150}
3151
3152static int vdmaVBVACtlEnableDisableSubmit(PVBOXVDMAHOST pVdma, VBOXCMDVBVA_CTL_ENABLE *pEnable)
3153{
3154 VBoxSHGSMICommandMarkAsynchCompletion(&pEnable->Hdr);
3155 int rc = vdmaVBVACtlEnableDisableSubmitInternal(pVdma, &pEnable->Enable, vboxCmdVBVACmdCtlGuestCompletion, pVdma);
3156 if (RT_SUCCESS(rc))
3157 return VINF_SUCCESS;
3158
3159 WARN(("vdmaVBVACtlEnableDisableSubmitInternal failed %d\n", rc));
3160 pEnable->Hdr.i32Result = rc;
3161 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, &pEnable->Hdr);
3162 AssertRC(rc);
3163 return VINF_SUCCESS;
3164}
3165
3166static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
3167 int rc, void *pvContext)
3168{
3169 RT_NOREF(pVbva, pCtl);
3170 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvContext;
3171 pData->rc = rc;
3172 rc = RTSemEventSignal(pData->hEvent);
3173 if (!RT_SUCCESS(rc))
3174 WARN(("RTSemEventSignal failed %d\n", rc));
3175}
3176
3177static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource)
3178{
3179 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
3180 Data.rc = VERR_NOT_IMPLEMENTED;
3181 int rc = RTSemEventCreate(&Data.hEvent);
3182 if (!RT_SUCCESS(rc))
3183 {
3184 WARN(("RTSemEventCreate failed %d\n", rc));
3185 return rc;
3186 }
3187
3188 rc = vdmaVBVACtlSubmit(pVdma, pCtl, enmSource, vdmaVBVACtlSubmitSyncCompletion, &Data);
3189 if (RT_SUCCESS(rc))
3190 {
3191 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
3192 if (RT_SUCCESS(rc))
3193 {
3194 rc = Data.rc;
3195 if (!RT_SUCCESS(rc))
3196 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
3197 }
3198 else
3199 WARN(("RTSemEventWait failed %d\n", rc));
3200 }
3201 else
3202 Log(("vdmaVBVACtlSubmit failed %d\n", rc));
3203
3204 RTSemEventDestroy(Data.hEvent);
3205
3206 return rc;
3207}
3208
3209static int vdmaVBVAPause(PVBOXVDMAHOST pVdma)
3210{
3211 VBVAEXHOSTCTL Ctl;
3212 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE;
3213 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
3214}
3215
3216static int vdmaVBVAResume(PVBOXVDMAHOST pVdma)
3217{
3218 VBVAEXHOSTCTL Ctl;
3219 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME;
3220 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
3221}
3222
3223static int vboxVDMACmdSubmitPerform(struct VBOXVDMAHOST *pVdma)
3224{
3225 int rc = VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva);
3226 switch (rc)
3227 {
3228 case VINF_SUCCESS:
3229 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
3230 case VINF_ALREADY_INITIALIZED:
3231 case VINF_EOF:
3232 case VERR_INVALID_STATE:
3233 return VINF_SUCCESS;
3234 default:
3235 Assert(!RT_FAILURE(rc));
3236 return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
3237 }
3238}
3239
3240
3241int vboxCmdVBVACmdHostCtl(PPDMIDISPLAYVBVACALLBACKS pInterface,
3242 struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
3243 PFNCRCTLCOMPLETION pfnCompletion,
3244 void *pvCompletion)
3245{
3246 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
3247 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3248 if (pVdma == NULL)
3249 return VERR_INVALID_STATE;
3250 pCmd->CalloutList.List.pNext = NULL;
3251 return vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, pfnCompletion, pvCompletion);
3252}
3253
/**
 * Shared context between vboxCmdVBVACmdHostCtlSync (the waiter) and
 * vboxCmdVBVACmdHostCtlSyncCb (the completion callback).
 */
typedef struct VBOXCMDVBVA_CMDHOSTCTL_SYNC
{
    struct VBOXVDMAHOST *pVdma;  /**< The VDMA instance the command was submitted to. */
    uint32_t fProcessing;        /**< Non-zero while in flight; cleared by the completion callback. */
    int rc;                      /**< Command result, written by the completion callback. */
} VBOXCMDVBVA_CMDHOSTCTL_SYNC;
3260
/**
 * Completion callback for vboxCmdVBVACmdHostCtlSync: publishes the result
 * and wakes the polling waiter.
 *
 * @param   pCmd            The completed control command (unused).
 * @param   cbCmd           Size of the command (unused).
 * @param   rc              Result of the command processing.
 * @param   pvCompletion    The VBOXCMDVBVA_CMDHOSTCTL_SYNC the waiter polls.
 */
static DECLCALLBACK(void) vboxCmdVBVACmdHostCtlSyncCb(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
{
    RT_NOREF(pCmd, cbCmd);
    VBOXCMDVBVA_CMDHOSTCTL_SYNC *pData = (VBOXCMDVBVA_CMDHOSTCTL_SYNC*)pvCompletion;

    /* Store the result before clearing fProcessing, since the waiter reads
       Data.rc once it observes fProcessing == 0.
       NOTE(review): fProcessing is a plain uint32_t, not atomic; the waiter
       combines polling with a semaphore wait below — verify the intended
       memory-ordering guarantees if this is ever touched. */
    pData->rc = rc;

    struct VBOXVDMAHOST *pVdma = pData->pVdma;

    /* Record one more completed host control; the waiter uses this counter
       to decide whether it is safe to reset the multi-event semaphore. */
    ASMAtomicIncS32(&pVdma->i32cHostCrCtlCompleted);

    /* Tell the waiter the command is done ... */
    pData->fProcessing = 0;

    /* ... and kick it out of RTSemEventMultiWait. */
    RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
}
3276
3277static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd, VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb)
3278{
3279 pEntry->pfnCb = pfnCb;
3280 int rc = RTCritSectEnter(&pVdma->CalloutCritSect);
3281 if (RT_SUCCESS(rc))
3282 {
3283 RTListAppend(&pCmd->CalloutList.List, &pEntry->Node);
3284 RTCritSectLeave(&pVdma->CalloutCritSect);
3285
3286 RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
3287 }
3288 else
3289 WARN(("RTCritSectEnter failed %d\n", rc));
3290
3291 return rc;
3292}
3293
3294
3295static int vboxCmdVBVACmdCalloutProcess(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd)
3296{
3297 int rc = VINF_SUCCESS;
3298 for (;;)
3299 {
3300 rc = RTCritSectEnter(&pVdma->CalloutCritSect);
3301 if (RT_SUCCESS(rc))
3302 {
3303 VBOXCRCMDCTL_CALLOUT_LISTENTRY* pEntry = RTListGetFirst(&pCmd->CalloutList.List, VBOXCRCMDCTL_CALLOUT_LISTENTRY, Node);
3304 if (pEntry)
3305 RTListNodeRemove(&pEntry->Node);
3306 RTCritSectLeave(&pVdma->CalloutCritSect);
3307
3308 if (!pEntry)
3309 break;
3310
3311 pEntry->pfnCb(pEntry);
3312 }
3313 else
3314 {
3315 WARN(("RTCritSectEnter failed %d\n", rc));
3316 break;
3317 }
3318 }
3319
3320 return rc;
3321}
3322
/**
 * Submits a host 3D control command and waits for its completion, servicing
 * callouts (vboxCmdVBVACmdCallout) issued by the command while waiting.
 *
 * @returns VBox status code: submission failure or the command's own result.
 * @param   pInterface  The display VBVA callbacks interface.
 * @param   pCmd        The control command; its callout list is (re)initialized here.
 * @param   cbCmd       Size of the command.
 */
DECLCALLBACK(int) vboxCmdVBVACmdHostCtlSync(PPDMIDISPLAYVBVACALLBACKS pInterface,
                                            struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd)
{
    PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
    struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
    if (pVdma == NULL)
        return VERR_INVALID_STATE;
    /* Completion context shared with vboxCmdVBVACmdHostCtlSyncCb. */
    VBOXCMDVBVA_CMDHOSTCTL_SYNC Data;
    Data.pVdma = pVdma;
    Data.fProcessing = 1;
    Data.rc = VERR_INTERNAL_ERROR;
    RTListInit(&pCmd->CalloutList.List);
    int rc = vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, vboxCmdVBVACmdHostCtlSyncCb, &Data);
    if (!RT_SUCCESS(rc))
    {
        WARN(("vdmaVBVACtlOpaqueHostSubmit failed %d", rc));
        return rc;
    }

    /* Wait for the completion callback to clear fProcessing, running any
       callouts the command queues in the meantime.  The timed wait makes the
       loop robust against a missed semaphore signal. */
    while (Data.fProcessing)
    {
        /* Poll infrequently to make sure no completed message has been missed. */
        RTSemEventMultiWait(pVdma->HostCrCtlCompleteEvent, 500);

        vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);

        if (Data.fProcessing)
            RTThreadYield();
    }

    /* extra check callouts */
    vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);

    /* 'Our' message has been processed, so should reset the semaphore.
     * There is still possible that another message has been processed
     * and the semaphore has been signalled again.
     * Reset only if there are no other messages completed.
     */
    int32_t c = ASMAtomicDecS32(&pVdma->i32cHostCrCtlCompleted);
    Assert(c >= 0);
    if (!c)
        RTSemEventMultiReset(pVdma->HostCrCtlCompleteEvent);

    /* Hand back the result recorded by the completion callback. */
    rc = Data.rc;
    if (!RT_SUCCESS(rc))
        WARN(("host call failed %d", rc));

    return rc;
}
3372
3373int vboxCmdVBVACmdCtl(PVGASTATE pVGAState, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
3374{
3375 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3376 int rc = VINF_SUCCESS;
3377 switch (pCtl->u32Type)
3378 {
3379 case VBOXCMDVBVACTL_TYPE_3DCTL:
3380 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, pCtl, cbCtl);
3381 case VBOXCMDVBVACTL_TYPE_RESIZE:
3382 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_RESIZE, pCtl, cbCtl);
3383 case VBOXCMDVBVACTL_TYPE_ENABLE:
3384 if (cbCtl != sizeof (VBOXCMDVBVA_CTL_ENABLE))
3385 {
3386 WARN(("incorrect enable size\n"));
3387 rc = VERR_INVALID_PARAMETER;
3388 break;
3389 }
3390 return vdmaVBVACtlEnableDisableSubmit(pVdma, (VBOXCMDVBVA_CTL_ENABLE*)pCtl);
3391 default:
3392 WARN(("unsupported type\n"));
3393 rc = VERR_INVALID_PARAMETER;
3394 break;
3395 }
3396
3397 pCtl->i32Result = rc;
3398 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
3399 AssertRC(rc);
3400 return VINF_SUCCESS;
3401}
3402
3403int vboxCmdVBVACmdSubmit(PVGASTATE pVGAState)
3404{
3405 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3406 {
3407 WARN(("vdma VBVA is disabled\n"));
3408 return VERR_INVALID_STATE;
3409 }
3410
3411 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3412}
3413
3414int vboxCmdVBVACmdFlush(PVGASTATE pVGAState)
3415{
3416 WARN(("flush\n"));
3417 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3418 {
3419 WARN(("vdma VBVA is disabled\n"));
3420 return VERR_INVALID_STATE;
3421 }
3422 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3423}
3424
3425void vboxCmdVBVACmdTimer(PVGASTATE pVGAState)
3426{
3427 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3428 return;
3429 vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3430}
3431
3432bool vboxCmdVBVAIsEnabled(PVGASTATE pVGAState)
3433{
3434 return VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva);
3435}
3436
3437#endif /* VBOX_WITH_CRHGSMI */
3438
3439int vboxVDMASaveStateExecPrep(struct VBOXVDMAHOST *pVdma)
3440{
3441#ifdef VBOX_WITH_CRHGSMI
3442 int rc = vdmaVBVAPause(pVdma);
3443 if (RT_SUCCESS(rc))
3444 return VINF_SUCCESS;
3445
3446 if (rc != VERR_INVALID_STATE)
3447 {
3448 WARN(("vdmaVBVAPause failed %d\n", rc));
3449 return rc;
3450 }
3451
3452# ifdef DEBUG_misha
3453 WARN(("debug prep"));
3454# endif
3455
3456 PVGASTATE pVGAState = pVdma->pVGAState;
3457 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
3458 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_BEGIN, sizeof (*pCmd));
3459 Assert(pCmd);
3460 if (pCmd)
3461 {
3462 rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
3463 AssertRC(rc);
3464 if (RT_SUCCESS(rc))
3465 {
3466 rc = vboxVDMACrCtlGetRc(pCmd);
3467 }
3468 vboxVDMACrCtlRelease(pCmd);
3469 return rc;
3470 }
3471 return VERR_NO_MEMORY;
3472#else
3473 RT_NOREF(pVdma);
3474 return VINF_SUCCESS;
3475#endif
3476}
3477
3478int vboxVDMASaveStateExecDone(struct VBOXVDMAHOST *pVdma)
3479{
3480#ifdef VBOX_WITH_CRHGSMI
3481 int rc = vdmaVBVAResume(pVdma);
3482 if (RT_SUCCESS(rc))
3483 return VINF_SUCCESS;
3484
3485 if (rc != VERR_INVALID_STATE)
3486 {
3487 WARN(("vdmaVBVAResume failed %d\n", rc));
3488 return rc;
3489 }
3490
3491# ifdef DEBUG_misha
3492 WARN(("debug done"));
3493# endif
3494
3495 PVGASTATE pVGAState = pVdma->pVGAState;
3496 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
3497 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_END, sizeof (*pCmd));
3498 Assert(pCmd);
3499 if (pCmd)
3500 {
3501 rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
3502 AssertRC(rc);
3503 if (RT_SUCCESS(rc))
3504 {
3505 rc = vboxVDMACrCtlGetRc(pCmd);
3506 }
3507 vboxVDMACrCtlRelease(pCmd);
3508 return rc;
3509 }
3510 return VERR_NO_MEMORY;
3511#else
3512 RT_NOREF(pVdma);
3513 return VINF_SUCCESS;
3514#endif
3515}
3516
/**
 * Saves the command VBVA state to the saved state stream.
 *
 * Writes UINT32_MAX as a "not enabled" marker, otherwise the VRAM offset of
 * the VBVA buffer followed by the state written by the HH_SAVESTATE handler.
 *
 * @returns VBox status code.
 * @param   pVdma   The VDMA instance.
 * @param   pSSM    The saved state handle.
 */
int vboxVDMASaveStateExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
{
    int rc;
#ifndef VBOX_WITH_CRHGSMI
    RT_NOREF(pVdma, pSSM);

#else
    /* Note the preprocessor trick: without VBOX_WITH_CRHGSMI the braced block
       below executes unconditionally; with it, only when VBVA is disabled. */
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
#endif
    {
        /* Marker: command VBVA not enabled, nothing further follows. */
        rc = SSMR3PutU32(pSSM, UINT32_MAX);
        AssertRCReturn(rc, rc);
        return VINF_SUCCESS;
    }

#ifdef VBOX_WITH_CRHGSMI
    PVGASTATE pVGAState = pVdma->pVGAState;
    uint8_t * pu8VramBase = pVGAState->vram_ptrR3;

    /* Save the VBVA buffer location as an offset into VRAM. */
    rc = SSMR3PutU32(pSSM, (uint32_t)(((uint8_t*)pVdma->CmdVbva.pVBVA) - pu8VramBase));
    AssertRCReturn(rc, rc);

    /* Let the worker thread serialize the rest of the state synchronously. */
    VBVAEXHOSTCTL HCtl;
    HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_SAVESTATE;
    HCtl.u.state.pSSM = pSSM;
    HCtl.u.state.u32Version = 0;
    return vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
#endif
}
3546
/**
 * Restores the command VBVA state from a saved state stream
 * (counterpart of vboxVDMASaveStateExecPerform).
 *
 * @returns VBox status code.
 * @param   pVdma       The VDMA instance.
 * @param   pSSM        The saved state handle.
 * @param   u32Version  The saved state unit version.
 */
int vboxVDMASaveLoadExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32;
    int rc = SSMR3GetU32(pSSM, &u32);
    AssertLogRelRCReturn(rc, rc);

    /* UINT32_MAX marks "command VBVA was not enabled" — nothing to restore. */
    if (u32 != UINT32_MAX)
    {
#ifdef VBOX_WITH_CRHGSMI
        /* Re-enable VBVA at the saved VRAM offset, in the paused state. */
        rc = vdmaVBVACtlEnableSubmitSync(pVdma, u32, true);
        AssertLogRelRCReturn(rc, rc);

        Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_ESTATE_PAUSED);

        /* Let the worker thread deserialize the rest of the state synchronously. */
        VBVAEXHOSTCTL HCtl;
        HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_LOADSTATE;
        HCtl.u.state.pSSM = pSSM;
        HCtl.u.state.u32Version = u32Version;
        rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
        AssertLogRelRCReturn(rc, rc);

        /* Leave the paused state entered by the enable above. */
        rc = vdmaVBVAResume(pVdma);
        AssertLogRelRCReturn(rc, rc);

        return VINF_SUCCESS;
#else
        /* Saved with 3D support, loading without it: cannot restore. */
        RT_NOREF(pVdma, u32Version);
        WARN(("Unsupported VBVACtl info!\n"));
        return VERR_VERSION_MISMATCH;
#endif
    }

    return VINF_SUCCESS;
}
3581
/**
 * Called when saved state loading has fully completed.
 *
 * If command VBVA is enabled, queues a LOADSTATE_DONE control to the worker
 * thread so post-load processing happens on that thread; the control carries
 * no payload and frees itself on completion.
 *
 * @returns VBox status code.
 */
int vboxVDMASaveLoadDone(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
        return VINF_SUCCESS;

    /* Notify the worker thread that state loading is done.  Submitted
       asynchronously; no completion callback is needed. */
    VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE);
    if (!pHCtl)
    {
        WARN(("VBoxVBVAExHCtlCreate failed\n"));
        return VERR_NO_MEMORY;
    }

    /* sanity */
    pHCtl->u.cmd.pu8Cmd = NULL;
    pHCtl->u.cmd.cbCmd = 0;

    /* NULL completion will just free the ctl up */
    int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_HOST, NULL, NULL);
    if (RT_FAILURE(rc))
    {
        Log(("vdmaVBVACtlSubmit failed rc %d\n", rc));
        /* Submission failed, so the control was never queued; free it here. */
        VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
        return rc;
    }
#else
    RT_NOREF(pVdma);
#endif
    return VINF_SUCCESS;
}
3614
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette