VirtualBox

source: vbox/trunk/src/VBox/Devices/Graphics/DevVGA_VDMA.cpp@ 63547

Last change on this file since 63547 was 63478, checked in by vboxsync, 8 years ago

Devices: warnings (clang)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 114.6 KB
Line 
1/* $Id: DevVGA_VDMA.cpp 63478 2016-08-15 14:04:10Z vboxsync $ */
2/** @file
3 * Video DMA (VDMA) support.
4 */
5
6/*
7 * Copyright (C) 2006-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#include <VBox/VMMDev.h>
23#include <VBox/vmm/pdmdev.h>
24#include <VBox/vmm/pgm.h>
25#include <VBox/VBoxVideo.h>
26#include <iprt/semaphore.h>
27#include <iprt/thread.h>
28#include <iprt/mem.h>
29#include <iprt/asm.h>
30#include <iprt/list.h>
31#include <iprt/param.h>
32
33#include "DevVGA.h"
34#include "HGSMI/SHGSMIHost.h"
35
36#include <VBox/VBoxVideo3D.h>
37#include <VBox/VBoxVideoHost3D.h>
38
39#ifdef DEBUG_misha
40# define VBOXVDBG_MEMCACHE_DISABLE
41#endif
42
43#ifndef VBOXVDBG_MEMCACHE_DISABLE
44# include <iprt/memcache.h>
45#endif
46
47/*********************************************************************************************************************************
48* Defined Constants And Macros *
49*********************************************************************************************************************************/
50#ifdef DEBUG_misha
51# define WARN_BP() do { AssertFailed(); } while (0)
52#else
53# define WARN_BP() do { } while (0)
54#endif
55#define WARN(_msg) do { \
56 LogRel(_msg); \
57 WARN_BP(); \
58 } while (0)
59
60#define VBOXVDMATHREAD_STATE_TERMINATED 0
61#define VBOXVDMATHREAD_STATE_CREATING 1
62#define VBOXVDMATHREAD_STATE_CREATED 3
63#define VBOXVDMATHREAD_STATE_TERMINATING 4
64
65
66/*********************************************************************************************************************************
67* Structures and Typedefs *
68*********************************************************************************************************************************/
69struct VBOXVDMATHREAD;
70
71typedef DECLCALLBACKPTR(void, PFNVBOXVDMATHREAD_CHANGED)(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvChangeContext);
72
73#ifdef VBOX_WITH_CRHGSMI
74static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd, VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb);
75#endif
76
77
/**
 * VDMA worker thread bookkeeping.
 *
 * The state transitions (TERMINATED -> CREATING -> CREATED -> TERMINATING ->
 * TERMINATED) are driven by VBoxVDMAThreadCreate/Term/Cleanup and the
 * NotifyConstruct/NotifyTerminating callbacks below.
 */
typedef struct VBOXVDMATHREAD
{
    RTTHREAD hWorkerThread;                 /**< Handle of the worker thread (waitable). */
    RTSEMEVENT hEvent;                      /**< Event semaphore used to poke the worker. */
    volatile uint32_t u32State;             /**< VBOXVDMATHREAD_STATE_XXX; written with atomic ops. */
    PFNVBOXVDMATHREAD_CHANGED pfnChanged;   /**< One-shot callback invoked on the next create/terminate transition; cleared before invocation. */
    void *pvChanged;                        /**< User context passed to pfnChanged. */
} VBOXVDMATHREAD, *PVBOXVDMATHREAD;
86
87
88/* state transformations:
89 *
90 * submitter | processor
91 *
92 * LISTENING ---> PROCESSING
93 *
94 * */
95#define VBVAEXHOSTCONTEXT_STATE_LISTENING 0
96#define VBVAEXHOSTCONTEXT_STATE_PROCESSING 1
97
98#define VBVAEXHOSTCONTEXT_ESTATE_DISABLED -1
99#define VBVAEXHOSTCONTEXT_ESTATE_PAUSED 0
100#define VBVAEXHOSTCONTEXT_ESTATE_ENABLED 1
101
typedef struct VBVAEXHOSTCONTEXT
{
    VBVABUFFER *pVBVA;              /**< The guest VBVA ring buffer; NULL until enabled. */
    volatile int32_t i32State;      /**< VBVAEXHOSTCONTEXT_STATE_XXX (LISTENING/PROCESSING); see the transition diagram above. */
    volatile int32_t i32EnableState;/**< VBVAEXHOSTCONTEXT_ESTATE_XXX (DISABLED/PAUSED/ENABLED). */
    volatile uint32_t u32cCtls;     /**< Total number of queued controls (host + guest lists). */
    /* critical section for accessing ctl lists */
    RTCRITSECT CltCritSect;
    RTLISTANCHOR GuestCtlList;      /**< Pending guest-submitted controls. */
    RTLISTANCHOR HostCtlList;       /**< Pending host-submitted controls; served before guest ones. */
#ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMEMCACHE CtlCache;            /**< Allocation cache for VBVAEXHOSTCTL instances. */
#endif
} VBVAEXHOSTCONTEXT;
116
/**
 * Control command types.  Prefix convention (from the submitters visible in
 * this file): HH_* are host-internal controls, GHH_* are guest-originated
 * controls handled by the host.
 */
typedef enum
{
    VBVAEXHOSTCTL_TYPE_UNDEFINED = 0,
    VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE,
    VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME,
    VBVAEXHOSTCTL_TYPE_HH_SAVESTATE,
    VBVAEXHOSTCTL_TYPE_HH_LOADSTATE,
    VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE,
    VBVAEXHOSTCTL_TYPE_HH_BE_OPAQUE,
    VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD,
    VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE,
    VBVAEXHOSTCTL_TYPE_GHH_ENABLE,
    VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED,
    VBVAEXHOSTCTL_TYPE_GHH_DISABLE,
    VBVAEXHOSTCTL_TYPE_GHH_RESIZE
} VBVAEXHOSTCTL_TYPE;
133
134struct VBVAEXHOSTCTL;
135
136typedef DECLCALLBACKPTR(void, PFNVBVAEXHOSTCTL_COMPLETE)(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvComplete);
137
/**
 * A single queued control command.
 *
 * Linked into either GuestCtlList or HostCtlList of a VBVAEXHOSTCONTEXT.
 * Completed via pfnComplete when set, otherwise freed directly (see
 * VBoxVBVAExHPDataCompleteCtl).
 */
typedef struct VBVAEXHOSTCTL
{
    RTLISTNODE Node;                /**< List linkage. */
    VBVAEXHOSTCTL_TYPE enmType;     /**< The control type; selects the union arm. */
    union
    {
        /** Opaque command payload (BE_OPAQUE and similar types). */
        struct
        {
            uint8_t * pu8Cmd;       /**< Points into guest VRAM (see save/load code below). */
            uint32_t cbCmd;         /**< Size of the command in bytes. */
        } cmd;

        /** Saved-state parameters (HH_SAVESTATE / HH_LOADSTATE). */
        struct
        {
            PSSMHANDLE pSSM;
            uint32_t u32Version;
        } state;
    } u;
    PFNVBVAEXHOSTCTL_COMPLETE pfnComplete;  /**< Optional completion callback; NULL means just free the control. */
    void *pvComplete;                       /**< User context for pfnComplete. */
} VBVAEXHOSTCTL;
159
/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
 * but can be called with other VBoxVBVAExS** (submitter) functions except Init/Start/Term apparently.
 * They may only be called by the processor, i.e. the entity that acquired the processor state
 * by a direct or indirect call to VBoxVBVAExHSCheckCommands.
 * See more detailed comments in the headers for the function definitions. */
/** Kind of item returned by VBoxVBVAExHPDataGet. */
typedef enum
{
    VBVAEXHOST_DATA_TYPE_NO_DATA = 0,   /**< Nothing pending; the processor state was released. */
    VBVAEXHOST_DATA_TYPE_CMD,           /**< A command from the guest VBVA ring buffer. */
    VBVAEXHOST_DATA_TYPE_HOSTCTL,       /**< A host-submitted VBVAEXHOSTCTL. */
    VBVAEXHOST_DATA_TYPE_GUESTCTL       /**< A guest-submitted VBVAEXHOSTCTL. */
} VBVAEXHOST_DATA_TYPE;
171
172
173#ifdef VBOX_WITH_CRHGSMI
/** Per-source screen info and target-screen map (currently unused, see the
 *  commented-out aSources member of VBOXVDMAHOST). */
typedef struct VBOXVDMA_SOURCE
{
    VBVAINFOSCREEN Screen;
    VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
} VBOXVDMA_SOURCE;
179#endif
180
/**
 * The VDMA host state.
 */
typedef struct VBOXVDMAHOST
{
    PHGSMIINSTANCE pHgsmi;      /**< The HGSMI instance used for command transport. */
    PVGASTATE pVGAState;        /**< The owning VGA device state. */
#ifdef VBOX_WITH_CRHGSMI
    VBVAEXHOSTCONTEXT CmdVbva;  /**< The command VBVA context (queues + ring buffer). */
    VBOXVDMATHREAD Thread;      /**< The worker thread processing CmdVbva. */
    VBOXCRCMD_SVRINFO CrSrvInfo;
    VBVAEXHOSTCTL* pCurRemainingHostCtl;    /**< Host control left over during disable processing. */
    RTSEMEVENTMULTI HostCrCtlCompleteEvent;
    int32_t volatile i32cHostCrCtlCompleted;
    RTCRITSECT CalloutCritSect; /**< Protects the callout list (see vboxCmdVBVACmdCallout). */
//    VBOXVDMA_SOURCE aSources[VBOX_VIDEO_MAX_SCREENS];
#endif
#ifdef VBOX_VDMA_WITH_WATCHDOG
    PTMTIMERR3 WatchDogTimer;   /**< Optional watchdog timer. */
#endif
} VBOXVDMAHOST, *PVBOXVDMAHOST;
199
200
201/*********************************************************************************************************************************
202* Internal Functions *
203*********************************************************************************************************************************/
204#ifdef VBOX_WITH_CRHGSMI
205static DECLCALLBACK(int) vdmaVBVANotifyDisable(PVGASTATE pVGAState);
206static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd);
207
208static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd);
209static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc);
210
/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
 * but can be called concurrently with other VBoxVBVAEx** functions except Init/Start/Term apparently */
213static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva);
214
215static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva);
216static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA);
217static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva);
218static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva);
219static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM);
220static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version);
221
222#endif /* VBOX_WITH_CRHGSMI */
223
224
225
226#ifdef VBOX_WITH_CRHGSMI
227
228static VBVAEXHOSTCTL* VBoxVBVAExHCtlAlloc(VBVAEXHOSTCONTEXT *pCmdVbva)
229{
230# ifndef VBOXVDBG_MEMCACHE_DISABLE
231 return (VBVAEXHOSTCTL*)RTMemCacheAlloc(pCmdVbva->CtlCache);
232# else
233 return (VBVAEXHOSTCTL*)RTMemAlloc(sizeof (VBVAEXHOSTCTL));
234# endif
235}
236
237static void VBoxVBVAExHCtlFree(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl)
238{
239# ifndef VBOXVDBG_MEMCACHE_DISABLE
240 RTMemCacheFree(pCmdVbva->CtlCache, pCtl);
241# else
242 RTMemFree(pCtl);
243# endif
244}
245
246static VBVAEXHOSTCTL *VBoxVBVAExHCtlCreate(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL_TYPE enmType)
247{
248 VBVAEXHOSTCTL* pCtl = VBoxVBVAExHCtlAlloc(pCmdVbva);
249 if (!pCtl)
250 {
251 WARN(("VBoxVBVAExHCtlAlloc failed\n"));
252 return NULL;
253 }
254
255 pCtl->enmType = enmType;
256 return pCtl;
257}
258
259static int vboxVBVAExHSProcessorAcquire(struct VBVAEXHOSTCONTEXT *pCmdVbva)
260{
261 Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);
262
263 if (ASMAtomicCmpXchgS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_PROCESSING, VBVAEXHOSTCONTEXT_STATE_LISTENING))
264 return VINF_SUCCESS;
265 return VERR_SEM_BUSY;
266}
267
/**
 * Dequeues the next pending control command (processor context only).
 *
 * Host controls always take priority; guest controls are only considered when
 * @a fHostOnlyMode is false and the context is not paused.
 *
 * @returns The dequeued control (removed from its list, u32cCtls decremented),
 *          or NULL if there is nothing eligible or the critsect could not be
 *          entered.
 * @param   pCmdVbva        The command VBVA context.
 * @param   pfHostCtl       Where to return whether the control came from the
 *                          host list (only set when a control is returned).
 * @param   fHostOnlyMode   When true, only the host list is served.
 */
static VBVAEXHOSTCTL* vboxVBVAExHPCheckCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, bool *pfHostCtl, bool fHostOnlyMode)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);

    /* Lock-free fast path: nothing queued at all. */
    if (!fHostOnlyMode && !ASMAtomicUoReadU32(&pCmdVbva->u32cCtls))
        return NULL;

    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
        VBVAEXHOSTCTL* pCtl = RTListGetFirst(&pCmdVbva->HostCtlList, VBVAEXHOSTCTL, Node);
        if (pCtl)
            *pfHostCtl = true;
        else if (!fHostOnlyMode)
        {
            /* Guest controls are held back while paused. */
            if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
            {
                pCtl = RTListGetFirst(&pCmdVbva->GuestCtlList, VBVAEXHOSTCTL, Node);
                /* pCtl can not be null here since pCmdVbva->u32cCtls is not null,
                 * and there are no HostCtl commands*/
                Assert(pCtl);
                *pfHostCtl = false;
            }
        }

        if (pCtl)
        {
            RTListNodeRemove(&pCtl->Node);
            ASMAtomicDecU32(&pCmdVbva->u32cCtls);
        }

        RTCritSectLeave(&pCmdVbva->CltCritSect);

        return pCtl;
    }
    else
        WARN(("RTCritSectEnter failed %d\n", rc));

    return NULL;
}
308
309static VBVAEXHOSTCTL* VBoxVBVAExHPCheckHostCtlOnDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
310{
311 bool fHostCtl = false;
312 VBVAEXHOSTCTL* pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostCtl, true);
313 Assert(!pCtl || fHostCtl);
314 return pCtl;
315}
316
317static int VBoxVBVAExHPPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
318{
319 if (pCmdVbva->i32EnableState < VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
320 {
321 WARN(("Invalid state\n"));
322 return VERR_INVALID_STATE;
323 }
324
325 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
326 return VINF_SUCCESS;
327}
328
329static int VBoxVBVAExHPResume(struct VBVAEXHOSTCONTEXT *pCmdVbva)
330{
331 if (pCmdVbva->i32EnableState != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
332 {
333 WARN(("Invalid state\n"));
334 return VERR_INVALID_STATE;
335 }
336
337 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
338 return VINF_SUCCESS;
339}
340
341static bool vboxVBVAExHPCheckProcessCtlInternal(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl)
342{
343 switch (pCtl->enmType)
344 {
345 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE:
346 {
347 VBoxVBVAExHPPause(pCmdVbva);
348 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
349 return true;
350 }
351 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME:
352 {
353 VBoxVBVAExHPResume(pCmdVbva);
354 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
355 return true;
356 }
357 default:
358 return false;
359 }
360}
361
362static void vboxVBVAExHPProcessorRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
363{
364 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
365
366 ASMAtomicWriteS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
367}
368
369static void vboxVBVAExHPHgEventSet(struct VBVAEXHOSTCONTEXT *pCmdVbva)
370{
371 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
372 if (pCmdVbva->pVBVA)
373 ASMAtomicOrU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, VBVA_F_STATE_PROCESSING);
374}
375
376static void vboxVBVAExHPHgEventClear(struct VBVAEXHOSTCONTEXT *pCmdVbva)
377{
378 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
379 if (pCmdVbva->pVBVA)
380 ASMAtomicAndU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, ~VBVA_F_STATE_PROCESSING);
381}
382
/**
 * Peeks at the next command record in the guest VBVA ring buffer.
 *
 * Does not consume the record; completion is done separately via
 * VBoxVBVAExHPDataCompleteCmd.
 *
 * @returns VINF_SUCCESS when *ppCmd/*pcbCmd were set to a complete command,
 *          VINF_EOF when the ring is empty (outputs untouched),
 *          VINF_TRY_AGAIN when the guest is still writing the record,
 *          VERR_INVALID_STATE for commands crossing the ring boundary
 *          (unsupported).
 * @param   pCmdVbva    The command VBVA context (processor, not paused).
 * @param   ppCmd       Where to return the command pointer (into the ring).
 * @param   pcbCmd      Where to return the command size.
 */
static int vboxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pCmdVbva->i32EnableState > VBVAEXHOSTCONTEXT_ESTATE_PAUSED);

    VBVABUFFER *pVBVA = pCmdVbva->pVBVA;

    uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
    uint32_t indexRecordFree = pVBVA->indexRecordFree;

    Log(("first = %d, free = %d\n",
         indexRecordFirst, indexRecordFree));

    if (indexRecordFirst == indexRecordFree)
    {
        /* No records to process. Return without assigning output variables. */
        return VINF_EOF;
    }

    uint32_t cbRecordCurrent = ASMAtomicReadU32(&pVBVA->aRecords[indexRecordFirst].cbRecord);

    /* A new record need to be processed. */
    if (cbRecordCurrent & VBVA_F_RECORD_PARTIAL)
    {
        /* the record is being recorded, try again */
        return VINF_TRY_AGAIN;
    }

    uint32_t cbRecord = cbRecordCurrent & ~VBVA_F_RECORD_PARTIAL;

    if (!cbRecord)
    {
        /* the record is being recorded, try again */
        return VINF_TRY_AGAIN;
    }

    /* we should not get partial commands here actually */
    Assert(cbRecord);

    /* The size of largest contiguous chunk in the ring buffer. */
    uint32_t u32BytesTillBoundary = pVBVA->cbData - pVBVA->off32Data;

    /* The pointer to data in the ring buffer. */
    uint8_t *pSrc = &pVBVA->au8Data[pVBVA->off32Data];

    /* Fetch or point the data. */
    if (u32BytesTillBoundary >= cbRecord)
    {
        /* The command does not cross buffer boundary. Return address in the buffer. */
        *ppCmd = pSrc;
        *pcbCmd = cbRecord;
        return VINF_SUCCESS;
    }

    LogRel(("CmdVbva: cross-bound writes unsupported\n"));
    return VERR_INVALID_STATE;
}
440
441static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd)
442{
443 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
444 pVBVA->off32Data = (pVBVA->off32Data + cbCmd) % pVBVA->cbData;
445
446 pVBVA->indexRecordFirst = (pVBVA->indexRecordFirst + 1) % RT_ELEMENTS(pVBVA->aRecords);
447}
448
449static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc)
450{
451 if (pCtl->pfnComplete)
452 pCtl->pfnComplete(pCmdVbva, pCtl, rc, pCtl->pvComplete);
453 else
454 VBoxVBVAExHCtlFree(pCmdVbva, pCtl);
455}
456
457
/**
 * Worker for VBoxVBVAExHPDataGet: fetches the next work item (control or ring
 * buffer command) for the processor.
 *
 * Internal pause/resume controls are handled inline and not returned.
 *
 * @returns The type of data returned in *ppCmd/*pcbCmd; NO_DATA when the
 *          queues and ring are empty or the context is paused/disabled.
 * @param   pCmdVbva    The command VBVA context (must be the processor).
 * @param   ppCmd       Where to return a pointer to the item (a VBVAEXHOSTCTL
 *                      for the *CTL types, ring data for CMD).
 * @param   pcbCmd      Where to return the item size.
 */
static VBVAEXHOST_DATA_TYPE vboxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    VBVAEXHOSTCTL *pCtl;
    bool fHostClt;

    for (;;)
    {
        /* Controls take precedence over ring buffer commands. */
        pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostClt, false);
        if (pCtl)
        {
            if (fHostClt)
            {
                if (!vboxVBVAExHPCheckProcessCtlInternal(pCmdVbva, pCtl))
                {
                    *ppCmd = (uint8_t*)pCtl;
                    *pcbCmd = sizeof (*pCtl);
                    return VBVAEXHOST_DATA_TYPE_HOSTCTL;
                }
                /* Internal control consumed inline; look for the next item. */
                continue;
            }
            else
            {
                *ppCmd = (uint8_t*)pCtl;
                *pcbCmd = sizeof (*pCtl);
                return VBVAEXHOST_DATA_TYPE_GUESTCTL;
            }
        }

        /* Ring buffer commands are only served while fully enabled. */
        if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) <= VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
            return VBVAEXHOST_DATA_TYPE_NO_DATA;

        int rc = vboxVBVAExHPCmdGet(pCmdVbva, ppCmd, pcbCmd);
        switch (rc)
        {
            case VINF_SUCCESS:
                return VBVAEXHOST_DATA_TYPE_CMD;
            case VINF_EOF:
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
            case VINF_TRY_AGAIN:
                /* The guest is still writing the record; back off briefly. */
                RTThreadSleep(1);
                continue;
            default:
                /* this is something really unexpected, i.e. most likely guest has written something incorrect to the VBVA buffer */
                WARN(("Warning: vboxVBVAExHCmdGet returned unexpected status %d\n", rc));
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
        }
    }
    /* not reached */
}
508
/**
 * Fetches the next work item for the processor, releasing the processor role
 * (and clearing the guest-visible "processing" flag) when nothing is pending.
 *
 * @returns The type of data returned in *ppCmd/*pcbCmd; on NO_DATA the caller
 *          no longer holds the processor role.
 * @param   pCmdVbva    The command VBVA context (caller must be the processor).
 * @param   ppCmd       Where to return the item pointer.
 * @param   pcbCmd      Where to return the item size.
 */
static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    VBVAEXHOST_DATA_TYPE enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
    if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
    {
        vboxVBVAExHPHgEventClear(pCmdVbva);
        vboxVBVAExHPProcessorRelease(pCmdVbva);
        /* we need to prevent racing between us clearing the flag and command check/submission thread, i.e.
         * 1. we check the queue -> and it is empty
         * 2. submitter adds command to the queue
         * 3. submitter checks the "processing" -> and it is true , thus it does not submit a notification
         * 4. we clear the "processing" state
         * 5. ->here we need to re-check the queue state to ensure we do not leak the notification of the above command
         * 6. if the queue appears to be not-empty set the "processing" state back to "true"
         **/
        int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
        if (RT_SUCCESS(rc))
        {
            /* we are the processor now */
            enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
            if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
            {
                /* Still nothing — give the processor role back up. */
                vboxVBVAExHPProcessorRelease(pCmdVbva);
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
            }

            /* Something did arrive; re-advertise that we are processing. */
            vboxVBVAExHPHgEventSet(pCmdVbva);
        }
    }

    return enmType;
}
541
542DECLINLINE(bool) vboxVBVAExHSHasCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
543{
544 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
545
546 if (pVBVA)
547 {
548 uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
549 uint32_t indexRecordFree = pVBVA->indexRecordFree;
550
551 if (indexRecordFirst != indexRecordFree)
552 return true;
553 }
554
555 return !!ASMAtomicReadU32(&pCmdVbva->u32cCtls);
556}
557
558/** Checks whether the new commands are ready for processing
559 * @returns
560 * VINF_SUCCESS - there are commands are in a queue, and the given thread is now the processor (i.e. typically it would delegate processing to a worker thread)
561 * VINF_EOF - no commands in a queue
562 * VINF_ALREADY_INITIALIZED - another thread already processing the commands
563 * VERR_INVALID_STATE - the VBVA is paused or pausing */
564static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
565{
566 int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
567 if (RT_SUCCESS(rc))
568 {
569 /* we are the processor now */
570 if (vboxVBVAExHSHasCommands(pCmdVbva))
571 {
572 vboxVBVAExHPHgEventSet(pCmdVbva);
573 return VINF_SUCCESS;
574 }
575
576 vboxVBVAExHPProcessorRelease(pCmdVbva);
577 return VINF_EOF;
578 }
579 if (rc == VERR_SEM_BUSY)
580 return VINF_ALREADY_INITIALIZED;
581 return VERR_INVALID_STATE;
582}
583
/**
 * Initializes a command VBVA context: zeroes it, creates the control-list
 * critical section and (unless compiled out) the control allocation cache,
 * and initializes the control list anchors.
 *
 * Note: the context starts in the PROCESSING state with VBVA DISABLED.
 *
 * @returns VBox status code.
 * @param   pCmdVbva    The context to initialize.
 */
static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
    int rc = RTCritSectInit(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
# ifndef VBOXVDBG_MEMCACHE_DISABLE
        rc = RTMemCacheCreate(&pCmdVbva->CtlCache, sizeof (VBVAEXHOSTCTL),
                              0, /* size_t cbAlignment */
                              UINT32_MAX, /* uint32_t cMaxObjects */
                              NULL, /* PFNMEMCACHECTOR pfnCtor*/
                              NULL, /* PFNMEMCACHEDTOR pfnDtor*/
                              NULL, /* void *pvUser*/
                              0 /* uint32_t fFlags*/
                              );
        if (RT_SUCCESS(rc))
# endif
        {
            RTListInit(&pCmdVbva->GuestCtlList);
            RTListInit(&pCmdVbva->HostCtlList);
            pCmdVbva->i32State = VBVAEXHOSTCONTEXT_STATE_PROCESSING;
            pCmdVbva->i32EnableState = VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
            return VINF_SUCCESS;
        }
# ifndef VBOXVDBG_MEMCACHE_DISABLE
        else
            WARN(("RTMemCacheCreate failed %d\n", rc));
# endif
    }
    else
        WARN(("RTCritSectInit failed %d\n", rc));

    return rc;
}
618
619DECLINLINE(bool) VBoxVBVAExHSIsEnabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
620{
621 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) >= VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
622}
623
624DECLINLINE(bool) VBoxVBVAExHSIsDisabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
625{
626 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) == VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
627}
628
629static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA)
630{
631 if (VBoxVBVAExHSIsEnabled(pCmdVbva))
632 {
633 WARN(("VBVAEx is enabled already\n"));
634 return VERR_INVALID_STATE;
635 }
636
637 pCmdVbva->pVBVA = pVBVA;
638 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
639 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
640 return VINF_SUCCESS;
641}
642
643static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
644{
645 if (VBoxVBVAExHSIsDisabled(pCmdVbva))
646 return VINF_SUCCESS;
647
648 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
649 return VINF_SUCCESS;
650}
651
/**
 * Tears down a command VBVA context: clears the guest-visible event flags,
 * destroys the critical section and the control cache, and zeroes the
 * structure.
 *
 * The caller must ensure the processor is stopped and both control lists are
 * empty (asserted below).
 *
 * @param   pCmdVbva    The context to terminate.
 */
static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    /* ensure the processor is stopped */
    Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);

    /* ensure no one tries to submit the command */
    if (pCmdVbva->pVBVA)
        pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;

    Assert(RTListIsEmpty(&pCmdVbva->GuestCtlList));
    Assert(RTListIsEmpty(&pCmdVbva->HostCtlList));

    RTCritSectDelete(&pCmdVbva->CltCritSect);

# ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMemCacheDestroy(pCmdVbva->CtlCache);
# endif

    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
}
672
673static int vboxVBVAExHSSaveGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
674{
675 RT_NOREF(pCmdVbva);
676 int rc = SSMR3PutU32(pSSM, pCtl->enmType);
677 AssertRCReturn(rc, rc);
678 rc = SSMR3PutU32(pSSM, pCtl->u.cmd.cbCmd);
679 AssertRCReturn(rc, rc);
680 rc = SSMR3PutU32(pSSM, (uint32_t)(pCtl->u.cmd.pu8Cmd - pu8VramBase));
681 AssertRCReturn(rc, rc);
682
683 return VINF_SUCCESS;
684}
685
/**
 * Saves all queued guest controls; caller holds CltCritSect.
 *
 * The list is terminated in the stream with a zero type marker, which the
 * load side (vboxVBVAExHSLoadGuestCtl) interprets as end-of-list.
 *
 * @returns VBox status code.
 * @param   pCmdVbva    The command VBVA context (must be PAUSED).
 * @param   pu8VramBase Base of the guest VRAM.
 * @param   pSSM        The saved state handle.
 */
static int vboxVBVAExHSSaveStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
{
    if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    {
        WARN(("vbva not paused\n"));
        return VERR_INVALID_STATE;
    }

    VBVAEXHOSTCTL* pCtl;
    int rc;
    RTListForEach(&pCmdVbva->GuestCtlList, pCtl, VBVAEXHOSTCTL, Node)
    {
        rc = vboxVBVAExHSSaveGuestCtl(pCmdVbva, pCtl, pu8VramBase, pSSM);
        AssertRCReturn(rc, rc);
    }

    /* Zero type == end-of-list marker. */
    rc = SSMR3PutU32(pSSM, 0);
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}
707
708
709/** Saves state
710 * @returns - same as VBoxVBVAExHSCheckCommands, or failure on load state fail
711 */
712static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
713{
714 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
715 if (RT_FAILURE(rc))
716 {
717 WARN(("RTCritSectEnter failed %d\n", rc));
718 return rc;
719 }
720
721 rc = vboxVBVAExHSSaveStateLocked(pCmdVbva, pu8VramBase, pSSM);
722 if (RT_FAILURE(rc))
723 WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
724
725 RTCritSectLeave(&pCmdVbva->CltCritSect);
726
727 return rc;
728}
729
730static int vboxVBVAExHSLoadGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
731{
732 RT_NOREF(u32Version);
733 uint32_t u32;
734 int rc = SSMR3GetU32(pSSM, &u32);
735 AssertLogRelRCReturn(rc, rc);
736
737 if (!u32)
738 return VINF_EOF;
739
740 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(pCmdVbva, (VBVAEXHOSTCTL_TYPE)u32);
741 if (!pHCtl)
742 {
743 WARN(("VBoxVBVAExHCtlCreate failed\n"));
744 return VERR_NO_MEMORY;
745 }
746
747 rc = SSMR3GetU32(pSSM, &u32);
748 AssertLogRelRCReturn(rc, rc);
749 pHCtl->u.cmd.cbCmd = u32;
750
751 rc = SSMR3GetU32(pSSM, &u32);
752 AssertLogRelRCReturn(rc, rc);
753 pHCtl->u.cmd.pu8Cmd = pu8VramBase + u32;
754
755 RTListAppend(&pCmdVbva->GuestCtlList, &pHCtl->Node);
756 ++pCmdVbva->u32cCtls;
757
758 return VINF_SUCCESS;
759}
760
761
/**
 * Loads all saved guest controls; caller holds CltCritSect.
 *
 * Reads entries until vboxVBVAExHSLoadGuestCtl returns VINF_EOF (the zero
 * type end-of-list marker written by the save side).
 *
 * @returns VBox status code.
 * @param   pCmdVbva    The command VBVA context (must be PAUSED).
 * @param   pu8VramBase Base of the guest VRAM.
 * @param   pSSM        The saved state handle.
 * @param   u32Version  The saved state version.
 */
static int vboxVBVAExHSLoadStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
{
    if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    {
        WARN(("vbva not stopped\n"));
        return VERR_INVALID_STATE;
    }

    int rc;

    do {
        rc = vboxVBVAExHSLoadGuestCtl(pCmdVbva, pu8VramBase, pSSM, u32Version);
        /* Only actual failures bail out here; VINF_EOF is a success status. */
        AssertLogRelRCReturn(rc, rc);
    } while (VINF_EOF != rc);

    return VINF_SUCCESS;
}
779
780/** Loads state
781 * @returns - same as VBoxVBVAExHSCheckCommands, or failure on load state fail
782 */
783static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
784{
785 Assert(VGA_SAVEDSTATE_VERSION_3D <= u32Version);
786 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
787 if (RT_FAILURE(rc))
788 {
789 WARN(("RTCritSectEnter failed %d\n", rc));
790 return rc;
791 }
792
793 rc = vboxVBVAExHSLoadStateLocked(pCmdVbva, pu8VramBase, pSSM, u32Version);
794 if (RT_FAILURE(rc))
795 WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
796
797 RTCritSectLeave(&pCmdVbva->CltCritSect);
798
799 return rc;
800}
801
802typedef enum
803{
804 VBVAEXHOSTCTL_SOURCE_GUEST = 0,
805 VBVAEXHOSTCTL_SOURCE_HOST
806} VBVAEXHOSTCTL_SOURCE;
807
808
/**
 * Queues a control command on the host or guest list and kicks command
 * processing.
 *
 * @returns Status of VBoxVBVAExHSCheckCommands on success (VINF_SUCCESS means
 *          the caller became the processor), VERR_INVALID_STATE when the
 *          context is disabled, or a critsect failure.
 * @param   pCmdVbva    The command VBVA context.
 * @param   pCtl        The control to submit; its completion fields are set here.
 * @param   enmSource   Which queue to use (host or guest).
 * @param   pfnComplete Optional completion callback.
 * @param   pvComplete  User context for pfnComplete.
 */
static int VBoxVBVAExHCtlSubmit(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
{
    /* Unlocked early-out; re-checked under the lock below. */
    if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
    {
        Log(("cmd vbva not enabled\n"));
        return VERR_INVALID_STATE;
    }

    pCtl->pfnComplete = pfnComplete;
    pCtl->pvComplete = pvComplete;

    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
        /* Re-check under the lock: the context may have been disabled meanwhile. */
        if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
        {
            Log(("cmd vbva not enabled\n"));
            RTCritSectLeave(&pCmdVbva->CltCritSect);
            return VERR_INVALID_STATE;
        }

        if (enmSource > VBVAEXHOSTCTL_SOURCE_GUEST)
        {
            RTListAppend(&pCmdVbva->HostCtlList, &pCtl->Node);
        }
        else
            RTListAppend(&pCmdVbva->GuestCtlList, &pCtl->Node);

        ASMAtomicIncU32(&pCmdVbva->u32cCtls);

        RTCritSectLeave(&pCmdVbva->CltCritSect);

        /* Wake up processing so the new control gets picked up. */
        rc = VBoxVBVAExHSCheckCommands(pCmdVbva);
    }
    else
        WARN(("RTCritSectEnter failed %d\n", rc));

    return rc;
}
848
849void VBoxVDMAThreadNotifyConstructSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
850{
851 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_CREATING);
852 PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
853 void *pvChanged = pThread->pvChanged;
854
855 pThread->pfnChanged = NULL;
856 pThread->pvChanged = NULL;
857
858 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_CREATED);
859
860 if (pfnChanged)
861 pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
862}
863
864void VBoxVDMAThreadNotifyTerminatingSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
865{
866 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_TERMINATING);
867 PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
868 void *pvChanged = pThread->pvChanged;
869
870 pThread->pfnChanged = NULL;
871 pThread->pvChanged = NULL;
872
873 if (pfnChanged)
874 pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
875}
876
877DECLINLINE(bool) VBoxVDMAThreadIsTerminating(PVBOXVDMATHREAD pThread)
878{
879 return ASMAtomicUoReadU32(&pThread->u32State) == VBOXVDMATHREAD_STATE_TERMINATING;
880}
881
/** Zero-initializes the thread state structure and marks it TERMINATED
 *  (i.e. ready for VBoxVDMAThreadCreate). */
void VBoxVDMAThreadInit(PVBOXVDMATHREAD pThread)
{
    memset(pThread, 0, sizeof (*pThread));
    pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
}
887
888int VBoxVDMAThreadCleanup(PVBOXVDMATHREAD pThread)
889{
890 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
891 switch (u32State)
892 {
893 case VBOXVDMATHREAD_STATE_TERMINATED:
894 return VINF_SUCCESS;
895 case VBOXVDMATHREAD_STATE_TERMINATING:
896 {
897 int rc = RTThreadWait(pThread->hWorkerThread, RT_INDEFINITE_WAIT, NULL);
898 if (!RT_SUCCESS(rc))
899 {
900 WARN(("RTThreadWait failed %d\n", rc));
901 return rc;
902 }
903
904 RTSemEventDestroy(pThread->hEvent);
905
906 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATED);
907 return VINF_SUCCESS;
908 }
909 default:
910 WARN(("invalid state"));
911 return VERR_INVALID_STATE;
912 }
913}
914
915int VBoxVDMAThreadCreate(PVBOXVDMATHREAD pThread, PFNRTTHREAD pfnThread, void *pvThread, PFNVBOXVDMATHREAD_CHANGED pfnCreated, void*pvCreated)
916{
917 int rc = VBoxVDMAThreadCleanup(pThread);
918 if (RT_FAILURE(rc))
919 {
920 WARN(("VBoxVDMAThreadCleanup failed %d\n", rc));
921 return rc;
922 }
923
924 rc = RTSemEventCreate(&pThread->hEvent);
925 if (RT_SUCCESS(rc))
926 {
927 pThread->u32State = VBOXVDMATHREAD_STATE_CREATING;
928 pThread->pfnChanged = pfnCreated;
929 pThread->pvChanged = pvCreated;
930 rc = RTThreadCreate(&pThread->hWorkerThread, pfnThread, pvThread, 0, RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "VDMA");
931 if (RT_SUCCESS(rc))
932 return VINF_SUCCESS;
933 else
934 WARN(("RTThreadCreate failed %d\n", rc));
935
936 RTSemEventDestroy(pThread->hEvent);
937 }
938 else
939 WARN(("RTSemEventCreate failed %d\n", rc));
940
941 pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
942
943 return rc;
944}
945
946DECLINLINE(int) VBoxVDMAThreadEventNotify(PVBOXVDMATHREAD pThread)
947{
948 int rc = RTSemEventSignal(pThread->hEvent);
949 AssertRC(rc);
950 return rc;
951}
952
953DECLINLINE(int) VBoxVDMAThreadEventWait(PVBOXVDMATHREAD pThread, RTMSINTERVAL cMillies)
954{
955 int rc = RTSemEventWait(pThread->hEvent, cMillies);
956 AssertRC(rc);
957 return rc;
958}
959
960int VBoxVDMAThreadTerm(PVBOXVDMATHREAD pThread, PFNVBOXVDMATHREAD_CHANGED pfnTerminated, void*pvTerminated, bool fNotify)
961{
962 int rc;
963 do
964 {
965 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
966 switch (u32State)
967 {
968 case VBOXVDMATHREAD_STATE_CREATED:
969 pThread->pfnChanged = pfnTerminated;
970 pThread->pvChanged = pvTerminated;
971 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATING);
972 if (fNotify)
973 {
974 rc = VBoxVDMAThreadEventNotify(pThread);
975 AssertRC(rc);
976 }
977 return VINF_SUCCESS;
978 case VBOXVDMATHREAD_STATE_TERMINATING:
979 case VBOXVDMATHREAD_STATE_TERMINATED:
980 {
981 WARN(("thread is marked to termination or terminated\nn"));
982 return VERR_INVALID_STATE;
983 }
984 case VBOXVDMATHREAD_STATE_CREATING:
985 {
986 /* wait till the thread creation is completed */
987 WARN(("concurrent thread create/destron\n"));
988 RTThreadYield();
989 continue;
990 }
991 default:
992 WARN(("invalid state"));
993 return VERR_INVALID_STATE;
994 }
995 } while (1);
996
997 WARN(("should never be here\n"));
998 return VERR_INTERNAL_ERROR;
999}
1000
1001static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource);
1002
1003typedef DECLCALLBACK(void) FNVBOXVDMACRCTL_CALLBACK(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext);
1004typedef FNVBOXVDMACRCTL_CALLBACK *PFNVBOXVDMACRCTL_CALLBACK;
1005
/**
 * Reference-counted private header prepended to every chromium control
 * command allocated by vboxVDMACrCtlCreate.  Recovered from the public part
 * via VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL.
 */
typedef struct VBOXVDMACMD_CHROMIUM_CTL_PRIVATE
{
    uint32_t cRefs;                         /**< Reference count; freed when it drops to zero. */
    int32_t rc;                             /**< Completion status; VERR_NOT_IMPLEMENTED until set. */
    PFNVBOXVDMACRCTL_CALLBACK pfnCompletion;/**< Optional completion callback. */
    void *pvCompletion;                     /**< User context for pfnCompletion. */
    VBOXVDMACMD_CHROMIUM_CTL Cmd;           /**< The public command part (must be last: payload follows). */
} VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, *PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE;
1014
1015# define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(_p) ((PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)(((uint8_t*)(_p)) - RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd)))
1016
1017static PVBOXVDMACMD_CHROMIUM_CTL vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE enmCmd, uint32_t cbCmd)
1018{
1019 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)RTMemAllocZ(cbCmd + RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd));
1020 Assert(pHdr);
1021 if (pHdr)
1022 {
1023 pHdr->cRefs = 1;
1024 pHdr->rc = VERR_NOT_IMPLEMENTED;
1025 pHdr->Cmd.enmType = enmCmd;
1026 pHdr->Cmd.cbCmd = cbCmd;
1027 return &pHdr->Cmd;
1028 }
1029
1030 return NULL;
1031}
1032
1033DECLINLINE(void) vboxVDMACrCtlRelease (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1034{
1035 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1036 uint32_t cRefs = ASMAtomicDecU32(&pHdr->cRefs);
1037 if (!cRefs)
1038 RTMemFree(pHdr);
1039}
1040
#if 0 /* unused */
/* Adds a reference to a command created by vboxVDMACrCtlCreate; paired with
 * vboxVDMACrCtlRelease. */
DECLINLINE(void) vboxVDMACrCtlRetain(PVBOXVDMACMD_CHROMIUM_CTL pCmd)
{
    PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
    ASMAtomicIncU32(&pHdr->cRefs);
}
#endif /* unused */
1048
1049DECLINLINE(int) vboxVDMACrCtlGetRc (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1050{
1051 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1052 return pHdr->rc;
1053}
1054
1055static DECLCALLBACK(void) vboxVDMACrCtlCbSetEvent(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
1056{
1057 RT_NOREF(pVGAState, pCmd);
1058 RTSemEventSignal((RTSEMEVENT)pvContext);
1059}
1060
# if 0 /** @todo vboxVDMACrCtlCbReleaseCmd is unused */
/* Completion callback that simply drops the command reference. */
static DECLCALLBACK(void) vboxVDMACrCtlCbReleaseCmd(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
{
    RT_NOREF(pVGAState, pvContext);
    vboxVDMACrCtlRelease(pCmd);
}
# endif
1068
1069static int vboxVDMACrCtlPostAsync (PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd, PFNVBOXVDMACRCTL_CALLBACK pfnCompletion, void *pvCompletion)
1070{
1071 if ( pVGAState->pDrv
1072 && pVGAState->pDrv->pfnCrHgsmiControlProcess)
1073 {
1074 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1075 pHdr->pfnCompletion = pfnCompletion;
1076 pHdr->pvCompletion = pvCompletion;
1077 pVGAState->pDrv->pfnCrHgsmiControlProcess(pVGAState->pDrv, pCmd, cbCmd);
1078 return VINF_SUCCESS;
1079 }
1080# ifdef DEBUG_misha
1081 Assert(0);
1082# endif
1083 return VERR_NOT_SUPPORTED;
1084}
1085
/**
 * Posts a chromium control command and synchronously waits for its completion.
 *
 * @returns VBox status code (semaphore creation/wait or submission failure).
 *
 * NOTE(review): if submission succeeded but the wait fails (e.g. interrupted),
 * the event semaphore is NOT destroyed - the completion callback may still
 * signal it later, so destroying it here would risk a use-after-free.  This
 * leaks the semaphore on that path; presumably accepted as the lesser evil -
 * confirm.
 */
static int vboxVDMACrCtlPost(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd)
{
    RTSEMEVENT hComplEvent;
    int rc = RTSemEventCreate(&hComplEvent);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        /* The callback signals hComplEvent when the command completes. */
        rc = vboxVDMACrCtlPostAsync(pVGAState, pCmd, cbCmd, vboxVDMACrCtlCbSetEvent, (void*)hComplEvent);
# ifdef DEBUG_misha
        AssertRC(rc);
# endif
        if (RT_SUCCESS(rc))
        {
            rc = RTSemEventWaitNoResume(hComplEvent, RT_INDEFINITE_WAIT);
            AssertRC(rc);
            if (RT_SUCCESS(rc))
            {
                RTSemEventDestroy(hComplEvent);
            }
        }
        else
        {
            /* the command is completed */
            RTSemEventDestroy(hComplEvent);
        }
    }
    return rc;
}
1114
/* Completion context for vboxVDMACrHgcmSubmitSync: carries the command status
 * back to the waiter and the event used to wake it.  ("CYNC" is a historical
 * typo for "SYNC", kept since the name is referenced elsewhere.) */
typedef struct VDMA_VBVA_CTL_CYNC_COMPLETION
{
    int rc;           /* Status reported by the completion callback. */
    RTSEMEVENT hEvent; /* Signalled by the completion callback. */
} VDMA_VBVA_CTL_CYNC_COMPLETION;
1120
1121static DECLCALLBACK(void) vboxVDMACrHgcmSubmitSyncCompletion(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
1122{
1123 RT_NOREF(pCmd, cbCmd);
1124 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvCompletion;
1125 pData->rc = rc;
1126 rc = RTSemEventSignal(pData->hEvent);
1127 if (!RT_SUCCESS(rc))
1128 WARN(("RTSemEventSignal failed %d\n", rc));
1129}
1130
1131static int vboxVDMACrHgcmSubmitSync(struct VBOXVDMAHOST *pVdma, VBOXCRCMDCTL* pCtl, uint32_t cbCtl)
1132{
1133 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
1134 Data.rc = VERR_NOT_IMPLEMENTED;
1135 int rc = RTSemEventCreate(&Data.hEvent);
1136 if (!RT_SUCCESS(rc))
1137 {
1138 WARN(("RTSemEventCreate failed %d\n", rc));
1139 return rc;
1140 }
1141
1142 pCtl->CalloutList.List.pNext = NULL;
1143
1144 PVGASTATE pVGAState = pVdma->pVGAState;
1145 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCtl, cbCtl, vboxVDMACrHgcmSubmitSyncCompletion, &Data);
1146 if (RT_SUCCESS(rc))
1147 {
1148 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
1149 if (RT_SUCCESS(rc))
1150 {
1151 rc = Data.rc;
1152 if (!RT_SUCCESS(rc))
1153 {
1154 WARN(("pfnCrHgcmCtlSubmit command failed %d\n", rc));
1155 }
1156
1157 }
1158 else
1159 WARN(("RTSemEventWait failed %d\n", rc));
1160 }
1161 else
1162 WARN(("pfnCrHgcmCtlSubmit failed %d\n", rc));
1163
1164
1165 RTSemEventDestroy(Data.hEvent);
1166
1167 return rc;
1168}
1169
1170static int vdmaVBVACtlDisableSync(PVBOXVDMAHOST pVdma)
1171{
1172 VBVAEXHOSTCTL HCtl;
1173 HCtl.enmType = VBVAEXHOSTCTL_TYPE_GHH_DISABLE;
1174 int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1175 if (RT_FAILURE(rc))
1176 {
1177 Log(("vdmaVBVACtlSubmitSync failed %d\n", rc));
1178 return rc;
1179 }
1180
1181 vgaUpdateDisplayAll(pVdma->pVGAState, /* fFailOnResize = */ false);
1182
1183 return VINF_SUCCESS;
1184}
1185
/**
 * Iterator callback invoked (repeatedly) by the chromium backend once the HGCM
 * channel is enabled: hands over the host ctl commands still pending in the
 * VBVA queue, one per call.
 *
 * @returns Pointer to the next pending host command body, or NULL when the
 *          queue is drained.
 * @param   hClient     The VDMA instance (struct VBOXVDMAHOST).
 * @param   pcbCtl      Where to return the size of the returned command
 *                      (set to 0 when NULL is returned).
 * @param   prevCmdRc   Completion status for the command returned by the
 *                      previous call.
 */
static DECLCALLBACK(uint8_t*) vboxVDMACrHgcmHandleEnableRemainingHostCommand(HVBOXCRCMDCTL_REMAINING_HOST_COMMAND hClient, uint32_t *pcbCtl, int prevCmdRc)
{
    struct VBOXVDMAHOST *pVdma = hClient;
    if (!pVdma->pCurRemainingHostCtl)
    {
        /* First call: disable VBVA, all subsequent host commands will go HGCM way */
        VBoxVBVAExHSDisable(&pVdma->CmdVbva);
    }
    else
    {
        /* Complete the command handed out by the previous call. */
        VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pVdma->pCurRemainingHostCtl, prevCmdRc);
    }

    /* Fetch the next pending host control command, if any. */
    pVdma->pCurRemainingHostCtl = VBoxVBVAExHPCheckHostCtlOnDisable(&pVdma->CmdVbva);
    if (pVdma->pCurRemainingHostCtl)
    {
        *pcbCtl = pVdma->pCurRemainingHostCtl->u.cmd.cbCmd;
        return pVdma->pCurRemainingHostCtl->u.cmd.pu8Cmd;
    }

    *pcbCtl = 0;
    return NULL;
}
1209
/**
 * Called by the chromium backend when it has finished processing the
 * termination notification; strict builds verify the expected VBVA/thread
 * states, otherwise this is a no-op.
 */
static DECLCALLBACK(void) vboxVDMACrHgcmNotifyTerminatingDoneCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient)
{
# ifdef VBOX_STRICT
    struct VBOXVDMAHOST *pVdma = hClient;
    Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pVdma->Thread.u32State == VBOXVDMATHREAD_STATE_TERMINATING);
# else
    RT_NOREF(hClient);
# endif
}
1220
1221static DECLCALLBACK(int) vboxVDMACrHgcmNotifyTerminatingCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient, VBOXCRCMDCTL_HGCMENABLE_DATA *pHgcmEnableData)
1222{
1223 struct VBOXVDMAHOST *pVdma = hClient;
1224 VBVAEXHOSTCTL HCtl;
1225 HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD;
1226 int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1227
1228 pHgcmEnableData->hRHCmd = pVdma;
1229 pHgcmEnableData->pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1230
1231 if (RT_FAILURE(rc))
1232 {
1233 if (rc == VERR_INVALID_STATE)
1234 rc = VINF_SUCCESS;
1235 else
1236 WARN(("vdmaVBVACtlSubmitSync failed %d\n", rc));
1237 }
1238
1239 return rc;
1240}
1241
1242static int vboxVDMACrHgcmHandleEnable(struct VBOXVDMAHOST *pVdma)
1243{
1244 VBOXCRCMDCTL_ENABLE Enable;
1245 Enable.Hdr.enmType = VBOXCRCMDCTL_TYPE_ENABLE;
1246 Enable.Data.hRHCmd = pVdma;
1247 Enable.Data.pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1248
1249 int rc = vboxVDMACrHgcmSubmitSync(pVdma, &Enable.Hdr, sizeof (Enable));
1250 Assert(!pVdma->pCurRemainingHostCtl);
1251 if (RT_SUCCESS(rc))
1252 {
1253 Assert(!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1254 return VINF_SUCCESS;
1255 }
1256
1257 Assert(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1258 WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));
1259
1260 return rc;
1261}
1262
1263static int vdmaVBVAEnableProcess(struct VBOXVDMAHOST *pVdma, uint32_t u32Offset)
1264{
1265 if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1266 {
1267 WARN(("vdma VBVA is already enabled\n"));
1268 return VERR_INVALID_STATE;
1269 }
1270
1271 VBVABUFFER *pVBVA = (VBVABUFFER *)HGSMIOffsetToPointerHost(pVdma->pHgsmi, u32Offset);
1272 if (!pVBVA)
1273 {
1274 WARN(("invalid offset %d\n", u32Offset));
1275 return VERR_INVALID_PARAMETER;
1276 }
1277
1278 if (!pVdma->CrSrvInfo.pfnEnable)
1279 {
1280# ifdef DEBUG_misha
1281 WARN(("pfnEnable is NULL\n"));
1282 return VERR_NOT_SUPPORTED;
1283# endif
1284 }
1285
1286 int rc = VBoxVBVAExHSEnable(&pVdma->CmdVbva, pVBVA);
1287 if (RT_SUCCESS(rc))
1288 {
1289 VBOXCRCMDCTL_DISABLE Disable;
1290 Disable.Hdr.enmType = VBOXCRCMDCTL_TYPE_DISABLE;
1291 Disable.Data.hNotifyTerm = pVdma;
1292 Disable.Data.pfnNotifyTerm = vboxVDMACrHgcmNotifyTerminatingCb;
1293 Disable.Data.pfnNotifyTermDone = vboxVDMACrHgcmNotifyTerminatingDoneCb;
1294 rc = vboxVDMACrHgcmSubmitSync(pVdma, &Disable.Hdr, sizeof (Disable));
1295 if (RT_SUCCESS(rc))
1296 {
1297 PVGASTATE pVGAState = pVdma->pVGAState;
1298 VBOXCRCMD_SVRENABLE_INFO Info;
1299 Info.hCltScr = pVGAState->pDrv;
1300 Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
1301 Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
1302 Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
1303 rc = pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
1304 if (RT_SUCCESS(rc))
1305 return VINF_SUCCESS;
1306 else
1307 WARN(("pfnEnable failed %d\n", rc));
1308
1309 vboxVDMACrHgcmHandleEnable(pVdma);
1310 }
1311 else
1312 WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));
1313
1314 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1315 }
1316 else
1317 WARN(("VBoxVBVAExHSEnable failed %d\n", rc));
1318
1319 return rc;
1320}
1321
/**
 * Disables the VBVA command processing path.
 *
 * @returns VBox status code.
 * @param   pVdma           The VDMA instance.
 * @param   fDoHgcmEnable   Whether to re-enable the HGCM command channel after
 *                          disabling (guest-initiated disable); false when
 *                          called on HGCM unload.
 */
static int vdmaVBVADisableProcess(struct VBOXVDMAHOST *pVdma, bool fDoHgcmEnable)
{
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    {
        Log(("vdma VBVA is already disabled\n"));
        return VINF_SUCCESS;
    }

    /* NOTE(review): pfnDisable is called without a NULL check while the enable
     * path checks pfnEnable - presumably VBVA can only be enabled when the 3D
     * backend registered its callbacks; confirm. */
    int rc = pVdma->CrSrvInfo.pfnDisable(pVdma->CrSrvInfo.hSvr);
    if (RT_SUCCESS(rc))
    {
        if (fDoHgcmEnable)
        {
            PVGASTATE pVGAState = pVdma->pVGAState;

            /* disable is a bit tricky
             * we need to ensure the host ctl commands do not come out of order
             * and do not come over HGCM channel until after it is enabled */
            rc = vboxVDMACrHgcmHandleEnable(pVdma);
            if (RT_SUCCESS(rc))
            {
                vdmaVBVANotifyDisable(pVGAState);
                return VINF_SUCCESS;
            }

            /* HGCM enable failed: roll back by re-enabling the 3D backend. */
            VBOXCRCMD_SVRENABLE_INFO Info;
            Info.hCltScr = pVGAState->pDrv;
            Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
            Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
            Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
            pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
        }
    }
    else
        WARN(("pfnDisable failed %d\n", rc));

    return rc;
}
1360
1361static int vboxVDMACrHostCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd, bool *pfContinue)
1362{
1363 *pfContinue = true;
1364
1365 switch (pCmd->enmType)
1366 {
1367 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1368 {
1369 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1370 {
1371 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1372 return VERR_INVALID_STATE;
1373 }
1374 return pVdma->CrSrvInfo.pfnHostCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1375 }
1376 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1377 {
1378 int rc = vdmaVBVADisableProcess(pVdma, true);
1379 if (RT_FAILURE(rc))
1380 {
1381 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1382 return rc;
1383 }
1384
1385 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
1386 }
1387 case VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD:
1388 {
1389 int rc = vdmaVBVADisableProcess(pVdma, false);
1390 if (RT_FAILURE(rc))
1391 {
1392 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1393 return rc;
1394 }
1395
1396 rc = VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, true);
1397 if (RT_FAILURE(rc))
1398 {
1399 WARN(("VBoxVDMAThreadTerm failed %d\n", rc));
1400 return rc;
1401 }
1402
1403 *pfContinue = false;
1404 return VINF_SUCCESS;
1405 }
1406 case VBVAEXHOSTCTL_TYPE_HH_SAVESTATE:
1407 {
1408 PVGASTATE pVGAState = pVdma->pVGAState;
1409 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1410 int rc = VBoxVBVAExHSSaveState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM);
1411 if (RT_FAILURE(rc))
1412 {
1413 WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
1414 return rc;
1415 }
1416 VGA_SAVED_STATE_PUT_MARKER(pCmd->u.state.pSSM, 4);
1417
1418 return pVdma->CrSrvInfo.pfnSaveState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM);
1419 }
1420 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE:
1421 {
1422 PVGASTATE pVGAState = pVdma->pVGAState;
1423 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1424
1425 int rc = VBoxVBVAExHSLoadState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1426 if (RT_FAILURE(rc))
1427 {
1428 WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
1429 return rc;
1430 }
1431
1432 VGA_SAVED_STATE_GET_MARKER_RETURN_ON_MISMATCH(pCmd->u.state.pSSM, pCmd->u.state.u32Version, 4);
1433 rc = pVdma->CrSrvInfo.pfnLoadState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1434 if (RT_FAILURE(rc))
1435 {
1436 WARN(("pfnLoadState failed %d\n", rc));
1437 return rc;
1438 }
1439
1440 return VINF_SUCCESS;
1441 }
1442 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE:
1443 {
1444 PVGASTATE pVGAState = pVdma->pVGAState;
1445
1446 for (uint32_t i = 0; i < pVGAState->cMonitors; ++i)
1447 {
1448 VBVAINFOSCREEN CurScreen;
1449 VBVAINFOVIEW CurView;
1450
1451 int rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
1452 if (RT_FAILURE(rc))
1453 {
1454 WARN(("VBVAGetInfoViewAndScreen failed %d\n", rc));
1455 return rc;
1456 }
1457
1458 rc = VBVAInfoScreen(pVGAState, &CurScreen);
1459 if (RT_FAILURE(rc))
1460 {
1461 WARN(("VBVAInfoScreen failed %d\n", rc));
1462 return rc;
1463 }
1464 }
1465
1466 return VINF_SUCCESS;
1467 }
1468 default:
1469 WARN(("unexpected host ctl type %d\n", pCmd->enmType));
1470 return VERR_INVALID_PARAMETER;
1471 }
1472}
1473
1474static int vboxVDMASetupScreenInfo(PVGASTATE pVGAState, VBVAINFOSCREEN *pScreen)
1475{
1476 const uint32_t u32ViewIndex = pScreen->u32ViewIndex;
1477 const bool fDisabled = RT_BOOL(pScreen->u16Flags & VBVA_SCREEN_F_DISABLED);
1478
1479 if (fDisabled)
1480 {
1481 if ( u32ViewIndex < pVGAState->cMonitors
1482 || u32ViewIndex == UINT32_C(0xFFFFFFFF))
1483 {
1484 RT_ZERO(*pScreen);
1485 pScreen->u32ViewIndex = u32ViewIndex;
1486 pScreen->u16Flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED;
1487 return VINF_SUCCESS;
1488 }
1489 }
1490 else
1491 {
1492 if ( u32ViewIndex < pVGAState->cMonitors
1493 && pScreen->u16BitsPerPixel <= 32
1494 && pScreen->u32Width <= UINT16_MAX
1495 && pScreen->u32Height <= UINT16_MAX
1496 && pScreen->u32LineSize <= UINT16_MAX * 4)
1497 {
1498 const uint32_t u32BytesPerPixel = (pScreen->u16BitsPerPixel + 7) / 8;
1499 if (pScreen->u32Width <= pScreen->u32LineSize / (u32BytesPerPixel? u32BytesPerPixel: 1))
1500 {
1501 const uint64_t u64ScreenSize = (uint64_t)pScreen->u32LineSize * pScreen->u32Height;
1502 if ( pScreen->u32StartOffset <= pVGAState->vram_size
1503 && u64ScreenSize <= pVGAState->vram_size
1504 && pScreen->u32StartOffset <= pVGAState->vram_size - (uint32_t)u64ScreenSize)
1505 {
1506 return VINF_SUCCESS;
1507 }
1508 }
1509 }
1510 }
1511
1512 return VERR_INVALID_PARAMETER;
1513}
1514
/**
 * Processes one resize entry from a GHH_RESIZE control: validates the screen
 * info, forwards the resize to the 3D backend and re-applies the 2D
 * view/screen configuration for every targeted monitor.
 *
 * @returns VBox status code.
 * @param   pVdma   The VDMA instance.
 * @param   pEntry  The resize entry (screen info + monitor target bitmap).
 */
static int vboxVDMACrGuestCtlResizeEntryProcess(struct VBOXVDMAHOST *pVdma, VBOXCMDVBVA_RESIZE_ENTRY *pEntry)
{
    PVGASTATE pVGAState = pVdma->pVGAState;
    VBVAINFOSCREEN Screen = pEntry->Screen;

    /* Verify and cleanup local copy of the input data. */
    int rc = vboxVDMASetupScreenInfo(pVGAState, &Screen);
    if (RT_FAILURE(rc))
    {
        WARN(("invalid screen data\n"));
        return rc;
    }

    /* Copy the target map and mask out bits beyond the configured monitors. */
    VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
    memcpy(aTargetMap, pEntry->aTargetMap, sizeof(aTargetMap));
    ASMBitClearRange(aTargetMap, pVGAState->cMonitors, VBOX_VIDEO_MAX_SCREENS);

    rc = pVdma->CrSrvInfo.pfnResize(pVdma->CrSrvInfo.hSvr, &Screen, aTargetMap);
    if (RT_FAILURE(rc))
    {
        WARN(("pfnResize failed %d\n", rc));
        return rc;
    }

    /* A fake view which contains the current screen for the 2D VBVAInfoView. */
    VBVAINFOVIEW View;
    View.u32ViewOffset = 0;
    View.u32ViewSize = Screen.u32LineSize * Screen.u32Height + Screen.u32StartOffset;
    View.u32MaxScreenSize = Screen.u32LineSize * Screen.u32Height;

    const bool fDisable = RT_BOOL(Screen.u16Flags & VBVA_SCREEN_F_DISABLED);

    /* Apply to each monitor set in the target map. */
    for (int i = ASMBitFirstSet(aTargetMap, pVGAState->cMonitors);
            i >= 0;
            i = ASMBitNextSet(aTargetMap, pVGAState->cMonitors, i))
    {
        Screen.u32ViewIndex = i;

        VBVAINFOSCREEN CurScreen;
        VBVAINFOVIEW CurView;

        rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
        AssertRC(rc);

        /* Skip monitors whose configuration is already up to date. */
        if (!memcmp(&Screen, &CurScreen, sizeof (CurScreen)))
            continue;

        if (!fDisable || !CurView.u32ViewSize)
        {
            View.u32ViewIndex = Screen.u32ViewIndex;

            rc = VBVAInfoView(pVGAState, &View);
            if (RT_FAILURE(rc))
            {
                WARN(("VBVAInfoView failed %d\n", rc));
                break;
            }
        }

        rc = VBVAInfoScreen(pVGAState, &Screen);
        if (RT_FAILURE(rc))
        {
            WARN(("VBVAInfoScreen failed %d\n", rc));
            break;
        }
    }

    return rc;
}
1584
1585static int vboxVDMACrGuestCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd)
1586{
1587 VBVAEXHOSTCTL_TYPE enmType = pCmd->enmType;
1588 switch (enmType)
1589 {
1590 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1591 {
1592 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1593 {
1594 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1595 return VERR_INVALID_STATE;
1596 }
1597 return pVdma->CrSrvInfo.pfnGuestCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1598 }
1599 case VBVAEXHOSTCTL_TYPE_GHH_RESIZE:
1600 {
1601 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1602 {
1603 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1604 return VERR_INVALID_STATE;
1605 }
1606
1607 uint32_t cbCmd = pCmd->u.cmd.cbCmd;
1608
1609 if (cbCmd % sizeof (VBOXCMDVBVA_RESIZE_ENTRY))
1610 {
1611 WARN(("invalid buffer size\n"));
1612 return VERR_INVALID_PARAMETER;
1613 }
1614
1615 uint32_t cElements = cbCmd / sizeof (VBOXCMDVBVA_RESIZE_ENTRY);
1616 if (!cElements)
1617 {
1618 WARN(("invalid buffer size\n"));
1619 return VERR_INVALID_PARAMETER;
1620 }
1621
1622 VBOXCMDVBVA_RESIZE *pResize = (VBOXCMDVBVA_RESIZE*)pCmd->u.cmd.pu8Cmd;
1623
1624 int rc = VINF_SUCCESS;
1625
1626 for (uint32_t i = 0; i < cElements; ++i)
1627 {
1628 VBOXCMDVBVA_RESIZE_ENTRY *pEntry = &pResize->aEntries[i];
1629 rc = vboxVDMACrGuestCtlResizeEntryProcess(pVdma, pEntry);
1630 if (RT_FAILURE(rc))
1631 {
1632 WARN(("vboxVDMACrGuestCtlResizeEntryProcess failed %d\n", rc));
1633 break;
1634 }
1635 }
1636 return rc;
1637 }
1638 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE:
1639 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED:
1640 {
1641 VBVAENABLE *pEnable = (VBVAENABLE *)pCmd->u.cmd.pu8Cmd;
1642 Assert(pCmd->u.cmd.cbCmd == sizeof (VBVAENABLE));
1643 uint32_t u32Offset = pEnable->u32Offset;
1644 int rc = vdmaVBVAEnableProcess(pVdma, u32Offset);
1645 if (!RT_SUCCESS(rc))
1646 {
1647 WARN(("vdmaVBVAEnableProcess failed %d\n", rc));
1648 return rc;
1649 }
1650
1651 if (enmType == VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED)
1652 {
1653 rc = VBoxVBVAExHPPause(&pVdma->CmdVbva);
1654 if (!RT_SUCCESS(rc))
1655 {
1656 WARN(("VBoxVBVAExHPPause failed %d\n", rc));
1657 return rc;
1658 }
1659 }
1660
1661 return VINF_SUCCESS;
1662 }
1663 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1664 {
1665 int rc = vdmaVBVADisableProcess(pVdma, true);
1666 if (RT_FAILURE(rc))
1667 {
1668 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1669 return rc;
1670 }
1671
1672 /* do vgaUpdateDisplayAll right away */
1673 VMR3ReqCallNoWait(PDMDevHlpGetVM(pVdma->pVGAState->pDevInsR3), VMCPUID_ANY,
1674 (PFNRT)vgaUpdateDisplayAll, 2, pVdma->pVGAState, /* fFailOnResize = */ false);
1675
1676 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
1677 }
1678 default:
1679 WARN(("unexpected ctl type %d\n", pCmd->enmType));
1680 return VERR_INVALID_PARAMETER;
1681 }
1682}
1683
1684/**
1685 * @param fIn - whether this is a page in or out op.
1686 * the direction is VRA#M - related, so fIn == true - transfer to VRAM); false - transfer from VRAM
1687 */
1688static int vboxVDMACrCmdVbvaProcessPagingEl(PPDMDEVINS pDevIns, VBOXCMDVBVAPAGEIDX iPage, uint8_t *pu8Vram, bool fIn)
1689{
1690 RTGCPHYS phPage = (RTGCPHYS)iPage << PAGE_SHIFT;
1691 PGMPAGEMAPLOCK Lock;
1692 int rc;
1693
1694 if (fIn)
1695 {
1696 const void * pvPage;
1697 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPage, &Lock);
1698 if (!RT_SUCCESS(rc))
1699 {
1700 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d", rc));
1701 return rc;
1702 }
1703
1704 memcpy(pu8Vram, pvPage, PAGE_SIZE);
1705
1706 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1707 }
1708 else
1709 {
1710 void * pvPage;
1711 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvPage, &Lock);
1712 if (!RT_SUCCESS(rc))
1713 {
1714 WARN(("PDMDevHlpPhysGCPhys2CCPtr failed %d", rc));
1715 return rc;
1716 }
1717
1718 memcpy(pvPage, pu8Vram, PAGE_SIZE);
1719
1720 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1721 }
1722
1723 return VINF_SUCCESS;
1724}
1725
1726static int vboxVDMACrCmdVbvaProcessPagingEls(PPDMDEVINS pDevIns, const VBOXCMDVBVAPAGEIDX *piPages, uint32_t cPages, uint8_t *pu8Vram, bool fIn)
1727{
1728 for (uint32_t i = 0; i < cPages; ++i, pu8Vram += PAGE_SIZE)
1729 {
1730 int rc = vboxVDMACrCmdVbvaProcessPagingEl(pDevIns, piPages[i], pu8Vram, fIn);
1731 if (!RT_SUCCESS(rc))
1732 {
1733 WARN(("vboxVDMACrCmdVbvaProcessPagingEl failed %d", rc));
1734 return rc;
1735 }
1736 }
1737
1738 return VINF_SUCCESS;
1739}
1740
/**
 * Validates a paging-transfer command and extracts its parameters.
 *
 * @returns 0 on success, -1 on any validation failure (i8Result protocol).
 * @param   pVGAState   The VGA state (for VRAM base/size).
 * @param   pHdr        The command header (direction flag).
 * @param   pData       The transfer data (VRAM offset + page index array).
 * @param   cbCmd       Total size of the command buffer.
 * @param   ppPages     Where to return the page index array.
 * @param   pcPages     Where to return the page count.
 * @param   ppu8Vram    Where to return the VRAM address for the transfer.
 * @param   pfIn        Where to return the direction (true = into VRAM).
 */
static int8_t vboxVDMACrCmdVbvaPagingDataInit(PVGASTATE pVGAState, const VBOXCMDVBVA_HDR *pHdr, const VBOXCMDVBVA_PAGING_TRANSFER_DATA *pData, uint32_t cbCmd,
                            const VBOXCMDVBVAPAGEIDX **ppPages, VBOXCMDVBVAPAGEIDX *pcPages,
                            uint8_t **ppu8Vram, bool *pfIn)
{
    if (cbCmd < sizeof (VBOXCMDVBVA_PAGING_TRANSFER))
    {
        WARN(("cmd too small"));
        return -1;
    }

    /* Everything after the fixed part is the page index array. */
    VBOXCMDVBVAPAGEIDX cPages = cbCmd - RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers);
    if (cPages % sizeof (VBOXCMDVBVAPAGEIDX))
    {
        WARN(("invalid cmd size"));
        return -1;
    }
    cPages /= sizeof (VBOXCMDVBVAPAGEIDX);

    VBOXCMDVBVAOFFSET offVRAM = pData->Alloc.u.offVRAM;
    if (offVRAM & PAGE_OFFSET_MASK)
    {
        WARN(("offVRAM address is not on page boundary\n"));
        return -1;
    }
    const VBOXCMDVBVAPAGEIDX *pPages = pData->aPageNumbers;

    uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
    if (offVRAM >= pVGAState->vram_size)
    {
        WARN(("invalid vram offset"));
        return -1;
    }

    /* Reject counts whose shift by PAGE_SHIFT would overflow. */
    if (~(~(VBOXCMDVBVAPAGEIDX)0 >> PAGE_SHIFT) & cPages)
    {
        WARN(("invalid cPages %d", cPages));
        return -1;
    }

    /* NOTE(review): the '>=' also rejects a range ending exactly at vram_size;
     * presumably a deliberate conservative bound - confirm before relaxing. */
    if (offVRAM + ((VBOXCMDVBVAOFFSET)cPages << PAGE_SHIFT) >= pVGAState->vram_size)
    {
        WARN(("invalid cPages %d, exceeding vram size", cPages));
        return -1;
    }

    uint8_t *pu8Vram = pu8VramBase + offVRAM;
    bool fIn = !!(pHdr->u8Flags & VBOXCMDVBVA_OPF_PAGING_TRANSFER_IN);

    *ppPages = pPages;
    *pcPages = cPages;
    *ppu8Vram = pu8Vram;
    *pfIn = fIn;
    return 0;
}
1795
1796static int8_t vboxVDMACrCmdVbvaPagingFill(PVGASTATE pVGAState, VBOXCMDVBVA_PAGING_FILL *pFill)
1797{
1798 VBOXCMDVBVAOFFSET offVRAM = pFill->offVRAM;
1799 if (offVRAM & PAGE_OFFSET_MASK)
1800 {
1801 WARN(("offVRAM address is not on page boundary\n"));
1802 return -1;
1803 }
1804
1805 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1806 if (offVRAM >= pVGAState->vram_size)
1807 {
1808 WARN(("invalid vram offset"));
1809 return -1;
1810 }
1811
1812 uint32_t cbFill = pFill->u32CbFill;
1813
1814 if (offVRAM + cbFill >= pVGAState->vram_size)
1815 {
1816 WARN(("invalid cPages"));
1817 return -1;
1818 }
1819
1820 uint32_t *pu32Vram = (uint32_t*)(pu8VramBase + offVRAM);
1821 uint32_t u32Color = pFill->u32Pattern;
1822
1823 Assert(!(cbFill % 4));
1824 for (uint32_t i = 0; i < cbFill / 4; ++i)
1825 {
1826 pu32Vram[i] = u32Color;
1827 }
1828
1829 return 0;
1830}
1831
/**
 * Dispatches a fully-assembled VBVA command body: NOPs and paging operations
 * are handled here, everything else is forwarded to the 3D backend.
 *
 * @returns 0 on success, negative value on failure (i8Result protocol), or
 *          the backend's result for forwarded commands.
 */
static int8_t vboxVDMACrCmdVbvaProcessCmdData(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
{
    switch (pCmd->u8OpCode)
    {
        case VBOXCMDVBVA_OPTYPE_NOPCMD:
            return 0;
        case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
        {
            PVGASTATE pVGAState = pVdma->pVGAState;
            const VBOXCMDVBVAPAGEIDX *pPages;
            uint32_t cPages;
            uint8_t *pu8Vram;
            bool fIn;
            /* Validate the transfer and extract page list, VRAM address and direction. */
            int8_t i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pCmd, &((VBOXCMDVBVA_PAGING_TRANSFER*)pCmd)->Data, cbCmd,
                                                                &pPages, &cPages,
                                                                &pu8Vram, &fIn);
            if (i8Result < 0)
            {
                WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
                return i8Result;
            }

            PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
            int rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pPages, cPages, pu8Vram, fIn);
            if (!RT_SUCCESS(rc))
            {
                WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
                return -1;
            }

            return 0;
        }
        case VBOXCMDVBVA_OPTYPE_PAGING_FILL:
        {
            PVGASTATE pVGAState = pVdma->pVGAState;
            if (cbCmd != sizeof (VBOXCMDVBVA_PAGING_FILL))
            {
                WARN(("cmd too small"));
                return -1;
            }

            return vboxVDMACrCmdVbvaPagingFill(pVGAState, (VBOXCMDVBVA_PAGING_FILL*)pCmd);
        }
        default:
            /* Anything else goes to the chromium command backend. */
            return pVdma->CrSrvInfo.pfnCmd(pVdma->CrSrvInfo.hSvr, pCmd, cbCmd);
    }
}
1879
1880# if 0
1881typedef struct VBOXCMDVBVA_PAGING_TRANSFER
1882{
1883 VBOXCMDVBVA_HDR Hdr;
1884 /* for now can only contain offVRAM.
1885 * paging transfer can NOT be initiated for allocations having host 3D object (hostID) associated */
1886 VBOXCMDVBVA_ALLOCINFO Alloc;
1887 uint32_t u32Reserved;
1888 VBOXCMDVBVA_SYSMEMEL aSysMem[1];
1889} VBOXCMDVBVA_PAGING_TRANSFER;
1890# endif
1891
1892AssertCompile(sizeof (VBOXCMDVBVA_HDR) == 8);
1893AssertCompile(sizeof (VBOXCMDVBVA_ALLOCINFO) == 4);
1894AssertCompile(sizeof (VBOXCMDVBVAPAGEIDX) == 4);
1895AssertCompile(!(PAGE_SIZE % sizeof (VBOXCMDVBVAPAGEIDX)));
1896
1897# define VBOXCMDVBVA_NUM_SYSMEMEL_PER_PAGE (PAGE_SIZE / sizeof (VBOXCMDVBVA_SYSMEMEL))
1898
1899static int8_t vboxVDMACrCmdVbvaProcess(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
1900{
1901 switch (pCmd->u8OpCode)
1902 {
1903 case VBOXCMDVBVA_OPTYPE_SYSMEMCMD:
1904 {
1905 if (cbCmd < sizeof (VBOXCMDVBVA_SYSMEMCMD))
1906 {
1907 WARN(("invalid command size"));
1908 return -1;
1909 }
1910 VBOXCMDVBVA_SYSMEMCMD *pSysmemCmd = (VBOXCMDVBVA_SYSMEMCMD*)pCmd;
1911 const VBOXCMDVBVA_HDR *pRealCmdHdr;
1912 uint32_t cbRealCmd = pCmd->u8Flags;
1913 cbRealCmd |= (uint32_t)pCmd->u.u8PrimaryID << 8;
1914 if (cbRealCmd < sizeof (VBOXCMDVBVA_HDR))
1915 {
1916 WARN(("invalid sysmem cmd size"));
1917 return -1;
1918 }
1919
1920 RTGCPHYS phCmd = (RTGCPHYS)pSysmemCmd->phCmd;
1921
1922 PGMPAGEMAPLOCK Lock;
1923 PVGASTATE pVGAState = pVdma->pVGAState;
1924 PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
1925 const void * pvCmd;
1926 int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
1927 if (!RT_SUCCESS(rc))
1928 {
1929 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
1930 return -1;
1931 }
1932
1933 Assert((phCmd & PAGE_OFFSET_MASK) == (((uintptr_t)pvCmd) & PAGE_OFFSET_MASK));
1934
1935 uint32_t cbCmdPart = PAGE_SIZE - (((uintptr_t)pvCmd) & PAGE_OFFSET_MASK);
1936
1937 if (cbRealCmd <= cbCmdPart)
1938 {
1939 pRealCmdHdr = (const VBOXCMDVBVA_HDR *)pvCmd;
1940 uint8_t i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pRealCmdHdr, cbRealCmd);
1941 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1942 return i8Result;
1943 }
1944
1945 VBOXCMDVBVA_HDR Hdr;
1946 const void *pvCurCmdTail;
1947 uint32_t cbCurCmdTail;
1948 if (cbCmdPart >= sizeof (*pRealCmdHdr))
1949 {
1950 pRealCmdHdr = (const VBOXCMDVBVA_HDR *)pvCmd;
1951 pvCurCmdTail = (const void*)(pRealCmdHdr + 1);
1952 cbCurCmdTail = cbCmdPart - sizeof (*pRealCmdHdr);
1953 }
1954 else
1955 {
1956 memcpy(&Hdr, pvCmd, cbCmdPart);
1957 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1958 phCmd += cbCmdPart;
1959 Assert(!(phCmd & PAGE_OFFSET_MASK));
1960 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
1961 if (!RT_SUCCESS(rc))
1962 {
1963 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
1964 return -1;
1965 }
1966
1967 cbCmdPart = sizeof (*pRealCmdHdr) - cbCmdPart;
1968 memcpy(((uint8_t*)(&Hdr)) + cbCmdPart, pvCmd, cbCmdPart);
1969 pRealCmdHdr = &Hdr;
1970 pvCurCmdTail = (const void*)(((uint8_t*)pvCmd) + cbCmdPart);
1971 cbCurCmdTail = PAGE_SIZE - cbCmdPart;
1972 }
1973
1974 if (cbCurCmdTail > cbRealCmd - sizeof (*pRealCmdHdr))
1975 cbCurCmdTail = cbRealCmd - sizeof (*pRealCmdHdr);
1976
1977 int8_t i8Result = 0;
1978
1979 switch (pRealCmdHdr->u8OpCode)
1980 {
1981 case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
1982 {
1983 const uint32_t *pPages;
1984 uint32_t cPages;
1985 uint8_t *pu8Vram;
1986 bool fIn;
1987 i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pRealCmdHdr, (const VBOXCMDVBVA_PAGING_TRANSFER_DATA*)pvCurCmdTail, cbRealCmd,
1988 &pPages, &cPages,
1989 &pu8Vram, &fIn);
1990 if (i8Result < 0)
1991 {
1992 WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
1993 /* we need to break, not return, to ensure currently locked page is released */
1994 break;
1995 }
1996
1997 if (cbCurCmdTail & 3)
1998 {
1999 WARN(("command is not alligned properly %d", cbCurCmdTail));
2000 i8Result = -1;
2001 /* we need to break, not return, to ensure currently locked page is released */
2002 break;
2003 }
2004
2005 uint32_t cCurPages = cbCurCmdTail / sizeof (VBOXCMDVBVAPAGEIDX);
2006 Assert(cCurPages < cPages);
2007
2008 do
2009 {
2010 rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pPages, cCurPages, pu8Vram, fIn);
2011 if (!RT_SUCCESS(rc))
2012 {
2013 WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
2014 i8Result = -1;
2015 /* we need to break, not return, to ensure currently locked page is released */
2016 break;
2017 }
2018
2019 Assert(cPages >= cCurPages);
2020 cPages -= cCurPages;
2021
2022 if (!cPages)
2023 break;
2024
2025 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2026
2027 Assert(!(phCmd & PAGE_OFFSET_MASK));
2028
2029 phCmd += PAGE_SIZE;
2030 pu8Vram += (VBOXCMDVBVAOFFSET)cCurPages << PAGE_SHIFT;
2031
2032 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
2033 if (!RT_SUCCESS(rc))
2034 {
2035 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
2036 /* the page is not locked, return */
2037 return -1;
2038 }
2039
2040 cCurPages = PAGE_SIZE / sizeof (VBOXCMDVBVAPAGEIDX);
2041 if (cCurPages > cPages)
2042 cCurPages = cPages;
2043 } while (1);
2044 break;
2045 }
2046 default:
2047 WARN(("command can not be splitted"));
2048 i8Result = -1;
2049 break;
2050 }
2051
2052 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2053 return i8Result;
2054 }
2055 case VBOXCMDVBVA_OPTYPE_COMPLEXCMD:
2056 {
2057 Assert(cbCmd >= sizeof (VBOXCMDVBVA_HDR));
2058 ++pCmd;
2059 cbCmd -= sizeof (*pCmd);
2060 uint32_t cbCurCmd = 0;
2061 for ( ; cbCmd; cbCmd -= cbCurCmd, pCmd = (VBOXCMDVBVA_HDR*)(((uint8_t*)pCmd) + cbCurCmd))
2062 {
2063 if (cbCmd < sizeof (VBOXCMDVBVA_HDR))
2064 {
2065 WARN(("invalid command size"));
2066 return -1;
2067 }
2068
2069 cbCurCmd = pCmd->u2.complexCmdEl.u16CbCmdHost;
2070 if (cbCmd < cbCurCmd)
2071 {
2072 WARN(("invalid command size"));
2073 return -1;
2074 }
2075
2076 int8_t i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCurCmd);
2077 if (i8Result < 0)
2078 {
2079 WARN(("vboxVDMACrCmdVbvaProcess failed"));
2080 return i8Result;
2081 }
2082 }
2083 return 0;
2084 }
2085 default:
2086 return vboxVDMACrCmdVbvaProcessCmdData(pVdma, pCmd, cbCmd);
2087 }
2088}
2089
2090static void vboxVDMACrCmdProcess(struct VBOXVDMAHOST *pVdma, uint8_t* pu8Cmd, uint32_t cbCmd)
2091{
2092 if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
2093 return;
2094
2095 if (cbCmd < sizeof (VBOXCMDVBVA_HDR))
2096 {
2097 WARN(("invalid command size"));
2098 return;
2099 }
2100
2101 PVBOXCMDVBVA_HDR pCmd = (PVBOXCMDVBVA_HDR)pu8Cmd;
2102
2103 /* check if the command is cancelled */
2104 if (!ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_IN_PROGRESS, VBOXCMDVBVA_STATE_SUBMITTED))
2105 {
2106 Assert(pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED);
2107 return;
2108 }
2109
2110 pCmd->u.i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCmd);
2111}
2112
2113static int vboxVDMACrCtlHgsmiSetup(struct VBOXVDMAHOST *pVdma)
2114{
2115 PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP pCmd = (PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP)
2116 vboxVDMACrCtlCreate (VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP, sizeof (*pCmd));
2117 int rc = VERR_NO_MEMORY;
2118 if (pCmd)
2119 {
2120 PVGASTATE pVGAState = pVdma->pVGAState;
2121 pCmd->pvVRamBase = pVGAState->vram_ptrR3;
2122 pCmd->cbVRam = pVGAState->vram_size;
2123 pCmd->pLed = &pVGAState->Led3D;
2124 pCmd->CrClientInfo.hClient = pVdma;
2125 pCmd->CrClientInfo.pfnCallout = vboxCmdVBVACmdCallout;
2126 rc = vboxVDMACrCtlPost(pVGAState, &pCmd->Hdr, sizeof (*pCmd));
2127 if (RT_SUCCESS(rc))
2128 {
2129 rc = vboxVDMACrCtlGetRc(&pCmd->Hdr);
2130 if (RT_SUCCESS(rc))
2131 pVdma->CrSrvInfo = pCmd->CrCmdServerInfo;
2132 else if (rc != VERR_NOT_SUPPORTED)
2133 WARN(("vboxVDMACrCtlGetRc returned %d\n", rc));
2134 }
2135 else
2136 WARN(("vboxVDMACrCtlPost failed %d\n", rc));
2137
2138 vboxVDMACrCtlRelease(&pCmd->Hdr);
2139 }
2140
2141 if (!RT_SUCCESS(rc))
2142 memset(&pVdma->CrSrvInfo, 0, sizeof (pVdma->CrSrvInfo));
2143
2144 return rc;
2145}
2146
2147static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer);
2148
/**
 * Pre-scans a guest DMA command descriptor and, when it contains a command
 * the host handles directly (a chromium command or a BPB transfer), processes
 * it here instead of leaving it to the internal command pipeline.
 *
 * @returns VINF_SUCCESS if the command was fully handled (or handed off)
 *          here, VINF_NOT_SUPPORTED if the caller must process it, or a
 *          VERR_xxx status on malformed input.
 */
static int vboxVDMACmdCheckCrCmd(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmdDr, uint32_t cbCmdDr)
{
    PVBOXVDMACMD pDmaCmd = NULL;
    uint32_t cbDmaCmd = 0;
    uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
    int rc = VINF_NOT_SUPPORTED;

    cbDmaCmd = pCmdDr->cbBuf;

    if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
    {
        /* The DMA command is embedded right after the descriptor. */
        if (cbCmdDr < sizeof (*pCmdDr) + VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        /* NOTE(review): this bound check looks inverted - one would expect to
         * reject cbDmaCmd GREATER than the space following the descriptor.
         * Verify against the guest-side contract before changing it. */
        if (cbDmaCmd < cbCmdDr - sizeof (*pCmdDr) - VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid command buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        pDmaCmd = VBOXVDMACBUF_DR_TAIL(pCmdDr, VBOXVDMACMD);
    }
    else if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
    {
        /* The DMA command lives in VRAM at the given offset. */
        VBOXVIDEOOFFSET offBuf = pCmdDr->Location.offVramBuf;
        if (offBuf + cbDmaCmd > pVdma->pVGAState->vram_size)
        {
            AssertMsgFailed(("invalid command buffer data from offset!"));
            return VERR_INVALID_PARAMETER;
        }
        pDmaCmd = (VBOXVDMACMD*)(pvRam + offBuf);
    }

    if (pDmaCmd)
    {
        Assert(cbDmaCmd >= VBOXVDMACMD_HEADER_SIZE());
        uint32_t cbBody = VBOXVDMACMD_BODY_SIZE(cbDmaCmd);

        switch (pDmaCmd->enmType)
        {
            case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
            {
                PVBOXVDMACMD_CHROMIUM_CMD pCrCmd = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_CHROMIUM_CMD);
                if (cbBody < sizeof (*pCrCmd))
                {
                    AssertMsgFailed(("invalid chromium command buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }
                PVGASTATE pVGAState = pVdma->pVGAState;
                rc = VINF_SUCCESS;
                if (pVGAState->pDrv->pfnCrHgsmiCommandProcess)
                {
                    /* Hand off to the chromium backend; completion is reported
                     * asynchronously, so mark the descriptor accordingly. */
                    VBoxSHGSMICommandMarkAsynchCompletion(pCmdDr);
                    pVGAState->pDrv->pfnCrHgsmiCommandProcess(pVGAState->pDrv, pCrCmd, cbBody);
                    break;
                }
                else
                {
                    Assert(0);
                }

                /* No backend available: complete the command synchronously. */
                int tmpRc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                AssertRC(tmpRc);
                break;
            }
            case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
            {
                PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
                if (cbBody < sizeof (*pTransfer))
                {
                    AssertMsgFailed(("invalid bpb transfer buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }

                /* Execute the transfer synchronously and complete on success. */
                rc = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, sizeof (*pTransfer));
                AssertRC(rc);
                if (RT_SUCCESS(rc))
                {
                    pCmdDr->rc = VINF_SUCCESS;
                    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                    AssertRC(rc);
                    rc = VINF_SUCCESS;
                }
                break;
            }
            default:
                break;
        }
    }
    return rc;
}
2244
2245int vboxVDMACrHgsmiCommandCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc)
2246{
2247 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2248 PHGSMIINSTANCE pIns = pVGAState->pHGSMI;
2249 VBOXVDMACMD *pDmaHdr = VBOXVDMACMD_FROM_BODY(pCmd);
2250 VBOXVDMACBUF_DR *pDr = VBOXVDMACBUF_DR_FROM_TAIL(pDmaHdr);
2251 AssertRC(rc);
2252 pDr->rc = rc;
2253
2254 Assert(pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
2255 rc = VBoxSHGSMICommandComplete(pIns, pDr);
2256 AssertRC(rc);
2257 return rc;
2258}
2259
2260int vboxVDMACrHgsmiControlCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CTL pCmd, int rc)
2261{
2262 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2263 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pCmdPrivate = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
2264 pCmdPrivate->rc = rc;
2265 if (pCmdPrivate->pfnCompletion)
2266 {
2267 pCmdPrivate->pfnCompletion(pVGAState, pCmd, pCmdPrivate->pvCompletion);
2268 }
2269 return VINF_SUCCESS;
2270}
2271
/**
 * Copies the pixel data of one blit rectangle from a source surface to a
 * destination surface.  Format conversion and stretching are not supported:
 * both surfaces must use the same format and both rectangles the same size.
 *
 * @returns VINF_SUCCESS on success, VERR_INVALID_FUNCTION on format mismatch.
 */
static int vboxVDMACmdExecBltPerform(PVBOXVDMAHOST pVdma, uint8_t *pvDstSurf, const uint8_t *pvSrcSurf,
                                     const PVBOXVDMA_SURF_DESC pDstDesc, const PVBOXVDMA_SURF_DESC pSrcDesc,
                                     const VBOXVDMA_RECTL * pDstRectl, const VBOXVDMA_RECTL * pSrcRectl)
{
    RT_NOREF(pVdma);
    /* we do not support color conversion */
    Assert(pDstDesc->format == pSrcDesc->format);
    /* we do not support stretching */
    Assert(pDstRectl->height == pSrcRectl->height);
    Assert(pDstRectl->width == pSrcRectl->width);
    if (pDstDesc->format != pSrcDesc->format)
        return VERR_INVALID_FUNCTION;
    if (pDstDesc->width == pDstRectl->width
        && pSrcDesc->width == pSrcRectl->width
        && pSrcDesc->width == pDstDesc->width)
    {
        /* Full-width copy: rows are contiguous, so a single memcpy covers
         * the whole rectangle.
         * NOTE(review): the same cbOff (based on pDstRectl->top / dst pitch)
         * is used for both surfaces, i.e. this assumes src and dst share top
         * and pitch in this path - confirm with the callers. */
        Assert(!pDstRectl->left);
        Assert(!pSrcRectl->left);
        uint32_t cbOff = pDstDesc->pitch * pDstRectl->top;
        uint32_t cbSize = pDstDesc->pitch * pDstRectl->height;
        memcpy(pvDstSurf + cbOff, pvSrcSurf + cbOff, cbSize);
    }
    else
    {
        /* General path: copy line by line, honouring per-surface pitch and
         * bit-depth; offsets are computed in whole bytes (bpp may be < 8). */
        uint32_t offDstLineStart = pDstRectl->left * pDstDesc->bpp >> 3;
        uint32_t offDstLineEnd = ((pDstRectl->left * pDstDesc->bpp + 7) >> 3) + ((pDstDesc->bpp * pDstRectl->width + 7) >> 3);
        uint32_t cbDstLine = offDstLineEnd - offDstLineStart;
        uint32_t offDstStart = pDstDesc->pitch * pDstRectl->top + offDstLineStart;
        Assert(cbDstLine <= pDstDesc->pitch);
        uint32_t cbDstSkip = pDstDesc->pitch;
        uint8_t * pvDstStart = pvDstSurf + offDstStart;

        uint32_t offSrcLineStart = pSrcRectl->left * pSrcDesc->bpp >> 3;
# ifdef VBOX_STRICT
        uint32_t offSrcLineEnd = ((pSrcRectl->left * pSrcDesc->bpp + 7) >> 3) + ((pSrcDesc->bpp * pSrcRectl->width + 7) >> 3);
        uint32_t cbSrcLine = offSrcLineEnd - offSrcLineStart;
# endif
        uint32_t offSrcStart = pSrcDesc->pitch * pSrcRectl->top + offSrcLineStart;
        Assert(cbSrcLine <= pSrcDesc->pitch);
        uint32_t cbSrcSkip = pSrcDesc->pitch;
        const uint8_t * pvSrcStart = pvSrcSurf + offSrcStart;

        Assert(cbDstLine == cbSrcLine);

        for (uint32_t i = 0; ; ++i)
        {
            memcpy (pvDstStart, pvSrcStart, cbDstLine);
            /* NOTE(review): the break is checked AFTER the copy, so rows
             * 0..height inclusive are copied, i.e. height+1 lines - confirm
             * whether 'height' here means 'index of the last row'. */
            if (i == pDstRectl->height)
                break;
            pvDstStart += cbDstSkip;
            pvSrcStart += cbSrcSkip;
        }
    }
    return VINF_SUCCESS;
}
2327
2328static void vboxVDMARectlUnite(VBOXVDMA_RECTL * pRectl1, const VBOXVDMA_RECTL * pRectl2)
2329{
2330 if (!pRectl1->width)
2331 *pRectl1 = *pRectl2;
2332 else
2333 {
2334 int16_t x21 = pRectl1->left + pRectl1->width;
2335 int16_t x22 = pRectl2->left + pRectl2->width;
2336 if (pRectl1->left > pRectl2->left)
2337 {
2338 pRectl1->left = pRectl2->left;
2339 pRectl1->width = x21 < x22 ? x22 - pRectl1->left : x21 - pRectl1->left;
2340 }
2341 else if (x21 < x22)
2342 pRectl1->width = x22 - pRectl1->left;
2343
2344 x21 = pRectl1->top + pRectl1->height;
2345 x22 = pRectl2->top + pRectl2->height;
2346 if (pRectl1->top > pRectl2->top)
2347 {
2348 pRectl1->top = pRectl2->top;
2349 pRectl1->height = x21 < x22 ? x22 - pRectl1->top : x21 - pRectl1->top;
2350 }
2351 else if (x21 < x22)
2352 pRectl1->height = x22 - pRectl1->top;
2353 }
2354}
2355
2356/*
2357 * @return on success the number of bytes the command contained, otherwise - VERR_xxx error code
2358 */
2359static int vboxVDMACmdExecBlt(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt, uint32_t cbBuffer)
2360{
2361 const uint32_t cbBlt = VBOXVDMACMD_BODY_FIELD_OFFSET(uint32_t, VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects[pBlt->cDstSubRects]);
2362 Assert(cbBlt <= cbBuffer);
2363 if (cbBuffer < cbBlt)
2364 return VERR_INVALID_FUNCTION;
2365
2366 /* we do not support stretching for now */
2367 Assert(pBlt->srcRectl.width == pBlt->dstRectl.width);
2368 Assert(pBlt->srcRectl.height == pBlt->dstRectl.height);
2369 if (pBlt->srcRectl.width != pBlt->dstRectl.width)
2370 return VERR_INVALID_FUNCTION;
2371 if (pBlt->srcRectl.height != pBlt->dstRectl.height)
2372 return VERR_INVALID_FUNCTION;
2373 Assert(pBlt->cDstSubRects);
2374
2375 uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
2376 VBOXVDMA_RECTL updateRectl = {0, 0, 0, 0};
2377
2378 if (pBlt->cDstSubRects)
2379 {
2380 VBOXVDMA_RECTL dstRectl, srcRectl;
2381 const VBOXVDMA_RECTL *pDstRectl, *pSrcRectl;
2382 for (uint32_t i = 0; i < pBlt->cDstSubRects; ++i)
2383 {
2384 pDstRectl = &pBlt->aDstSubRects[i];
2385 if (pBlt->dstRectl.left || pBlt->dstRectl.top)
2386 {
2387 dstRectl.left = pDstRectl->left + pBlt->dstRectl.left;
2388 dstRectl.top = pDstRectl->top + pBlt->dstRectl.top;
2389 dstRectl.width = pDstRectl->width;
2390 dstRectl.height = pDstRectl->height;
2391 pDstRectl = &dstRectl;
2392 }
2393
2394 pSrcRectl = &pBlt->aDstSubRects[i];
2395 if (pBlt->srcRectl.left || pBlt->srcRectl.top)
2396 {
2397 srcRectl.left = pSrcRectl->left + pBlt->srcRectl.left;
2398 srcRectl.top = pSrcRectl->top + pBlt->srcRectl.top;
2399 srcRectl.width = pSrcRectl->width;
2400 srcRectl.height = pSrcRectl->height;
2401 pSrcRectl = &srcRectl;
2402 }
2403
2404 int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
2405 &pBlt->dstDesc, &pBlt->srcDesc,
2406 pDstRectl,
2407 pSrcRectl);
2408 AssertRC(rc);
2409 if (!RT_SUCCESS(rc))
2410 return rc;
2411
2412 vboxVDMARectlUnite(&updateRectl, pDstRectl);
2413 }
2414 }
2415 else
2416 {
2417 int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
2418 &pBlt->dstDesc, &pBlt->srcDesc,
2419 &pBlt->dstRectl,
2420 &pBlt->srcRectl);
2421 AssertRC(rc);
2422 if (!RT_SUCCESS(rc))
2423 return rc;
2424
2425 vboxVDMARectlUnite(&updateRectl, &pBlt->dstRectl);
2426 }
2427
2428 return cbBlt;
2429}
2430
2431static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer)
2432{
2433 if (cbBuffer < sizeof (*pTransfer))
2434 return VERR_INVALID_PARAMETER;
2435
2436 PVGASTATE pVGAState = pVdma->pVGAState;
2437 uint8_t * pvRam = pVGAState->vram_ptrR3;
2438 PGMPAGEMAPLOCK SrcLock;
2439 PGMPAGEMAPLOCK DstLock;
2440 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
2441 const void * pvSrc;
2442 void * pvDst;
2443 int rc = VINF_SUCCESS;
2444 uint32_t cbTransfer = pTransfer->cbTransferSize;
2445 uint32_t cbTransfered = 0;
2446 bool bSrcLocked = false;
2447 bool bDstLocked = false;
2448 do
2449 {
2450 uint32_t cbSubTransfer = cbTransfer;
2451 if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
2452 {
2453 pvSrc = pvRam + pTransfer->Src.offVramBuf + cbTransfered;
2454 }
2455 else
2456 {
2457 RTGCPHYS phPage = pTransfer->Src.phBuf;
2458 phPage += cbTransfered;
2459 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvSrc, &SrcLock);
2460 AssertRC(rc);
2461 if (RT_SUCCESS(rc))
2462 {
2463 bSrcLocked = true;
2464 cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
2465 }
2466 else
2467 {
2468 break;
2469 }
2470 }
2471
2472 if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
2473 {
2474 pvDst = pvRam + pTransfer->Dst.offVramBuf + cbTransfered;
2475 }
2476 else
2477 {
2478 RTGCPHYS phPage = pTransfer->Dst.phBuf;
2479 phPage += cbTransfered;
2480 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvDst, &DstLock);
2481 AssertRC(rc);
2482 if (RT_SUCCESS(rc))
2483 {
2484 bDstLocked = true;
2485 cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
2486 }
2487 else
2488 {
2489 break;
2490 }
2491 }
2492
2493 if (RT_SUCCESS(rc))
2494 {
2495 memcpy(pvDst, pvSrc, cbSubTransfer);
2496 cbTransfer -= cbSubTransfer;
2497 cbTransfered += cbSubTransfer;
2498 }
2499 else
2500 {
2501 cbTransfer = 0; /* to break */
2502 }
2503
2504 if (bSrcLocked)
2505 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &SrcLock);
2506 if (bDstLocked)
2507 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &DstLock);
2508 } while (cbTransfer);
2509
2510 if (RT_SUCCESS(rc))
2511 return sizeof (*pTransfer);
2512 return rc;
2513}
2514
2515static int vboxVDMACmdExec(PVBOXVDMAHOST pVdma, const uint8_t *pvBuffer, uint32_t cbBuffer)
2516{
2517 do
2518 {
2519 Assert(pvBuffer);
2520 Assert(cbBuffer >= VBOXVDMACMD_HEADER_SIZE());
2521
2522 if (!pvBuffer)
2523 return VERR_INVALID_PARAMETER;
2524 if (cbBuffer < VBOXVDMACMD_HEADER_SIZE())
2525 return VERR_INVALID_PARAMETER;
2526
2527 PVBOXVDMACMD pCmd = (PVBOXVDMACMD)pvBuffer;
2528 switch (pCmd->enmType)
2529 {
2530 case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
2531 {
2532# ifdef VBOXWDDM_TEST_UHGSMI
2533 static int count = 0;
2534 static uint64_t start, end;
2535 if (count==0)
2536 {
2537 start = RTTimeNanoTS();
2538 }
2539 ++count;
2540 if (count==100000)
2541 {
2542 end = RTTimeNanoTS();
2543 float ems = (end-start)/1000000.f;
2544 LogRel(("100000 calls took %i ms, %i cps\n", (int)ems, (int)(100000.f*1000.f/ems) ));
2545 }
2546# endif
2547 /* todo: post the buffer to chromium */
2548 return VINF_SUCCESS;
2549 }
2550 case VBOXVDMACMD_TYPE_DMA_PRESENT_BLT:
2551 {
2552 const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_PRESENT_BLT);
2553 int cbBlt = vboxVDMACmdExecBlt(pVdma, pBlt, cbBuffer);
2554 Assert(cbBlt >= 0);
2555 Assert((uint32_t)cbBlt <= cbBuffer);
2556 if (cbBlt >= 0)
2557 {
2558 if ((uint32_t)cbBlt == cbBuffer)
2559 return VINF_SUCCESS;
2560 else
2561 {
2562 cbBuffer -= (uint32_t)cbBlt;
2563 pvBuffer -= cbBlt;
2564 }
2565 }
2566 else
2567 return cbBlt; /* error */
2568 break;
2569 }
2570 case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
2571 {
2572 const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
2573 int cbTransfer = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, cbBuffer);
2574 Assert(cbTransfer >= 0);
2575 Assert((uint32_t)cbTransfer <= cbBuffer);
2576 if (cbTransfer >= 0)
2577 {
2578 if ((uint32_t)cbTransfer == cbBuffer)
2579 return VINF_SUCCESS;
2580 else
2581 {
2582 cbBuffer -= (uint32_t)cbTransfer;
2583 pvBuffer -= cbTransfer;
2584 }
2585 }
2586 else
2587 return cbTransfer; /* error */
2588 break;
2589 }
2590 case VBOXVDMACMD_TYPE_DMA_NOP:
2591 return VINF_SUCCESS;
2592 case VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ:
2593 return VINF_SUCCESS;
2594 default:
2595 AssertBreakpoint();
2596 return VERR_INVALID_FUNCTION;
2597 }
2598 } while (1);
2599
2600 /* we should not be here */
2601 AssertBreakpoint();
2602 return VERR_INVALID_STATE;
2603}
2604
/**
 * VDMA worker thread loop: pulls commands and controls from the VBVA
 * ex-host context and processes them until termination is requested.
 *
 * @returns VINF_SUCCESS.
 */
static DECLCALLBACK(int) vboxVDMAWorkerThread(RTTHREAD hThreadSelf, void *pvUser)
{
    RT_NOREF(hThreadSelf);
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvUser;
    PVGASTATE pVGAState = pVdma->pVGAState;
    VBVAEXHOSTCONTEXT *pCmdVbva = &pVdma->CmdVbva;
    uint8_t *pCmd;
    uint32_t cbCmd;
    int rc;

    VBoxVDMAThreadNotifyConstructSucceeded(&pVdma->Thread, pvUser);

    while (!VBoxVDMAThreadIsTerminating(&pVdma->Thread))
    {
        VBVAEXHOST_DATA_TYPE enmType = VBoxVBVAExHPDataGet(pCmdVbva, &pCmd, &cbCmd);
        switch (enmType)
        {
            case VBVAEXHOST_DATA_TYPE_CMD:
                /* Regular command: process, complete, and signal the guest. */
                vboxVDMACrCmdProcess(pVdma, pCmd, cbCmd);
                VBoxVBVAExHPDataCompleteCmd(pCmdVbva, cbCmd);
                VBVARaiseIrq(pVGAState, 0);
                break;
            case VBVAEXHOST_DATA_TYPE_GUESTCTL:
                rc = vboxVDMACrGuestCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd);
                VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
                break;
            case VBVAEXHOST_DATA_TYPE_HOSTCTL:
            {
                bool fContinue = true;
                rc = vboxVDMACrHostCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd, &fContinue);
                VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
                if (fContinue)
                    break;
            }
            /* fall thru: a host control that asks us to stop processing parks
             * the thread exactly like the no-data case. */
            case VBVAEXHOST_DATA_TYPE_NO_DATA:
                rc = VBoxVDMAThreadEventWait(&pVdma->Thread, RT_INDEFINITE_WAIT);
                AssertRC(rc);
                break;
            default:
                WARN(("unexpected type %d\n", enmType));
                break;
        }
    }

    VBoxVDMAThreadNotifyTerminatingSucceeded(&pVdma->Thread, pvUser);

    return VINF_SUCCESS;
}
2653
/**
 * Resolves the command buffer location described by @a pCmd, executes the
 * contained VDMA commands, and completes the descriptor over SHGSMI.
 */
static void vboxVDMACommandProcess(PVBOXVDMAHOST pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    RT_NOREF(cbCmd);
    PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
    const uint8_t * pvBuf;
    PGMPAGEMAPLOCK Lock;
    int rc;
    bool bReleaseLocked = false;

    do
    {
        PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;

        if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
            /* Buffer is embedded directly after the descriptor. */
            pvBuf = VBOXVDMACBUF_DR_TAIL(pCmd, const uint8_t);
        else if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
        {
            /* Buffer lives in VRAM at the given offset. */
            uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
            pvBuf = pvRam + pCmd->Location.offVramBuf;
        }
        else
        {
            /* Buffer is in guest physical memory; it must fit in one page. */
            RTGCPHYS phPage = pCmd->Location.phBuf & ~0xfffULL;
            uint32_t offset = pCmd->Location.phBuf & 0xfff;
            Assert(offset + pCmd->cbBuf <= 0x1000);
            if (offset + pCmd->cbBuf > 0x1000)
            {
                /* @todo: more advanced mechanism of command buffer proc is actually needed */
                rc = VERR_INVALID_PARAMETER;
                break;
            }

            const void * pvPageBuf;
            rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPageBuf, &Lock);
            AssertRC(rc);
            if (!RT_SUCCESS(rc))
            {
                /* @todo: if (rc == VERR_PGM_PHYS_PAGE_RESERVED) -> fall back on using PGMPhysRead ?? */
                break;
            }

            pvBuf = (const uint8_t *)pvPageBuf;
            pvBuf += offset;

            bReleaseLocked = true;
        }

        rc = vboxVDMACmdExec(pVdma, pvBuf, pCmd->cbBuf);
        AssertRC(rc);

        /* Drop the page mapping lock taken for the guest-physical case. */
        if (bReleaseLocked)
            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
    } while (0);

    pCmd->rc = rc;

    rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
    AssertRC(rc);
}
2713
# if 0 /** @todo vboxVDMAControlProcess is unused */
/** Completes a VDMA control request with VINF_SUCCESS (currently compiled out). */
static void vboxVDMAControlProcess(PVBOXVDMAHOST pVdma, PVBOXVDMA_CTL pCmd)
{
    PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
    pCmd->i32Result = VINF_SUCCESS;
    int rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
    AssertRC(rc);
}
# endif
2723
2724#endif /* VBOX_WITH_CRHGSMI */
2725#ifdef VBOX_VDMA_WITH_WATCHDOG
2726
2727static DECLCALLBACK(void) vboxVDMAWatchDogTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
2728{
2729 VBOXVDMAHOST *pVdma = (VBOXVDMAHOST *)pvUser;
2730 PVGASTATE pVGAState = pVdma->pVGAState;
2731 VBVARaiseIrq(pVGAState, HGSMIHOSTFLAGS_WATCHDOG);
2732}
2733
2734static int vboxVDMAWatchDogCtl(struct VBOXVDMAHOST *pVdma, uint32_t cMillis)
2735{
2736 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
2737 if (cMillis)
2738 TMTimerSetMillies(pVdma->WatchDogTimer, cMillis);
2739 else
2740 TMTimerStop(pVdma->WatchDogTimer);
2741 return VINF_SUCCESS;
2742}
2743
2744#endif /* VBOX_VDMA_WITH_WATCHDOG */
2745
/**
 * Constructs the VDMA host state and attaches it to the VGA device state.
 *
 * @returns VBox status code.
 * @param   pVGAState       The VGA device state to attach to.
 * @param   cPipeElements   Unused (historical pipe depth).
 */
int vboxVDMAConstruct(PVGASTATE pVGAState, uint32_t cPipeElements)
{
    RT_NOREF(cPipeElements);
    int rc;
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)RTMemAllocZ(sizeof(*pVdma));
    Assert(pVdma);
    if (pVdma)
    {
        pVdma->pHgsmi = pVGAState->pHGSMI;
        pVdma->pVGAState = pVGAState;

#ifdef VBOX_VDMA_WITH_WATCHDOG
        /* The watchdog timer is best-effort; construction continues even if
         * its creation fails (only asserted). */
        rc = PDMDevHlpTMTimerCreate(pVGAState->pDevInsR3, TMCLOCK_REAL, vboxVDMAWatchDogTimer,
                                    pVdma, TMTIMER_FLAGS_NO_CRIT_SECT,
                                    "VDMA WatchDog Timer", &pVdma->WatchDogTimer);
        AssertRC(rc);
#endif

#ifdef VBOX_WITH_CRHGSMI
        VBoxVDMAThreadInit(&pVdma->Thread);

        /* Set up the completion event, the command VBVA context and the
         * callout critical section; unwind in reverse order on failure. */
        rc = RTSemEventMultiCreate(&pVdma->HostCrCtlCompleteEvent);
        if (RT_SUCCESS(rc))
        {
            rc = VBoxVBVAExHSInit(&pVdma->CmdVbva);
            if (RT_SUCCESS(rc))
            {
                rc = RTCritSectInit(&pVdma->CalloutCritSect);
                if (RT_SUCCESS(rc))
                {
                    pVGAState->pVdma = pVdma;
                    int rcIgnored = vboxVDMACrCtlHgsmiSetup(pVdma); NOREF(rcIgnored); /** @todo is this ignoring intentional? */
                    return VINF_SUCCESS;
                }
                WARN(("RTCritSectInit failed %d\n", rc));

                VBoxVBVAExHSTerm(&pVdma->CmdVbva);
            }
            else
                WARN(("VBoxVBVAExHSInit failed %d\n", rc));

            RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
        }
        else
            WARN(("RTSemEventMultiCreate failed %d\n", rc));


        RTMemFree(pVdma);
#else
        pVGAState->pVdma = pVdma;
        return VINF_SUCCESS;
#endif
    }
    else
        rc = VERR_OUT_OF_RESOURCES;

    return rc;
}
2804
/**
 * Resets the VDMA state on VM reset: synchronously disables command VBVA
 * processing when chromium HGSMI support is compiled in.
 *
 * @returns VINF_SUCCESS.
 */
int vboxVDMAReset(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    vdmaVBVACtlDisableSync(pVdma);
#else
    RT_NOREF(pVdma);
#endif
    return VINF_SUCCESS;
}
2814
/**
 * Destroys the VDMA host state: disables command VBVA processing, tears down
 * the worker thread and synchronization objects, and frees the instance.
 *
 * @returns VINF_SUCCESS.  A NULL @a pVdma is tolerated.
 */
int vboxVDMADestruct(struct VBOXVDMAHOST *pVdma)
{
    if (!pVdma)
        return VINF_SUCCESS;
#ifdef VBOX_WITH_CRHGSMI
    vdmaVBVACtlDisableSync(pVdma);
    VBoxVDMAThreadCleanup(&pVdma->Thread);
    VBoxVBVAExHSTerm(&pVdma->CmdVbva);
    RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
    RTCritSectDelete(&pVdma->CalloutCritSect);
#endif
    RTMemFree(pVdma);
    return VINF_SUCCESS;
}
2829
2830void vboxVDMAControl(struct VBOXVDMAHOST *pVdma, PVBOXVDMA_CTL pCmd, uint32_t cbCmd)
2831{
2832 RT_NOREF(cbCmd);
2833 PHGSMIINSTANCE pIns = pVdma->pHgsmi;
2834
2835 switch (pCmd->enmCtl)
2836 {
2837 case VBOXVDMA_CTL_TYPE_ENABLE:
2838 pCmd->i32Result = VINF_SUCCESS;
2839 break;
2840 case VBOXVDMA_CTL_TYPE_DISABLE:
2841 pCmd->i32Result = VINF_SUCCESS;
2842 break;
2843 case VBOXVDMA_CTL_TYPE_FLUSH:
2844 pCmd->i32Result = VINF_SUCCESS;
2845 break;
2846#ifdef VBOX_VDMA_WITH_WATCHDOG
2847 case VBOXVDMA_CTL_TYPE_WATCHDOG:
2848 pCmd->i32Result = vboxVDMAWatchDogCtl(pVdma, pCmd->u32Offset);
2849 break;
2850#endif
2851 default:
2852 WARN(("cmd not supported"));
2853 pCmd->i32Result = VERR_NOT_SUPPORTED;
2854 }
2855
2856 int rc = VBoxSHGSMICommandComplete (pIns, pCmd);
2857 AssertRC(rc);
2858}
2859
/**
 * Handles a guest VBOXVDMACBUF_DR command submission from HGSMI.
 *
 * Chromium commands are dispatched to the chromium HGCM thread via
 * vboxVDMACmdCheckCrCmd(); everything else is executed synchronously via
 * vboxVDMACommandProcess().  The command is always completed, immediately so
 * on error or when CRHGSMI support is not compiled in.
 */
void vboxVDMACommand(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    int rc = VERR_NOT_IMPLEMENTED;

#ifdef VBOX_WITH_CRHGSMI
    /* chromium commands are processed by crhomium hgcm thread independently from our internal cmd processing pipeline
     * this is why we process them specially */
    rc = vboxVDMACmdCheckCrCmd(pVdma, pCmd, cbCmd);
    if (rc == VINF_SUCCESS)
        return;

    if (RT_FAILURE(rc))
    {
        pCmd->rc = rc;
        rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
        AssertRC(rc);
        return;
    }

    /* VINF_NOT_SUPPORTED: run through the regular command pipeline. */
    vboxVDMACommandProcess(pVdma, pCmd, cbCmd);
#else
    RT_NOREF(cbCmd);
    pCmd->rc = rc;
    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
    AssertRC(rc);
#endif
}
2887
2888#ifdef VBOX_WITH_CRHGSMI
2889
2890static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext);
2891
2892static int vdmaVBVACtlSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2893{
2894 int rc = VBoxVBVAExHCtlSubmit(&pVdma->CmdVbva, pCtl, enmSource, pfnComplete, pvComplete);
2895 if (RT_SUCCESS(rc))
2896 {
2897 if (rc == VINF_SUCCESS)
2898 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
2899 else
2900 Assert(rc == VINF_ALREADY_INITIALIZED);
2901 }
2902 else
2903 Log(("VBoxVBVAExHCtlSubmit failed %d\n", rc));
2904
2905 return rc;
2906}
2907
2908static DECLCALLBACK(void) vboxCmdVBVACmdCtlGuestCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext)
2909{
2910 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvContext;
2911 VBOXCMDVBVA_CTL *pGCtl = (VBOXCMDVBVA_CTL*)(pCtl->u.cmd.pu8Cmd - sizeof (VBOXCMDVBVA_CTL));
2912 AssertRC(rc);
2913 pGCtl->i32Result = rc;
2914
2915 Assert(pVdma->pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
2916 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pGCtl);
2917 AssertRC(rc);
2918
2919 VBoxVBVAExHCtlFree(pVbva, pCtl);
2920}
2921
2922static int vdmaVBVACtlGenericSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_SOURCE enmSource, VBVAEXHOSTCTL_TYPE enmType, uint8_t* pu8Cmd, uint32_t cbCmd, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2923{
2924 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, enmType);
2925 if (!pHCtl)
2926 {
2927 WARN(("VBoxVBVAExHCtlCreate failed\n"));
2928 return VERR_NO_MEMORY;
2929 }
2930
2931 pHCtl->u.cmd.pu8Cmd = pu8Cmd;
2932 pHCtl->u.cmd.cbCmd = cbCmd;
2933 int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, enmSource, pfnComplete, pvComplete);
2934 if (RT_FAILURE(rc))
2935 {
2936 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
2937 Log(("vdmaVBVACtlSubmit failed rc %d\n", rc));
2938 return rc;;
2939 }
2940 return VINF_SUCCESS;
2941}
2942
2943static int vdmaVBVACtlGenericGuestSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_TYPE enmType, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
2944{
2945 Assert(cbCtl >= sizeof (VBOXCMDVBVA_CTL));
2946 VBoxSHGSMICommandMarkAsynchCompletion(pCtl);
2947 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_GUEST, enmType, (uint8_t*)(pCtl+1), cbCtl - sizeof (VBOXCMDVBVA_CTL), vboxCmdVBVACmdCtlGuestCompletion, pVdma);
2948 if (RT_SUCCESS(rc))
2949 return VINF_SUCCESS;
2950
2951 WARN(("vdmaVBVACtlGenericSubmit failed %d\n", rc));
2952 pCtl->i32Result = rc;
2953 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
2954 AssertRC(rc);
2955 return VINF_SUCCESS;
2956}
2957
2958static DECLCALLBACK(void) vboxCmdVBVACmdCtlHostCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvCompletion)
2959{
2960 VBOXCRCMDCTL* pVboxCtl = (VBOXCRCMDCTL*)pCtl->u.cmd.pu8Cmd;
2961 if (pVboxCtl->u.pfnInternal)
2962 ((PFNCRCTLCOMPLETION)pVboxCtl->u.pfnInternal)(pVboxCtl, pCtl->u.cmd.cbCmd, rc, pvCompletion);
2963 VBoxVBVAExHCtlFree(pVbva, pCtl);
2964}
2965
2966static int vdmaVBVACtlOpaqueHostSubmit(PVBOXVDMAHOST pVdma, struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
2967 PFNCRCTLCOMPLETION pfnCompletion,
2968 void *pvCompletion)
2969{
2970 pCmd->u.pfnInternal = (void(*)())pfnCompletion;
2971 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_HOST, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, (uint8_t*)pCmd, cbCmd, vboxCmdVBVACmdCtlHostCompletion, pvCompletion);
2972 if (RT_FAILURE(rc))
2973 {
2974 if (rc == VERR_INVALID_STATE)
2975 {
2976 pCmd->u.pfnInternal = NULL;
2977 PVGASTATE pVGAState = pVdma->pVGAState;
2978 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCmd, cbCmd, pfnCompletion, pvCompletion);
2979 if (!RT_SUCCESS(rc))
2980 WARN(("pfnCrHgsmiControlProcess failed %d\n", rc));
2981
2982 return rc;
2983 }
2984 WARN(("vdmaVBVACtlGenericSubmit failed %d\n", rc));
2985 return rc;
2986 }
2987
2988 return VINF_SUCCESS;
2989}
2990
2991static DECLCALLBACK(int) vdmaVBVANotifyEnable(PVGASTATE pVGAState)
2992{
2993 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
2994 {
2995 int rc = pVGAState->pDrv->pfnVBVAEnable (pVGAState->pDrv, i, NULL, true);
2996 if (!RT_SUCCESS(rc))
2997 {
2998 WARN(("pfnVBVAEnable failed %d\n", rc));
2999 for (uint32_t j = 0; j < i; j++)
3000 {
3001 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, j);
3002 }
3003
3004 return rc;
3005 }
3006 }
3007 return VINF_SUCCESS;
3008}
3009
3010static DECLCALLBACK(int) vdmaVBVANotifyDisable(PVGASTATE pVGAState)
3011{
3012 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
3013 {
3014 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, i);
3015 }
3016 return VINF_SUCCESS;
3017}
3018
/**
 * Worker-thread-creation callback used by vdmaVBVACtlEnableSubmitInternal():
 * processes the queued enable/disable control once the thread is up and
 * notifies Main of the resulting VBVA state.
 */
static DECLCALLBACK(void) vdmaVBVACtlThreadCreatedEnable(struct VBOXVDMATHREAD *pThread, int rc,
                                                  void *pvThreadContext, void *pvContext)
{
    RT_NOREF(pThread);
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvThreadContext;
    VBVAEXHOSTCTL* pHCtl = (VBVAEXHOSTCTL*)pvContext;

    if (RT_SUCCESS(rc))
    {
        rc = vboxVDMACrGuestCtlProcess(pVdma, pHCtl);
        /* rc == VINF_SUCCESS would mean the actual state change has occurred */
        if (rc == VINF_SUCCESS)
        {
            /* we need to inform Main about VBVA enable/disable
             * main expects notifications to be done from the main thread
             * submit it there */
            PVGASTATE pVGAState = pVdma->pVGAState;

            if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
                vdmaVBVANotifyEnable(pVGAState);
            else
                vdmaVBVANotifyDisable(pVGAState);
        }
        else if (RT_FAILURE(rc))
            WARN(("vboxVDMACrGuestCtlProcess failed %d\n", rc));
    }
    else
        WARN(("vdmaVBVACtlThreadCreatedEnable is passed %d\n", rc));

    /* Complete the control with the final status in all cases. */
    VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pHCtl, rc);
}
3050
3051static int vdmaVBVACtlEnableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, bool fPaused, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3052{
3053 int rc;
3054 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, fPaused ? VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED : VBVAEXHOSTCTL_TYPE_GHH_ENABLE);
3055 if (pHCtl)
3056 {
3057 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
3058 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
3059 pHCtl->pfnComplete = pfnComplete;
3060 pHCtl->pvComplete = pvComplete;
3061
3062 rc = VBoxVDMAThreadCreate(&pVdma->Thread, vboxVDMAWorkerThread, pVdma, vdmaVBVACtlThreadCreatedEnable, pHCtl);
3063 if (RT_SUCCESS(rc))
3064 return VINF_SUCCESS;
3065 else
3066 WARN(("VBoxVDMAThreadCreate failed %d\n", rc));
3067
3068 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3069 }
3070 else
3071 {
3072 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3073 rc = VERR_NO_MEMORY;
3074 }
3075
3076 return rc;
3077}
3078
3079static int vdmaVBVACtlEnableSubmitSync(PVBOXVDMAHOST pVdma, uint32_t offVram, bool fPaused)
3080{
3081 VBVAENABLE Enable = {0};
3082 Enable.u32Flags = VBVA_F_ENABLE;
3083 Enable.u32Offset = offVram;
3084
3085 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
3086 Data.rc = VERR_NOT_IMPLEMENTED;
3087 int rc = RTSemEventCreate(&Data.hEvent);
3088 if (!RT_SUCCESS(rc))
3089 {
3090 WARN(("RTSemEventCreate failed %d\n", rc));
3091 return rc;
3092 }
3093
3094 rc = vdmaVBVACtlEnableSubmitInternal(pVdma, &Enable, fPaused, vdmaVBVACtlSubmitSyncCompletion, &Data);
3095 if (RT_SUCCESS(rc))
3096 {
3097 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
3098 if (RT_SUCCESS(rc))
3099 {
3100 rc = Data.rc;
3101 if (!RT_SUCCESS(rc))
3102 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
3103 }
3104 else
3105 WARN(("RTSemEventWait failed %d\n", rc));
3106 }
3107 else
3108 WARN(("vdmaVBVACtlSubmit failed %d\n", rc));
3109
3110 RTSemEventDestroy(Data.hEvent);
3111
3112 return rc;
3113}
3114
3115static int vdmaVBVACtlDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3116{
3117 int rc;
3118 VBVAEXHOSTCTL* pHCtl;
3119 if (VBoxVBVAExHSIsDisabled(&pVdma->CmdVbva))
3120 {
3121 WARN(("VBoxVBVAExHSIsDisabled: disabled"));
3122 return VINF_SUCCESS;
3123 }
3124
3125 pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_GHH_DISABLE);
3126 if (!pHCtl)
3127 {
3128 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3129 return VERR_NO_MEMORY;
3130 }
3131
3132 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
3133 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
3134 rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_GUEST, pfnComplete, pvComplete);
3135 if (RT_SUCCESS(rc))
3136 return VINF_SUCCESS;
3137
3138 WARN(("vdmaVBVACtlSubmit failed rc %d\n", rc));
3139 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3140 return rc;
3141}
3142
3143static int vdmaVBVACtlEnableDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3144{
3145 bool fEnable = ((pEnable->u32Flags & (VBVA_F_ENABLE | VBVA_F_DISABLE)) == VBVA_F_ENABLE);
3146 if (fEnable)
3147 return vdmaVBVACtlEnableSubmitInternal(pVdma, pEnable, false, pfnComplete, pvComplete);
3148 return vdmaVBVACtlDisableSubmitInternal(pVdma, pEnable, pfnComplete, pvComplete);
3149}
3150
3151static int vdmaVBVACtlEnableDisableSubmit(PVBOXVDMAHOST pVdma, VBOXCMDVBVA_CTL_ENABLE *pEnable)
3152{
3153 VBoxSHGSMICommandMarkAsynchCompletion(&pEnable->Hdr);
3154 int rc = vdmaVBVACtlEnableDisableSubmitInternal(pVdma, &pEnable->Enable, vboxCmdVBVACmdCtlGuestCompletion, pVdma);
3155 if (RT_SUCCESS(rc))
3156 return VINF_SUCCESS;
3157
3158 WARN(("vdmaVBVACtlEnableDisableSubmitInternal failed %d\n", rc));
3159 pEnable->Hdr.i32Result = rc;
3160 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, &pEnable->Hdr);
3161 AssertRC(rc);
3162 return VINF_SUCCESS;
3163}
3164
3165static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
3166 int rc, void *pvContext)
3167{
3168 RT_NOREF(pVbva, pCtl);
3169 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvContext;
3170 pData->rc = rc;
3171 rc = RTSemEventSignal(pData->hEvent);
3172 if (!RT_SUCCESS(rc))
3173 WARN(("RTSemEventSignal failed %d\n", rc));
3174}
3175
3176static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource)
3177{
3178 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
3179 Data.rc = VERR_NOT_IMPLEMENTED;
3180 int rc = RTSemEventCreate(&Data.hEvent);
3181 if (!RT_SUCCESS(rc))
3182 {
3183 WARN(("RTSemEventCreate failed %d\n", rc));
3184 return rc;
3185 }
3186
3187 rc = vdmaVBVACtlSubmit(pVdma, pCtl, enmSource, vdmaVBVACtlSubmitSyncCompletion, &Data);
3188 if (RT_SUCCESS(rc))
3189 {
3190 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
3191 if (RT_SUCCESS(rc))
3192 {
3193 rc = Data.rc;
3194 if (!RT_SUCCESS(rc))
3195 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
3196 }
3197 else
3198 WARN(("RTSemEventWait failed %d\n", rc));
3199 }
3200 else
3201 Log(("vdmaVBVACtlSubmit failed %d\n", rc));
3202
3203 RTSemEventDestroy(Data.hEvent);
3204
3205 return rc;
3206}
3207
3208static int vdmaVBVAPause(PVBOXVDMAHOST pVdma)
3209{
3210 VBVAEXHOSTCTL Ctl;
3211 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE;
3212 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
3213}
3214
3215static int vdmaVBVAResume(PVBOXVDMAHOST pVdma)
3216{
3217 VBVAEXHOSTCTL Ctl;
3218 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME;
3219 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
3220}
3221
3222static int vboxVDMACmdSubmitPerform(struct VBOXVDMAHOST *pVdma)
3223{
3224 int rc = VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva);
3225 switch (rc)
3226 {
3227 case VINF_SUCCESS:
3228 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
3229 case VINF_ALREADY_INITIALIZED:
3230 case VINF_EOF:
3231 case VERR_INVALID_STATE:
3232 return VINF_SUCCESS;
3233 default:
3234 Assert(!RT_FAILURE(rc));
3235 return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
3236 }
3237}
3238
3239
3240int vboxCmdVBVACmdHostCtl(PPDMIDISPLAYVBVACALLBACKS pInterface,
3241 struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
3242 PFNCRCTLCOMPLETION pfnCompletion,
3243 void *pvCompletion)
3244{
3245 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
3246 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3247 if (pVdma == NULL)
3248 return VERR_INVALID_STATE;
3249 pCmd->CalloutList.List.pNext = NULL;
3250 return vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, pfnCompletion, pvCompletion);
3251}
3252
/** Completion context used by vboxCmdVBVACmdHostCtlSync to wait for a host
 *  control command submitted via vdmaVBVACtlOpaqueHostSubmit. */
typedef struct VBOXCMDVBVA_CMDHOSTCTL_SYNC
{
    struct VBOXVDMAHOST *pVdma; /**< The VDMA host instance owning HostCrCtlCompleteEvent. */
    uint32_t fProcessing;       /**< 1 while the command is in flight; cleared by vboxCmdVBVACmdHostCtlSyncCb. */
    int rc;                     /**< Completion status reported by the callback. */
} VBOXCMDVBVA_CMDHOSTCTL_SYNC;
3259
/**
 * Completion callback for vboxCmdVBVACmdHostCtlSync: records the status,
 * clears the in-flight flag and wakes the polling waiter.
 *
 * NOTE(review): the waiter may return (and free pvCompletion) as soon as
 * fProcessing is observed as 0; the statement order below therefore matters.
 */
static DECLCALLBACK(void) vboxCmdVBVACmdHostCtlSyncCb(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
{
    RT_NOREF(pCmd, cbCmd);
    VBOXCMDVBVA_CMDHOSTCTL_SYNC *pData = (VBOXCMDVBVA_CMDHOSTCTL_SYNC*)pvCompletion;

    /* Publish the command status before clearing the in-flight flag. */
    pData->rc = rc;

    struct VBOXVDMAHOST *pVdma = pData->pVdma;

    /* Count this completion so the waiter knows whether to reset the event. */
    ASMAtomicIncS32(&pVdma->i32cHostCrCtlCompleted);

    pData->fProcessing = 0;

    RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
}
3275
3276static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd, VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb)
3277{
3278 pEntry->pfnCb = pfnCb;
3279 int rc = RTCritSectEnter(&pVdma->CalloutCritSect);
3280 if (RT_SUCCESS(rc))
3281 {
3282 RTListAppend(&pCmd->CalloutList.List, &pEntry->Node);
3283 RTCritSectLeave(&pVdma->CalloutCritSect);
3284
3285 RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
3286 }
3287 else
3288 WARN(("RTCritSectEnter failed %d\n", rc));
3289
3290 return rc;
3291}
3292
3293
3294static int vboxCmdVBVACmdCalloutProcess(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd)
3295{
3296 int rc = VINF_SUCCESS;
3297 for (;;)
3298 {
3299 rc = RTCritSectEnter(&pVdma->CalloutCritSect);
3300 if (RT_SUCCESS(rc))
3301 {
3302 VBOXCRCMDCTL_CALLOUT_LISTENTRY* pEntry = RTListGetFirst(&pCmd->CalloutList.List, VBOXCRCMDCTL_CALLOUT_LISTENTRY, Node);
3303 if (pEntry)
3304 RTListNodeRemove(&pEntry->Node);
3305 RTCritSectLeave(&pVdma->CalloutCritSect);
3306
3307 if (!pEntry)
3308 break;
3309
3310 pEntry->pfnCb(pEntry);
3311 }
3312 else
3313 {
3314 WARN(("RTCritSectEnter failed %d\n", rc));
3315 break;
3316 }
3317 }
3318
3319 return rc;
3320}
3321
/**
 * Submits a host control command and synchronously waits for its completion,
 * servicing any callouts the backend queues for this command while waiting.
 *
 * @returns VBox status code: submission failure, or the status reported by
 *          the completion callback.
 * @param   pInterface  The display VBVA callbacks interface.
 * @param   pCmd        The host control command.
 * @param   cbCmd       Size of the command.
 */
DECLCALLBACK(int) vboxCmdVBVACmdHostCtlSync(PPDMIDISPLAYVBVACALLBACKS pInterface,
                                            struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd)
{
    PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
    struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
    if (pVdma == NULL)
        return VERR_INVALID_STATE;
    VBOXCMDVBVA_CMDHOSTCTL_SYNC Data;
    Data.pVdma = pVdma;
    Data.fProcessing = 1;
    Data.rc = VERR_INTERNAL_ERROR;
    RTListInit(&pCmd->CalloutList.List);
    int rc = vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, vboxCmdVBVACmdHostCtlSyncCb, &Data);
    if (!RT_SUCCESS(rc))
    {
        WARN(("vdmaVBVACtlOpaqueHostSubmit failed %d", rc));
        return rc;
    }

    /* fProcessing is cleared by vboxCmdVBVACmdHostCtlSyncCb once done; the
     * shared multi-event is also signalled by vboxCmdVBVACmdCallout, so we
     * must re-check and service callouts on every wake-up. */
    while (Data.fProcessing)
    {
        /* Poll infrequently to make sure no completed message has been missed. */
        RTSemEventMultiWait(pVdma->HostCrCtlCompleteEvent, 500);

        vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);

        if (Data.fProcessing)
            RTThreadYield();
    }

    /* extra check callouts */
    vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);

    /* 'Our' message has been processed, so should reset the semaphore.
     * There is still possible that another message has been processed
     * and the semaphore has been signalled again.
     * Reset only if there are no other messages completed.
     */
    int32_t c = ASMAtomicDecS32(&pVdma->i32cHostCrCtlCompleted);
    Assert(c >= 0);
    if (!c)
        RTSemEventMultiReset(pVdma->HostCrCtlCompleteEvent);

    /* Return the status recorded by the completion callback. */
    rc = Data.rc;
    if (!RT_SUCCESS(rc))
        WARN(("host call failed %d", rc));

    return rc;
}
3371
3372int vboxCmdVBVACmdCtl(PVGASTATE pVGAState, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
3373{
3374 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3375 int rc = VINF_SUCCESS;
3376 switch (pCtl->u32Type)
3377 {
3378 case VBOXCMDVBVACTL_TYPE_3DCTL:
3379 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, pCtl, cbCtl);
3380 case VBOXCMDVBVACTL_TYPE_RESIZE:
3381 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_RESIZE, pCtl, cbCtl);
3382 case VBOXCMDVBVACTL_TYPE_ENABLE:
3383 if (cbCtl != sizeof (VBOXCMDVBVA_CTL_ENABLE))
3384 {
3385 WARN(("incorrect enable size\n"));
3386 rc = VERR_INVALID_PARAMETER;
3387 break;
3388 }
3389 return vdmaVBVACtlEnableDisableSubmit(pVdma, (VBOXCMDVBVA_CTL_ENABLE*)pCtl);
3390 default:
3391 WARN(("unsupported type\n"));
3392 rc = VERR_INVALID_PARAMETER;
3393 break;
3394 }
3395
3396 pCtl->i32Result = rc;
3397 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
3398 AssertRC(rc);
3399 return VINF_SUCCESS;
3400}
3401
3402int vboxCmdVBVACmdSubmit(PVGASTATE pVGAState)
3403{
3404 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3405 {
3406 WARN(("vdma VBVA is disabled\n"));
3407 return VERR_INVALID_STATE;
3408 }
3409
3410 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3411}
3412
3413int vboxCmdVBVACmdFlush(PVGASTATE pVGAState)
3414{
3415 WARN(("flush\n"));
3416 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3417 {
3418 WARN(("vdma VBVA is disabled\n"));
3419 return VERR_INVALID_STATE;
3420 }
3421 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3422}
3423
3424void vboxCmdVBVACmdTimer(PVGASTATE pVGAState)
3425{
3426 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3427 return;
3428 vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3429}
3430
/** Returns true when the command VBVA interface is currently enabled. */
bool vboxCmdVBVAIsEnabled(PVGASTATE pVGAState)
{
    return VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva);
}
3435
3436#endif /* VBOX_WITH_CRHGSMI */
3437
/**
 * Prepares VDMA for saved-state writing.
 *
 * First tries to pause the command VBVA worker; if command VBVA is not active
 * (VERR_INVALID_STATE) it instead notifies the chromium backend with a
 * SAVESTATE_BEGIN control command.
 *
 * @returns VBox status code.
 * @param   pVdma  The VDMA host state.
 */
int vboxVDMASaveStateExecPrep(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    int rc = vdmaVBVAPause(pVdma);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    /* VERR_INVALID_STATE means command VBVA is not in use; anything else is fatal. */
    if (rc != VERR_INVALID_STATE)
    {
        WARN(("vdmaVBVAPause failed %d\n", rc));
        return rc;
    }

# ifdef DEBUG_misha
    WARN(("debug prep"));
# endif

    PVGASTATE pVGAState = pVdma->pVGAState;
    PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
            VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_BEGIN, sizeof (*pCmd));
    Assert(pCmd);
    if (pCmd)
    {
        /* Post synchronously and pick up the command's own result code. */
        rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
        AssertRC(rc);
        if (RT_SUCCESS(rc))
        {
            rc = vboxVDMACrCtlGetRc(pCmd);
        }
        vboxVDMACrCtlRelease(pCmd);
        return rc;
    }
    return VERR_NO_MEMORY;
#else
    RT_NOREF(pVdma);
    return VINF_SUCCESS;
#endif
}
3476
/**
 * Finishes saved-state writing for VDMA (counterpart of
 * vboxVDMASaveStateExecPrep).
 *
 * First tries to resume the command VBVA worker; if command VBVA is not
 * active (VERR_INVALID_STATE) it instead notifies the chromium backend with a
 * SAVESTATE_END control command.
 *
 * @returns VBox status code.
 * @param   pVdma  The VDMA host state.
 */
int vboxVDMASaveStateExecDone(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    int rc = vdmaVBVAResume(pVdma);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    /* VERR_INVALID_STATE means command VBVA is not in use; anything else is fatal. */
    if (rc != VERR_INVALID_STATE)
    {
        WARN(("vdmaVBVAResume failed %d\n", rc));
        return rc;
    }

# ifdef DEBUG_misha
    WARN(("debug done"));
# endif

    PVGASTATE pVGAState = pVdma->pVGAState;
    PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
            VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_END, sizeof (*pCmd));
    Assert(pCmd);
    if (pCmd)
    {
        /* Post synchronously and pick up the command's own result code. */
        rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
        AssertRC(rc);
        if (RT_SUCCESS(rc))
        {
            rc = vboxVDMACrCtlGetRc(pCmd);
        }
        vboxVDMACrCtlRelease(pCmd);
        return rc;
    }
    return VERR_NO_MEMORY;
#else
    RT_NOREF(pVdma);
    return VINF_SUCCESS;
#endif
}
3515
/**
 * Writes the command VBVA state to the saved-state stream.
 *
 * When command VBVA is disabled (or CrHgsmi support is compiled out) only a
 * UINT32_MAX marker is stored; otherwise the VBVA buffer's VRAM offset is
 * stored and the worker thread saves the rest synchronously.
 *
 * @returns VBox status code.
 * @param   pVdma  The VDMA host state.
 * @param   pSSM   Saved-state handle to write to.
 */
int vboxVDMASaveStateExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
{
    int rc;
#ifndef VBOX_WITH_CRHGSMI
    RT_NOREF(pVdma, pSSM);

#else
    /* Note: without CrHgsmi the #ifndef above makes the block below unconditional. */
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
#endif
    {
        /* Disabled: store the UINT32_MAX marker recognized by the loader. */
        rc = SSMR3PutU32(pSSM, UINT32_MAX);
        AssertRCReturn(rc, rc);
        return VINF_SUCCESS;
    }

#ifdef VBOX_WITH_CRHGSMI
    PVGASTATE pVGAState = pVdma->pVGAState;
    uint8_t * pu8VramBase = pVGAState->vram_ptrR3;

    /* Store the guest VBVA buffer location as an offset into VRAM. */
    rc = SSMR3PutU32(pSSM, (uint32_t)(((uint8_t*)pVdma->CmdVbva.pVBVA) - pu8VramBase));
    AssertRCReturn(rc, rc);

    /* Let the worker thread save its own state synchronously. */
    VBVAEXHOSTCTL HCtl;
    HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_SAVESTATE;
    HCtl.u.state.pSSM = pSSM;
    HCtl.u.state.u32Version = 0;
    return vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
#endif
}
3545
/**
 * Restores the command VBVA state written by vboxVDMASaveStateExecPerform.
 *
 * Reads the marker first: UINT32_MAX means command VBVA was disabled at save
 * time and nothing further needs restoring. Otherwise VBVA is re-enabled in
 * paused mode at the saved VRAM offset, the worker loads its state, and
 * processing is resumed.
 *
 * @returns VBox status code.
 * @param   pVdma       The VDMA host state.
 * @param   pSSM        Saved-state handle to read from.
 * @param   u32Version  Saved-state unit version.
 */
int vboxVDMASaveLoadExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32;
    int rc = SSMR3GetU32(pSSM, &u32);
    AssertLogRelRCReturn(rc, rc);

    if (u32 != UINT32_MAX)
    {
#ifdef VBOX_WITH_CRHGSMI
        /* Re-enable VBVA in paused mode at the saved VRAM offset. */
        rc = vdmaVBVACtlEnableSubmitSync(pVdma, u32, true);
        AssertLogRelRCReturn(rc, rc);

        Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_ESTATE_PAUSED);

        /* Let the worker thread restore its own state from the stream. */
        VBVAEXHOSTCTL HCtl;
        HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_LOADSTATE;
        HCtl.u.state.pSSM = pSSM;
        HCtl.u.state.u32Version = u32Version;
        rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
        AssertLogRelRCReturn(rc, rc);

        rc = vdmaVBVAResume(pVdma);
        AssertLogRelRCReturn(rc, rc);

        return VINF_SUCCESS;
#else
        /* Saved state claims VBVA was enabled but this build cannot restore it. */
        RT_NOREF(pVdma, u32Version);
        WARN(("Unsupported VBVACtl info!\n"));
        return VERR_VERSION_MISMATCH;
#endif
    }

    return VINF_SUCCESS;
}
3580
/**
 * Post-load notification: tells the worker thread that state loading has
 * finished by submitting a LOADSTATE_DONE control command.
 *
 * @returns VBox status code; VINF_SUCCESS when command VBVA is not enabled.
 * @param   pVdma  The VDMA host state.
 */
int vboxVDMASaveLoadDone(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
        return VINF_SUCCESS;

/** @todo r=bird: BTW. would be great if you put in a couple of comments here and there explaining what
 * the purpose of this code is. */
    VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE);
    if (!pHCtl)
    {
        WARN(("VBoxVBVAExHCtlCreate failed\n"));
        return VERR_NO_MEMORY;
    }

    /* sanity: the LOADSTATE_DONE control carries no payload */
    pHCtl->u.cmd.pu8Cmd = NULL;
    pHCtl->u.cmd.cbCmd = 0;

    /* NULL completion will just free the ctl up */
    int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_HOST, NULL, NULL);
    if (RT_FAILURE(rc))
    {
        Log(("vdmaVBVACtlSubmit failed rc %d\n", rc));
        /* Submission failed, so nothing will free the ctl for us. */
        VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
        return rc;
    }
#else
    RT_NOREF(pVdma);
#endif
    return VINF_SUCCESS;
}
3613
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette