VirtualBox

source: vbox/trunk/src/VBox/Devices/Graphics/DevVGA_VDMA.cpp@ 63211

Last change on this file since 63211 was 63211, checked in by vboxsync, 8 years ago

Devices: warnings (gcc)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 112.7 KB
Line 
1/* $Id: DevVGA_VDMA.cpp 63211 2016-08-09 14:47:23Z vboxsync $ */
2/** @file
3 * Video DMA (VDMA) support.
4 */
5
6/*
7 * Copyright (C) 2006-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17#include <VBox/VMMDev.h>
18#include <VBox/vmm/pdmdev.h>
19#include <VBox/vmm/pgm.h>
20#include <VBox/VBoxVideo.h>
21#include <iprt/semaphore.h>
22#include <iprt/thread.h>
23#include <iprt/mem.h>
24#include <iprt/asm.h>
25#include <iprt/list.h>
26#include <iprt/param.h>
27
28#include "DevVGA.h"
29#include "HGSMI/SHGSMIHost.h"
30
31#include <VBox/VBoxVideo3D.h>
32#include <VBox/VBoxVideoHost3D.h>
33
34#ifdef DEBUG_misha
35# define VBOXVDBG_MEMCACHE_DISABLE
36#endif
37
38#ifndef VBOXVDBG_MEMCACHE_DISABLE
39# include <iprt/memcache.h>
40#endif
41
42#ifdef DEBUG_misha
43#define WARN_BP() do { AssertFailed(); } while (0)
44#else
45#define WARN_BP() do { } while (0)
46#endif
47#define WARN(_msg) do { \
48 LogRel(_msg); \
49 WARN_BP(); \
50 } while (0)
51
52#define VBOXVDMATHREAD_STATE_TERMINATED 0
53#define VBOXVDMATHREAD_STATE_CREATING 1
54#define VBOXVDMATHREAD_STATE_CREATED 3
55#define VBOXVDMATHREAD_STATE_TERMINATING 4
56
57struct VBOXVDMATHREAD;
58
59typedef DECLCALLBACKPTR(void, PFNVBOXVDMATHREAD_CHANGED)(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvChangeContext);
60
61static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd, VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb);
62
63
/** VDMA worker thread bookkeeping.
 *
 * Tracks the worker thread handle, the event semaphore used to kick the
 * worker, and a small state machine (u32State):
 * TERMINATED -> CREATING -> CREATED -> TERMINATING -> TERMINATED. */
typedef struct VBOXVDMATHREAD
{
    /** The worker thread handle (RTThreadCreate, waitable). */
    RTTHREAD hWorkerThread;
    /** Event semaphore: the worker waits on it, submitters signal it. */
    RTSEMEVENT hEvent;
    /** VBOXVDMATHREAD_STATE_XXX; written with ASMAtomicWriteU32. */
    volatile uint32_t u32State;
    /** Optional one-shot callback fired when creation/termination completes. */
    PFNVBOXVDMATHREAD_CHANGED pfnChanged;
    /** User argument for pfnChanged. */
    void *pvChanged;
} VBOXVDMATHREAD, *PVBOXVDMATHREAD;
72
73
74/* state transformations:
75 *
76 * submitter | processor
77 *
78 * LISTENING ---> PROCESSING
79 *
80 * */
81#define VBVAEXHOSTCONTEXT_STATE_LISTENING 0
82#define VBVAEXHOSTCONTEXT_STATE_PROCESSING 1
83
84#define VBVAEXHOSTCONTEXT_ESTATE_DISABLED -1
85#define VBVAEXHOSTCONTEXT_ESTATE_PAUSED 0
86#define VBVAEXHOSTCONTEXT_ESTATE_ENABLED 1
87
/** Extended host VBVA context.
 *
 * Wraps the guest-shared VBVABUFFER ring plus two control queues (guest and
 * host submitted), with a single-owner "processor" state (i32State) and an
 * enable/pause state (i32EnableState). */
typedef struct VBVAEXHOSTCONTEXT
{
    /** The guest-shared VBVA buffer; NULL until VBoxVBVAExHSEnable. */
    VBVABUFFER *pVBVA;
    /** VBVAEXHOSTCONTEXT_STATE_XXX: LISTENING or PROCESSING (one processor at a time). */
    volatile int32_t i32State;
    /** VBVAEXHOSTCONTEXT_ESTATE_XXX: DISABLED / PAUSED / ENABLED. */
    volatile int32_t i32EnableState;
    /** Number of controls currently queued on GuestCtlList + HostCtlList. */
    volatile uint32_t u32cCtls;
    /* critical section for accessing ctl lists */
    RTCRITSECT CltCritSect;
    /** Pending controls submitted from the guest side. */
    RTLISTANCHOR GuestCtlList;
    /** Pending controls submitted from the host side. */
    RTLISTANCHOR HostCtlList;
#ifndef VBOXVDBG_MEMCACHE_DISABLE
    /** Allocation cache for VBVAEXHOSTCTL instances. */
    RTMEMCACHE CtlCache;
#endif
} VBVAEXHOSTCONTEXT;
102
/** Control command types.
 * @note HH_* controls are queued on the host list, GHH_* presumably originate
 *       from the guest (handled via the guest control list) - naming
 *       convention not confirmed from this file alone. */
typedef enum
{
    /** Invalid/none; also written as the saved-state list terminator. */
    VBVAEXHOSTCTL_TYPE_UNDEFINED = 0,
    /** Handled inline by the processor: pauses the context. */
    VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE,
    /** Handled inline by the processor: resumes the context. */
    VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME,
    VBVAEXHOSTCTL_TYPE_HH_SAVESTATE,
    VBVAEXHOSTCTL_TYPE_HH_LOADSTATE,
    VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE,
    VBVAEXHOSTCTL_TYPE_HH_BE_OPAQUE,
    VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD,
    VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE,
    VBVAEXHOSTCTL_TYPE_GHH_ENABLE,
    VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED,
    VBVAEXHOSTCTL_TYPE_GHH_DISABLE,
    VBVAEXHOSTCTL_TYPE_GHH_RESIZE
} VBVAEXHOSTCTL_TYPE;
119
120struct VBVAEXHOSTCTL;
121
122typedef DECLCALLBACKPTR(void, PFNVBVAEXHOSTCTL_COMPLETE)(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvComplete);
123
/** A single control command queued on a VBVAEXHOSTCONTEXT list. */
typedef struct VBVAEXHOSTCTL
{
    /** List node (GuestCtlList or HostCtlList of the context). */
    RTLISTNODE Node;
    /** The control type. */
    VBVAEXHOSTCTL_TYPE enmType;
    union
    {
        /** Opaque command payload. */
        struct
        {
            /** Command bytes; for saved/restored guest controls this points into guest VRAM. */
            uint8_t * pu8Cmd;
            /** Size of the command in bytes. */
            uint32_t cbCmd;
        } cmd;

        /** Saved-state operation parameters. */
        struct
        {
            PSSMHANDLE pSSM;
            uint32_t u32Version;
        } state;
    } u;
    /** Completion callback; when NULL the control is simply freed on completion
     *  (see VBoxVBVAExHPDataCompleteCtl). */
    PFNVBVAEXHOSTCTL_COMPLETE pfnComplete;
    /** User argument for pfnComplete. */
    void *pvComplete;
} VBVAEXHOSTCTL;
145
/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
 * but can be called with other VBoxVBVAExS** (submitter) functions except Init/Start/Term, apparently.
 * Can only be called by the processor, i.e. the entity that acquired the processor state by a direct or indirect call to VBoxVBVAExHSCheckCommands.
 * See more detailed comments in the headers for the function definitions. */
/** Kind of item returned by VBoxVBVAExHPDataGet in *ppCmd. */
typedef enum
{
    /** Nothing pending (the processor state has been released). */
    VBVAEXHOST_DATA_TYPE_NO_DATA = 0,
    /** Command data located inside the VBVA ring buffer. */
    VBVAEXHOST_DATA_TYPE_CMD,
    /** *ppCmd points to a VBVAEXHOSTCTL taken from the host control list. */
    VBVAEXHOST_DATA_TYPE_HOSTCTL,
    /** *ppCmd points to a VBVAEXHOSTCTL taken from the guest control list. */
    VBVAEXHOST_DATA_TYPE_GUESTCTL
} VBVAEXHOST_DATA_TYPE;
157
158static DECLCALLBACK(int) vdmaVBVANotifyDisable(PVGASTATE pVGAState);
159
160
161static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd);
162
163static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd);
164static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc);
165
/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
 * can be called concurrently with itself as well as with other VBoxVBVAEx** functions except Init/Start/Term, apparently. */
168static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva);
169
170static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva);
171static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA);
172static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva);
173static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva);
174static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM);
175static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version);
176
177static VBVAEXHOSTCTL* VBoxVBVAExHCtlAlloc(VBVAEXHOSTCONTEXT *pCmdVbva)
178{
179#ifndef VBOXVDBG_MEMCACHE_DISABLE
180 return (VBVAEXHOSTCTL*)RTMemCacheAlloc(pCmdVbva->CtlCache);
181#else
182 return (VBVAEXHOSTCTL*)RTMemAlloc(sizeof (VBVAEXHOSTCTL));
183#endif
184}
185
186static void VBoxVBVAExHCtlFree(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl)
187{
188#ifndef VBOXVDBG_MEMCACHE_DISABLE
189 RTMemCacheFree(pCmdVbva->CtlCache, pCtl);
190#else
191 RTMemFree(pCtl);
192#endif
193}
194
195static VBVAEXHOSTCTL* VBoxVBVAExHCtlCreate(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL_TYPE enmType)
196{
197 VBVAEXHOSTCTL* pCtl = VBoxVBVAExHCtlAlloc(pCmdVbva);
198 if (!pCtl)
199 {
200 WARN(("VBoxVBVAExHCtlAlloc failed\n"));
201 return NULL;
202 }
203
204 pCtl->enmType = enmType;
205 return pCtl;
206}
207
208static int vboxVBVAExHSProcessorAcquire(struct VBVAEXHOSTCONTEXT *pCmdVbva)
209{
210 Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);
211
212 if (ASMAtomicCmpXchgS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_PROCESSING, VBVAEXHOSTCONTEXT_STATE_LISTENING))
213 return VINF_SUCCESS;
214 return VERR_SEM_BUSY;
215}
216
/** Dequeues the next pending control for the processor.
 *
 * Host controls take priority; guest controls are only considered when not in
 * host-only mode and the context is not paused.  Must only be called by the
 * current processor (i32State == PROCESSING).
 *
 * @param pCmdVbva      The VBVA context.
 * @param pfHostCtl     Where to return whether the control came from the host
 *                      list; only written when a control is returned.
 * @param fHostOnlyMode Whether to consider host controls only.
 * @returns The detached control, or NULL if none pending (or lock failure).
 */
static VBVAEXHOSTCTL* vboxVBVAExHPCheckCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, bool *pfHostCtl, bool fHostOnlyMode)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);

    /* Fast path: nothing queued at all.  Unordered read is fine, the lists
     * are re-checked under the critical section below. */
    if (!fHostOnlyMode && !ASMAtomicUoReadU32(&pCmdVbva->u32cCtls))
        return NULL;

    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
        VBVAEXHOSTCTL* pCtl = RTListGetFirst(&pCmdVbva->HostCtlList, VBVAEXHOSTCTL, Node);
        if (pCtl)
            *pfHostCtl = true;
        else if (!fHostOnlyMode)
        {
            /* Guest controls are held back while paused. */
            if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
            {
                pCtl = RTListGetFirst(&pCmdVbva->GuestCtlList, VBVAEXHOSTCTL, Node);
                /* pCtl can not be null here since pCmdVbva->u32cCtls is not null,
                 * and there are no HostCtl commands*/
                Assert(pCtl);
                *pfHostCtl = false;
            }
        }

        if (pCtl)
        {
            /* Detach the control and account for it. */
            RTListNodeRemove(&pCtl->Node);
            ASMAtomicDecU32(&pCmdVbva->u32cCtls);
        }

        RTCritSectLeave(&pCmdVbva->CltCritSect);

        return pCtl;
    }
    else
        WARN(("RTCritSectEnter failed %d\n", rc));

    return NULL;
}
257
258static VBVAEXHOSTCTL* VBoxVBVAExHPCheckHostCtlOnDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
259{
260 bool fHostCtl = false;
261 VBVAEXHOSTCTL* pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostCtl, true);
262 Assert(!pCtl || fHostCtl);
263 return pCtl;
264}
265
266static int VBoxVBVAExHPPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
267{
268 if (pCmdVbva->i32EnableState < VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
269 {
270 WARN(("Invalid state\n"));
271 return VERR_INVALID_STATE;
272 }
273
274 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
275 return VINF_SUCCESS;
276}
277
278static int VBoxVBVAExHPResume(struct VBVAEXHOSTCONTEXT *pCmdVbva)
279{
280 if (pCmdVbva->i32EnableState != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
281 {
282 WARN(("Invalid state\n"));
283 return VERR_INVALID_STATE;
284 }
285
286 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
287 return VINF_SUCCESS;
288}
289
290
291static bool vboxVBVAExHPCheckProcessCtlInternal(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl)
292{
293 switch (pCtl->enmType)
294 {
295 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE:
296 {
297 VBoxVBVAExHPPause(pCmdVbva);
298 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
299 return true;
300 }
301 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME:
302 {
303 VBoxVBVAExHPResume(pCmdVbva);
304 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
305 return true;
306 }
307 default:
308 return false;
309 }
310}
311
/** Releases the processor role acquired by vboxVBVAExHSProcessorAcquire,
 *  returning the context to the LISTENING state. */
static void vboxVBVAExHPProcessorRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);

    ASMAtomicWriteS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
}
318
319static void vboxVBVAExHPHgEventSet(struct VBVAEXHOSTCONTEXT *pCmdVbva)
320{
321 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
322 if (pCmdVbva->pVBVA)
323 ASMAtomicOrU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, VBVA_F_STATE_PROCESSING);
324}
325
326static void vboxVBVAExHPHgEventClear(struct VBVAEXHOSTCONTEXT *pCmdVbva)
327{
328 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
329 if (pCmdVbva->pVBVA)
330 ASMAtomicAndU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, ~VBVA_F_STATE_PROCESSING);
331}
332
/** Fetches the next completed command record from the VBVA ring buffer.
 *
 * Must only be called by the current processor while enabled (not paused or
 * disabled).  The returned pointer references the ring buffer directly, no
 * copy is made.
 *
 * @param pCmdVbva  The VBVA context.
 * @param ppCmd     Where to return the command data pointer (into the ring).
 * @param pcbCmd    Where to return the command size in bytes.
 * @returns VINF_SUCCESS on a complete command, VINF_EOF when there are no
 *          records, VINF_TRY_AGAIN while the guest is still writing the
 *          record, VERR_INVALID_STATE for a boundary-crossing command.
 */
static int vboxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pCmdVbva->i32EnableState > VBVAEXHOSTCONTEXT_ESTATE_PAUSED);

    VBVABUFFER *pVBVA = pCmdVbva->pVBVA;

    uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
    uint32_t indexRecordFree = pVBVA->indexRecordFree;

    Log(("first = %d, free = %d\n",
         indexRecordFirst, indexRecordFree));

    if (indexRecordFirst == indexRecordFree)
    {
        /* No records to process. Return without assigning output variables. */
        return VINF_EOF;
    }

    uint32_t cbRecordCurrent = ASMAtomicReadU32(&pVBVA->aRecords[indexRecordFirst].cbRecord);

    /* A new record need to be processed. */
    if (cbRecordCurrent & VBVA_F_RECORD_PARTIAL)
    {
        /* the record is being recorded, try again */
        return VINF_TRY_AGAIN;
    }

    uint32_t cbRecord = cbRecordCurrent & ~VBVA_F_RECORD_PARTIAL;

    if (!cbRecord)
    {
        /* the record is being recorded, try again */
        return VINF_TRY_AGAIN;
    }

    /* we should not get partial commands here actually */
    Assert(cbRecord);

    /* The size of largest contiguous chunk in the ring buffer. */
    uint32_t u32BytesTillBoundary = pVBVA->cbData - pVBVA->off32Data;

    /* The pointer to data in the ring buffer. */
    uint8_t *pSrc = &pVBVA->au8Data[pVBVA->off32Data];

    /* Fetch or point the data. */
    if (u32BytesTillBoundary >= cbRecord)
    {
        /* The command does not cross buffer boundary. Return address in the buffer. */
        *ppCmd = pSrc;
        *pcbCmd = cbRecord;
        return VINF_SUCCESS;
    }

    LogRel(("CmdVbva: cross-bound writes unsupported\n"));
    return VERR_INVALID_STATE;
}
390
391static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd)
392{
393 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
394 pVBVA->off32Data = (pVBVA->off32Data + cbCmd) % pVBVA->cbData;
395
396 pVBVA->indexRecordFirst = (pVBVA->indexRecordFirst + 1) % RT_ELEMENTS(pVBVA->aRecords);
397}
398
399static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc)
400{
401 if (pCtl->pfnComplete)
402 pCtl->pfnComplete(pCmdVbva, pCtl, rc, pCtl->pvComplete);
403 else
404 VBoxVBVAExHCtlFree(pCmdVbva, pCtl);
405}
406
/** Worker for VBoxVBVAExHPDataGet: fetches the next control or ring command.
 *
 * Controls take priority over ring-buffer commands.  Internal host
 * pause/resume controls are handled inline and the loop continues.
 *
 * @returns VBVAEXHOST_DATA_TYPE_* describing what *ppCmd/*pcbCmd refer to.
 */
static VBVAEXHOST_DATA_TYPE vboxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    VBVAEXHOSTCTL *pCtl;
    bool fHostClt;

    for (;;)
    {
        pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostClt, false);
        if (pCtl)
        {
            if (fHostClt)
            {
                if (!vboxVBVAExHPCheckProcessCtlInternal(pCmdVbva, pCtl))
                {
                    /* Not an internal pause/resume: hand the control to the caller. */
                    *ppCmd = (uint8_t*)pCtl;
                    *pcbCmd = sizeof (*pCtl);
                    return VBVAEXHOST_DATA_TYPE_HOSTCTL;
                }
                continue; /* internal control consumed; look for the next item */
            }
            else
            {
                *ppCmd = (uint8_t*)pCtl;
                *pcbCmd = sizeof (*pCtl);
                return VBVAEXHOST_DATA_TYPE_GUESTCTL;
            }
        }

        /* Ring-buffer commands are only fetched while fully enabled. */
        if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) <= VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
            return VBVAEXHOST_DATA_TYPE_NO_DATA;

        int rc = vboxVBVAExHPCmdGet(pCmdVbva, ppCmd, pcbCmd);
        switch (rc)
        {
            case VINF_SUCCESS:
                return VBVAEXHOST_DATA_TYPE_CMD;
            case VINF_EOF:
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
            case VINF_TRY_AGAIN:
                /* The guest is still writing the record; back off briefly and retry. */
                RTThreadSleep(1);
                continue;
            default:
                /* this is something really unexpected, i.e. most likely guest has written something incorrect to the VBVA buffer */
                WARN(("Warning: vboxVBVAExHCmdGet returned unexpected status %d\n", rc));
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
        }
    }
    /* not reached */
}
457
/** Fetches the next item for the processor, releasing the processor state
 *  (and clearing the guest-visible "processing" flag) when the queue drains.
 *
 * @returns VBVAEXHOST_DATA_TYPE_* describing what *ppCmd/*pcbCmd refer to;
 *          on NO_DATA the caller no longer owns the processor state.
 */
static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    VBVAEXHOST_DATA_TYPE enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
    if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
    {
        vboxVBVAExHPHgEventClear(pCmdVbva);
        vboxVBVAExHPProcessorRelease(pCmdVbva);
        /* we need to prevent racing between us clearing the flag and command check/submission thread, i.e.
         * 1. we check the queue -> and it is empty
         * 2. submitter adds command to the queue
         * 3. submitter checks the "processing" -> and it is true , thus it does not submit a notification
         * 4. we clear the "processing" state
         * 5. ->here we need to re-check the queue state to ensure we do not leak the notification of the above command
         * 6. if the queue appears to be not-empty set the "processing" state back to "true"
         **/
        int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
        if (RT_SUCCESS(rc))
        {
            /* we are the processor now */
            enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
            if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
            {
                /* Still empty: release again and report no data. */
                vboxVBVAExHPProcessorRelease(pCmdVbva);
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
            }

            /* Something arrived after all; re-assert the processing flag. */
            vboxVBVAExHPHgEventSet(pCmdVbva);
        }
    }

    return enmType;
}
490
491DECLINLINE(bool) vboxVBVAExHSHasCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
492{
493 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
494
495 if (pVBVA)
496 {
497 uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
498 uint32_t indexRecordFree = pVBVA->indexRecordFree;
499
500 if (indexRecordFirst != indexRecordFree)
501 return true;
502 }
503
504 return !!ASMAtomicReadU32(&pCmdVbva->u32cCtls);
505}
506
507/* Checks whether the new commands are ready for processing
508 * @returns
509 * VINF_SUCCESS - there are commands are in a queue, and the given thread is now the processor (i.e. typically it would delegate processing to a worker thread)
510 * VINF_EOF - no commands in a queue
511 * VINF_ALREADY_INITIALIZED - another thread already processing the commands
512 * VERR_INVALID_STATE - the VBVA is paused or pausing */
513static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
514{
515 int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
516 if (RT_SUCCESS(rc))
517 {
518 /* we are the processor now */
519 if (vboxVBVAExHSHasCommands(pCmdVbva))
520 {
521 vboxVBVAExHPHgEventSet(pCmdVbva);
522 return VINF_SUCCESS;
523 }
524
525 vboxVBVAExHPProcessorRelease(pCmdVbva);
526 return VINF_EOF;
527 }
528 if (rc == VERR_SEM_BUSY)
529 return VINF_ALREADY_INITIALIZED;
530 return VERR_INVALID_STATE;
531}
532
/** Initializes a VBVAEXHOSTCONTEXT: lists, lock, allocation cache.
 *
 * The context starts DISABLED with i32State set to PROCESSING
 * (NOTE(review): presumably so nothing can acquire the processor role until
 * the state is explicitly released elsewhere - confirm against callers).
 *
 * @returns VBox status code.
 */
static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
    int rc = RTCritSectInit(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
#ifndef VBOXVDBG_MEMCACHE_DISABLE
        rc = RTMemCacheCreate(&pCmdVbva->CtlCache, sizeof (VBVAEXHOSTCTL),
                              0, /* size_t cbAlignment */
                              UINT32_MAX, /* uint32_t cMaxObjects */
                              NULL, /* PFNMEMCACHECTOR pfnCtor*/
                              NULL, /* PFNMEMCACHEDTOR pfnDtor*/
                              NULL, /* void *pvUser*/
                              0 /* uint32_t fFlags*/
                              );
        if (RT_SUCCESS(rc))
#endif
        {
            RTListInit(&pCmdVbva->GuestCtlList);
            RTListInit(&pCmdVbva->HostCtlList);
            pCmdVbva->i32State = VBVAEXHOSTCONTEXT_STATE_PROCESSING;
            pCmdVbva->i32EnableState = VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
            return VINF_SUCCESS;
        }
#ifndef VBOXVDBG_MEMCACHE_DISABLE
        else
            WARN(("RTMemCacheCreate failed %d\n", rc));
#endif
    }
    else
        WARN(("RTCritSectInit failed %d\n", rc));

    return rc;
}
567
568DECLINLINE(bool) VBoxVBVAExHSIsEnabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
569{
570 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) >= VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
571}
572
573DECLINLINE(bool) VBoxVBVAExHSIsDisabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
574{
575 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) == VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
576}
577
578static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA)
579{
580 if (VBoxVBVAExHSIsEnabled(pCmdVbva))
581 {
582 WARN(("VBVAEx is enabled already\n"));
583 return VERR_INVALID_STATE;
584 }
585
586 pCmdVbva->pVBVA = pVBVA;
587 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
588 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
589 return VINF_SUCCESS;
590}
591
592static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
593{
594 if (VBoxVBVAExHSIsDisabled(pCmdVbva))
595 return VINF_SUCCESS;
596
597 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
598 return VINF_SUCCESS;
599}
600
/** Tears down a VBVAEXHOSTCONTEXT initialized by VBoxVBVAExHSInit.
 *  Both control lists must already be empty. */
static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    /* ensure the processor is stopped */
    Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);

    /* ensure no one tries to submit the command */
    if (pCmdVbva->pVBVA)
        pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;

    Assert(RTListIsEmpty(&pCmdVbva->GuestCtlList));
    Assert(RTListIsEmpty(&pCmdVbva->HostCtlList));

    RTCritSectDelete(&pCmdVbva->CltCritSect);

#ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMemCacheDestroy(pCmdVbva->CtlCache);
#endif

    /* Scrub the structure so stale pointers cannot be misused. */
    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
}
621
622static int vboxVBVAExHSSaveGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
623{
624 RT_NOREF(pCmdVbva);
625 int rc = SSMR3PutU32(pSSM, pCtl->enmType);
626 AssertRCReturn(rc, rc);
627 rc = SSMR3PutU32(pSSM, pCtl->u.cmd.cbCmd);
628 AssertRCReturn(rc, rc);
629 rc = SSMR3PutU32(pSSM, (uint32_t)(pCtl->u.cmd.pu8Cmd - pu8VramBase));
630 AssertRCReturn(rc, rc);
631
632 return VINF_SUCCESS;
633}
634
635static int vboxVBVAExHSSaveStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
636{
637 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
638 {
639 WARN(("vbva not paused\n"));
640 return VERR_INVALID_STATE;
641 }
642
643 VBVAEXHOSTCTL* pCtl;
644 int rc;
645 RTListForEach(&pCmdVbva->GuestCtlList, pCtl, VBVAEXHOSTCTL, Node)
646 {
647 rc = vboxVBVAExHSSaveGuestCtl(pCmdVbva, pCtl, pu8VramBase, pSSM);
648 AssertRCReturn(rc, rc);
649 }
650
651 rc = SSMR3PutU32(pSSM, 0);
652 AssertRCReturn(rc, rc);
653
654 return VINF_SUCCESS;
655}
656/* Saves state
657 * @returns - same as VBoxVBVAExHSCheckCommands, or failure on load state fail
658 */
659static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
660{
661 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
662 if (RT_FAILURE(rc))
663 {
664 WARN(("RTCritSectEnter failed %d\n", rc));
665 return rc;
666 }
667
668 rc = vboxVBVAExHSSaveStateLocked(pCmdVbva, pu8VramBase, pSSM);
669 if (RT_FAILURE(rc))
670 WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
671
672 RTCritSectLeave(&pCmdVbva->CltCritSect);
673
674 return rc;
675}
676
677static int vboxVBVAExHSLoadGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
678{
679 RT_NOREF(u32Version);
680 uint32_t u32;
681 int rc = SSMR3GetU32(pSSM, &u32);
682 AssertLogRelRCReturn(rc, rc);
683
684 if (!u32)
685 return VINF_EOF;
686
687 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(pCmdVbva, (VBVAEXHOSTCTL_TYPE)u32);
688 if (!pHCtl)
689 {
690 WARN(("VBoxVBVAExHCtlCreate failed\n"));
691 return VERR_NO_MEMORY;
692 }
693
694 rc = SSMR3GetU32(pSSM, &u32);
695 AssertLogRelRCReturn(rc, rc);
696 pHCtl->u.cmd.cbCmd = u32;
697
698 rc = SSMR3GetU32(pSSM, &u32);
699 AssertLogRelRCReturn(rc, rc);
700 pHCtl->u.cmd.pu8Cmd = pu8VramBase + u32;
701
702 RTListAppend(&pCmdVbva->GuestCtlList, &pHCtl->Node);
703 ++pCmdVbva->u32cCtls;
704
705 return VINF_SUCCESS;
706}
707
708
709static int vboxVBVAExHSLoadStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
710{
711 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
712 {
713 WARN(("vbva not stopped\n"));
714 return VERR_INVALID_STATE;
715 }
716
717 int rc;
718
719 do {
720 rc = vboxVBVAExHSLoadGuestCtl(pCmdVbva, pu8VramBase, pSSM, u32Version);
721 AssertLogRelRCReturn(rc, rc);
722 } while (VINF_EOF != rc);
723
724 return VINF_SUCCESS;
725}
726
727/* Loads state
728 * @returns - same as VBoxVBVAExHSCheckCommands, or failure on load state fail
729 */
730static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
731{
732 Assert(VGA_SAVEDSTATE_VERSION_3D <= u32Version);
733 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
734 if (RT_FAILURE(rc))
735 {
736 WARN(("RTCritSectEnter failed %d\n", rc));
737 return rc;
738 }
739
740 rc = vboxVBVAExHSLoadStateLocked(pCmdVbva, pu8VramBase, pSSM, u32Version);
741 if (RT_FAILURE(rc))
742 WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
743
744 RTCritSectLeave(&pCmdVbva->CltCritSect);
745
746 return rc;
747}
748
/** Identifies which control queue a submission targets
 *  (see VBoxVBVAExHCtlSubmit). */
typedef enum
{
    VBVAEXHOSTCTL_SOURCE_GUEST = 0,
    VBVAEXHOSTCTL_SOURCE_HOST
} VBVAEXHOSTCTL_SOURCE;
754
755
756static int VBoxVBVAExHCtlSubmit(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
757{
758 if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
759 {
760 Log(("cmd vbva not enabled\n"));
761 return VERR_INVALID_STATE;
762 }
763
764 pCtl->pfnComplete = pfnComplete;
765 pCtl->pvComplete = pvComplete;
766
767 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
768 if (RT_SUCCESS(rc))
769 {
770 if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
771 {
772 Log(("cmd vbva not enabled\n"));
773 RTCritSectLeave(&pCmdVbva->CltCritSect);
774 return VERR_INVALID_STATE;
775 }
776
777 if (enmSource > VBVAEXHOSTCTL_SOURCE_GUEST)
778 {
779 RTListAppend(&pCmdVbva->HostCtlList, &pCtl->Node);
780 }
781 else
782 RTListAppend(&pCmdVbva->GuestCtlList, &pCtl->Node);
783
784 ASMAtomicIncU32(&pCmdVbva->u32cCtls);
785
786 RTCritSectLeave(&pCmdVbva->CltCritSect);
787
788 rc = VBoxVBVAExHSCheckCommands(pCmdVbva);
789 }
790 else
791 WARN(("RTCritSectEnter failed %d\n", rc));
792
793 return rc;
794}
795
796#ifdef VBOX_WITH_CRHGSMI
typedef struct VBOXVDMA_SOURCE
{
    /** Screen geometry for this source. */
    VBVAINFOSCREEN Screen;
    /** Bitmap of target screens (see VBOXCMDVBVA_SCREENMAP_DECL). */
    VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
} VBOXVDMA_SOURCE;
802#endif
803
/** The VDMA host instance data. */
typedef struct VBOXVDMAHOST
{
    /** The HGSMI instance used for guest communication. */
    PHGSMIINSTANCE pHgsmi;
    /** The owning VGA device state. */
    PVGASTATE pVGAState;
#ifdef VBOX_WITH_CRHGSMI
    /** The extended command VBVA context. */
    VBVAEXHOSTCONTEXT CmdVbva;
    /** The VDMA worker thread. */
    VBOXVDMATHREAD Thread;
    VBOXCRCMD_SVRINFO CrSrvInfo;
    VBVAEXHOSTCTL* pCurRemainingHostCtl;
    RTSEMEVENTMULTI HostCrCtlCompleteEvent;
    int32_t volatile i32cHostCrCtlCompleted;
    /** Protects callout list handling (see vboxCmdVBVACmdCallout). */
    RTCRITSECT CalloutCritSect;
// VBOXVDMA_SOURCE aSources[VBOX_VIDEO_MAX_SCREENS];
#endif
#ifdef VBOX_VDMA_WITH_WATCHDOG
    PTMTIMERR3 WatchDogTimer;
#endif
} VBOXVDMAHOST, *PVBOXVDMAHOST;
822
823#ifdef VBOX_WITH_CRHGSMI
824
825void VBoxVDMAThreadNotifyConstructSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
826{
827 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_CREATING);
828 PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
829 void *pvChanged = pThread->pvChanged;
830
831 pThread->pfnChanged = NULL;
832 pThread->pvChanged = NULL;
833
834 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_CREATED);
835
836 if (pfnChanged)
837 pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
838}
839
840void VBoxVDMAThreadNotifyTerminatingSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
841{
842 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_TERMINATING);
843 PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
844 void *pvChanged = pThread->pvChanged;
845
846 pThread->pfnChanged = NULL;
847 pThread->pvChanged = NULL;
848
849 if (pfnChanged)
850 pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
851}
852
853DECLINLINE(bool) VBoxVDMAThreadIsTerminating(PVBOXVDMATHREAD pThread)
854{
855 return ASMAtomicUoReadU32(&pThread->u32State) == VBOXVDMATHREAD_STATE_TERMINATING;
856}
857
858void VBoxVDMAThreadInit(PVBOXVDMATHREAD pThread)
859{
860 memset(pThread, 0, sizeof (*pThread));
861 pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
862}
863
864int VBoxVDMAThreadCleanup(PVBOXVDMATHREAD pThread)
865{
866 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
867 switch (u32State)
868 {
869 case VBOXVDMATHREAD_STATE_TERMINATED:
870 return VINF_SUCCESS;
871 case VBOXVDMATHREAD_STATE_TERMINATING:
872 {
873 int rc = RTThreadWait(pThread->hWorkerThread, RT_INDEFINITE_WAIT, NULL);
874 if (!RT_SUCCESS(rc))
875 {
876 WARN(("RTThreadWait failed %d\n", rc));
877 return rc;
878 }
879
880 RTSemEventDestroy(pThread->hEvent);
881
882 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATED);
883 return VINF_SUCCESS;
884 }
885 default:
886 WARN(("invalid state"));
887 return VERR_INVALID_STATE;
888 }
889}
890
891int VBoxVDMAThreadCreate(PVBOXVDMATHREAD pThread, PFNRTTHREAD pfnThread, void *pvThread, PFNVBOXVDMATHREAD_CHANGED pfnCreated, void*pvCreated)
892{
893 int rc = VBoxVDMAThreadCleanup(pThread);
894 if (RT_FAILURE(rc))
895 {
896 WARN(("VBoxVDMAThreadCleanup failed %d\n", rc));
897 return rc;
898 }
899
900 rc = RTSemEventCreate(&pThread->hEvent);
901 if (RT_SUCCESS(rc))
902 {
903 pThread->u32State = VBOXVDMATHREAD_STATE_CREATING;
904 pThread->pfnChanged = pfnCreated;
905 pThread->pvChanged = pvCreated;
906 rc = RTThreadCreate(&pThread->hWorkerThread, pfnThread, pvThread, 0, RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "VDMA");
907 if (RT_SUCCESS(rc))
908 return VINF_SUCCESS;
909 else
910 WARN(("RTThreadCreate failed %d\n", rc));
911
912 RTSemEventDestroy(pThread->hEvent);
913 }
914 else
915 WARN(("RTSemEventCreate failed %d\n", rc));
916
917 pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
918
919 return rc;
920}
921
/** Signals the worker thread's event semaphore (kicks the worker). */
DECLINLINE(int) VBoxVDMAThreadEventNotify(PVBOXVDMATHREAD pThread)
{
    int rc = RTSemEventSignal(pThread->hEvent);
    AssertRC(rc);
    return rc;
}
928
/** Waits on the worker thread's event semaphore for up to cMillies ms. */
DECLINLINE(int) VBoxVDMAThreadEventWait(PVBOXVDMATHREAD pThread, RTMSINTERVAL cMillies)
{
    int rc = RTSemEventWait(pThread->hEvent, cMillies);
    AssertRC(rc);
    return rc;
}
935
936int VBoxVDMAThreadTerm(PVBOXVDMATHREAD pThread, PFNVBOXVDMATHREAD_CHANGED pfnTerminated, void*pvTerminated, bool fNotify)
937{
938 int rc;
939 do
940 {
941 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
942 switch (u32State)
943 {
944 case VBOXVDMATHREAD_STATE_CREATED:
945 pThread->pfnChanged = pfnTerminated;
946 pThread->pvChanged = pvTerminated;
947 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATING);
948 if (fNotify)
949 {
950 rc = VBoxVDMAThreadEventNotify(pThread);
951 AssertRC(rc);
952 }
953 return VINF_SUCCESS;
954 case VBOXVDMATHREAD_STATE_TERMINATING:
955 case VBOXVDMATHREAD_STATE_TERMINATED:
956 {
957 WARN(("thread is marked to termination or terminated\nn"));
958 return VERR_INVALID_STATE;
959 }
960 case VBOXVDMATHREAD_STATE_CREATING:
961 {
962 /* wait till the thread creation is completed */
963 WARN(("concurrent thread create/destron\n"));
964 RTThreadYield();
965 continue;
966 }
967 default:
968 WARN(("invalid state"));
969 return VERR_INVALID_STATE;
970 }
971 } while (1);
972
973 WARN(("should never be here\n"));
974 return VERR_INTERNAL_ERROR;
975}
976
977static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource);
978
979typedef DECLCALLBACK(void) FNVBOXVDMACRCTL_CALLBACK(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext);
980typedef FNVBOXVDMACRCTL_CALLBACK *PFNVBOXVDMACRCTL_CALLBACK;
981
/** Private, reference-counted header prepended to every chromium control
 *  command allocated by vboxVDMACrCtlCreate. */
typedef struct VBOXVDMACMD_CHROMIUM_CTL_PRIVATE
{
    /** Reference count (vboxVDMACrCtlRetain / vboxVDMACrCtlRelease). */
    uint32_t cRefs;
    /** Completion status; VERR_NOT_IMPLEMENTED until completion. */
    int32_t rc;
    /** Completion callback, set at submission time. */
    PFNVBOXVDMACRCTL_CALLBACK pfnCompletion;
    /** User argument for pfnCompletion. */
    void *pvCompletion;
    /** The public command part; its address is what callers see. */
    VBOXVDMACMD_CHROMIUM_CTL Cmd;
} VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, *PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE;

/** Maps a public command pointer back to its private header. */
#define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(_p) ((PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)(((uint8_t*)(_p)) - RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd)))
992
993static PVBOXVDMACMD_CHROMIUM_CTL vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE enmCmd, uint32_t cbCmd)
994{
995 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)RTMemAllocZ(cbCmd + RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd));
996 Assert(pHdr);
997 if (pHdr)
998 {
999 pHdr->cRefs = 1;
1000 pHdr->rc = VERR_NOT_IMPLEMENTED;
1001 pHdr->Cmd.enmType = enmCmd;
1002 pHdr->Cmd.cbCmd = cbCmd;
1003 return &pHdr->Cmd;
1004 }
1005
1006 return NULL;
1007}
1008
1009DECLINLINE(void) vboxVDMACrCtlRelease (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1010{
1011 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1012 uint32_t cRefs = ASMAtomicDecU32(&pHdr->cRefs);
1013 if (!cRefs)
1014 RTMemFree(pHdr);
1015}
1016
1017DECLINLINE(void) vboxVDMACrCtlRetain (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1018{
1019 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1020 ASMAtomicIncU32(&pHdr->cRefs);
1021}
1022
1023DECLINLINE(int) vboxVDMACrCtlGetRc (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1024{
1025 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1026 return pHdr->rc;
1027}
1028
1029static DECLCALLBACK(void) vboxVDMACrCtlCbSetEvent(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
1030{
1031 RT_NOREF(pVGAState, pCmd);
1032 RTSemEventSignal((RTSEMEVENT)pvContext);
1033}
1034
1035#if 0 /** @todo vboxVDMACrCtlCbReleaseCmd is unused */
1036static DECLCALLBACK(void) vboxVDMACrCtlCbReleaseCmd(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
1037{
1038 RT_NOREF(pVGAState, pvContext);
1039 vboxVDMACrCtlRelease(pCmd);
1040}
1041#endif
1042
1043static int vboxVDMACrCtlPostAsync (PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd, PFNVBOXVDMACRCTL_CALLBACK pfnCompletion, void *pvCompletion)
1044{
1045 if ( pVGAState->pDrv
1046 && pVGAState->pDrv->pfnCrHgsmiControlProcess)
1047 {
1048 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1049 pHdr->pfnCompletion = pfnCompletion;
1050 pHdr->pvCompletion = pvCompletion;
1051 pVGAState->pDrv->pfnCrHgsmiControlProcess(pVGAState->pDrv, pCmd, cbCmd);
1052 return VINF_SUCCESS;
1053 }
1054#ifdef DEBUG_misha
1055 Assert(0);
1056#endif
1057 return VERR_NOT_SUPPORTED;
1058}
1059
/**
 * Submits a chromium control command and waits for its completion.
 *
 * @returns VBox status code of the submission/wait (use vboxVDMACrCtlGetRc
 *          for the command's own result).
 */
static int vboxVDMACrCtlPost(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd)
{
    RTSEMEVENT hComplEvent;
    int rc = RTSemEventCreate(&hComplEvent);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        rc = vboxVDMACrCtlPostAsync(pVGAState, pCmd, cbCmd, vboxVDMACrCtlCbSetEvent, (void*)hComplEvent);
#ifdef DEBUG_misha
        AssertRC(rc);
#endif
        if (RT_SUCCESS(rc))
        {
            rc = RTSemEventWaitNoResume(hComplEvent, RT_INDEFINITE_WAIT);
            AssertRC(rc);
            if (RT_SUCCESS(rc))
            {
                RTSemEventDestroy(hComplEvent);
            }
            /* NOTE(review): when the wait is interrupted the event is NOT
             * destroyed -- the completion callback may still fire later and
             * must not signal a destroyed semaphore.  The handle leaks in
             * that (rare) case; presumably deliberate -- confirm. */
        }
        else
        {
            /* the command is completed */
            RTSemEventDestroy(hComplEvent);
        }
    }
    return rc;
}
1088
/** Completion context for vboxVDMACrHgcmSubmitSync.
 *  ("CYNC" is a historical typo for "SYNC", kept as-is.) */
typedef struct VDMA_VBVA_CTL_CYNC_COMPLETION
{
    int rc;            /* result reported by the completion callback */
    RTSEMEVENT hEvent; /* signalled when the control command completes */
} VDMA_VBVA_CTL_CYNC_COMPLETION;
1094
1095static DECLCALLBACK(void) vboxVDMACrHgcmSubmitSyncCompletion(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
1096{
1097 RT_NOREF(pCmd, cbCmd);
1098 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvCompletion;
1099 pData->rc = rc;
1100 rc = RTSemEventSignal(pData->hEvent);
1101 if (!RT_SUCCESS(rc))
1102 WARN(("RTSemEventSignal failed %d\n", rc));
1103}
1104
1105static int vboxVDMACrHgcmSubmitSync(struct VBOXVDMAHOST *pVdma, VBOXCRCMDCTL* pCtl, uint32_t cbCtl)
1106{
1107 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
1108 Data.rc = VERR_NOT_IMPLEMENTED;
1109 int rc = RTSemEventCreate(&Data.hEvent);
1110 if (!RT_SUCCESS(rc))
1111 {
1112 WARN(("RTSemEventCreate failed %d\n", rc));
1113 return rc;
1114 }
1115
1116 pCtl->CalloutList.List.pNext = NULL;
1117
1118 PVGASTATE pVGAState = pVdma->pVGAState;
1119 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCtl, cbCtl, vboxVDMACrHgcmSubmitSyncCompletion, &Data);
1120 if (RT_SUCCESS(rc))
1121 {
1122 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
1123 if (RT_SUCCESS(rc))
1124 {
1125 rc = Data.rc;
1126 if (!RT_SUCCESS(rc))
1127 {
1128 WARN(("pfnCrHgcmCtlSubmit command failed %d\n", rc));
1129 }
1130
1131 }
1132 else
1133 WARN(("RTSemEventWait failed %d\n", rc));
1134 }
1135 else
1136 WARN(("pfnCrHgcmCtlSubmit failed %d\n", rc));
1137
1138
1139 RTSemEventDestroy(Data.hEvent);
1140
1141 return rc;
1142}
1143
1144static int vdmaVBVACtlDisableSync(PVBOXVDMAHOST pVdma)
1145{
1146 VBVAEXHOSTCTL HCtl;
1147 HCtl.enmType = VBVAEXHOSTCTL_TYPE_GHH_DISABLE;
1148 int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1149 if (RT_FAILURE(rc))
1150 {
1151 Log(("vdmaVBVACtlSubmitSync failed %d\n", rc));
1152 return rc;
1153 }
1154
1155 vgaUpdateDisplayAll(pVdma->pVGAState, /* fFailOnResize = */ false);
1156
1157 return VINF_SUCCESS;
1158}
1159
1160static DECLCALLBACK(uint8_t*) vboxVDMACrHgcmHandleEnableRemainingHostCommand(HVBOXCRCMDCTL_REMAINING_HOST_COMMAND hClient, uint32_t *pcbCtl, int prevCmdRc)
1161{
1162 struct VBOXVDMAHOST *pVdma = hClient;
1163 if (!pVdma->pCurRemainingHostCtl)
1164 {
1165 /* disable VBVA, all subsequent host commands will go HGCM way */
1166 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1167 }
1168 else
1169 {
1170 VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pVdma->pCurRemainingHostCtl, prevCmdRc);
1171 }
1172
1173 pVdma->pCurRemainingHostCtl = VBoxVBVAExHPCheckHostCtlOnDisable(&pVdma->CmdVbva);
1174 if (pVdma->pCurRemainingHostCtl)
1175 {
1176 *pcbCtl = pVdma->pCurRemainingHostCtl->u.cmd.cbCmd;
1177 return pVdma->pCurRemainingHostCtl->u.cmd.pu8Cmd;
1178 }
1179
1180 *pcbCtl = 0;
1181 return NULL;
1182}
1183
/**
 * Backend notification that the terminating handshake is done; only
 * sanity-checks the expected VBVA/thread states in strict builds.
 */
static DECLCALLBACK(void) vboxVDMACrHgcmNotifyTerminatingDoneCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient)
{
#ifdef VBOX_STRICT
    struct VBOXVDMAHOST *pVdma = hClient;
    Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pVdma->Thread.u32State == VBOXVDMATHREAD_STATE_TERMINATING);
#else
    RT_NOREF(hClient);
#endif
}
1194
1195static DECLCALLBACK(int) vboxVDMACrHgcmNotifyTerminatingCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient, VBOXCRCMDCTL_HGCMENABLE_DATA *pHgcmEnableData)
1196{
1197 struct VBOXVDMAHOST *pVdma = hClient;
1198 VBVAEXHOSTCTL HCtl;
1199 HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD;
1200 int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1201
1202 pHgcmEnableData->hRHCmd = pVdma;
1203 pHgcmEnableData->pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1204
1205 if (RT_FAILURE(rc))
1206 {
1207 if (rc == VERR_INVALID_STATE)
1208 rc = VINF_SUCCESS;
1209 else
1210 WARN(("vdmaVBVACtlSubmitSync failed %d\n", rc));
1211 }
1212
1213 return rc;
1214}
1215
1216static int vboxVDMACrHgcmHandleEnable(struct VBOXVDMAHOST *pVdma)
1217{
1218 VBOXCRCMDCTL_ENABLE Enable;
1219 Enable.Hdr.enmType = VBOXCRCMDCTL_TYPE_ENABLE;
1220 Enable.Data.hRHCmd = pVdma;
1221 Enable.Data.pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1222
1223 int rc = vboxVDMACrHgcmSubmitSync(pVdma, &Enable.Hdr, sizeof (Enable));
1224 Assert(!pVdma->pCurRemainingHostCtl);
1225 if (RT_SUCCESS(rc))
1226 {
1227 Assert(!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1228 return VINF_SUCCESS;
1229 }
1230
1231 Assert(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1232 WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));
1233
1234 return rc;
1235}
1236
1237static int vdmaVBVAEnableProcess(struct VBOXVDMAHOST *pVdma, uint32_t u32Offset)
1238{
1239 if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1240 {
1241 WARN(("vdma VBVA is already enabled\n"));
1242 return VERR_INVALID_STATE;
1243 }
1244
1245 VBVABUFFER *pVBVA = (VBVABUFFER *)HGSMIOffsetToPointerHost(pVdma->pHgsmi, u32Offset);
1246 if (!pVBVA)
1247 {
1248 WARN(("invalid offset %d\n", u32Offset));
1249 return VERR_INVALID_PARAMETER;
1250 }
1251
1252 if (!pVdma->CrSrvInfo.pfnEnable)
1253 {
1254#ifdef DEBUG_misha
1255 WARN(("pfnEnable is NULL\n"));
1256 return VERR_NOT_SUPPORTED;
1257#endif
1258 }
1259
1260 int rc = VBoxVBVAExHSEnable(&pVdma->CmdVbva, pVBVA);
1261 if (RT_SUCCESS(rc))
1262 {
1263 VBOXCRCMDCTL_DISABLE Disable;
1264 Disable.Hdr.enmType = VBOXCRCMDCTL_TYPE_DISABLE;
1265 Disable.Data.hNotifyTerm = pVdma;
1266 Disable.Data.pfnNotifyTerm = vboxVDMACrHgcmNotifyTerminatingCb;
1267 Disable.Data.pfnNotifyTermDone = vboxVDMACrHgcmNotifyTerminatingDoneCb;
1268 rc = vboxVDMACrHgcmSubmitSync(pVdma, &Disable.Hdr, sizeof (Disable));
1269 if (RT_SUCCESS(rc))
1270 {
1271 PVGASTATE pVGAState = pVdma->pVGAState;
1272 VBOXCRCMD_SVRENABLE_INFO Info;
1273 Info.hCltScr = pVGAState->pDrv;
1274 Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
1275 Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
1276 Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
1277 rc = pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
1278 if (RT_SUCCESS(rc))
1279 return VINF_SUCCESS;
1280 else
1281 WARN(("pfnEnable failed %d\n", rc));
1282
1283 vboxVDMACrHgcmHandleEnable(pVdma);
1284 }
1285 else
1286 WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));
1287
1288 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1289 }
1290 else
1291 WARN(("VBoxVBVAExHSEnable failed %d\n", rc));
1292
1293 return rc;
1294}
1295
/**
 * Handles disabling of the command VBVA channel.
 *
 * @returns VBox status code.
 * @param   pVdma          The VDMA host state.
 * @param   fDoHgcmEnable  Whether to switch host controls back to the HGCM
 *                         channel (false on HGCM unload, where the service is
 *                         going away anyway).
 */
static int vdmaVBVADisableProcess(struct VBOXVDMAHOST *pVdma, bool fDoHgcmEnable)
{
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    {
        Log(("vdma VBVA is already disabled\n"));
        return VINF_SUCCESS;
    }

    int rc = pVdma->CrSrvInfo.pfnDisable(pVdma->CrSrvInfo.hSvr);
    if (RT_SUCCESS(rc))
    {
        if (fDoHgcmEnable)
        {
            PVGASTATE pVGAState = pVdma->pVGAState;

            /* disable is a bit tricky
             * we need to ensure the host ctl commands do not come out of order
             * and do not come over HGCM channel until after it is enabled */
            rc = vboxVDMACrHgcmHandleEnable(pVdma);
            if (RT_SUCCESS(rc))
            {
                vdmaVBVANotifyDisable(pVGAState);
                return VINF_SUCCESS;
            }

            /* Roll back: the HGCM switch failed, so re-enable the chromium
             * server to leave things in a consistent state. */
            VBOXCRCMD_SVRENABLE_INFO Info;
            Info.hCltScr = pVGAState->pDrv;
            Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
            Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
            Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
            pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
        }
    }
    else
        WARN(("pfnDisable failed %d\n", rc));

    return rc;
}
1334
/**
 * Processes a host-originated VBVAEXHOSTCTL on the VDMA worker thread.
 *
 * @returns VBox status code.
 * @param   pVdma       The VDMA host state.
 * @param   pCmd        The control command to execute.
 * @param   pfContinue  Set to false when the worker thread must stop
 *                      processing (HGCM unload); true otherwise.
 */
static int vboxVDMACrHostCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd, bool *pfContinue)
{
    *pfContinue = true;

    switch (pCmd->enmType)
    {
        case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
        {
            /* Opaque payload is forwarded verbatim to the chromium backend. */
            if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
            {
                WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
                return VERR_INVALID_STATE;
            }
            return pVdma->CrSrvInfo.pfnHostCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
        }
        case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
        {
            int rc = vdmaVBVADisableProcess(pVdma, true);
            if (RT_FAILURE(rc))
            {
                WARN(("vdmaVBVADisableProcess failed %d\n", rc));
                return rc;
            }

            /* Regular disable: terminate the thread without HGCM handover. */
            return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
        }
        case VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD:
        {
            int rc = vdmaVBVADisableProcess(pVdma, false);
            if (RT_FAILURE(rc))
            {
                WARN(("vdmaVBVADisableProcess failed %d\n", rc));
                return rc;
            }

            rc = VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, true);
            if (RT_FAILURE(rc))
            {
                WARN(("VBoxVDMAThreadTerm failed %d\n", rc));
                return rc;
            }

            /* Stop the worker loop -- the HGCM service is going away. */
            *pfContinue = false;
            return VINF_SUCCESS;
        }
        case VBVAEXHOSTCTL_TYPE_HH_SAVESTATE:
        {
            PVGASTATE pVGAState = pVdma->pVGAState;
            uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
            int rc = VBoxVBVAExHSSaveState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM);
            if (RT_FAILURE(rc))
            {
                WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
                return rc;
            }
            VGA_SAVED_STATE_PUT_MARKER(pCmd->u.state.pSSM, 4);

            /* Let the chromium server append its own state. */
            return pVdma->CrSrvInfo.pfnSaveState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM);
        }
        case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE:
        {
            PVGASTATE pVGAState = pVdma->pVGAState;
            uint8_t * pu8VramBase = pVGAState->vram_ptrR3;

            int rc = VBoxVBVAExHSLoadState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
            if (RT_FAILURE(rc))
            {
                /* NOTE(review): message says "SaveState" but this is the load
                 * path -- copy/paste artefact in the log text. */
                WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
                return rc;
            }

            VGA_SAVED_STATE_GET_MARKER_RETURN_ON_MISMATCH(pCmd->u.state.pSSM, pCmd->u.state.u32Version, 4);
            rc = pVdma->CrSrvInfo.pfnLoadState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
            if (RT_FAILURE(rc))
            {
                WARN(("pfnLoadState failed %d\n", rc));
                return rc;
            }

            return VINF_SUCCESS;
        }
        case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE:
        {
            PVGASTATE pVGAState = pVdma->pVGAState;

            /* Re-announce view and screen info for every monitor after a
             * state load, so the display side is in sync. */
            for (uint32_t i = 0; i < pVGAState->cMonitors; ++i)
            {
                VBVAINFOSCREEN CurScreen;
                VBVAINFOVIEW CurView;

                int rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
                if (RT_FAILURE(rc))
                {
                    WARN(("VBVAGetInfoViewAndScreen failed %d\n", rc));
                    return rc;
                }

                rc = VBVAInfoScreen(pVGAState, &CurScreen);
                if (RT_FAILURE(rc))
                {
                    WARN(("VBVAInfoScreen failed %d\n", rc));
                    return rc;
                }
            }

            return VINF_SUCCESS;
        }
        default:
            WARN(("unexpected host ctl type %d\n", pCmd->enmType));
            return VERR_INVALID_PARAMETER;
    }
}
1447
/**
 * Validates and normalizes a guest-supplied VBVAINFOSCREEN in place.
 *
 * For a disable request the structure is reset to a canonical disabled
 * screen; otherwise the geometry is range-checked against the monitor count
 * and VRAM size.
 *
 * @returns VINF_SUCCESS if the screen data is acceptable,
 *          VERR_INVALID_PARAMETER otherwise.
 */
static int vboxVDMASetupScreenInfo(PVGASTATE pVGAState, VBVAINFOSCREEN *pScreen)
{
    const uint32_t u32ViewIndex = pScreen->u32ViewIndex;
    const bool fDisabled = RT_BOOL(pScreen->u16Flags & VBVA_SCREEN_F_DISABLED);

    if (fDisabled)
    {
        /* 0xFFFFFFFF means "all screens" for a disable request. */
        if (   u32ViewIndex < pVGAState->cMonitors
            || u32ViewIndex == UINT32_C(0xFFFFFFFF))
        {
            RT_ZERO(*pScreen);
            pScreen->u32ViewIndex = u32ViewIndex;
            pScreen->u16Flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED;
            return VINF_SUCCESS;
        }
    }
    else
    {
        /* Upper bounds chosen so the arithmetic below cannot overflow. */
        if (   u32ViewIndex < pVGAState->cMonitors
            && pScreen->u16BitsPerPixel <= 32
            && pScreen->u32Width <= UINT16_MAX
            && pScreen->u32Height <= UINT16_MAX
            && pScreen->u32LineSize <= UINT16_MAX * 4)
        {
            const uint32_t u32BytesPerPixel = (pScreen->u16BitsPerPixel + 7) / 8;
            if (pScreen->u32Width <= pScreen->u32LineSize / (u32BytesPerPixel? u32BytesPerPixel: 1))
            {
                const uint64_t u64ScreenSize = (uint64_t)pScreen->u32LineSize * pScreen->u32Height;
                /* Subtraction form avoids overflow when checking that the
                 * screen fits entirely inside VRAM. */
                if (   pScreen->u32StartOffset <= pVGAState->vram_size
                    && u64ScreenSize <= pVGAState->vram_size
                    && pScreen->u32StartOffset <= pVGAState->vram_size - (uint32_t)u64ScreenSize)
                {
                    return VINF_SUCCESS;
                }
            }
        }
    }

    return VERR_INVALID_PARAMETER;
}
1488
/**
 * Applies one guest resize request entry: validates the screen info,
 * notifies the chromium server, then updates the 2D view/screen state for
 * every monitor in the entry's target map.
 *
 * @returns VBox status code.
 */
static int vboxVDMACrGuestCtlResizeEntryProcess(struct VBOXVDMAHOST *pVdma, VBOXCMDVBVA_RESIZE_ENTRY *pEntry)
{
    PVGASTATE pVGAState = pVdma->pVGAState;
    VBVAINFOSCREEN Screen = pEntry->Screen;

    /* Verify and cleanup local copy of the input data. */
    int rc = vboxVDMASetupScreenInfo(pVGAState, &Screen);
    if (RT_FAILURE(rc))
    {
        WARN(("invalid screen data\n"));
        return rc;
    }

    /* Strip bits for non-existent monitors from the guest-supplied map. */
    VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
    memcpy(aTargetMap, pEntry->aTargetMap, sizeof(aTargetMap));
    ASMBitClearRange(aTargetMap, pVGAState->cMonitors, VBOX_VIDEO_MAX_SCREENS);

    rc = pVdma->CrSrvInfo.pfnResize(pVdma->CrSrvInfo.hSvr, &Screen, aTargetMap);
    if (RT_FAILURE(rc))
    {
        WARN(("pfnResize failed %d\n", rc));
        return rc;
    }

    /* A fake view which contains the current screen for the 2D VBVAInfoView. */
    VBVAINFOVIEW View;
    View.u32ViewOffset = 0;
    View.u32ViewSize = Screen.u32LineSize * Screen.u32Height + Screen.u32StartOffset;
    View.u32MaxScreenSize = Screen.u32LineSize * Screen.u32Height;

    const bool fDisable = RT_BOOL(Screen.u16Flags & VBVA_SCREEN_F_DISABLED);

    /* Walk every set bit of the target map. */
    for (int i = ASMBitFirstSet(aTargetMap, pVGAState->cMonitors);
            i >= 0;
            i = ASMBitNextSet(aTargetMap, pVGAState->cMonitors, i))
    {
        Screen.u32ViewIndex = i;

        VBVAINFOSCREEN CurScreen;
        VBVAINFOVIEW CurView;

        rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
        AssertRC(rc);

        /* Skip monitors already in the requested state. */
        if (!memcmp(&Screen, &CurScreen, sizeof (CurScreen)))
            continue;

        if (!fDisable || !CurView.u32ViewSize)
        {
            View.u32ViewIndex = Screen.u32ViewIndex;

            rc = VBVAInfoView(pVGAState, &View);
            if (RT_FAILURE(rc))
            {
                WARN(("VBVAInfoView failed %d\n", rc));
                break;
            }
        }

        rc = VBVAInfoScreen(pVGAState, &Screen);
        if (RT_FAILURE(rc))
        {
            WARN(("VBVAInfoScreen failed %d\n", rc));
            break;
        }
    }

    return rc;
}
1558
1559static int vboxVDMACrGuestCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd)
1560{
1561 VBVAEXHOSTCTL_TYPE enmType = pCmd->enmType;
1562 switch (enmType)
1563 {
1564 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1565 {
1566 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1567 {
1568 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1569 return VERR_INVALID_STATE;
1570 }
1571 return pVdma->CrSrvInfo.pfnGuestCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1572 }
1573 case VBVAEXHOSTCTL_TYPE_GHH_RESIZE:
1574 {
1575 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1576 {
1577 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1578 return VERR_INVALID_STATE;
1579 }
1580
1581 uint32_t cbCmd = pCmd->u.cmd.cbCmd;
1582
1583 if (cbCmd % sizeof (VBOXCMDVBVA_RESIZE_ENTRY))
1584 {
1585 WARN(("invalid buffer size\n"));
1586 return VERR_INVALID_PARAMETER;
1587 }
1588
1589 uint32_t cElements = cbCmd / sizeof (VBOXCMDVBVA_RESIZE_ENTRY);
1590 if (!cElements)
1591 {
1592 WARN(("invalid buffer size\n"));
1593 return VERR_INVALID_PARAMETER;
1594 }
1595
1596 VBOXCMDVBVA_RESIZE *pResize = (VBOXCMDVBVA_RESIZE*)pCmd->u.cmd.pu8Cmd;
1597
1598 int rc = VINF_SUCCESS;
1599
1600 for (uint32_t i = 0; i < cElements; ++i)
1601 {
1602 VBOXCMDVBVA_RESIZE_ENTRY *pEntry = &pResize->aEntries[i];
1603 rc = vboxVDMACrGuestCtlResizeEntryProcess(pVdma, pEntry);
1604 if (RT_FAILURE(rc))
1605 {
1606 WARN(("vboxVDMACrGuestCtlResizeEntryProcess failed %d\n", rc));
1607 break;
1608 }
1609 }
1610 return rc;
1611 }
1612 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE:
1613 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED:
1614 {
1615 VBVAENABLE *pEnable = (VBVAENABLE *)pCmd->u.cmd.pu8Cmd;
1616 Assert(pCmd->u.cmd.cbCmd == sizeof (VBVAENABLE));
1617 uint32_t u32Offset = pEnable->u32Offset;
1618 int rc = vdmaVBVAEnableProcess(pVdma, u32Offset);
1619 if (!RT_SUCCESS(rc))
1620 {
1621 WARN(("vdmaVBVAEnableProcess failed %d\n", rc));
1622 return rc;
1623 }
1624
1625 if (enmType == VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED)
1626 {
1627 rc = VBoxVBVAExHPPause(&pVdma->CmdVbva);
1628 if (!RT_SUCCESS(rc))
1629 {
1630 WARN(("VBoxVBVAExHPPause failed %d\n", rc));
1631 return rc;
1632 }
1633 }
1634
1635 return VINF_SUCCESS;
1636 }
1637 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1638 {
1639 int rc = vdmaVBVADisableProcess(pVdma, true);
1640 if (RT_FAILURE(rc))
1641 {
1642 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1643 return rc;
1644 }
1645
1646 /* do vgaUpdateDisplayAll right away */
1647 VMR3ReqCallNoWait(PDMDevHlpGetVM(pVdma->pVGAState->pDevInsR3), VMCPUID_ANY,
1648 (PFNRT)vgaUpdateDisplayAll, 2, pVdma->pVGAState, /* fFailOnResize = */ false);
1649
1650 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
1651 }
1652 default:
1653 WARN(("unexpected ctl type %d\n", pCmd->enmType));
1654 return VERR_INVALID_PARAMETER;
1655 }
1656}
1657
1658/**
1659 * @param fIn - whether this is a page in or out op.
1660 * the direction is VRA#M - related, so fIn == true - transfer to VRAM); false - transfer from VRAM
1661 */
1662static int vboxVDMACrCmdVbvaProcessPagingEl(PPDMDEVINS pDevIns, VBOXCMDVBVAPAGEIDX iPage, uint8_t *pu8Vram, bool fIn)
1663{
1664 RTGCPHYS phPage = (RTGCPHYS)iPage << PAGE_SHIFT;
1665 PGMPAGEMAPLOCK Lock;
1666 int rc;
1667
1668 if (fIn)
1669 {
1670 const void * pvPage;
1671 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPage, &Lock);
1672 if (!RT_SUCCESS(rc))
1673 {
1674 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d", rc));
1675 return rc;
1676 }
1677
1678 memcpy(pu8Vram, pvPage, PAGE_SIZE);
1679
1680 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1681 }
1682 else
1683 {
1684 void * pvPage;
1685 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvPage, &Lock);
1686 if (!RT_SUCCESS(rc))
1687 {
1688 WARN(("PDMDevHlpPhysGCPhys2CCPtr failed %d", rc));
1689 return rc;
1690 }
1691
1692 memcpy(pvPage, pu8Vram, PAGE_SIZE);
1693
1694 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1695 }
1696
1697 return VINF_SUCCESS;
1698}
1699
1700static int vboxVDMACrCmdVbvaProcessPagingEls(PPDMDEVINS pDevIns, const VBOXCMDVBVAPAGEIDX *piPages, uint32_t cPages, uint8_t *pu8Vram, bool fIn)
1701{
1702 for (uint32_t i = 0; i < cPages; ++i, pu8Vram += PAGE_SIZE)
1703 {
1704 int rc = vboxVDMACrCmdVbvaProcessPagingEl(pDevIns, piPages[i], pu8Vram, fIn);
1705 if (!RT_SUCCESS(rc))
1706 {
1707 WARN(("vboxVDMACrCmdVbvaProcessPagingEl failed %d", rc));
1708 return rc;
1709 }
1710 }
1711
1712 return VINF_SUCCESS;
1713}
1714
/**
 * Validates a guest paging-transfer command and extracts its parameters.
 *
 * All inputs are guest-controlled, hence the defensive range and overflow
 * checks.
 *
 * @returns 0 on success, -1 on any validation failure.
 * @param   ppPages   Where to return the page index array (points into @a pData).
 * @param   pcPages   Where to return the page count.
 * @param   ppu8Vram  Where to return the resolved VRAM address.
 * @param   pfIn      Where to return the transfer direction (true = to VRAM).
 */
static int8_t vboxVDMACrCmdVbvaPagingDataInit(PVGASTATE pVGAState, const VBOXCMDVBVA_HDR *pHdr, const VBOXCMDVBVA_PAGING_TRANSFER_DATA *pData, uint32_t cbCmd,
                            const VBOXCMDVBVAPAGEIDX **ppPages, VBOXCMDVBVAPAGEIDX *pcPages,
                            uint8_t **ppu8Vram, bool *pfIn)
{
    if (cbCmd < sizeof (VBOXCMDVBVA_PAGING_TRANSFER))
    {
        WARN(("cmd too small"));
        return -1;
    }

    /* Everything after the fixed header is the page index array. */
    VBOXCMDVBVAPAGEIDX cPages = cbCmd - RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers);
    if (cPages % sizeof (VBOXCMDVBVAPAGEIDX))
    {
        WARN(("invalid cmd size"));
        return -1;
    }
    cPages /= sizeof (VBOXCMDVBVAPAGEIDX);

    VBOXCMDVBVAOFFSET offVRAM = pData->Alloc.u.offVRAM;
    if (offVRAM & PAGE_OFFSET_MASK)
    {
        WARN(("offVRAM address is not on page boundary\n"));
        return -1;
    }
    const VBOXCMDVBVAPAGEIDX *pPages = pData->aPageNumbers;

    uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
    if (offVRAM >= pVGAState->vram_size)
    {
        WARN(("invalid vram offset"));
        return -1;
    }

    /* Reject cPages values whose byte size would overflow the index type. */
    if (~(~(VBOXCMDVBVAPAGEIDX)0 >> PAGE_SHIFT) & cPages)
    {
        WARN(("invalid cPages %d", cPages));
        return -1;
    }

    /* NOTE(review): '>=' also rejects a transfer ending exactly at
     * vram_size; presumably intentional conservatism -- confirm. */
    if (offVRAM + ((VBOXCMDVBVAOFFSET)cPages << PAGE_SHIFT) >= pVGAState->vram_size)
    {
        WARN(("invalid cPages %d, exceeding vram size", cPages));
        return -1;
    }

    uint8_t *pu8Vram = pu8VramBase + offVRAM;
    bool fIn = !!(pHdr->u8Flags & VBOXCMDVBVA_OPF_PAGING_TRANSFER_IN);

    *ppPages = pPages;
    *pcPages = cPages;
    *ppu8Vram = pu8Vram;
    *pfIn = fIn;
    return 0;
}
1769
1770static int8_t vboxVDMACrCmdVbvaPagingFill(PVGASTATE pVGAState, VBOXCMDVBVA_PAGING_FILL *pFill)
1771{
1772 VBOXCMDVBVAOFFSET offVRAM = pFill->offVRAM;
1773 if (offVRAM & PAGE_OFFSET_MASK)
1774 {
1775 WARN(("offVRAM address is not on page boundary\n"));
1776 return -1;
1777 }
1778
1779 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1780 if (offVRAM >= pVGAState->vram_size)
1781 {
1782 WARN(("invalid vram offset"));
1783 return -1;
1784 }
1785
1786 uint32_t cbFill = pFill->u32CbFill;
1787
1788 if (offVRAM + cbFill >= pVGAState->vram_size)
1789 {
1790 WARN(("invalid cPages"));
1791 return -1;
1792 }
1793
1794 uint32_t *pu32Vram = (uint32_t*)(pu8VramBase + offVRAM);
1795 uint32_t u32Color = pFill->u32Pattern;
1796
1797 Assert(!(cbFill % 4));
1798 for (uint32_t i = 0; i < cbFill / 4; ++i)
1799 {
1800 pu32Vram[i] = u32Color;
1801 }
1802
1803 return 0;
1804}
1805
/**
 * Executes a single (non-split) command-VBVA command whose data is fully
 * available in one contiguous buffer.
 *
 * @returns 0 on success, -1 on failure, or the chromium backend's result for
 *          opcodes it handles.
 */
static int8_t vboxVDMACrCmdVbvaProcessCmdData(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
{
    switch (pCmd->u8OpCode)
    {
        case VBOXCMDVBVA_OPTYPE_NOPCMD:
            return 0;
        case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
        {
            PVGASTATE pVGAState = pVdma->pVGAState;
            const VBOXCMDVBVAPAGEIDX *pPages;
            uint32_t cPages;
            uint8_t *pu8Vram;
            bool fIn;
            /* Validate the guest command and resolve its parameters. */
            int8_t i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pCmd, &((VBOXCMDVBVA_PAGING_TRANSFER*)pCmd)->Data, cbCmd,
                                                                &pPages, &cPages,
                                                                &pu8Vram, &fIn);
            if (i8Result < 0)
            {
                WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
                return i8Result;
            }

            PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
            int rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pPages, cPages, pu8Vram, fIn);
            if (!RT_SUCCESS(rc))
            {
                WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
                return -1;
            }

            return 0;
        }
        case VBOXCMDVBVA_OPTYPE_PAGING_FILL:
        {
            PVGASTATE pVGAState = pVdma->pVGAState;
            if (cbCmd != sizeof (VBOXCMDVBVA_PAGING_FILL))
            {
                WARN(("cmd too small"));
                return -1;
            }

            return vboxVDMACrCmdVbvaPagingFill(pVGAState, (VBOXCMDVBVA_PAGING_FILL*)pCmd);
        }
        default:
            /* Everything else goes to the chromium command server. */
            return pVdma->CrSrvInfo.pfnCmd(pVdma->CrSrvInfo.hSvr, pCmd, cbCmd);
    }
}
1853
1854#if 0
1855typedef struct VBOXCMDVBVA_PAGING_TRANSFER
1856{
1857 VBOXCMDVBVA_HDR Hdr;
1858 /* for now can only contain offVRAM.
1859 * paging transfer can NOT be initiated for allocations having host 3D object (hostID) associated */
1860 VBOXCMDVBVA_ALLOCINFO Alloc;
1861 uint32_t u32Reserved;
1862 VBOXCMDVBVA_SYSMEMEL aSysMem[1];
1863} VBOXCMDVBVA_PAGING_TRANSFER;
1864#endif
1865
1866AssertCompile(sizeof (VBOXCMDVBVA_HDR) == 8);
1867AssertCompile(sizeof (VBOXCMDVBVA_ALLOCINFO) == 4);
1868AssertCompile(sizeof (VBOXCMDVBVAPAGEIDX) == 4);
1869AssertCompile(!(PAGE_SIZE % sizeof (VBOXCMDVBVAPAGEIDX)));
1870
1871#define VBOXCMDVBVA_NUM_SYSMEMEL_PER_PAGE (PAGE_SIZE / sizeof (VBOXCMDVBVA_SYSMEMEL))
1872
/**
 * Top-level dispatcher for a command-VBVA command.
 *
 * Handles SYSMEMCMD (command body resides in guest system memory and may
 * straddle page boundaries), COMPLEXCMD (a batch of sub-commands), and
 * forwards everything else to vboxVDMACrCmdVbvaProcessCmdData.
 *
 * @returns 0 on success, -1 on failure, or the sub-command's result.
 */
static int8_t vboxVDMACrCmdVbvaProcess(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
{
    switch (pCmd->u8OpCode)
    {
        case VBOXCMDVBVA_OPTYPE_SYSMEMCMD:
        {
            if (cbCmd < sizeof (VBOXCMDVBVA_SYSMEMCMD))
            {
                WARN(("invalid command size"));
                return -1;
            }
            VBOXCMDVBVA_SYSMEMCMD *pSysmemCmd = (VBOXCMDVBVA_SYSMEMCMD*)pCmd;
            const VBOXCMDVBVA_HDR *pRealCmdHdr;
            /* The real command size is encoded in u8Flags (low byte) and
             * u8PrimaryID (high byte) of the wrapper header. */
            uint32_t cbRealCmd = pCmd->u8Flags;
            cbRealCmd |= (uint32_t)pCmd->u.u8PrimaryID << 8;
            if (cbRealCmd < sizeof (VBOXCMDVBVA_HDR))
            {
                WARN(("invalid sysmem cmd size"));
                return -1;
            }

            RTGCPHYS phCmd = (RTGCPHYS)pSysmemCmd->phCmd;

            PGMPAGEMAPLOCK Lock;
            PVGASTATE pVGAState = pVdma->pVGAState;
            PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
            const void * pvCmd;
            int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
            if (!RT_SUCCESS(rc))
            {
                WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
                return -1;
            }

            Assert((phCmd & PAGE_OFFSET_MASK) == (((uintptr_t)pvCmd) & PAGE_OFFSET_MASK));

            /* Bytes of the real command available in the first mapped page. */
            uint32_t cbCmdPart = PAGE_SIZE - (((uintptr_t)pvCmd) & PAGE_OFFSET_MASK);

            if (cbRealCmd <= cbCmdPart)
            {
                /* Whole command fits in one page -- process it directly. */
                pRealCmdHdr = (const VBOXCMDVBVA_HDR *)pvCmd;
                uint8_t i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pRealCmdHdr, cbRealCmd);
                PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
                return i8Result;
            }

            /* Command spans pages: reassemble the header if needed, then
             * process it chunk by chunk, remapping page by page. */
            VBOXCMDVBVA_HDR Hdr;
            const void *pvCurCmdTail;
            uint32_t cbCurCmdTail;
            if (cbCmdPart >= sizeof (*pRealCmdHdr))
            {
                pRealCmdHdr = (const VBOXCMDVBVA_HDR *)pvCmd;
                pvCurCmdTail = (const void*)(pRealCmdHdr + 1);
                cbCurCmdTail = cbCmdPart - sizeof (*pRealCmdHdr);
            }
            else
            {
                /* Header itself is split across the page boundary: copy the
                 * two pieces into the local Hdr. */
                memcpy(&Hdr, pvCmd, cbCmdPart);
                PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
                phCmd += cbCmdPart;
                Assert(!(phCmd & PAGE_OFFSET_MASK));
                rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
                if (!RT_SUCCESS(rc))
                {
                    WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
                    return -1;
                }

                cbCmdPart = sizeof (*pRealCmdHdr) - cbCmdPart;
                memcpy(((uint8_t*)(&Hdr)) + cbCmdPart, pvCmd, cbCmdPart);
                pRealCmdHdr = &Hdr;
                pvCurCmdTail = (const void*)(((uint8_t*)pvCmd) + cbCmdPart);
                cbCurCmdTail = PAGE_SIZE - cbCmdPart;
            }

            if (cbCurCmdTail > cbRealCmd - sizeof (*pRealCmdHdr))
                cbCurCmdTail = cbRealCmd - sizeof (*pRealCmdHdr);

            int8_t i8Result = 0;

            switch (pRealCmdHdr->u8OpCode)
            {
                case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
                {
                    const uint32_t *pPages;
                    uint32_t cPages;
                    uint8_t *pu8Vram;
                    bool fIn;
                    i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pRealCmdHdr, (const VBOXCMDVBVA_PAGING_TRANSFER_DATA*)pvCurCmdTail, cbRealCmd,
                                                                &pPages, &cPages,
                                                                &pu8Vram, &fIn);
                    if (i8Result < 0)
                    {
                        WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
                        /* we need to break, not return, to ensure currently locked page is released */
                        break;
                    }

                    if (cbCurCmdTail & 3)
                    {
                        WARN(("command is not alligned properly %d", cbCurCmdTail));
                        i8Result = -1;
                        /* we need to break, not return, to ensure currently locked page is released */
                        break;
                    }

                    uint32_t cCurPages = cbCurCmdTail / sizeof (VBOXCMDVBVAPAGEIDX);
                    Assert(cCurPages < cPages);

                    /* Process the page-index array page by page, remapping
                     * the next guest page of the command each iteration. */
                    do
                    {
                        rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pPages, cCurPages, pu8Vram, fIn);
                        if (!RT_SUCCESS(rc))
                        {
                            WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
                            i8Result = -1;
                            /* we need to break, not return, to ensure currently locked page is released */
                            break;
                        }

                        Assert(cPages >= cCurPages);
                        cPages -= cCurPages;

                        if (!cPages)
                            break;

                        PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);

                        Assert(!(phCmd & PAGE_OFFSET_MASK));

                        phCmd += PAGE_SIZE;
                        pu8Vram += (VBOXCMDVBVAOFFSET)cCurPages << PAGE_SHIFT;

                        rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
                        if (!RT_SUCCESS(rc))
                        {
                            WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
                            /* the page is not locked, return */
                            return -1;
                        }

                        cCurPages = PAGE_SIZE / sizeof (VBOXCMDVBVAPAGEIDX);
                        if (cCurPages > cPages)
                            cCurPages = cPages;
                    } while (1);
                    break;
                }
                default:
                    WARN(("command can not be splitted"));
                    i8Result = -1;
                    break;
            }

            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
            return i8Result;
        }
        case VBOXCMDVBVA_OPTYPE_COMPLEXCMD:
        {
            /* A batch: iterate the embedded sub-commands, each prefixed by
             * its own header carrying the sub-command size. */
            Assert(cbCmd >= sizeof (VBOXCMDVBVA_HDR));
            ++pCmd;
            cbCmd -= sizeof (*pCmd);
            uint32_t cbCurCmd = 0;
            for ( ; cbCmd; cbCmd -= cbCurCmd, pCmd = (VBOXCMDVBVA_HDR*)(((uint8_t*)pCmd) + cbCurCmd))
            {
                if (cbCmd < sizeof (VBOXCMDVBVA_HDR))
                {
                    WARN(("invalid command size"));
                    return -1;
                }

                cbCurCmd = pCmd->u2.complexCmdEl.u16CbCmdHost;
                if (cbCmd < cbCurCmd)
                {
                    WARN(("invalid command size"));
                    return -1;
                }

                int8_t i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCurCmd);
                if (i8Result < 0)
                {
                    WARN(("vboxVDMACrCmdVbvaProcess failed"));
                    return i8Result;
                }
            }
            return 0;
        }
        default:
            return vboxVDMACrCmdVbvaProcessCmdData(pVdma, pCmd, cbCmd);
    }
}
2063
2064static void vboxVDMACrCmdProcess(struct VBOXVDMAHOST *pVdma, uint8_t* pu8Cmd, uint32_t cbCmd)
2065{
2066 if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
2067 return;
2068
2069 if (cbCmd < sizeof (VBOXCMDVBVA_HDR))
2070 {
2071 WARN(("invalid command size"));
2072 return;
2073 }
2074
2075 PVBOXCMDVBVA_HDR pCmd = (PVBOXCMDVBVA_HDR)pu8Cmd;
2076
2077 /* check if the command is cancelled */
2078 if (!ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_IN_PROGRESS, VBOXCMDVBVA_STATE_SUBMITTED))
2079 {
2080 Assert(pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED);
2081 return;
2082 }
2083
2084 pCmd->u.i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCmd);
2085}
2086
/**
 * Sends the CRHGSMI setup control to the chromium backend, handing over the
 * VRAM mapping, the 3D LED and the command callout entry point.
 *
 * @returns VBox status code; VERR_NO_MEMORY when the control cannot be
 *          allocated.
 * @param pVdma The VDMA host state; CrSrvInfo is filled on success and
 *              zeroed on any failure.
 */
static int vboxVDMACrCtlHgsmiSetup(struct VBOXVDMAHOST *pVdma)
{
    PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP pCmd = (PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP)
        vboxVDMACrCtlCreate (VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP, sizeof (*pCmd));
    int rc = VERR_NO_MEMORY;
    if (pCmd)
    {
        PVGASTATE pVGAState = pVdma->pVGAState;
        pCmd->pvVRamBase = pVGAState->vram_ptrR3;
        pCmd->cbVRam = pVGAState->vram_size;
        pCmd->pLed = &pVGAState->Led3D;
        pCmd->CrClientInfo.hClient = pVdma;
        pCmd->CrClientInfo.pfnCallout = vboxCmdVBVACmdCallout;
        rc = vboxVDMACrCtlPost(pVGAState, &pCmd->Hdr, sizeof (*pCmd));
        if (RT_SUCCESS(rc))
        {
            rc = vboxVDMACrCtlGetRc(&pCmd->Hdr);
            if (RT_SUCCESS(rc))
                pVdma->CrSrvInfo = pCmd->CrCmdServerInfo;
            else if (rc != VERR_NOT_SUPPORTED) /* no 3D backend is a legitimate configuration, do not warn */
                WARN(("vboxVDMACrCtlGetRc returned %d\n", rc));
        }
        else
            WARN(("vboxVDMACrCtlPost failed %d\n", rc));

        vboxVDMACrCtlRelease(&pCmd->Hdr);
    }

    /* Never leave stale server info behind after a failure. */
    if (!RT_SUCCESS(rc))
        memset(&pVdma->CrSrvInfo, 0, sizeof (pVdma->CrSrvInfo));

    return rc;
}
2120
2121static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer);
2122
2123/* check if this is external cmd to be passed to chromium backend */
/**
 * Checks whether a DMA command buffer contains a command that must be handled
 * specially (chromium command or BPB transfer) and, if so, processes it.
 *
 * @returns VINF_SUCCESS when the command was fully handled here (including
 *          completion), VINF_NOT_SUPPORTED when the caller should process it
 *          through the normal pipeline, or a VERR_xxx status on invalid input.
 * @param pVdma   The VDMA host state.
 * @param pCmdDr  The command descriptor.
 * @param cbCmdDr Size of the descriptor (including any trailing command data).
 */
static int vboxVDMACmdCheckCrCmd(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmdDr, uint32_t cbCmdDr)
{
    PVBOXVDMACMD pDmaCmd = NULL;
    uint32_t cbDmaCmd = 0;
    uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
    int rc = VINF_NOT_SUPPORTED;

    cbDmaCmd = pCmdDr->cbBuf;

    /* Locate the embedded VBOXVDMACMD: either right after the descriptor or
     * at an offset inside guest VRAM. */
    if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
    {
        if (cbCmdDr < sizeof (*pCmdDr) + VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        if (cbDmaCmd < cbCmdDr - sizeof (*pCmdDr) - VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid command buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        pDmaCmd = VBOXVDMACBUF_DR_TAIL(pCmdDr, VBOXVDMACMD);
    }
    else if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
    {
        VBOXVIDEOOFFSET offBuf = pCmdDr->Location.offVramBuf;
        if (offBuf + cbDmaCmd > pVdma->pVGAState->vram_size)
        {
            AssertMsgFailed(("invalid command buffer data from offset!"));
            return VERR_INVALID_PARAMETER;
        }
        pDmaCmd = (VBOXVDMACMD*)(pvRam + offBuf);
    }

    if (pDmaCmd)
    {
        Assert(cbDmaCmd >= VBOXVDMACMD_HEADER_SIZE());
        uint32_t cbBody = VBOXVDMACMD_BODY_SIZE(cbDmaCmd);

        switch (pDmaCmd->enmType)
        {
            case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
            {
                PVBOXVDMACMD_CHROMIUM_CMD pCrCmd = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_CHROMIUM_CMD);
                if (cbBody < sizeof (*pCrCmd))
                {
                    AssertMsgFailed(("invalid chromium command buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }
                PVGASTATE pVGAState = pVdma->pVGAState;
                rc = VINF_SUCCESS;
                if (pVGAState->pDrv->pfnCrHgsmiCommandProcess)
                {
                    /* Completion is asynchronous: the chromium backend will
                     * complete the descriptor via the callbacks below. */
                    VBoxSHGSMICommandMarkAsynchCompletion(pCmdDr);
                    pVGAState->pDrv->pfnCrHgsmiCommandProcess(pVGAState->pDrv, pCrCmd, cbBody);
                    break;
                }
                else
                {
                    Assert(0);
                }

                /* No backend: complete the command synchronously right away. */
                int tmpRc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                AssertRC(tmpRc);
                break;
            }
            case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
            {
                PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
                if (cbBody < sizeof (*pTransfer))
                {
                    AssertMsgFailed(("invalid bpb transfer buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }

                rc = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, sizeof (*pTransfer));
                AssertRC(rc);
                if (RT_SUCCESS(rc))
                {
                    pCmdDr->rc = VINF_SUCCESS;
                    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                    AssertRC(rc);
                    rc = VINF_SUCCESS;
                }
                break;
            }
            default:
                /* Not a special command; let the caller process it. */
                break;
        }
    }
    return rc;
}
2218
/**
 * Asynchronous completion callback for chromium commands dispatched by
 * vboxVDMACmdCheckCrCmd: recovers the enclosing descriptor from the command
 * body and completes it over SHGSMI.
 *
 * @returns VBox status code of the SHGSMI completion.
 * @param pInterface The display VBVA callbacks interface.
 * @param pCmd       The chromium command body that was processed.
 * @param rc         The command processing result to report to the guest.
 */
int vboxVDMACrHgsmiCommandCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc)
{
    PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
    PHGSMIINSTANCE pIns = pVGAState->pHGSMI;
    /* Walk back from the body to the VBOXVDMACMD header and then to the
     * descriptor that carries it. */
    VBOXVDMACMD *pDmaHdr = VBOXVDMACMD_FROM_BODY(pCmd);
    VBOXVDMACBUF_DR *pDr = VBOXVDMACBUF_DR_FROM_TAIL(pDmaHdr);
    AssertRC(rc);
    pDr->rc = rc;

    Assert(pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
    rc = VBoxSHGSMICommandComplete(pIns, pDr);
    AssertRC(rc);
    return rc;
}
2233
/**
 * Asynchronous completion callback for chromium control commands: records the
 * result and invokes the private completion callback if one was registered.
 *
 * @returns VINF_SUCCESS.
 * @param pInterface The display VBVA callbacks interface.
 * @param pCmd       The control command that was processed.
 * @param rc         The processing result.
 */
int vboxVDMACrHgsmiControlCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CTL pCmd, int rc)
{
    PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
    PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pCmdPrivate = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
    pCmdPrivate->rc = rc;
    if (pCmdPrivate->pfnCompletion)
    {
        pCmdPrivate->pfnCompletion(pVGAState, pCmd, pCmdPrivate->pvCompletion);
    }
    return VINF_SUCCESS;
}
2245
/**
 * Copies pixel data of one rectangle between two surfaces in VRAM.
 *
 * No color conversion and no stretching are supported; formats and rectangle
 * sizes must match.
 *
 * @returns VINF_SUCCESS, or VERR_INVALID_FUNCTION on a format mismatch.
 * @param pVdma     The VDMA host state (unused).
 * @param pvDstSurf Destination surface start in VRAM.
 * @param pvSrcSurf Source surface start in VRAM.
 * @param pDstDesc  Destination surface description (format/pitch/bpp).
 * @param pSrcDesc  Source surface description.
 * @param pDstRectl Destination rectangle.
 * @param pSrcRectl Source rectangle (same size as the destination one).
 */
static int vboxVDMACmdExecBltPerform(PVBOXVDMAHOST pVdma, uint8_t *pvDstSurf, const uint8_t *pvSrcSurf,
                                     const PVBOXVDMA_SURF_DESC pDstDesc, const PVBOXVDMA_SURF_DESC pSrcDesc,
                                     const VBOXVDMA_RECTL * pDstRectl, const VBOXVDMA_RECTL * pSrcRectl)
{
    RT_NOREF(pVdma);
    /* we do not support color conversion */
    Assert(pDstDesc->format == pSrcDesc->format);
    /* we do not support stretching */
    Assert(pDstRectl->height == pSrcRectl->height);
    Assert(pDstRectl->width == pSrcRectl->width);
    if (pDstDesc->format != pSrcDesc->format)
        return VERR_INVALID_FUNCTION;
    /* Fast path: full-width rows with identical layout on both sides can be
     * copied as one contiguous chunk (same byte offset in src and dst). */
    if (pDstDesc->width == pDstRectl->width
            && pSrcDesc->width == pSrcRectl->width
            && pSrcDesc->width == pDstDesc->width)
    {
        Assert(!pDstRectl->left);
        Assert(!pSrcRectl->left);
        uint32_t cbOff = pDstDesc->pitch * pDstRectl->top;
        uint32_t cbSize = pDstDesc->pitch * pDstRectl->height;
        memcpy(pvDstSurf + cbOff, pvSrcSurf + cbOff, cbSize);
    }
    else
    {
        /* Row-by-row copy; offsets are computed in bytes from bpp, rounding
         * partial bytes outward so sub-byte formats are fully covered. */
        uint32_t offDstLineStart = pDstRectl->left * pDstDesc->bpp >> 3;
        uint32_t offDstLineEnd = ((pDstRectl->left * pDstDesc->bpp + 7) >> 3) + ((pDstDesc->bpp * pDstRectl->width + 7) >> 3);
        uint32_t cbDstLine = offDstLineEnd - offDstLineStart;
        uint32_t offDstStart = pDstDesc->pitch * pDstRectl->top + offDstLineStart;
        Assert(cbDstLine <= pDstDesc->pitch);
        uint32_t cbDstSkip = pDstDesc->pitch;
        uint8_t * pvDstStart = pvDstSurf + offDstStart;

        uint32_t offSrcLineStart = pSrcRectl->left * pSrcDesc->bpp >> 3;
#ifdef VBOX_STRICT
        uint32_t offSrcLineEnd = ((pSrcRectl->left * pSrcDesc->bpp + 7) >> 3) + ((pSrcDesc->bpp * pSrcRectl->width + 7) >> 3);
        uint32_t cbSrcLine = offSrcLineEnd - offSrcLineStart;
#endif
        uint32_t offSrcStart = pSrcDesc->pitch * pSrcRectl->top + offSrcLineStart;
        Assert(cbSrcLine <= pSrcDesc->pitch);
        uint32_t cbSrcSkip = pSrcDesc->pitch;
        const uint8_t * pvSrcStart = pvSrcSurf + offSrcStart;

        Assert(cbDstLine == cbSrcLine);

        /* NOTE(review): the memcpy runs before the break test, so this copies
         * height+1 rows (i = 0..height inclusive) - looks like an off-by-one,
         * but callers may rely on it; confirm before changing. */
        for (uint32_t i = 0; ; ++i)
        {
            memcpy (pvDstStart, pvSrcStart, cbDstLine);
            if (i == pDstRectl->height)
                break;
            pvDstStart += cbDstSkip;
            pvSrcStart += cbSrcSkip;
        }
    }
    return VINF_SUCCESS;
}
2301
2302static void vboxVDMARectlUnite(VBOXVDMA_RECTL * pRectl1, const VBOXVDMA_RECTL * pRectl2)
2303{
2304 if (!pRectl1->width)
2305 *pRectl1 = *pRectl2;
2306 else
2307 {
2308 int16_t x21 = pRectl1->left + pRectl1->width;
2309 int16_t x22 = pRectl2->left + pRectl2->width;
2310 if (pRectl1->left > pRectl2->left)
2311 {
2312 pRectl1->left = pRectl2->left;
2313 pRectl1->width = x21 < x22 ? x22 - pRectl1->left : x21 - pRectl1->left;
2314 }
2315 else if (x21 < x22)
2316 pRectl1->width = x22 - pRectl1->left;
2317
2318 x21 = pRectl1->top + pRectl1->height;
2319 x22 = pRectl2->top + pRectl2->height;
2320 if (pRectl1->top > pRectl2->top)
2321 {
2322 pRectl1->top = pRectl2->top;
2323 pRectl1->height = x21 < x22 ? x22 - pRectl1->top : x21 - pRectl1->top;
2324 }
2325 else if (x21 < x22)
2326 pRectl1->height = x22 - pRectl1->top;
2327 }
2328}
2329
2330/*
2331 * @return on success the number of bytes the command contained, otherwise - VERR_xxx error code
2332 */
/**
 * Executes a present-blt command: copies the source rectangle(s) to the
 * destination surface, both located in VRAM.
 *
 * @returns The number of bytes consumed by the command (cbBlt) on success,
 *          otherwise a VERR_xxx status code.
 * @param pVdma    The VDMA host state.
 * @param pBlt     The blt command body (with cDstSubRects trailing sub-rects).
 * @param cbBuffer Number of bytes available in the command buffer.
 */
static int vboxVDMACmdExecBlt(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt, uint32_t cbBuffer)
{
    const uint32_t cbBlt = VBOXVDMACMD_BODY_FIELD_OFFSET(uint32_t, VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects[pBlt->cDstSubRects]);
    Assert(cbBlt <= cbBuffer);
    if (cbBuffer < cbBlt)
        return VERR_INVALID_FUNCTION;

    /* we do not support stretching for now */
    Assert(pBlt->srcRectl.width == pBlt->dstRectl.width);
    Assert(pBlt->srcRectl.height == pBlt->dstRectl.height);
    if (pBlt->srcRectl.width != pBlt->dstRectl.width)
        return VERR_INVALID_FUNCTION;
    if (pBlt->srcRectl.height != pBlt->dstRectl.height)
        return VERR_INVALID_FUNCTION;
    Assert(pBlt->cDstSubRects);

    uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
    VBOXVDMA_RECTL updateRectl = {0, 0, 0, 0};

    if (pBlt->cDstSubRects)
    {
        /* Copy each sub-rectangle, translating by the main rectangles'
         * origins when those are non-zero. */
        VBOXVDMA_RECTL dstRectl, srcRectl;
        const VBOXVDMA_RECTL *pDstRectl, *pSrcRectl;
        for (uint32_t i = 0; i < pBlt->cDstSubRects; ++i)
        {
            pDstRectl = &pBlt->aDstSubRects[i];
            if (pBlt->dstRectl.left || pBlt->dstRectl.top)
            {
                dstRectl.left = pDstRectl->left + pBlt->dstRectl.left;
                dstRectl.top = pDstRectl->top + pBlt->dstRectl.top;
                dstRectl.width = pDstRectl->width;
                dstRectl.height = pDstRectl->height;
                pDstRectl = &dstRectl;
            }

            /* NOTE(review): the source sub-rect is deliberately taken from
             * aDstSubRects too - presumably the sub-rects are shared since
             * stretching is disallowed; confirm against the guest driver. */
            pSrcRectl = &pBlt->aDstSubRects[i];
            if (pBlt->srcRectl.left || pBlt->srcRectl.top)
            {
                srcRectl.left = pSrcRectl->left + pBlt->srcRectl.left;
                srcRectl.top = pSrcRectl->top + pBlt->srcRectl.top;
                srcRectl.width = pSrcRectl->width;
                srcRectl.height = pSrcRectl->height;
                pSrcRectl = &srcRectl;
            }

            int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
                    &pBlt->dstDesc, &pBlt->srcDesc,
                    pDstRectl,
                    pSrcRectl);
            AssertRC(rc);
            if (!RT_SUCCESS(rc))
                return rc;

            vboxVDMARectlUnite(&updateRectl, pDstRectl);
        }
    }
    else
    {
        /* No sub-rects: blt the whole main rectangle. */
        int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
                &pBlt->dstDesc, &pBlt->srcDesc,
                &pBlt->dstRectl,
                &pBlt->srcRectl);
        AssertRC(rc);
        if (!RT_SUCCESS(rc))
            return rc;

        vboxVDMARectlUnite(&updateRectl, &pBlt->dstRectl);
    }

    return cbBlt;
}
2404
/**
 * Executes a BPB transfer command: copies cbTransferSize bytes where source
 * and destination are each either a VRAM offset or a guest-physical buffer
 * mapped page by page.
 *
 * @returns sizeof(*pTransfer) (the number of command bytes consumed) on
 *          success, otherwise a VERR_xxx status code.
 * @param pVdma     The VDMA host state.
 * @param pTransfer The transfer command body.
 * @param cbBuffer  Number of bytes available in the command buffer.
 */
static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer)
{
    if (cbBuffer < sizeof (*pTransfer))
        return VERR_INVALID_PARAMETER;

    PVGASTATE pVGAState = pVdma->pVGAState;
    uint8_t * pvRam = pVGAState->vram_ptrR3;
    PGMPAGEMAPLOCK SrcLock;
    PGMPAGEMAPLOCK DstLock;
    PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
    const void * pvSrc;
    void * pvDst;
    int rc = VINF_SUCCESS;
    uint32_t cbTransfer = pTransfer->cbTransferSize;
    uint32_t cbTransfered = 0;
    bool bSrcLocked = false;
    bool bDstLocked = false;
    /* Copy in chunks: VRAM sides are addressed directly; guest-physical
     * sides are mapped one page at a time, limiting the chunk to 0x1000.
     * NOTE(review): the chunk is capped at 0x1000 regardless of the offset
     * within the mapped page - this assumes phBuf is page aligned; confirm. */
    do
    {
        uint32_t cbSubTransfer = cbTransfer;
        if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
        {
            pvSrc  = pvRam + pTransfer->Src.offVramBuf + cbTransfered;
        }
        else
        {
            RTGCPHYS phPage = pTransfer->Src.phBuf;
            phPage += cbTransfered;
            rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvSrc, &SrcLock);
            AssertRC(rc);
            if (RT_SUCCESS(rc))
            {
                bSrcLocked = true;
                cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
            }
            else
            {
                break;
            }
        }

        if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
        {
            pvDst  = pvRam + pTransfer->Dst.offVramBuf + cbTransfered;
        }
        else
        {
            RTGCPHYS phPage = pTransfer->Dst.phBuf;
            phPage += cbTransfered;
            rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvDst, &DstLock);
            AssertRC(rc);
            if (RT_SUCCESS(rc))
            {
                bDstLocked = true;
                cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
            }
            else
            {
                break;
            }
        }

        if (RT_SUCCESS(rc))
        {
            memcpy(pvDst, pvSrc, cbSubTransfer);
            cbTransfer -= cbSubTransfer;
            cbTransfered += cbSubTransfer;
        }
        else
        {
            cbTransfer = 0; /* to break */
        }

        /* Release the per-iteration page mapping locks before looping. */
        if (bSrcLocked)
            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &SrcLock);
        if (bDstLocked)
            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &DstLock);
    } while (cbTransfer);

    if (RT_SUCCESS(rc))
        return sizeof (*pTransfer);
    return rc;
}
2488
2489static int vboxVDMACmdExec(PVBOXVDMAHOST pVdma, const uint8_t *pvBuffer, uint32_t cbBuffer)
2490{
2491 do
2492 {
2493 Assert(pvBuffer);
2494 Assert(cbBuffer >= VBOXVDMACMD_HEADER_SIZE());
2495
2496 if (!pvBuffer)
2497 return VERR_INVALID_PARAMETER;
2498 if (cbBuffer < VBOXVDMACMD_HEADER_SIZE())
2499 return VERR_INVALID_PARAMETER;
2500
2501 PVBOXVDMACMD pCmd = (PVBOXVDMACMD)pvBuffer;
2502 switch (pCmd->enmType)
2503 {
2504 case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
2505 {
2506#ifdef VBOXWDDM_TEST_UHGSMI
2507 static int count = 0;
2508 static uint64_t start, end;
2509 if (count==0)
2510 {
2511 start = RTTimeNanoTS();
2512 }
2513 ++count;
2514 if (count==100000)
2515 {
2516 end = RTTimeNanoTS();
2517 float ems = (end-start)/1000000.f;
2518 LogRel(("100000 calls took %i ms, %i cps\n", (int)ems, (int)(100000.f*1000.f/ems) ));
2519 }
2520#endif
2521 /* todo: post the buffer to chromium */
2522 return VINF_SUCCESS;
2523 }
2524 case VBOXVDMACMD_TYPE_DMA_PRESENT_BLT:
2525 {
2526 const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_PRESENT_BLT);
2527 int cbBlt = vboxVDMACmdExecBlt(pVdma, pBlt, cbBuffer);
2528 Assert(cbBlt >= 0);
2529 Assert((uint32_t)cbBlt <= cbBuffer);
2530 if (cbBlt >= 0)
2531 {
2532 if ((uint32_t)cbBlt == cbBuffer)
2533 return VINF_SUCCESS;
2534 else
2535 {
2536 cbBuffer -= (uint32_t)cbBlt;
2537 pvBuffer -= cbBlt;
2538 }
2539 }
2540 else
2541 return cbBlt; /* error */
2542 break;
2543 }
2544 case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
2545 {
2546 const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
2547 int cbTransfer = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, cbBuffer);
2548 Assert(cbTransfer >= 0);
2549 Assert((uint32_t)cbTransfer <= cbBuffer);
2550 if (cbTransfer >= 0)
2551 {
2552 if ((uint32_t)cbTransfer == cbBuffer)
2553 return VINF_SUCCESS;
2554 else
2555 {
2556 cbBuffer -= (uint32_t)cbTransfer;
2557 pvBuffer -= cbTransfer;
2558 }
2559 }
2560 else
2561 return cbTransfer; /* error */
2562 break;
2563 }
2564 case VBOXVDMACMD_TYPE_DMA_NOP:
2565 return VINF_SUCCESS;
2566 case VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ:
2567 return VINF_SUCCESS;
2568 default:
2569 AssertBreakpoint();
2570 return VERR_INVALID_FUNCTION;
2571 }
2572 } while (1);
2573
2574 /* we should not be here */
2575 AssertBreakpoint();
2576 return VERR_INVALID_STATE;
2577}
2578
/**
 * The VDMA worker thread: pulls commands and controls from the VBVA ring and
 * processes them until the thread is asked to terminate.
 *
 * @returns VINF_SUCCESS.
 * @param hThreadSelf The thread handle (unused).
 * @param pvUser      Pointer to the VBOXVDMAHOST instance.
 */
static DECLCALLBACK(int) vboxVDMAWorkerThread(RTTHREAD hThreadSelf, void *pvUser)
{
    RT_NOREF(hThreadSelf);
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvUser;
    PVGASTATE pVGAState = pVdma->pVGAState;
    VBVAEXHOSTCONTEXT *pCmdVbva = &pVdma->CmdVbva;
    uint8_t *pCmd;
    uint32_t cbCmd;
    int rc;

    /* Signal the creator that the thread is up and running. */
    VBoxVDMAThreadNotifyConstructSucceeded(&pVdma->Thread, pvUser);

    while (!VBoxVDMAThreadIsTerminating(&pVdma->Thread))
    {
        VBVAEXHOST_DATA_TYPE enmType = VBoxVBVAExHPDataGet(pCmdVbva, &pCmd, &cbCmd);
        switch (enmType)
        {
            case VBVAEXHOST_DATA_TYPE_CMD:
                vboxVDMACrCmdProcess(pVdma, pCmd, cbCmd);
                VBoxVBVAExHPDataCompleteCmd(pCmdVbva, cbCmd);
                VBVARaiseIrq(pVGAState, 0);
                break;
            case VBVAEXHOST_DATA_TYPE_GUESTCTL:
                rc = vboxVDMACrGuestCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd);
                VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
                break;
            case VBVAEXHOST_DATA_TYPE_HOSTCTL:
            {
                bool fContinue = true;
                rc = vboxVDMACrHostCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd, &fContinue);
                VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
                if (fContinue)
                    break;
            }
            /* deliberate fall through: when the host control asked us to stop
             * processing (fContinue == false), wait just like the no-data case */
            case VBVAEXHOST_DATA_TYPE_NO_DATA:
                rc = VBoxVDMAThreadEventWait(&pVdma->Thread, RT_INDEFINITE_WAIT);
                AssertRC(rc);
                break;
            default:
                WARN(("unexpected type %d\n", enmType));
                break;
        }
    }

    /* Signal that termination is complete. */
    VBoxVDMAThreadNotifyTerminatingSucceeded(&pVdma->Thread, pvUser);

    return VINF_SUCCESS;
}
2627
/**
 * Locates the command payload referenced by a descriptor (inline, in VRAM, or
 * in a guest-physical page), executes it, and completes the descriptor.
 *
 * @param pVdma The VDMA host state.
 * @param pCmd  The command descriptor; pCmd->rc receives the result.
 * @param cbCmd Size of the descriptor (unused).
 */
static void vboxVDMACommandProcess(PVBOXVDMAHOST pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    RT_NOREF(cbCmd);
    PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
    const uint8_t * pvBuf;
    PGMPAGEMAPLOCK Lock;
    int rc;
    bool bReleaseLocked = false;

    do
    {
        PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;

        if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
            pvBuf = VBOXVDMACBUF_DR_TAIL(pCmd, const uint8_t);
        else if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
        {
            uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
            pvBuf = pvRam + pCmd->Location.offVramBuf;
        }
        else
        {
            /* Guest-physical buffer: map the page containing it; buffers that
             * cross a page boundary are rejected. */
            RTGCPHYS phPage = pCmd->Location.phBuf & ~0xfffULL;
            uint32_t offset = pCmd->Location.phBuf & 0xfff;
            Assert(offset + pCmd->cbBuf <= 0x1000);
            if (offset + pCmd->cbBuf > 0x1000)
            {
                /* @todo: more advanced mechanism of command buffer proc is actually needed */
                rc = VERR_INVALID_PARAMETER;
                break;
            }

            const void * pvPageBuf;
            rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPageBuf, &Lock);
            AssertRC(rc);
            if (!RT_SUCCESS(rc))
            {
                /* @todo: if (rc == VERR_PGM_PHYS_PAGE_RESERVED) -> fall back on using PGMPhysRead ?? */
                break;
            }

            pvBuf = (const uint8_t *)pvPageBuf;
            pvBuf += offset;

            bReleaseLocked = true;
        }

        rc = vboxVDMACmdExec(pVdma, pvBuf, pCmd->cbBuf);
        AssertRC(rc);

        /* Drop the page mapping lock once the command has been executed. */
        if (bReleaseLocked)
            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
    } while (0);

    pCmd->rc = rc;

    rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
    AssertRC(rc);
}
2687
# if 0 /** @todo vboxVDMAControlProcess is unused */
/** Completes a VDMA control command with VINF_SUCCESS (dead code, kept for reference). */
static void vboxVDMAControlProcess(PVBOXVDMAHOST pVdma, PVBOXVDMA_CTL pCmd)
{
    PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
    pCmd->i32Result = VINF_SUCCESS;
    int rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
    AssertRC(rc);
}
# endif
2697
2698#endif /* #ifdef VBOX_WITH_CRHGSMI */
2699
2700#ifdef VBOX_VDMA_WITH_WATCHDOG
2701static DECLCALLBACK(void) vboxVDMAWatchDogTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
2702{
2703 VBOXVDMAHOST *pVdma = (VBOXVDMAHOST *)pvUser;
2704 PVGASTATE pVGAState = pVdma->pVGAState;
2705 VBVARaiseIrq(pVGAState, HGSMIHOSTFLAGS_WATCHDOG);
2706}
2707
2708static int vboxVDMAWatchDogCtl(struct VBOXVDMAHOST *pVdma, uint32_t cMillis)
2709{
2710 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
2711 if (cMillis)
2712 TMTimerSetMillies(pVdma->WatchDogTimer, cMillis);
2713 else
2714 TMTimerStop(pVdma->WatchDogTimer);
2715 return VINF_SUCCESS;
2716}
2717#endif
2718
/**
 * Creates and initializes the VDMA host state and hooks it into the VGA state.
 *
 * On any failure all partially created resources are torn down in reverse
 * order and the allocation is freed.
 *
 * @returns VBox status code.
 * @param pVGAState     The VGA device state to attach to.
 * @param cPipeElements Unused legacy parameter.
 */
int vboxVDMAConstruct(PVGASTATE pVGAState, uint32_t cPipeElements)
{
    RT_NOREF(cPipeElements);
    int rc;
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)RTMemAllocZ(sizeof(*pVdma));
    Assert(pVdma);
    if (pVdma)
    {
        pVdma->pHgsmi = pVGAState->pHGSMI;
        pVdma->pVGAState = pVGAState;

#ifdef VBOX_VDMA_WITH_WATCHDOG
        /* Watchdog timer creation failure is tolerated (only asserted). */
        rc = PDMDevHlpTMTimerCreate(pVGAState->pDevInsR3, TMCLOCK_REAL, vboxVDMAWatchDogTimer,
                                    pVdma, TMTIMER_FLAGS_NO_CRIT_SECT,
                                    "VDMA WatchDog Timer", &pVdma->WatchDogTimer);
        AssertRC(rc);
#endif

#ifdef VBOX_WITH_CRHGSMI
        VBoxVDMAThreadInit(&pVdma->Thread);

        /* Resource acquisition ladder; each failure path unwinds everything
         * acquired before it. */
        rc = RTSemEventMultiCreate(&pVdma->HostCrCtlCompleteEvent);
        if (RT_SUCCESS(rc))
        {
            rc = VBoxVBVAExHSInit(&pVdma->CmdVbva);
            if (RT_SUCCESS(rc))
            {
                rc = RTCritSectInit(&pVdma->CalloutCritSect);
                if (RT_SUCCESS(rc))
                {
                    pVGAState->pVdma = pVdma;
                    int rcIgnored = vboxVDMACrCtlHgsmiSetup(pVdma); NOREF(rcIgnored); /** @todo is this ignoring intentional? */
                    return VINF_SUCCESS;
                }
                WARN(("RTCritSectInit failed %d\n", rc));

                VBoxVBVAExHSTerm(&pVdma->CmdVbva);
            }
            else
                WARN(("VBoxVBVAExHSInit failed %d\n", rc));

            RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
        }
        else
            WARN(("RTSemEventMultiCreate failed %d\n", rc));


        RTMemFree(pVdma);
#else
        pVGAState->pVdma = pVdma;
        return VINF_SUCCESS;
#endif
    }
    else
        rc = VERR_OUT_OF_RESOURCES;

    return rc;
}
2777
/**
 * Resets the VDMA state: synchronously disables VBVA command processing.
 *
 * @returns VINF_SUCCESS.
 * @param pVdma The VDMA host state.
 */
int vboxVDMAReset(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    vdmaVBVACtlDisableSync(pVdma);
#endif
    return VINF_SUCCESS;
}
2785
2786int vboxVDMADestruct(struct VBOXVDMAHOST *pVdma)
2787{
2788 if (!pVdma)
2789 return VINF_SUCCESS;
2790#ifdef VBOX_WITH_CRHGSMI
2791 vdmaVBVACtlDisableSync(pVdma);
2792 VBoxVDMAThreadCleanup(&pVdma->Thread);
2793 VBoxVBVAExHSTerm(&pVdma->CmdVbva);
2794 RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
2795 RTCritSectDelete(&pVdma->CalloutCritSect);
2796#endif
2797 RTMemFree(pVdma);
2798 return VINF_SUCCESS;
2799}
2800
2801void vboxVDMAControl(struct VBOXVDMAHOST *pVdma, PVBOXVDMA_CTL pCmd, uint32_t cbCmd)
2802{
2803 RT_NOREF(cbCmd);
2804 PHGSMIINSTANCE pIns = pVdma->pHgsmi;
2805
2806 switch (pCmd->enmCtl)
2807 {
2808 case VBOXVDMA_CTL_TYPE_ENABLE:
2809 pCmd->i32Result = VINF_SUCCESS;
2810 break;
2811 case VBOXVDMA_CTL_TYPE_DISABLE:
2812 pCmd->i32Result = VINF_SUCCESS;
2813 break;
2814 case VBOXVDMA_CTL_TYPE_FLUSH:
2815 pCmd->i32Result = VINF_SUCCESS;
2816 break;
2817#ifdef VBOX_VDMA_WITH_WATCHDOG
2818 case VBOXVDMA_CTL_TYPE_WATCHDOG:
2819 pCmd->i32Result = vboxVDMAWatchDogCtl(pVdma, pCmd->u32Offset);
2820 break;
2821#endif
2822 default:
2823 WARN(("cmd not supported"));
2824 pCmd->i32Result = VERR_NOT_SUPPORTED;
2825 }
2826
2827 int rc = VBoxSHGSMICommandComplete (pIns, pCmd);
2828 AssertRC(rc);
2829}
2830
/**
 * Entry point for a VDMA command descriptor submitted by the guest.
 *
 * Chromium commands are routed to the chromium HGCM thread first; everything
 * else goes through the regular command pipeline. On error the descriptor is
 * completed immediately with the failure status.
 *
 * @param pVdma The VDMA host state.
 * @param pCmd  The command descriptor.
 * @param cbCmd Size of the descriptor in bytes.
 */
void vboxVDMACommand(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    int rc = VERR_NOT_IMPLEMENTED;

#ifdef VBOX_WITH_CRHGSMI
    /* chromium commands are processed by crhomium hgcm thread independently from our internal cmd processing pipeline
     * this is why we process them specially */
    rc = vboxVDMACmdCheckCrCmd(pVdma, pCmd, cbCmd);
    if (rc == VINF_SUCCESS)
        return;

    if (RT_FAILURE(rc))
    {
        pCmd->rc = rc;
        rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
        AssertRC(rc);
        return;
    }

    /* VINF_NOT_SUPPORTED: not a special command, process it normally. */
    vboxVDMACommandProcess(pVdma, pCmd, cbCmd);
#else
    pCmd->rc = rc;
    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
    AssertRC(rc);
#endif
}
2857
2858/**/
2859#ifdef VBOX_WITH_CRHGSMI
2860
2861static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext);
2862
2863static int vdmaVBVACtlSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2864{
2865 int rc = VBoxVBVAExHCtlSubmit(&pVdma->CmdVbva, pCtl, enmSource, pfnComplete, pvComplete);
2866 if (RT_SUCCESS(rc))
2867 {
2868 if (rc == VINF_SUCCESS)
2869 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
2870 else
2871 Assert(rc == VINF_ALREADY_INITIALIZED);
2872 }
2873 else
2874 Log(("VBoxVBVAExHCtlSubmit failed %d\n", rc));
2875
2876 return rc;
2877}
2878
/**
 * Completion callback for guest-submitted controls: recovers the original
 * VBOXCMDVBVA_CTL that precedes the payload, stores the result, completes it
 * over SHGSMI and frees the host control wrapper.
 */
static DECLCALLBACK(void) vboxCmdVBVACmdCtlGuestCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext)
{
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvContext;
    /* The payload pointer was set to (pCtl + 1) on submission; step back to
     * the guest control header. */
    VBOXCMDVBVA_CTL *pGCtl = (VBOXCMDVBVA_CTL*)(pCtl->u.cmd.pu8Cmd - sizeof (VBOXCMDVBVA_CTL));
    AssertRC(rc);
    pGCtl->i32Result = rc;

    Assert(pVdma->pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
    rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pGCtl);
    AssertRC(rc);

    VBoxVBVAExHCtlFree(pVbva, pCtl);
}
2892
2893static int vdmaVBVACtlGenericSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_SOURCE enmSource, VBVAEXHOSTCTL_TYPE enmType, uint8_t* pu8Cmd, uint32_t cbCmd, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2894{
2895 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, enmType);
2896 if (!pHCtl)
2897 {
2898 WARN(("VBoxVBVAExHCtlCreate failed\n"));
2899 return VERR_NO_MEMORY;
2900 }
2901
2902 pHCtl->u.cmd.pu8Cmd = pu8Cmd;
2903 pHCtl->u.cmd.cbCmd = cbCmd;
2904 int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, enmSource, pfnComplete, pvComplete);
2905 if (RT_FAILURE(rc))
2906 {
2907 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
2908 Log(("vdmaVBVACtlSubmit failed rc %d\n", rc));
2909 return rc;;
2910 }
2911 return VINF_SUCCESS;
2912}
2913
/**
 * Submits a guest-originated control wrapping the bytes that follow the
 * VBOXCMDVBVA_CTL header; on submission failure the guest command is
 * completed immediately with the error.
 *
 * @returns VINF_SUCCESS always (errors are reported via the guest command).
 * @param pVdma   The VDMA host state.
 * @param enmType Control type to submit.
 * @param pCtl    The guest control header; the payload follows it.
 * @param cbCtl   Total size including the header.
 */
static int vdmaVBVACtlGenericGuestSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_TYPE enmType, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
{
    Assert(cbCtl >= sizeof (VBOXCMDVBVA_CTL));
    /* Completion will happen asynchronously through the guest completion callback. */
    VBoxSHGSMICommandMarkAsynchCompletion(pCtl);
    int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_GUEST, enmType, (uint8_t*)(pCtl+1), cbCtl - sizeof (VBOXCMDVBVA_CTL), vboxCmdVBVACmdCtlGuestCompletion, pVdma);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    WARN(("vdmaVBVACtlGenericSubmit failed %d\n", rc));
    pCtl->i32Result = rc;
    rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
    AssertRC(rc);
    return VINF_SUCCESS;
}
2928
2929static DECLCALLBACK(void) vboxCmdVBVACmdCtlHostCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvCompletion)
2930{
2931 VBOXCRCMDCTL* pVboxCtl = (VBOXCRCMDCTL*)pCtl->u.cmd.pu8Cmd;
2932 if (pVboxCtl->u.pfnInternal)
2933 ((PFNCRCTLCOMPLETION)pVboxCtl->u.pfnInternal)(pVboxCtl, pCtl->u.cmd.cbCmd, rc, pvCompletion);
2934 VBoxVBVAExHCtlFree(pVbva, pCtl);
2935}
2936
/**
 * Submits an opaque host control (VBOXCRCMDCTL) to the worker thread, stashing
 * the completion callback in the command itself; if the VBVA state machine is
 * not running (VERR_INVALID_STATE) the control is handed directly to the
 * chromium HGCM backend instead.
 *
 * @returns VBox status code.
 * @param pVdma         The VDMA host state.
 * @param pCmd          The opaque control command.
 * @param cbCmd         Size of the command in bytes.
 * @param pfnCompletion Completion callback.
 * @param pvCompletion  Completion callback context.
 */
static int vdmaVBVACtlOpaqueHostSubmit(PVBOXVDMAHOST pVdma, struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
                                       PFNCRCTLCOMPLETION pfnCompletion,
                                       void *pvCompletion)
{
    /* Stash the callback inside the command; retrieved again by
     * vboxCmdVBVACmdCtlHostCompletion. */
    pCmd->u.pfnInternal = (void(*)())pfnCompletion;
    int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_HOST, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, (uint8_t*)pCmd, cbCmd, vboxCmdVBVACmdCtlHostCompletion, pvCompletion);
    if (RT_FAILURE(rc))
    {
        if (rc == VERR_INVALID_STATE)
        {
            /* Worker not running: bypass the VBVA queue and submit directly. */
            pCmd->u.pfnInternal = NULL;
            PVGASTATE pVGAState = pVdma->pVGAState;
            rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCmd, cbCmd, pfnCompletion, pvCompletion);
            if (!RT_SUCCESS(rc))
                WARN(("pfnCrHgsmiControlProcess failed %d\n", rc));

            return rc;
        }
        WARN(("vdmaVBVACtlGenericSubmit failed %d\n", rc));
        return rc;
    }

    return VINF_SUCCESS;
}
2961
2962static DECLCALLBACK(int) vdmaVBVANotifyEnable(PVGASTATE pVGAState)
2963{
2964 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
2965 {
2966 int rc = pVGAState->pDrv->pfnVBVAEnable (pVGAState->pDrv, i, NULL, true);
2967 if (!RT_SUCCESS(rc))
2968 {
2969 WARN(("pfnVBVAEnable failed %d\n", rc));
2970 for (uint32_t j = 0; j < i; j++)
2971 {
2972 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, j);
2973 }
2974
2975 return rc;
2976 }
2977 }
2978 return VINF_SUCCESS;
2979}
2980
2981static DECLCALLBACK(int) vdmaVBVANotifyDisable(PVGASTATE pVGAState)
2982{
2983 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
2984 {
2985 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, i);
2986 }
2987 return VINF_SUCCESS;
2988}
2989
/**
 * Worker-thread creation callback for the enable path: processes the pending
 * enable control once the thread exists and notifies Main about the VBVA
 * state change.
 *
 * @param pThread         The worker thread (unused).
 * @param rc              Thread creation status.
 * @param pvThreadContext The VBOXVDMAHOST instance.
 * @param pvContext       The pending VBVAEXHOSTCTL enable control.
 */
static DECLCALLBACK(void) vdmaVBVACtlThreadCreatedEnable(struct VBOXVDMATHREAD *pThread, int rc,
                                                         void *pvThreadContext, void *pvContext)
{
    RT_NOREF(pThread);
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvThreadContext;
    VBVAEXHOSTCTL* pHCtl = (VBVAEXHOSTCTL*)pvContext;

    if (RT_SUCCESS(rc))
    {
        rc = vboxVDMACrGuestCtlProcess(pVdma, pHCtl);
        /* rc == VINF_SUCCESS would mean the actual state change has occcured */
        if (rc == VINF_SUCCESS)
        {
            /* we need to inform Main about VBVA enable/disable
             * main expects notifications to be done from the main thread
             * submit it there */
            PVGASTATE pVGAState = pVdma->pVGAState;

            if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
                vdmaVBVANotifyEnable(pVGAState);
            else
                vdmaVBVANotifyDisable(pVGAState);
        }
        else if (RT_FAILURE(rc))
            WARN(("vboxVDMACrGuestCtlProcess failed %d\n", rc));
    }
    else
        WARN(("vdmaVBVACtlThreadCreatedEnable is passed %d\n", rc));

    /* Complete the control regardless of the outcome so waiters are released. */
    VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pHCtl, rc);
}
3021
/**
 * Creates an enable control (optionally in paused state) and starts the
 * worker thread; the control is processed from the thread-created callback.
 *
 * @returns VBox status code.
 * @param pVdma       The VDMA host state.
 * @param pEnable     The VBVAENABLE request (must stay valid until completion).
 * @param fPaused     Whether to enable in the paused state.
 * @param pfnComplete Completion callback.
 * @param pvComplete  Completion callback context.
 */
static int vdmaVBVACtlEnableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, bool fPaused, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
{
    int rc;
    VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, fPaused ? VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED : VBVAEXHOSTCTL_TYPE_GHH_ENABLE);
    if (pHCtl)
    {
        pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
        pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
        pHCtl->pfnComplete = pfnComplete;
        pHCtl->pvComplete = pvComplete;

        /* The control is handed to vdmaVBVACtlThreadCreatedEnable once the
         * worker thread is up. */
        rc = VBoxVDMAThreadCreate(&pVdma->Thread, vboxVDMAWorkerThread, pVdma, vdmaVBVACtlThreadCreatedEnable, pHCtl);
        if (RT_SUCCESS(rc))
            return VINF_SUCCESS;
        else
            WARN(("VBoxVDMAThreadCreate failed %d\n", rc));

        VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
    }
    else
    {
        WARN(("VBoxVBVAExHCtlCreate failed\n"));
        rc = VERR_NO_MEMORY;
    }

    return rc;
}
3049
/**
 * Synchronously enables VBVA at the given VRAM offset: submits the enable
 * control and blocks on an event semaphore until its completion callback runs.
 *
 * @returns VBox status code (the completion status on success of the wait).
 * @param pVdma   The VDMA host state.
 * @param offVram VRAM offset of the VBVA buffer.
 * @param fPaused Whether to enable in the paused state.
 */
static int vdmaVBVACtlEnableSubmitSync(PVBOXVDMAHOST pVdma, uint32_t offVram, bool fPaused)
{
    VBVAENABLE Enable = {0};
    Enable.u32Flags = VBVA_F_ENABLE;
    Enable.u32Offset = offVram;

    VDMA_VBVA_CTL_CYNC_COMPLETION Data;
    Data.rc = VERR_NOT_IMPLEMENTED;
    int rc = RTSemEventCreate(&Data.hEvent);
    if (!RT_SUCCESS(rc))
    {
        WARN(("RTSemEventCreate failed %d\n", rc));
        return rc;
    }

    rc = vdmaVBVACtlEnableSubmitInternal(pVdma, &Enable, fPaused, vdmaVBVACtlSubmitSyncCompletion, &Data);
    if (RT_SUCCESS(rc))
    {
        rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
        if (RT_SUCCESS(rc))
        {
            /* Propagate the status recorded by the completion callback. */
            rc = Data.rc;
            if (!RT_SUCCESS(rc))
                WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
        }
        else
            WARN(("RTSemEventWait failed %d\n", rc));
    }
    else
        WARN(("vdmaVBVACtlSubmit failed %d\n", rc));

    RTSemEventDestroy(Data.hEvent);

    return rc;
}
3085
/**
 * Submits a disable control to the worker thread; a no-op (VINF_SUCCESS) when
 * VBVA is already disabled.
 *
 * @returns VBox status code.
 * @param pVdma       The VDMA host state.
 * @param pEnable     The VBVAENABLE request carrying the disable flags.
 * @param pfnComplete Completion callback.
 * @param pvComplete  Completion callback context.
 */
static int vdmaVBVACtlDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
{
    int rc;
    VBVAEXHOSTCTL* pHCtl;
    if (VBoxVBVAExHSIsDisabled(&pVdma->CmdVbva))
    {
        WARN(("VBoxVBVAExHSIsDisabled: disabled"));
        return VINF_SUCCESS;
    }

    pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_GHH_DISABLE);
    if (!pHCtl)
    {
        WARN(("VBoxVBVAExHCtlCreate failed\n"));
        return VERR_NO_MEMORY;
    }

    pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
    pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
    rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_GUEST, pfnComplete, pvComplete);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    /* Submission failed, so the control was not queued; free it here. */
    WARN(("vdmaVBVACtlSubmit failed rc %d\n", rc));
    VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
    return rc;
}
3113
3114static int vdmaVBVACtlEnableDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3115{
3116 bool fEnable = ((pEnable->u32Flags & (VBVA_F_ENABLE | VBVA_F_DISABLE)) == VBVA_F_ENABLE);
3117 if (fEnable)
3118 return vdmaVBVACtlEnableSubmitInternal(pVdma, pEnable, false, pfnComplete, pvComplete);
3119 return vdmaVBVACtlDisableSubmitInternal(pVdma, pEnable, pfnComplete, pvComplete);
3120}
3121
/**
 * Handles a guest enable/disable control: marks the guest command for
 * asynchronous completion and submits it; on submission failure completes the
 * command immediately with the error.
 *
 * @returns VINF_SUCCESS always (errors are reported via the guest command).
 * @param pVdma   The VDMA host state.
 * @param pEnable The guest enable/disable control.
 */
static int vdmaVBVACtlEnableDisableSubmit(PVBOXVDMAHOST pVdma, VBOXCMDVBVA_CTL_ENABLE *pEnable)
{
    VBoxSHGSMICommandMarkAsynchCompletion(&pEnable->Hdr);
    int rc = vdmaVBVACtlEnableDisableSubmitInternal(pVdma, &pEnable->Enable, vboxCmdVBVACmdCtlGuestCompletion, pVdma);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    WARN(("vdmaVBVACtlEnableDisableSubmitInternal failed %d\n", rc));
    pEnable->Hdr.i32Result = rc;
    rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, &pEnable->Hdr);
    AssertRC(rc);
    return VINF_SUCCESS;
}
3135
3136static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
3137 int rc, void *pvContext)
3138{
3139 RT_NOREF(pVbva, pCtl);
3140 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvContext;
3141 pData->rc = rc;
3142 rc = RTSemEventSignal(pData->hEvent);
3143 if (!RT_SUCCESS(rc))
3144 WARN(("RTSemEventSignal failed %d\n", rc));
3145}
3146
3147static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource)
3148{
3149 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
3150 Data.rc = VERR_NOT_IMPLEMENTED;
3151 int rc = RTSemEventCreate(&Data.hEvent);
3152 if (!RT_SUCCESS(rc))
3153 {
3154 WARN(("RTSemEventCreate failed %d\n", rc));
3155 return rc;
3156 }
3157
3158 rc = vdmaVBVACtlSubmit(pVdma, pCtl, enmSource, vdmaVBVACtlSubmitSyncCompletion, &Data);
3159 if (RT_SUCCESS(rc))
3160 {
3161 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
3162 if (RT_SUCCESS(rc))
3163 {
3164 rc = Data.rc;
3165 if (!RT_SUCCESS(rc))
3166 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
3167 }
3168 else
3169 WARN(("RTSemEventWait failed %d\n", rc));
3170 }
3171 else
3172 Log(("vdmaVBVACtlSubmit failed %d\n", rc));
3173
3174 RTSemEventDestroy(Data.hEvent);
3175
3176 return rc;
3177}
3178
/** Synchronously pauses command VBVA processing (host-internal control). */
static int vdmaVBVAPause(PVBOXVDMAHOST pVdma)
{
    VBVAEXHOSTCTL Ctl;
    Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE;
    return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
}
3185
/** Synchronously resumes command VBVA processing (host-internal control). */
static int vdmaVBVAResume(PVBOXVDMAHOST pVdma)
{
    VBVAEXHOSTCTL Ctl;
    Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME;
    return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
}
3192
3193static int vboxVDMACmdSubmitPerform(struct VBOXVDMAHOST *pVdma)
3194{
3195 int rc = VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva);
3196 switch (rc)
3197 {
3198 case VINF_SUCCESS:
3199 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
3200 case VINF_ALREADY_INITIALIZED:
3201 case VINF_EOF:
3202 case VERR_INVALID_STATE:
3203 return VINF_SUCCESS;
3204 default:
3205 Assert(!RT_FAILURE(rc));
3206 return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
3207 }
3208}
3209
3210
3211int vboxCmdVBVACmdHostCtl(PPDMIDISPLAYVBVACALLBACKS pInterface,
3212 struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
3213 PFNCRCTLCOMPLETION pfnCompletion,
3214 void *pvCompletion)
3215{
3216 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
3217 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3218 if (pVdma == NULL)
3219 return VERR_INVALID_STATE;
3220 pCmd->CalloutList.List.pNext = NULL;
3221 return vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, pfnCompletion, pvCompletion);
3222}
3223
/** Stack-allocated context for the synchronous host control submission done
 *  by vboxCmdVBVACmdHostCtlSync. */
typedef struct VBOXCMDVBVA_CMDHOSTCTL_SYNC
{
    struct VBOXVDMAHOST *pVdma;  /**< The VDMA instance owning the completion event. */
    uint32_t fProcessing;        /**< Non-zero while the command is pending; cleared last by the completion callback. */
    int rc;                      /**< Command status; valid once fProcessing is 0. */
} VBOXCMDVBVA_CMDHOSTCTL_SYNC;
3230
/**
 * Completion callback for vboxCmdVBVACmdHostCtlSync.
 *
 * Stores the command status, then clears fProcessing and signals the multi
 * event so the waiting thread can return.  Note the ordering: rc must be
 * written before fProcessing is cleared, because the waiter treats
 * fProcessing == 0 as "Data.rc is valid" and may destroy the stack-based
 * context right after observing it.
 */
static DECLCALLBACK(void) vboxCmdVBVACmdHostCtlSyncCb(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
{
    RT_NOREF(pCmd, cbCmd);
    VBOXCMDVBVA_CMDHOSTCTL_SYNC *pData = (VBOXCMDVBVA_CMDHOSTCTL_SYNC*)pvCompletion;

    /* Publish the result first - the waiter reads it after seeing fProcessing == 0. */
    pData->rc = rc;

    struct VBOXVDMAHOST *pVdma = pData->pVdma;

    /* Account this completion so the waiter knows whether it may reset the event. */
    ASMAtomicIncS32(&pVdma->i32cHostCrCtlCompleted);

    pData->fProcessing = 0;

    RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
}
3246
3247static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd, VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb)
3248{
3249 pEntry->pfnCb = pfnCb;
3250 int rc = RTCritSectEnter(&pVdma->CalloutCritSect);
3251 if (RT_SUCCESS(rc))
3252 {
3253 RTListAppend(&pCmd->CalloutList.List, &pEntry->Node);
3254 RTCritSectLeave(&pVdma->CalloutCritSect);
3255
3256 RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
3257 }
3258 else
3259 WARN(("RTCritSectEnter failed %d\n", rc));
3260
3261 return rc;
3262}
3263
3264
3265static int vboxCmdVBVACmdCalloutProcess(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd)
3266{
3267 int rc = VINF_SUCCESS;
3268 for (;;)
3269 {
3270 rc = RTCritSectEnter(&pVdma->CalloutCritSect);
3271 if (RT_SUCCESS(rc))
3272 {
3273 VBOXCRCMDCTL_CALLOUT_LISTENTRY* pEntry = RTListGetFirst(&pCmd->CalloutList.List, VBOXCRCMDCTL_CALLOUT_LISTENTRY, Node);
3274 if (pEntry)
3275 RTListNodeRemove(&pEntry->Node);
3276 RTCritSectLeave(&pVdma->CalloutCritSect);
3277
3278 if (!pEntry)
3279 break;
3280
3281 pEntry->pfnCb(pEntry);
3282 }
3283 else
3284 {
3285 WARN(("RTCritSectEnter failed %d\n", rc));
3286 break;
3287 }
3288 }
3289
3290 return rc;
3291}
3292
/**
 * Submits a host control command and waits synchronously for its completion,
 * servicing any callouts the backend queues for the command in the meantime.
 *
 * Data lives on this thread's stack; the completion callback writes Data.rc
 * before clearing Data.fProcessing, so once the poll loop below observes
 * fProcessing == 0 the result is valid and the callback is done with Data.
 *
 * @returns Status reported by the command completion, or the submission
 *          failure status.
 * @param   pInterface  The display VBVA callbacks interface.
 * @param   pCmd        The command; its callout list is initialized here.
 * @param   cbCmd       Size of the command in bytes.
 */
DECLCALLBACK(int) vboxCmdVBVACmdHostCtlSync(PPDMIDISPLAYVBVACALLBACKS pInterface,
                                            struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd)
{
    PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
    struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
    if (pVdma == NULL)
        return VERR_INVALID_STATE;
    VBOXCMDVBVA_CMDHOSTCTL_SYNC Data;
    Data.pVdma = pVdma;
    Data.fProcessing = 1;
    Data.rc = VERR_INTERNAL_ERROR;
    RTListInit(&pCmd->CalloutList.List);
    int rc = vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, vboxCmdVBVACmdHostCtlSyncCb, &Data);
    if (!RT_SUCCESS(rc))
    {
        WARN(("vdmaVBVACtlOpaqueHostSubmit failed %d", rc));
        return rc;
    }

    while (Data.fProcessing)
    {
        /* Poll infrequently to make sure no completed message has been missed. */
        RTSemEventMultiWait(pVdma->HostCrCtlCompleteEvent, 500);

        /* Run any callouts queued by vboxCmdVBVACmdCallout while we waited. */
        vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);

        if (Data.fProcessing)
            RTThreadYield();
    }

    /* extra check callouts */
    vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);

    /* 'Our' message has been processed, so should reset the semaphore.
     * There is still possible that another message has been processed
     * and the semaphore has been signalled again.
     * Reset only if there are no other messages completed.
     */
    int32_t c = ASMAtomicDecS32(&pVdma->i32cHostCrCtlCompleted);
    Assert(c >= 0);
    if (!c)
        RTSemEventMultiReset(pVdma->HostCrCtlCompleteEvent);

    rc = Data.rc;
    if (!RT_SUCCESS(rc))
        WARN(("host call failed %d", rc));

    return rc;
}
3342
3343int vboxCmdVBVACmdCtl(PVGASTATE pVGAState, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
3344{
3345 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3346 int rc = VINF_SUCCESS;
3347 switch (pCtl->u32Type)
3348 {
3349 case VBOXCMDVBVACTL_TYPE_3DCTL:
3350 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, pCtl, cbCtl);
3351 case VBOXCMDVBVACTL_TYPE_RESIZE:
3352 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_RESIZE, pCtl, cbCtl);
3353 case VBOXCMDVBVACTL_TYPE_ENABLE:
3354 if (cbCtl != sizeof (VBOXCMDVBVA_CTL_ENABLE))
3355 {
3356 WARN(("incorrect enable size\n"));
3357 rc = VERR_INVALID_PARAMETER;
3358 break;
3359 }
3360 return vdmaVBVACtlEnableDisableSubmit(pVdma, (VBOXCMDVBVA_CTL_ENABLE*)pCtl);
3361 default:
3362 WARN(("unsupported type\n"));
3363 rc = VERR_INVALID_PARAMETER;
3364 break;
3365 }
3366
3367 pCtl->i32Result = rc;
3368 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
3369 AssertRC(rc);
3370 return VINF_SUCCESS;
3371}
3372
3373int vboxCmdVBVACmdSubmit(PVGASTATE pVGAState)
3374{
3375 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3376 {
3377 WARN(("vdma VBVA is disabled\n"));
3378 return VERR_INVALID_STATE;
3379 }
3380
3381 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3382}
3383
3384int vboxCmdVBVACmdFlush(PVGASTATE pVGAState)
3385{
3386 WARN(("flush\n"));
3387 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3388 {
3389 WARN(("vdma VBVA is disabled\n"));
3390 return VERR_INVALID_STATE;
3391 }
3392 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3393}
3394
3395void vboxCmdVBVACmdTimer(PVGASTATE pVGAState)
3396{
3397 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3398 return;
3399 vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3400}
3401
/** Checks whether the command VBVA is currently enabled. */
bool vboxCmdVBVAIsEnabled(PVGASTATE pVGAState)
{
    return VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva);
}
3406#endif
3407
3408int vboxVDMASaveStateExecPrep(struct VBOXVDMAHOST *pVdma)
3409{
3410#ifdef VBOX_WITH_CRHGSMI
3411 int rc = vdmaVBVAPause(pVdma);
3412 if (RT_SUCCESS(rc))
3413 return VINF_SUCCESS;
3414
3415 if (rc != VERR_INVALID_STATE)
3416 {
3417 WARN(("vdmaVBVAPause failed %d\n", rc));
3418 return rc;
3419 }
3420
3421#ifdef DEBUG_misha
3422 WARN(("debug prep"));
3423#endif
3424
3425 PVGASTATE pVGAState = pVdma->pVGAState;
3426 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
3427 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_BEGIN, sizeof (*pCmd));
3428 Assert(pCmd);
3429 if (pCmd)
3430 {
3431 rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
3432 AssertRC(rc);
3433 if (RT_SUCCESS(rc))
3434 {
3435 rc = vboxVDMACrCtlGetRc(pCmd);
3436 }
3437 vboxVDMACrCtlRelease(pCmd);
3438 return rc;
3439 }
3440 return VERR_NO_MEMORY;
3441#else
3442 return VINF_SUCCESS;
3443#endif
3444}
3445
3446int vboxVDMASaveStateExecDone(struct VBOXVDMAHOST *pVdma)
3447{
3448#ifdef VBOX_WITH_CRHGSMI
3449 int rc = vdmaVBVAResume(pVdma);
3450 if (RT_SUCCESS(rc))
3451 return VINF_SUCCESS;
3452
3453 if (rc != VERR_INVALID_STATE)
3454 {
3455 WARN(("vdmaVBVAResume failed %d\n", rc));
3456 return rc;
3457 }
3458
3459#ifdef DEBUG_misha
3460 WARN(("debug done"));
3461#endif
3462
3463 PVGASTATE pVGAState = pVdma->pVGAState;
3464 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
3465 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_END, sizeof (*pCmd));
3466 Assert(pCmd);
3467 if (pCmd)
3468 {
3469 rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
3470 AssertRC(rc);
3471 if (RT_SUCCESS(rc))
3472 {
3473 rc = vboxVDMACrCtlGetRc(pCmd);
3474 }
3475 vboxVDMACrCtlRelease(pCmd);
3476 return rc;
3477 }
3478 return VERR_NO_MEMORY;
3479#else
3480 return VINF_SUCCESS;
3481#endif
3482}
3483
3484int vboxVDMASaveStateExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
3485{
3486 int rc;
3487
3488#ifdef VBOX_WITH_CRHGSMI
3489 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
3490#endif
3491 {
3492 rc = SSMR3PutU32(pSSM, 0xffffffff);
3493 AssertRCReturn(rc, rc);
3494 return VINF_SUCCESS;
3495 }
3496
3497#ifdef VBOX_WITH_CRHGSMI
3498 PVGASTATE pVGAState = pVdma->pVGAState;
3499 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
3500
3501 rc = SSMR3PutU32(pSSM, (uint32_t)(((uint8_t*)pVdma->CmdVbva.pVBVA) - pu8VramBase));
3502 AssertRCReturn(rc, rc);
3503
3504 VBVAEXHOSTCTL HCtl;
3505 HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_SAVESTATE;
3506 HCtl.u.state.pSSM = pSSM;
3507 HCtl.u.state.u32Version = 0;
3508 return vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
3509#endif
3510}
3511
3512int vboxVDMASaveLoadExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM, uint32_t u32Version)
3513{
3514 uint32_t u32;
3515 int rc = SSMR3GetU32(pSSM, &u32);
3516 AssertLogRelRCReturn(rc, rc);
3517
3518 if (u32 != 0xffffffff)
3519 {
3520#ifdef VBOX_WITH_CRHGSMI
3521 rc = vdmaVBVACtlEnableSubmitSync(pVdma, u32, true);
3522 AssertLogRelRCReturn(rc, rc);
3523
3524 Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
3525
3526 VBVAEXHOSTCTL HCtl;
3527 HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_LOADSTATE;
3528 HCtl.u.state.pSSM = pSSM;
3529 HCtl.u.state.u32Version = u32Version;
3530 rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
3531 AssertLogRelRCReturn(rc, rc);
3532
3533 rc = vdmaVBVAResume(pVdma);
3534 AssertLogRelRCReturn(rc, rc);
3535
3536 return VINF_SUCCESS;
3537#else
3538 WARN(("Unsupported VBVACtl info!\n"));
3539 return VERR_VERSION_MISMATCH;
3540#endif
3541 }
3542
3543 return VINF_SUCCESS;
3544}
3545
3546int vboxVDMASaveLoadDone(struct VBOXVDMAHOST *pVdma)
3547{
3548#ifdef VBOX_WITH_CRHGSMI
3549 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
3550 return VINF_SUCCESS;
3551
3552/** @todo r=bird: BTW. would be great if you put in a couple of comments here and there explaining what
3553 * the purpose of this code is. */
3554 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE);
3555 if (!pHCtl)
3556 {
3557 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3558 return VERR_NO_MEMORY;
3559 }
3560
3561 /* sanity */
3562 pHCtl->u.cmd.pu8Cmd = NULL;
3563 pHCtl->u.cmd.cbCmd = 0;
3564
3565 /* NULL completion will just free the ctl up */
3566 int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_HOST, NULL, NULL);
3567 if (RT_FAILURE(rc))
3568 {
3569 Log(("vdmaVBVACtlSubmit failed rc %d\n", rc));
3570 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3571 return rc;
3572 }
3573#endif
3574 return VINF_SUCCESS;
3575}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette