VirtualBox

source: vbox/trunk/src/VBox/Devices/Graphics/DevVGA_VDMA.cpp@ 62952

Last change on this file since 62952 was 62952, checked in by vboxsync, 8 years ago

warnings

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 112.6 KB
Line 
1/* $Id: DevVGA_VDMA.cpp 62952 2016-08-04 07:25:44Z vboxsync $ */
2/** @file
3 * Video DMA (VDMA) support.
4 */
5
6/*
7 * Copyright (C) 2006-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17#include <VBox/VMMDev.h>
18#include <VBox/vmm/pdmdev.h>
19#include <VBox/vmm/pgm.h>
20#include <VBox/VBoxVideo.h>
21#include <iprt/semaphore.h>
22#include <iprt/thread.h>
23#include <iprt/mem.h>
24#include <iprt/asm.h>
25#include <iprt/list.h>
26#include <iprt/param.h>
27
28#include "DevVGA.h"
29#include "HGSMI/SHGSMIHost.h"
30
31#include <VBox/VBoxVideo3D.h>
32#include <VBox/VBoxVideoHost3D.h>
33
34#ifdef DEBUG_misha
35# define VBOXVDBG_MEMCACHE_DISABLE
36#endif
37
38#ifndef VBOXVDBG_MEMCACHE_DISABLE
39# include <iprt/memcache.h>
40#endif
41
42#ifdef DEBUG_misha
43#define WARN_BP() do { AssertFailed(); } while (0)
44#else
45#define WARN_BP() do { } while (0)
46#endif
47#define WARN(_msg) do { \
48 LogRel(_msg); \
49 WARN_BP(); \
50 } while (0)
51
52#define VBOXVDMATHREAD_STATE_TERMINATED 0
53#define VBOXVDMATHREAD_STATE_CREATING 1
54#define VBOXVDMATHREAD_STATE_CREATED 3
55#define VBOXVDMATHREAD_STATE_TERMINATING 4
56
57struct VBOXVDMATHREAD;
58
59typedef DECLCALLBACKPTR(void, PFNVBOXVDMATHREAD_CHANGED)(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvChangeContext);
60
61static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd, VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb);
62
63
64typedef struct VBOXVDMATHREAD
65{
66 RTTHREAD hWorkerThread;
67 RTSEMEVENT hEvent;
68 volatile uint32_t u32State;
69 PFNVBOXVDMATHREAD_CHANGED pfnChanged;
70 void *pvChanged;
71} VBOXVDMATHREAD, *PVBOXVDMATHREAD;
72
73
74/* state transformations:
75 *
76 * submitter | processor
77 *
78 * LISTENING ---> PROCESSING
79 *
80 * */
81#define VBVAEXHOSTCONTEXT_STATE_LISTENING 0
82#define VBVAEXHOSTCONTEXT_STATE_PROCESSING 1
83
84#define VBVAEXHOSTCONTEXT_ESTATE_DISABLED -1
85#define VBVAEXHOSTCONTEXT_ESTATE_PAUSED 0
86#define VBVAEXHOSTCONTEXT_ESTATE_ENABLED 1
87
88typedef struct VBVAEXHOSTCONTEXT
89{
90 VBVABUFFER *pVBVA;
91 volatile int32_t i32State;
92 volatile int32_t i32EnableState;
93 volatile uint32_t u32cCtls;
94 /* critical section for accessing ctl lists */
95 RTCRITSECT CltCritSect;
96 RTLISTANCHOR GuestCtlList;
97 RTLISTANCHOR HostCtlList;
98#ifndef VBOXVDBG_MEMCACHE_DISABLE
99 RTMEMCACHE CtlCache;
100#endif
101} VBVAEXHOSTCONTEXT;
102
103typedef enum
104{
105 VBVAEXHOSTCTL_TYPE_UNDEFINED = 0,
106 VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE,
107 VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME,
108 VBVAEXHOSTCTL_TYPE_HH_SAVESTATE,
109 VBVAEXHOSTCTL_TYPE_HH_LOADSTATE,
110 VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE,
111 VBVAEXHOSTCTL_TYPE_HH_BE_OPAQUE,
112 VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD,
113 VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE,
114 VBVAEXHOSTCTL_TYPE_GHH_ENABLE,
115 VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED,
116 VBVAEXHOSTCTL_TYPE_GHH_DISABLE,
117 VBVAEXHOSTCTL_TYPE_GHH_RESIZE
118} VBVAEXHOSTCTL_TYPE;
119
120struct VBVAEXHOSTCTL;
121
122typedef DECLCALLBACKPTR(void, PFNVBVAEXHOSTCTL_COMPLETE)(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvComplete);
123
124typedef struct VBVAEXHOSTCTL
125{
126 RTLISTNODE Node;
127 VBVAEXHOSTCTL_TYPE enmType;
128 union
129 {
130 struct
131 {
132 uint8_t * pu8Cmd;
133 uint32_t cbCmd;
134 } cmd;
135
136 struct
137 {
138 PSSMHANDLE pSSM;
139 uint32_t u32Version;
140 } state;
141 } u;
142 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete;
143 void *pvComplete;
144} VBVAEXHOSTCTL;
145
/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
 * but can be called with other VBoxVBVAExS** (submitter) functions except Init/Start/Term apparently.
 * Can only be called by the processor, i.e. the entity that acquired the processor state by direct or indirect call to the VBoxVBVAExHSCheckCommands.
 * See more detailed comments in headers for function definitions. */
150typedef enum
151{
152 VBVAEXHOST_DATA_TYPE_NO_DATA = 0,
153 VBVAEXHOST_DATA_TYPE_CMD,
154 VBVAEXHOST_DATA_TYPE_HOSTCTL,
155 VBVAEXHOST_DATA_TYPE_GUESTCTL
156} VBVAEXHOST_DATA_TYPE;
157
158static DECLCALLBACK(int) vdmaVBVANotifyDisable(PVGASTATE pVGAState);
159
160
161static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd);
162
163static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd);
164static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc);
165
/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
 * can be called concurrently with itself as well as with other VBoxVBVAEx** functions except Init/Start/Term apparently. */
168static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva);
169
170static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva);
171static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA);
172static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva);
173static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva);
174static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM);
175static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version);
176
177static VBVAEXHOSTCTL* VBoxVBVAExHCtlAlloc(VBVAEXHOSTCONTEXT *pCmdVbva)
178{
179#ifndef VBOXVDBG_MEMCACHE_DISABLE
180 return (VBVAEXHOSTCTL*)RTMemCacheAlloc(pCmdVbva->CtlCache);
181#else
182 return (VBVAEXHOSTCTL*)RTMemAlloc(sizeof (VBVAEXHOSTCTL));
183#endif
184}
185
186static void VBoxVBVAExHCtlFree(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl)
187{
188#ifndef VBOXVDBG_MEMCACHE_DISABLE
189 RTMemCacheFree(pCmdVbva->CtlCache, pCtl);
190#else
191 RTMemFree(pCtl);
192#endif
193}
194
195static VBVAEXHOSTCTL* VBoxVBVAExHCtlCreate(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL_TYPE enmType)
196{
197 VBVAEXHOSTCTL* pCtl = VBoxVBVAExHCtlAlloc(pCmdVbva);
198 if (!pCtl)
199 {
200 WARN(("VBoxVBVAExHCtlAlloc failed\n"));
201 return NULL;
202 }
203
204 pCtl->enmType = enmType;
205 return pCtl;
206}
207
208static int vboxVBVAExHSProcessorAcquire(struct VBVAEXHOSTCONTEXT *pCmdVbva)
209{
210 Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);
211
212 if (ASMAtomicCmpXchgS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_PROCESSING, VBVAEXHOSTCONTEXT_STATE_LISTENING))
213 return VINF_SUCCESS;
214 return VERR_SEM_BUSY;
215}
216
/** Dequeues the next pending control command, host controls taking priority.
 *
 * Must only be called by the current processor (i32State == PROCESSING).
 *
 * @returns The detached control entry, or NULL when there is nothing to do
 *          (or the critical section could not be entered).
 * @param   pCmdVbva      The command context.
 * @param   pfHostCtl     Set to true when the returned entry came from the host
 *                        list, false for the guest list.  Untouched when NULL
 *                        is returned.
 * @param   fHostOnlyMode When true, only the host list is considered (used on
 *                        disable paths); the u32cCtls fast-path check is also
 *                        skipped so host controls are never missed.
 */
static VBVAEXHOSTCTL* vboxVBVAExHPCheckCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, bool *pfHostCtl, bool fHostOnlyMode)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);

    /* Lock-free fast path: nothing queued at all. */
    if (!fHostOnlyMode && !ASMAtomicUoReadU32(&pCmdVbva->u32cCtls))
        return NULL;

    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
        /* Host controls are served before guest ones. */
        VBVAEXHOSTCTL* pCtl = RTListGetFirst(&pCmdVbva->HostCtlList, VBVAEXHOSTCTL, Node);
        if (pCtl)
            *pfHostCtl = true;
        else if (!fHostOnlyMode)
        {
            /* Guest controls are only processed while not paused. */
            if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
            {
                pCtl = RTListGetFirst(&pCmdVbva->GuestCtlList, VBVAEXHOSTCTL, Node);
                /* pCtl can not be null here since pCmdVbva->u32cCtls is not null,
                 * and there are no HostCtl commands */
                Assert(pCtl);
                *pfHostCtl = false;
            }
        }

        if (pCtl)
        {
            /* Detach the entry and keep the counter in sync while still holding the lock. */
            RTListNodeRemove(&pCtl->Node);
            ASMAtomicDecU32(&pCmdVbva->u32cCtls);
        }

        RTCritSectLeave(&pCmdVbva->CltCritSect);

        return pCtl;
    }
    else
        WARN(("RTCritSectEnter failed %d\n", rc));

    return NULL;
}
257
258static VBVAEXHOSTCTL* VBoxVBVAExHPCheckHostCtlOnDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
259{
260 bool fHostCtl = false;
261 VBVAEXHOSTCTL* pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostCtl, true);
262 Assert(!pCtl || fHostCtl);
263 return pCtl;
264}
265
266static int VBoxVBVAExHPPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
267{
268 if (pCmdVbva->i32EnableState < VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
269 {
270 WARN(("Invalid state\n"));
271 return VERR_INVALID_STATE;
272 }
273
274 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
275 return VINF_SUCCESS;
276}
277
278static int VBoxVBVAExHPResume(struct VBVAEXHOSTCONTEXT *pCmdVbva)
279{
280 if (pCmdVbva->i32EnableState != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
281 {
282 WARN(("Invalid state\n"));
283 return VERR_INVALID_STATE;
284 }
285
286 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
287 return VINF_SUCCESS;
288}
289
290
291static bool vboxVBVAExHPCheckProcessCtlInternal(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl)
292{
293 switch (pCtl->enmType)
294 {
295 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE:
296 {
297 VBoxVBVAExHPPause(pCmdVbva);
298 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
299 return true;
300 }
301 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME:
302 {
303 VBoxVBVAExHPResume(pCmdVbva);
304 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
305 return true;
306 }
307 default:
308 return false;
309 }
310}
311
/** Releases the processor role, flipping the state back to LISTENING so
 *  another thread may acquire it via vboxVBVAExHSProcessorAcquire. */
static void vboxVBVAExHPProcessorRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);

    ASMAtomicWriteS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
}
318
/** Sets the VBVA_F_STATE_PROCESSING flag in the guest-visible host-events
 *  word, telling the guest that the host is busy processing commands.
 *  May only be called by the current processor. */
static void vboxVBVAExHPHgEventSet(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    if (pCmdVbva->pVBVA) /* pVBVA is NULL while the context is disabled */
        ASMAtomicOrU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, VBVA_F_STATE_PROCESSING);
}
325
/** Clears the VBVA_F_STATE_PROCESSING flag in the guest-visible host-events
 *  word.  Counterpart of vboxVBVAExHPHgEventSet; processor only. */
static void vboxVBVAExHPHgEventClear(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    if (pCmdVbva->pVBVA) /* pVBVA is NULL while the context is disabled */
        ASMAtomicAndU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, ~VBVA_F_STATE_PROCESSING);
}
332
/** Peeks at the next command record in the guest-filled VBVA ring buffer.
 *
 * Does NOT advance the ring; the caller completes the command via
 * VBoxVBVAExHPDataCompleteCmd.  Processor only, context must not be paused.
 *
 * @returns VINF_SUCCESS with *ppCmd/*pcbCmd set to the in-ring command data,
 *          VINF_EOF when the ring is empty (output untouched),
 *          VINF_TRY_AGAIN when the guest is still writing the record,
 *          VERR_INVALID_STATE when the record crosses the ring boundary
 *          (not supported by this implementation).
 */
static int vboxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pCmdVbva->i32EnableState > VBVAEXHOSTCONTEXT_ESTATE_PAUSED);

    VBVABUFFER *pVBVA = pCmdVbva->pVBVA;

    uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
    uint32_t indexRecordFree = pVBVA->indexRecordFree;

    Log(("first = %d, free = %d\n",
                   indexRecordFirst, indexRecordFree));

    if (indexRecordFirst == indexRecordFree)
    {
        /* No records to process. Return without assigning output variables. */
        return VINF_EOF;
    }

    uint32_t cbRecordCurrent = ASMAtomicReadU32(&pVBVA->aRecords[indexRecordFirst].cbRecord);

    /* A new record need to be processed. */
    if (cbRecordCurrent & VBVA_F_RECORD_PARTIAL)
    {
        /* the record is being recorded, try again */
        return VINF_TRY_AGAIN;
    }

    uint32_t cbRecord = cbRecordCurrent & ~VBVA_F_RECORD_PARTIAL;

    if (!cbRecord)
    {
        /* the record is being recorded, try again */
        return VINF_TRY_AGAIN;
    }

    /* we should not get partial commands here actually */
    Assert(cbRecord);

    /* The size of largest contiguous chunk in the ring buffer. */
    uint32_t u32BytesTillBoundary = pVBVA->cbData - pVBVA->off32Data;

    /* The pointer to data in the ring buffer. */
    uint8_t *pSrc = &pVBVA->au8Data[pVBVA->off32Data];

    /* Fetch or point the data. */
    if (u32BytesTillBoundary >= cbRecord)
    {
        /* The command does not cross buffer boundary. Return address in the buffer. */
        *ppCmd = pSrc;
        *pcbCmd = cbRecord;
        return VINF_SUCCESS;
    }

    /* Commands wrapping around the ring boundary are not supported. */
    LogRel(("CmdVbva: cross-bound writes unsupported\n"));
    return VERR_INVALID_STATE;
}
390
391static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd)
392{
393 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
394 pVBVA->off32Data = (pVBVA->off32Data + cbCmd) % pVBVA->cbData;
395
396 pVBVA->indexRecordFirst = (pVBVA->indexRecordFirst + 1) % RT_ELEMENTS(pVBVA->aRecords);
397}
398
399static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc)
400{
401 if (pCtl->pfnComplete)
402 pCtl->pfnComplete(pCmdVbva, pCtl, rc, pCtl->pvComplete);
403 else
404 VBoxVBVAExHCtlFree(pCmdVbva, pCtl);
405}
406
407static VBVAEXHOST_DATA_TYPE vboxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
408{
409 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
410 VBVAEXHOSTCTL*pCtl;
411 bool fHostClt;
412
413 for (;;)
414 {
415 pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostClt, false);
416 if (pCtl)
417 {
418 if (fHostClt)
419 {
420 if (!vboxVBVAExHPCheckProcessCtlInternal(pCmdVbva, pCtl))
421 {
422 *ppCmd = (uint8_t*)pCtl;
423 *pcbCmd = sizeof (*pCtl);
424 return VBVAEXHOST_DATA_TYPE_HOSTCTL;
425 }
426 continue;
427 }
428 else
429 {
430 *ppCmd = (uint8_t*)pCtl;
431 *pcbCmd = sizeof (*pCtl);
432 return VBVAEXHOST_DATA_TYPE_GUESTCTL;
433 }
434 }
435
436 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) <= VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
437 return VBVAEXHOST_DATA_TYPE_NO_DATA;
438
439 int rc = vboxVBVAExHPCmdGet(pCmdVbva, ppCmd, pcbCmd);
440 switch (rc)
441 {
442 case VINF_SUCCESS:
443 return VBVAEXHOST_DATA_TYPE_CMD;
444 case VINF_EOF:
445 return VBVAEXHOST_DATA_TYPE_NO_DATA;
446 case VINF_TRY_AGAIN:
447 RTThreadSleep(1);
448 continue;
449 default:
450 /* this is something really unexpected, i.e. most likely guest has written something incorrect to the VBVA buffer */
451 WARN(("Warning: vboxVBVAExHCmdGet returned unexpected status %d\n", rc));
452 return VBVAEXHOST_DATA_TYPE_NO_DATA;
453 }
454 }
455 /* not reached */
456}
457
/** Fetches the next item to process, managing the guest-visible PROCESSING
 *  flag and the processor role.
 *
 * On NO_DATA it clears the PROCESSING flag, releases the processor role and
 * then re-checks once to close the race with a concurrent submitter (see the
 * numbered scenario below).
 *
 * @returns The data type placed in *ppCmd/*pcbCmd, or
 *          VBVAEXHOST_DATA_TYPE_NO_DATA when the queue is drained.
 */
static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    VBVAEXHOST_DATA_TYPE enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
    if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
    {
        vboxVBVAExHPHgEventClear(pCmdVbva);
        vboxVBVAExHPProcessorRelease(pCmdVbva);
        /* we need to prevent racing between us clearing the flag and command check/submission thread, i.e.
         * 1. we check the queue -> and it is empty
         * 2. submitter adds command to the queue
         * 3. submitter checks the "processing" -> and it is true , thus it does not submit a notification
         * 4. we clear the "processing" state
         * 5. ->here we need to re-check the queue state to ensure we do not leak the notification of the above command
         * 6. if the queue appears to be not-empty set the "processing" state back to "true"
         **/
        int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
        if (RT_SUCCESS(rc))
        {
            /* we are the processor now */
            enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
            if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
            {
                /* Still nothing - give the processor role up again. */
                vboxVBVAExHPProcessorRelease(pCmdVbva);
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
            }

            /* New work arrived during the race window; re-assert PROCESSING. */
            vboxVBVAExHPHgEventSet(pCmdVbva);
        }
    }

    return enmType;
}
490
491DECLINLINE(bool) vboxVBVAExHSHasCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
492{
493 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
494
495 if (pVBVA)
496 {
497 uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
498 uint32_t indexRecordFree = pVBVA->indexRecordFree;
499
500 if (indexRecordFirst != indexRecordFree)
501 return true;
502 }
503
504 return !!ASMAtomicReadU32(&pCmdVbva->u32cCtls);
505}
506
/* Checks whether new commands are ready for processing
 * @returns
 *   VINF_SUCCESS - there are commands in the queue, and the calling thread is now the processor (i.e. typically it would delegate processing to a worker thread)
 *   VINF_EOF - no commands in the queue
 *   VINF_ALREADY_INITIALIZED - another thread is already processing the commands
 *   VERR_INVALID_STATE - the VBVA is paused or pausing */
/* See the return-code contract in the comment right above. */
static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
    if (RT_SUCCESS(rc))
    {
        /* we are the processor now */
        if (vboxVBVAExHSHasCommands(pCmdVbva))
        {
            /* Keep the processor role; tell the guest we are busy. */
            vboxVBVAExHPHgEventSet(pCmdVbva);
            return VINF_SUCCESS;
        }

        /* Nothing queued - give the processor role back. */
        vboxVBVAExHPProcessorRelease(pCmdVbva);
        return VINF_EOF;
    }
    if (rc == VERR_SEM_BUSY)
        return VINF_ALREADY_INITIALIZED;
    return VERR_INVALID_STATE;
}
532
/** Initializes a zeroed command context: critical section, control lists and
 *  (unless compiled out) the control-entry memory cache.
 *
 * Leaves the context in PROCESSING/DISABLED state; VBoxVBVAExHSEnable must be
 * called before commands can flow.
 *
 * @returns VBox status code; all partially created resources are cleaned up
 *          by the respective failure paths.
 */
static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
    int rc = RTCritSectInit(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
#ifndef VBOXVDBG_MEMCACHE_DISABLE
        rc = RTMemCacheCreate(&pCmdVbva->CtlCache, sizeof (VBVAEXHOSTCTL),
                                0, /* size_t cbAlignment */
                                UINT32_MAX, /* uint32_t cMaxObjects */
                                NULL, /* PFNMEMCACHECTOR pfnCtor*/
                                NULL, /* PFNMEMCACHEDTOR pfnDtor*/
                                NULL, /* void *pvUser*/
                                0 /* uint32_t fFlags*/
                                );
        if (RT_SUCCESS(rc))
#endif
        {
            RTListInit(&pCmdVbva->GuestCtlList);
            RTListInit(&pCmdVbva->HostCtlList);
            /* Start as the processor (PROCESSING) but disabled. */
            pCmdVbva->i32State = VBVAEXHOSTCONTEXT_STATE_PROCESSING;
            pCmdVbva->i32EnableState = VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
            return VINF_SUCCESS;
        }
#ifndef VBOXVDBG_MEMCACHE_DISABLE
        else
            WARN(("RTMemCacheCreate failed %d\n", rc));
#endif
    }
    else
        WARN(("RTCritSectInit failed %d\n", rc));

    return rc;
}
567
568DECLINLINE(bool) VBoxVBVAExHSIsEnabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
569{
570 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) >= VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
571}
572
573DECLINLINE(bool) VBoxVBVAExHSIsDisabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
574{
575 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) == VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
576}
577
578static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA)
579{
580 if (VBoxVBVAExHSIsEnabled(pCmdVbva))
581 {
582 WARN(("VBVAEx is enabled already\n"));
583 return VERR_INVALID_STATE;
584 }
585
586 pCmdVbva->pVBVA = pVBVA;
587 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
588 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
589 return VINF_SUCCESS;
590}
591
592static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
593{
594 if (VBoxVBVAExHSIsDisabled(pCmdVbva))
595 return VINF_SUCCESS;
596
597 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
598 return VINF_SUCCESS;
599}
600
/** Tears down a command context initialized by VBoxVBVAExHSInit.
 *
 * Expects the processor to be stopped and both control lists drained; the
 * context memory is zeroed on return so it may be re-initialized.
 */
static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    /* ensure the processor is stopped */
    Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);

    /* ensure no one tries to submit the command */
    if (pCmdVbva->pVBVA)
        pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;

    Assert(RTListIsEmpty(&pCmdVbva->GuestCtlList));
    Assert(RTListIsEmpty(&pCmdVbva->HostCtlList));

    RTCritSectDelete(&pCmdVbva->CltCritSect);

#ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMemCacheDestroy(pCmdVbva->CtlCache);
#endif

    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
}
621
/** Saves one guest control entry to the saved-state stream.
 *
 * Wire format (must match vboxVBVAExHSLoadGuestCtl): type, command size,
 * command offset relative to the VRAM base.
 *
 * @returns VBox status code.
 */
static int vboxVBVAExHSSaveGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
{
    RT_NOREF(pCmdVbva);
    int rc = SSMR3PutU32(pSSM, pCtl->enmType);
    AssertRCReturn(rc, rc);
    rc = SSMR3PutU32(pSSM, pCtl->u.cmd.cbCmd);
    AssertRCReturn(rc, rc);
    /* Store the command pointer as a VRAM-relative offset. */
    rc = SSMR3PutU32(pSSM, (uint32_t)(pCtl->u.cmd.pu8Cmd - pu8VramBase));
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}
634
/** Saves the queued guest controls; caller holds CltCritSect.
 *
 * The context must be PAUSED.  A zero type value terminates the list in the
 * stream (matched by the loader's VINF_EOF handling).
 *
 * @returns VBox status code.
 */
static int vboxVBVAExHSSaveStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
{
    if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    {
        WARN(("vbva not paused\n"));
        return VERR_INVALID_STATE;
    }

    VBVAEXHOSTCTL* pCtl;
    int rc;
    RTListForEach(&pCmdVbva->GuestCtlList, pCtl, VBVAEXHOSTCTL, Node)
    {
        rc = vboxVBVAExHSSaveGuestCtl(pCmdVbva, pCtl, pu8VramBase, pSSM);
        AssertRCReturn(rc, rc);
    }

    /* Zero terminator for the control list. */
    rc = SSMR3PutU32(pSSM, 0);
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}
/* Saves state
 * @returns VINF_SUCCESS on success, or a failure status code on save state failure
 */
659static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
660{
661 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
662 if (RT_FAILURE(rc))
663 {
664 WARN(("RTCritSectEnter failed %d\n", rc));
665 return rc;
666 }
667
668 rc = vboxVBVAExHSSaveStateLocked(pCmdVbva, pu8VramBase, pSSM);
669 if (RT_FAILURE(rc))
670 WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
671
672 RTCritSectLeave(&pCmdVbva->CltCritSect);
673
674 return rc;
675}
676
/** Loads one guest control entry from the saved-state stream and appends it
 *  to the guest control list.
 *
 * Wire format must match vboxVBVAExHSSaveGuestCtl: type (0 = end of list),
 * command size, VRAM-relative command offset.
 *
 * @returns VINF_SUCCESS, VINF_EOF at the list terminator, or failure.
 */
static int vboxVBVAExHSLoadGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
{
    RT_NOREF(u32Version);
    uint32_t u32;
    int rc = SSMR3GetU32(pSSM, &u32);
    AssertLogRelRCReturn(rc, rc);

    if (!u32)
        return VINF_EOF; /* zero terminator - no more entries */

    VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(pCmdVbva, (VBVAEXHOSTCTL_TYPE)u32);
    if (!pHCtl)
    {
        WARN(("VBoxVBVAExHCtlCreate failed\n"));
        return VERR_NO_MEMORY;
    }

    rc = SSMR3GetU32(pSSM, &u32);
    AssertLogRelRCReturn(rc, rc);
    pHCtl->u.cmd.cbCmd = u32;

    rc = SSMR3GetU32(pSSM, &u32);
    AssertLogRelRCReturn(rc, rc);
    /* Re-base the saved VRAM-relative offset onto the current VRAM mapping. */
    pHCtl->u.cmd.pu8Cmd = pu8VramBase + u32;

    RTListAppend(&pCmdVbva->GuestCtlList, &pHCtl->Node);
    ++pCmdVbva->u32cCtls; /* caller holds CltCritSect */

    return VINF_SUCCESS;
}
707
708
/** Loads all saved guest controls; caller holds CltCritSect.
 *
 * The context must be PAUSED.  Reads entries until the stream's zero
 * terminator (VINF_EOF from vboxVBVAExHSLoadGuestCtl).
 *
 * @returns VBox status code.
 */
static int vboxVBVAExHSLoadStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
{
    if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    {
        WARN(("vbva not stopped\n"));
        return VERR_INVALID_STATE;
    }

    int rc;

    do {
        rc = vboxVBVAExHSLoadGuestCtl(pCmdVbva, pu8VramBase, pSSM, u32Version);
        AssertLogRelRCReturn(rc, rc);
    } while (VINF_EOF != rc);

    return VINF_SUCCESS;
}
726
/* Loads state
 * @returns VINF_SUCCESS on success, or a failure status code on load state failure
 */
730static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
731{
732 Assert(VGA_SAVEDSTATE_VERSION_3D <= u32Version);
733 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
734 if (RT_FAILURE(rc))
735 {
736 WARN(("RTCritSectEnter failed %d\n", rc));
737 return rc;
738 }
739
740 rc = vboxVBVAExHSLoadStateLocked(pCmdVbva, pu8VramBase, pSSM, u32Version);
741 if (RT_FAILURE(rc))
742 WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
743
744 RTCritSectLeave(&pCmdVbva->CltCritSect);
745
746 return rc;
747}
748
749typedef enum
750{
751 VBVAEXHOSTCTL_SOURCE_GUEST = 0,
752 VBVAEXHOSTCTL_SOURCE_HOST
753} VBVAEXHOSTCTL_SOURCE;
754
755
/** Submits a control entry to the host or guest queue and kicks command
 *  processing.
 *
 * The enabled check is performed twice: once lock-free as a fast path, and
 * again inside the critical section to close the race with a concurrent
 * disable.
 *
 * @returns Status of VBoxVBVAExHSCheckCommands on success (VINF_SUCCESS,
 *          VINF_EOF, VINF_ALREADY_INITIALIZED, ...), VERR_INVALID_STATE when
 *          the context is not enabled, or the critsect failure code.
 */
static int VBoxVBVAExHCtlSubmit(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
{
    if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
    {
        Log(("cmd vbva not enabled\n"));
        return VERR_INVALID_STATE;
    }

    pCtl->pfnComplete = pfnComplete;
    pCtl->pvComplete = pvComplete;

    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
        /* Re-check under the lock: the context may have been disabled meanwhile. */
        if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
        {
            Log(("cmd vbva not enabled\n"));
            RTCritSectLeave(&pCmdVbva->CltCritSect);
            return VERR_INVALID_STATE;
        }

        if (enmSource > VBVAEXHOSTCTL_SOURCE_GUEST)
        {
            RTListAppend(&pCmdVbva->HostCtlList, &pCtl->Node);
        }
        else
            RTListAppend(&pCmdVbva->GuestCtlList, &pCtl->Node);

        ASMAtomicIncU32(&pCmdVbva->u32cCtls);

        RTCritSectLeave(&pCmdVbva->CltCritSect);

        /* Wake up / become the processor so the new control gets handled. */
        rc = VBoxVBVAExHSCheckCommands(pCmdVbva);
    }
    else
        WARN(("RTCritSectEnter failed %d\n", rc));

    return rc;
}
795
#ifdef VBOX_WITH_CRHGSMI
/** Per-source (screen) VDMA state: the screen description plus a bitmap of
 *  the targets the screen maps to. */
typedef struct VBOXVDMA_SOURCE
{
    VBVAINFOSCREEN Screen;
    VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
} VBOXVDMA_SOURCE;
#endif
803
/** The VDMA host instance data. */
typedef struct VBOXVDMAHOST
{
    PHGSMIINSTANCE pHgsmi;      /**< HGSMI instance used for command transport. */
    PVGASTATE pVGAState;        /**< The owning VGA device state. */
#ifdef VBOX_WITH_CRHGSMI
    VBVAEXHOSTCONTEXT CmdVbva;  /**< The extended VBVA command context. */
    VBOXVDMATHREAD Thread;      /**< The worker thread. */
    VBOXCRCMD_SVRINFO CrSrvInfo;
    VBVAEXHOSTCTL* pCurRemainingHostCtl;    /**< Host control still pending during disable processing. */
    RTSEMEVENTMULTI HostCrCtlCompleteEvent;
    int32_t volatile i32cHostCrCtlCompleted;
    RTCRITSECT CalloutCritSect; /**< Protects the callout list. */
//    VBOXVDMA_SOURCE aSources[VBOX_VIDEO_MAX_SCREENS];
#endif
#ifdef VBOX_VDMA_WITH_WATCHDOG
    PTMTIMERR3 WatchDogTimer;
#endif
} VBOXVDMAHOST, *PVBOXVDMAHOST;
822
823#ifdef VBOX_WITH_CRHGSMI
824
/** Called by the worker thread once construction succeeded: moves the state
 *  CREATING -> CREATED and fires the (one-shot) change callback.
 *
 * The callback pointers are fetched and cleared BEFORE the state write so a
 * concurrent observer of CREATED never sees stale callback data.
 */
void VBoxVDMAThreadNotifyConstructSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
{
    Assert(pThread->u32State == VBOXVDMATHREAD_STATE_CREATING);
    PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
    void *pvChanged = pThread->pvChanged;

    pThread->pfnChanged = NULL;
    pThread->pvChanged = NULL;

    ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_CREATED);

    if (pfnChanged)
        pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
}
839
/** Called by the worker thread when it has acknowledged the TERMINATING
 *  request: fires the (one-shot) change callback.
 *
 * Note: unlike the construct path, the state stays TERMINATING here; it is
 * moved to TERMINATED by VBoxVDMAThreadCleanup after RTThreadWait.
 */
void VBoxVDMAThreadNotifyTerminatingSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
{
    Assert(pThread->u32State == VBOXVDMATHREAD_STATE_TERMINATING);
    PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
    void *pvChanged = pThread->pvChanged;

    pThread->pfnChanged = NULL;
    pThread->pvChanged = NULL;

    if (pfnChanged)
        pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
}
852
853DECLINLINE(bool) VBoxVDMAThreadIsTerminating(PVBOXVDMATHREAD pThread)
854{
855 return ASMAtomicUoReadU32(&pThread->u32State) == VBOXVDMATHREAD_STATE_TERMINATING;
856}
857
858void VBoxVDMAThreadInit(PVBOXVDMATHREAD pThread)
859{
860 memset(pThread, 0, sizeof (*pThread));
861 pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
862}
863
864int VBoxVDMAThreadCleanup(PVBOXVDMATHREAD pThread)
865{
866 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
867 switch (u32State)
868 {
869 case VBOXVDMATHREAD_STATE_TERMINATED:
870 return VINF_SUCCESS;
871 case VBOXVDMATHREAD_STATE_TERMINATING:
872 {
873 int rc = RTThreadWait(pThread->hWorkerThread, RT_INDEFINITE_WAIT, NULL);
874 if (!RT_SUCCESS(rc))
875 {
876 WARN(("RTThreadWait failed %d\n", rc));
877 return rc;
878 }
879
880 RTSemEventDestroy(pThread->hEvent);
881
882 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATED);
883 return VINF_SUCCESS;
884 }
885 default:
886 WARN(("invalid state"));
887 return VERR_INVALID_STATE;
888 }
889}
890
/** Creates the worker thread, first reaping any previous instance.
 *
 * On success the state is CREATING until the worker itself calls
 * VBoxVDMAThreadNotifyConstructSucceeded (which fires pfnCreated).
 *
 * @returns VBox status code; state is TERMINATED again on failure.
 */
int VBoxVDMAThreadCreate(PVBOXVDMATHREAD pThread, PFNRTTHREAD pfnThread, void *pvThread, PFNVBOXVDMATHREAD_CHANGED pfnCreated, void*pvCreated)
{
    int rc = VBoxVDMAThreadCleanup(pThread);
    if (RT_FAILURE(rc))
    {
        WARN(("VBoxVDMAThreadCleanup failed %d\n", rc));
        return rc;
    }

    rc = RTSemEventCreate(&pThread->hEvent);
    if (RT_SUCCESS(rc))
    {
        /* Callback data must be in place before the thread starts running. */
        pThread->u32State = VBOXVDMATHREAD_STATE_CREATING;
        pThread->pfnChanged = pfnCreated;
        pThread->pvChanged = pvCreated;
        rc = RTThreadCreate(&pThread->hWorkerThread, pfnThread, pvThread, 0, RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "VDMA");
        if (RT_SUCCESS(rc))
            return VINF_SUCCESS;
        else
            WARN(("RTThreadCreate failed %d\n", rc));

        RTSemEventDestroy(pThread->hEvent);
    }
    else
        WARN(("RTSemEventCreate failed %d\n", rc));

    pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;

    return rc;
}
921
922DECLINLINE(int) VBoxVDMAThreadEventNotify(PVBOXVDMATHREAD pThread)
923{
924 int rc = RTSemEventSignal(pThread->hEvent);
925 AssertRC(rc);
926 return rc;
927}
928
929DECLINLINE(int) VBoxVDMAThreadEventWait(PVBOXVDMATHREAD pThread, RTMSINTERVAL cMillies)
930{
931 int rc = RTSemEventWait(pThread->hEvent, cMillies);
932 AssertRC(rc);
933 return rc;
934}
935
936int VBoxVDMAThreadTerm(PVBOXVDMATHREAD pThread, PFNVBOXVDMATHREAD_CHANGED pfnTerminated, void*pvTerminated, bool fNotify)
937{
938 int rc;
939 do
940 {
941 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
942 switch (u32State)
943 {
944 case VBOXVDMATHREAD_STATE_CREATED:
945 pThread->pfnChanged = pfnTerminated;
946 pThread->pvChanged = pvTerminated;
947 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATING);
948 if (fNotify)
949 {
950 rc = VBoxVDMAThreadEventNotify(pThread);
951 AssertRC(rc);
952 }
953 return VINF_SUCCESS;
954 case VBOXVDMATHREAD_STATE_TERMINATING:
955 case VBOXVDMATHREAD_STATE_TERMINATED:
956 {
957 WARN(("thread is marked to termination or terminated\nn"));
958 return VERR_INVALID_STATE;
959 }
960 case VBOXVDMATHREAD_STATE_CREATING:
961 {
962 /* wait till the thread creation is completed */
963 WARN(("concurrent thread create/destron\n"));
964 RTThreadYield();
965 continue;
966 }
967 default:
968 WARN(("invalid state"));
969 return VERR_INVALID_STATE;
970 }
971 } while (1);
972
973 WARN(("should never be here\n"));
974 return VERR_INTERNAL_ERROR;
975}
976
977static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource);
978
979typedef DECLCALLBACK(void) FNVBOXVDMACRCTL_CALLBACK(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext);
980typedef FNVBOXVDMACRCTL_CALLBACK *PFNVBOXVDMACRCTL_CALLBACK;
981
982typedef struct VBOXVDMACMD_CHROMIUM_CTL_PRIVATE
983{
984 uint32_t cRefs;
985 int32_t rc;
986 PFNVBOXVDMACRCTL_CALLBACK pfnCompletion;
987 void *pvCompletion;
988 VBOXVDMACMD_CHROMIUM_CTL Cmd;
989} VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, *PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE;
990
991#define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(_p) ((PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)(((uint8_t*)(_p)) - RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd)))
992
993static PVBOXVDMACMD_CHROMIUM_CTL vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE enmCmd, uint32_t cbCmd)
994{
995 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)RTMemAllocZ(cbCmd + RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd));
996 Assert(pHdr);
997 if (pHdr)
998 {
999 pHdr->cRefs = 1;
1000 pHdr->rc = VERR_NOT_IMPLEMENTED;
1001 pHdr->Cmd.enmType = enmCmd;
1002 pHdr->Cmd.cbCmd = cbCmd;
1003 return &pHdr->Cmd;
1004 }
1005
1006 return NULL;
1007}
1008
1009DECLINLINE(void) vboxVDMACrCtlRelease (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1010{
1011 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1012 uint32_t cRefs = ASMAtomicDecU32(&pHdr->cRefs);
1013 if (!cRefs)
1014 RTMemFree(pHdr);
1015}
1016
1017DECLINLINE(void) vboxVDMACrCtlRetain (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1018{
1019 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1020 ASMAtomicIncU32(&pHdr->cRefs);
1021}
1022
1023DECLINLINE(int) vboxVDMACrCtlGetRc (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1024{
1025 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1026 return pHdr->rc;
1027}
1028
1029static DECLCALLBACK(void) vboxVDMACrCtlCbSetEvent(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
1030{
1031 RT_NOREF(pVGAState, pCmd);
1032 RTSemEventSignal((RTSEMEVENT)pvContext);
1033}
1034
/** Fire-and-forget completion callback: just drops the poster's reference
 *  on the command. */
static DECLCALLBACK(void) vboxVDMACrCtlCbReleaseCmd(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
{
    RT_NOREF(pVGAState, pvContext);
    vboxVDMACrCtlRelease(pCmd);
}
1040
1041
1042static int vboxVDMACrCtlPostAsync (PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd, PFNVBOXVDMACRCTL_CALLBACK pfnCompletion, void *pvCompletion)
1043{
1044 if ( pVGAState->pDrv
1045 && pVGAState->pDrv->pfnCrHgsmiControlProcess)
1046 {
1047 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1048 pHdr->pfnCompletion = pfnCompletion;
1049 pHdr->pvCompletion = pvCompletion;
1050 pVGAState->pDrv->pfnCrHgsmiControlProcess(pVGAState->pDrv, pCmd, cbCmd);
1051 return VINF_SUCCESS;
1052 }
1053#ifdef DEBUG_misha
1054 Assert(0);
1055#endif
1056 return VERR_NOT_SUPPORTED;
1057}
1058
/**
 * Synchronously posts a chromium control command: submits it with a
 * completion callback that signals an event semaphore, then blocks until
 * the command completes.
 *
 * @returns VBox status of the submission/wait (NOT the command's own status;
 *          fetch that with vboxVDMACrCtlGetRc() afterwards).
 */
static int vboxVDMACrCtlPost(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd)
{
    RTSEMEVENT hComplEvent;
    int rc = RTSemEventCreate(&hComplEvent);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        rc = vboxVDMACrCtlPostAsync(pVGAState, pCmd, cbCmd, vboxVDMACrCtlCbSetEvent, (void*)hComplEvent);
#ifdef DEBUG_misha
        AssertRC(rc);
#endif
        if (RT_SUCCESS(rc))
        {
            rc = RTSemEventWaitNoResume(hComplEvent, RT_INDEFINITE_WAIT);
            AssertRC(rc);
            if (RT_SUCCESS(rc))
            {
                RTSemEventDestroy(hComplEvent);
            }
            /* NOTE(review): if the wait fails (e.g. VERR_INTERRUPTED) the
             * semaphore is deliberately NOT destroyed, since the completion
             * callback may still signal it later; the handle leaks.  Confirm
             * whether a retry loop would be preferable here. */
        }
        else
        {
            /* the command is completed */
            RTSemEventDestroy(hComplEvent);
        }
    }
    return rc;
}
1087
/** Completion context shared between vboxVDMACrHgcmSubmitSync() and its
 *  callback.  (Note: "CYNC" is a historical typo for "SYNC"; the name is
 *  kept since it is referenced throughout this file.) */
typedef struct VDMA_VBVA_CTL_CYNC_COMPLETION
{
    int rc;            /**< Status reported by the completion callback. */
    RTSEMEVENT hEvent; /**< Signalled by the completion callback. */
} VDMA_VBVA_CTL_CYNC_COMPLETION;
1093
1094static DECLCALLBACK(void) vboxVDMACrHgcmSubmitSyncCompletion(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
1095{
1096 RT_NOREF(pCmd, cbCmd);
1097 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvCompletion;
1098 pData->rc = rc;
1099 rc = RTSemEventSignal(pData->hEvent);
1100 if (!RT_SUCCESS(rc))
1101 WARN(("RTSemEventSignal failed %d\n", rc));
1102}
1103
/**
 * Submits a control command over the chromium HGCM control channel and
 * blocks until its completion callback has fired.
 *
 * @returns Submission/wait status; on success, the command's own completion
 *          status as reported by the callback.
 */
static int vboxVDMACrHgcmSubmitSync(struct VBOXVDMAHOST *pVdma, VBOXCRCMDCTL* pCtl, uint32_t cbCtl)
{
    VDMA_VBVA_CTL_CYNC_COMPLETION Data;
    Data.rc = VERR_NOT_IMPLEMENTED;
    int rc = RTSemEventCreate(&Data.hEvent);
    if (!RT_SUCCESS(rc))
    {
        WARN(("RTSemEventCreate failed %d\n", rc));
        return rc;
    }

    /* reset the callout list link before handing the control off */
    pCtl->CalloutList.List.pNext = NULL;

    PVGASTATE pVGAState = pVdma->pVGAState;
    rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCtl, cbCtl, vboxVDMACrHgcmSubmitSyncCompletion, &Data);
    if (RT_SUCCESS(rc))
    {
        rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
        if (RT_SUCCESS(rc))
        {
            /* propagate the command's own completion status */
            rc = Data.rc;
            if (!RT_SUCCESS(rc))
            {
                WARN(("pfnCrHgcmCtlSubmit command failed %d\n", rc));
            }

        }
        else
            WARN(("RTSemEventWait failed %d\n", rc));
    }
    else
        WARN(("pfnCrHgcmCtlSubmit failed %d\n", rc));


    RTSemEventDestroy(Data.hEvent);

    return rc;
}
1142
1143static int vdmaVBVACtlDisableSync(PVBOXVDMAHOST pVdma)
1144{
1145 VBVAEXHOSTCTL HCtl;
1146 HCtl.enmType = VBVAEXHOSTCTL_TYPE_GHH_DISABLE;
1147 int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1148 if (RT_FAILURE(rc))
1149 {
1150 Log(("vdmaVBVACtlSubmitSync failed %d\n", rc));
1151 return rc;
1152 }
1153
1154 vgaUpdateDisplayAll(pVdma->pVGAState, /* fFailOnResize = */ false);
1155
1156 return VINF_SUCCESS;
1157}
1158
/**
 * "Remaining host command" iterator used while switching the host control
 * path back from VBVA to HGCM.
 *
 * Completes the control command returned on the previous invocation (if any)
 * with @a prevCmdRc, then hands out the next host control command still
 * pending on the VBVA.  On the first call (nothing pending yet) it disables
 * the VBVA so all subsequent host commands go the HGCM way.
 *
 * @returns Pointer to the next command's data, or NULL when drained.
 * @param   pcbCtl  Where to store the returned command's size (0 when NULL).
 */
static DECLCALLBACK(uint8_t*) vboxVDMACrHgcmHandleEnableRemainingHostCommand(HVBOXCRCMDCTL_REMAINING_HOST_COMMAND hClient, uint32_t *pcbCtl, int prevCmdRc)
{
    struct VBOXVDMAHOST *pVdma = hClient;
    if (!pVdma->pCurRemainingHostCtl)
    {
        /* disable VBVA, all subsequent host commands will go HGCM way */
        VBoxVBVAExHSDisable(&pVdma->CmdVbva);
    }
    else
    {
        /* complete the command handed out on the previous call */
        VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pVdma->pCurRemainingHostCtl, prevCmdRc);
    }

    pVdma->pCurRemainingHostCtl = VBoxVBVAExHPCheckHostCtlOnDisable(&pVdma->CmdVbva);
    if (pVdma->pCurRemainingHostCtl)
    {
        *pcbCtl = pVdma->pCurRemainingHostCtl->u.cmd.cbCmd;
        return pVdma->pCurRemainingHostCtl->u.cmd.pu8Cmd;
    }

    *pcbCtl = 0;
    return NULL;
}
1182
/** "Notify terminating done" callback: nothing to do beyond strict-build
 *  sanity checks on the expected VBVA and worker-thread states. */
static DECLCALLBACK(void) vboxVDMACrHgcmNotifyTerminatingDoneCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient)
{
#ifdef VBOX_STRICT
    struct VBOXVDMAHOST *pVdma = hClient;
    Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pVdma->Thread.u32State == VBOXVDMATHREAD_STATE_TERMINATING);
#else
    RT_NOREF(hClient);
#endif
}
1193
1194static DECLCALLBACK(int) vboxVDMACrHgcmNotifyTerminatingCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient, VBOXCRCMDCTL_HGCMENABLE_DATA *pHgcmEnableData)
1195{
1196 struct VBOXVDMAHOST *pVdma = hClient;
1197 VBVAEXHOSTCTL HCtl;
1198 HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD;
1199 int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1200
1201 pHgcmEnableData->hRHCmd = pVdma;
1202 pHgcmEnableData->pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1203
1204 if (RT_FAILURE(rc))
1205 {
1206 if (rc == VERR_INVALID_STATE)
1207 rc = VINF_SUCCESS;
1208 else
1209 WARN(("vdmaVBVACtlSubmitSync failed %d\n", rc));
1210 }
1211
1212 return rc;
1213}
1214
1215static int vboxVDMACrHgcmHandleEnable(struct VBOXVDMAHOST *pVdma)
1216{
1217 VBOXCRCMDCTL_ENABLE Enable;
1218 Enable.Hdr.enmType = VBOXCRCMDCTL_TYPE_ENABLE;
1219 Enable.Data.hRHCmd = pVdma;
1220 Enable.Data.pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1221
1222 int rc = vboxVDMACrHgcmSubmitSync(pVdma, &Enable.Hdr, sizeof (Enable));
1223 Assert(!pVdma->pCurRemainingHostCtl);
1224 if (RT_SUCCESS(rc))
1225 {
1226 Assert(!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1227 return VINF_SUCCESS;
1228 }
1229
1230 Assert(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1231 WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));
1232
1233 return rc;
1234}
1235
1236static int vdmaVBVAEnableProcess(struct VBOXVDMAHOST *pVdma, uint32_t u32Offset)
1237{
1238 if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1239 {
1240 WARN(("vdma VBVA is already enabled\n"));
1241 return VERR_INVALID_STATE;
1242 }
1243
1244 VBVABUFFER *pVBVA = (VBVABUFFER *)HGSMIOffsetToPointerHost(pVdma->pHgsmi, u32Offset);
1245 if (!pVBVA)
1246 {
1247 WARN(("invalid offset %d\n", u32Offset));
1248 return VERR_INVALID_PARAMETER;
1249 }
1250
1251 if (!pVdma->CrSrvInfo.pfnEnable)
1252 {
1253#ifdef DEBUG_misha
1254 WARN(("pfnEnable is NULL\n"));
1255 return VERR_NOT_SUPPORTED;
1256#endif
1257 }
1258
1259 int rc = VBoxVBVAExHSEnable(&pVdma->CmdVbva, pVBVA);
1260 if (RT_SUCCESS(rc))
1261 {
1262 VBOXCRCMDCTL_DISABLE Disable;
1263 Disable.Hdr.enmType = VBOXCRCMDCTL_TYPE_DISABLE;
1264 Disable.Data.hNotifyTerm = pVdma;
1265 Disable.Data.pfnNotifyTerm = vboxVDMACrHgcmNotifyTerminatingCb;
1266 Disable.Data.pfnNotifyTermDone = vboxVDMACrHgcmNotifyTerminatingDoneCb;
1267 rc = vboxVDMACrHgcmSubmitSync(pVdma, &Disable.Hdr, sizeof (Disable));
1268 if (RT_SUCCESS(rc))
1269 {
1270 PVGASTATE pVGAState = pVdma->pVGAState;
1271 VBOXCRCMD_SVRENABLE_INFO Info;
1272 Info.hCltScr = pVGAState->pDrv;
1273 Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
1274 Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
1275 Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
1276 rc = pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
1277 if (RT_SUCCESS(rc))
1278 return VINF_SUCCESS;
1279 else
1280 WARN(("pfnEnable failed %d\n", rc));
1281
1282 vboxVDMACrHgcmHandleEnable(pVdma);
1283 }
1284 else
1285 WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));
1286
1287 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1288 }
1289 else
1290 WARN(("VBoxVBVAExHSEnable failed %d\n", rc));
1291
1292 return rc;
1293}
1294
/**
 * Disables the command VBVA and (optionally) re-enables the HGCM host
 * control path, undoing the service disable on failure.
 *
 * @returns VBox status code.
 * @param   fDoHgcmEnable   Whether to switch host controls back to HGCM
 *                          (false when HGCM itself is being unloaded).
 */
static int vdmaVBVADisableProcess(struct VBOXVDMAHOST *pVdma, bool fDoHgcmEnable)
{
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    {
        Log(("vdma VBVA is already disabled\n"));
        return VINF_SUCCESS;
    }

    /* NOTE(review): pfnDisable is invoked unconditionally here, while the
     * enable path checks pfnEnable for NULL - confirm pfnDisable can never
     * be NULL once VBVA was successfully enabled. */
    int rc = pVdma->CrSrvInfo.pfnDisable(pVdma->CrSrvInfo.hSvr);
    if (RT_SUCCESS(rc))
    {
        if (fDoHgcmEnable)
        {
            PVGASTATE pVGAState = pVdma->pVGAState;

            /* disable is a bit tricky
             * we need to ensure the host ctl commands do not come out of order
             * and do not come over HGCM channel until after it is enabled */
            rc = vboxVDMACrHgcmHandleEnable(pVdma);
            if (RT_SUCCESS(rc))
            {
                vdmaVBVANotifyDisable(pVGAState);
                return VINF_SUCCESS;
            }

            /* roll back: re-enable the chromium service for VBVA operation */
            VBOXCRCMD_SVRENABLE_INFO Info;
            Info.hCltScr = pVGAState->pDrv;
            Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
            Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
            Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
            pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
        }
    }
    else
        WARN(("pfnDisable failed %d\n", rc));

    return rc;
}
1333
1334static int vboxVDMACrHostCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd, bool *pfContinue)
1335{
1336 *pfContinue = true;
1337
1338 switch (pCmd->enmType)
1339 {
1340 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1341 {
1342 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1343 {
1344 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1345 return VERR_INVALID_STATE;
1346 }
1347 return pVdma->CrSrvInfo.pfnHostCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1348 }
1349 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1350 {
1351 int rc = vdmaVBVADisableProcess(pVdma, true);
1352 if (RT_FAILURE(rc))
1353 {
1354 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1355 return rc;
1356 }
1357
1358 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
1359 }
1360 case VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD:
1361 {
1362 int rc = vdmaVBVADisableProcess(pVdma, false);
1363 if (RT_FAILURE(rc))
1364 {
1365 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1366 return rc;
1367 }
1368
1369 rc = VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, true);
1370 if (RT_FAILURE(rc))
1371 {
1372 WARN(("VBoxVDMAThreadTerm failed %d\n", rc));
1373 return rc;
1374 }
1375
1376 *pfContinue = false;
1377 return VINF_SUCCESS;
1378 }
1379 case VBVAEXHOSTCTL_TYPE_HH_SAVESTATE:
1380 {
1381 PVGASTATE pVGAState = pVdma->pVGAState;
1382 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1383 int rc = VBoxVBVAExHSSaveState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM);
1384 if (RT_FAILURE(rc))
1385 {
1386 WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
1387 return rc;
1388 }
1389 VGA_SAVED_STATE_PUT_MARKER(pCmd->u.state.pSSM, 4);
1390
1391 return pVdma->CrSrvInfo.pfnSaveState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM);
1392 }
1393 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE:
1394 {
1395 PVGASTATE pVGAState = pVdma->pVGAState;
1396 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1397
1398 int rc = VBoxVBVAExHSLoadState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1399 if (RT_FAILURE(rc))
1400 {
1401 WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
1402 return rc;
1403 }
1404
1405 VGA_SAVED_STATE_GET_MARKER_RETURN_ON_MISMATCH(pCmd->u.state.pSSM, pCmd->u.state.u32Version, 4);
1406 rc = pVdma->CrSrvInfo.pfnLoadState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1407 if (RT_FAILURE(rc))
1408 {
1409 WARN(("pfnLoadState failed %d\n", rc));
1410 return rc;
1411 }
1412
1413 return VINF_SUCCESS;
1414 }
1415 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE:
1416 {
1417 PVGASTATE pVGAState = pVdma->pVGAState;
1418
1419 for (uint32_t i = 0; i < pVGAState->cMonitors; ++i)
1420 {
1421 VBVAINFOSCREEN CurScreen;
1422 VBVAINFOVIEW CurView;
1423
1424 int rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
1425 if (RT_FAILURE(rc))
1426 {
1427 WARN(("VBVAGetInfoViewAndScreen failed %d\n", rc));
1428 return rc;
1429 }
1430
1431 rc = VBVAInfoScreen(pVGAState, &CurScreen);
1432 if (RT_FAILURE(rc))
1433 {
1434 WARN(("VBVAInfoScreen failed %d\n", rc));
1435 return rc;
1436 }
1437 }
1438
1439 return VINF_SUCCESS;
1440 }
1441 default:
1442 WARN(("unexpected host ctl type %d\n", pCmd->enmType));
1443 return VERR_INVALID_PARAMETER;
1444 }
1445}
1446
1447static int vboxVDMASetupScreenInfo(PVGASTATE pVGAState, VBVAINFOSCREEN *pScreen)
1448{
1449 const uint32_t u32ViewIndex = pScreen->u32ViewIndex;
1450 const bool fDisabled = RT_BOOL(pScreen->u16Flags & VBVA_SCREEN_F_DISABLED);
1451
1452 if (fDisabled)
1453 {
1454 if ( u32ViewIndex < pVGAState->cMonitors
1455 || u32ViewIndex == UINT32_C(0xFFFFFFFF))
1456 {
1457 RT_ZERO(*pScreen);
1458 pScreen->u32ViewIndex = u32ViewIndex;
1459 pScreen->u16Flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED;
1460 return VINF_SUCCESS;
1461 }
1462 }
1463 else
1464 {
1465 if ( u32ViewIndex < pVGAState->cMonitors
1466 && pScreen->u16BitsPerPixel <= 32
1467 && pScreen->u32Width <= UINT16_MAX
1468 && pScreen->u32Height <= UINT16_MAX
1469 && pScreen->u32LineSize <= UINT16_MAX * 4)
1470 {
1471 const uint32_t u32BytesPerPixel = (pScreen->u16BitsPerPixel + 7) / 8;
1472 if (pScreen->u32Width <= pScreen->u32LineSize / (u32BytesPerPixel? u32BytesPerPixel: 1))
1473 {
1474 const uint64_t u64ScreenSize = (uint64_t)pScreen->u32LineSize * pScreen->u32Height;
1475 if ( pScreen->u32StartOffset <= pVGAState->vram_size
1476 && u64ScreenSize <= pVGAState->vram_size
1477 && pScreen->u32StartOffset <= pVGAState->vram_size - (uint32_t)u64ScreenSize)
1478 {
1479 return VINF_SUCCESS;
1480 }
1481 }
1482 }
1483 }
1484
1485 return VERR_INVALID_PARAMETER;
1486}
1487
/**
 * Processes one guest resize entry: validates the screen data, forwards the
 * resize to the chromium service and applies view/screen settings to every
 * monitor selected in the entry's target map.
 *
 * @returns VBox status code.
 */
static int vboxVDMACrGuestCtlResizeEntryProcess(struct VBOXVDMAHOST *pVdma, VBOXCMDVBVA_RESIZE_ENTRY *pEntry)
{
    PVGASTATE pVGAState = pVdma->pVGAState;
    VBVAINFOSCREEN Screen = pEntry->Screen;

    /* Verify and cleanup local copy of the input data. */
    int rc = vboxVDMASetupScreenInfo(pVGAState, &Screen);
    if (RT_FAILURE(rc))
    {
        WARN(("invalid screen data\n"));
        return rc;
    }

    /* Work on a local copy of the target map with out-of-range bits cleared. */
    VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
    memcpy(aTargetMap, pEntry->aTargetMap, sizeof(aTargetMap));
    ASMBitClearRange(aTargetMap, pVGAState->cMonitors, VBOX_VIDEO_MAX_SCREENS);

    rc = pVdma->CrSrvInfo.pfnResize(pVdma->CrSrvInfo.hSvr, &Screen, aTargetMap);
    if (RT_FAILURE(rc))
    {
        WARN(("pfnResize failed %d\n", rc));
        return rc;
    }

    /* A fake view which contains the current screen for the 2D VBVAInfoView. */
    VBVAINFOVIEW View;
    View.u32ViewOffset = 0;
    View.u32ViewSize = Screen.u32LineSize * Screen.u32Height + Screen.u32StartOffset;
    View.u32MaxScreenSize = Screen.u32LineSize * Screen.u32Height;

    const bool fDisable = RT_BOOL(Screen.u16Flags & VBVA_SCREEN_F_DISABLED);

    /* Iterate over all monitors selected in the target bitmap. */
    for (int i = ASMBitFirstSet(aTargetMap, pVGAState->cMonitors);
            i >= 0;
            i = ASMBitNextSet(aTargetMap, pVGAState->cMonitors, i))
    {
        Screen.u32ViewIndex = i;

        VBVAINFOSCREEN CurScreen;
        VBVAINFOVIEW CurView;

        rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
        AssertRC(rc);

        /* nothing changed for this monitor - skip */
        if (!memcmp(&Screen, &CurScreen, sizeof (CurScreen)))
            continue;

        /* don't stomp an existing view when merely disabling the screen */
        if (!fDisable || !CurView.u32ViewSize)
        {
            View.u32ViewIndex = Screen.u32ViewIndex;

            rc = VBVAInfoView(pVGAState, &View);
            if (RT_FAILURE(rc))
            {
                WARN(("VBVAInfoView failed %d\n", rc));
                break;
            }
        }

        rc = VBVAInfoScreen(pVGAState, &Screen);
        if (RT_FAILURE(rc))
        {
            WARN(("VBVAInfoScreen failed %d\n", rc));
            break;
        }
    }

    return rc;
}
1557
1558static int vboxVDMACrGuestCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd)
1559{
1560 VBVAEXHOSTCTL_TYPE enmType = pCmd->enmType;
1561 switch (enmType)
1562 {
1563 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1564 {
1565 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1566 {
1567 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1568 return VERR_INVALID_STATE;
1569 }
1570 return pVdma->CrSrvInfo.pfnGuestCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1571 }
1572 case VBVAEXHOSTCTL_TYPE_GHH_RESIZE:
1573 {
1574 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1575 {
1576 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1577 return VERR_INVALID_STATE;
1578 }
1579
1580 uint32_t cbCmd = pCmd->u.cmd.cbCmd;
1581
1582 if (cbCmd % sizeof (VBOXCMDVBVA_RESIZE_ENTRY))
1583 {
1584 WARN(("invalid buffer size\n"));
1585 return VERR_INVALID_PARAMETER;
1586 }
1587
1588 uint32_t cElements = cbCmd / sizeof (VBOXCMDVBVA_RESIZE_ENTRY);
1589 if (!cElements)
1590 {
1591 WARN(("invalid buffer size\n"));
1592 return VERR_INVALID_PARAMETER;
1593 }
1594
1595 VBOXCMDVBVA_RESIZE *pResize = (VBOXCMDVBVA_RESIZE*)pCmd->u.cmd.pu8Cmd;
1596
1597 int rc = VINF_SUCCESS;
1598
1599 for (uint32_t i = 0; i < cElements; ++i)
1600 {
1601 VBOXCMDVBVA_RESIZE_ENTRY *pEntry = &pResize->aEntries[i];
1602 rc = vboxVDMACrGuestCtlResizeEntryProcess(pVdma, pEntry);
1603 if (RT_FAILURE(rc))
1604 {
1605 WARN(("vboxVDMACrGuestCtlResizeEntryProcess failed %d\n", rc));
1606 break;
1607 }
1608 }
1609 return rc;
1610 }
1611 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE:
1612 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED:
1613 {
1614 VBVAENABLE *pEnable = (VBVAENABLE *)pCmd->u.cmd.pu8Cmd;
1615 Assert(pCmd->u.cmd.cbCmd == sizeof (VBVAENABLE));
1616 uint32_t u32Offset = pEnable->u32Offset;
1617 int rc = vdmaVBVAEnableProcess(pVdma, u32Offset);
1618 if (!RT_SUCCESS(rc))
1619 {
1620 WARN(("vdmaVBVAEnableProcess failed %d\n", rc));
1621 return rc;
1622 }
1623
1624 if (enmType == VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED)
1625 {
1626 rc = VBoxVBVAExHPPause(&pVdma->CmdVbva);
1627 if (!RT_SUCCESS(rc))
1628 {
1629 WARN(("VBoxVBVAExHPPause failed %d\n", rc));
1630 return rc;
1631 }
1632 }
1633
1634 return VINF_SUCCESS;
1635 }
1636 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1637 {
1638 int rc = vdmaVBVADisableProcess(pVdma, true);
1639 if (RT_FAILURE(rc))
1640 {
1641 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1642 return rc;
1643 }
1644
1645 /* do vgaUpdateDisplayAll right away */
1646 VMR3ReqCallNoWait(PDMDevHlpGetVM(pVdma->pVGAState->pDevInsR3), VMCPUID_ANY,
1647 (PFNRT)vgaUpdateDisplayAll, 2, pVdma->pVGAState, /* fFailOnResize = */ false);
1648
1649 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
1650 }
1651 default:
1652 WARN(("unexpected ctl type %d\n", pCmd->enmType));
1653 return VERR_INVALID_PARAMETER;
1654 }
1655}
1656
1657/**
1658 * @param fIn - whether this is a page in or out op.
1659 * the direction is VRA#M - related, so fIn == true - transfer to VRAM); false - transfer from VRAM
1660 */
1661static int vboxVDMACrCmdVbvaProcessPagingEl(PPDMDEVINS pDevIns, VBOXCMDVBVAPAGEIDX iPage, uint8_t *pu8Vram, bool fIn)
1662{
1663 RTGCPHYS phPage = (RTGCPHYS)iPage << PAGE_SHIFT;
1664 PGMPAGEMAPLOCK Lock;
1665 int rc;
1666
1667 if (fIn)
1668 {
1669 const void * pvPage;
1670 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPage, &Lock);
1671 if (!RT_SUCCESS(rc))
1672 {
1673 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d", rc));
1674 return rc;
1675 }
1676
1677 memcpy(pu8Vram, pvPage, PAGE_SIZE);
1678
1679 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1680 }
1681 else
1682 {
1683 void * pvPage;
1684 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvPage, &Lock);
1685 if (!RT_SUCCESS(rc))
1686 {
1687 WARN(("PDMDevHlpPhysGCPhys2CCPtr failed %d", rc));
1688 return rc;
1689 }
1690
1691 memcpy(pvPage, pu8Vram, PAGE_SIZE);
1692
1693 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1694 }
1695
1696 return VINF_SUCCESS;
1697}
1698
1699static int vboxVDMACrCmdVbvaProcessPagingEls(PPDMDEVINS pDevIns, const VBOXCMDVBVAPAGEIDX *piPages, uint32_t cPages, uint8_t *pu8Vram, bool fIn)
1700{
1701 for (uint32_t i = 0; i < cPages; ++i, pu8Vram += PAGE_SIZE)
1702 {
1703 int rc = vboxVDMACrCmdVbvaProcessPagingEl(pDevIns, piPages[i], pu8Vram, fIn);
1704 if (!RT_SUCCESS(rc))
1705 {
1706 WARN(("vboxVDMACrCmdVbvaProcessPagingEl failed %d", rc));
1707 return rc;
1708 }
1709 }
1710
1711 return VINF_SUCCESS;
1712}
1713
/**
 * Validates a paging-transfer command and extracts its page list, VRAM
 * target and direction.
 *
 * @returns 0 on success, -1 on any validation failure (VBOXCMDVBVA i8Result
 *          convention).
 */
static int8_t vboxVDMACrCmdVbvaPagingDataInit(PVGASTATE pVGAState, const VBOXCMDVBVA_HDR *pHdr, const VBOXCMDVBVA_PAGING_TRANSFER_DATA *pData, uint32_t cbCmd,
                            const VBOXCMDVBVAPAGEIDX **ppPages, VBOXCMDVBVAPAGEIDX *pcPages,
                            uint8_t **ppu8Vram, bool *pfIn)
{
    if (cbCmd < sizeof (VBOXCMDVBVA_PAGING_TRANSFER))
    {
        WARN(("cmd too small"));
        return -1;
    }

    /* cPages first holds the byte size of the page-index array ... */
    VBOXCMDVBVAPAGEIDX cPages = cbCmd - RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers);
    if (cPages % sizeof (VBOXCMDVBVAPAGEIDX))
    {
        WARN(("invalid cmd size"));
        return -1;
    }
    /* ... and from here on the number of page indices. */
    cPages /= sizeof (VBOXCMDVBVAPAGEIDX);

    VBOXCMDVBVAOFFSET offVRAM = pData->Alloc.u.offVRAM;
    if (offVRAM & PAGE_OFFSET_MASK)
    {
        WARN(("offVRAM address is not on page boundary\n"));
        return -1;
    }
    const VBOXCMDVBVAPAGEIDX *pPages = pData->aPageNumbers;

    uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
    if (offVRAM >= pVGAState->vram_size)
    {
        WARN(("invalid vram offset"));
        return -1;
    }

    /* overflow guard: cPages << PAGE_SHIFT below must not wrap */
    if (~(~(VBOXCMDVBVAPAGEIDX)0 >> PAGE_SHIFT) & cPages)
    {
        WARN(("invalid cPages %d", cPages));
        return -1;
    }

    /* NOTE(review): '>=' rejects a transfer ending exactly at vram_size;
     * presumably a deliberate guard band - confirm. */
    if (offVRAM + ((VBOXCMDVBVAOFFSET)cPages << PAGE_SHIFT) >= pVGAState->vram_size)
    {
        WARN(("invalid cPages %d, exceeding vram size", cPages));
        return -1;
    }

    uint8_t *pu8Vram = pu8VramBase + offVRAM;
    /* direction is VRAM-relative: fIn == true means transfer INTO VRAM */
    bool fIn = !!(pHdr->u8Flags & VBOXCMDVBVA_OPF_PAGING_TRANSFER_IN);

    *ppPages = pPages;
    *pcPages = cPages;
    *ppu8Vram = pu8Vram;
    *pfIn = fIn;
    return 0;
}
1768
/**
 * Fills a page-aligned VRAM range with a 32-bit pattern.
 *
 * @returns 0 on success, -1 on validation failure (VBOXCMDVBVA i8Result
 *          convention).
 */
static int8_t vboxVDMACrCmdVbvaPagingFill(PVGASTATE pVGAState, VBOXCMDVBVA_PAGING_FILL *pFill)
{
    VBOXCMDVBVAOFFSET offVRAM = pFill->offVRAM;
    if (offVRAM & PAGE_OFFSET_MASK)
    {
        WARN(("offVRAM address is not on page boundary\n"));
        return -1;
    }

    uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
    if (offVRAM >= pVGAState->vram_size)
    {
        WARN(("invalid vram offset"));
        return -1;
    }

    uint32_t cbFill = pFill->u32CbFill;

    /* NOTE(review): '>=' rejects a fill ending exactly at vram_size -
     * presumably a deliberate guard band; confirm. */
    if (offVRAM + cbFill >= pVGAState->vram_size)
    {
        WARN(("invalid cPages"));
        return -1;
    }

    uint32_t *pu32Vram = (uint32_t*)(pu8VramBase + offVRAM);
    uint32_t u32Color = pFill->u32Pattern;

    /* alignment is only Assert-checked; release builds round cbFill down
     * to a whole number of dwords */
    Assert(!(cbFill % 4));
    for (uint32_t i = 0; i < cbFill / 4; ++i)
    {
        pu32Vram[i] = u32Color;
    }

    return 0;
}
1804
/**
 * Dispatches a fully-assembled (contiguous) VBVA command: paging transfers
 * and fills are handled locally, everything else goes to the chromium
 * service.
 *
 * @returns 0 on success, negative on failure (VBOXCMDVBVA i8Result
 *          convention).
 */
static int8_t vboxVDMACrCmdVbvaProcessCmdData(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
{
    switch (pCmd->u8OpCode)
    {
        case VBOXCMDVBVA_OPTYPE_NOPCMD:
            return 0;
        case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
        {
            PVGASTATE pVGAState = pVdma->pVGAState;
            const VBOXCMDVBVAPAGEIDX *pPages;
            uint32_t cPages;
            uint8_t *pu8Vram;
            bool fIn;
            /* validate and extract page list, VRAM target and direction */
            int8_t i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pCmd, &((VBOXCMDVBVA_PAGING_TRANSFER*)pCmd)->Data, cbCmd,
                                                                &pPages, &cPages,
                                                                &pu8Vram, &fIn);
            if (i8Result < 0)
            {
                WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
                return i8Result;
            }

            PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
            int rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pPages, cPages, pu8Vram, fIn);
            if (!RT_SUCCESS(rc))
            {
                WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
                return -1;
            }

            return 0;
        }
        case VBOXCMDVBVA_OPTYPE_PAGING_FILL:
        {
            PVGASTATE pVGAState = pVdma->pVGAState;
            if (cbCmd != sizeof (VBOXCMDVBVA_PAGING_FILL))
            {
                WARN(("cmd too small"));
                return -1;
            }

            return vboxVDMACrCmdVbvaPagingFill(pVGAState, (VBOXCMDVBVA_PAGING_FILL*)pCmd);
        }
        default:
            /* anything else is a 3D command for the chromium service */
            return pVdma->CrSrvInfo.pfnCmd(pVdma->CrSrvInfo.hSvr, pCmd, cbCmd);
    }
}
1852
1853#if 0
1854typedef struct VBOXCMDVBVA_PAGING_TRANSFER
1855{
1856 VBOXCMDVBVA_HDR Hdr;
1857 /* for now can only contain offVRAM.
1858 * paging transfer can NOT be initiated for allocations having host 3D object (hostID) associated */
1859 VBOXCMDVBVA_ALLOCINFO Alloc;
1860 uint32_t u32Reserved;
1861 VBOXCMDVBVA_SYSMEMEL aSysMem[1];
1862} VBOXCMDVBVA_PAGING_TRANSFER;
1863#endif
1864
1865AssertCompile(sizeof (VBOXCMDVBVA_HDR) == 8);
1866AssertCompile(sizeof (VBOXCMDVBVA_ALLOCINFO) == 4);
1867AssertCompile(sizeof (VBOXCMDVBVAPAGEIDX) == 4);
1868AssertCompile(!(PAGE_SIZE % sizeof (VBOXCMDVBVAPAGEIDX)));
1869
1870#define VBOXCMDVBVA_NUM_SYSMEMEL_PER_PAGE (PAGE_SIZE / sizeof (VBOXCMDVBVA_SYSMEMEL))
1871
1872static int8_t vboxVDMACrCmdVbvaProcess(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
1873{
1874 switch (pCmd->u8OpCode)
1875 {
1876 case VBOXCMDVBVA_OPTYPE_SYSMEMCMD:
1877 {
1878 if (cbCmd < sizeof (VBOXCMDVBVA_SYSMEMCMD))
1879 {
1880 WARN(("invalid command size"));
1881 return -1;
1882 }
1883 VBOXCMDVBVA_SYSMEMCMD *pSysmemCmd = (VBOXCMDVBVA_SYSMEMCMD*)pCmd;
1884 const VBOXCMDVBVA_HDR *pRealCmdHdr;
1885 uint32_t cbRealCmd = pCmd->u8Flags;
1886 cbRealCmd |= (uint32_t)pCmd->u.u8PrimaryID << 8;
1887 if (cbRealCmd < sizeof (VBOXCMDVBVA_HDR))
1888 {
1889 WARN(("invalid sysmem cmd size"));
1890 return -1;
1891 }
1892
1893 RTGCPHYS phCmd = (RTGCPHYS)pSysmemCmd->phCmd;
1894
1895 PGMPAGEMAPLOCK Lock;
1896 PVGASTATE pVGAState = pVdma->pVGAState;
1897 PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
1898 const void * pvCmd;
1899 int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
1900 if (!RT_SUCCESS(rc))
1901 {
1902 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
1903 return -1;
1904 }
1905
1906 Assert((phCmd & PAGE_OFFSET_MASK) == (((uintptr_t)pvCmd) & PAGE_OFFSET_MASK));
1907
1908 uint32_t cbCmdPart = PAGE_SIZE - (((uintptr_t)pvCmd) & PAGE_OFFSET_MASK);
1909
1910 if (cbRealCmd <= cbCmdPart)
1911 {
1912 pRealCmdHdr = (const VBOXCMDVBVA_HDR *)pvCmd;
1913 uint8_t i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pRealCmdHdr, cbRealCmd);
1914 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1915 return i8Result;
1916 }
1917
1918 VBOXCMDVBVA_HDR Hdr;
1919 const void *pvCurCmdTail;
1920 uint32_t cbCurCmdTail;
1921 if (cbCmdPart >= sizeof (*pRealCmdHdr))
1922 {
1923 pRealCmdHdr = (const VBOXCMDVBVA_HDR *)pvCmd;
1924 pvCurCmdTail = (const void*)(pRealCmdHdr + 1);
1925 cbCurCmdTail = cbCmdPart - sizeof (*pRealCmdHdr);
1926 }
1927 else
1928 {
1929 memcpy(&Hdr, pvCmd, cbCmdPart);
1930 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1931 phCmd += cbCmdPart;
1932 Assert(!(phCmd & PAGE_OFFSET_MASK));
1933 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
1934 if (!RT_SUCCESS(rc))
1935 {
1936 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
1937 return -1;
1938 }
1939
1940 cbCmdPart = sizeof (*pRealCmdHdr) - cbCmdPart;
1941 memcpy(((uint8_t*)(&Hdr)) + cbCmdPart, pvCmd, cbCmdPart);
1942 pRealCmdHdr = &Hdr;
1943 pvCurCmdTail = (const void*)(((uint8_t*)pvCmd) + cbCmdPart);
1944 cbCurCmdTail = PAGE_SIZE - cbCmdPart;
1945 }
1946
1947 if (cbCurCmdTail > cbRealCmd - sizeof (*pRealCmdHdr))
1948 cbCurCmdTail = cbRealCmd - sizeof (*pRealCmdHdr);
1949
1950 int8_t i8Result = 0;
1951
1952 switch (pRealCmdHdr->u8OpCode)
1953 {
1954 case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
1955 {
1956 const uint32_t *pPages;
1957 uint32_t cPages;
1958 uint8_t *pu8Vram;
1959 bool fIn;
1960 i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pRealCmdHdr, (const VBOXCMDVBVA_PAGING_TRANSFER_DATA*)pvCurCmdTail, cbRealCmd,
1961 &pPages, &cPages,
1962 &pu8Vram, &fIn);
1963 if (i8Result < 0)
1964 {
1965 WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
1966 /* we need to break, not return, to ensure currently locked page is released */
1967 break;
1968 }
1969
1970 if (cbCurCmdTail & 3)
1971 {
1972 WARN(("command is not alligned properly %d", cbCurCmdTail));
1973 i8Result = -1;
1974 /* we need to break, not return, to ensure currently locked page is released */
1975 break;
1976 }
1977
1978 uint32_t cCurPages = cbCurCmdTail / sizeof (VBOXCMDVBVAPAGEIDX);
1979 Assert(cCurPages < cPages);
1980
1981 do
1982 {
1983 rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pPages, cCurPages, pu8Vram, fIn);
1984 if (!RT_SUCCESS(rc))
1985 {
1986 WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
1987 i8Result = -1;
1988 /* we need to break, not return, to ensure currently locked page is released */
1989 break;
1990 }
1991
1992 Assert(cPages >= cCurPages);
1993 cPages -= cCurPages;
1994
1995 if (!cPages)
1996 break;
1997
1998 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1999
2000 Assert(!(phCmd & PAGE_OFFSET_MASK));
2001
2002 phCmd += PAGE_SIZE;
2003 pu8Vram += (VBOXCMDVBVAOFFSET)cCurPages << PAGE_SHIFT;
2004
2005 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
2006 if (!RT_SUCCESS(rc))
2007 {
2008 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
2009 /* the page is not locked, return */
2010 return -1;
2011 }
2012
2013 cCurPages = PAGE_SIZE / sizeof (VBOXCMDVBVAPAGEIDX);
2014 if (cCurPages > cPages)
2015 cCurPages = cPages;
2016 } while (1);
2017 break;
2018 }
2019 default:
2020 WARN(("command can not be splitted"));
2021 i8Result = -1;
2022 break;
2023 }
2024
2025 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2026 return i8Result;
2027 }
2028 case VBOXCMDVBVA_OPTYPE_COMPLEXCMD:
2029 {
2030 Assert(cbCmd >= sizeof (VBOXCMDVBVA_HDR));
2031 ++pCmd;
2032 cbCmd -= sizeof (*pCmd);
2033 uint32_t cbCurCmd = 0;
2034 for ( ; cbCmd; cbCmd -= cbCurCmd, pCmd = (VBOXCMDVBVA_HDR*)(((uint8_t*)pCmd) + cbCurCmd))
2035 {
2036 if (cbCmd < sizeof (VBOXCMDVBVA_HDR))
2037 {
2038 WARN(("invalid command size"));
2039 return -1;
2040 }
2041
2042 cbCurCmd = pCmd->u2.complexCmdEl.u16CbCmdHost;
2043 if (cbCmd < cbCurCmd)
2044 {
2045 WARN(("invalid command size"));
2046 return -1;
2047 }
2048
2049 int8_t i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCurCmd);
2050 if (i8Result < 0)
2051 {
2052 WARN(("vboxVDMACrCmdVbvaProcess failed"));
2053 return i8Result;
2054 }
2055 }
2056 return 0;
2057 }
2058 default:
2059 return vboxVDMACrCmdVbvaProcessCmdData(pVdma, pCmd, cbCmd);
2060 }
2061}
2062
2063static void vboxVDMACrCmdProcess(struct VBOXVDMAHOST *pVdma, uint8_t* pu8Cmd, uint32_t cbCmd)
2064{
2065 if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
2066 return;
2067
2068 if (cbCmd < sizeof (VBOXCMDVBVA_HDR))
2069 {
2070 WARN(("invalid command size"));
2071 return;
2072 }
2073
2074 PVBOXCMDVBVA_HDR pCmd = (PVBOXCMDVBVA_HDR)pu8Cmd;
2075
2076 /* check if the command is cancelled */
2077 if (!ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_IN_PROGRESS, VBOXCMDVBVA_STATE_SUBMITTED))
2078 {
2079 Assert(pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED);
2080 return;
2081 }
2082
2083 pCmd->u.i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCmd);
2084}
2085
/**
 * Posts the CRHGSMI_SETUP control to the chromium service, announcing the
 * VRAM location/size, the 3D activity LED and the callout entry point, and
 * on success captures the server's command-service vtable in CrSrvInfo.
 *
 * @returns VBox status code; CrSrvInfo is zeroed on any failure.
 */
static int vboxVDMACrCtlHgsmiSetup(struct VBOXVDMAHOST *pVdma)
{
    PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP pCmd = (PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP)
                                                   vboxVDMACrCtlCreate (VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP, sizeof (*pCmd));
    int rc = VERR_NO_MEMORY;
    if (pCmd)
    {
        PVGASTATE pVGAState = pVdma->pVGAState;
        pCmd->pvVRamBase = pVGAState->vram_ptrR3;
        pCmd->cbVRam = pVGAState->vram_size;
        pCmd->pLed = &pVGAState->Led3D;
        pCmd->CrClientInfo.hClient = pVdma;
        pCmd->CrClientInfo.pfnCallout = vboxCmdVBVACmdCallout;
        rc = vboxVDMACrCtlPost(pVGAState, &pCmd->Hdr, sizeof (*pCmd));
        if (RT_SUCCESS(rc))
        {
            /* fetch the command's own completion status */
            rc = vboxVDMACrCtlGetRc(&pCmd->Hdr);
            if (RT_SUCCESS(rc))
                pVdma->CrSrvInfo = pCmd->CrCmdServerInfo;
            else if (rc != VERR_NOT_SUPPORTED)
                WARN(("vboxVDMACrCtlGetRc returned %d\n", rc));
        }
        else
            WARN(("vboxVDMACrCtlPost failed %d\n", rc));

        vboxVDMACrCtlRelease(&pCmd->Hdr);
    }

    if (!RT_SUCCESS(rc))
        memset(&pVdma->CrSrvInfo, 0, sizeof (pVdma->CrSrvInfo));

    return rc;
}
2119
2120static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer);
2121
/**
 * Checks whether the descriptor carries a command that must be handled
 * outside the normal VDMA pipeline: chromium commands are forwarded to the
 * chromium HGCM backend, BPB transfers are executed inline.
 *
 * @returns VINF_SUCCESS if the command was fully handled here (possibly with
 *          asynchronous completion), VINF_NOT_SUPPORTED if the caller should
 *          process it through the regular path, or VERR_xxx on malformed
 *          guest data.
 * @param   pVdma    The VDMA host state.
 * @param   pCmdDr   The guest-submitted command descriptor.
 * @param   cbCmdDr  Size of the descriptor buffer.
 */
static int vboxVDMACmdCheckCrCmd(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmdDr, uint32_t cbCmdDr)
{
    PVBOXVDMACMD pDmaCmd = NULL;
    uint32_t cbDmaCmd = 0;
    uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
    int rc = VINF_NOT_SUPPORTED;  /* default: not ours, let the caller process it */

    cbDmaCmd = pCmdDr->cbBuf;

    if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
    {
        /* The command body is embedded directly after the descriptor. */
        if (cbCmdDr < sizeof (*pCmdDr) + VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        /* NOTE(review): this compares cbDmaCmd against the descriptor size
         * minus overhead rather than validating it fits inside cbCmdDr —
         * looks inverted; confirm against the guest-side contract. */
        if (cbDmaCmd < cbCmdDr - sizeof (*pCmdDr) - VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid command buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        pDmaCmd = VBOXVDMACBUF_DR_TAIL(pCmdDr, VBOXVDMACMD);
    }
    else if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
    {
        /* The command body lives in VRAM at a guest-given offset. */
        VBOXVIDEOOFFSET offBuf = pCmdDr->Location.offVramBuf;
        if (offBuf + cbDmaCmd > pVdma->pVGAState->vram_size)
        {
            AssertMsgFailed(("invalid command buffer data from offset!"));
            return VERR_INVALID_PARAMETER;
        }
        pDmaCmd = (VBOXVDMACMD*)(pvRam + offBuf);
    }

    if (pDmaCmd)
    {
        Assert(cbDmaCmd >= VBOXVDMACMD_HEADER_SIZE());
        uint32_t cbBody = VBOXVDMACMD_BODY_SIZE(cbDmaCmd);

        switch (pDmaCmd->enmType)
        {
            case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
            {
                PVBOXVDMACMD_CHROMIUM_CMD pCrCmd = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_CHROMIUM_CMD);
                if (cbBody < sizeof (*pCrCmd))
                {
                    AssertMsgFailed(("invalid chromium command buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }
                PVGASTATE pVGAState = pVdma->pVGAState;
                rc = VINF_SUCCESS;
                if (pVGAState->pDrv->pfnCrHgsmiCommandProcess)
                {
                    /* Mark asynchronous BEFORE handing off: the backend
                     * completes the descriptor later on its own thread. */
                    VBoxSHGSMICommandMarkAsynchCompletion(pCmdDr);
                    pVGAState->pDrv->pfnCrHgsmiCommandProcess(pVGAState->pDrv, pCrCmd, cbBody);
                    break;
                }
                else
                {
                    Assert(0);
                }

                /* No backend available: complete the descriptor right away. */
                int tmpRc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                AssertRC(tmpRc);
                break;
            }
            case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
            {
                PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
                if (cbBody < sizeof (*pTransfer))
                {
                    AssertMsgFailed(("invalid bpb transfer buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }

                /* BPB transfers are executed synchronously right here. */
                rc = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, sizeof (*pTransfer));
                AssertRC(rc);
                if (RT_SUCCESS(rc))
                {
                    pCmdDr->rc = VINF_SUCCESS;
                    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                    AssertRC(rc);
                    rc = VINF_SUCCESS;
                }
                break;
            }
            default:
                /* Not an external command type; fall back to the caller. */
                break;
        }
    }
    return rc;
}
2217
2218int vboxVDMACrHgsmiCommandCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc)
2219{
2220 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2221 PHGSMIINSTANCE pIns = pVGAState->pHGSMI;
2222 VBOXVDMACMD *pDmaHdr = VBOXVDMACMD_FROM_BODY(pCmd);
2223 VBOXVDMACBUF_DR *pDr = VBOXVDMACBUF_DR_FROM_TAIL(pDmaHdr);
2224 AssertRC(rc);
2225 pDr->rc = rc;
2226
2227 Assert(pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
2228 rc = VBoxSHGSMICommandComplete(pIns, pDr);
2229 AssertRC(rc);
2230 return rc;
2231}
2232
2233int vboxVDMACrHgsmiControlCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CTL pCmd, int rc)
2234{
2235 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2236 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pCmdPrivate = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
2237 pCmdPrivate->rc = rc;
2238 if (pCmdPrivate->pfnCompletion)
2239 {
2240 pCmdPrivate->pfnCompletion(pVGAState, pCmd, pCmdPrivate->pvCompletion);
2241 }
2242 return VINF_SUCCESS;
2243}
2244
2245static int vboxVDMACmdExecBltPerform(PVBOXVDMAHOST pVdma, uint8_t *pvDstSurf, const uint8_t *pvSrcSurf,
2246 const PVBOXVDMA_SURF_DESC pDstDesc, const PVBOXVDMA_SURF_DESC pSrcDesc,
2247 const VBOXVDMA_RECTL * pDstRectl, const VBOXVDMA_RECTL * pSrcRectl)
2248{
2249 RT_NOREF(pVdma);
2250 /* we do not support color conversion */
2251 Assert(pDstDesc->format == pSrcDesc->format);
2252 /* we do not support stretching */
2253 Assert(pDstRectl->height == pSrcRectl->height);
2254 Assert(pDstRectl->width == pSrcRectl->width);
2255 if (pDstDesc->format != pSrcDesc->format)
2256 return VERR_INVALID_FUNCTION;
2257 if (pDstDesc->width == pDstRectl->width
2258 && pSrcDesc->width == pSrcRectl->width
2259 && pSrcDesc->width == pDstDesc->width)
2260 {
2261 Assert(!pDstRectl->left);
2262 Assert(!pSrcRectl->left);
2263 uint32_t cbOff = pDstDesc->pitch * pDstRectl->top;
2264 uint32_t cbSize = pDstDesc->pitch * pDstRectl->height;
2265 memcpy(pvDstSurf + cbOff, pvSrcSurf + cbOff, cbSize);
2266 }
2267 else
2268 {
2269 uint32_t offDstLineStart = pDstRectl->left * pDstDesc->bpp >> 3;
2270 uint32_t offDstLineEnd = ((pDstRectl->left * pDstDesc->bpp + 7) >> 3) + ((pDstDesc->bpp * pDstRectl->width + 7) >> 3);
2271 uint32_t cbDstLine = offDstLineEnd - offDstLineStart;
2272 uint32_t offDstStart = pDstDesc->pitch * pDstRectl->top + offDstLineStart;
2273 Assert(cbDstLine <= pDstDesc->pitch);
2274 uint32_t cbDstSkip = pDstDesc->pitch;
2275 uint8_t * pvDstStart = pvDstSurf + offDstStart;
2276
2277 uint32_t offSrcLineStart = pSrcRectl->left * pSrcDesc->bpp >> 3;
2278#ifdef VBOX_STRICT
2279 uint32_t offSrcLineEnd = ((pSrcRectl->left * pSrcDesc->bpp + 7) >> 3) + ((pSrcDesc->bpp * pSrcRectl->width + 7) >> 3);
2280 uint32_t cbSrcLine = offSrcLineEnd - offSrcLineStart;
2281#endif
2282 uint32_t offSrcStart = pSrcDesc->pitch * pSrcRectl->top + offSrcLineStart;
2283 Assert(cbSrcLine <= pSrcDesc->pitch);
2284 uint32_t cbSrcSkip = pSrcDesc->pitch;
2285 const uint8_t * pvSrcStart = pvSrcSurf + offSrcStart;
2286
2287 Assert(cbDstLine == cbSrcLine);
2288
2289 for (uint32_t i = 0; ; ++i)
2290 {
2291 memcpy (pvDstStart, pvSrcStart, cbDstLine);
2292 if (i == pDstRectl->height)
2293 break;
2294 pvDstStart += cbDstSkip;
2295 pvSrcStart += cbSrcSkip;
2296 }
2297 }
2298 return VINF_SUCCESS;
2299}
2300
2301static void vboxVDMARectlUnite(VBOXVDMA_RECTL * pRectl1, const VBOXVDMA_RECTL * pRectl2)
2302{
2303 if (!pRectl1->width)
2304 *pRectl1 = *pRectl2;
2305 else
2306 {
2307 int16_t x21 = pRectl1->left + pRectl1->width;
2308 int16_t x22 = pRectl2->left + pRectl2->width;
2309 if (pRectl1->left > pRectl2->left)
2310 {
2311 pRectl1->left = pRectl2->left;
2312 pRectl1->width = x21 < x22 ? x22 - pRectl1->left : x21 - pRectl1->left;
2313 }
2314 else if (x21 < x22)
2315 pRectl1->width = x22 - pRectl1->left;
2316
2317 x21 = pRectl1->top + pRectl1->height;
2318 x22 = pRectl2->top + pRectl2->height;
2319 if (pRectl1->top > pRectl2->top)
2320 {
2321 pRectl1->top = pRectl2->top;
2322 pRectl1->height = x21 < x22 ? x22 - pRectl1->top : x21 - pRectl1->top;
2323 }
2324 else if (x21 < x22)
2325 pRectl1->height = x22 - pRectl1->top;
2326 }
2327}
2328
2329/*
2330 * @return on success the number of bytes the command contained, otherwise - VERR_xxx error code
2331 */
2332static int vboxVDMACmdExecBlt(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt, uint32_t cbBuffer)
2333{
2334 const uint32_t cbBlt = VBOXVDMACMD_BODY_FIELD_OFFSET(uint32_t, VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects[pBlt->cDstSubRects]);
2335 Assert(cbBlt <= cbBuffer);
2336 if (cbBuffer < cbBlt)
2337 return VERR_INVALID_FUNCTION;
2338
2339 /* we do not support stretching for now */
2340 Assert(pBlt->srcRectl.width == pBlt->dstRectl.width);
2341 Assert(pBlt->srcRectl.height == pBlt->dstRectl.height);
2342 if (pBlt->srcRectl.width != pBlt->dstRectl.width)
2343 return VERR_INVALID_FUNCTION;
2344 if (pBlt->srcRectl.height != pBlt->dstRectl.height)
2345 return VERR_INVALID_FUNCTION;
2346 Assert(pBlt->cDstSubRects);
2347
2348 uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
2349 VBOXVDMA_RECTL updateRectl = {0, 0, 0, 0};
2350
2351 if (pBlt->cDstSubRects)
2352 {
2353 VBOXVDMA_RECTL dstRectl, srcRectl;
2354 const VBOXVDMA_RECTL *pDstRectl, *pSrcRectl;
2355 for (uint32_t i = 0; i < pBlt->cDstSubRects; ++i)
2356 {
2357 pDstRectl = &pBlt->aDstSubRects[i];
2358 if (pBlt->dstRectl.left || pBlt->dstRectl.top)
2359 {
2360 dstRectl.left = pDstRectl->left + pBlt->dstRectl.left;
2361 dstRectl.top = pDstRectl->top + pBlt->dstRectl.top;
2362 dstRectl.width = pDstRectl->width;
2363 dstRectl.height = pDstRectl->height;
2364 pDstRectl = &dstRectl;
2365 }
2366
2367 pSrcRectl = &pBlt->aDstSubRects[i];
2368 if (pBlt->srcRectl.left || pBlt->srcRectl.top)
2369 {
2370 srcRectl.left = pSrcRectl->left + pBlt->srcRectl.left;
2371 srcRectl.top = pSrcRectl->top + pBlt->srcRectl.top;
2372 srcRectl.width = pSrcRectl->width;
2373 srcRectl.height = pSrcRectl->height;
2374 pSrcRectl = &srcRectl;
2375 }
2376
2377 int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
2378 &pBlt->dstDesc, &pBlt->srcDesc,
2379 pDstRectl,
2380 pSrcRectl);
2381 AssertRC(rc);
2382 if (!RT_SUCCESS(rc))
2383 return rc;
2384
2385 vboxVDMARectlUnite(&updateRectl, pDstRectl);
2386 }
2387 }
2388 else
2389 {
2390 int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
2391 &pBlt->dstDesc, &pBlt->srcDesc,
2392 &pBlt->dstRectl,
2393 &pBlt->srcRectl);
2394 AssertRC(rc);
2395 if (!RT_SUCCESS(rc))
2396 return rc;
2397
2398 vboxVDMARectlUnite(&updateRectl, &pBlt->dstRectl);
2399 }
2400
2401 return cbBlt;
2402}
2403
/**
 * Executes a BPB transfer command: copies cbTransferSize bytes where source
 * and destination are each either a VRAM offset or a guest-physical buffer.
 *
 * Guest-physical sides are mapped and copied one page (0x1000 bytes) at a
 * time; VRAM sides are addressed directly.
 *
 * @returns On success sizeof(*pTransfer) (bytes consumed from the command
 *          buffer), otherwise a VERR_xxx error code.
 * @param   pVdma      The VDMA host state.
 * @param   pTransfer  The transfer command body.
 * @param   cbBuffer   Bytes available in the command buffer.
 */
static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer)
{
    if (cbBuffer < sizeof (*pTransfer))
        return VERR_INVALID_PARAMETER;

    PVGASTATE pVGAState = pVdma->pVGAState;
    uint8_t * pvRam = pVGAState->vram_ptrR3;
    PGMPAGEMAPLOCK SrcLock;
    PGMPAGEMAPLOCK DstLock;
    PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
    const void * pvSrc;
    void * pvDst;
    int rc = VINF_SUCCESS;
    uint32_t cbTransfer = pTransfer->cbTransferSize;   /* bytes still to copy */
    uint32_t cbTransfered = 0;                         /* bytes copied so far */
    bool bSrcLocked = false;
    bool bDstLocked = false;
    do
    {
        uint32_t cbSubTransfer = cbTransfer;
        if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
        {
            /* Source is VRAM: address it directly, no mapping needed. */
            pvSrc  = pvRam + pTransfer->Src.offVramBuf + cbTransfered;
        }
        else
        {
            /* Source is guest physical memory: map the current page.
             * NOTE(review): cbSubTransfer is clamped to 0x1000, not to the
             * bytes remaining in the mapped page — if phBuf is not
             * page-aligned this could read past the mapping; confirm the
             * guest always submits page-aligned buffers. */
            RTGCPHYS phPage = pTransfer->Src.phBuf;
            phPage += cbTransfered;
            rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvSrc, &SrcLock);
            AssertRC(rc);
            if (RT_SUCCESS(rc))
            {
                bSrcLocked = true;
                cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
            }
            else
            {
                break;
            }
        }

        if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
        {
            /* Destination is VRAM: address it directly. */
            pvDst  = pvRam + pTransfer->Dst.offVramBuf + cbTransfered;
        }
        else
        {
            /* Destination is guest physical memory: map the current page. */
            RTGCPHYS phPage = pTransfer->Dst.phBuf;
            phPage += cbTransfered;
            rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvDst, &DstLock);
            AssertRC(rc);
            if (RT_SUCCESS(rc))
            {
                bDstLocked = true;
                cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
            }
            else
            {
                break;
            }
        }

        if (RT_SUCCESS(rc))
        {
            memcpy(pvDst, pvSrc, cbSubTransfer);
            cbTransfer -= cbSubTransfer;
            cbTransfered += cbSubTransfer;
        }
        else
        {
            cbTransfer = 0; /* to break */
        }

        /* Release the page mappings acquired for this chunk before looping. */
        if (bSrcLocked)
            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &SrcLock);
        if (bDstLocked)
            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &DstLock);
    } while (cbTransfer);

    if (RT_SUCCESS(rc))
        return sizeof (*pTransfer);
    return rc;
}
2487
2488static int vboxVDMACmdExec(PVBOXVDMAHOST pVdma, const uint8_t *pvBuffer, uint32_t cbBuffer)
2489{
2490 do
2491 {
2492 Assert(pvBuffer);
2493 Assert(cbBuffer >= VBOXVDMACMD_HEADER_SIZE());
2494
2495 if (!pvBuffer)
2496 return VERR_INVALID_PARAMETER;
2497 if (cbBuffer < VBOXVDMACMD_HEADER_SIZE())
2498 return VERR_INVALID_PARAMETER;
2499
2500 PVBOXVDMACMD pCmd = (PVBOXVDMACMD)pvBuffer;
2501 switch (pCmd->enmType)
2502 {
2503 case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
2504 {
2505#ifdef VBOXWDDM_TEST_UHGSMI
2506 static int count = 0;
2507 static uint64_t start, end;
2508 if (count==0)
2509 {
2510 start = RTTimeNanoTS();
2511 }
2512 ++count;
2513 if (count==100000)
2514 {
2515 end = RTTimeNanoTS();
2516 float ems = (end-start)/1000000.f;
2517 LogRel(("100000 calls took %i ms, %i cps\n", (int)ems, (int)(100000.f*1000.f/ems) ));
2518 }
2519#endif
2520 /* todo: post the buffer to chromium */
2521 return VINF_SUCCESS;
2522 }
2523 case VBOXVDMACMD_TYPE_DMA_PRESENT_BLT:
2524 {
2525 const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_PRESENT_BLT);
2526 int cbBlt = vboxVDMACmdExecBlt(pVdma, pBlt, cbBuffer);
2527 Assert(cbBlt >= 0);
2528 Assert((uint32_t)cbBlt <= cbBuffer);
2529 if (cbBlt >= 0)
2530 {
2531 if ((uint32_t)cbBlt == cbBuffer)
2532 return VINF_SUCCESS;
2533 else
2534 {
2535 cbBuffer -= (uint32_t)cbBlt;
2536 pvBuffer -= cbBlt;
2537 }
2538 }
2539 else
2540 return cbBlt; /* error */
2541 break;
2542 }
2543 case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
2544 {
2545 const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
2546 int cbTransfer = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, cbBuffer);
2547 Assert(cbTransfer >= 0);
2548 Assert((uint32_t)cbTransfer <= cbBuffer);
2549 if (cbTransfer >= 0)
2550 {
2551 if ((uint32_t)cbTransfer == cbBuffer)
2552 return VINF_SUCCESS;
2553 else
2554 {
2555 cbBuffer -= (uint32_t)cbTransfer;
2556 pvBuffer -= cbTransfer;
2557 }
2558 }
2559 else
2560 return cbTransfer; /* error */
2561 break;
2562 }
2563 case VBOXVDMACMD_TYPE_DMA_NOP:
2564 return VINF_SUCCESS;
2565 case VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ:
2566 return VINF_SUCCESS;
2567 default:
2568 AssertBreakpoint();
2569 return VERR_INVALID_FUNCTION;
2570 }
2571 } while (1);
2572
2573 /* we should not be here */
2574 AssertBreakpoint();
2575 return VERR_INVALID_STATE;
2576}
2577
/**
 * The VDMA worker thread: pulls commands and controls off the command VBVA
 * ring and dispatches them until asked to terminate.
 *
 * @returns VINF_SUCCESS.
 * @param   hThreadSelf  The thread handle (unused).
 * @param   pvUser       Pointer to the VBOXVDMAHOST state.
 */
static DECLCALLBACK(int) vboxVDMAWorkerThread(RTTHREAD hThreadSelf, void *pvUser)
{
    RT_NOREF(hThreadSelf);
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvUser;
    PVGASTATE pVGAState = pVdma->pVGAState;
    VBVAEXHOSTCONTEXT *pCmdVbva = &pVdma->CmdVbva;
    uint8_t *pCmd;
    uint32_t cbCmd;
    int rc;

    /* Signal the creator that initialization is done. */
    VBoxVDMAThreadNotifyConstructSucceeded(&pVdma->Thread, pvUser);

    while (!VBoxVDMAThreadIsTerminating(&pVdma->Thread))
    {
        VBVAEXHOST_DATA_TYPE enmType = VBoxVBVAExHPDataGet(pCmdVbva, &pCmd, &cbCmd);
        switch (enmType)
        {
            case VBVAEXHOST_DATA_TYPE_CMD:
                /* A guest command buffer: process, complete and notify the guest. */
                vboxVDMACrCmdProcess(pVdma, pCmd, cbCmd);
                VBoxVBVAExHPDataCompleteCmd(pCmdVbva, cbCmd);
                VBVARaiseIrq(pVGAState, 0);
                break;
            case VBVAEXHOST_DATA_TYPE_GUESTCTL:
                rc = vboxVDMACrGuestCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd);
                VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
                break;
            case VBVAEXHOST_DATA_TYPE_HOSTCTL:
            {
                bool fContinue = true;
                rc = vboxVDMACrHostCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd, &fContinue);
                VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
                if (fContinue)
                    break;
            }
            /* fall through - the host control asked us to stop processing,
             * so block exactly like the no-data case until woken again. */
            case VBVAEXHOST_DATA_TYPE_NO_DATA:
                rc = VBoxVDMAThreadEventWait(&pVdma->Thread, RT_INDEFINITE_WAIT);
                AssertRC(rc);
                break;
            default:
                WARN(("unexpected type %d\n", enmType));
                break;
        }
    }

    /* Acknowledge the termination request. */
    VBoxVDMAThreadNotifyTerminatingSucceeded(&pVdma->Thread, pvUser);

    return VINF_SUCCESS;
}
2626
/**
 * Resolves the command buffer referenced by a descriptor (tail data, VRAM
 * offset or a guest-physical page), executes it via vboxVDMACmdExec and
 * completes the descriptor with the resulting status.
 *
 * @param   pVdma  The VDMA host state.
 * @param   pCmd   The guest command descriptor to process and complete.
 * @param   cbCmd  Size of the descriptor (unused).
 */
static void vboxVDMACommandProcess(PVBOXVDMAHOST pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    RT_NOREF(cbCmd);
    PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
    const uint8_t * pvBuf;
    PGMPAGEMAPLOCK Lock;
    int rc;
    bool bReleaseLocked = false;

    do
    {
        PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;

        if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
            /* Buffer is embedded right after the descriptor. */
            pvBuf = VBOXVDMACBUF_DR_TAIL(pCmd, const uint8_t);
        else if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
        {
            /* Buffer lives in VRAM at the given offset. */
            uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
            pvBuf = pvRam + pCmd->Location.offVramBuf;
        }
        else
        {
            /* Buffer is in guest physical memory; only a single page is
             * supported by this path. */
            RTGCPHYS phPage = pCmd->Location.phBuf & ~0xfffULL;
            uint32_t offset = pCmd->Location.phBuf & 0xfff;
            Assert(offset + pCmd->cbBuf <= 0x1000);
            if (offset + pCmd->cbBuf > 0x1000)
            {
                /* @todo: more advanced mechanism of command buffer proc is actually needed */
                rc = VERR_INVALID_PARAMETER;
                break;
            }

            const void * pvPageBuf;
            rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPageBuf, &Lock);
            AssertRC(rc);
            if (!RT_SUCCESS(rc))
            {
                /* @todo: if (rc == VERR_PGM_PHYS_PAGE_RESERVED) -> fall back on using PGMPhysRead ?? */
                break;
            }

            pvBuf = (const uint8_t *)pvPageBuf;
            pvBuf += offset;

            /* Remember to drop the page mapping lock after execution. */
            bReleaseLocked = true;
        }

        rc = vboxVDMACmdExec(pVdma, pvBuf, pCmd->cbBuf);
        AssertRC(rc);

        if (bReleaseLocked)
            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
    } while (0);

    /* Report the execution status back to the guest. */
    pCmd->rc = rc;

    rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
    AssertRC(rc);
}
2686
2687static void vboxVDMAControlProcess(PVBOXVDMAHOST pVdma, PVBOXVDMA_CTL pCmd)
2688{
2689 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
2690 pCmd->i32Result = VINF_SUCCESS;
2691 int rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
2692 AssertRC(rc);
2693}
2694
2695#endif /* #ifdef VBOX_WITH_CRHGSMI */
2696
2697#ifdef VBOX_VDMA_WITH_WATCHDOG
2698static DECLCALLBACK(void) vboxVDMAWatchDogTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
2699{
2700 VBOXVDMAHOST *pVdma = (VBOXVDMAHOST *)pvUser;
2701 PVGASTATE pVGAState = pVdma->pVGAState;
2702 VBVARaiseIrq(pVGAState, HGSMIHOSTFLAGS_WATCHDOG);
2703}
2704
2705static int vboxVDMAWatchDogCtl(struct VBOXVDMAHOST *pVdma, uint32_t cMillis)
2706{
2707 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
2708 if (cMillis)
2709 TMTimerSetMillies(pVdma->WatchDogTimer, cMillis);
2710 else
2711 TMTimerStop(pVdma->WatchDogTimer);
2712 return VINF_SUCCESS;
2713}
2714#endif
2715
/**
 * Allocates and initializes the VDMA host state for the VGA device, wiring
 * up the watchdog timer, worker thread infrastructure and the chromium
 * backend when the respective features are compiled in.
 *
 * @returns VBox status code; on failure everything is torn down again.
 * @param   pVGAState      The VGA device state to attach to.
 * @param   cPipeElements  Unused (legacy pipe sizing parameter).
 */
int vboxVDMAConstruct(PVGASTATE pVGAState, uint32_t cPipeElements)
{
    RT_NOREF(cPipeElements);
    int rc;
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)RTMemAllocZ(sizeof(*pVdma));
    Assert(pVdma);
    if (pVdma)
    {
        pVdma->pHgsmi = pVGAState->pHGSMI;
        pVdma->pVGAState = pVGAState;

#ifdef VBOX_VDMA_WITH_WATCHDOG
        rc = PDMDevHlpTMTimerCreate(pVGAState->pDevInsR3, TMCLOCK_REAL, vboxVDMAWatchDogTimer,
                                        pVdma, TMTIMER_FLAGS_NO_CRIT_SECT,
                                        "VDMA WatchDog Timer", &pVdma->WatchDogTimer);
        AssertRC(rc);
#endif

#ifdef VBOX_WITH_CRHGSMI
        VBoxVDMAThreadInit(&pVdma->Thread);

        /* Init resources in dependency order; each failure path unwinds the
         * ones already acquired. */
        rc = RTSemEventMultiCreate(&pVdma->HostCrCtlCompleteEvent);
        if (RT_SUCCESS(rc))
        {
            rc = VBoxVBVAExHSInit(&pVdma->CmdVbva);
            if (RT_SUCCESS(rc))
            {
                rc = RTCritSectInit(&pVdma->CalloutCritSect);
                if (RT_SUCCESS(rc))
                {
                    pVGAState->pVdma = pVdma;
                    int rcIgnored = vboxVDMACrCtlHgsmiSetup(pVdma); NOREF(rcIgnored); /** @todo is this ignoring intentional? */
                    return VINF_SUCCESS;
                }
                WARN(("RTCritSectInit failed %d\n", rc));

                VBoxVBVAExHSTerm(&pVdma->CmdVbva);
            }
            else
                WARN(("VBoxVBVAExHSInit failed %d\n", rc));

            RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
        }
        else
            WARN(("RTSemEventMultiCreate failed %d\n", rc));


        RTMemFree(pVdma);
#else
        pVGAState->pVdma = pVdma;
        return VINF_SUCCESS;
#endif
    }
    else
        rc = VERR_OUT_OF_RESOURCES;

    return rc;
}
2774
2775int vboxVDMAReset(struct VBOXVDMAHOST *pVdma)
2776{
2777#ifdef VBOX_WITH_CRHGSMI
2778 vdmaVBVACtlDisableSync(pVdma);
2779#endif
2780 return VINF_SUCCESS;
2781}
2782
2783int vboxVDMADestruct(struct VBOXVDMAHOST *pVdma)
2784{
2785 if (!pVdma)
2786 return VINF_SUCCESS;
2787#ifdef VBOX_WITH_CRHGSMI
2788 vdmaVBVACtlDisableSync(pVdma);
2789 VBoxVDMAThreadCleanup(&pVdma->Thread);
2790 VBoxVBVAExHSTerm(&pVdma->CmdVbva);
2791 RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
2792 RTCritSectDelete(&pVdma->CalloutCritSect);
2793#endif
2794 RTMemFree(pVdma);
2795 return VINF_SUCCESS;
2796}
2797
2798void vboxVDMAControl(struct VBOXVDMAHOST *pVdma, PVBOXVDMA_CTL pCmd, uint32_t cbCmd)
2799{
2800 RT_NOREF(cbCmd);
2801 PHGSMIINSTANCE pIns = pVdma->pHgsmi;
2802
2803 switch (pCmd->enmCtl)
2804 {
2805 case VBOXVDMA_CTL_TYPE_ENABLE:
2806 pCmd->i32Result = VINF_SUCCESS;
2807 break;
2808 case VBOXVDMA_CTL_TYPE_DISABLE:
2809 pCmd->i32Result = VINF_SUCCESS;
2810 break;
2811 case VBOXVDMA_CTL_TYPE_FLUSH:
2812 pCmd->i32Result = VINF_SUCCESS;
2813 break;
2814#ifdef VBOX_VDMA_WITH_WATCHDOG
2815 case VBOXVDMA_CTL_TYPE_WATCHDOG:
2816 pCmd->i32Result = vboxVDMAWatchDogCtl(pVdma, pCmd->u32Offset);
2817 break;
2818#endif
2819 default:
2820 WARN(("cmd not supported"));
2821 pCmd->i32Result = VERR_NOT_SUPPORTED;
2822 }
2823
2824 int rc = VBoxSHGSMICommandComplete (pIns, pCmd);
2825 AssertRC(rc);
2826}
2827
/**
 * Entry point for guest VDMA command descriptors: routes chromium commands
 * to the backend and everything else to the synchronous processing path.
 *
 * @param   pVdma  The VDMA host state.
 * @param   pCmd   The guest command descriptor.
 * @param   cbCmd  Size of the descriptor buffer.
 */
void vboxVDMACommand(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    int rc = VERR_NOT_IMPLEMENTED;

#ifdef VBOX_WITH_CRHGSMI
    /* chromium commands are processed by crhomium hgcm thread independently from our internal cmd processing pipeline
     * this is why we process them specially */
    rc = vboxVDMACmdCheckCrCmd(pVdma, pCmd, cbCmd);
    if (rc == VINF_SUCCESS)
        return;  /* handled (or queued for async completion) by the CR path */

    if (RT_FAILURE(rc))
    {
        /* Malformed command: report the failure straight back to the guest. */
        pCmd->rc = rc;
        rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
        AssertRC(rc);
        return;
    }

    /* VINF_NOT_SUPPORTED: regular command, run it through the normal path. */
    vboxVDMACommandProcess(pVdma, pCmd, cbCmd);
#else
    /* No chromium support compiled in: complete with "not implemented". */
    pCmd->rc = rc;
    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
    AssertRC(rc);
#endif
}
2854
2855/**/
2856#ifdef VBOX_WITH_CRHGSMI
2857
2858static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext);
2859
2860static int vdmaVBVACtlSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2861{
2862 int rc = VBoxVBVAExHCtlSubmit(&pVdma->CmdVbva, pCtl, enmSource, pfnComplete, pvComplete);
2863 if (RT_SUCCESS(rc))
2864 {
2865 if (rc == VINF_SUCCESS)
2866 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
2867 else
2868 Assert(rc == VINF_ALREADY_INITIALIZED);
2869 }
2870 else
2871 Log(("VBoxVBVAExHCtlSubmit failed %d\n", rc));
2872
2873 return rc;
2874}
2875
2876static DECLCALLBACK(void) vboxCmdVBVACmdCtlGuestCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext)
2877{
2878 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvContext;
2879 VBOXCMDVBVA_CTL *pGCtl = (VBOXCMDVBVA_CTL*)(pCtl->u.cmd.pu8Cmd - sizeof (VBOXCMDVBVA_CTL));
2880 AssertRC(rc);
2881 pGCtl->i32Result = rc;
2882
2883 Assert(pVdma->pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
2884 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pGCtl);
2885 AssertRC(rc);
2886
2887 VBoxVBVAExHCtlFree(pVbva, pCtl);
2888}
2889
2890static int vdmaVBVACtlGenericSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_SOURCE enmSource, VBVAEXHOSTCTL_TYPE enmType, uint8_t* pu8Cmd, uint32_t cbCmd, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2891{
2892 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, enmType);
2893 if (!pHCtl)
2894 {
2895 WARN(("VBoxVBVAExHCtlCreate failed\n"));
2896 return VERR_NO_MEMORY;
2897 }
2898
2899 pHCtl->u.cmd.pu8Cmd = pu8Cmd;
2900 pHCtl->u.cmd.cbCmd = cbCmd;
2901 int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, enmSource, pfnComplete, pvComplete);
2902 if (RT_FAILURE(rc))
2903 {
2904 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
2905 Log(("vdmaVBVACtlSubmit failed rc %d\n", rc));
2906 return rc;;
2907 }
2908 return VINF_SUCCESS;
2909}
2910
2911static int vdmaVBVACtlGenericGuestSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_TYPE enmType, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
2912{
2913 Assert(cbCtl >= sizeof (VBOXCMDVBVA_CTL));
2914 VBoxSHGSMICommandMarkAsynchCompletion(pCtl);
2915 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_GUEST, enmType, (uint8_t*)(pCtl+1), cbCtl - sizeof (VBOXCMDVBVA_CTL), vboxCmdVBVACmdCtlGuestCompletion, pVdma);
2916 if (RT_SUCCESS(rc))
2917 return VINF_SUCCESS;
2918
2919 WARN(("vdmaVBVACtlGenericSubmit failed %d\n", rc));
2920 pCtl->i32Result = rc;
2921 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
2922 AssertRC(rc);
2923 return VINF_SUCCESS;
2924}
2925
2926static DECLCALLBACK(void) vboxCmdVBVACmdCtlHostCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvCompletion)
2927{
2928 VBOXCRCMDCTL* pVboxCtl = (VBOXCRCMDCTL*)pCtl->u.cmd.pu8Cmd;
2929 if (pVboxCtl->u.pfnInternal)
2930 ((PFNCRCTLCOMPLETION)pVboxCtl->u.pfnInternal)(pVboxCtl, pCtl->u.cmd.cbCmd, rc, pvCompletion);
2931 VBoxVBVAExHCtlFree(pVbva, pCtl);
2932}
2933
2934static int vdmaVBVACtlOpaqueHostSubmit(PVBOXVDMAHOST pVdma, struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
2935 PFNCRCTLCOMPLETION pfnCompletion,
2936 void *pvCompletion)
2937{
2938 pCmd->u.pfnInternal = (void(*)())pfnCompletion;
2939 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_HOST, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, (uint8_t*)pCmd, cbCmd, vboxCmdVBVACmdCtlHostCompletion, pvCompletion);
2940 if (RT_FAILURE(rc))
2941 {
2942 if (rc == VERR_INVALID_STATE)
2943 {
2944 pCmd->u.pfnInternal = NULL;
2945 PVGASTATE pVGAState = pVdma->pVGAState;
2946 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCmd, cbCmd, pfnCompletion, pvCompletion);
2947 if (!RT_SUCCESS(rc))
2948 WARN(("pfnCrHgsmiControlProcess failed %d\n", rc));
2949
2950 return rc;
2951 }
2952 WARN(("vdmaVBVACtlGenericSubmit failed %d\n", rc));
2953 return rc;
2954 }
2955
2956 return VINF_SUCCESS;
2957}
2958
2959static DECLCALLBACK(int) vdmaVBVANotifyEnable(PVGASTATE pVGAState)
2960{
2961 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
2962 {
2963 int rc = pVGAState->pDrv->pfnVBVAEnable (pVGAState->pDrv, i, NULL, true);
2964 if (!RT_SUCCESS(rc))
2965 {
2966 WARN(("pfnVBVAEnable failed %d\n", rc));
2967 for (uint32_t j = 0; j < i; j++)
2968 {
2969 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, j);
2970 }
2971
2972 return rc;
2973 }
2974 }
2975 return VINF_SUCCESS;
2976}
2977
2978static DECLCALLBACK(int) vdmaVBVANotifyDisable(PVGASTATE pVGAState)
2979{
2980 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
2981 {
2982 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, i);
2983 }
2984 return VINF_SUCCESS;
2985}
2986
/**
 * Worker-thread-creation callback for the enable path: once the thread is up,
 * processes the pending enable/disable control, notifies Main about the VBVA
 * state change and completes the control.
 *
 * @param   pThread          The worker thread (unused).
 * @param   rc               Thread creation status.
 * @param   pvThreadContext  Pointer to the VBOXVDMAHOST state.
 * @param   pvContext        The VBVAEXHOSTCTL carrying the enable request.
 */
static DECLCALLBACK(void) vdmaVBVACtlThreadCreatedEnable(struct VBOXVDMATHREAD *pThread, int rc,
                                                         void *pvThreadContext, void *pvContext)
{
    RT_NOREF(pThread);
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvThreadContext;
    VBVAEXHOSTCTL* pHCtl = (VBVAEXHOSTCTL*)pvContext;

    if (RT_SUCCESS(rc))
    {
        rc = vboxVDMACrGuestCtlProcess(pVdma, pHCtl);
        /* rc == VINF_SUCCESS would mean the actual state change has occcured */
        if (rc == VINF_SUCCESS)
        {
            /* we need to inform Main about VBVA enable/disable
             * main expects notifications to be done from the main thread
             * submit it there */
            PVGASTATE pVGAState = pVdma->pVGAState;

            if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
                vdmaVBVANotifyEnable(pVGAState);
            else
                vdmaVBVANotifyDisable(pVGAState);
        }
        else if (RT_FAILURE(rc))
            WARN(("vboxVDMACrGuestCtlProcess failed %d\n", rc));
    }
    else
        WARN(("vdmaVBVACtlThreadCreatedEnable is passed %d\n", rc));

    /* Always complete the control so the submitter is unblocked. */
    VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pHCtl, rc);
}
3018
3019static int vdmaVBVACtlEnableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, bool fPaused, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3020{
3021 int rc;
3022 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, fPaused ? VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED : VBVAEXHOSTCTL_TYPE_GHH_ENABLE);
3023 if (pHCtl)
3024 {
3025 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
3026 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
3027 pHCtl->pfnComplete = pfnComplete;
3028 pHCtl->pvComplete = pvComplete;
3029
3030 rc = VBoxVDMAThreadCreate(&pVdma->Thread, vboxVDMAWorkerThread, pVdma, vdmaVBVACtlThreadCreatedEnable, pHCtl);
3031 if (RT_SUCCESS(rc))
3032 return VINF_SUCCESS;
3033 else
3034 WARN(("VBoxVDMAThreadCreate failed %d\n", rc));
3035
3036 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3037 }
3038 else
3039 {
3040 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3041 rc = VERR_NO_MEMORY;
3042 }
3043
3044 return rc;
3045}
3046
3047static int vdmaVBVACtlEnableSubmitSync(PVBOXVDMAHOST pVdma, uint32_t offVram, bool fPaused)
3048{
3049 VBVAENABLE Enable = {0};
3050 Enable.u32Flags = VBVA_F_ENABLE;
3051 Enable.u32Offset = offVram;
3052
3053 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
3054 Data.rc = VERR_NOT_IMPLEMENTED;
3055 int rc = RTSemEventCreate(&Data.hEvent);
3056 if (!RT_SUCCESS(rc))
3057 {
3058 WARN(("RTSemEventCreate failed %d\n", rc));
3059 return rc;
3060 }
3061
3062 rc = vdmaVBVACtlEnableSubmitInternal(pVdma, &Enable, fPaused, vdmaVBVACtlSubmitSyncCompletion, &Data);
3063 if (RT_SUCCESS(rc))
3064 {
3065 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
3066 if (RT_SUCCESS(rc))
3067 {
3068 rc = Data.rc;
3069 if (!RT_SUCCESS(rc))
3070 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
3071 }
3072 else
3073 WARN(("RTSemEventWait failed %d\n", rc));
3074 }
3075 else
3076 WARN(("vdmaVBVACtlSubmit failed %d\n", rc));
3077
3078 RTSemEventDestroy(Data.hEvent);
3079
3080 return rc;
3081}
3082
3083static int vdmaVBVACtlDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3084{
3085 int rc;
3086 VBVAEXHOSTCTL* pHCtl;
3087 if (VBoxVBVAExHSIsDisabled(&pVdma->CmdVbva))
3088 {
3089 WARN(("VBoxVBVAExHSIsDisabled: disabled"));
3090 return VINF_SUCCESS;
3091 }
3092
3093 pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_GHH_DISABLE);
3094 if (!pHCtl)
3095 {
3096 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3097 return VERR_NO_MEMORY;
3098 }
3099
3100 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
3101 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
3102 rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_GUEST, pfnComplete, pvComplete);
3103 if (RT_SUCCESS(rc))
3104 return VINF_SUCCESS;
3105
3106 WARN(("vdmaVBVACtlSubmit failed rc %d\n", rc));
3107 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3108 return rc;
3109}
3110
3111static int vdmaVBVACtlEnableDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3112{
3113 bool fEnable = ((pEnable->u32Flags & (VBVA_F_ENABLE | VBVA_F_DISABLE)) == VBVA_F_ENABLE);
3114 if (fEnable)
3115 return vdmaVBVACtlEnableSubmitInternal(pVdma, pEnable, false, pfnComplete, pvComplete);
3116 return vdmaVBVACtlDisableSubmitInternal(pVdma, pEnable, pfnComplete, pvComplete);
3117}
3118
3119static int vdmaVBVACtlEnableDisableSubmit(PVBOXVDMAHOST pVdma, VBOXCMDVBVA_CTL_ENABLE *pEnable)
3120{
3121 VBoxSHGSMICommandMarkAsynchCompletion(&pEnable->Hdr);
3122 int rc = vdmaVBVACtlEnableDisableSubmitInternal(pVdma, &pEnable->Enable, vboxCmdVBVACmdCtlGuestCompletion, pVdma);
3123 if (RT_SUCCESS(rc))
3124 return VINF_SUCCESS;
3125
3126 WARN(("vdmaVBVACtlEnableDisableSubmitInternal failed %d\n", rc));
3127 pEnable->Hdr.i32Result = rc;
3128 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, &pEnable->Hdr);
3129 AssertRC(rc);
3130 return VINF_SUCCESS;
3131}
3132
3133static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
3134 int rc, void *pvContext)
3135{
3136 RT_NOREF(pVbva, pCtl);
3137 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvContext;
3138 pData->rc = rc;
3139 rc = RTSemEventSignal(pData->hEvent);
3140 if (!RT_SUCCESS(rc))
3141 WARN(("RTSemEventSignal failed %d\n", rc));
3142}
3143
3144static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource)
3145{
3146 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
3147 Data.rc = VERR_NOT_IMPLEMENTED;
3148 int rc = RTSemEventCreate(&Data.hEvent);
3149 if (!RT_SUCCESS(rc))
3150 {
3151 WARN(("RTSemEventCreate failed %d\n", rc));
3152 return rc;
3153 }
3154
3155 rc = vdmaVBVACtlSubmit(pVdma, pCtl, enmSource, vdmaVBVACtlSubmitSyncCompletion, &Data);
3156 if (RT_SUCCESS(rc))
3157 {
3158 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
3159 if (RT_SUCCESS(rc))
3160 {
3161 rc = Data.rc;
3162 if (!RT_SUCCESS(rc))
3163 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
3164 }
3165 else
3166 WARN(("RTSemEventWait failed %d\n", rc));
3167 }
3168 else
3169 Log(("vdmaVBVACtlSubmit failed %d\n", rc));
3170
3171 RTSemEventDestroy(Data.hEvent);
3172
3173 return rc;
3174}
3175
3176static int vdmaVBVAPause(PVBOXVDMAHOST pVdma)
3177{
3178 VBVAEXHOSTCTL Ctl;
3179 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE;
3180 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
3181}
3182
3183static int vdmaVBVAResume(PVBOXVDMAHOST pVdma)
3184{
3185 VBVAEXHOSTCTL Ctl;
3186 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME;
3187 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
3188}
3189
3190static int vboxVDMACmdSubmitPerform(struct VBOXVDMAHOST *pVdma)
3191{
3192 int rc = VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva);
3193 switch (rc)
3194 {
3195 case VINF_SUCCESS:
3196 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
3197 case VINF_ALREADY_INITIALIZED:
3198 case VINF_EOF:
3199 case VERR_INVALID_STATE:
3200 return VINF_SUCCESS;
3201 default:
3202 Assert(!RT_FAILURE(rc));
3203 return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
3204 }
3205}
3206
3207
3208int vboxCmdVBVACmdHostCtl(PPDMIDISPLAYVBVACALLBACKS pInterface,
3209 struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
3210 PFNCRCTLCOMPLETION pfnCompletion,
3211 void *pvCompletion)
3212{
3213 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
3214 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3215 if (pVdma == NULL)
3216 return VERR_INVALID_STATE;
3217 pCmd->CalloutList.List.pNext = NULL;
3218 return vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, pfnCompletion, pvCompletion);
3219}
3220
/**
 * Shared context between vboxCmdVBVACmdHostCtlSync (the waiter) and
 * vboxCmdVBVACmdHostCtlSyncCb (the completion callback).
 */
typedef struct VBOXCMDVBVA_CMDHOSTCTL_SYNC
{
    struct VBOXVDMAHOST *pVdma;  /**< The VDMA host instance owning the completion event. */
    uint32_t fProcessing;        /**< Set to 1 by the waiter, cleared to 0 by the callback when done. */
    int rc;                      /**< Final command status, written by the callback. */
} VBOXCMDVBVA_CMDHOSTCTL_SYNC;
3227
/**
 * Completion callback for vboxCmdVBVACmdHostCtlSync.
 *
 * Records the command status, clears the fProcessing flag the waiter polls,
 * and signals the shared (multi) completion event.  Runs on whatever thread
 * completes the control, not on the waiter's thread.
 */
static DECLCALLBACK(void) vboxCmdVBVACmdHostCtlSyncCb(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
{
    RT_NOREF(pCmd, cbCmd);
    VBOXCMDVBVA_CMDHOSTCTL_SYNC *pData = (VBOXCMDVBVA_CMDHOSTCTL_SYNC*)pvCompletion;

    /* Publish the result before clearing fProcessing so the waiter sees it. */
    pData->rc = rc;

    struct VBOXVDMAHOST *pVdma = pData->pVdma;

    /* Count this completion; the waiter uses the counter to decide whether
     * it is safe to reset the shared multi-event semaphore afterwards. */
    ASMAtomicIncS32(&pVdma->i32cHostCrCtlCompleted);

    pData->fProcessing = 0;

    RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
}
3243
3244static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd, VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb)
3245{
3246 pEntry->pfnCb = pfnCb;
3247 int rc = RTCritSectEnter(&pVdma->CalloutCritSect);
3248 if (RT_SUCCESS(rc))
3249 {
3250 RTListAppend(&pCmd->CalloutList.List, &pEntry->Node);
3251 RTCritSectLeave(&pVdma->CalloutCritSect);
3252
3253 RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
3254 }
3255 else
3256 WARN(("RTCritSectEnter failed %d\n", rc));
3257
3258 return rc;
3259}
3260
3261
3262static int vboxCmdVBVACmdCalloutProcess(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd)
3263{
3264 int rc = VINF_SUCCESS;
3265 for (;;)
3266 {
3267 rc = RTCritSectEnter(&pVdma->CalloutCritSect);
3268 if (RT_SUCCESS(rc))
3269 {
3270 VBOXCRCMDCTL_CALLOUT_LISTENTRY* pEntry = RTListGetFirst(&pCmd->CalloutList.List, VBOXCRCMDCTL_CALLOUT_LISTENTRY, Node);
3271 if (pEntry)
3272 RTListNodeRemove(&pEntry->Node);
3273 RTCritSectLeave(&pVdma->CalloutCritSect);
3274
3275 if (!pEntry)
3276 break;
3277
3278 pEntry->pfnCb(pEntry);
3279 }
3280 else
3281 {
3282 WARN(("RTCritSectEnter failed %d\n", rc));
3283 break;
3284 }
3285 }
3286
3287 return rc;
3288}
3289
/**
 * Submits a host control command and waits synchronously for its completion,
 * servicing callout requests (vboxCmdVBVACmdCallout) in the meantime.
 *
 * @returns VBox status code: the submission error, or the status recorded by
 *          vboxCmdVBVACmdHostCtlSyncCb.
 * @param   pInterface  The display VBVA callbacks interface.
 * @param   pCmd        The control command to submit.
 * @param   cbCmd       Size of the command in bytes.
 */
DECLCALLBACK(int) vboxCmdVBVACmdHostCtlSync(PPDMIDISPLAYVBVACALLBACKS pInterface,
                                            struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd)
{
    PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
    struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
    if (pVdma == NULL)
        return VERR_INVALID_STATE;
    /* Shared context: the completion callback clears fProcessing and sets rc. */
    VBOXCMDVBVA_CMDHOSTCTL_SYNC Data;
    Data.pVdma = pVdma;
    Data.fProcessing = 1;
    Data.rc = VERR_INTERNAL_ERROR;
    RTListInit(&pCmd->CalloutList.List);
    int rc = vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, vboxCmdVBVACmdHostCtlSyncCb, &Data);
    if (!RT_SUCCESS(rc))
    {
        WARN(("vdmaVBVACtlOpaqueHostSubmit failed %d", rc));
        return rc;
    }

    /* Wait for completion; the timeout keeps us polling so callouts queued
     * between the check and the wait are not missed. */
    while (Data.fProcessing)
    {
        /* Poll infrequently to make sure no completed message has been missed. */
        RTSemEventMultiWait(pVdma->HostCrCtlCompleteEvent, 500);

        vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);

        if (Data.fProcessing)
            RTThreadYield();
    }

    /* extra check callouts */
    vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);

    /* 'Our' message has been processed, so should reset the semaphore.
     * There is still possible that another message has been processed
     * and the semaphore has been signalled again.
     * Reset only if there are no other messages completed.
     */
    int32_t c = ASMAtomicDecS32(&pVdma->i32cHostCrCtlCompleted);
    Assert(c >= 0);
    if (!c)
        RTSemEventMultiReset(pVdma->HostCrCtlCompleteEvent);

    rc = Data.rc;
    if (!RT_SUCCESS(rc))
        WARN(("host call failed %d", rc));

    return rc;
}
3339
/**
 * Dispatches a guest VBOXCMDVBVA_CTL command received via HGSMI.
 *
 * 3DCTL/RESIZE/ENABLE types are forwarded and completed asynchronously by
 * their own paths (hence the early returns).  Invalid requests fall through
 * to the tail, which completes the SHGSMI command immediately with the
 * error status.
 *
 * @returns VINF_SUCCESS always (errors are reported via pCtl->i32Result).
 * @param   pVGAState   The VGA device state.
 * @param   pCtl        The guest control command.
 * @param   cbCtl       Size of the command in bytes.
 */
int vboxCmdVBVACmdCtl(PVGASTATE pVGAState, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
{
    struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
    int rc = VINF_SUCCESS;
    switch (pCtl->u32Type)
    {
        case VBOXCMDVBVACTL_TYPE_3DCTL:
            /* Completed asynchronously by the submitted control. */
            return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, pCtl, cbCtl);
        case VBOXCMDVBVACTL_TYPE_RESIZE:
            return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_RESIZE, pCtl, cbCtl);
        case VBOXCMDVBVACTL_TYPE_ENABLE:
            /* Validate the size before trusting the larger structure. */
            if (cbCtl != sizeof (VBOXCMDVBVA_CTL_ENABLE))
            {
                WARN(("incorrect enable size\n"));
                rc = VERR_INVALID_PARAMETER;
                break;
            }
            return vdmaVBVACtlEnableDisableSubmit(pVdma, (VBOXCMDVBVA_CTL_ENABLE*)pCtl);
        default:
            WARN(("unsupported type\n"));
            rc = VERR_INVALID_PARAMETER;
            break;
    }

    /* Only reached on validation failure: complete with the error status. */
    pCtl->i32Result = rc;
    rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
    AssertRC(rc);
    return VINF_SUCCESS;
}
3369
3370int vboxCmdVBVACmdSubmit(PVGASTATE pVGAState)
3371{
3372 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3373 {
3374 WARN(("vdma VBVA is disabled\n"));
3375 return VERR_INVALID_STATE;
3376 }
3377
3378 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3379}
3380
3381int vboxCmdVBVACmdFlush(PVGASTATE pVGAState)
3382{
3383 WARN(("flush\n"));
3384 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3385 {
3386 WARN(("vdma VBVA is disabled\n"));
3387 return VERR_INVALID_STATE;
3388 }
3389 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3390}
3391
3392void vboxCmdVBVACmdTimer(PVGASTATE pVGAState)
3393{
3394 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3395 return;
3396 vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3397}
3398
3399bool vboxCmdVBVAIsEnabled(PVGASTATE pVGAState)
3400{
3401 return VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva);
3402}
3403#endif
3404
/**
 * Prepares VDMA for saved-state writing.
 *
 * First tries to pause command VBVA; VERR_INVALID_STATE from the pause is
 * treated as "command VBVA not active" and falls back to notifying the
 * Chromium backend via a SAVESTATE_BEGIN control.  Without VBOX_WITH_CRHGSMI
 * this is a no-op.
 *
 * @returns VBox status code.
 * @param   pVdma   The VDMA host instance.
 */
int vboxVDMASaveStateExecPrep(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    int rc = vdmaVBVAPause(pVdma);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    /* VERR_INVALID_STATE means the pause path is not applicable; anything
     * else is a real failure. */
    if (rc != VERR_INVALID_STATE)
    {
        WARN(("vdmaVBVAPause failed %d\n", rc));
        return rc;
    }

#ifdef DEBUG_misha
    WARN(("debug prep"));
#endif

    PVGASTATE pVGAState = pVdma->pVGAState;
    PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
            VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_BEGIN, sizeof (*pCmd));
    Assert(pCmd);
    if (pCmd)
    {
        /* Post the control and wait for the backend's result. */
        rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
        AssertRC(rc);
        if (RT_SUCCESS(rc))
        {
            rc = vboxVDMACrCtlGetRc(pCmd);
        }
        vboxVDMACrCtlRelease(pCmd);
        return rc;
    }
    return VERR_NO_MEMORY;
#else
    return VINF_SUCCESS;
#endif
}
3442
/**
 * Counterpart of vboxVDMASaveStateExecPrep: resumes command VBVA after the
 * saved-state write, or — if the resume path is not applicable
 * (VERR_INVALID_STATE) — notifies the Chromium backend with SAVESTATE_END.
 * Without VBOX_WITH_CRHGSMI this is a no-op.
 *
 * @returns VBox status code.
 * @param   pVdma   The VDMA host instance.
 */
int vboxVDMASaveStateExecDone(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    int rc = vdmaVBVAResume(pVdma);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    /* VERR_INVALID_STATE means the resume path is not applicable. */
    if (rc != VERR_INVALID_STATE)
    {
        WARN(("vdmaVBVAResume failed %d\n", rc));
        return rc;
    }

#ifdef DEBUG_misha
    WARN(("debug done"));
#endif

    PVGASTATE pVGAState = pVdma->pVGAState;
    PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
            VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_END, sizeof (*pCmd));
    Assert(pCmd);
    if (pCmd)
    {
        /* Post the control and wait for the backend's result. */
        rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
        AssertRC(rc);
        if (RT_SUCCESS(rc))
        {
            rc = vboxVDMACrCtlGetRc(pCmd);
        }
        vboxVDMACrCtlRelease(pCmd);
        return rc;
    }
    return VERR_NO_MEMORY;
#else
    return VINF_SUCCESS;
#endif
}
3480
/**
 * Writes the command-VBVA state to the saved-state stream.
 *
 * Writes the 0xffffffff marker when command VBVA is not enabled (or when
 * built without VBOX_WITH_CRHGSMI — note the #ifdef around the 'if' makes
 * the marker branch unconditional in that build).  Otherwise writes the
 * VBVA buffer's VRAM offset and delegates to a synchronous HH_SAVESTATE
 * control.
 *
 * @returns VBox status code.
 * @param   pVdma   The VDMA host instance.
 * @param   pSSM    The saved-state handle to write to.
 */
int vboxVDMASaveStateExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
{
    int rc;

#ifdef VBOX_WITH_CRHGSMI
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
#endif
    {
        /* Marker meaning "no command VBVA state follows". */
        rc = SSMR3PutU32(pSSM, 0xffffffff);
        AssertRCReturn(rc, rc);
        return VINF_SUCCESS;
    }

#ifdef VBOX_WITH_CRHGSMI
    PVGASTATE pVGAState = pVdma->pVGAState;
    uint8_t * pu8VramBase = pVGAState->vram_ptrR3;

    /* Save the VBVA buffer location as an offset into VRAM. */
    rc = SSMR3PutU32(pSSM, (uint32_t)(((uint8_t*)pVdma->CmdVbva.pVBVA) - pu8VramBase));
    AssertRCReturn(rc, rc);

    VBVAEXHOSTCTL HCtl;
    HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_SAVESTATE;
    HCtl.u.state.pSSM = pSSM;
    HCtl.u.state.u32Version = 0;
    return vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
#endif
}
3508
/**
 * Restores the command-VBVA state from a saved-state stream.
 *
 * Reads the VRAM offset saved by vboxVDMASaveStateExecPerform; 0xffffffff
 * means no state was saved.  Otherwise re-enables command VBVA in paused
 * mode, loads the state via a synchronous HH_LOADSTATE control, and resumes.
 *
 * @returns VBox status code; VERR_VERSION_MISMATCH if state is present but
 *          the build lacks VBOX_WITH_CRHGSMI.
 * @param   pVdma       The VDMA host instance.
 * @param   pSSM        The saved-state handle to read from.
 * @param   u32Version  The saved-state unit version.
 */
int vboxVDMASaveLoadExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32;
    int rc = SSMR3GetU32(pSSM, &u32);
    AssertLogRelRCReturn(rc, rc);

    if (u32 != 0xffffffff)
    {
#ifdef VBOX_WITH_CRHGSMI
        /* Re-enable command VBVA at the saved VRAM offset, paused. */
        rc = vdmaVBVACtlEnableSubmitSync(pVdma, u32, true);
        AssertLogRelRCReturn(rc, rc);

        Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_ESTATE_PAUSED);

        VBVAEXHOSTCTL HCtl;
        HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_LOADSTATE;
        HCtl.u.state.pSSM = pSSM;
        HCtl.u.state.u32Version = u32Version;
        rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
        AssertLogRelRCReturn(rc, rc);

        /* Leave the paused state entered by the enable above. */
        rc = vdmaVBVAResume(pVdma);
        AssertLogRelRCReturn(rc, rc);

        return VINF_SUCCESS;
#else
        WARN(("Unsupported VBVACtl info!\n"));
        return VERR_VERSION_MISMATCH;
#endif
    }

    return VINF_SUCCESS;
}
3542
/**
 * Post-load notification: if command VBVA is enabled, submits a
 * HH_LOADSTATE_DONE control to the worker (fire-and-forget — the NULL
 * completion callback just frees the control when it finishes).
 *
 * @returns VBox status code.
 * @param   pVdma   The VDMA host instance.
 */
int vboxVDMASaveLoadDone(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
        return VINF_SUCCESS;

/** @todo r=bird: BTW. would be great if you put in a couple of comments here and there explaining what
 * the purpose of this code is. */
    VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE);
    if (!pHCtl)
    {
        WARN(("VBoxVBVAExHCtlCreate failed\n"));
        return VERR_NO_MEMORY;
    }

    /* sanity */
    pHCtl->u.cmd.pu8Cmd = NULL;
    pHCtl->u.cmd.cbCmd = 0;

    /* NULL completion will just free the ctl up */
    int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_HOST, NULL, NULL);
    if (RT_FAILURE(rc))
    {
        Log(("vdmaVBVACtlSubmit failed rc %d\n", rc));
        VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
        return rc;
    }
#endif
    return VINF_SUCCESS;
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette