VirtualBox

source: vbox/trunk/src/VBox/Devices/Graphics/DevVGA_VDMA.cpp@ 55705

Last change on this file since 55705 was 55493, checked in by vboxsync, 10 years ago

PGM,++: Separated physical access handler callback function pointers from the access handler registrations to reduce footprint and simplify adding a couple of more callbacks.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 111.2 KB
Line 
1/* $Id: DevVGA_VDMA.cpp 55493 2015-04-28 16:51:35Z vboxsync $ */
2/** @file
3 * Video DMA (VDMA) support.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17#include <VBox/VMMDev.h>
18#include <VBox/vmm/pdmdev.h>
19#include <VBox/vmm/pgm.h>
20#include <VBox/VBoxVideo.h>
21#include <iprt/semaphore.h>
22#include <iprt/thread.h>
23#include <iprt/mem.h>
24#include <iprt/asm.h>
25#include <iprt/list.h>
26#include <iprt/param.h>
27
28#include "DevVGA.h"
29#include "HGSMI/SHGSMIHost.h"
30
31#include <VBox/VBoxVideo3D.h>
32#include <VBox/VBoxVideoHost3D.h>
33
34#ifdef DEBUG_misha
35# define VBOXVDBG_MEMCACHE_DISABLE
36#endif
37
38#ifndef VBOXVDBG_MEMCACHE_DISABLE
39# include <iprt/memcache.h>
40#endif
41
42#ifdef DEBUG_misha
43#define WARN_BP() do { AssertFailed(); } while (0)
44#else
45#define WARN_BP() do { } while (0)
46#endif
47#define WARN(_msg) do { \
48 LogRel(_msg); \
49 WARN_BP(); \
50 } while (0)
51
52#define VBOXVDMATHREAD_STATE_TERMINATED 0
53#define VBOXVDMATHREAD_STATE_CREATING 1
54#define VBOXVDMATHREAD_STATE_CREATED 3
55#define VBOXVDMATHREAD_STATE_TERMINATING 4
56
57struct VBOXVDMATHREAD;
58
59typedef DECLCALLBACKPTR(void, PFNVBOXVDMATHREAD_CHANGED)(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvChangeContext);
60
61static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd, VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb);
62
63
/**
 * VDMA worker thread tracking structure.
 * u32State holds one of the VBOXVDMATHREAD_STATE_* values and is updated atomically.
 */
typedef struct VBOXVDMATHREAD
{
    RTTHREAD hWorkerThread;                 /**< Handle of the worker thread. */
    RTSEMEVENT hEvent;                      /**< Event semaphore used to poke / wait on the worker. */
    volatile uint32_t u32State;             /**< VBOXVDMATHREAD_STATE_* life-cycle state. */
    PFNVBOXVDMATHREAD_CHANGED pfnChanged;   /**< One-shot callback fired on the next create/terminate transition. */
    void *pvChanged;                        /**< User context passed to pfnChanged. */
} VBOXVDMATHREAD, *PVBOXVDMATHREAD;
72
73
74/* state transformations:
75 *
76 * submitter | processor
77 *
78 * LISTENING ---> PROCESSING
79 *
80 * */
81#define VBVAEXHOSTCONTEXT_STATE_LISTENING 0
82#define VBVAEXHOSTCONTEXT_STATE_PROCESSING 1
83
84#define VBVAEXHOSTCONTEXT_ESTATE_DISABLED -1
85#define VBVAEXHOSTCONTEXT_ESTATE_PAUSED 0
86#define VBVAEXHOSTCONTEXT_ESTATE_ENABLED 1
87
/**
 * Extended host VBVA context: tracks the shared guest/host VBVA buffer plus
 * pending host- and guest-issued control commands.
 */
typedef struct VBVAEXHOSTCONTEXT
{
    VBVABUFFER *pVBVA;              /**< The shared VBVA ring buffer; NULL when not enabled. */
    volatile int32_t i32State;      /**< VBVAEXHOSTCONTEXT_STATE_* (LISTENING/PROCESSING). */
    volatile int32_t i32EnableState;/**< VBVAEXHOSTCONTEXT_ESTATE_* (DISABLED/PAUSED/ENABLED). */
    volatile uint32_t u32cCtls;     /**< Total number of queued controls (both lists). */
    /* critical section for accessing ctl lists */
    RTCRITSECT CltCritSect;
    RTLISTANCHOR GuestCtlList;      /**< Controls submitted by the guest. */
    RTLISTANCHOR HostCtlList;       /**< Controls submitted by the host. */
#ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMEMCACHE CtlCache;            /**< Allocation cache for VBVAEXHOSTCTL instances. */
#endif
} VBVAEXHOSTCONTEXT;
102
/**
 * Control command types for VBVAEXHOSTCTL.
 * NOTE(review): the HH_ prefix appears to denote host-issued and GHH_
 * guest-to-host controls — inferred from submission paths; confirm.
 */
typedef enum
{
    VBVAEXHOSTCTL_TYPE_UNDEFINED = 0,
    VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE,   /**< Handled inline by the processor (see vboxVBVAExHPCheckProcessCtlInternal). */
    VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME,  /**< Handled inline by the processor. */
    VBVAEXHOSTCTL_TYPE_HH_SAVESTATE,
    VBVAEXHOSTCTL_TYPE_HH_LOADSTATE,
    VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE,
    VBVAEXHOSTCTL_TYPE_HH_BE_OPAQUE,
    VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD,
    VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE,
    VBVAEXHOSTCTL_TYPE_GHH_ENABLE,
    VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED,
    VBVAEXHOSTCTL_TYPE_GHH_DISABLE,
    VBVAEXHOSTCTL_TYPE_GHH_RESIZE
} VBVAEXHOSTCTL_TYPE;
119
120struct VBVAEXHOSTCTL;
121
122typedef DECLCALLBACKPTR(void, PFNVBVAEXHOSTCTL_COMPLETE)(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvComplete);
123
/**
 * A single queued control command.  Lives on either GuestCtlList or
 * HostCtlList of a VBVAEXHOSTCONTEXT until picked up by the processor.
 */
typedef struct VBVAEXHOSTCTL
{
    RTLISTNODE Node;                /**< List linkage (guest or host control list). */
    VBVAEXHOSTCTL_TYPE enmType;     /**< What kind of control this is. */
    union
    {
        struct
        {
            uint8_t * pu8Cmd;       /**< Command payload (points into VRAM for guest controls; see save/load). */
            uint32_t cbCmd;         /**< Payload size in bytes. */
        } cmd;

        struct
        {
            PSSMHANDLE pSSM;        /**< Saved-state handle for HH_SAVESTATE/HH_LOADSTATE. */
            uint32_t u32Version;    /**< Saved-state unit version. */
        } state;
    } u;
    PFNVBVAEXHOSTCTL_COMPLETE pfnComplete;  /**< Completion callback; when NULL the ctl is simply freed. */
    void *pvComplete;                       /**< User context for pfnComplete. */
} VBVAEXHOSTCTL;
145
/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
 * but can be called with other VBoxVBVAExS** (submitter) functions except Init/Start/Term apparently.
 * Can only be called by the processor, i.e. the entity that acquired the processor state by a direct or indirect call to VBoxVBVAExHSCheckCommands.
 * See more detailed comments in the headers for the function definitions. */
150typedef enum
151{
152 VBVAEXHOST_DATA_TYPE_NO_DATA = 0,
153 VBVAEXHOST_DATA_TYPE_CMD,
154 VBVAEXHOST_DATA_TYPE_HOSTCTL,
155 VBVAEXHOST_DATA_TYPE_GUESTCTL
156} VBVAEXHOST_DATA_TYPE;
157
158static DECLCALLBACK(int) vdmaVBVANotifyDisable(PVGASTATE pVGAState);
159
160
161static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd);
162
163static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd);
164static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc);
165
/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
 * can be called concurrently with itself as well as with other VBoxVBVAEx** functions except Init/Start/Term apparently */
168static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva);
169
170static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva);
171static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA);
172static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva);
173static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva);
174static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM);
175static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version);
176
177static VBVAEXHOSTCTL* VBoxVBVAExHCtlAlloc(VBVAEXHOSTCONTEXT *pCmdVbva)
178{
179#ifndef VBOXVDBG_MEMCACHE_DISABLE
180 return (VBVAEXHOSTCTL*)RTMemCacheAlloc(pCmdVbva->CtlCache);
181#else
182 return (VBVAEXHOSTCTL*)RTMemAlloc(sizeof (VBVAEXHOSTCTL));
183#endif
184}
185
186static void VBoxVBVAExHCtlFree(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl)
187{
188#ifndef VBOXVDBG_MEMCACHE_DISABLE
189 RTMemCacheFree(pCmdVbva->CtlCache, pCtl);
190#else
191 RTMemFree(pCtl);
192#endif
193}
194
195static VBVAEXHOSTCTL* VBoxVBVAExHCtlCreate(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL_TYPE enmType)
196{
197 VBVAEXHOSTCTL* pCtl = VBoxVBVAExHCtlAlloc(pCmdVbva);
198 if (!pCtl)
199 {
200 WARN(("VBoxVBVAExHCtlAlloc failed\n"));
201 return NULL;
202 }
203
204 pCtl->enmType = enmType;
205 return pCtl;
206}
207
/**
 * Attempts to acquire the exclusive "processor" role for the context.
 *
 * @returns VINF_SUCCESS if the caller is now the processor,
 *          VERR_SEM_BUSY if another thread already holds it.
 */
static int vboxVBVAExHSProcessorAcquire(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);

    /* Single atomic LISTENING -> PROCESSING transition; exactly one caller can win. */
    if (ASMAtomicCmpXchgS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_PROCESSING, VBVAEXHOSTCONTEXT_STATE_LISTENING))
        return VINF_SUCCESS;
    return VERR_SEM_BUSY;
}
216
/**
 * Dequeues the next pending control, preferring host controls over guest ones.
 *
 * Must be called by the current processor (i32State == PROCESSING).
 *
 * @returns The dequeued control, or NULL if none is available.
 * @param pCmdVbva      The context.
 * @param pfHostCtl     Where to return whether the control came from the host list.
 *                      Only written when a control is returned.
 * @param fHostOnlyMode If true, only the host list is considered and the
 *                      u32cCtls fast-path check is skipped.
 */
static VBVAEXHOSTCTL* vboxVBVAExHPCheckCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, bool *pfHostCtl, bool fHostOnlyMode)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);

    /* Lock-free fast path: nothing queued at all. */
    if(!fHostOnlyMode && !ASMAtomicUoReadU32(&pCmdVbva->u32cCtls))
        return NULL;

    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
        /* Host controls take priority. */
        VBVAEXHOSTCTL* pCtl = RTListGetFirst(&pCmdVbva->HostCtlList, VBVAEXHOSTCTL, Node);
        if (pCtl)
            *pfHostCtl = true;
        else if (!fHostOnlyMode)
        {
            /* Guest controls are only served while not paused. */
            if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
            {
                pCtl = RTListGetFirst(&pCmdVbva->GuestCtlList, VBVAEXHOSTCTL, Node);
                /* pCtl can not be null here since pCmdVbva->u32cCtls is not null,
                 * and there are no HostCtl commands*/
                Assert(pCtl);
                *pfHostCtl = false;
            }
        }

        if (pCtl)
        {
            RTListNodeRemove(&pCtl->Node);
            ASMAtomicDecU32(&pCmdVbva->u32cCtls);
        }

        RTCritSectLeave(&pCmdVbva->CltCritSect);

        return pCtl;
    }
    else
        WARN(("RTCritSectEnter failed %d\n", rc));

    return NULL;
}
257
258static VBVAEXHOSTCTL* VBoxVBVAExHPCheckHostCtlOnDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
259{
260 bool fHostCtl = false;
261 VBVAEXHOSTCTL* pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostCtl, true);
262 Assert(!pCtl || fHostCtl);
263 return pCtl;
264}
265
266static int VBoxVBVAExHPPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
267{
268 if (pCmdVbva->i32EnableState < VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
269 {
270 WARN(("Invalid state\n"));
271 return VERR_INVALID_STATE;
272 }
273
274 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
275 return VINF_SUCCESS;
276}
277
278static int VBoxVBVAExHPResume(struct VBVAEXHOSTCONTEXT *pCmdVbva)
279{
280 if (pCmdVbva->i32EnableState != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
281 {
282 WARN(("Invalid state\n"));
283 return VERR_INVALID_STATE;
284 }
285
286 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
287 return VINF_SUCCESS;
288}
289
290
291static bool vboxVBVAExHPCheckProcessCtlInternal(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl)
292{
293 switch (pCtl->enmType)
294 {
295 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE:
296 {
297 int rc = VBoxVBVAExHPPause(pCmdVbva);
298 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
299 return true;
300 }
301 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME:
302 {
303 int rc = VBoxVBVAExHPResume(pCmdVbva);
304 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
305 return true;
306 }
307 default:
308 return false;
309 }
310}
311
/** Releases the processor role: atomically returns the context to LISTENING. */
static void vboxVBVAExHPProcessorRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);

    ASMAtomicWriteS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
}
318
319static void vboxVBVAExHPHgEventSet(struct VBVAEXHOSTCONTEXT *pCmdVbva)
320{
321 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
322 if (pCmdVbva->pVBVA)
323 ASMAtomicOrU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, VBVA_F_STATE_PROCESSING);
324}
325
326static void vboxVBVAExHPHgEventClear(struct VBVAEXHOSTCONTEXT *pCmdVbva)
327{
328 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
329 if (pCmdVbva->pVBVA)
330 ASMAtomicAndU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, ~VBVA_F_STATE_PROCESSING);
331}
332
/**
 * Peeks at the next complete command record in the guest's VBVA ring buffer.
 *
 * Processor-only (i32State == PROCESSING) and must not be paused/disabled.
 * The returned pointer aliases the ring buffer itself; the record is not
 * consumed here — call VBoxVBVAExHPDataCompleteCmd() when done with it.
 *
 * @returns VINF_SUCCESS (ppCmd/pcbCmd set), VINF_EOF (no records),
 *          VINF_TRY_AGAIN (record still being written by the guest),
 *          VERR_INVALID_STATE (record wraps the buffer boundary — unsupported).
 */
static int vboxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pCmdVbva->i32EnableState > VBVAEXHOSTCONTEXT_ESTATE_PAUSED);

    VBVABUFFER *pVBVA = pCmdVbva->pVBVA;

    uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
    uint32_t indexRecordFree = pVBVA->indexRecordFree;

    Log(("first = %d, free = %d\n",
            indexRecordFirst, indexRecordFree));

    if (indexRecordFirst == indexRecordFree)
    {
        /* No records to process. Return without assigning output variables. */
        return VINF_EOF;
    }

    uint32_t cbRecordCurrent = ASMAtomicReadU32(&pVBVA->aRecords[indexRecordFirst].cbRecord);

    /* A new record needs to be processed. */
    if (cbRecordCurrent & VBVA_F_RECORD_PARTIAL)
    {
        /* the record is being recorded, try again */
        return VINF_TRY_AGAIN;
    }

    uint32_t cbRecord = cbRecordCurrent & ~VBVA_F_RECORD_PARTIAL;

    if (!cbRecord)
    {
        /* the record is being recorded, try again */
        return VINF_TRY_AGAIN;
    }

    /* we should not get partial commands here actually */
    Assert(cbRecord);

    /* The size of largest contiguous chunk in the ring buffer. */
    uint32_t u32BytesTillBoundary = pVBVA->cbData - pVBVA->off32Data;

    /* The pointer to data in the ring buffer. */
    uint8_t *pSrc = &pVBVA->au8Data[pVBVA->off32Data];

    /* Fetch or point the data. */
    if (u32BytesTillBoundary >= cbRecord)
    {
        /* The command does not cross buffer boundary. Return address in the buffer. */
        *ppCmd = pSrc;
        *pcbCmd = cbRecord;
        return VINF_SUCCESS;
    }

    /* Commands wrapping around the end of the ring buffer are not supported. */
    LogRel(("CmdVbva: cross-bound writes unsupported\n"));
    return VERR_INVALID_STATE;
}
390
/**
 * Consumes the current ring-buffer record: advances the data offset by cbCmd
 * (with wrap-around) and moves indexRecordFirst to the next record.
 */
static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd)
{
    VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
    pVBVA->off32Data = (pVBVA->off32Data + cbCmd) % pVBVA->cbData;

    pVBVA->indexRecordFirst = (pVBVA->indexRecordFirst + 1) % RT_ELEMENTS(pVBVA->aRecords);
}
398
399static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc)
400{
401 if (pCtl->pfnComplete)
402 pCtl->pfnComplete(pCmdVbva, pCtl, rc, pCtl->pvComplete);
403 else
404 VBoxVBVAExHCtlFree(pCmdVbva, pCtl);
405}
406
/**
 * Worker for VBoxVBVAExHPDataGet: fetches the next item for the processor.
 *
 * Controls are served before ring-buffer commands.  Internal host controls
 * (pause/resume) are executed inline and the loop continues; anything else is
 * returned to the caller.
 *
 * @returns VBVAEXHOST_DATA_TYPE_* describing what *ppCmd points to.
 */
static VBVAEXHOST_DATA_TYPE vboxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    VBVAEXHOSTCTL*pCtl;
    bool fHostClt;

    for(;;)
    {
        pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostClt, false);
        if (pCtl)
        {
            if (fHostClt)
            {
                /* Internal controls are handled right here; others are returned. */
                if (!vboxVBVAExHPCheckProcessCtlInternal(pCmdVbva, pCtl))
                {
                    *ppCmd = (uint8_t*)pCtl;
                    *pcbCmd = sizeof (*pCtl);
                    return VBVAEXHOST_DATA_TYPE_HOSTCTL;
                }
                continue;
            }
            else
            {
                *ppCmd = (uint8_t*)pCtl;
                *pcbCmd = sizeof (*pCtl);
                return VBVAEXHOST_DATA_TYPE_GUESTCTL;
            }
        }

        /* No ring-buffer commands are served while paused or disabled. */
        if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) <= VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
            return VBVAEXHOST_DATA_TYPE_NO_DATA;

        int rc = vboxVBVAExHPCmdGet(pCmdVbva, ppCmd, pcbCmd);
        switch (rc)
        {
            case VINF_SUCCESS:
                return VBVAEXHOST_DATA_TYPE_CMD;
            case VINF_EOF:
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
            case VINF_TRY_AGAIN:
                /* Guest is still writing the record; back off briefly. */
                RTThreadSleep(1);
                continue;
            default:
                /* this is something really unexpected, i.e. most likely guest has written something incorrect to the VBVA buffer */
                WARN(("Warning: vboxVBVAExHCmdGet returned unexpected status %d\n", rc));
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
        }
    }

    /* NOTE(review): unreachable — every path above returns or continues the loop. */
    WARN(("Warning: VBoxVBVAExHCmdGet unexpected state\n"));
    return VBVAEXHOST_DATA_TYPE_NO_DATA;
}
459
/**
 * Fetches the next data item for the processor, releasing the processor role
 * when the queue drains (with a re-check to close the notification race).
 *
 * @returns VBVAEXHOST_DATA_TYPE_* describing what *ppCmd points to.
 */
static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    VBVAEXHOST_DATA_TYPE enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
    if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
    {
        vboxVBVAExHPHgEventClear(pCmdVbva);
        vboxVBVAExHPProcessorRelease(pCmdVbva);
        /* we need to prevent racing between us clearing the flag and command check/submission thread, i.e.
         * 1. we check the queue -> and it is empty
         * 2. submitter adds command to the queue
         * 3. submitter checks the "processing" -> and it is true , thus it does not submit a notification
         * 4. we clear the "processing" state
         * 5. ->here we need to re-check the queue state to ensure we do not leak the notification of the above command
         * 6. if the queue appears to be not-empty set the "processing" state back to "true"
         **/
        int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
        if (RT_SUCCESS(rc))
        {
            /* we are the processor now */
            enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
            if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
            {
                /* Still empty: release again and report no data. */
                vboxVBVAExHPProcessorRelease(pCmdVbva);
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
            }

            /* Something arrived in the window: keep the processor role and flag it. */
            vboxVBVAExHPHgEventSet(pCmdVbva);
        }
    }

    return enmType;
}
492
493DECLINLINE(bool) vboxVBVAExHSHasCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
494{
495 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
496
497 if (pVBVA)
498 {
499 uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
500 uint32_t indexRecordFree = pVBVA->indexRecordFree;
501
502 if (indexRecordFirst != indexRecordFree)
503 return true;
504 }
505
506 return !!ASMAtomicReadU32(&pCmdVbva->u32cCtls);
507}
508
/**
 * Checks whether new commands are ready for processing.
 *
 * @returns
 *   VINF_SUCCESS - there are commands in a queue, and the given thread is now
 *                  the processor (i.e. typically it would delegate processing
 *                  to a worker thread)
 *   VINF_EOF     - no commands in a queue
 *   VINF_ALREADY_INITIALIZED - another thread is already processing the commands
 *   VERR_INVALID_STATE - the VBVA is paused or pausing
 */
static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
    if (RT_SUCCESS(rc))
    {
        /* we are the processor now */
        if (vboxVBVAExHSHasCommands(pCmdVbva))
        {
            /* Advertise the processing state to the guest and keep the role. */
            vboxVBVAExHPHgEventSet(pCmdVbva);
            return VINF_SUCCESS;
        }

        vboxVBVAExHPProcessorRelease(pCmdVbva);
        return VINF_EOF;
    }
    if (rc == VERR_SEM_BUSY)
        return VINF_ALREADY_INITIALIZED;
    return VERR_INVALID_STATE;
}
534
/**
 * Initializes a VBVAEXHOSTCONTEXT: critical section, control-allocation cache
 * (unless disabled), control lists; leaves the context DISABLED with the
 * state set to PROCESSING.
 *
 * @returns VBox status code; on failure the context is left zeroed except for
 *          the partially created resources of the failing step.
 */
static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
    int rc = RTCritSectInit(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
#ifndef VBOXVDBG_MEMCACHE_DISABLE
        rc = RTMemCacheCreate(&pCmdVbva->CtlCache, sizeof (VBVAEXHOSTCTL),
                                0, /* size_t cbAlignment */
                                UINT32_MAX, /* uint32_t cMaxObjects */
                                NULL, /* PFNMEMCACHECTOR pfnCtor*/
                                NULL, /* PFNMEMCACHEDTOR pfnDtor*/
                                NULL, /* void *pvUser*/
                                0 /* uint32_t fFlags*/
                                );
        if (RT_SUCCESS(rc))
#endif
        {
            RTListInit(&pCmdVbva->GuestCtlList);
            RTListInit(&pCmdVbva->HostCtlList);
            /* NOTE(review): initial state is PROCESSING (not LISTENING) — presumably so
             * nothing runs until the first processor release; confirm against callers. */
            pCmdVbva->i32State = VBVAEXHOSTCONTEXT_STATE_PROCESSING;
            pCmdVbva->i32EnableState = VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
            return VINF_SUCCESS;
        }
#ifndef VBOXVDBG_MEMCACHE_DISABLE
        else
            WARN(("RTMemCacheCreate failed %d\n", rc));
#endif
    }
    else
        WARN(("RTCritSectInit failed %d\n", rc));

    return rc;
}
569
570DECLINLINE(bool) VBoxVBVAExHSIsEnabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
571{
572 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) >= VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
573}
574
575DECLINLINE(bool) VBoxVBVAExHSIsDisabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
576{
577 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) == VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
578}
579
/**
 * Enables the context on the given shared VBVA buffer.
 *
 * @returns VINF_SUCCESS, or VERR_INVALID_STATE if already enabled/paused.
 */
static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA)
{
    if (VBoxVBVAExHSIsEnabled(pCmdVbva))
    {
        WARN(("VBVAEx is enabled already\n"));
        return VERR_INVALID_STATE;
    }

    pCmdVbva->pVBVA = pVBVA;
    /* Reset guest-visible host event flags before advertising the enabled state. */
    pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
    ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
    return VINF_SUCCESS;
}
593
/**
 * Disables the context; a no-op when already disabled.
 * NOTE(review): pVBVA is intentionally left set — presumably callers still
 * reference the buffer during tear-down; confirm.
 */
static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    if (VBoxVBVAExHSIsDisabled(pCmdVbva))
        return VINF_SUCCESS;

    ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
    return VINF_SUCCESS;
}
602
/**
 * Tears down a VBVAEXHOSTCONTEXT created by VBoxVBVAExHSInit.
 * Both control lists must already be empty and the processor stopped.
 */
static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    /* ensure the processor is stopped */
    Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);

    /* ensure no one tries to submit the command */
    if (pCmdVbva->pVBVA)
        pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;

    Assert(RTListIsEmpty(&pCmdVbva->GuestCtlList));
    Assert(RTListIsEmpty(&pCmdVbva->HostCtlList));

    RTCritSectDelete(&pCmdVbva->CltCritSect);

#ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMemCacheDestroy(pCmdVbva->CtlCache);
#endif

    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
}
623
624static int vboxVBVAExHSSaveGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
625{
626 int rc = SSMR3PutU32(pSSM, pCtl->enmType);
627 AssertRCReturn(rc, rc);
628 rc = SSMR3PutU32(pSSM, pCtl->u.cmd.cbCmd);
629 AssertRCReturn(rc, rc);
630 rc = SSMR3PutU32(pSSM, (uint32_t)(pCtl->u.cmd.pu8Cmd - pu8VramBase));
631 AssertRCReturn(rc, rc);
632
633 return VINF_SUCCESS;
634}
635
/**
 * Saves all queued guest controls; caller holds CltCritSect.
 * The stream is terminated with a zero u32 (see vboxVBVAExHSLoadGuestCtl).
 *
 * @returns VBox status code; VERR_INVALID_STATE unless the context is PAUSED.
 */
static int vboxVBVAExHSSaveStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
{
    if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    {
        WARN(("vbva not paused\n"));
        return VERR_INVALID_STATE;
    }

    VBVAEXHOSTCTL* pCtl;
    int rc;
    RTListForEach(&pCmdVbva->GuestCtlList, pCtl, VBVAEXHOSTCTL, Node)
    {
        rc = vboxVBVAExHSSaveGuestCtl(pCmdVbva, pCtl, pu8VramBase, pSSM);
        AssertRCReturn(rc, rc);
    }

    /* Terminator: a zero type marks the end of the control stream. */
    rc = SSMR3PutU32(pSSM, 0);
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}
657/* Saves state
658 * @returns - same as VBoxVBVAExHSCheckCommands, or failure on load state fail
659 */
660static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
661{
662 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
663 if (RT_FAILURE(rc))
664 {
665 WARN(("RTCritSectEnter failed %d\n", rc));
666 return rc;
667 }
668
669 rc = vboxVBVAExHSSaveStateLocked(pCmdVbva, pu8VramBase, pSSM);
670 if (RT_FAILURE(rc))
671 WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
672
673 RTCritSectLeave(&pCmdVbva->CltCritSect);
674
675 return rc;
676}
677
678static int vboxVBVAExHSLoadGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
679{
680 uint32_t u32;
681 int rc = SSMR3GetU32(pSSM, &u32);
682 AssertRCReturn(rc, rc);
683
684 if (!u32)
685 return VINF_EOF;
686
687 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(pCmdVbva, (VBVAEXHOSTCTL_TYPE)u32);
688 if (!pHCtl)
689 {
690 WARN(("VBoxVBVAExHCtlCreate failed\n"));
691 return VERR_NO_MEMORY;
692 }
693
694 rc = SSMR3GetU32(pSSM, &u32);
695 AssertRCReturn(rc, rc);
696 pHCtl->u.cmd.cbCmd = u32;
697
698 rc = SSMR3GetU32(pSSM, &u32);
699 AssertRCReturn(rc, rc);
700 pHCtl->u.cmd.pu8Cmd = pu8VramBase + u32;
701
702 RTListAppend(&pCmdVbva->GuestCtlList, &pHCtl->Node);
703 ++pCmdVbva->u32cCtls;
704
705 return VINF_SUCCESS;
706}
707
708
/**
 * Loads all saved guest controls; caller holds CltCritSect.
 *
 * @returns VBox status code; VERR_INVALID_STATE unless the context is PAUSED.
 */
static int vboxVBVAExHSLoadStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
{
    if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    {
        WARN(("vbva not stopped\n"));
        return VERR_INVALID_STATE;
    }

    int rc;

    /* VINF_EOF is a success status, so AssertRCReturn passes it through and
     * the loop exits on the stream terminator; real errors return early. */
    do {
        rc = vboxVBVAExHSLoadGuestCtl(pCmdVbva, pu8VramBase, pSSM, u32Version);
        AssertRCReturn(rc, rc);
    } while (VINF_EOF != rc);

    return VINF_SUCCESS;
}
726
727/* Loads state
728 * @returns - same as VBoxVBVAExHSCheckCommands, or failure on load state fail
729 */
730static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
731{
732 Assert(VGA_SAVEDSTATE_VERSION_3D <= u32Version);
733 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
734 if (RT_FAILURE(rc))
735 {
736 WARN(("RTCritSectEnter failed %d\n", rc));
737 return rc;
738 }
739
740 rc = vboxVBVAExHSLoadStateLocked(pCmdVbva, pu8VramBase, pSSM, u32Version);
741 if (RT_FAILURE(rc))
742 WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
743
744 RTCritSectLeave(&pCmdVbva->CltCritSect);
745
746 return rc;
747}
748
/** Who submitted a control — decides which list it is queued on. */
typedef enum
{
    VBVAEXHOSTCTL_SOURCE_GUEST = 0,
    VBVAEXHOSTCTL_SOURCE_HOST
} VBVAEXHOSTCTL_SOURCE;
754
755
/**
 * Queues a control on the guest or host list and kicks command processing.
 *
 * @returns Status of VBoxVBVAExHSCheckCommands on success;
 *          VERR_INVALID_STATE when the context is disabled.
 */
static int VBoxVBVAExHCtlSubmit(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
{
    /* Unlocked pre-check; repeated under the lock below to close the race with disable. */
    if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
    {
        Log(("cmd vbva not enabled\n"));
        return VERR_INVALID_STATE;
    }

    pCtl->pfnComplete = pfnComplete;
    pCtl->pvComplete = pvComplete;

    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
        /* Re-check now that we hold the list lock. */
        if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
        {
            Log(("cmd vbva not enabled\n"));
            RTCritSectLeave(&pCmdVbva->CltCritSect);
            return VERR_INVALID_STATE;
        }

        if (enmSource > VBVAEXHOSTCTL_SOURCE_GUEST)
        {
            RTListAppend(&pCmdVbva->HostCtlList, &pCtl->Node);
        }
        else
            RTListAppend(&pCmdVbva->GuestCtlList, &pCtl->Node);

        ASMAtomicIncU32(&pCmdVbva->u32cCtls);

        RTCritSectLeave(&pCmdVbva->CltCritSect);

        /* Wake up / become the processor so the control gets picked up. */
        rc = VBoxVBVAExHSCheckCommands(pCmdVbva);
    }
    else
        WARN(("RTCritSectEnter failed %d\n", rc));

    return rc;
}
795
796#ifdef VBOX_WITH_CRHGSMI
/** Per-source screen info plus the map of targets it drives (currently unused; see aSources below). */
typedef struct VBOXVDMA_SOURCE
{
    VBVAINFOSCREEN Screen;                          /**< Screen geometry/config. */
    VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap); /**< Bitmap of target screens. */
} VBOXVDMA_SOURCE;
802#endif
803
/**
 * The VDMA host device state.
 */
typedef struct VBOXVDMAHOST
{
    PHGSMIINSTANCE pHgsmi;      /**< HGSMI instance used for guest communication. */
    PVGASTATE pVGAState;        /**< Owning VGA device state. */
#ifdef VBOX_WITH_CRHGSMI
    VBVAEXHOSTCONTEXT CmdVbva;  /**< Command VBVA context (ring buffer + control queues). */
    VBOXVDMATHREAD Thread;      /**< Worker thread servicing CmdVbva. */
    VBOXCRCMD_SVRINFO CrSrvInfo;/**< Chromium command server entry points. */
    VBVAEXHOSTCTL* pCurRemainingHostCtl; /**< Host ctl still being processed across calls. */
    RTSEMEVENTMULTI HostCrCtlCompleteEvent; /**< Signalled when host CR controls complete. */
    int32_t volatile i32cHostCrCtlCompleted; /**< Completion counter paired with the event. */
    RTCRITSECT CalloutCritSect; /**< Protects the callout list (see vboxCmdVBVACmdCallout). */
//    VBOXVDMA_SOURCE aSources[VBOX_VIDEO_MAX_SCREENS];
#endif
#ifdef VBOX_VDMA_WITH_WATCHDOG
    PTMTIMERR3 WatchDogTimer;   /**< Watchdog timer handle. */
#endif
} VBOXVDMAHOST, *PVBOXVDMAHOST;
822
823#ifdef VBOX_WITH_CRHGSMI
824
825void VBoxVDMAThreadNotifyConstructSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
826{
827 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_CREATING);
828 PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
829 void *pvChanged = pThread->pvChanged;
830
831 pThread->pfnChanged = NULL;
832 pThread->pvChanged = NULL;
833
834 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_CREATED);
835
836 if (pfnChanged)
837 pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
838}
839
840void VBoxVDMAThreadNotifyTerminatingSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
841{
842 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_TERMINATING);
843 PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
844 void *pvChanged = pThread->pvChanged;
845
846 pThread->pfnChanged = NULL;
847 pThread->pvChanged = NULL;
848
849 if (pfnChanged)
850 pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
851}
852
853DECLINLINE(bool) VBoxVDMAThreadIsTerminating(PVBOXVDMATHREAD pThread)
854{
855 return ASMAtomicUoReadU32(&pThread->u32State) == VBOXVDMATHREAD_STATE_TERMINATING;
856}
857
858void VBoxVDMAThreadInit(PVBOXVDMATHREAD pThread)
859{
860 memset(pThread, 0, sizeof (*pThread));
861 pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
862}
863
/**
 * Reaps a terminating worker thread and releases its resources.
 *
 * @returns VINF_SUCCESS when already TERMINATED or after a successful join;
 *          VERR_INVALID_STATE for CREATING/CREATED (must terminate first).
 */
int VBoxVDMAThreadCleanup(PVBOXVDMATHREAD pThread)
{
    uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
    switch (u32State)
    {
        case VBOXVDMATHREAD_STATE_TERMINATED:
            /* Nothing left to clean up. */
            return VINF_SUCCESS;
        case VBOXVDMATHREAD_STATE_TERMINATING:
        {
            /* Join the worker, then destroy its event and publish TERMINATED. */
            int rc = RTThreadWait(pThread->hWorkerThread, RT_INDEFINITE_WAIT, NULL);
            if (!RT_SUCCESS(rc))
            {
                WARN(("RTThreadWait failed %d\n", rc));
                return rc;
            }

            RTSemEventDestroy(pThread->hEvent);

            ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATED);
            return VINF_SUCCESS;
        }
        default:
            WARN(("invalid state"));
            return VERR_INVALID_STATE;
    }
}
890
/**
 * Creates the VDMA worker thread.
 *
 * @returns VBox status code; on failure the state is reset to TERMINATED.
 * @param pThread    Thread tracking structure (cleaned up first if needed).
 * @param pfnThread  Worker thread entry point.
 * @param pvThread   User argument for the worker.
 * @param pfnCreated One-shot callback fired when the worker reports creation done.
 * @param pvCreated  User context for pfnCreated.
 */
int VBoxVDMAThreadCreate(PVBOXVDMATHREAD pThread, PFNRTTHREAD pfnThread, void *pvThread, PFNVBOXVDMATHREAD_CHANGED pfnCreated, void*pvCreated)
{
    int rc = VBoxVDMAThreadCleanup(pThread);
    if (RT_FAILURE(rc))
    {
        WARN(("VBoxVDMAThreadCleanup failed %d\n", rc));
        return rc;
    }

    rc = RTSemEventCreate(&pThread->hEvent);
    if (RT_SUCCESS(rc))
    {
        /* Publish CREATING and the callback before starting the worker so
         * VBoxVDMAThreadNotifyConstructSucceeded finds them in place. */
        pThread->u32State = VBOXVDMATHREAD_STATE_CREATING;
        pThread->pfnChanged = pfnCreated;
        pThread->pvChanged = pvCreated;
        rc = RTThreadCreate(&pThread->hWorkerThread, pfnThread, pvThread, 0, RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "VDMA");
        if (RT_SUCCESS(rc))
            return VINF_SUCCESS;
        else
            WARN(("RTThreadCreate failed %d\n", rc));

        RTSemEventDestroy(pThread->hEvent);
    }
    else
        WARN(("RTSemEventCreate failed %d\n", rc));

    pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;

    return rc;
}
921
922DECLINLINE(int) VBoxVDMAThreadEventNotify(PVBOXVDMATHREAD pThread)
923{
924 int rc = RTSemEventSignal(pThread->hEvent);
925 AssertRC(rc);
926 return rc;
927}
928
929DECLINLINE(int) VBoxVDMAThreadEventWait(PVBOXVDMATHREAD pThread, RTMSINTERVAL cMillies)
930{
931 int rc = RTSemEventWait(pThread->hEvent, cMillies);
932 AssertRC(rc);
933 return rc;
934}
935
936int VBoxVDMAThreadTerm(PVBOXVDMATHREAD pThread, PFNVBOXVDMATHREAD_CHANGED pfnTerminated, void*pvTerminated, bool fNotify)
937{
938 int rc;
939 do
940 {
941 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
942 switch (u32State)
943 {
944 case VBOXVDMATHREAD_STATE_CREATED:
945 pThread->pfnChanged = pfnTerminated;
946 pThread->pvChanged = pvTerminated;
947 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATING);
948 if (fNotify)
949 {
950 rc = VBoxVDMAThreadEventNotify(pThread);
951 AssertRC(rc);
952 }
953 return VINF_SUCCESS;
954 case VBOXVDMATHREAD_STATE_TERMINATING:
955 case VBOXVDMATHREAD_STATE_TERMINATED:
956 {
957 WARN(("thread is marked to termination or terminated\nn"));
958 return VERR_INVALID_STATE;
959 }
960 case VBOXVDMATHREAD_STATE_CREATING:
961 {
962 /* wait till the thread creation is completed */
963 WARN(("concurrent thread create/destron\n"));
964 RTThreadYield();
965 continue;
966 }
967 default:
968 WARN(("invalid state"));
969 return VERR_INVALID_STATE;
970 }
971 } while (1);
972
973 WARN(("should never be here\n"));
974 return VERR_INTERNAL_ERROR;
975}
976
977static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource);
978
979typedef DECLCALLBACK(void) FNVBOXVDMACRCTL_CALLBACK(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext);
980typedef FNVBOXVDMACRCTL_CALLBACK *PFNVBOXVDMACRCTL_CALLBACK;
981
/**
 * Reference-counted private header prepended to every VBOXVDMACMD_CHROMIUM_CTL
 * allocated by vboxVDMACrCtlCreate.  Callers see only the embedded Cmd member.
 */
typedef struct VBOXVDMACMD_CHROMIUM_CTL_PRIVATE
{
    uint32_t cRefs;                         /**< Reference count; freed when it drops to zero. */
    int32_t rc;                             /**< Completion status (VERR_NOT_IMPLEMENTED until completed). */
    PFNVBOXVDMACRCTL_CALLBACK pfnCompletion;/**< Completion callback. */
    void *pvCompletion;                     /**< User context for pfnCompletion. */
    VBOXVDMACMD_CHROMIUM_CTL Cmd;           /**< The public command; payload follows in the same allocation. */
} VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, *PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE;

/** Recovers the private header from a public command pointer (container-of). */
#define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(_p) ((PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)(((uint8_t*)(_p)) - RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd)))
992
993static PVBOXVDMACMD_CHROMIUM_CTL vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE enmCmd, uint32_t cbCmd)
994{
995 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)RTMemAllocZ(cbCmd + RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd));
996 Assert(pHdr);
997 if (pHdr)
998 {
999 pHdr->cRefs = 1;
1000 pHdr->rc = VERR_NOT_IMPLEMENTED;
1001 pHdr->Cmd.enmType = enmCmd;
1002 pHdr->Cmd.cbCmd = cbCmd;
1003 return &pHdr->Cmd;
1004 }
1005
1006 return NULL;
1007}
1008
1009DECLINLINE(void) vboxVDMACrCtlRelease (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1010{
1011 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1012 uint32_t cRefs = ASMAtomicDecU32(&pHdr->cRefs);
1013 if(!cRefs)
1014 {
1015 RTMemFree(pHdr);
1016 }
1017}
1018
1019DECLINLINE(void) vboxVDMACrCtlRetain (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1020{
1021 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1022 ASMAtomicIncU32(&pHdr->cRefs);
1023}
1024
1025DECLINLINE(int) vboxVDMACrCtlGetRc (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1026{
1027 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1028 return pHdr->rc;
1029}
1030
/** Completion callback: signals the event semaphore passed as context (see vboxVDMACrCtlPost). */
static DECLCALLBACK(void) vboxVDMACrCtlCbSetEvent(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
{
    RTSemEventSignal((RTSEMEVENT)pvContext);
}
1035
1036static DECLCALLBACK(void) vboxVDMACrCtlCbReleaseCmd(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
1037{
1038 vboxVDMACrCtlRelease(pCmd);
1039}
1040
1041
1042static int vboxVDMACrCtlPostAsync (PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd, PFNVBOXVDMACRCTL_CALLBACK pfnCompletion, void *pvCompletion)
1043{
1044 if ( pVGAState->pDrv
1045 && pVGAState->pDrv->pfnCrHgsmiControlProcess)
1046 {
1047 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1048 pHdr->pfnCompletion = pfnCompletion;
1049 pHdr->pvCompletion = pvCompletion;
1050 pVGAState->pDrv->pfnCrHgsmiControlProcess(pVGAState->pDrv, pCmd, cbCmd);
1051 return VINF_SUCCESS;
1052 }
1053#ifdef DEBUG_misha
1054 Assert(0);
1055#endif
1056 return VERR_NOT_SUPPORTED;
1057}
1058
/**
 * Posts a chromium control command and blocks until it completes.
 *
 * A temporary event semaphore is created and signalled from the completion
 * callback (vboxVDMACrCtlCbSetEvent).
 *
 * @returns VBox status code from semaphore creation, submission, or the wait.
 * @param   pVGAState   The VGA state.
 * @param   pCmd        The command to post.
 * @param   cbCmd       Size of the command in bytes.
 */
static int vboxVDMACrCtlPost(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd)
{
    RTSEMEVENT hComplEvent;
    int rc = RTSemEventCreate(&hComplEvent);
    AssertRC(rc);
    if(RT_SUCCESS(rc))
    {
        rc = vboxVDMACrCtlPostAsync(pVGAState, pCmd, cbCmd, vboxVDMACrCtlCbSetEvent, (void*)hComplEvent);
#ifdef DEBUG_misha
        AssertRC(rc);
#endif
        if (RT_SUCCESS(rc))
        {
            rc = RTSemEventWaitNoResume(hComplEvent, RT_INDEFINITE_WAIT);
            AssertRC(rc);
            if(RT_SUCCESS(rc))
            {
                RTSemEventDestroy(hComplEvent);
            }
            /* NOTE(review): when the wait fails (e.g. VERR_INTERRUPTED) the
             * semaphore is NOT destroyed - presumably because the completion
             * callback may still signal it later and destroying it here would
             * be a use-after-free; this leaks the semaphore handle. Confirm
             * this is intentional. */
        }
        else
        {
            /* the command is completed */
            RTSemEventDestroy(hComplEvent);
        }
    }
    return rc;
}
1087
/** Completion context shared between vboxVDMACrHgcmSubmitSync and its callback.
 *  ("CYNC" looks like a typo for "SYNC", kept as-is since it is a type name.) */
typedef struct VDMA_VBVA_CTL_CYNC_COMPLETION
{
    int rc;             /**< Status reported by the completion callback. */
    RTSEMEVENT hEvent;  /**< Signalled by the callback to wake the submitter. */
} VDMA_VBVA_CTL_CYNC_COMPLETION;
1093
1094static DECLCALLBACK(void) vboxVDMACrHgcmSubmitSyncCompletion(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
1095{
1096 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvCompletion;
1097 pData->rc = rc;
1098 rc = RTSemEventSignal(pData->hEvent);
1099 if (!RT_SUCCESS(rc))
1100 WARN(("RTSemEventSignal failed %d\n", rc));
1101}
1102
/**
 * Submits a control command to the chromium HGCM service and blocks until the
 * completion callback fires.
 *
 * @returns VBox status code; on successful submission and wait, the status
 *          the completion callback reported for the command itself.
 * @param   pVdma   The VDMA instance.
 * @param   pCtl    The control command to submit.
 * @param   cbCtl   Size of the control command in bytes.
 */
static int vboxVDMACrHgcmSubmitSync(struct VBOXVDMAHOST *pVdma, VBOXCRCMDCTL* pCtl, uint32_t cbCtl)
{
    VDMA_VBVA_CTL_CYNC_COMPLETION Data;
    Data.rc = VERR_NOT_IMPLEMENTED; /* overwritten by the completion callback */
    int rc = RTSemEventCreate(&Data.hEvent);
    if (!RT_SUCCESS(rc))
    {
        WARN(("RTSemEventCreate failed %d\n", rc));
        return rc;
    }

    /* Start with an empty callout list; the service may link entries onto it. */
    pCtl->CalloutList.List.pNext = NULL;

    PVGASTATE pVGAState = pVdma->pVGAState;
    rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCtl, cbCtl, vboxVDMACrHgcmSubmitSyncCompletion, &Data);
    if (RT_SUCCESS(rc))
    {
        rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
        if (RT_SUCCESS(rc))
        {
            /* Propagate the command's own completion status. */
            rc = Data.rc;
            if (!RT_SUCCESS(rc))
            {
                WARN(("pfnCrHgcmCtlSubmit command failed %d\n", rc));
            }

        }
        else
            WARN(("RTSemEventWait failed %d\n", rc));
    }
    else
        WARN(("pfnCrHgcmCtlSubmit failed %d\n", rc));


    RTSemEventDestroy(Data.hEvent);

    return rc;
}
1141
1142static int vdmaVBVACtlDisableSync(PVBOXVDMAHOST pVdma)
1143{
1144 VBVAEXHOSTCTL HCtl;
1145 HCtl.enmType = VBVAEXHOSTCTL_TYPE_GHH_DISABLE;
1146 int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1147 if (RT_FAILURE(rc))
1148 {
1149 Log(("vdmaVBVACtlSubmitSync failed %d\n", rc));
1150 return rc;
1151 }
1152
1153 vgaUpdateDisplayAll(pVdma->pVGAState, /* fFailOnResize = */ false);
1154
1155 return VINF_SUCCESS;
1156}
1157
/**
 * Iterator over the host controls still pending while VBVA is shutting down:
 * completes the previously handed-out control (if any) with @a prevCmdRc and
 * returns the next one; when none remain, VBVA is actually disabled so all
 * subsequent host commands go over HGCM.
 *
 * @returns Pointer to the next host control command body, or NULL when done.
 * @param   hClient     The VDMA instance (VBOXVDMAHOST).
 * @param   pcbCtl      Where to return the size of the returned command (0 when done).
 * @param   prevCmdRc   Completion status of the previously returned command.
 */
static DECLCALLBACK(uint8_t*) vboxVDMACrHgcmHandleEnableRemainingHostCommand(HVBOXCRCMDCTL_REMAINING_HOST_COMMAND hClient, uint32_t *pcbCtl, int prevCmdRc)
{
    struct VBOXVDMAHOST *pVdma = hClient;
    if (!pVdma->pCurRemainingHostCtl)
    {
        /* disable VBVA, all subsequent host commands will go HGCM way */
        VBoxVBVAExHSDisable(&pVdma->CmdVbva);
    }
    else
    {
        VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pVdma->pCurRemainingHostCtl, prevCmdRc);
    }

    pVdma->pCurRemainingHostCtl = VBoxVBVAExHPCheckHostCtlOnDisable(&pVdma->CmdVbva);
    if (pVdma->pCurRemainingHostCtl)
    {
        *pcbCtl = pVdma->pCurRemainingHostCtl->u.cmd.cbCmd;
        return pVdma->pCurRemainingHostCtl->u.cmd.pu8Cmd;
    }

    *pcbCtl = 0;
    return NULL;
}
1181
/** Called by the HGCM service when its termination handshake is done; only
 *  sanity-checks that the VBVA context and worker thread are in the expected
 *  states (processing / terminating). */
static DECLCALLBACK(void) vboxVDMACrHgcmNotifyTerminatingDoneCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient)
{
    struct VBOXVDMAHOST *pVdma = hClient;
    Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pVdma->Thread.u32State == VBOXVDMATHREAD_STATE_TERMINATING);
}
1188
1189static DECLCALLBACK(int) vboxVDMACrHgcmNotifyTerminatingCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient, VBOXCRCMDCTL_HGCMENABLE_DATA *pHgcmEnableData)
1190{
1191 struct VBOXVDMAHOST *pVdma = hClient;
1192 VBVAEXHOSTCTL HCtl;
1193 HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD;
1194 int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1195
1196 pHgcmEnableData->hRHCmd = pVdma;
1197 pHgcmEnableData->pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1198
1199 if (RT_FAILURE(rc))
1200 {
1201 if (rc == VERR_INVALID_STATE)
1202 rc = VINF_SUCCESS;
1203 else
1204 WARN(("vdmaVBVACtlSubmitSync failed %d\n", rc));
1205 }
1206
1207 return rc;
1208}
1209
1210static int vboxVDMACrHgcmHandleEnable(struct VBOXVDMAHOST *pVdma)
1211{
1212 VBOXCRCMDCTL_ENABLE Enable;
1213 Enable.Hdr.enmType = VBOXCRCMDCTL_TYPE_ENABLE;
1214 Enable.Data.hRHCmd = pVdma;
1215 Enable.Data.pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1216
1217 int rc = vboxVDMACrHgcmSubmitSync(pVdma, &Enable.Hdr, sizeof (Enable));
1218 Assert(!pVdma->pCurRemainingHostCtl);
1219 if (RT_SUCCESS(rc))
1220 {
1221 Assert(!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1222 return VINF_SUCCESS;
1223 }
1224
1225 Assert(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1226 WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));
1227
1228 return rc;
1229}
1230
1231static int vdmaVBVAEnableProcess(struct VBOXVDMAHOST *pVdma, uint32_t u32Offset)
1232{
1233 if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1234 {
1235 WARN(("vdma VBVA is already enabled\n"));
1236 return VERR_INVALID_STATE;
1237 }
1238
1239 VBVABUFFER *pVBVA = (VBVABUFFER *)HGSMIOffsetToPointerHost(pVdma->pHgsmi, u32Offset);
1240 if (!pVBVA)
1241 {
1242 WARN(("invalid offset %d\n", u32Offset));
1243 return VERR_INVALID_PARAMETER;
1244 }
1245
1246 if (!pVdma->CrSrvInfo.pfnEnable)
1247 {
1248#ifdef DEBUG_misha
1249 WARN(("pfnEnable is NULL\n"));
1250 return VERR_NOT_SUPPORTED;
1251#endif
1252 }
1253
1254 int rc = VBoxVBVAExHSEnable(&pVdma->CmdVbva, pVBVA);
1255 if (RT_SUCCESS(rc))
1256 {
1257 VBOXCRCMDCTL_DISABLE Disable;
1258 Disable.Hdr.enmType = VBOXCRCMDCTL_TYPE_DISABLE;
1259 Disable.Data.hNotifyTerm = pVdma;
1260 Disable.Data.pfnNotifyTerm = vboxVDMACrHgcmNotifyTerminatingCb;
1261 Disable.Data.pfnNotifyTermDone = vboxVDMACrHgcmNotifyTerminatingDoneCb;
1262 rc = vboxVDMACrHgcmSubmitSync(pVdma, &Disable.Hdr, sizeof (Disable));
1263 if (RT_SUCCESS(rc))
1264 {
1265 PVGASTATE pVGAState = pVdma->pVGAState;
1266 VBOXCRCMD_SVRENABLE_INFO Info;
1267 Info.hCltScr = pVGAState->pDrv;
1268 Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
1269 Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
1270 Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
1271 rc = pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
1272 if (RT_SUCCESS(rc))
1273 return VINF_SUCCESS;
1274 else
1275 WARN(("pfnEnable failed %d\n", rc));
1276
1277 vboxVDMACrHgcmHandleEnable(pVdma);
1278 }
1279 else
1280 WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));
1281
1282 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1283 }
1284 else
1285 WARN(("VBoxVBVAExHSEnable failed %d\n", rc));
1286
1287 return rc;
1288}
1289
/**
 * Disables the command VBVA: tells the chromium command server to stop and,
 * when @a fDoHgcmEnable is set, re-enables the HGCM command path (rolling the
 * server back to VBVA mode if that fails).
 *
 * @returns VBox status code; success if VBVA was already disabled.
 * @param   pVdma           The VDMA instance.
 * @param   fDoHgcmEnable   Whether to switch host commands back to HGCM.
 */
static int vdmaVBVADisableProcess(struct VBOXVDMAHOST *pVdma, bool fDoHgcmEnable)
{
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    {
        Log(("vdma VBVA is already disabled\n"));
        return VINF_SUCCESS;
    }

    int rc = pVdma->CrSrvInfo.pfnDisable(pVdma->CrSrvInfo.hSvr);
    if (RT_SUCCESS(rc))
    {
        if (fDoHgcmEnable)
        {
            PVGASTATE pVGAState = pVdma->pVGAState;

            /* disable is a bit tricky
             * we need to ensure the host ctl commands do not come out of order
             * and do not come over HGCM channel until after it is enabled */
            rc = vboxVDMACrHgcmHandleEnable(pVdma);
            if (RT_SUCCESS(rc))
            {
                vdmaVBVANotifyDisable(pVGAState);
                return VINF_SUCCESS;
            }

            /* Rollback: HGCM re-enable failed, put the server back into VBVA
             * mode so command processing can continue. */
            VBOXCRCMD_SVRENABLE_INFO Info;
            Info.hCltScr = pVGAState->pDrv;
            Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
            Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
            Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
            pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
        }
    }
    else
        WARN(("pfnDisable failed %d\n", rc));

    return rc;
}
1328
1329static int vboxVDMACrHostCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd, bool *pfContinue)
1330{
1331 *pfContinue = true;
1332
1333 switch (pCmd->enmType)
1334 {
1335 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1336 {
1337 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1338 {
1339 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1340 return VERR_INVALID_STATE;
1341 }
1342 return pVdma->CrSrvInfo.pfnHostCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1343 }
1344 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1345 {
1346 int rc = vdmaVBVADisableProcess(pVdma, true);
1347 if (RT_FAILURE(rc))
1348 {
1349 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1350 return rc;
1351 }
1352
1353 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
1354 }
1355 case VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD:
1356 {
1357 int rc = vdmaVBVADisableProcess(pVdma, false);
1358 if (RT_FAILURE(rc))
1359 {
1360 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1361 return rc;
1362 }
1363
1364 rc = VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, true);
1365 if (RT_FAILURE(rc))
1366 {
1367 WARN(("VBoxVDMAThreadTerm failed %d\n", rc));
1368 return rc;
1369 }
1370
1371 *pfContinue = false;
1372 return VINF_SUCCESS;
1373 }
1374 case VBVAEXHOSTCTL_TYPE_HH_SAVESTATE:
1375 {
1376 PVGASTATE pVGAState = pVdma->pVGAState;
1377 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1378 int rc = VBoxVBVAExHSSaveState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM);
1379 if (RT_FAILURE(rc))
1380 {
1381 WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
1382 return rc;
1383 }
1384 return pVdma->CrSrvInfo.pfnSaveState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM);
1385 }
1386 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE:
1387 {
1388 PVGASTATE pVGAState = pVdma->pVGAState;
1389 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1390
1391 int rc = VBoxVBVAExHSLoadState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1392 if (RT_FAILURE(rc))
1393 {
1394 WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
1395 return rc;
1396 }
1397
1398 rc = pVdma->CrSrvInfo.pfnLoadState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1399 if (RT_FAILURE(rc))
1400 {
1401 WARN(("pfnLoadState failed %d\n", rc));
1402 return rc;
1403 }
1404
1405 return VINF_SUCCESS;
1406 }
1407 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE:
1408 {
1409 PVGASTATE pVGAState = pVdma->pVGAState;
1410
1411 for (uint32_t i = 0; i < pVGAState->cMonitors; ++i)
1412 {
1413 VBVAINFOSCREEN CurScreen;
1414 VBVAINFOVIEW CurView;
1415
1416 int rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
1417 if (RT_FAILURE(rc))
1418 {
1419 WARN(("VBVAGetInfoViewAndScreen failed %d\n", rc));
1420 return rc;
1421 }
1422
1423 rc = VBVAInfoScreen(pVGAState, &CurScreen);
1424 if (RT_FAILURE(rc))
1425 {
1426 WARN(("VBVAInfoScreen failed %d\n", rc));
1427 return rc;
1428 }
1429 }
1430
1431 return VINF_SUCCESS;
1432 }
1433 default:
1434 WARN(("unexpected host ctl type %d\n", pCmd->enmType));
1435 return VERR_INVALID_PARAMETER;
1436 }
1437}
1438
/**
 * Applies one guest resize request entry: forwards it to the chromium server
 * and then updates the view/screen info of every target monitor in the entry's
 * target bitmap.
 *
 * @returns VBox status code.
 * @param   pVdma   The VDMA instance.
 * @param   pEntry  The resize entry (screen info + target monitor bitmap).
 */
static int vboxVDMACrGuestCtlResizeEntryProcess(struct VBOXVDMAHOST *pVdma, VBOXCMDVBVA_RESIZE_ENTRY *pEntry)
{
    PVGASTATE pVGAState = pVdma->pVGAState;
    VBVAINFOSCREEN Screen = pEntry->Screen;
    VBVAINFOVIEW View;
    VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
    uint32_t u32ViewIndex = Screen.u32ViewIndex;
    uint16_t u16Flags = Screen.u16Flags;
    bool fDisable = false;

    memcpy(aTargetMap, pEntry->aTargetMap, sizeof (aTargetMap));

    /* Ignore bits beyond the configured monitor count. */
    ASMBitClearRange(aTargetMap, pVGAState->cMonitors, VBOX_VIDEO_MAX_SCREENS);

    if (u16Flags & VBVA_SCREEN_F_DISABLED)
    {
        /* Disable request: replace the screen info with a canonical
         * "active but disabled" record, keeping only the view index. */
        fDisable = true;
        memset(&Screen, 0, sizeof (Screen));
        Screen.u32ViewIndex = u32ViewIndex;
        Screen.u16Flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED;
    }

    /* NOTE(review): '>' lets u32ViewIndex == cMonitors through; looks like it
     * should be '>=' (valid indices are 0..cMonitors-1) - confirm before
     * changing, the special 0xffffffff value is handled below. */
    if (u32ViewIndex > pVGAState->cMonitors)
    {
        if (u32ViewIndex != 0xffffffff)
        {
            WARN(("invalid view index\n"));
            return VERR_INVALID_PARAMETER;
        }
        else if (!fDisable)
        {
            WARN(("0xffffffff view index only valid for disable requests\n"));
            return VERR_INVALID_PARAMETER;
        }
    }

    View.u32ViewOffset = 0;
    View.u32ViewSize = Screen.u32LineSize * Screen.u32Height + Screen.u32StartOffset;
    View.u32MaxScreenSize = View.u32ViewSize + Screen.u32Width + 1; /* <- make VBVAInfoScreen logic (offEnd < pView->u32MaxScreenSize) happy */

    int rc = VINF_SUCCESS;

    /* Let the chromium server react to the resize first. */
    rc = pVdma->CrSrvInfo.pfnResize(pVdma->CrSrvInfo.hSvr, &Screen, aTargetMap);
    if (RT_FAILURE(rc))
    {
        WARN(("pfnResize failed %d\n", rc));
        return rc;
    }

    /* Apply view+screen info to each monitor set in the target bitmap. */
    for (int i = ASMBitFirstSet(aTargetMap, pVGAState->cMonitors);
            i >= 0;
            i = ASMBitNextSet(aTargetMap, pVGAState->cMonitors, i))
    {
        Screen.u32ViewIndex = i;

        VBVAINFOSCREEN CurScreen;
        VBVAINFOVIEW CurView;

        rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
        AssertRC(rc);

        /* Skip monitors that already have exactly this configuration. */
        if (!memcmp(&Screen, &CurScreen, sizeof (CurScreen)))
            continue;

        if (!fDisable || !CurView.u32ViewSize)
        {
            View.u32ViewIndex = Screen.u32ViewIndex;

            rc = VBVAInfoView(pVGAState, &View);
            if (RT_FAILURE(rc))
            {
                WARN(("VBVAInfoView failed %d\n", rc));
                break;
            }
        }

        rc = VBVAInfoScreen(pVGAState, &Screen);
        if (RT_FAILURE(rc))
        {
            WARN(("VBVAInfoScreen failed %d\n", rc));
            break;
        }
    }

    if (RT_FAILURE(rc))
        return rc;

    /* NOTE(review): restores the local copy's view index only; Screen is a
     * local, so this assignment has no effect outside the function. */
    Screen.u32ViewIndex = u32ViewIndex;

    return rc;
}
1530
1531static int vboxVDMACrGuestCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd)
1532{
1533 VBVAEXHOSTCTL_TYPE enmType = pCmd->enmType;
1534 switch (enmType)
1535 {
1536 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1537 {
1538 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1539 {
1540 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1541 return VERR_INVALID_STATE;
1542 }
1543 return pVdma->CrSrvInfo.pfnGuestCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1544 }
1545 case VBVAEXHOSTCTL_TYPE_GHH_RESIZE:
1546 {
1547 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1548 {
1549 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1550 return VERR_INVALID_STATE;
1551 }
1552
1553 uint32_t cbCmd = pCmd->u.cmd.cbCmd;
1554
1555 if (cbCmd % sizeof (VBOXCMDVBVA_RESIZE_ENTRY))
1556 {
1557 WARN(("invalid buffer size\n"));
1558 return VERR_INVALID_PARAMETER;
1559 }
1560
1561 uint32_t cElements = cbCmd / sizeof (VBOXCMDVBVA_RESIZE_ENTRY);
1562 if (!cElements)
1563 {
1564 WARN(("invalid buffer size\n"));
1565 return VERR_INVALID_PARAMETER;
1566 }
1567
1568 VBOXCMDVBVA_RESIZE *pResize = (VBOXCMDVBVA_RESIZE*)pCmd->u.cmd.pu8Cmd;
1569
1570 int rc = VINF_SUCCESS;
1571
1572 for (uint32_t i = 0; i < cElements; ++i)
1573 {
1574 VBOXCMDVBVA_RESIZE_ENTRY *pEntry = &pResize->aEntries[i];
1575 rc = vboxVDMACrGuestCtlResizeEntryProcess(pVdma, pEntry);
1576 if (RT_FAILURE(rc))
1577 {
1578 WARN(("vboxVDMACrGuestCtlResizeEntryProcess failed %d\n", rc));
1579 break;
1580 }
1581 }
1582 return rc;
1583 }
1584 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE:
1585 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED:
1586 {
1587 VBVAENABLE *pEnable = (VBVAENABLE *)pCmd->u.cmd.pu8Cmd;
1588 Assert(pCmd->u.cmd.cbCmd == sizeof (VBVAENABLE));
1589 uint32_t u32Offset = pEnable->u32Offset;
1590 int rc = vdmaVBVAEnableProcess(pVdma, u32Offset);
1591 if (!RT_SUCCESS(rc))
1592 {
1593 WARN(("vdmaVBVAEnableProcess failed %d\n", rc));
1594 return rc;
1595 }
1596
1597 if (enmType == VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED)
1598 {
1599 rc = VBoxVBVAExHPPause(&pVdma->CmdVbva);
1600 if (!RT_SUCCESS(rc))
1601 {
1602 WARN(("VBoxVBVAExHPPause failed %d\n", rc));
1603 return rc;
1604 }
1605 }
1606
1607 return VINF_SUCCESS;
1608 }
1609 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1610 {
1611 int rc = vdmaVBVADisableProcess(pVdma, true);
1612 if (RT_FAILURE(rc))
1613 {
1614 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1615 return rc;
1616 }
1617
1618 /* do vgaUpdateDisplayAll right away */
1619 VMR3ReqCallNoWait(PDMDevHlpGetVM(pVdma->pVGAState->pDevInsR3), VMCPUID_ANY,
1620 (PFNRT)vgaUpdateDisplayAll, 2, pVdma->pVGAState, /* fFailOnResize = */ false);
1621
1622 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
1623 }
1624 default:
1625 WARN(("unexpected ctl type %d\n", pCmd->enmType));
1626 return VERR_INVALID_PARAMETER;
1627 }
1628}
1629
1630/**
1631 * @param fIn - whether this is a page in or out op.
1632 * the direction is VRA#M - related, so fIn == true - transfer to VRAM); false - transfer from VRAM
1633 */
1634static int vboxVDMACrCmdVbvaProcessPagingEl(PPDMDEVINS pDevIns, VBOXCMDVBVAPAGEIDX iPage, uint8_t *pu8Vram, bool fIn)
1635{
1636 RTGCPHYS phPage = (RTGCPHYS)iPage << PAGE_SHIFT;
1637 PGMPAGEMAPLOCK Lock;
1638 int rc;
1639
1640 if (fIn)
1641 {
1642 const void * pvPage;
1643 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPage, &Lock);
1644 if (!RT_SUCCESS(rc))
1645 {
1646 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d", rc));
1647 return rc;
1648 }
1649
1650 memcpy(pu8Vram, pvPage, PAGE_SIZE);
1651
1652 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1653 }
1654 else
1655 {
1656 void * pvPage;
1657 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvPage, &Lock);
1658 if (!RT_SUCCESS(rc))
1659 {
1660 WARN(("PDMDevHlpPhysGCPhys2CCPtr failed %d", rc));
1661 return rc;
1662 }
1663
1664 memcpy(pvPage, pu8Vram, PAGE_SIZE);
1665
1666 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1667 }
1668
1669 return VINF_SUCCESS;
1670}
1671
1672static int vboxVDMACrCmdVbvaProcessPagingEls(PPDMDEVINS pDevIns, const VBOXCMDVBVAPAGEIDX *piPages, uint32_t cPages, uint8_t *pu8Vram, bool fIn)
1673{
1674 for (uint32_t i = 0; i < cPages; ++i, pu8Vram += PAGE_SIZE)
1675 {
1676 int rc = vboxVDMACrCmdVbvaProcessPagingEl(pDevIns, piPages[i], pu8Vram, fIn);
1677 if (!RT_SUCCESS(rc))
1678 {
1679 WARN(("vboxVDMACrCmdVbvaProcessPagingEl failed %d", rc));
1680 return rc;
1681 }
1682 }
1683
1684 return VINF_SUCCESS;
1685}
1686
1687static int8_t vboxVDMACrCmdVbvaPagingDataInit(PVGASTATE pVGAState, const VBOXCMDVBVA_HDR *pHdr, const VBOXCMDVBVA_PAGING_TRANSFER_DATA *pData, uint32_t cbCmd,
1688 const VBOXCMDVBVAPAGEIDX **ppPages, VBOXCMDVBVAPAGEIDX *pcPages,
1689 uint8_t **ppu8Vram, bool *pfIn)
1690{
1691 if (cbCmd < sizeof (VBOXCMDVBVA_PAGING_TRANSFER))
1692 {
1693 WARN(("cmd too small"));
1694 return -1;
1695 }
1696
1697 VBOXCMDVBVAPAGEIDX cPages = cbCmd - RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers);
1698 if (cPages % sizeof (VBOXCMDVBVAPAGEIDX))
1699 {
1700 WARN(("invalid cmd size"));
1701 return -1;
1702 }
1703 cPages /= sizeof (VBOXCMDVBVAPAGEIDX);
1704
1705 VBOXCMDVBVAOFFSET offVRAM = pData->Alloc.u.offVRAM;
1706 if (offVRAM & PAGE_OFFSET_MASK)
1707 {
1708 WARN(("offVRAM address is not on page boundary\n"));
1709 return -1;
1710 }
1711 const VBOXCMDVBVAPAGEIDX *pPages = pData->aPageNumbers;
1712
1713 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1714 uint8_t *pu8VramMax = pu8VramBase + pVGAState->vram_size;
1715 if (offVRAM >= pVGAState->vram_size)
1716 {
1717 WARN(("invalid vram offset"));
1718 return -1;
1719 }
1720
1721 if (~(~(VBOXCMDVBVAPAGEIDX)0 >> PAGE_SHIFT) & cPages)
1722 {
1723 WARN(("invalid cPages %d", cPages));
1724 return -1;
1725 }
1726
1727 if (offVRAM + ((VBOXCMDVBVAOFFSET)cPages << PAGE_SHIFT) >= pVGAState->vram_size)
1728 {
1729 WARN(("invalid cPages %d, exceeding vram size", cPages));
1730 return -1;
1731 }
1732
1733 uint8_t *pu8Vram = pu8VramBase + offVRAM;
1734 bool fIn = !!(pHdr->u8Flags & VBOXCMDVBVA_OPF_PAGING_TRANSFER_IN);
1735
1736 *ppPages = pPages;
1737 *pcPages = cPages;
1738 *ppu8Vram = pu8Vram;
1739 *pfIn = fIn;
1740 return 0;
1741}
1742
1743static int8_t vboxVDMACrCmdVbvaPagingFill(PVGASTATE pVGAState, VBOXCMDVBVA_PAGING_FILL *pFill)
1744{
1745 VBOXCMDVBVAOFFSET offVRAM = pFill->offVRAM;
1746 if (offVRAM & PAGE_OFFSET_MASK)
1747 {
1748 WARN(("offVRAM address is not on page boundary\n"));
1749 return -1;
1750 }
1751
1752 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1753 uint8_t *pu8VramMax = pu8VramBase + pVGAState->vram_size;
1754 if (offVRAM >= pVGAState->vram_size)
1755 {
1756 WARN(("invalid vram offset"));
1757 return -1;
1758 }
1759
1760 uint32_t cbFill = pFill->u32CbFill;
1761
1762 if (offVRAM + cbFill >= pVGAState->vram_size)
1763 {
1764 WARN(("invalid cPages"));
1765 return -1;
1766 }
1767
1768 uint32_t *pu32Vram = (uint32_t*)(pu8VramBase + offVRAM);
1769 uint32_t u32Color = pFill->u32Pattern;
1770
1771 Assert(!(cbFill % 4));
1772 for (uint32_t i = 0; i < cbFill / 4; ++i)
1773 {
1774 pu32Vram[i] = u32Color;
1775 }
1776
1777 return 0;
1778}
1779
/**
 * Dispatches a fully-assembled command VBVA command body: handles paging
 * transfer/fill locally and forwards everything else to the chromium server.
 *
 * @returns 0 on success, -1 on failure (or the server's int8_t result).
 * @param   pVdma   The VDMA instance.
 * @param   pCmd    The command header (followed by the opcode-specific body).
 * @param   cbCmd   Total command size in bytes.
 */
static int8_t vboxVDMACrCmdVbvaProcessCmdData(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
{
    switch (pCmd->u8OpCode)
    {
        case VBOXCMDVBVA_OPTYPE_NOPCMD:
            return 0;
        case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
        {
            /* Validate and extract parameters, then copy the listed guest
             * pages to/from VRAM. */
            PVGASTATE pVGAState = pVdma->pVGAState;
            const VBOXCMDVBVAPAGEIDX *pPages;
            uint32_t cPages;
            uint8_t *pu8Vram;
            bool fIn;
            int8_t i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pCmd, &((VBOXCMDVBVA_PAGING_TRANSFER*)pCmd)->Data, cbCmd,
                                                                &pPages, &cPages,
                                                                &pu8Vram, &fIn);
            if (i8Result < 0)
            {
                WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
                return i8Result;
            }

            PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
            int rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pPages, cPages, pu8Vram, fIn);
            if (!RT_SUCCESS(rc))
            {
                WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
                return -1;
            }

            return 0;
        }
        case VBOXCMDVBVA_OPTYPE_PAGING_FILL:
        {
            PVGASTATE pVGAState = pVdma->pVGAState;
            if (cbCmd != sizeof (VBOXCMDVBVA_PAGING_FILL))
            {
                WARN(("cmd too small"));
                return -1;
            }

            return vboxVDMACrCmdVbvaPagingFill(pVGAState, (VBOXCMDVBVA_PAGING_FILL*)pCmd);
        }
        default:
            /* Anything else is handled by the chromium command server. */
            return pVdma->CrSrvInfo.pfnCmd(pVdma->CrSrvInfo.hSvr, pCmd, cbCmd);
    }
}
1827
1828#if 0
1829typedef struct VBOXCMDVBVA_PAGING_TRANSFER
1830{
1831 VBOXCMDVBVA_HDR Hdr;
1832 /* for now can only contain offVRAM.
1833 * paging transfer can NOT be initiated for allocations having host 3D object (hostID) associated */
1834 VBOXCMDVBVA_ALLOCINFO Alloc;
1835 uint32_t u32Reserved;
1836 VBOXCMDVBVA_SYSMEMEL aSysMem[1];
1837} VBOXCMDVBVA_PAGING_TRANSFER;
1838#endif
1839
1840AssertCompile(sizeof (VBOXCMDVBVA_HDR) == 8);
1841AssertCompile(sizeof (VBOXCMDVBVA_ALLOCINFO) == 4);
1842AssertCompile(sizeof (VBOXCMDVBVAPAGEIDX) == 4);
1843AssertCompile(!(PAGE_SIZE % sizeof (VBOXCMDVBVAPAGEIDX)));
1844
1845#define VBOXCMDVBVA_NUM_SYSMEMEL_PER_PAGE (PAGE_SIZE / sizeof (VBOXCMDVBVA_SYSMEMEL))
1846
/**
 * Top-level dispatcher for a command VBVA command: resolves indirect
 * (guest-sysmem-resident) commands page by page, unrolls complex (compound)
 * commands, and forwards everything else to vboxVDMACrCmdVbvaProcessCmdData.
 *
 * @returns 0 on success, negative int8_t on failure.
 * @param   pVdma   The VDMA instance.
 * @param   pCmd    The command header as read from the VBVA ring.
 * @param   cbCmd   Size of the command in bytes.
 */
static int8_t vboxVDMACrCmdVbvaProcess(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
{
    switch (pCmd->u8OpCode)
    {
        case VBOXCMDVBVA_OPTYPE_SYSMEMCMD:
        {
            /* The real command lives in guest system memory; pSysmemCmd only
             * carries its guest-physical address and (split) size. */
            if (cbCmd < sizeof (VBOXCMDVBVA_SYSMEMCMD))
            {
                WARN(("invalid command size"));
                return -1;
            }
            VBOXCMDVBVA_SYSMEMCMD *pSysmemCmd = (VBOXCMDVBVA_SYSMEMCMD*)pCmd;
            const VBOXCMDVBVA_HDR *pRealCmdHdr;
            /* The real command size is encoded in u8Flags (low byte) and
             * u.u8PrimaryID (high byte) of the wrapper header. */
            uint32_t cbRealCmd = pCmd->u8Flags;
            cbRealCmd |= (uint32_t)pCmd->u.u8PrimaryID << 8;
            if (cbRealCmd < sizeof (VBOXCMDVBVA_HDR))
            {
                WARN(("invalid sysmem cmd size"));
                return -1;
            }

            RTGCPHYS phCmd = (RTGCPHYS)pSysmemCmd->phCmd;

            PGMPAGEMAPLOCK Lock;
            PVGASTATE pVGAState = pVdma->pVGAState;
            PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
            const void * pvCmd;
            int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
            if (!RT_SUCCESS(rc))
            {
                WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
                return -1;
            }

            Assert((phCmd & PAGE_OFFSET_MASK) == (((uintptr_t)pvCmd) & PAGE_OFFSET_MASK));

            /* Bytes of the real command available in the first mapped page. */
            uint32_t cbCmdPart = PAGE_SIZE - (((uintptr_t)pvCmd) & PAGE_OFFSET_MASK);

            if (cbRealCmd <= cbCmdPart)
            {
                /* Fast path: the whole command fits in one guest page. */
                pRealCmdHdr = (const VBOXCMDVBVA_HDR *)pvCmd;
                /* NOTE(review): i8Result is declared uint8_t here although the
                 * callee and this function use int8_t; the value survives the
                 * round-trip on common platforms, but the type looks like a
                 * typo - confirm. */
                uint8_t i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pRealCmdHdr, cbRealCmd);
                PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
                return i8Result;
            }

            /* The command crosses a page boundary: assemble the header (Hdr is
             * used when even the header itself is split) and walk the payload
             * page by page. */
            VBOXCMDVBVA_HDR Hdr;
            const void *pvCurCmdTail;
            uint32_t cbCurCmdTail;
            if (cbCmdPart >= sizeof (*pRealCmdHdr))
            {
                pRealCmdHdr = (const VBOXCMDVBVA_HDR *)pvCmd;
                pvCurCmdTail = (const void*)(pRealCmdHdr + 1);
                cbCurCmdTail = cbCmdPart - sizeof (*pRealCmdHdr);
            }
            else
            {
                /* Header split across two pages: copy both fragments into Hdr. */
                memcpy(&Hdr, pvCmd, cbCmdPart);
                PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
                phCmd += cbCmdPart;
                Assert(!(phCmd & PAGE_OFFSET_MASK));
                rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
                if (!RT_SUCCESS(rc))
                {
                    WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
                    return -1;
                }

                /* cbCmdPart now becomes the number of header bytes in page 2. */
                cbCmdPart = sizeof (*pRealCmdHdr) - cbCmdPart;
                memcpy(((uint8_t*)(&Hdr)) + cbCmdPart, pvCmd, cbCmdPart);
                pRealCmdHdr = &Hdr;
                pvCurCmdTail = (const void*)(((uint8_t*)pvCmd) + cbCmdPart);
                cbCurCmdTail = PAGE_SIZE - cbCmdPart;
            }

            /* Clamp the first tail chunk to the command's actual payload size. */
            if (cbCurCmdTail > cbRealCmd - sizeof (*pRealCmdHdr))
                cbCurCmdTail = cbRealCmd - sizeof (*pRealCmdHdr);

            int8_t i8Result = 0;

            switch (pRealCmdHdr->u8OpCode)
            {
                case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
                {
                    /* Paging transfer is the only op that may be split across
                     * guest pages; process the page index array chunk-wise. */
                    const uint32_t *pPages;
                    uint32_t cPages;
                    uint8_t *pu8Vram;
                    bool fIn;
                    i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pRealCmdHdr, (const VBOXCMDVBVA_PAGING_TRANSFER_DATA*)pvCurCmdTail, cbRealCmd,
                                                                &pPages, &cPages,
                                                                &pu8Vram, &fIn);
                    if (i8Result < 0)
                    {
                        WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
                        /* we need to break, not return, to ensure currently locked page is released */
                        break;
                    }

                    if (cbCurCmdTail & 3)
                    {
                        WARN(("command is not alligned properly %d", cbCurCmdTail));
                        i8Result = -1;
                        /* we need to break, not return, to ensure currently locked page is released */
                        break;
                    }

                    uint32_t cCurPages = cbCurCmdTail / sizeof (VBOXCMDVBVAPAGEIDX);
                    /* NOTE(review): '<' would fire if the first chunk already
                     * covers all pages exactly; looks like it should be '<=' -
                     * debug-only assertion, confirm. */
                    Assert(cCurPages < cPages);

                    do
                    {
                        rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pPages, cCurPages, pu8Vram, fIn);
                        if (!RT_SUCCESS(rc))
                        {
                            WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
                            i8Result = -1;
                            /* we need to break, not return, to ensure currently locked page is released */
                            break;
                        }

                        Assert(cPages >= cCurPages);
                        cPages -= cCurPages;

                        if (!cPages)
                            break;

                        /* Advance to the next page of page indices. */
                        PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);

                        Assert(!(phCmd & PAGE_OFFSET_MASK));

                        phCmd += PAGE_SIZE;
                        pu8Vram += (VBOXCMDVBVAOFFSET)cCurPages << PAGE_SHIFT;

                        rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
                        if (!RT_SUCCESS(rc))
                        {
                            WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
                            /* the page is not locked, return */
                            return -1;
                        }

                        cCurPages = PAGE_SIZE / sizeof (VBOXCMDVBVAPAGEIDX);
                        if (cCurPages > cPages)
                            cCurPages = cPages;
                    } while (1);
                    break;
                }
                default:
                    WARN(("command can not be splitted"));
                    i8Result = -1;
                    break;
            }

            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
            return i8Result;
        }
        case VBOXCMDVBVA_OPTYPE_COMPLEXCMD:
        {
            /* Compound command: iterate the embedded sub-commands, each
             * prefixed by its own header carrying its host-side size. */
            Assert(cbCmd >= sizeof (VBOXCMDVBVA_HDR));
            ++pCmd;
            cbCmd -= sizeof (*pCmd);
            uint32_t cbCurCmd = 0;
            for ( ; cbCmd; cbCmd -= cbCurCmd, pCmd = (VBOXCMDVBVA_HDR*)(((uint8_t*)pCmd) + cbCurCmd))
            {
                if (cbCmd < sizeof (VBOXCMDVBVA_HDR))
                {
                    WARN(("invalid command size"));
                    return -1;
                }

                cbCurCmd = pCmd->u2.complexCmdEl.u16CbCmdHost;
                if (cbCmd < cbCurCmd)
                {
                    WARN(("invalid command size"));
                    return -1;
                }

                int8_t i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCurCmd);
                if (i8Result < 0)
                {
                    WARN(("vboxVDMACrCmdVbvaProcess failed"));
                    return i8Result;
                }
            }
            return 0;
        }
        default:
            return vboxVDMACrCmdVbvaProcessCmdData(pVdma, pCmd, cbCmd);
    }
}
2037
2038static void vboxVDMACrCmdProcess(struct VBOXVDMAHOST *pVdma, uint8_t* pu8Cmd, uint32_t cbCmd)
2039{
2040 if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
2041 return;
2042
2043 if (cbCmd < sizeof (VBOXCMDVBVA_HDR))
2044 {
2045 WARN(("invalid command size"));
2046 return;
2047 }
2048
2049 PVBOXCMDVBVA_HDR pCmd = (PVBOXCMDVBVA_HDR)pu8Cmd;
2050
2051 /* check if the command is cancelled */
2052 if (!ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_IN_PROGRESS, VBOXCMDVBVA_STATE_SUBMITTED))
2053 {
2054 Assert(pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED);
2055 return;
2056 }
2057
2058 pCmd->u.i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCmd);
2059}
2060
2061static int vboxVDMACrCtlHgsmiSetup(struct VBOXVDMAHOST *pVdma)
2062{
2063 PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP pCmd = (PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP)
2064 vboxVDMACrCtlCreate (VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP, sizeof (*pCmd));
2065 int rc = VERR_NO_MEMORY;
2066 if (pCmd)
2067 {
2068 PVGASTATE pVGAState = pVdma->pVGAState;
2069 pCmd->pvVRamBase = pVGAState->vram_ptrR3;
2070 pCmd->cbVRam = pVGAState->vram_size;
2071 pCmd->pLed = &pVGAState->Led3D;
2072 pCmd->CrClientInfo.hClient = pVdma;
2073 pCmd->CrClientInfo.pfnCallout = vboxCmdVBVACmdCallout;
2074 rc = vboxVDMACrCtlPost(pVGAState, &pCmd->Hdr, sizeof (*pCmd));
2075 if (RT_SUCCESS(rc))
2076 {
2077 rc = vboxVDMACrCtlGetRc(&pCmd->Hdr);
2078 if (RT_SUCCESS(rc))
2079 pVdma->CrSrvInfo = pCmd->CrCmdServerInfo;
2080 else if (rc != VERR_NOT_SUPPORTED)
2081 WARN(("vboxVDMACrCtlGetRc returned %d\n", rc));
2082 }
2083 else
2084 WARN(("vboxVDMACrCtlPost failed %d\n", rc));
2085
2086 vboxVDMACrCtlRelease(&pCmd->Hdr);
2087 }
2088
2089 if (!RT_SUCCESS(rc))
2090 memset(&pVdma->CrSrvInfo, 0, sizeof (pVdma->CrSrvInfo));
2091
2092 return rc;
2093}
2094
2095static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer);
2096
/**
 * Checks whether the guest buffer descriptor contains a command that must be
 * handed to the chromium backend (or a BPB transfer executed inline) instead
 * of going through the generic VDMA command pipeline.
 *
 * @returns VINF_SUCCESS if the command was consumed here (completed or handed
 *          to the backend for asynchronous completion), VINF_NOT_SUPPORTED if
 *          the caller should process it as a regular VDMA command, or a
 *          VERR_xxx status on malformed guest input.
 * @param   pVdma    The VDMA host state.
 * @param   pCmdDr   The guest command buffer descriptor.
 * @param   cbCmdDr  Size of the descriptor including any trailing payload.
 */
static int vboxVDMACmdCheckCrCmd(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmdDr, uint32_t cbCmdDr)
{
    PVBOXVDMACMD pDmaCmd = NULL;
    uint32_t cbDmaCmd = 0;
    uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
    int rc = VINF_NOT_SUPPORTED;

    cbDmaCmd = pCmdDr->cbBuf;

    if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
    {
        /* The DMA command is embedded directly after the descriptor. */
        if (cbCmdDr < sizeof (*pCmdDr) + VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        /* NOTE(review): this rejects cbDmaCmd values *smaller* than the
         * trailing space; an upper-bound check (cbDmaCmd fitting within
         * cbCmdDr) would be the expected validation here -- confirm the
         * intended invariant before changing. */
        if (cbDmaCmd < cbCmdDr - sizeof (*pCmdDr) - VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid command buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        pDmaCmd = VBOXVDMACBUF_DR_TAIL(pCmdDr, VBOXVDMACMD);
    }
    else if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
    {
        /* The DMA command lives in VRAM at the given offset; bounds-check it
         * against the VRAM size. */
        VBOXVIDEOOFFSET offBuf = pCmdDr->Location.offVramBuf;
        if (offBuf + cbDmaCmd > pVdma->pVGAState->vram_size)
        {
            AssertMsgFailed(("invalid command buffer data from offset!"));
            return VERR_INVALID_PARAMETER;
        }
        pDmaCmd = (VBOXVDMACMD*)(pvRam + offBuf);
    }

    if (pDmaCmd)
    {
        Assert(cbDmaCmd >= VBOXVDMACMD_HEADER_SIZE());
        uint32_t cbBody = VBOXVDMACMD_BODY_SIZE(cbDmaCmd);

        switch (pDmaCmd->enmType)
        {
            case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
            {
                PVBOXVDMACMD_CHROMIUM_CMD pCrCmd = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_CHROMIUM_CMD);
                if (cbBody < sizeof (*pCrCmd))
                {
                    AssertMsgFailed(("invalid chromium command buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }
                PVGASTATE pVGAState = pVdma->pVGAState;
                rc = VINF_SUCCESS;
                if (pVGAState->pDrv->pfnCrHgsmiCommandProcess)
                {
                    /* The backend completes the descriptor asynchronously
                     * (see vboxVDMACrHgsmiCommandCompleteAsync). */
                    VBoxSHGSMICommandMarkAsynchCompletion(pCmdDr);
                    pVGAState->pDrv->pfnCrHgsmiCommandProcess(pVGAState->pDrv, pCrCmd, cbBody);
                    break;
                }
                else
                {
                    Assert(0);
                }

                /* No backend available: complete the command right away. */
                int tmpRc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                AssertRC(tmpRc);
                break;
            }
            case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
            {
                PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
                if (cbBody < sizeof (*pTransfer))
                {
                    AssertMsgFailed(("invalid bpb transfer buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }

                /* Execute the transfer inline and complete the descriptor. */
                rc = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, sizeof (*pTransfer));
                AssertRC(rc);
                if (RT_SUCCESS(rc))
                {
                    pCmdDr->rc = VINF_SUCCESS;
                    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                    AssertRC(rc);
                    rc = VINF_SUCCESS;
                }
                break;
            }
            default:
                break;
        }
    }
    return rc;
}
2192
2193int vboxVDMACrHgsmiCommandCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc)
2194{
2195 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2196 PHGSMIINSTANCE pIns = pVGAState->pHGSMI;
2197 VBOXVDMACMD *pDmaHdr = VBOXVDMACMD_FROM_BODY(pCmd);
2198 VBOXVDMACBUF_DR *pDr = VBOXVDMACBUF_DR_FROM_TAIL(pDmaHdr);
2199 AssertRC(rc);
2200 pDr->rc = rc;
2201
2202 Assert(pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
2203 rc = VBoxSHGSMICommandComplete(pIns, pDr);
2204 AssertRC(rc);
2205 return rc;
2206}
2207
2208int vboxVDMACrHgsmiControlCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CTL pCmd, int rc)
2209{
2210 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2211 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pCmdPrivate = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
2212 pCmdPrivate->rc = rc;
2213 if (pCmdPrivate->pfnCompletion)
2214 {
2215 pCmdPrivate->pfnCompletion(pVGAState, pCmd, pCmdPrivate->pvCompletion);
2216 }
2217 return VINF_SUCCESS;
2218}
2219
/**
 * Copies a rectangle between two surfaces of identical format.  Stretching
 * and color conversion are not supported.  When both rectangles span the
 * full (and equal) surface width the region is contiguous and a single
 * memcpy is used; otherwise the copy is done line by line.
 *
 * @returns VINF_SUCCESS, or VERR_INVALID_FUNCTION on a format mismatch.
 */
static int vboxVDMACmdExecBltPerform(PVBOXVDMAHOST pVdma,
                                     uint8_t *pvDstSurf, const uint8_t *pvSrcSurf,
                                     const PVBOXVDMA_SURF_DESC pDstDesc, const PVBOXVDMA_SURF_DESC pSrcDesc,
                                     const VBOXVDMA_RECTL * pDstRectl, const VBOXVDMA_RECTL * pSrcRectl)
{
    /* we do not support color conversion */
    Assert(pDstDesc->format == pSrcDesc->format);
    /* we do not support stretching */
    Assert(pDstRectl->height == pSrcRectl->height);
    Assert(pDstRectl->width == pSrcRectl->width);
    if (pDstDesc->format != pSrcDesc->format)
        return VERR_INVALID_FUNCTION;
    if (pDstDesc->width == pDstRectl->width
            && pSrcDesc->width == pSrcRectl->width
            && pSrcDesc->width == pDstDesc->width)
    {
        /* Full-width blit: copy height whole lines in one go. */
        Assert(!pDstRectl->left);
        Assert(!pSrcRectl->left);
        uint32_t cbOff = pDstDesc->pitch * pDstRectl->top;
        uint32_t cbSize = pDstDesc->pitch * pDstRectl->height;
        memcpy(pvDstSurf + cbOff, pvSrcSurf + cbOff, cbSize);
    }
    else
    {
        /* Partial-width blit: compute the byte span of one line in each
         * surface (the +7 rounding accounts for sub-byte bpp values). */
        uint32_t offDstLineStart = pDstRectl->left * pDstDesc->bpp >> 3;
        uint32_t offDstLineEnd = ((pDstRectl->left * pDstDesc->bpp + 7) >> 3) + ((pDstDesc->bpp * pDstRectl->width + 7) >> 3);
        uint32_t cbDstLine = offDstLineEnd - offDstLineStart;
        uint32_t offDstStart = pDstDesc->pitch * pDstRectl->top + offDstLineStart;
        Assert(cbDstLine <= pDstDesc->pitch);
        uint32_t cbDstSkip = pDstDesc->pitch;
        uint8_t * pvDstStart = pvDstSurf + offDstStart;

        uint32_t offSrcLineStart = pSrcRectl->left * pSrcDesc->bpp >> 3;
        uint32_t offSrcLineEnd = ((pSrcRectl->left * pSrcDesc->bpp + 7) >> 3) + ((pSrcDesc->bpp * pSrcRectl->width + 7) >> 3);
        uint32_t cbSrcLine = offSrcLineEnd - offSrcLineStart;
        uint32_t offSrcStart = pSrcDesc->pitch * pSrcRectl->top + offSrcLineStart;
        Assert(cbSrcLine <= pSrcDesc->pitch);
        uint32_t cbSrcSkip = pSrcDesc->pitch;
        const uint8_t * pvSrcStart = pvSrcSurf + offSrcStart;

        Assert(cbDstLine == cbSrcLine);

        /* NOTE(review): this loop copies pDstRectl->height + 1 lines (the
         * memcpy runs for i = 0..height inclusive), while the full-width
         * path above copies exactly height lines.  One of the two looks off
         * by one -- confirm the RECTL height convention before changing. */
        for (uint32_t i = 0; ; ++i)
        {
            memcpy (pvDstStart, pvSrcStart, cbDstLine);
            if (i == pDstRectl->height)
                break;
            pvDstStart += cbDstSkip;
            pvSrcStart += cbSrcSkip;
        }
    }
    return VINF_SUCCESS;
}
2273
2274static void vboxVDMARectlUnite(VBOXVDMA_RECTL * pRectl1, const VBOXVDMA_RECTL * pRectl2)
2275{
2276 if (!pRectl1->width)
2277 *pRectl1 = *pRectl2;
2278 else
2279 {
2280 int16_t x21 = pRectl1->left + pRectl1->width;
2281 int16_t x22 = pRectl2->left + pRectl2->width;
2282 if (pRectl1->left > pRectl2->left)
2283 {
2284 pRectl1->left = pRectl2->left;
2285 pRectl1->width = x21 < x22 ? x22 - pRectl1->left : x21 - pRectl1->left;
2286 }
2287 else if (x21 < x22)
2288 pRectl1->width = x22 - pRectl1->left;
2289
2290 x21 = pRectl1->top + pRectl1->height;
2291 x22 = pRectl2->top + pRectl2->height;
2292 if (pRectl1->top > pRectl2->top)
2293 {
2294 pRectl1->top = pRectl2->top;
2295 pRectl1->height = x21 < x22 ? x22 - pRectl1->top : x21 - pRectl1->top;
2296 }
2297 else if (x21 < x22)
2298 pRectl1->height = x22 - pRectl1->top;
2299 }
2300}
2301
2302/*
2303 * @return on success the number of bytes the command contained, otherwise - VERR_xxx error code
2304 */
2305static int vboxVDMACmdExecBlt(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt, uint32_t cbBuffer)
2306{
2307 const uint32_t cbBlt = VBOXVDMACMD_BODY_FIELD_OFFSET(uint32_t, VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects[pBlt->cDstSubRects]);
2308 Assert(cbBlt <= cbBuffer);
2309 if (cbBuffer < cbBlt)
2310 return VERR_INVALID_FUNCTION;
2311
2312 /* we do not support stretching for now */
2313 Assert(pBlt->srcRectl.width == pBlt->dstRectl.width);
2314 Assert(pBlt->srcRectl.height == pBlt->dstRectl.height);
2315 if (pBlt->srcRectl.width != pBlt->dstRectl.width)
2316 return VERR_INVALID_FUNCTION;
2317 if (pBlt->srcRectl.height != pBlt->dstRectl.height)
2318 return VERR_INVALID_FUNCTION;
2319 Assert(pBlt->cDstSubRects);
2320
2321 uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
2322 VBOXVDMA_RECTL updateRectl = {0, 0, 0, 0};
2323
2324 if (pBlt->cDstSubRects)
2325 {
2326 VBOXVDMA_RECTL dstRectl, srcRectl;
2327 const VBOXVDMA_RECTL *pDstRectl, *pSrcRectl;
2328 for (uint32_t i = 0; i < pBlt->cDstSubRects; ++i)
2329 {
2330 pDstRectl = &pBlt->aDstSubRects[i];
2331 if (pBlt->dstRectl.left || pBlt->dstRectl.top)
2332 {
2333 dstRectl.left = pDstRectl->left + pBlt->dstRectl.left;
2334 dstRectl.top = pDstRectl->top + pBlt->dstRectl.top;
2335 dstRectl.width = pDstRectl->width;
2336 dstRectl.height = pDstRectl->height;
2337 pDstRectl = &dstRectl;
2338 }
2339
2340 pSrcRectl = &pBlt->aDstSubRects[i];
2341 if (pBlt->srcRectl.left || pBlt->srcRectl.top)
2342 {
2343 srcRectl.left = pSrcRectl->left + pBlt->srcRectl.left;
2344 srcRectl.top = pSrcRectl->top + pBlt->srcRectl.top;
2345 srcRectl.width = pSrcRectl->width;
2346 srcRectl.height = pSrcRectl->height;
2347 pSrcRectl = &srcRectl;
2348 }
2349
2350 int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
2351 &pBlt->dstDesc, &pBlt->srcDesc,
2352 pDstRectl,
2353 pSrcRectl);
2354 AssertRC(rc);
2355 if (!RT_SUCCESS(rc))
2356 return rc;
2357
2358 vboxVDMARectlUnite(&updateRectl, pDstRectl);
2359 }
2360 }
2361 else
2362 {
2363 int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
2364 &pBlt->dstDesc, &pBlt->srcDesc,
2365 &pBlt->dstRectl,
2366 &pBlt->srcRectl);
2367 AssertRC(rc);
2368 if (!RT_SUCCESS(rc))
2369 return rc;
2370
2371 vboxVDMARectlUnite(&updateRectl, &pBlt->dstRectl);
2372 }
2373
2374 return cbBlt;
2375}
2376
2377static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer)
2378{
2379 if (cbBuffer < sizeof (*pTransfer))
2380 return VERR_INVALID_PARAMETER;
2381
2382 PVGASTATE pVGAState = pVdma->pVGAState;
2383 uint8_t * pvRam = pVGAState->vram_ptrR3;
2384 PGMPAGEMAPLOCK SrcLock;
2385 PGMPAGEMAPLOCK DstLock;
2386 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
2387 const void * pvSrc;
2388 void * pvDst;
2389 int rc = VINF_SUCCESS;
2390 uint32_t cbTransfer = pTransfer->cbTransferSize;
2391 uint32_t cbTransfered = 0;
2392 bool bSrcLocked = false;
2393 bool bDstLocked = false;
2394 do
2395 {
2396 uint32_t cbSubTransfer = cbTransfer;
2397 if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
2398 {
2399 pvSrc = pvRam + pTransfer->Src.offVramBuf + cbTransfered;
2400 }
2401 else
2402 {
2403 RTGCPHYS phPage = pTransfer->Src.phBuf;
2404 phPage += cbTransfered;
2405 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvSrc, &SrcLock);
2406 AssertRC(rc);
2407 if (RT_SUCCESS(rc))
2408 {
2409 bSrcLocked = true;
2410 cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
2411 }
2412 else
2413 {
2414 break;
2415 }
2416 }
2417
2418 if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
2419 {
2420 pvDst = pvRam + pTransfer->Dst.offVramBuf + cbTransfered;
2421 }
2422 else
2423 {
2424 RTGCPHYS phPage = pTransfer->Dst.phBuf;
2425 phPage += cbTransfered;
2426 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvDst, &DstLock);
2427 AssertRC(rc);
2428 if (RT_SUCCESS(rc))
2429 {
2430 bDstLocked = true;
2431 cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
2432 }
2433 else
2434 {
2435 break;
2436 }
2437 }
2438
2439 if (RT_SUCCESS(rc))
2440 {
2441 memcpy(pvDst, pvSrc, cbSubTransfer);
2442 cbTransfer -= cbSubTransfer;
2443 cbTransfered += cbSubTransfer;
2444 }
2445 else
2446 {
2447 cbTransfer = 0; /* to break */
2448 }
2449
2450 if (bSrcLocked)
2451 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &SrcLock);
2452 if (bDstLocked)
2453 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &DstLock);
2454 } while (cbTransfer);
2455
2456 if (RT_SUCCESS(rc))
2457 return sizeof (*pTransfer);
2458 return rc;
2459}
2460
2461static int vboxVDMACmdExec(PVBOXVDMAHOST pVdma, const uint8_t *pvBuffer, uint32_t cbBuffer)
2462{
2463 do
2464 {
2465 Assert(pvBuffer);
2466 Assert(cbBuffer >= VBOXVDMACMD_HEADER_SIZE());
2467
2468 if (!pvBuffer)
2469 return VERR_INVALID_PARAMETER;
2470 if (cbBuffer < VBOXVDMACMD_HEADER_SIZE())
2471 return VERR_INVALID_PARAMETER;
2472
2473 PVBOXVDMACMD pCmd = (PVBOXVDMACMD)pvBuffer;
2474 uint32_t cbCmd = 0;
2475 switch (pCmd->enmType)
2476 {
2477 case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
2478 {
2479#ifdef VBOXWDDM_TEST_UHGSMI
2480 static int count = 0;
2481 static uint64_t start, end;
2482 if (count==0)
2483 {
2484 start = RTTimeNanoTS();
2485 }
2486 ++count;
2487 if (count==100000)
2488 {
2489 end = RTTimeNanoTS();
2490 float ems = (end-start)/1000000.f;
2491 LogRel(("100000 calls took %i ms, %i cps\n", (int)ems, (int)(100000.f*1000.f/ems) ));
2492 }
2493#endif
2494 /* todo: post the buffer to chromium */
2495 return VINF_SUCCESS;
2496 }
2497 case VBOXVDMACMD_TYPE_DMA_PRESENT_BLT:
2498 {
2499 const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_PRESENT_BLT);
2500 int cbBlt = vboxVDMACmdExecBlt(pVdma, pBlt, cbBuffer);
2501 Assert(cbBlt >= 0);
2502 Assert((uint32_t)cbBlt <= cbBuffer);
2503 if (cbBlt >= 0)
2504 {
2505 if ((uint32_t)cbBlt == cbBuffer)
2506 return VINF_SUCCESS;
2507 else
2508 {
2509 cbBuffer -= (uint32_t)cbBlt;
2510 pvBuffer -= cbBlt;
2511 }
2512 }
2513 else
2514 return cbBlt; /* error */
2515 break;
2516 }
2517 case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
2518 {
2519 const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
2520 int cbTransfer = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, cbBuffer);
2521 Assert(cbTransfer >= 0);
2522 Assert((uint32_t)cbTransfer <= cbBuffer);
2523 if (cbTransfer >= 0)
2524 {
2525 if ((uint32_t)cbTransfer == cbBuffer)
2526 return VINF_SUCCESS;
2527 else
2528 {
2529 cbBuffer -= (uint32_t)cbTransfer;
2530 pvBuffer -= cbTransfer;
2531 }
2532 }
2533 else
2534 return cbTransfer; /* error */
2535 break;
2536 }
2537 case VBOXVDMACMD_TYPE_DMA_NOP:
2538 return VINF_SUCCESS;
2539 case VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ:
2540 return VINF_SUCCESS;
2541 default:
2542 AssertBreakpoint();
2543 return VERR_INVALID_FUNCTION;
2544 }
2545 } while (1);
2546
2547 /* we should not be here */
2548 AssertBreakpoint();
2549 return VERR_INVALID_STATE;
2550}
2551
2552static DECLCALLBACK(int) vboxVDMAWorkerThread(RTTHREAD ThreadSelf, void *pvUser)
2553{
2554 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvUser;
2555 PVGASTATE pVGAState = pVdma->pVGAState;
2556 VBVAEXHOSTCONTEXT *pCmdVbva = &pVdma->CmdVbva;
2557 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
2558 uint8_t *pCmd;
2559 uint32_t cbCmd;
2560 int rc;
2561
2562 VBoxVDMAThreadNotifyConstructSucceeded(&pVdma->Thread, pvUser);
2563
2564 while (!VBoxVDMAThreadIsTerminating(&pVdma->Thread))
2565 {
2566 VBVAEXHOST_DATA_TYPE enmType = VBoxVBVAExHPDataGet(pCmdVbva, &pCmd, &cbCmd);
2567 switch (enmType)
2568 {
2569 case VBVAEXHOST_DATA_TYPE_CMD:
2570 vboxVDMACrCmdProcess(pVdma, pCmd, cbCmd);
2571 VBoxVBVAExHPDataCompleteCmd(pCmdVbva, cbCmd);
2572 VBVARaiseIrqNoWait(pVGAState, 0);
2573 break;
2574 case VBVAEXHOST_DATA_TYPE_GUESTCTL:
2575 rc = vboxVDMACrGuestCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd);
2576 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
2577 break;
2578 case VBVAEXHOST_DATA_TYPE_HOSTCTL:
2579 {
2580 bool fContinue = true;
2581 rc = vboxVDMACrHostCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd, &fContinue);
2582 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
2583 if (fContinue)
2584 break;
2585 }
2586 case VBVAEXHOST_DATA_TYPE_NO_DATA:
2587 rc = VBoxVDMAThreadEventWait(&pVdma->Thread, RT_INDEFINITE_WAIT);
2588 AssertRC(rc);
2589 break;
2590 default:
2591 WARN(("unexpected type %d\n", enmType));
2592 break;
2593 }
2594 }
2595
2596 VBoxVDMAThreadNotifyTerminatingSucceeded(&pVdma->Thread, pvUser);
2597
2598 return VINF_SUCCESS;
2599}
2600
/**
 * Executes a generic (non-chromium) VDMA command buffer and completes the
 * descriptor over SHGSMI with the execution status.
 *
 * The payload is located according to the descriptor flags: trailing the
 * descriptor, at a VRAM offset, or inside a single guest physical page that
 * is temporarily mapped for the duration of the call.
 */
static void vboxVDMACommandProcess(PVBOXVDMAHOST pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
    const uint8_t * pvBuf;
    PGMPAGEMAPLOCK Lock;
    int rc;
    bool bReleaseLocked = false;

    do
    {
        PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;

        if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
            pvBuf = VBOXVDMACBUF_DR_TAIL(pCmd, const uint8_t);
        else if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
        {
            uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
            pvBuf = pvRam + pCmd->Location.offVramBuf;
        }
        else
        {
            /* Buffer is in guest physical memory; only a buffer contained in
             * a single 4K page is supported by this path. */
            RTGCPHYS phPage = pCmd->Location.phBuf & ~0xfffULL;
            uint32_t offset = pCmd->Location.phBuf & 0xfff;
            Assert(offset + pCmd->cbBuf <= 0x1000);
            if (offset + pCmd->cbBuf > 0x1000)
            {
                /* @todo: more advanced mechanism of command buffer proc is actually needed */
                rc = VERR_INVALID_PARAMETER;
                break;
            }

            const void * pvPageBuf;
            rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPageBuf, &Lock);
            AssertRC(rc);
            if (!RT_SUCCESS(rc))
            {
                /* @todo: if (rc == VERR_PGM_PHYS_PAGE_RESERVED) -> fall back on using PGMPhysRead ?? */
                break;
            }

            pvBuf = (const uint8_t *)pvPageBuf;
            pvBuf += offset;

            /* Remember to drop the page mapping lock after execution. */
            bReleaseLocked = true;
        }

        rc = vboxVDMACmdExec(pVdma, pvBuf, pCmd->cbBuf);
        AssertRC(rc);

        if (bReleaseLocked)
            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
    } while (0);

    /* Report the execution status back to the guest. */
    pCmd->rc = rc;

    rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
    AssertRC(rc);
}
2659
2660static void vboxVDMAControlProcess(PVBOXVDMAHOST pVdma, PVBOXVDMA_CTL pCmd)
2661{
2662 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
2663 pCmd->i32Result = VINF_SUCCESS;
2664 int rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
2665 AssertRC(rc);
2666}
2667
2668#endif /* #ifdef VBOX_WITH_CRHGSMI */
2669
2670#ifdef VBOX_VDMA_WITH_WATCHDOG
2671static DECLCALLBACK(void) vboxVDMAWatchDogTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
2672{
2673 VBOXVDMAHOST *pVdma = (VBOXVDMAHOST *)pvUser;
2674 PVGASTATE pVGAState = pVdma->pVGAState;
2675 VBVARaiseIrq(pVGAState, HGSMIHOSTFLAGS_WATCHDOG);
2676}
2677
2678static int vboxVDMAWatchDogCtl(struct VBOXVDMAHOST *pVdma, uint32_t cMillis)
2679{
2680 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
2681 if (cMillis)
2682 TMTimerSetMillies(pVdma->WatchDogTimer, cMillis);
2683 else
2684 TMTimerStop(pVdma->WatchDogTimer);
2685 return VINF_SUCCESS;
2686}
2687#endif
2688
/**
 * Constructs the VDMA host state and attaches it to the VGA state.
 *
 * With VBOX_WITH_CRHGSMI this also initializes the worker-thread state, the
 * host control completion event, the VBVA host context and the callout
 * critical section, and performs the CRHGSMI setup handshake (whose status
 * is deliberately ignored).  On success pVGAState->pVdma is set.
 *
 * @returns VBox status code.
 * @param   pVGAState      The VGA device state.
 * @param   cPipeElements  Not used by this implementation.
 */
int vboxVDMAConstruct(PVGASTATE pVGAState, uint32_t cPipeElements)
{
    int rc;
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)RTMemAllocZ(sizeof(*pVdma));
    Assert(pVdma);
    if (pVdma)
    {
        pVdma->pHgsmi = pVGAState->pHGSMI;
        pVdma->pVGAState = pVGAState;

#ifdef VBOX_VDMA_WITH_WATCHDOG
        rc = PDMDevHlpTMTimerCreate(pVGAState->pDevInsR3, TMCLOCK_REAL, vboxVDMAWatchDogTimer,
                                        pVdma, TMTIMER_FLAGS_NO_CRIT_SECT,
                                        "VDMA WatchDog Timer", &pVdma->WatchDogTimer);
        AssertRC(rc);
#endif

#ifdef VBOX_WITH_CRHGSMI
        VBoxVDMAThreadInit(&pVdma->Thread);

        rc = RTSemEventMultiCreate(&pVdma->HostCrCtlCompleteEvent);
        if (RT_SUCCESS(rc))
        {
            rc = VBoxVBVAExHSInit(&pVdma->CmdVbva);
            if (RT_SUCCESS(rc))
            {
                rc = RTCritSectInit(&pVdma->CalloutCritSect);
                if (RT_SUCCESS(rc))
                {
                    pVGAState->pVdma = pVdma;
                    int rcIgnored = vboxVDMACrCtlHgsmiSetup(pVdma); NOREF(rcIgnored); /** @todo is this ignoring intentional? */
                    return VINF_SUCCESS;

                    /* Unreachable by design: this is the head of the
                     * error-unwind ladder below, skipped by the early
                     * success return above. */
                    RTCritSectDelete(&pVdma->CalloutCritSect);
                }
                else
                    WARN(("RTCritSectInit failed %d\n", rc));

                VBoxVBVAExHSTerm(&pVdma->CmdVbva);
            }
            else
                WARN(("VBoxVBVAExHSInit failed %d\n", rc));

            RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
        }
        else
            WARN(("RTSemEventMultiCreate failed %d\n", rc));


        RTMemFree(pVdma);
#else
        pVGAState->pVdma = pVdma;
        return VINF_SUCCESS;
#endif
    }
    else
        rc = VERR_OUT_OF_RESOURCES;

    return rc;
}
2749
/**
 * Device reset handler: synchronously disables command VBVA processing
 * (CRHGSMI builds only).
 *
 * @returns VINF_SUCCESS.
 */
int vboxVDMAReset(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    vdmaVBVACtlDisableSync(pVdma);
#endif
    return VINF_SUCCESS;
}
2757
/**
 * Destroys the VDMA host state: synchronously disables VBVA processing,
 * tears down the worker thread and associated resources, and frees pVdma.
 * A NULL pVdma is tolerated.
 *
 * @returns VINF_SUCCESS.
 */
int vboxVDMADestruct(struct VBOXVDMAHOST *pVdma)
{
    if (!pVdma)
        return VINF_SUCCESS;
#ifdef VBOX_WITH_CRHGSMI
    vdmaVBVACtlDisableSync(pVdma);
    VBoxVDMAThreadCleanup(&pVdma->Thread);
    VBoxVBVAExHSTerm(&pVdma->CmdVbva);
    RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
    RTCritSectDelete(&pVdma->CalloutCritSect);
#endif
    RTMemFree(pVdma);
    return VINF_SUCCESS;
}
2772
/**
 * Handles a guest VDMA control request and completes it over SHGSMI.
 *
 * ENABLE/DISABLE/FLUSH are acknowledged without further action; WATCHDOG
 * (when compiled in) re-arms or stops the watchdog timer.
 */
void vboxVDMAControl(struct VBOXVDMAHOST *pVdma, PVBOXVDMA_CTL pCmd, uint32_t cbCmd)
{
    PHGSMIINSTANCE pIns = pVdma->pHgsmi;

    switch (pCmd->enmCtl)
    {
        case VBOXVDMA_CTL_TYPE_ENABLE:
            pCmd->i32Result = VINF_SUCCESS;
            break;
        case VBOXVDMA_CTL_TYPE_DISABLE:
            pCmd->i32Result = VINF_SUCCESS;
            break;
        case VBOXVDMA_CTL_TYPE_FLUSH:
            pCmd->i32Result = VINF_SUCCESS;
            break;
#ifdef VBOX_VDMA_WITH_WATCHDOG
        case VBOXVDMA_CTL_TYPE_WATCHDOG:
            /* The watchdog period (ms) is passed in the u32Offset field. */
            pCmd->i32Result = vboxVDMAWatchDogCtl(pVdma, pCmd->u32Offset);
            break;
#endif
        default:
            WARN(("cmd not supported"));
            pCmd->i32Result = VERR_NOT_SUPPORTED;
    }

    int rc = VBoxSHGSMICommandComplete (pIns, pCmd);
    AssertRC(rc);
}
2801
/**
 * Dispatches a guest VDMA command buffer descriptor.
 *
 * Chromium commands are processed by the chromium HGCM thread independently
 * from the internal command processing pipeline, which is why they are
 * checked for and routed first; everything else goes through
 * vboxVDMACommandProcess.  The descriptor is completed here on failure.
 */
void vboxVDMACommand(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    int rc = VERR_NOT_IMPLEMENTED;

#ifdef VBOX_WITH_CRHGSMI
    /* VINF_SUCCESS: the chromium path consumed (or will complete) the cmd. */
    rc = vboxVDMACmdCheckCrCmd(pVdma, pCmd, cbCmd);
    if (rc == VINF_SUCCESS)
        return;

    if (RT_FAILURE(rc))
    {
        pCmd->rc = rc;
        rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
        AssertRC(rc);
        return;
    }

    /* VINF_NOT_SUPPORTED: a regular VDMA command. */
    vboxVDMACommandProcess(pVdma, pCmd, cbCmd);
#else
    pCmd->rc = rc;
    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
    AssertRC(rc);
#endif
}
2828
2829/**/
2830#ifdef VBOX_WITH_CRHGSMI
2831
2832static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext);
2833
2834static int vdmaVBVACtlSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2835{
2836 int rc = VBoxVBVAExHCtlSubmit(&pVdma->CmdVbva, pCtl, enmSource, pfnComplete, pvComplete);
2837 if (RT_SUCCESS(rc))
2838 {
2839 if (rc == VINF_SUCCESS)
2840 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
2841 else
2842 Assert(rc == VINF_ALREADY_INITIALIZED);
2843 }
2844 else
2845 Log(("VBoxVBVAExHCtlSubmit failed %d\n", rc));
2846
2847 return rc;
2848}
2849
2850static DECLCALLBACK(void) vboxCmdVBVACmdCtlGuestCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext)
2851{
2852 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvContext;
2853 VBOXCMDVBVA_CTL *pGCtl = (VBOXCMDVBVA_CTL*)(pCtl->u.cmd.pu8Cmd - sizeof (VBOXCMDVBVA_CTL));
2854 AssertRC(rc);
2855 pGCtl->i32Result = rc;
2856
2857 Assert(pVdma->pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
2858 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pGCtl);
2859 AssertRC(rc);
2860
2861 VBoxVBVAExHCtlFree(pVbva, pCtl);
2862}
2863
2864static int vdmaVBVACtlGenericSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_SOURCE enmSource, VBVAEXHOSTCTL_TYPE enmType, uint8_t* pu8Cmd, uint32_t cbCmd, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2865{
2866 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, enmType);
2867 if (!pHCtl)
2868 {
2869 WARN(("VBoxVBVAExHCtlCreate failed\n"));
2870 return VERR_NO_MEMORY;
2871 }
2872
2873 pHCtl->u.cmd.pu8Cmd = pu8Cmd;
2874 pHCtl->u.cmd.cbCmd = cbCmd;
2875 int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, enmSource, pfnComplete, pvComplete);
2876 if (RT_FAILURE(rc))
2877 {
2878 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
2879 Log(("vdmaVBVACtlSubmit failed rc %d\n", rc));
2880 return rc;;
2881 }
2882 return VINF_SUCCESS;
2883}
2884
/**
 * Submits a guest-sourced control whose payload trails the VBOXCMDVBVA_CTL
 * header.  The guest command is marked for asynchronous completion first;
 * on submission failure it is completed here with the error in i32Result.
 *
 * NOTE(review): cbCtl < sizeof (VBOXCMDVBVA_CTL) is only guarded by the
 * Assert below; in release builds the subtraction would wrap to a huge
 * value -- confirm the caller validates cbCtl.
 *
 * @returns VINF_SUCCESS always (errors are reported to the guest).
 */
static int vdmaVBVACtlGenericGuestSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_TYPE enmType, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
{
    Assert(cbCtl >= sizeof (VBOXCMDVBVA_CTL));
    VBoxSHGSMICommandMarkAsynchCompletion(pCtl);
    int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_GUEST, enmType, (uint8_t*)(pCtl+1), cbCtl - sizeof (VBOXCMDVBVA_CTL), vboxCmdVBVACmdCtlGuestCompletion, pVdma);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    WARN(("vdmaVBVACtlGenericSubmit failed %d\n", rc));
    pCtl->i32Result = rc;
    rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
    AssertRC(rc);
    return VINF_SUCCESS;
}
2899
2900static DECLCALLBACK(void) vboxCmdVBVACmdCtlHostCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvCompletion)
2901{
2902 VBOXCRCMDCTL* pVboxCtl = (VBOXCRCMDCTL*)pCtl->u.cmd.pu8Cmd;
2903 if (pVboxCtl->u.pfnInternal)
2904 ((PFNCRCTLCOMPLETION)pVboxCtl->u.pfnInternal)(pVboxCtl, pCtl->u.cmd.cbCmd, rc, pvCompletion);
2905 VBoxVBVAExHCtlFree(pVbva, pCtl);
2906}
2907
2908static int vdmaVBVACtlOpaqueHostSubmit(PVBOXVDMAHOST pVdma, struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
2909 PFNCRCTLCOMPLETION pfnCompletion,
2910 void *pvCompletion)
2911{
2912 pCmd->u.pfnInternal = (void(*)())pfnCompletion;
2913 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_HOST, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, (uint8_t*)pCmd, cbCmd, vboxCmdVBVACmdCtlHostCompletion, pvCompletion);
2914 if (RT_FAILURE(rc))
2915 {
2916 if (rc == VERR_INVALID_STATE)
2917 {
2918 pCmd->u.pfnInternal = NULL;
2919 PVGASTATE pVGAState = pVdma->pVGAState;
2920 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCmd, cbCmd, pfnCompletion, pvCompletion);
2921 if (!RT_SUCCESS(rc))
2922 WARN(("pfnCrHgsmiControlProcess failed %d\n", rc));
2923
2924 return rc;
2925 }
2926 WARN(("vdmaVBVACtlGenericSubmit failed %d\n", rc));
2927 return rc;
2928 }
2929
2930 return VINF_SUCCESS;
2931}
2932
2933static DECLCALLBACK(int) vdmaVBVANotifyEnable(PVGASTATE pVGAState)
2934{
2935 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
2936 {
2937 int rc = pVGAState->pDrv->pfnVBVAEnable (pVGAState->pDrv, i, NULL, true);
2938 if (!RT_SUCCESS(rc))
2939 {
2940 WARN(("pfnVBVAEnable failed %d\n", rc));
2941 for (uint32_t j = 0; j < i; j++)
2942 {
2943 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, j);
2944 }
2945
2946 return rc;
2947 }
2948 }
2949 return VINF_SUCCESS;
2950}
2951
2952static DECLCALLBACK(int) vdmaVBVANotifyDisable(PVGASTATE pVGAState)
2953{
2954 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
2955 {
2956 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, i);
2957 }
2958 return VINF_SUCCESS;
2959}
2960
/**
 * Worker-thread-creation callback used by the enable path: once the thread
 * is up, processes the pending enable/disable control and notifies Main
 * about the resulting VBVA state; finally completes the control.
 */
static DECLCALLBACK(void) vdmaVBVACtlThreadCreatedEnable(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvContext)
{
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvThreadContext;
    VBVAEXHOSTCTL* pHCtl = (VBVAEXHOSTCTL*)pvContext;

    if (RT_SUCCESS(rc))
    {
        rc = vboxVDMACrGuestCtlProcess(pVdma, pHCtl);
        /* rc == VINF_SUCCESS would mean the actual state change has occurred */
        if (rc == VINF_SUCCESS)
        {
            /* we need to inform Main about VBVA enable/disable
             * main expects notifications to be done from the main thread
             * submit it there */
            PVGASTATE pVGAState = pVdma->pVGAState;

            if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
                vdmaVBVANotifyEnable(pVGAState);
            else
                vdmaVBVANotifyDisable(pVGAState);
        }
        else if (RT_FAILURE(rc))
            WARN(("vboxVDMACrGuestCtlProcess failed %d\n", rc));
    }
    else
        WARN(("vdmaVBVACtlThreadCreatedEnable is passed %d\n", rc));

    VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pHCtl, rc);
}
2990
2991static int vdmaVBVACtlEnableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, bool fPaused, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2992{
2993 int rc;
2994 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, fPaused ? VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED : VBVAEXHOSTCTL_TYPE_GHH_ENABLE);
2995 if (pHCtl)
2996 {
2997 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
2998 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
2999 pHCtl->pfnComplete = pfnComplete;
3000 pHCtl->pvComplete = pvComplete;
3001
3002 rc = VBoxVDMAThreadCreate(&pVdma->Thread, vboxVDMAWorkerThread, pVdma, vdmaVBVACtlThreadCreatedEnable, pHCtl);
3003 if (RT_SUCCESS(rc))
3004 return VINF_SUCCESS;
3005 else
3006 WARN(("VBoxVDMAThreadCreate failed %d\n", rc));
3007
3008 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3009 }
3010 else
3011 {
3012 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3013 rc = VERR_NO_MEMORY;
3014 }
3015
3016 return rc;
3017}
3018
3019static int vdmaVBVACtlEnableSubmitSync(PVBOXVDMAHOST pVdma, uint32_t offVram, bool fPaused)
3020{
3021 VBVAENABLE Enable = {0};
3022 Enable.u32Flags = VBVA_F_ENABLE;
3023 Enable.u32Offset = offVram;
3024
3025 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
3026 Data.rc = VERR_NOT_IMPLEMENTED;
3027 int rc = RTSemEventCreate(&Data.hEvent);
3028 if (!RT_SUCCESS(rc))
3029 {
3030 WARN(("RTSemEventCreate failed %d\n", rc));
3031 return rc;
3032 }
3033
3034 rc = vdmaVBVACtlEnableSubmitInternal(pVdma, &Enable, fPaused, vdmaVBVACtlSubmitSyncCompletion, &Data);
3035 if (RT_SUCCESS(rc))
3036 {
3037 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
3038 if (RT_SUCCESS(rc))
3039 {
3040 rc = Data.rc;
3041 if (!RT_SUCCESS(rc))
3042 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
3043 }
3044 else
3045 WARN(("RTSemEventWait failed %d\n", rc));
3046 }
3047 else
3048 WARN(("vdmaVBVACtlSubmit failed %d\n", rc));
3049
3050 RTSemEventDestroy(Data.hEvent);
3051
3052 return rc;
3053}
3054
3055static int vdmaVBVACtlDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3056{
3057 int rc;
3058 VBVAEXHOSTCTL* pHCtl;
3059 if (VBoxVBVAExHSIsDisabled(&pVdma->CmdVbva))
3060 {
3061 WARN(("VBoxVBVAExHSIsDisabled: disabled"));
3062 return VINF_SUCCESS;
3063 }
3064
3065 pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_GHH_DISABLE);
3066 if (!pHCtl)
3067 {
3068 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3069 return VERR_NO_MEMORY;
3070 }
3071
3072 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
3073 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
3074 rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_GUEST, pfnComplete, pvComplete);
3075 if (RT_SUCCESS(rc))
3076 return VINF_SUCCESS;
3077
3078 WARN(("vdmaVBVACtlSubmit failed rc %d\n", rc));
3079 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3080 return rc;
3081}
3082
3083static int vdmaVBVACtlEnableDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3084{
3085 bool fEnable = ((pEnable->u32Flags & (VBVA_F_ENABLE | VBVA_F_DISABLE)) == VBVA_F_ENABLE);
3086 if (fEnable)
3087 return vdmaVBVACtlEnableSubmitInternal(pVdma, pEnable, false, pfnComplete, pvComplete);
3088 return vdmaVBVACtlDisableSubmitInternal(pVdma, pEnable, pfnComplete, pvComplete);
3089}
3090
3091static int vdmaVBVACtlEnableDisableSubmit(PVBOXVDMAHOST pVdma, VBOXCMDVBVA_CTL_ENABLE *pEnable)
3092{
3093 VBoxSHGSMICommandMarkAsynchCompletion(&pEnable->Hdr);
3094 int rc = vdmaVBVACtlEnableDisableSubmitInternal(pVdma, &pEnable->Enable, vboxCmdVBVACmdCtlGuestCompletion, pVdma);
3095 if (RT_SUCCESS(rc))
3096 return VINF_SUCCESS;
3097
3098 WARN(("vdmaVBVACtlEnableDisableSubmitInternal failed %d\n", rc));
3099 pEnable->Hdr.i32Result = rc;
3100 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, &pEnable->Hdr);
3101 AssertRC(rc);
3102 return VINF_SUCCESS;
3103}
3104
3105static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext)
3106{
3107 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvContext;
3108 pData->rc = rc;
3109 rc = RTSemEventSignal(pData->hEvent);
3110 if (!RT_SUCCESS(rc))
3111 WARN(("RTSemEventSignal failed %d\n", rc));
3112}
3113
3114static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource)
3115{
3116 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
3117 Data.rc = VERR_NOT_IMPLEMENTED;
3118 int rc = RTSemEventCreate(&Data.hEvent);
3119 if (!RT_SUCCESS(rc))
3120 {
3121 WARN(("RTSemEventCreate failed %d\n", rc));
3122 return rc;
3123 }
3124
3125 rc = vdmaVBVACtlSubmit(pVdma, pCtl, enmSource, vdmaVBVACtlSubmitSyncCompletion, &Data);
3126 if (RT_SUCCESS(rc))
3127 {
3128 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
3129 if (RT_SUCCESS(rc))
3130 {
3131 rc = Data.rc;
3132 if (!RT_SUCCESS(rc))
3133 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
3134 }
3135 else
3136 WARN(("RTSemEventWait failed %d\n", rc));
3137 }
3138 else
3139 Log(("vdmaVBVACtlSubmit failed %d\n", rc));
3140
3141 RTSemEventDestroy(Data.hEvent);
3142
3143 return rc;
3144}
3145
3146static int vdmaVBVAPause(PVBOXVDMAHOST pVdma)
3147{
3148 VBVAEXHOSTCTL Ctl;
3149 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE;
3150 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
3151}
3152
3153static int vdmaVBVAResume(PVBOXVDMAHOST pVdma)
3154{
3155 VBVAEXHOSTCTL Ctl;
3156 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME;
3157 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
3158}
3159
3160static int vboxVDMACmdSubmitPerform(struct VBOXVDMAHOST *pVdma)
3161{
3162 int rc = VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva);
3163 switch (rc)
3164 {
3165 case VINF_SUCCESS:
3166 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
3167 case VINF_ALREADY_INITIALIZED:
3168 case VINF_EOF:
3169 case VERR_INVALID_STATE:
3170 return VINF_SUCCESS;
3171 default:
3172 Assert(!RT_FAILURE(rc));
3173 return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
3174 }
3175}
3176
3177
3178int vboxCmdVBVACmdHostCtl(PPDMIDISPLAYVBVACALLBACKS pInterface,
3179 struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
3180 PFNCRCTLCOMPLETION pfnCompletion,
3181 void *pvCompletion)
3182{
3183 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
3184 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3185 pCmd->CalloutList.List.pNext = NULL;
3186 return vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, pfnCompletion, pvCompletion);
3187}
3188
/** Stack-allocated context shared between vboxCmdVBVACmdHostCtlSync and its
 * completion callback vboxCmdVBVACmdHostCtlSyncCb. */
typedef struct VBOXCMDVBVA_CMDHOSTCTL_SYNC
{
    struct VBOXVDMAHOST *pVdma;   /* owning VDMA host state */
    uint32_t fProcessing;         /* non-zero while the control is pending; cleared by the completion callback */
    int rc;                       /* status reported by the completion callback */
} VBOXCMDVBVA_CMDHOSTCTL_SYNC;
3195
/**
 * Completion callback for vboxCmdVBVACmdHostCtlSync.
 *
 * NOTE(review): pvCompletion points at a VBOXCMDVBVA_CMDHOSTCTL_SYNC living
 * on the waiting thread's stack.  Once fProcessing is cleared the waiter may
 * return and invalidate pData, so the statement order below matters.
 */
static DECLCALLBACK(void) vboxCmdVBVACmdHostCtlSyncCb(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
{
    VBOXCMDVBVA_CMDHOSTCTL_SYNC *pData = (VBOXCMDVBVA_CMDHOSTCTL_SYNC*)pvCompletion;

    /* Publish the completion status for the waiter to pick up. */
    pData->rc = rc;

    /* Snapshot pVdma before clearing fProcessing -- pData may become invalid
     * right after that store (see note above). */
    struct VBOXVDMAHOST *pVdma = pData->pVdma;

    /* Account this completion so the waiter knows whether it is safe to
     * reset the multi-event semaphore afterwards. */
    ASMAtomicIncS32(&pVdma->i32cHostCrCtlCompleted);

    pData->fProcessing = 0;

    RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
}
3210
3211static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd, VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb)
3212{
3213 pEntry->pfnCb = pfnCb;
3214 int rc = RTCritSectEnter(&pVdma->CalloutCritSect);
3215 if (RT_SUCCESS(rc))
3216 {
3217 RTListAppend(&pCmd->CalloutList.List, &pEntry->Node);
3218 RTCritSectLeave(&pVdma->CalloutCritSect);
3219
3220 RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
3221 }
3222 else
3223 WARN(("RTCritSectEnter failed %d\n", rc));
3224
3225 return rc;
3226}
3227
3228
3229static int vboxCmdVBVACmdCalloutProcess(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd)
3230{
3231 int rc = VINF_SUCCESS;
3232 for(;;)
3233 {
3234 rc = RTCritSectEnter(&pVdma->CalloutCritSect);
3235 if (RT_SUCCESS(rc))
3236 {
3237 VBOXCRCMDCTL_CALLOUT_LISTENTRY* pEntry = RTListGetFirst(&pCmd->CalloutList.List, VBOXCRCMDCTL_CALLOUT_LISTENTRY, Node);
3238 if (pEntry)
3239 RTListNodeRemove(&pEntry->Node);
3240 RTCritSectLeave(&pVdma->CalloutCritSect);
3241
3242 if (!pEntry)
3243 break;
3244
3245 pEntry->pfnCb(pEntry);
3246 }
3247 else
3248 {
3249 WARN(("RTCritSectEnter failed %d\n", rc));
3250 break;
3251 }
3252 }
3253
3254 return rc;
3255}
3256
/**
 * Synchronous host control submission: submits pCmd and polls until the
 * completion callback (vboxCmdVBVACmdHostCtlSyncCb) clears Data.fProcessing,
 * servicing any callouts queued via vboxCmdVBVACmdCallout while waiting.
 *
 * @returns Submission failure status, otherwise the status reported by the
 *          completion callback.
 */
int vboxCmdVBVACmdHostCtlSync(PPDMIDISPLAYVBVACALLBACKS pInterface,
                              struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd)
{
    PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
    struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
    /* Stack-allocated context shared with the completion callback; the
     * callback must not touch it after clearing fProcessing. */
    VBOXCMDVBVA_CMDHOSTCTL_SYNC Data;
    Data.pVdma = pVdma;
    Data.fProcessing = 1;
    Data.rc = VERR_INTERNAL_ERROR;
    /* The sync path supports callouts: initialize the list properly (the
     * async path just NULLs pNext instead). */
    RTListInit(&pCmd->CalloutList.List);
    int rc = vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, vboxCmdVBVACmdHostCtlSyncCb, &Data);
    if (!RT_SUCCESS(rc))
    {
        WARN(("vdmaVBVACtlOpaqueHostSubmit failed %d", rc));
        return rc;
    }

    while (Data.fProcessing)
    {
        /* Poll infrequently to make sure no completed message has been missed. */
        RTSemEventMultiWait(pVdma->HostCrCtlCompleteEvent, 500);

        vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);

        if (Data.fProcessing)
            RTThreadYield();
    }

    /* extra check callouts */
    vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);

    /* 'Our' message has been processed, so should reset the semaphore.
     * There is still possible that another message has been processed
     * and the semaphore has been signalled again.
     * Reset only if there are no other messages completed.
     */
    int32_t c = ASMAtomicDecS32(&pVdma->i32cHostCrCtlCompleted);
    Assert(c >= 0);
    if (!c)
        RTSemEventMultiReset(pVdma->HostCrCtlCompleteEvent);

    rc = Data.rc;
    if (!RT_SUCCESS(rc))
        WARN(("host call failed %d", rc));

    return rc;
}
3304
3305int vboxCmdVBVACmdCtl(PVGASTATE pVGAState, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
3306{
3307 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3308 int rc = VINF_SUCCESS;
3309 switch (pCtl->u32Type)
3310 {
3311 case VBOXCMDVBVACTL_TYPE_3DCTL:
3312 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, pCtl, cbCtl);
3313 case VBOXCMDVBVACTL_TYPE_RESIZE:
3314 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_RESIZE, pCtl, cbCtl);
3315 case VBOXCMDVBVACTL_TYPE_ENABLE:
3316 if (cbCtl != sizeof (VBOXCMDVBVA_CTL_ENABLE))
3317 {
3318 WARN(("incorrect enable size\n"));
3319 rc = VERR_INVALID_PARAMETER;
3320 break;
3321 }
3322 return vdmaVBVACtlEnableDisableSubmit(pVdma, (VBOXCMDVBVA_CTL_ENABLE*)pCtl);
3323 default:
3324 WARN(("unsupported type\n"));
3325 rc = VERR_INVALID_PARAMETER;
3326 break;
3327 }
3328
3329 pCtl->i32Result = rc;
3330 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
3331 AssertRC(rc);
3332 return VINF_SUCCESS;
3333}
3334
3335int vboxCmdVBVACmdSubmit(PVGASTATE pVGAState)
3336{
3337 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3338 {
3339 WARN(("vdma VBVA is disabled\n"));
3340 return VERR_INVALID_STATE;
3341 }
3342
3343 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3344}
3345
3346int vboxCmdVBVACmdFlush(PVGASTATE pVGAState)
3347{
3348 WARN(("flush\n"));
3349 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3350 {
3351 WARN(("vdma VBVA is disabled\n"));
3352 return VERR_INVALID_STATE;
3353 }
3354 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3355}
3356
3357void vboxCmdVBVACmdTimer(PVGASTATE pVGAState)
3358{
3359 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3360 return;
3361 vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3362}
3363
3364bool vboxCmdVBVAIsEnabled(PVGASTATE pVGAState)
3365{
3366 return VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva);
3367}
3368#endif
3369
3370int vboxVDMASaveStateExecPrep(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
3371{
3372#ifdef VBOX_WITH_CRHGSMI
3373 int rc = vdmaVBVAPause(pVdma);
3374 if (RT_SUCCESS(rc))
3375 return VINF_SUCCESS;
3376
3377 if (rc != VERR_INVALID_STATE)
3378 {
3379 WARN(("vdmaVBVAPause failed %d\n", rc));
3380 return rc;
3381 }
3382
3383#ifdef DEBUG_misha
3384 WARN(("debug prep"));
3385#endif
3386
3387 PVGASTATE pVGAState = pVdma->pVGAState;
3388 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
3389 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_BEGIN, sizeof (*pCmd));
3390 Assert(pCmd);
3391 if (pCmd)
3392 {
3393 rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
3394 AssertRC(rc);
3395 if (RT_SUCCESS(rc))
3396 {
3397 rc = vboxVDMACrCtlGetRc(pCmd);
3398 }
3399 vboxVDMACrCtlRelease(pCmd);
3400 return rc;
3401 }
3402 return VERR_NO_MEMORY;
3403#else
3404 return VINF_SUCCESS;
3405#endif
3406}
3407
3408int vboxVDMASaveStateExecDone(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
3409{
3410#ifdef VBOX_WITH_CRHGSMI
3411 int rc = vdmaVBVAResume(pVdma);
3412 if (RT_SUCCESS(rc))
3413 return VINF_SUCCESS;
3414
3415 if (rc != VERR_INVALID_STATE)
3416 {
3417 WARN(("vdmaVBVAResume failed %d\n", rc));
3418 return rc;
3419 }
3420
3421#ifdef DEBUG_misha
3422 WARN(("debug done"));
3423#endif
3424
3425 PVGASTATE pVGAState = pVdma->pVGAState;
3426 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
3427 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_END, sizeof (*pCmd));
3428 Assert(pCmd);
3429 if (pCmd)
3430 {
3431 rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
3432 AssertRC(rc);
3433 if (RT_SUCCESS(rc))
3434 {
3435 rc = vboxVDMACrCtlGetRc(pCmd);
3436 }
3437 vboxVDMACrCtlRelease(pCmd);
3438 return rc;
3439 }
3440 return VERR_NO_MEMORY;
3441#else
3442 return VINF_SUCCESS;
3443#endif
3444}
3445
3446int vboxVDMASaveStateExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
3447{
3448 int rc;
3449
3450#ifdef VBOX_WITH_CRHGSMI
3451 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
3452#endif
3453 {
3454 rc = SSMR3PutU32(pSSM, 0xffffffff);
3455 AssertRCReturn(rc, rc);
3456 return VINF_SUCCESS;
3457 }
3458
3459#ifdef VBOX_WITH_CRHGSMI
3460 PVGASTATE pVGAState = pVdma->pVGAState;
3461 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
3462
3463 rc = SSMR3PutU32(pSSM, (uint32_t)(((uint8_t*)pVdma->CmdVbva.pVBVA) - pu8VramBase));
3464 AssertRCReturn(rc, rc);
3465
3466 VBVAEXHOSTCTL HCtl;
3467 HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_SAVESTATE;
3468 HCtl.u.state.pSSM = pSSM;
3469 HCtl.u.state.u32Version = 0;
3470 return vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
3471#endif
3472}
3473
3474int vboxVDMASaveLoadExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM, uint32_t u32Version)
3475{
3476 uint32_t u32;
3477 int rc = SSMR3GetU32(pSSM, &u32);
3478 AssertRCReturn(rc, rc);
3479
3480 if (u32 != 0xffffffff)
3481 {
3482#ifdef VBOX_WITH_CRHGSMI
3483 rc = vdmaVBVACtlEnableSubmitSync(pVdma, u32, true);
3484 AssertRCReturn(rc, rc);
3485
3486 Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
3487
3488 VBVAEXHOSTCTL HCtl;
3489 HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_LOADSTATE;
3490 HCtl.u.state.pSSM = pSSM;
3491 HCtl.u.state.u32Version = u32Version;
3492 rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
3493 AssertRCReturn(rc, rc);
3494
3495 rc = vdmaVBVAResume(pVdma);
3496 AssertRCReturn(rc, rc);
3497
3498 return VINF_SUCCESS;
3499#else
3500 WARN(("Unsupported VBVACtl info!\n"));
3501 return VERR_VERSION_MISMATCH;
3502#endif
3503 }
3504
3505 return VINF_SUCCESS;
3506}
3507
3508int vboxVDMASaveLoadDone(struct VBOXVDMAHOST *pVdma)
3509{
3510#ifdef VBOX_WITH_CRHGSMI
3511 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
3512 return VINF_SUCCESS;
3513
3514/** @todo r=bird: BTW. would be great if you put in a couple of comments here and there explaining what
3515 * the purpose of this code is. */
3516 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE);
3517 if (!pHCtl)
3518 {
3519 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3520 return VERR_NO_MEMORY;
3521 }
3522
3523 /* sanity */
3524 pHCtl->u.cmd.pu8Cmd = NULL;
3525 pHCtl->u.cmd.cbCmd = 0;
3526
3527 /* NULL completion will just free the ctl up */
3528 int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_HOST, NULL, NULL);
3529 if (RT_FAILURE(rc))
3530 {
3531 Log(("vdmaVBVACtlSubmit failed rc %d\n", rc));
3532 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3533 return rc;
3534 }
3535#endif
3536 return VINF_SUCCESS;
3537}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette