VirtualBox

source: vbox/trunk/src/VBox/Devices/Graphics/DevVGA_VDMA.cpp@ 56378

Last change on this file since 56378 was 56292, checked in by vboxsync, 10 years ago

Devices: Updated (C) year.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 112.1 KB
Line 
1/* $Id: DevVGA_VDMA.cpp 56292 2015-06-09 14:20:46Z vboxsync $ */
2/** @file
3 * Video DMA (VDMA) support.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17#include <VBox/VMMDev.h>
18#include <VBox/vmm/pdmdev.h>
19#include <VBox/vmm/pgm.h>
20#include <VBox/VBoxVideo.h>
21#include <iprt/semaphore.h>
22#include <iprt/thread.h>
23#include <iprt/mem.h>
24#include <iprt/asm.h>
25#include <iprt/list.h>
26#include <iprt/param.h>
27
28#include "DevVGA.h"
29#include "HGSMI/SHGSMIHost.h"
30
31#include <VBox/VBoxVideo3D.h>
32#include <VBox/VBoxVideoHost3D.h>
33
34#ifdef DEBUG_misha
35# define VBOXVDBG_MEMCACHE_DISABLE
36#endif
37
38#ifndef VBOXVDBG_MEMCACHE_DISABLE
39# include <iprt/memcache.h>
40#endif
41
42#ifdef DEBUG_misha
43#define WARN_BP() do { AssertFailed(); } while (0)
44#else
45#define WARN_BP() do { } while (0)
46#endif
47#define WARN(_msg) do { \
48 LogRel(_msg); \
49 WARN_BP(); \
50 } while (0)
51
52#define VBOXVDMATHREAD_STATE_TERMINATED 0
53#define VBOXVDMATHREAD_STATE_CREATING 1
54#define VBOXVDMATHREAD_STATE_CREATED 3
55#define VBOXVDMATHREAD_STATE_TERMINATING 4
56
57struct VBOXVDMATHREAD;
58
59typedef DECLCALLBACKPTR(void, PFNVBOXVDMATHREAD_CHANGED)(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvChangeContext);
60
61static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd, VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb);
62
63
/* State of the VDMA worker thread. */
typedef struct VBOXVDMATHREAD
{
    RTTHREAD hWorkerThread;               /* The worker thread handle. */
    RTSEMEVENT hEvent;                    /* Event semaphore the worker is signalled with. */
    volatile uint32_t u32State;           /* VBOXVDMATHREAD_STATE_XXX; read/written atomically. */
    PFNVBOXVDMATHREAD_CHANGED pfnChanged; /* One-shot callback fired on the next create/terminate transition. */
    void *pvChanged;                      /* User context passed to pfnChanged. */
} VBOXVDMATHREAD, *PVBOXVDMATHREAD;
72
73
74/* state transformations:
75 *
76 * submitter | processor
77 *
78 * LISTENING ---> PROCESSING
79 *
80 * */
81#define VBVAEXHOSTCONTEXT_STATE_LISTENING 0
82#define VBVAEXHOSTCONTEXT_STATE_PROCESSING 1
83
84#define VBVAEXHOSTCONTEXT_ESTATE_DISABLED -1
85#define VBVAEXHOSTCONTEXT_ESTATE_PAUSED 0
86#define VBVAEXHOSTCONTEXT_ESTATE_ENABLED 1
87
/* Host-side command-VBVA context: tracks the guest ring buffer and the queued controls. */
typedef struct VBVAEXHOSTCONTEXT
{
    VBVABUFFER *pVBVA;               /* Guest VBVA buffer mapping; NULL until enabled. */
    volatile int32_t i32State;       /* VBVAEXHOSTCONTEXT_STATE_LISTENING or _PROCESSING. */
    volatile int32_t i32EnableState; /* VBVAEXHOSTCONTEXT_ESTATE_XXX (disabled/paused/enabled). */
    volatile uint32_t u32cCtls;      /* Total number of controls queued on both lists. */
    /* critical section for accessing ctl lists */
    RTCRITSECT CltCritSect;
    RTLISTANCHOR GuestCtlList;       /* Controls submitted by the guest. */
    RTLISTANCHOR HostCtlList;        /* Controls submitted by the host. */
#ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMEMCACHE CtlCache;             /* Allocation cache for VBVAEXHOSTCTL entries. */
#endif
} VBVAEXHOSTCONTEXT;
102
/* Control request types.  HH_* originate on the host (internal/host-host),
 * GHH_* are guest-submitted controls handled on the host. */
typedef enum
{
    VBVAEXHOSTCTL_TYPE_UNDEFINED = 0,
    VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE,   /* processed inline, see vboxVBVAExHPCheckProcessCtlInternal */
    VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME,  /* processed inline as well */
    VBVAEXHOSTCTL_TYPE_HH_SAVESTATE,
    VBVAEXHOSTCTL_TYPE_HH_LOADSTATE,
    VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE,
    VBVAEXHOSTCTL_TYPE_HH_BE_OPAQUE,
    VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD,
    VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE,
    VBVAEXHOSTCTL_TYPE_GHH_ENABLE,
    VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED,
    VBVAEXHOSTCTL_TYPE_GHH_DISABLE,
    VBVAEXHOSTCTL_TYPE_GHH_RESIZE
} VBVAEXHOSTCTL_TYPE;
119
120struct VBVAEXHOSTCTL;
121
122typedef DECLCALLBACKPTR(void, PFNVBVAEXHOSTCTL_COMPLETE)(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvComplete);
123
/* A queued control request; sits on GuestCtlList or HostCtlList until handed to the processor. */
typedef struct VBVAEXHOSTCTL
{
    RTLISTNODE Node;            /* List linkage. */
    VBVAEXHOSTCTL_TYPE enmType; /* What kind of control this is. */
    union
    {
        struct
        {
            uint8_t * pu8Cmd;   /* Command payload; saved/restored as an offset from the VRAM base. */
            uint32_t cbCmd;     /* Payload size in bytes. */
        } cmd;

        struct
        {
            PSSMHANDLE pSSM;    /* Saved-state handle for save/load controls. */
            uint32_t u32Version; /* Saved-state version for load controls. */
        } state;
    } u;
    PFNVBVAEXHOSTCTL_COMPLETE pfnComplete; /* Completion callback; when NULL the ctl is just freed. */
    void *pvComplete;           /* User argument for pfnComplete. */
} VBVAEXHOSTCTL;
145
/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
 * but can be called together with other VBoxVBVAExS** (submitter) functions, except Init/Start/Term apparently.
 * They may only be called by the processor, i.e. the entity that acquired the processor state by a direct or indirect call to VBoxVBVAExHSCheckCommands.
 * See more detailed comments in the headers for the function definitions. */
/* What VBoxVBVAExHPDataGet placed in *ppCmd: nothing, a ring-buffer command,
 * or a host/guest control entry. */
typedef enum
{
    VBVAEXHOST_DATA_TYPE_NO_DATA = 0,
    VBVAEXHOST_DATA_TYPE_CMD,      /* *ppCmd points into the VBVA ring buffer */
    VBVAEXHOST_DATA_TYPE_HOSTCTL,  /* *ppCmd is a VBVAEXHOSTCTL from the host list */
    VBVAEXHOST_DATA_TYPE_GUESTCTL  /* *ppCmd is a VBVAEXHOSTCTL from the guest list */
} VBVAEXHOST_DATA_TYPE;
157
158static DECLCALLBACK(int) vdmaVBVANotifyDisable(PVGASTATE pVGAState);
159
160
161static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd);
162
163static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd);
164static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc);
165
/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
 * but can be called concurrently with itself as well as with other VBoxVBVAEx** functions, except Init/Start/Term apparently */
168static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva);
169
170static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva);
171static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA);
172static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva);
173static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva);
174static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM);
175static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version);
176
/* Allocates a control entry, from the per-context memory cache unless caching is compiled out. */
static VBVAEXHOSTCTL* VBoxVBVAExHCtlAlloc(VBVAEXHOSTCONTEXT *pCmdVbva)
{
#ifndef VBOXVDBG_MEMCACHE_DISABLE
    return (VBVAEXHOSTCTL*)RTMemCacheAlloc(pCmdVbva->CtlCache);
#else
    return (VBVAEXHOSTCTL*)RTMemAlloc(sizeof (VBVAEXHOSTCTL));
#endif
}
185
/* Returns a control entry to the cache (or heap); counterpart to VBoxVBVAExHCtlAlloc. */
static void VBoxVBVAExHCtlFree(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl)
{
#ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMemCacheFree(pCmdVbva->CtlCache, pCtl);
#else
    RTMemFree(pCtl);
#endif
}
194
195static VBVAEXHOSTCTL* VBoxVBVAExHCtlCreate(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL_TYPE enmType)
196{
197 VBVAEXHOSTCTL* pCtl = VBoxVBVAExHCtlAlloc(pCmdVbva);
198 if (!pCtl)
199 {
200 WARN(("VBoxVBVAExHCtlAlloc failed\n"));
201 return NULL;
202 }
203
204 pCtl->enmType = enmType;
205 return pCtl;
206}
207
208static int vboxVBVAExHSProcessorAcquire(struct VBVAEXHOSTCONTEXT *pCmdVbva)
209{
210 Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);
211
212 if (ASMAtomicCmpXchgS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_PROCESSING, VBVAEXHOSTCONTEXT_STATE_LISTENING))
213 return VINF_SUCCESS;
214 return VERR_SEM_BUSY;
215}
216
217static VBVAEXHOSTCTL* vboxVBVAExHPCheckCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, bool *pfHostCtl, bool fHostOnlyMode)
218{
219 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
220
221 if(!fHostOnlyMode && !ASMAtomicUoReadU32(&pCmdVbva->u32cCtls))
222 return NULL;
223
224 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
225 if (RT_SUCCESS(rc))
226 {
227 VBVAEXHOSTCTL* pCtl = RTListGetFirst(&pCmdVbva->HostCtlList, VBVAEXHOSTCTL, Node);
228 if (pCtl)
229 *pfHostCtl = true;
230 else if (!fHostOnlyMode)
231 {
232 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
233 {
234 pCtl = RTListGetFirst(&pCmdVbva->GuestCtlList, VBVAEXHOSTCTL, Node);
235 /* pCtl can not be null here since pCmdVbva->u32cCtls is not null,
236 * and there are no HostCtl commands*/
237 Assert(pCtl);
238 *pfHostCtl = false;
239 }
240 }
241
242 if (pCtl)
243 {
244 RTListNodeRemove(&pCtl->Node);
245 ASMAtomicDecU32(&pCmdVbva->u32cCtls);
246 }
247
248 RTCritSectLeave(&pCmdVbva->CltCritSect);
249
250 return pCtl;
251 }
252 else
253 WARN(("RTCritSectEnter failed %d\n", rc));
254
255 return NULL;
256}
257
258static VBVAEXHOSTCTL* VBoxVBVAExHPCheckHostCtlOnDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
259{
260 bool fHostCtl = false;
261 VBVAEXHOSTCTL* pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostCtl, true);
262 Assert(!pCtl || fHostCtl);
263 return pCtl;
264}
265
266static int VBoxVBVAExHPPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
267{
268 if (pCmdVbva->i32EnableState < VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
269 {
270 WARN(("Invalid state\n"));
271 return VERR_INVALID_STATE;
272 }
273
274 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
275 return VINF_SUCCESS;
276}
277
278static int VBoxVBVAExHPResume(struct VBVAEXHOSTCONTEXT *pCmdVbva)
279{
280 if (pCmdVbva->i32EnableState != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
281 {
282 WARN(("Invalid state\n"));
283 return VERR_INVALID_STATE;
284 }
285
286 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
287 return VINF_SUCCESS;
288}
289
290
291static bool vboxVBVAExHPCheckProcessCtlInternal(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl)
292{
293 switch (pCtl->enmType)
294 {
295 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE:
296 {
297 int rc = VBoxVBVAExHPPause(pCmdVbva);
298 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
299 return true;
300 }
301 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME:
302 {
303 int rc = VBoxVBVAExHPResume(pCmdVbva);
304 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
305 return true;
306 }
307 default:
308 return false;
309 }
310}
311
/* Releases processor ownership: switches the context back to LISTENING. */
static void vboxVBVAExHPProcessorRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);

    ASMAtomicWriteS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
}
318
/* Sets the "host is processing" flag in the guest-visible host event mask. */
static void vboxVBVAExHPHgEventSet(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    if (pCmdVbva->pVBVA)
        ASMAtomicOrU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, VBVA_F_STATE_PROCESSING);
}
325
/* Clears the "host is processing" flag in the guest-visible host event mask. */
static void vboxVBVAExHPHgEventClear(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    if (pCmdVbva->pVBVA)
        ASMAtomicAndU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, ~VBVA_F_STATE_PROCESSING);
}
332
/* Fetches the next complete command record from the guest VBVA ring buffer.
 * Must be called by the processor while the context is enabled (not paused/disabled).
 * @returns VINF_SUCCESS with *ppCmd/*pcbCmd set to the in-buffer command,
 *          VINF_EOF when the ring is empty,
 *          VINF_TRY_AGAIN while the guest is still writing the record,
 *          VERR_INVALID_STATE for records crossing the buffer boundary (unsupported). */
static int vboxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pCmdVbva->i32EnableState > VBVAEXHOSTCONTEXT_ESTATE_PAUSED);

    VBVABUFFER *pVBVA = pCmdVbva->pVBVA;

    uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
    uint32_t indexRecordFree = pVBVA->indexRecordFree;

    Log(("first = %d, free = %d\n",
         indexRecordFirst, indexRecordFree));

    if (indexRecordFirst == indexRecordFree)
    {
        /* No records to process. Return without assigning output variables. */
        return VINF_EOF;
    }

    uint32_t cbRecordCurrent = ASMAtomicReadU32(&pVBVA->aRecords[indexRecordFirst].cbRecord);

    /* A new record needs to be processed. */
    if (cbRecordCurrent & VBVA_F_RECORD_PARTIAL)
    {
        /* the record is being recorded, try again */
        return VINF_TRY_AGAIN;
    }

    uint32_t cbRecord = cbRecordCurrent & ~VBVA_F_RECORD_PARTIAL;

    if (!cbRecord)
    {
        /* the record is being recorded, try again */
        return VINF_TRY_AGAIN;
    }

    /* we should not get partial commands here actually */
    Assert(cbRecord);

    /* The size of the largest contiguous chunk in the ring buffer. */
    uint32_t u32BytesTillBoundary = pVBVA->cbData - pVBVA->off32Data;

    /* The pointer to data in the ring buffer. */
    uint8_t *pSrc = &pVBVA->au8Data[pVBVA->off32Data];

    /* Fetch or point the data. */
    if (u32BytesTillBoundary >= cbRecord)
    {
        /* The command does not cross buffer boundary. Return address in the buffer. */
        *ppCmd = pSrc;
        *pcbCmd = cbRecord;
        return VINF_SUCCESS;
    }

    LogRel(("CmdVbva: cross-bound writes unsupported\n"));
    return VERR_INVALID_STATE;
}
390
/* Marks the current ring-buffer command as consumed: advances the data offset past
 * the command payload and moves on to the next record. */
static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd)
{
    VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
    pVBVA->off32Data = (pVBVA->off32Data + cbCmd) % pVBVA->cbData;

    pVBVA->indexRecordFirst = (pVBVA->indexRecordFirst + 1) % RT_ELEMENTS(pVBVA->aRecords);
}
398
399static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc)
400{
401 if (pCtl->pfnComplete)
402 pCtl->pfnComplete(pCmdVbva, pCtl, rc, pCtl->pvComplete);
403 else
404 VBoxVBVAExHCtlFree(pCmdVbva, pCtl);
405}
406
/* Worker for VBoxVBVAExHPDataGet: fetches the next control or ring-buffer command
 * for the processor.  Internal pause/resume controls are executed inline here;
 * everything else is returned to the caller via *ppCmd/*pcbCmd. */
static VBVAEXHOST_DATA_TYPE vboxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    VBVAEXHOSTCTL*pCtl;
    bool fHostClt;

    for(;;)
    {
        pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostClt, false);
        if (pCtl)
        {
            if (fHostClt)
            {
                if (!vboxVBVAExHPCheckProcessCtlInternal(pCmdVbva, pCtl))
                {
                    /* Not an internal control - hand it out to the caller. */
                    *ppCmd = (uint8_t*)pCtl;
                    *pcbCmd = sizeof (*pCtl);
                    return VBVAEXHOST_DATA_TYPE_HOSTCTL;
                }
                /* Internal control was handled inline; look for the next item. */
                continue;
            }
            else
            {
                *ppCmd = (uint8_t*)pCtl;
                *pcbCmd = sizeof (*pCtl);
                return VBVAEXHOST_DATA_TYPE_GUESTCTL;
            }
        }

        /* Ring-buffer commands are only fetched while enabled (not paused/disabled). */
        if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) <= VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
            return VBVAEXHOST_DATA_TYPE_NO_DATA;

        int rc = vboxVBVAExHPCmdGet(pCmdVbva, ppCmd, pcbCmd);
        switch (rc)
        {
            case VINF_SUCCESS:
                return VBVAEXHOST_DATA_TYPE_CMD;
            case VINF_EOF:
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
            case VINF_TRY_AGAIN:
                /* The guest is still writing the record; back off briefly and retry. */
                RTThreadSleep(1);
                continue;
            default:
                /* this is something really unexpected, i.e. most likely guest has written something incorrect to the VBVA buffer */
                WARN(("Warning: vboxVBVAExHCmdGet returned unexpected status %d\n", rc));
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
        }
    }

    /* not reached - the loop above only exits via return */
    WARN(("Warning: VBoxVBVAExHCmdGet unexpected state\n"));
    return VBVAEXHOST_DATA_TYPE_NO_DATA;
}
459
/* Fetches the next item for the processor; on "no data" it releases processor
 * ownership and clears the processing flag, then re-checks to close the race
 * with concurrent submitters (see the numbered comment below). */
static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    VBVAEXHOST_DATA_TYPE enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
    if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
    {
        vboxVBVAExHPHgEventClear(pCmdVbva);
        vboxVBVAExHPProcessorRelease(pCmdVbva);
        /* we need to prevent racing between us clearing the flag and command check/submission thread, i.e.
         * 1. we check the queue -> and it is empty
         * 2. submitter adds command to the queue
         * 3. submitter checks the "processing" -> and it is true , thus it does not submit a notification
         * 4. we clear the "processing" state
         * 5. ->here we need to re-check the queue state to ensure we do not leak the notification of the above command
         * 6. if the queue appears to be not-empty set the "processing" state back to "true"
         **/
        int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
        if (RT_SUCCESS(rc))
        {
            /* we are the processor now */
            enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
            if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
            {
                vboxVBVAExHPProcessorRelease(pCmdVbva);
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
            }

            vboxVBVAExHPHgEventSet(pCmdVbva);
        }
    }

    return enmType;
}
492
493DECLINLINE(bool) vboxVBVAExHSHasCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
494{
495 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
496
497 if (pVBVA)
498 {
499 uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
500 uint32_t indexRecordFree = pVBVA->indexRecordFree;
501
502 if (indexRecordFirst != indexRecordFree)
503 return true;
504 }
505
506 return !!ASMAtomicReadU32(&pCmdVbva->u32cCtls);
507}
508
/* Checks whether new commands are ready for processing
 * @returns
 *   VINF_SUCCESS - there are commands in the queue, and the calling thread is now the processor (i.e. typically it would delegate processing to a worker thread)
 *   VINF_EOF - no commands in the queue
 *   VINF_ALREADY_INITIALIZED - another thread is already processing the commands
 *   VERR_INVALID_STATE - the VBVA is paused or pausing */
515static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
516{
517 int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
518 if (RT_SUCCESS(rc))
519 {
520 /* we are the processor now */
521 if (vboxVBVAExHSHasCommands(pCmdVbva))
522 {
523 vboxVBVAExHPHgEventSet(pCmdVbva);
524 return VINF_SUCCESS;
525 }
526
527 vboxVBVAExHPProcessorRelease(pCmdVbva);
528 return VINF_EOF;
529 }
530 if (rc == VERR_SEM_BUSY)
531 return VINF_ALREADY_INITIALIZED;
532 return VERR_INVALID_STATE;
533}
534
/* Initializes the command context: control-list critical section, both control
 * lists, and (unless compiled out) the control-entry memory cache.  The context
 * starts in PROCESSING state with VBVA disabled. */
static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
    int rc = RTCritSectInit(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
#ifndef VBOXVDBG_MEMCACHE_DISABLE
        rc = RTMemCacheCreate(&pCmdVbva->CtlCache, sizeof (VBVAEXHOSTCTL),
                              0, /* size_t cbAlignment */
                              UINT32_MAX, /* uint32_t cMaxObjects */
                              NULL, /* PFNMEMCACHECTOR pfnCtor*/
                              NULL, /* PFNMEMCACHEDTOR pfnDtor*/
                              NULL, /* void *pvUser*/
                              0 /* uint32_t fFlags*/
                              );
        if (RT_SUCCESS(rc))
#endif
        {
            RTListInit(&pCmdVbva->GuestCtlList);
            RTListInit(&pCmdVbva->HostCtlList);
            pCmdVbva->i32State = VBVAEXHOSTCONTEXT_STATE_PROCESSING;
            pCmdVbva->i32EnableState = VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
            return VINF_SUCCESS;
        }
#ifndef VBOXVDBG_MEMCACHE_DISABLE
        else
            WARN(("RTMemCacheCreate failed %d\n", rc));
#endif
    }
    else
        WARN(("RTCritSectInit failed %d\n", rc));

    return rc;
}
569
570DECLINLINE(bool) VBoxVBVAExHSIsEnabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
571{
572 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) >= VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
573}
574
575DECLINLINE(bool) VBoxVBVAExHSIsDisabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
576{
577 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) == VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
578}
579
580static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA)
581{
582 if (VBoxVBVAExHSIsEnabled(pCmdVbva))
583 {
584 WARN(("VBVAEx is enabled already\n"));
585 return VERR_INVALID_STATE;
586 }
587
588 pCmdVbva->pVBVA = pVBVA;
589 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
590 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
591 return VINF_SUCCESS;
592}
593
594static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
595{
596 if (VBoxVBVAExHSIsDisabled(pCmdVbva))
597 return VINF_SUCCESS;
598
599 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
600 return VINF_SUCCESS;
601}
602
/* Terminates the command context; counterpart to VBoxVBVAExHSInit.
 * The caller must have stopped the processor and drained both control lists. */
static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    /* ensure the processor is stopped */
    Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);

    /* ensure no one tries to submit the command */
    if (pCmdVbva->pVBVA)
        pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;

    Assert(RTListIsEmpty(&pCmdVbva->GuestCtlList));
    Assert(RTListIsEmpty(&pCmdVbva->HostCtlList));

    RTCritSectDelete(&pCmdVbva->CltCritSect);

#ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMemCacheDestroy(pCmdVbva->CtlCache);
#endif

    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
}
623
/* Saves one guest control entry: its type, command size, and the command pointer
 * stored as an offset relative to the VRAM base (re-based on load). */
static int vboxVBVAExHSSaveGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
{
    int rc = SSMR3PutU32(pSSM, pCtl->enmType);
    AssertRCReturn(rc, rc);
    rc = SSMR3PutU32(pSSM, pCtl->u.cmd.cbCmd);
    AssertRCReturn(rc, rc);
    rc = SSMR3PutU32(pSSM, (uint32_t)(pCtl->u.cmd.pu8Cmd - pu8VramBase));
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}
635
/* Worker for VBoxVBVAExHSSaveState; caller holds CltCritSect.
 * Saves every queued guest control followed by a zero terminator
 * (consumed by vboxVBVAExHSLoadGuestCtl).  The VBVA must be paused. */
static int vboxVBVAExHSSaveStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
{
    if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    {
        WARN(("vbva not paused\n"));
        return VERR_INVALID_STATE;
    }

    VBVAEXHOSTCTL* pCtl;
    int rc;
    RTListForEach(&pCmdVbva->GuestCtlList, pCtl, VBVAEXHOSTCTL, Node)
    {
        rc = vboxVBVAExHSSaveGuestCtl(pCmdVbva, pCtl, pu8VramBase, pSSM);
        AssertRCReturn(rc, rc);
    }

    /* zero marks the end of the control list */
    rc = SSMR3PutU32(pSSM, 0);
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}
/* Saves state
 * @returns - same as VBoxVBVAExHSCheckCommands, or failure if saving the state fails
 */
660static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
661{
662 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
663 if (RT_FAILURE(rc))
664 {
665 WARN(("RTCritSectEnter failed %d\n", rc));
666 return rc;
667 }
668
669 rc = vboxVBVAExHSSaveStateLocked(pCmdVbva, pu8VramBase, pSSM);
670 if (RT_FAILURE(rc))
671 WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
672
673 RTCritSectLeave(&pCmdVbva->CltCritSect);
674
675 return rc;
676}
677
678static int vboxVBVAExHSLoadGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
679{
680 uint32_t u32;
681 int rc = SSMR3GetU32(pSSM, &u32);
682 AssertRCReturn(rc, rc);
683
684 if (!u32)
685 return VINF_EOF;
686
687 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(pCmdVbva, (VBVAEXHOSTCTL_TYPE)u32);
688 if (!pHCtl)
689 {
690 WARN(("VBoxVBVAExHCtlCreate failed\n"));
691 return VERR_NO_MEMORY;
692 }
693
694 rc = SSMR3GetU32(pSSM, &u32);
695 AssertRCReturn(rc, rc);
696 pHCtl->u.cmd.cbCmd = u32;
697
698 rc = SSMR3GetU32(pSSM, &u32);
699 AssertRCReturn(rc, rc);
700 pHCtl->u.cmd.pu8Cmd = pu8VramBase + u32;
701
702 RTListAppend(&pCmdVbva->GuestCtlList, &pHCtl->Node);
703 ++pCmdVbva->u32cCtls;
704
705 return VINF_SUCCESS;
706}
707
708
709static int vboxVBVAExHSLoadStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
710{
711 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
712 {
713 WARN(("vbva not stopped\n"));
714 return VERR_INVALID_STATE;
715 }
716
717 int rc;
718
719 do {
720 rc = vboxVBVAExHSLoadGuestCtl(pCmdVbva, pu8VramBase, pSSM, u32Version);
721 AssertRCReturn(rc, rc);
722 } while (VINF_EOF != rc);
723
724 return VINF_SUCCESS;
725}
726
727/* Loads state
728 * @returns - same as VBoxVBVAExHSCheckCommands, or failure on load state fail
729 */
730static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
731{
732 Assert(VGA_SAVEDSTATE_VERSION_3D <= u32Version);
733 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
734 if (RT_FAILURE(rc))
735 {
736 WARN(("RTCritSectEnter failed %d\n", rc));
737 return rc;
738 }
739
740 rc = vboxVBVAExHSLoadStateLocked(pCmdVbva, pu8VramBase, pSSM, u32Version);
741 if (RT_FAILURE(rc))
742 WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
743
744 RTCritSectLeave(&pCmdVbva->CltCritSect);
745
746 return rc;
747}
748
/* Origin of a submitted control: determines which list it is queued on. */
typedef enum
{
    VBVAEXHOSTCTL_SOURCE_GUEST = 0,
    VBVAEXHOSTCTL_SOURCE_HOST
} VBVAEXHOSTCTL_SOURCE;
754
755
756static int VBoxVBVAExHCtlSubmit(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
757{
758 if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
759 {
760 Log(("cmd vbva not enabled\n"));
761 return VERR_INVALID_STATE;
762 }
763
764 pCtl->pfnComplete = pfnComplete;
765 pCtl->pvComplete = pvComplete;
766
767 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
768 if (RT_SUCCESS(rc))
769 {
770 if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
771 {
772 Log(("cmd vbva not enabled\n"));
773 RTCritSectLeave(&pCmdVbva->CltCritSect);
774 return VERR_INVALID_STATE;
775 }
776
777 if (enmSource > VBVAEXHOSTCTL_SOURCE_GUEST)
778 {
779 RTListAppend(&pCmdVbva->HostCtlList, &pCtl->Node);
780 }
781 else
782 RTListAppend(&pCmdVbva->GuestCtlList, &pCtl->Node);
783
784 ASMAtomicIncU32(&pCmdVbva->u32cCtls);
785
786 RTCritSectLeave(&pCmdVbva->CltCritSect);
787
788 rc = VBoxVBVAExHSCheckCommands(pCmdVbva);
789 }
790 else
791 WARN(("RTCritSectEnter failed %d\n", rc));
792
793 return rc;
794}
795
796#ifdef VBOX_WITH_CRHGSMI
/* Per-source screen info plus its target-screen map.
 * NOTE(review): only referenced from a commented-out VBOXVDMAHOST member below - confirm usage. */
typedef struct VBOXVDMA_SOURCE
{
    VBVAINFOSCREEN Screen;
    VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
} VBOXVDMA_SOURCE;
802#endif
803
/* VDMA host instance data. */
typedef struct VBOXVDMAHOST
{
    PHGSMIINSTANCE pHgsmi;    /* HGSMI instance used for command traffic. */
    PVGASTATE pVGAState;      /* Owning VGA device state. */
#ifdef VBOX_WITH_CRHGSMI
    VBVAEXHOSTCONTEXT CmdVbva; /* The command-VBVA context. */
    VBOXVDMATHREAD Thread;     /* Worker thread state. */
    VBOXCRCMD_SVRINFO CrSrvInfo; /* Chromium command server info. */
    VBVAEXHOSTCTL* pCurRemainingHostCtl; /* NOTE(review): appears to track a host ctl pending completion - confirm in the HGCM-unload path */
    RTSEMEVENTMULTI HostCrCtlCompleteEvent; /* Multi-release event for host CrCtl completion. */
    int32_t volatile i32cHostCrCtlCompleted; /* Completion counter, accessed atomically. */
    RTCRITSECT CalloutCritSect; /* Protects callout processing (see vboxCmdVBVACmdCallout). */
// VBOXVDMA_SOURCE aSources[VBOX_VIDEO_MAX_SCREENS];
#endif
#ifdef VBOX_VDMA_WITH_WATCHDOG
    PTMTIMERR3 WatchDogTimer; /* Watchdog timer handle. */
#endif
} VBOXVDMAHOST, *PVBOXVDMAHOST;
822
823#ifdef VBOX_WITH_CRHGSMI
824
825void VBoxVDMAThreadNotifyConstructSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
826{
827 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_CREATING);
828 PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
829 void *pvChanged = pThread->pvChanged;
830
831 pThread->pfnChanged = NULL;
832 pThread->pvChanged = NULL;
833
834 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_CREATED);
835
836 if (pfnChanged)
837 pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
838}
839
840void VBoxVDMAThreadNotifyTerminatingSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
841{
842 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_TERMINATING);
843 PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
844 void *pvChanged = pThread->pvChanged;
845
846 pThread->pfnChanged = NULL;
847 pThread->pvChanged = NULL;
848
849 if (pfnChanged)
850 pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
851}
852
853DECLINLINE(bool) VBoxVDMAThreadIsTerminating(PVBOXVDMATHREAD pThread)
854{
855 return ASMAtomicUoReadU32(&pThread->u32State) == VBOXVDMATHREAD_STATE_TERMINATING;
856}
857
/* Puts the thread structure into its initial (terminated, all-zero) state. */
void VBoxVDMAThreadInit(PVBOXVDMATHREAD pThread)
{
    memset(pThread, 0, sizeof (*pThread));
    pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
}
863
864int VBoxVDMAThreadCleanup(PVBOXVDMATHREAD pThread)
865{
866 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
867 switch (u32State)
868 {
869 case VBOXVDMATHREAD_STATE_TERMINATED:
870 return VINF_SUCCESS;
871 case VBOXVDMATHREAD_STATE_TERMINATING:
872 {
873 int rc = RTThreadWait(pThread->hWorkerThread, RT_INDEFINITE_WAIT, NULL);
874 if (!RT_SUCCESS(rc))
875 {
876 WARN(("RTThreadWait failed %d\n", rc));
877 return rc;
878 }
879
880 RTSemEventDestroy(pThread->hEvent);
881
882 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATED);
883 return VINF_SUCCESS;
884 }
885 default:
886 WARN(("invalid state"));
887 return VERR_INVALID_STATE;
888 }
889}
890
891int VBoxVDMAThreadCreate(PVBOXVDMATHREAD pThread, PFNRTTHREAD pfnThread, void *pvThread, PFNVBOXVDMATHREAD_CHANGED pfnCreated, void*pvCreated)
892{
893 int rc = VBoxVDMAThreadCleanup(pThread);
894 if (RT_FAILURE(rc))
895 {
896 WARN(("VBoxVDMAThreadCleanup failed %d\n", rc));
897 return rc;
898 }
899
900 rc = RTSemEventCreate(&pThread->hEvent);
901 if (RT_SUCCESS(rc))
902 {
903 pThread->u32State = VBOXVDMATHREAD_STATE_CREATING;
904 pThread->pfnChanged = pfnCreated;
905 pThread->pvChanged = pvCreated;
906 rc = RTThreadCreate(&pThread->hWorkerThread, pfnThread, pvThread, 0, RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "VDMA");
907 if (RT_SUCCESS(rc))
908 return VINF_SUCCESS;
909 else
910 WARN(("RTThreadCreate failed %d\n", rc));
911
912 RTSemEventDestroy(pThread->hEvent);
913 }
914 else
915 WARN(("RTSemEventCreate failed %d\n", rc));
916
917 pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
918
919 return rc;
920}
921
/* Signals the worker thread's event semaphore (wake it up). */
DECLINLINE(int) VBoxVDMAThreadEventNotify(PVBOXVDMATHREAD pThread)
{
    int rc = RTSemEventSignal(pThread->hEvent);
    AssertRC(rc);
    return rc;
}
928
/* Blocks on the worker thread's event semaphore for up to cMillies. */
DECLINLINE(int) VBoxVDMAThreadEventWait(PVBOXVDMATHREAD pThread, RTMSINTERVAL cMillies)
{
    int rc = RTSemEventWait(pThread->hEvent, cMillies);
    AssertRC(rc);
    return rc;
}
935
936int VBoxVDMAThreadTerm(PVBOXVDMATHREAD pThread, PFNVBOXVDMATHREAD_CHANGED pfnTerminated, void*pvTerminated, bool fNotify)
937{
938 int rc;
939 do
940 {
941 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
942 switch (u32State)
943 {
944 case VBOXVDMATHREAD_STATE_CREATED:
945 pThread->pfnChanged = pfnTerminated;
946 pThread->pvChanged = pvTerminated;
947 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATING);
948 if (fNotify)
949 {
950 rc = VBoxVDMAThreadEventNotify(pThread);
951 AssertRC(rc);
952 }
953 return VINF_SUCCESS;
954 case VBOXVDMATHREAD_STATE_TERMINATING:
955 case VBOXVDMATHREAD_STATE_TERMINATED:
956 {
957 WARN(("thread is marked to termination or terminated\nn"));
958 return VERR_INVALID_STATE;
959 }
960 case VBOXVDMATHREAD_STATE_CREATING:
961 {
962 /* wait till the thread creation is completed */
963 WARN(("concurrent thread create/destron\n"));
964 RTThreadYield();
965 continue;
966 }
967 default:
968 WARN(("invalid state"));
969 return VERR_INVALID_STATE;
970 }
971 } while (1);
972
973 WARN(("should never be here\n"));
974 return VERR_INTERNAL_ERROR;
975}
976
977static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource);
978
979typedef DECLCALLBACK(void) FNVBOXVDMACRCTL_CALLBACK(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext);
980typedef FNVBOXVDMACRCTL_CALLBACK *PFNVBOXVDMACRCTL_CALLBACK;
981
/* Refcounted private header prepended to every chromium control allocation;
 * callers only ever see the embedded Cmd member (recovered via the _FROM_CTL macro). */
typedef struct VBOXVDMACMD_CHROMIUM_CTL_PRIVATE
{
    uint32_t cRefs;                          /* Reference count; the allocation is freed when it reaches zero. */
    int32_t rc;                              /* Completion status; VERR_NOT_IMPLEMENTED until set. */
    PFNVBOXVDMACRCTL_CALLBACK pfnCompletion; /* Completion callback. */
    void *pvCompletion;                      /* User argument for pfnCompletion. */
    VBOXVDMACMD_CHROMIUM_CTL Cmd;            /* The public command; must stay last (payload follows). */
} VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, *PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE;
990
991#define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(_p) ((PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)(((uint8_t*)(_p)) - RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd)))
992
993static PVBOXVDMACMD_CHROMIUM_CTL vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE enmCmd, uint32_t cbCmd)
994{
995 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)RTMemAllocZ(cbCmd + RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd));
996 Assert(pHdr);
997 if (pHdr)
998 {
999 pHdr->cRefs = 1;
1000 pHdr->rc = VERR_NOT_IMPLEMENTED;
1001 pHdr->Cmd.enmType = enmCmd;
1002 pHdr->Cmd.cbCmd = cbCmd;
1003 return &pHdr->Cmd;
1004 }
1005
1006 return NULL;
1007}
1008
1009DECLINLINE(void) vboxVDMACrCtlRelease (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1010{
1011 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1012 uint32_t cRefs = ASMAtomicDecU32(&pHdr->cRefs);
1013 if(!cRefs)
1014 {
1015 RTMemFree(pHdr);
1016 }
1017}
1018
1019DECLINLINE(void) vboxVDMACrCtlRetain (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1020{
1021 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1022 ASMAtomicIncU32(&pHdr->cRefs);
1023}
1024
1025DECLINLINE(int) vboxVDMACrCtlGetRc (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1026{
1027 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1028 return pHdr->rc;
1029}
1030
/* Completion callback: signals the event semaphore passed as context
 * (used by vboxVDMACrCtlPost for synchronous submission). */
static DECLCALLBACK(void) vboxVDMACrCtlCbSetEvent(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
{
    RTSemEventSignal((RTSEMEVENT)pvContext);
}
1035
1036static DECLCALLBACK(void) vboxVDMACrCtlCbReleaseCmd(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
1037{
1038 vboxVDMACrCtlRelease(pCmd);
1039}
1040
1041
1042static int vboxVDMACrCtlPostAsync (PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd, PFNVBOXVDMACRCTL_CALLBACK pfnCompletion, void *pvCompletion)
1043{
1044 if ( pVGAState->pDrv
1045 && pVGAState->pDrv->pfnCrHgsmiControlProcess)
1046 {
1047 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1048 pHdr->pfnCompletion = pfnCompletion;
1049 pHdr->pvCompletion = pvCompletion;
1050 pVGAState->pDrv->pfnCrHgsmiControlProcess(pVGAState->pDrv, pCmd, cbCmd);
1051 return VINF_SUCCESS;
1052 }
1053#ifdef DEBUG_misha
1054 Assert(0);
1055#endif
1056 return VERR_NOT_SUPPORTED;
1057}
1058
/**
 * Submits a chromium control command and waits for its completion.
 *
 * Creates a temporary event semaphore, posts the command with
 * vboxVDMACrCtlCbSetEvent as the completion callback, and blocks until the
 * callback signals the event.
 *
 * @returns IPRT status of the create/submit/wait step that failed, or success.
 */
static int vboxVDMACrCtlPost(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd)
{
    RTSEMEVENT hComplEvent;
    int rc = RTSemEventCreate(&hComplEvent);
    AssertRC(rc);
    if(RT_SUCCESS(rc))
    {
        rc = vboxVDMACrCtlPostAsync(pVGAState, pCmd, cbCmd, vboxVDMACrCtlCbSetEvent, (void*)hComplEvent);
#ifdef DEBUG_misha
        AssertRC(rc);
#endif
        if (RT_SUCCESS(rc))
        {
            rc = RTSemEventWaitNoResume(hComplEvent, RT_INDEFINITE_WAIT);
            AssertRC(rc);
            if(RT_SUCCESS(rc))
            {
                RTSemEventDestroy(hComplEvent);
            }
            /* NOTE: if the wait is interrupted (rc != success) the semaphore is
             * deliberately NOT destroyed here: the command may still be pending
             * and its completion callback would otherwise signal a destroyed
             * handle. The semaphore is leaked in that (rare) case. */
        }
        else
        {
            /* the command is completed */
            RTSemEventDestroy(hComplEvent);
        }
    }
    return rc;
}
1087
/* Context shared between vboxVDMACrHgcmSubmitSync() and its completion
 * callback.  (Name keeps the historical "CYNC" spelling; renaming would touch
 * all users.) */
typedef struct VDMA_VBVA_CTL_CYNC_COMPLETION
{
    int rc;            /* status reported by the completion callback */
    RTSEMEVENT hEvent; /* signalled by the callback to wake the submitter */
} VDMA_VBVA_CTL_CYNC_COMPLETION;
1093
1094static DECLCALLBACK(void) vboxVDMACrHgcmSubmitSyncCompletion(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
1095{
1096 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvCompletion;
1097 pData->rc = rc;
1098 rc = RTSemEventSignal(pData->hEvent);
1099 if (!RT_SUCCESS(rc))
1100 WARN(("RTSemEventSignal failed %d\n", rc));
1101}
1102
/**
 * Submits a VBOXCRCMDCTL via the HGCM control channel and waits for the
 * completion callback to deliver the command status.
 *
 * @returns submit/wait failure status, otherwise the status reported by the
 *          completion callback (Data.rc).
 */
static int vboxVDMACrHgcmSubmitSync(struct VBOXVDMAHOST *pVdma, VBOXCRCMDCTL* pCtl, uint32_t cbCtl)
{
    VDMA_VBVA_CTL_CYNC_COMPLETION Data;
    Data.rc = VERR_NOT_IMPLEMENTED;
    int rc = RTSemEventCreate(&Data.hEvent);
    if (!RT_SUCCESS(rc))
    {
        WARN(("RTSemEventCreate failed %d\n", rc));
        return rc;
    }

    /* reset the callout chain before handing the control to the backend */
    pCtl->CalloutList.List.pNext = NULL;

    PVGASTATE pVGAState = pVdma->pVGAState;
    rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCtl, cbCtl, vboxVDMACrHgcmSubmitSyncCompletion, &Data);
    if (RT_SUCCESS(rc))
    {
        rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
        if (RT_SUCCESS(rc))
        {
            /* propagate the status the completion callback stored */
            rc = Data.rc;
            if (!RT_SUCCESS(rc))
            {
                WARN(("pfnCrHgcmCtlSubmit command failed %d\n", rc));
            }

        }
        else
            WARN(("RTSemEventWait failed %d\n", rc));
        /* NOTE(review): if RTSemEventWait itself fails while the command is
         * still pending, the event is destroyed below while the completion
         * callback may still reference it — confirm the wait cannot fail
         * spuriously on the paths this is used from. */
    }
    else
        WARN(("pfnCrHgcmCtlSubmit failed %d\n", rc));


    RTSemEventDestroy(Data.hEvent);

    return rc;
}
1141
1142static int vdmaVBVACtlDisableSync(PVBOXVDMAHOST pVdma)
1143{
1144 VBVAEXHOSTCTL HCtl;
1145 HCtl.enmType = VBVAEXHOSTCTL_TYPE_GHH_DISABLE;
1146 int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1147 if (RT_FAILURE(rc))
1148 {
1149 Log(("vdmaVBVACtlSubmitSync failed %d\n", rc));
1150 return rc;
1151 }
1152
1153 vgaUpdateDisplayAll(pVdma->pVGAState, /* fFailOnResize = */ false);
1154
1155 return VINF_SUCCESS;
1156}
1157
1158static DECLCALLBACK(uint8_t*) vboxVDMACrHgcmHandleEnableRemainingHostCommand(HVBOXCRCMDCTL_REMAINING_HOST_COMMAND hClient, uint32_t *pcbCtl, int prevCmdRc)
1159{
1160 struct VBOXVDMAHOST *pVdma = hClient;
1161 if (!pVdma->pCurRemainingHostCtl)
1162 {
1163 /* disable VBVA, all subsequent host commands will go HGCM way */
1164 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1165 }
1166 else
1167 {
1168 VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pVdma->pCurRemainingHostCtl, prevCmdRc);
1169 }
1170
1171 pVdma->pCurRemainingHostCtl = VBoxVBVAExHPCheckHostCtlOnDisable(&pVdma->CmdVbva);
1172 if (pVdma->pCurRemainingHostCtl)
1173 {
1174 *pcbCtl = pVdma->pCurRemainingHostCtl->u.cmd.cbCmd;
1175 return pVdma->pCurRemainingHostCtl->u.cmd.pu8Cmd;
1176 }
1177
1178 *pcbCtl = 0;
1179 return NULL;
1180}
1181
/**
 * Notification that HGCM termination handling has completed; only sanity
 * checks the expected VBVA/thread states (no work to do here).
 */
static DECLCALLBACK(void) vboxVDMACrHgcmNotifyTerminatingDoneCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient)
{
    struct VBOXVDMAHOST *pVdma = hClient;
    Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pVdma->Thread.u32State == VBOXVDMATHREAD_STATE_TERMINATING);
}
1188
1189static DECLCALLBACK(int) vboxVDMACrHgcmNotifyTerminatingCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient, VBOXCRCMDCTL_HGCMENABLE_DATA *pHgcmEnableData)
1190{
1191 struct VBOXVDMAHOST *pVdma = hClient;
1192 VBVAEXHOSTCTL HCtl;
1193 HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD;
1194 int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1195
1196 pHgcmEnableData->hRHCmd = pVdma;
1197 pHgcmEnableData->pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1198
1199 if (RT_FAILURE(rc))
1200 {
1201 if (rc == VERR_INVALID_STATE)
1202 rc = VINF_SUCCESS;
1203 else
1204 WARN(("vdmaVBVACtlSubmitSync failed %d\n", rc));
1205 }
1206
1207 return rc;
1208}
1209
1210static int vboxVDMACrHgcmHandleEnable(struct VBOXVDMAHOST *pVdma)
1211{
1212 VBOXCRCMDCTL_ENABLE Enable;
1213 Enable.Hdr.enmType = VBOXCRCMDCTL_TYPE_ENABLE;
1214 Enable.Data.hRHCmd = pVdma;
1215 Enable.Data.pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1216
1217 int rc = vboxVDMACrHgcmSubmitSync(pVdma, &Enable.Hdr, sizeof (Enable));
1218 Assert(!pVdma->pCurRemainingHostCtl);
1219 if (RT_SUCCESS(rc))
1220 {
1221 Assert(!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1222 return VINF_SUCCESS;
1223 }
1224
1225 Assert(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1226 WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));
1227
1228 return rc;
1229}
1230
1231static int vdmaVBVAEnableProcess(struct VBOXVDMAHOST *pVdma, uint32_t u32Offset)
1232{
1233 if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1234 {
1235 WARN(("vdma VBVA is already enabled\n"));
1236 return VERR_INVALID_STATE;
1237 }
1238
1239 VBVABUFFER *pVBVA = (VBVABUFFER *)HGSMIOffsetToPointerHost(pVdma->pHgsmi, u32Offset);
1240 if (!pVBVA)
1241 {
1242 WARN(("invalid offset %d\n", u32Offset));
1243 return VERR_INVALID_PARAMETER;
1244 }
1245
1246 if (!pVdma->CrSrvInfo.pfnEnable)
1247 {
1248#ifdef DEBUG_misha
1249 WARN(("pfnEnable is NULL\n"));
1250 return VERR_NOT_SUPPORTED;
1251#endif
1252 }
1253
1254 int rc = VBoxVBVAExHSEnable(&pVdma->CmdVbva, pVBVA);
1255 if (RT_SUCCESS(rc))
1256 {
1257 VBOXCRCMDCTL_DISABLE Disable;
1258 Disable.Hdr.enmType = VBOXCRCMDCTL_TYPE_DISABLE;
1259 Disable.Data.hNotifyTerm = pVdma;
1260 Disable.Data.pfnNotifyTerm = vboxVDMACrHgcmNotifyTerminatingCb;
1261 Disable.Data.pfnNotifyTermDone = vboxVDMACrHgcmNotifyTerminatingDoneCb;
1262 rc = vboxVDMACrHgcmSubmitSync(pVdma, &Disable.Hdr, sizeof (Disable));
1263 if (RT_SUCCESS(rc))
1264 {
1265 PVGASTATE pVGAState = pVdma->pVGAState;
1266 VBOXCRCMD_SVRENABLE_INFO Info;
1267 Info.hCltScr = pVGAState->pDrv;
1268 Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
1269 Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
1270 Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
1271 rc = pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
1272 if (RT_SUCCESS(rc))
1273 return VINF_SUCCESS;
1274 else
1275 WARN(("pfnEnable failed %d\n", rc));
1276
1277 vboxVDMACrHgcmHandleEnable(pVdma);
1278 }
1279 else
1280 WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));
1281
1282 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1283 }
1284 else
1285 WARN(("VBoxVBVAExHSEnable failed %d\n", rc));
1286
1287 return rc;
1288}
1289
/**
 * Disables the command VBVA path.
 *
 * @param fDoHgcmEnable  when true, the HGCM command path is re-enabled after
 *                       the chromium server disable (with rollback to the
 *                       VBVA server enable if that fails).
 * @returns VBox status code; VINF_SUCCESS if VBVA was already disabled.
 */
static int vdmaVBVADisableProcess(struct VBOXVDMAHOST *pVdma, bool fDoHgcmEnable)
{
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    {
        Log(("vdma VBVA is already disabled\n"));
        return VINF_SUCCESS;
    }

    /* NOTE(review): assumes CrSrvInfo.pfnDisable is non-NULL whenever VBVA is
     * enabled — confirm against vdmaVBVAEnableProcess()/setup. */
    int rc = pVdma->CrSrvInfo.pfnDisable(pVdma->CrSrvInfo.hSvr);
    if (RT_SUCCESS(rc))
    {
        if (fDoHgcmEnable)
        {
            PVGASTATE pVGAState = pVdma->pVGAState;

            /* disable is a bit tricky
             * we need to ensure the host ctl commands do not come out of order
             * and do not come over HGCM channel until after it is enabled */
            rc = vboxVDMACrHgcmHandleEnable(pVdma);
            if (RT_SUCCESS(rc))
            {
                vdmaVBVANotifyDisable(pVGAState);
                return VINF_SUCCESS;
            }

            /* rollback: the HGCM enable failed, so re-enable the chromium
             * server on the VBVA path (best effort, rc intentionally ignored) */
            VBOXCRCMD_SVRENABLE_INFO Info;
            Info.hCltScr = pVGAState->pDrv;
            Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
            Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
            Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
            pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
        }
    }
    else
        WARN(("pfnDisable failed %d\n", rc));

    return rc;
}
1328
1329static int vboxVDMACrHostCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd, bool *pfContinue)
1330{
1331 *pfContinue = true;
1332
1333 switch (pCmd->enmType)
1334 {
1335 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1336 {
1337 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1338 {
1339 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1340 return VERR_INVALID_STATE;
1341 }
1342 return pVdma->CrSrvInfo.pfnHostCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1343 }
1344 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1345 {
1346 int rc = vdmaVBVADisableProcess(pVdma, true);
1347 if (RT_FAILURE(rc))
1348 {
1349 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1350 return rc;
1351 }
1352
1353 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
1354 }
1355 case VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD:
1356 {
1357 int rc = vdmaVBVADisableProcess(pVdma, false);
1358 if (RT_FAILURE(rc))
1359 {
1360 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1361 return rc;
1362 }
1363
1364 rc = VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, true);
1365 if (RT_FAILURE(rc))
1366 {
1367 WARN(("VBoxVDMAThreadTerm failed %d\n", rc));
1368 return rc;
1369 }
1370
1371 *pfContinue = false;
1372 return VINF_SUCCESS;
1373 }
1374 case VBVAEXHOSTCTL_TYPE_HH_SAVESTATE:
1375 {
1376 PVGASTATE pVGAState = pVdma->pVGAState;
1377 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1378 int rc = VBoxVBVAExHSSaveState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM);
1379 if (RT_FAILURE(rc))
1380 {
1381 WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
1382 return rc;
1383 }
1384 return pVdma->CrSrvInfo.pfnSaveState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM);
1385 }
1386 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE:
1387 {
1388 PVGASTATE pVGAState = pVdma->pVGAState;
1389 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1390
1391 int rc = VBoxVBVAExHSLoadState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1392 if (RT_FAILURE(rc))
1393 {
1394 WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
1395 return rc;
1396 }
1397
1398 rc = pVdma->CrSrvInfo.pfnLoadState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1399 if (RT_FAILURE(rc))
1400 {
1401 WARN(("pfnLoadState failed %d\n", rc));
1402 return rc;
1403 }
1404
1405 return VINF_SUCCESS;
1406 }
1407 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE:
1408 {
1409 PVGASTATE pVGAState = pVdma->pVGAState;
1410
1411 for (uint32_t i = 0; i < pVGAState->cMonitors; ++i)
1412 {
1413 VBVAINFOSCREEN CurScreen;
1414 VBVAINFOVIEW CurView;
1415
1416 int rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
1417 if (RT_FAILURE(rc))
1418 {
1419 WARN(("VBVAGetInfoViewAndScreen failed %d\n", rc));
1420 return rc;
1421 }
1422
1423 rc = VBVAInfoScreen(pVGAState, &CurScreen);
1424 if (RT_FAILURE(rc))
1425 {
1426 WARN(("VBVAInfoScreen failed %d\n", rc));
1427 return rc;
1428 }
1429 }
1430
1431 return VINF_SUCCESS;
1432 }
1433 default:
1434 WARN(("unexpected host ctl type %d\n", pCmd->enmType));
1435 return VERR_INVALID_PARAMETER;
1436 }
1437}
1438
/**
 * Validates and normalizes guest-supplied screen info.
 *
 * For a disable request the structure is reset to a canonical disabled
 * screen; otherwise the geometry is range-checked against the monitor count
 * and VRAM size.
 *
 * @returns VINF_SUCCESS if the (possibly rewritten) info is usable,
 *          VERR_INVALID_PARAMETER otherwise.
 */
static int vboxVDMASetupScreenInfo(PVGASTATE pVGAState, VBVAINFOSCREEN *pScreen)
{
    const uint32_t u32ViewIndex = pScreen->u32ViewIndex;
    const bool fDisabled = RT_BOOL(pScreen->u16Flags & VBVA_SCREEN_F_DISABLED);

    if (fDisabled)
    {
        /* 0xFFFFFFFF means "all screens" for a disable request */
        if (   u32ViewIndex < pVGAState->cMonitors
            || u32ViewIndex == UINT32_C(0xFFFFFFFF))
        {
            /* canonical disabled screen: everything zero except index+flags */
            RT_ZERO(*pScreen);
            pScreen->u32ViewIndex = u32ViewIndex;
            pScreen->u16Flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED;
            return VINF_SUCCESS;
        }
    }
    else
    {
        /* upper bounds chosen so the arithmetic below cannot overflow 64 bits */
        if (   u32ViewIndex < pVGAState->cMonitors
            && pScreen->u16BitsPerPixel <= 32
            && pScreen->u32Width <= UINT16_MAX
            && pScreen->u32Height <= UINT16_MAX
            && pScreen->u32LineSize <= UINT16_MAX * 4)
        {
            const uint32_t u32BytesPerPixel = (pScreen->u16BitsPerPixel + 7) / 8;
            /* a row must fit within the declared line pitch */
            if (pScreen->u32Width <= pScreen->u32LineSize / (u32BytesPerPixel? u32BytesPerPixel: 1))
            {
                const uint64_t u64ScreenSize = (uint64_t)pScreen->u32LineSize * pScreen->u32Height;
                /* each term checked first so the subtraction below is safe */
                if (   pScreen->u32StartOffset <= pVGAState->vram_size
                    && u64ScreenSize <= pVGAState->vram_size
                    && pScreen->u32StartOffset <= pVGAState->vram_size - (uint32_t)u64ScreenSize)
                {
                    return VINF_SUCCESS;
                }
            }
        }
    }

    return VERR_INVALID_PARAMETER;
}
1479
/**
 * Applies one guest resize entry: validates the screen info, forwards the
 * resize to the chromium server, then updates the 2D VBVA view/screen state
 * for every monitor selected in the entry's target map.
 *
 * @returns VBox status code of the first failing step.
 */
static int vboxVDMACrGuestCtlResizeEntryProcess(struct VBOXVDMAHOST *pVdma, VBOXCMDVBVA_RESIZE_ENTRY *pEntry)
{
    PVGASTATE pVGAState = pVdma->pVGAState;
    VBVAINFOSCREEN Screen = pEntry->Screen;

    /* Verify and cleanup local copy of the input data. */
    int rc = vboxVDMASetupScreenInfo(pVGAState, &Screen);
    if (RT_FAILURE(rc))
    {
        WARN(("invalid screen data\n"));
        return rc;
    }

    /* copy the target map and strip bits for non-existent monitors */
    VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
    memcpy(aTargetMap, pEntry->aTargetMap, sizeof(aTargetMap));
    ASMBitClearRange(aTargetMap, pVGAState->cMonitors, VBOX_VIDEO_MAX_SCREENS);

    rc = pVdma->CrSrvInfo.pfnResize(pVdma->CrSrvInfo.hSvr, &Screen, aTargetMap);
    if (RT_FAILURE(rc))
    {
        WARN(("pfnResize failed %d\n", rc));
        return rc;
    }

    /* A fake view which contains the current screen for the 2D VBVAInfoView. */
    VBVAINFOVIEW View;
    View.u32ViewOffset = 0;
    View.u32ViewSize = Screen.u32LineSize * Screen.u32Height + Screen.u32StartOffset;
    View.u32MaxScreenSize = Screen.u32LineSize * Screen.u32Height;

    const bool fDisable = RT_BOOL(Screen.u16Flags & VBVA_SCREEN_F_DISABLED);

    /* walk every monitor index set in the target map */
    for (int i = ASMBitFirstSet(aTargetMap, pVGAState->cMonitors);
            i >= 0;
            i = ASMBitNextSet(aTargetMap, pVGAState->cMonitors, i))
    {
        Screen.u32ViewIndex = i;

        VBVAINFOSCREEN CurScreen;
        VBVAINFOVIEW CurView;

        rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
        AssertRC(rc);

        /* skip monitors that already match the requested configuration */
        if (!memcmp(&Screen, &CurScreen, sizeof (CurScreen)))
            continue;

        /* only (re)install the view when enabling, or when the monitor has
         * no view yet */
        if (!fDisable || !CurView.u32ViewSize)
        {
            View.u32ViewIndex = Screen.u32ViewIndex;

            rc = VBVAInfoView(pVGAState, &View);
            if (RT_FAILURE(rc))
            {
                WARN(("VBVAInfoView failed %d\n", rc));
                break;
            }
        }

        rc = VBVAInfoScreen(pVGAState, &Screen);
        if (RT_FAILURE(rc))
        {
            WARN(("VBVAInfoScreen failed %d\n", rc));
            break;
        }
    }

    return rc;
}
1549
1550static int vboxVDMACrGuestCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd)
1551{
1552 VBVAEXHOSTCTL_TYPE enmType = pCmd->enmType;
1553 switch (enmType)
1554 {
1555 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1556 {
1557 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1558 {
1559 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1560 return VERR_INVALID_STATE;
1561 }
1562 return pVdma->CrSrvInfo.pfnGuestCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1563 }
1564 case VBVAEXHOSTCTL_TYPE_GHH_RESIZE:
1565 {
1566 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1567 {
1568 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1569 return VERR_INVALID_STATE;
1570 }
1571
1572 uint32_t cbCmd = pCmd->u.cmd.cbCmd;
1573
1574 if (cbCmd % sizeof (VBOXCMDVBVA_RESIZE_ENTRY))
1575 {
1576 WARN(("invalid buffer size\n"));
1577 return VERR_INVALID_PARAMETER;
1578 }
1579
1580 uint32_t cElements = cbCmd / sizeof (VBOXCMDVBVA_RESIZE_ENTRY);
1581 if (!cElements)
1582 {
1583 WARN(("invalid buffer size\n"));
1584 return VERR_INVALID_PARAMETER;
1585 }
1586
1587 VBOXCMDVBVA_RESIZE *pResize = (VBOXCMDVBVA_RESIZE*)pCmd->u.cmd.pu8Cmd;
1588
1589 int rc = VINF_SUCCESS;
1590
1591 for (uint32_t i = 0; i < cElements; ++i)
1592 {
1593 VBOXCMDVBVA_RESIZE_ENTRY *pEntry = &pResize->aEntries[i];
1594 rc = vboxVDMACrGuestCtlResizeEntryProcess(pVdma, pEntry);
1595 if (RT_FAILURE(rc))
1596 {
1597 WARN(("vboxVDMACrGuestCtlResizeEntryProcess failed %d\n", rc));
1598 break;
1599 }
1600 }
1601 return rc;
1602 }
1603 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE:
1604 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED:
1605 {
1606 VBVAENABLE *pEnable = (VBVAENABLE *)pCmd->u.cmd.pu8Cmd;
1607 Assert(pCmd->u.cmd.cbCmd == sizeof (VBVAENABLE));
1608 uint32_t u32Offset = pEnable->u32Offset;
1609 int rc = vdmaVBVAEnableProcess(pVdma, u32Offset);
1610 if (!RT_SUCCESS(rc))
1611 {
1612 WARN(("vdmaVBVAEnableProcess failed %d\n", rc));
1613 return rc;
1614 }
1615
1616 if (enmType == VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED)
1617 {
1618 rc = VBoxVBVAExHPPause(&pVdma->CmdVbva);
1619 if (!RT_SUCCESS(rc))
1620 {
1621 WARN(("VBoxVBVAExHPPause failed %d\n", rc));
1622 return rc;
1623 }
1624 }
1625
1626 return VINF_SUCCESS;
1627 }
1628 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1629 {
1630 int rc = vdmaVBVADisableProcess(pVdma, true);
1631 if (RT_FAILURE(rc))
1632 {
1633 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1634 return rc;
1635 }
1636
1637 /* do vgaUpdateDisplayAll right away */
1638 VMR3ReqCallNoWait(PDMDevHlpGetVM(pVdma->pVGAState->pDevInsR3), VMCPUID_ANY,
1639 (PFNRT)vgaUpdateDisplayAll, 2, pVdma->pVGAState, /* fFailOnResize = */ false);
1640
1641 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
1642 }
1643 default:
1644 WARN(("unexpected ctl type %d\n", pCmd->enmType));
1645 return VERR_INVALID_PARAMETER;
1646 }
1647}
1648
1649/**
1650 * @param fIn - whether this is a page in or out op.
1651 * the direction is VRA#M - related, so fIn == true - transfer to VRAM); false - transfer from VRAM
1652 */
1653static int vboxVDMACrCmdVbvaProcessPagingEl(PPDMDEVINS pDevIns, VBOXCMDVBVAPAGEIDX iPage, uint8_t *pu8Vram, bool fIn)
1654{
1655 RTGCPHYS phPage = (RTGCPHYS)iPage << PAGE_SHIFT;
1656 PGMPAGEMAPLOCK Lock;
1657 int rc;
1658
1659 if (fIn)
1660 {
1661 const void * pvPage;
1662 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPage, &Lock);
1663 if (!RT_SUCCESS(rc))
1664 {
1665 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d", rc));
1666 return rc;
1667 }
1668
1669 memcpy(pu8Vram, pvPage, PAGE_SIZE);
1670
1671 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1672 }
1673 else
1674 {
1675 void * pvPage;
1676 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvPage, &Lock);
1677 if (!RT_SUCCESS(rc))
1678 {
1679 WARN(("PDMDevHlpPhysGCPhys2CCPtr failed %d", rc));
1680 return rc;
1681 }
1682
1683 memcpy(pvPage, pu8Vram, PAGE_SIZE);
1684
1685 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1686 }
1687
1688 return VINF_SUCCESS;
1689}
1690
1691static int vboxVDMACrCmdVbvaProcessPagingEls(PPDMDEVINS pDevIns, const VBOXCMDVBVAPAGEIDX *piPages, uint32_t cPages, uint8_t *pu8Vram, bool fIn)
1692{
1693 for (uint32_t i = 0; i < cPages; ++i, pu8Vram += PAGE_SIZE)
1694 {
1695 int rc = vboxVDMACrCmdVbvaProcessPagingEl(pDevIns, piPages[i], pu8Vram, fIn);
1696 if (!RT_SUCCESS(rc))
1697 {
1698 WARN(("vboxVDMACrCmdVbvaProcessPagingEl failed %d", rc));
1699 return rc;
1700 }
1701 }
1702
1703 return VINF_SUCCESS;
1704}
1705
1706static int8_t vboxVDMACrCmdVbvaPagingDataInit(PVGASTATE pVGAState, const VBOXCMDVBVA_HDR *pHdr, const VBOXCMDVBVA_PAGING_TRANSFER_DATA *pData, uint32_t cbCmd,
1707 const VBOXCMDVBVAPAGEIDX **ppPages, VBOXCMDVBVAPAGEIDX *pcPages,
1708 uint8_t **ppu8Vram, bool *pfIn)
1709{
1710 if (cbCmd < sizeof (VBOXCMDVBVA_PAGING_TRANSFER))
1711 {
1712 WARN(("cmd too small"));
1713 return -1;
1714 }
1715
1716 VBOXCMDVBVAPAGEIDX cPages = cbCmd - RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers);
1717 if (cPages % sizeof (VBOXCMDVBVAPAGEIDX))
1718 {
1719 WARN(("invalid cmd size"));
1720 return -1;
1721 }
1722 cPages /= sizeof (VBOXCMDVBVAPAGEIDX);
1723
1724 VBOXCMDVBVAOFFSET offVRAM = pData->Alloc.u.offVRAM;
1725 if (offVRAM & PAGE_OFFSET_MASK)
1726 {
1727 WARN(("offVRAM address is not on page boundary\n"));
1728 return -1;
1729 }
1730 const VBOXCMDVBVAPAGEIDX *pPages = pData->aPageNumbers;
1731
1732 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1733 uint8_t *pu8VramMax = pu8VramBase + pVGAState->vram_size;
1734 if (offVRAM >= pVGAState->vram_size)
1735 {
1736 WARN(("invalid vram offset"));
1737 return -1;
1738 }
1739
1740 if (~(~(VBOXCMDVBVAPAGEIDX)0 >> PAGE_SHIFT) & cPages)
1741 {
1742 WARN(("invalid cPages %d", cPages));
1743 return -1;
1744 }
1745
1746 if (offVRAM + ((VBOXCMDVBVAOFFSET)cPages << PAGE_SHIFT) >= pVGAState->vram_size)
1747 {
1748 WARN(("invalid cPages %d, exceeding vram size", cPages));
1749 return -1;
1750 }
1751
1752 uint8_t *pu8Vram = pu8VramBase + offVRAM;
1753 bool fIn = !!(pHdr->u8Flags & VBOXCMDVBVA_OPF_PAGING_TRANSFER_IN);
1754
1755 *ppPages = pPages;
1756 *pcPages = cPages;
1757 *ppu8Vram = pu8Vram;
1758 *pfIn = fIn;
1759 return 0;
1760}
1761
1762static int8_t vboxVDMACrCmdVbvaPagingFill(PVGASTATE pVGAState, VBOXCMDVBVA_PAGING_FILL *pFill)
1763{
1764 VBOXCMDVBVAOFFSET offVRAM = pFill->offVRAM;
1765 if (offVRAM & PAGE_OFFSET_MASK)
1766 {
1767 WARN(("offVRAM address is not on page boundary\n"));
1768 return -1;
1769 }
1770
1771 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1772 uint8_t *pu8VramMax = pu8VramBase + pVGAState->vram_size;
1773 if (offVRAM >= pVGAState->vram_size)
1774 {
1775 WARN(("invalid vram offset"));
1776 return -1;
1777 }
1778
1779 uint32_t cbFill = pFill->u32CbFill;
1780
1781 if (offVRAM + cbFill >= pVGAState->vram_size)
1782 {
1783 WARN(("invalid cPages"));
1784 return -1;
1785 }
1786
1787 uint32_t *pu32Vram = (uint32_t*)(pu8VramBase + offVRAM);
1788 uint32_t u32Color = pFill->u32Pattern;
1789
1790 Assert(!(cbFill % 4));
1791 for (uint32_t i = 0; i < cbFill / 4; ++i)
1792 {
1793 pu32Vram[i] = u32Color;
1794 }
1795
1796 return 0;
1797}
1798
1799static int8_t vboxVDMACrCmdVbvaProcessCmdData(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
1800{
1801 switch (pCmd->u8OpCode)
1802 {
1803 case VBOXCMDVBVA_OPTYPE_NOPCMD:
1804 return 0;
1805 case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
1806 {
1807 PVGASTATE pVGAState = pVdma->pVGAState;
1808 const VBOXCMDVBVAPAGEIDX *pPages;
1809 uint32_t cPages;
1810 uint8_t *pu8Vram;
1811 bool fIn;
1812 int8_t i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pCmd, &((VBOXCMDVBVA_PAGING_TRANSFER*)pCmd)->Data, cbCmd,
1813 &pPages, &cPages,
1814 &pu8Vram, &fIn);
1815 if (i8Result < 0)
1816 {
1817 WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
1818 return i8Result;
1819 }
1820
1821 PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
1822 int rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pPages, cPages, pu8Vram, fIn);
1823 if (!RT_SUCCESS(rc))
1824 {
1825 WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
1826 return -1;
1827 }
1828
1829 return 0;
1830 }
1831 case VBOXCMDVBVA_OPTYPE_PAGING_FILL:
1832 {
1833 PVGASTATE pVGAState = pVdma->pVGAState;
1834 if (cbCmd != sizeof (VBOXCMDVBVA_PAGING_FILL))
1835 {
1836 WARN(("cmd too small"));
1837 return -1;
1838 }
1839
1840 return vboxVDMACrCmdVbvaPagingFill(pVGAState, (VBOXCMDVBVA_PAGING_FILL*)pCmd);
1841 }
1842 default:
1843 return pVdma->CrSrvInfo.pfnCmd(pVdma->CrSrvInfo.hSvr, pCmd, cbCmd);
1844 }
1845}
1846
/* Historical layout of VBOXCMDVBVA_PAGING_TRANSFER, kept for reference only. */
#if 0
typedef struct VBOXCMDVBVA_PAGING_TRANSFER
{
    VBOXCMDVBVA_HDR Hdr;
    /* for now can only contain offVRAM.
     * paging transfer can NOT be initiated for allocations having host 3D object (hostID) associated */
    VBOXCMDVBVA_ALLOCINFO Alloc;
    uint32_t u32Reserved;
    VBOXCMDVBVA_SYSMEMEL aSysMem[1];
} VBOXCMDVBVA_PAGING_TRANSFER;
#endif

/* Layout assumptions the page-crossing command parser below depends on. */
AssertCompile(sizeof (VBOXCMDVBVA_HDR) == 8);
AssertCompile(sizeof (VBOXCMDVBVA_ALLOCINFO) == 4);
AssertCompile(sizeof (VBOXCMDVBVAPAGEIDX) == 4);
AssertCompile(!(PAGE_SIZE % sizeof (VBOXCMDVBVAPAGEIDX)));

/** Number of VBOXCMDVBVA_SYSMEMEL elements that fit into one guest page. */
#define VBOXCMDVBVA_NUM_SYSMEMEL_PER_PAGE (PAGE_SIZE / sizeof (VBOXCMDVBVA_SYSMEMEL))
1865
/**
 * Top-level VBVA command processor.
 *
 * Handles SYSMEMCMD commands whose real payload lives in guest system memory
 * (possibly crossing page boundaries, requiring header reassembly and page
 * remapping), unrolls COMPLEXCMD containers recursively, and forwards
 * everything else to vboxVDMACrCmdVbvaProcessCmdData().
 *
 * @returns 0 on success, negative int8_t on failure.
 */
static int8_t vboxVDMACrCmdVbvaProcess(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
{
    switch (pCmd->u8OpCode)
    {
        case VBOXCMDVBVA_OPTYPE_SYSMEMCMD:
        {
            if (cbCmd < sizeof (VBOXCMDVBVA_SYSMEMCMD))
            {
                WARN(("invalid command size"));
                return -1;
            }
            VBOXCMDVBVA_SYSMEMCMD *pSysmemCmd = (VBOXCMDVBVA_SYSMEMCMD*)pCmd;
            const VBOXCMDVBVA_HDR *pRealCmdHdr;
            /* the real command size is encoded in u8Flags (low byte) and
             * u.u8PrimaryID (high byte) of the wrapper header */
            uint32_t cbRealCmd = pCmd->u8Flags;
            cbRealCmd |= (uint32_t)pCmd->u.u8PrimaryID << 8;
            if (cbRealCmd < sizeof (VBOXCMDVBVA_HDR))
            {
                WARN(("invalid sysmem cmd size"));
                return -1;
            }

            RTGCPHYS phCmd = (RTGCPHYS)pSysmemCmd->phCmd;

            PGMPAGEMAPLOCK Lock;
            PVGASTATE pVGAState = pVdma->pVGAState;
            PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
            const void * pvCmd;
            int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
            if (!RT_SUCCESS(rc))
            {
                WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
                return -1;
            }

            Assert((phCmd & PAGE_OFFSET_MASK) == (((uintptr_t)pvCmd) & PAGE_OFFSET_MASK));

            /* bytes of the real command available in the first mapped page */
            uint32_t cbCmdPart = PAGE_SIZE - (((uintptr_t)pvCmd) & PAGE_OFFSET_MASK);

            if (cbRealCmd <= cbCmdPart)
            {
                /* the whole command fits into the first page - process it
                 * directly from the mapping */
                pRealCmdHdr = (const VBOXCMDVBVA_HDR *)pvCmd;
                /* NOTE(review): i8Result is declared uint8_t here although the
                 * callee returns int8_t; a -1 result round-trips through 255
                 * and relies on implementation-defined narrowing on return —
                 * should be int8_t. */
                uint8_t i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pRealCmdHdr, cbRealCmd);
                PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
                return i8Result;
            }

            /* the command crosses a page boundary: reassemble the header if
             * needed and process the tail page-by-page */
            VBOXCMDVBVA_HDR Hdr;
            const void *pvCurCmdTail;
            uint32_t cbCurCmdTail;
            if (cbCmdPart >= sizeof (*pRealCmdHdr))
            {
                /* header is contiguous within the first page */
                pRealCmdHdr = (const VBOXCMDVBVA_HDR *)pvCmd;
                pvCurCmdTail = (const void*)(pRealCmdHdr + 1);
                cbCurCmdTail = cbCmdPart - sizeof (*pRealCmdHdr);
            }
            else
            {
                /* header itself is split: copy the first fragment, remap the
                 * next page, then copy the remainder */
                memcpy(&Hdr, pvCmd, cbCmdPart);
                PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
                phCmd += cbCmdPart;
                Assert(!(phCmd & PAGE_OFFSET_MASK));
                rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
                if (!RT_SUCCESS(rc))
                {
                    WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
                    return -1;
                }

                /* NOTE(review): cbCmdPart is reassigned to the REMAINING byte
                 * count and then used as both the destination offset and the
                 * copy length; the destination offset should be the number of
                 * bytes already copied (sizeof(Hdr) - new cbCmdPart).  This is
                 * only correct when the header splits exactly in half (4/4) —
                 * verify whether other splits can occur in practice. */
                cbCmdPart = sizeof (*pRealCmdHdr) - cbCmdPart;
                memcpy(((uint8_t*)(&Hdr)) + cbCmdPart, pvCmd, cbCmdPart);
                pRealCmdHdr = &Hdr;
                pvCurCmdTail = (const void*)(((uint8_t*)pvCmd) + cbCmdPart);
                cbCurCmdTail = PAGE_SIZE - cbCmdPart;
            }

            /* clamp the tail to what the command actually contains */
            if (cbCurCmdTail > cbRealCmd - sizeof (*pRealCmdHdr))
                cbCurCmdTail = cbRealCmd - sizeof (*pRealCmdHdr);

            int8_t i8Result = 0;

            switch (pRealCmdHdr->u8OpCode)
            {
                case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
                {
                    /* only paging transfers may span multiple pages */
                    const uint32_t *pPages;
                    uint32_t cPages;
                    uint8_t *pu8Vram;
                    bool fIn;
                    i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pRealCmdHdr, (const VBOXCMDVBVA_PAGING_TRANSFER_DATA*)pvCurCmdTail, cbRealCmd,
                                                                &pPages, &cPages,
                                                                &pu8Vram, &fIn);
                    if (i8Result < 0)
                    {
                        WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
                        /* we need to break, not return, to ensure currently locked page is released */
                        break;
                    }

                    if (cbCurCmdTail & 3)
                    {
                        WARN(("command is not alligned properly %d", cbCurCmdTail));
                        i8Result = -1;
                        /* we need to break, not return, to ensure currently locked page is released */
                        break;
                    }

                    uint32_t cCurPages = cbCurCmdTail / sizeof (VBOXCMDVBVAPAGEIDX);
                    Assert(cCurPages < cPages);

                    /* consume page-index entries from the current mapping,
                     * remapping the next guest page as needed */
                    do
                    {
                        /* NOTE(review): after the remap below, pPages is not
                         * re-pointed at the newly mapped page (pvCmd); the
                         * next iteration appears to read indices through the
                         * previous, already released mapping — confirm. */
                        rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pPages, cCurPages, pu8Vram, fIn);
                        if (!RT_SUCCESS(rc))
                        {
                            WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
                            i8Result = -1;
                            /* we need to break, not return, to ensure currently locked page is released */
                            break;
                        }

                        Assert(cPages >= cCurPages);
                        cPages -= cCurPages;

                        if (!cPages)
                            break;

                        PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);

                        Assert(!(phCmd & PAGE_OFFSET_MASK));

                        phCmd += PAGE_SIZE;
                        pu8Vram += (VBOXCMDVBVAOFFSET)cCurPages << PAGE_SHIFT;

                        rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
                        if (!RT_SUCCESS(rc))
                        {
                            WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
                            /* the page is not locked, return */
                            return -1;
                        }

                        cCurPages = PAGE_SIZE / sizeof (VBOXCMDVBVAPAGEIDX);
                        if (cCurPages > cPages)
                            cCurPages = cPages;
                    } while (1);
                    break;
                }
                default:
                    WARN(("command can not be splitted"));
                    i8Result = -1;
                    break;
            }

            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
            return i8Result;
        }
        case VBOXCMDVBVA_OPTYPE_COMPLEXCMD:
        {
            /* container: iterate the embedded sub-commands and process each
             * one recursively */
            Assert(cbCmd >= sizeof (VBOXCMDVBVA_HDR));
            ++pCmd;
            cbCmd -= sizeof (*pCmd);
            uint32_t cbCurCmd = 0;
            for ( ; cbCmd; cbCmd -= cbCurCmd, pCmd = (VBOXCMDVBVA_HDR*)(((uint8_t*)pCmd) + cbCurCmd))
            {
                if (cbCmd < sizeof (VBOXCMDVBVA_HDR))
                {
                    WARN(("invalid command size"));
                    return -1;
                }

                cbCurCmd = pCmd->u2.complexCmdEl.u16CbCmdHost;
                if (cbCmd < cbCurCmd)
                {
                    WARN(("invalid command size"));
                    return -1;
                }

                int8_t i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCurCmd);
                if (i8Result < 0)
                {
                    WARN(("vboxVDMACrCmdVbvaProcess failed"));
                    return i8Result;
                }
            }
            return 0;
        }
        default:
            /* plain contiguous command */
            return vboxVDMACrCmdVbvaProcessCmdData(pVdma, pCmd, cbCmd);
    }
}
2056
2057static void vboxVDMACrCmdProcess(struct VBOXVDMAHOST *pVdma, uint8_t* pu8Cmd, uint32_t cbCmd)
2058{
2059 if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
2060 return;
2061
2062 if (cbCmd < sizeof (VBOXCMDVBVA_HDR))
2063 {
2064 WARN(("invalid command size"));
2065 return;
2066 }
2067
2068 PVBOXCMDVBVA_HDR pCmd = (PVBOXCMDVBVA_HDR)pu8Cmd;
2069
2070 /* check if the command is cancelled */
2071 if (!ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_IN_PROGRESS, VBOXCMDVBVA_STATE_SUBMITTED))
2072 {
2073 Assert(pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED);
2074 return;
2075 }
2076
2077 pCmd->u.i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCmd);
2078}
2079
2080static int vboxVDMACrCtlHgsmiSetup(struct VBOXVDMAHOST *pVdma)
2081{
2082 PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP pCmd = (PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP)
2083 vboxVDMACrCtlCreate (VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP, sizeof (*pCmd));
2084 int rc = VERR_NO_MEMORY;
2085 if (pCmd)
2086 {
2087 PVGASTATE pVGAState = pVdma->pVGAState;
2088 pCmd->pvVRamBase = pVGAState->vram_ptrR3;
2089 pCmd->cbVRam = pVGAState->vram_size;
2090 pCmd->pLed = &pVGAState->Led3D;
2091 pCmd->CrClientInfo.hClient = pVdma;
2092 pCmd->CrClientInfo.pfnCallout = vboxCmdVBVACmdCallout;
2093 rc = vboxVDMACrCtlPost(pVGAState, &pCmd->Hdr, sizeof (*pCmd));
2094 if (RT_SUCCESS(rc))
2095 {
2096 rc = vboxVDMACrCtlGetRc(&pCmd->Hdr);
2097 if (RT_SUCCESS(rc))
2098 pVdma->CrSrvInfo = pCmd->CrCmdServerInfo;
2099 else if (rc != VERR_NOT_SUPPORTED)
2100 WARN(("vboxVDMACrCtlGetRc returned %d\n", rc));
2101 }
2102 else
2103 WARN(("vboxVDMACrCtlPost failed %d\n", rc));
2104
2105 vboxVDMACrCtlRelease(&pCmd->Hdr);
2106 }
2107
2108 if (!RT_SUCCESS(rc))
2109 memset(&pVdma->CrSrvInfo, 0, sizeof (pVdma->CrSrvInfo));
2110
2111 return rc;
2112}
2113
2114static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer);
2115
/* Check whether this is an external command to be passed to the chromium backend
 * (or handled inline here), rather than through the generic command pipeline.
 *
 * Returns VINF_SUCCESS when the command was consumed here (completed or marked
 * for asynchronous completion), VINF_NOT_SUPPORTED when the caller should
 * process it normally, or a VERR_ status on malformed input. */
static int vboxVDMACmdCheckCrCmd(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmdDr, uint32_t cbCmdDr)
{
    PVBOXVDMACMD pDmaCmd = NULL;
    uint32_t cbDmaCmd = 0;
    uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
    int rc = VINF_NOT_SUPPORTED;

    cbDmaCmd = pCmdDr->cbBuf;

    if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
    {
        /* The DMA command is embedded right after the descriptor; validate sizes. */
        if (cbCmdDr < sizeof (*pCmdDr) + VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        /* NOTE(review): compares the declared buffer size against the space left
         * after descriptor + header; confirm the intended direction of this
         * inequality against the guest-side contract. */
        if (cbDmaCmd < cbCmdDr - sizeof (*pCmdDr) - VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid command buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        pDmaCmd = VBOXVDMACBUF_DR_TAIL(pCmdDr, VBOXVDMACMD);
    }
    else if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
    {
        /* The DMA command lives in VRAM at the given offset; bounds-check it.
         * NOTE(review): offBuf + cbDmaCmd could in principle wrap - presumably
         * VBOXVIDEOOFFSET is wide enough that guest values cannot overflow here;
         * confirm. */
        VBOXVIDEOOFFSET offBuf = pCmdDr->Location.offVramBuf;
        if (offBuf + cbDmaCmd > pVdma->pVGAState->vram_size)
        {
            AssertMsgFailed(("invalid command buffer data from offset!"));
            return VERR_INVALID_PARAMETER;
        }
        pDmaCmd = (VBOXVDMACMD*)(pvRam + offBuf);
    }

    if (pDmaCmd)
    {
        Assert(cbDmaCmd >= VBOXVDMACMD_HEADER_SIZE());
        uint32_t cbBody = VBOXVDMACMD_BODY_SIZE(cbDmaCmd);

        switch (pDmaCmd->enmType)
        {
            case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
            {
                /* Chromium command: forwarded to the 3D backend when a processor
                 * callback is installed (asynchronous completion), otherwise
                 * completed immediately below. */
                PVBOXVDMACMD_CHROMIUM_CMD pCrCmd = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_CHROMIUM_CMD);
                if (cbBody < sizeof (*pCrCmd))
                {
                    AssertMsgFailed(("invalid chromium command buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }
                PVGASTATE pVGAState = pVdma->pVGAState;
                rc = VINF_SUCCESS;
                if (pVGAState->pDrv->pfnCrHgsmiCommandProcess)
                {
                    VBoxSHGSMICommandMarkAsynchCompletion(pCmdDr);
                    pVGAState->pDrv->pfnCrHgsmiCommandProcess(pVGAState->pDrv, pCrCmd, cbBody);
                    break;
                }
                else
                {
                    Assert(0);
                }

                /* No backend processor installed: complete the command right away. */
                int tmpRc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                AssertRC(tmpRc);
                break;
            }
            case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
            {
                /* Inline memory transfer: executed synchronously here, then the
                 * descriptor is completed with the result. */
                PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
                if (cbBody < sizeof (*pTransfer))
                {
                    AssertMsgFailed(("invalid bpb transfer buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }

                rc = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, sizeof (*pTransfer));
                AssertRC(rc);
                if (RT_SUCCESS(rc))
                {
                    pCmdDr->rc = VINF_SUCCESS;
                    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                    AssertRC(rc);
                    rc = VINF_SUCCESS;
                }
                break;
            }
            default:
                /* Not an external command; rc stays VINF_NOT_SUPPORTED so the
                 * caller runs the normal pipeline. */
                break;
        }
    }
    return rc;
}
2211
2212int vboxVDMACrHgsmiCommandCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc)
2213{
2214 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2215 PHGSMIINSTANCE pIns = pVGAState->pHGSMI;
2216 VBOXVDMACMD *pDmaHdr = VBOXVDMACMD_FROM_BODY(pCmd);
2217 VBOXVDMACBUF_DR *pDr = VBOXVDMACBUF_DR_FROM_TAIL(pDmaHdr);
2218 AssertRC(rc);
2219 pDr->rc = rc;
2220
2221 Assert(pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
2222 rc = VBoxSHGSMICommandComplete(pIns, pDr);
2223 AssertRC(rc);
2224 return rc;
2225}
2226
2227int vboxVDMACrHgsmiControlCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CTL pCmd, int rc)
2228{
2229 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2230 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pCmdPrivate = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
2231 pCmdPrivate->rc = rc;
2232 if (pCmdPrivate->pfnCompletion)
2233 {
2234 pCmdPrivate->pfnCompletion(pVGAState, pCmd, pCmdPrivate->pvCompletion);
2235 }
2236 return VINF_SUCCESS;
2237}
2238
2239static int vboxVDMACmdExecBltPerform(PVBOXVDMAHOST pVdma,
2240 uint8_t *pvDstSurf, const uint8_t *pvSrcSurf,
2241 const PVBOXVDMA_SURF_DESC pDstDesc, const PVBOXVDMA_SURF_DESC pSrcDesc,
2242 const VBOXVDMA_RECTL * pDstRectl, const VBOXVDMA_RECTL * pSrcRectl)
2243{
2244 /* we do not support color conversion */
2245 Assert(pDstDesc->format == pSrcDesc->format);
2246 /* we do not support stretching */
2247 Assert(pDstRectl->height == pSrcRectl->height);
2248 Assert(pDstRectl->width == pSrcRectl->width);
2249 if (pDstDesc->format != pSrcDesc->format)
2250 return VERR_INVALID_FUNCTION;
2251 if (pDstDesc->width == pDstRectl->width
2252 && pSrcDesc->width == pSrcRectl->width
2253 && pSrcDesc->width == pDstDesc->width)
2254 {
2255 Assert(!pDstRectl->left);
2256 Assert(!pSrcRectl->left);
2257 uint32_t cbOff = pDstDesc->pitch * pDstRectl->top;
2258 uint32_t cbSize = pDstDesc->pitch * pDstRectl->height;
2259 memcpy(pvDstSurf + cbOff, pvSrcSurf + cbOff, cbSize);
2260 }
2261 else
2262 {
2263 uint32_t offDstLineStart = pDstRectl->left * pDstDesc->bpp >> 3;
2264 uint32_t offDstLineEnd = ((pDstRectl->left * pDstDesc->bpp + 7) >> 3) + ((pDstDesc->bpp * pDstRectl->width + 7) >> 3);
2265 uint32_t cbDstLine = offDstLineEnd - offDstLineStart;
2266 uint32_t offDstStart = pDstDesc->pitch * pDstRectl->top + offDstLineStart;
2267 Assert(cbDstLine <= pDstDesc->pitch);
2268 uint32_t cbDstSkip = pDstDesc->pitch;
2269 uint8_t * pvDstStart = pvDstSurf + offDstStart;
2270
2271 uint32_t offSrcLineStart = pSrcRectl->left * pSrcDesc->bpp >> 3;
2272 uint32_t offSrcLineEnd = ((pSrcRectl->left * pSrcDesc->bpp + 7) >> 3) + ((pSrcDesc->bpp * pSrcRectl->width + 7) >> 3);
2273 uint32_t cbSrcLine = offSrcLineEnd - offSrcLineStart;
2274 uint32_t offSrcStart = pSrcDesc->pitch * pSrcRectl->top + offSrcLineStart;
2275 Assert(cbSrcLine <= pSrcDesc->pitch);
2276 uint32_t cbSrcSkip = pSrcDesc->pitch;
2277 const uint8_t * pvSrcStart = pvSrcSurf + offSrcStart;
2278
2279 Assert(cbDstLine == cbSrcLine);
2280
2281 for (uint32_t i = 0; ; ++i)
2282 {
2283 memcpy (pvDstStart, pvSrcStart, cbDstLine);
2284 if (i == pDstRectl->height)
2285 break;
2286 pvDstStart += cbDstSkip;
2287 pvSrcStart += cbSrcSkip;
2288 }
2289 }
2290 return VINF_SUCCESS;
2291}
2292
2293static void vboxVDMARectlUnite(VBOXVDMA_RECTL * pRectl1, const VBOXVDMA_RECTL * pRectl2)
2294{
2295 if (!pRectl1->width)
2296 *pRectl1 = *pRectl2;
2297 else
2298 {
2299 int16_t x21 = pRectl1->left + pRectl1->width;
2300 int16_t x22 = pRectl2->left + pRectl2->width;
2301 if (pRectl1->left > pRectl2->left)
2302 {
2303 pRectl1->left = pRectl2->left;
2304 pRectl1->width = x21 < x22 ? x22 - pRectl1->left : x21 - pRectl1->left;
2305 }
2306 else if (x21 < x22)
2307 pRectl1->width = x22 - pRectl1->left;
2308
2309 x21 = pRectl1->top + pRectl1->height;
2310 x22 = pRectl2->top + pRectl2->height;
2311 if (pRectl1->top > pRectl2->top)
2312 {
2313 pRectl1->top = pRectl2->top;
2314 pRectl1->height = x21 < x22 ? x22 - pRectl1->top : x21 - pRectl1->top;
2315 }
2316 else if (x21 < x22)
2317 pRectl1->height = x22 - pRectl1->top;
2318 }
2319}
2320
2321/*
2322 * @return on success the number of bytes the command contained, otherwise - VERR_xxx error code
2323 */
2324static int vboxVDMACmdExecBlt(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt, uint32_t cbBuffer)
2325{
2326 const uint32_t cbBlt = VBOXVDMACMD_BODY_FIELD_OFFSET(uint32_t, VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects[pBlt->cDstSubRects]);
2327 Assert(cbBlt <= cbBuffer);
2328 if (cbBuffer < cbBlt)
2329 return VERR_INVALID_FUNCTION;
2330
2331 /* we do not support stretching for now */
2332 Assert(pBlt->srcRectl.width == pBlt->dstRectl.width);
2333 Assert(pBlt->srcRectl.height == pBlt->dstRectl.height);
2334 if (pBlt->srcRectl.width != pBlt->dstRectl.width)
2335 return VERR_INVALID_FUNCTION;
2336 if (pBlt->srcRectl.height != pBlt->dstRectl.height)
2337 return VERR_INVALID_FUNCTION;
2338 Assert(pBlt->cDstSubRects);
2339
2340 uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
2341 VBOXVDMA_RECTL updateRectl = {0, 0, 0, 0};
2342
2343 if (pBlt->cDstSubRects)
2344 {
2345 VBOXVDMA_RECTL dstRectl, srcRectl;
2346 const VBOXVDMA_RECTL *pDstRectl, *pSrcRectl;
2347 for (uint32_t i = 0; i < pBlt->cDstSubRects; ++i)
2348 {
2349 pDstRectl = &pBlt->aDstSubRects[i];
2350 if (pBlt->dstRectl.left || pBlt->dstRectl.top)
2351 {
2352 dstRectl.left = pDstRectl->left + pBlt->dstRectl.left;
2353 dstRectl.top = pDstRectl->top + pBlt->dstRectl.top;
2354 dstRectl.width = pDstRectl->width;
2355 dstRectl.height = pDstRectl->height;
2356 pDstRectl = &dstRectl;
2357 }
2358
2359 pSrcRectl = &pBlt->aDstSubRects[i];
2360 if (pBlt->srcRectl.left || pBlt->srcRectl.top)
2361 {
2362 srcRectl.left = pSrcRectl->left + pBlt->srcRectl.left;
2363 srcRectl.top = pSrcRectl->top + pBlt->srcRectl.top;
2364 srcRectl.width = pSrcRectl->width;
2365 srcRectl.height = pSrcRectl->height;
2366 pSrcRectl = &srcRectl;
2367 }
2368
2369 int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
2370 &pBlt->dstDesc, &pBlt->srcDesc,
2371 pDstRectl,
2372 pSrcRectl);
2373 AssertRC(rc);
2374 if (!RT_SUCCESS(rc))
2375 return rc;
2376
2377 vboxVDMARectlUnite(&updateRectl, pDstRectl);
2378 }
2379 }
2380 else
2381 {
2382 int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
2383 &pBlt->dstDesc, &pBlt->srcDesc,
2384 &pBlt->dstRectl,
2385 &pBlt->srcRectl);
2386 AssertRC(rc);
2387 if (!RT_SUCCESS(rc))
2388 return rc;
2389
2390 vboxVDMARectlUnite(&updateRectl, &pBlt->dstRectl);
2391 }
2392
2393 return cbBlt;
2394}
2395
2396static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer)
2397{
2398 if (cbBuffer < sizeof (*pTransfer))
2399 return VERR_INVALID_PARAMETER;
2400
2401 PVGASTATE pVGAState = pVdma->pVGAState;
2402 uint8_t * pvRam = pVGAState->vram_ptrR3;
2403 PGMPAGEMAPLOCK SrcLock;
2404 PGMPAGEMAPLOCK DstLock;
2405 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
2406 const void * pvSrc;
2407 void * pvDst;
2408 int rc = VINF_SUCCESS;
2409 uint32_t cbTransfer = pTransfer->cbTransferSize;
2410 uint32_t cbTransfered = 0;
2411 bool bSrcLocked = false;
2412 bool bDstLocked = false;
2413 do
2414 {
2415 uint32_t cbSubTransfer = cbTransfer;
2416 if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
2417 {
2418 pvSrc = pvRam + pTransfer->Src.offVramBuf + cbTransfered;
2419 }
2420 else
2421 {
2422 RTGCPHYS phPage = pTransfer->Src.phBuf;
2423 phPage += cbTransfered;
2424 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvSrc, &SrcLock);
2425 AssertRC(rc);
2426 if (RT_SUCCESS(rc))
2427 {
2428 bSrcLocked = true;
2429 cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
2430 }
2431 else
2432 {
2433 break;
2434 }
2435 }
2436
2437 if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
2438 {
2439 pvDst = pvRam + pTransfer->Dst.offVramBuf + cbTransfered;
2440 }
2441 else
2442 {
2443 RTGCPHYS phPage = pTransfer->Dst.phBuf;
2444 phPage += cbTransfered;
2445 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvDst, &DstLock);
2446 AssertRC(rc);
2447 if (RT_SUCCESS(rc))
2448 {
2449 bDstLocked = true;
2450 cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
2451 }
2452 else
2453 {
2454 break;
2455 }
2456 }
2457
2458 if (RT_SUCCESS(rc))
2459 {
2460 memcpy(pvDst, pvSrc, cbSubTransfer);
2461 cbTransfer -= cbSubTransfer;
2462 cbTransfered += cbSubTransfer;
2463 }
2464 else
2465 {
2466 cbTransfer = 0; /* to break */
2467 }
2468
2469 if (bSrcLocked)
2470 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &SrcLock);
2471 if (bDstLocked)
2472 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &DstLock);
2473 } while (cbTransfer);
2474
2475 if (RT_SUCCESS(rc))
2476 return sizeof (*pTransfer);
2477 return rc;
2478}
2479
2480static int vboxVDMACmdExec(PVBOXVDMAHOST pVdma, const uint8_t *pvBuffer, uint32_t cbBuffer)
2481{
2482 do
2483 {
2484 Assert(pvBuffer);
2485 Assert(cbBuffer >= VBOXVDMACMD_HEADER_SIZE());
2486
2487 if (!pvBuffer)
2488 return VERR_INVALID_PARAMETER;
2489 if (cbBuffer < VBOXVDMACMD_HEADER_SIZE())
2490 return VERR_INVALID_PARAMETER;
2491
2492 PVBOXVDMACMD pCmd = (PVBOXVDMACMD)pvBuffer;
2493 uint32_t cbCmd = 0;
2494 switch (pCmd->enmType)
2495 {
2496 case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
2497 {
2498#ifdef VBOXWDDM_TEST_UHGSMI
2499 static int count = 0;
2500 static uint64_t start, end;
2501 if (count==0)
2502 {
2503 start = RTTimeNanoTS();
2504 }
2505 ++count;
2506 if (count==100000)
2507 {
2508 end = RTTimeNanoTS();
2509 float ems = (end-start)/1000000.f;
2510 LogRel(("100000 calls took %i ms, %i cps\n", (int)ems, (int)(100000.f*1000.f/ems) ));
2511 }
2512#endif
2513 /* todo: post the buffer to chromium */
2514 return VINF_SUCCESS;
2515 }
2516 case VBOXVDMACMD_TYPE_DMA_PRESENT_BLT:
2517 {
2518 const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_PRESENT_BLT);
2519 int cbBlt = vboxVDMACmdExecBlt(pVdma, pBlt, cbBuffer);
2520 Assert(cbBlt >= 0);
2521 Assert((uint32_t)cbBlt <= cbBuffer);
2522 if (cbBlt >= 0)
2523 {
2524 if ((uint32_t)cbBlt == cbBuffer)
2525 return VINF_SUCCESS;
2526 else
2527 {
2528 cbBuffer -= (uint32_t)cbBlt;
2529 pvBuffer -= cbBlt;
2530 }
2531 }
2532 else
2533 return cbBlt; /* error */
2534 break;
2535 }
2536 case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
2537 {
2538 const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
2539 int cbTransfer = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, cbBuffer);
2540 Assert(cbTransfer >= 0);
2541 Assert((uint32_t)cbTransfer <= cbBuffer);
2542 if (cbTransfer >= 0)
2543 {
2544 if ((uint32_t)cbTransfer == cbBuffer)
2545 return VINF_SUCCESS;
2546 else
2547 {
2548 cbBuffer -= (uint32_t)cbTransfer;
2549 pvBuffer -= cbTransfer;
2550 }
2551 }
2552 else
2553 return cbTransfer; /* error */
2554 break;
2555 }
2556 case VBOXVDMACMD_TYPE_DMA_NOP:
2557 return VINF_SUCCESS;
2558 case VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ:
2559 return VINF_SUCCESS;
2560 default:
2561 AssertBreakpoint();
2562 return VERR_INVALID_FUNCTION;
2563 }
2564 } while (1);
2565
2566 /* we should not be here */
2567 AssertBreakpoint();
2568 return VERR_INVALID_STATE;
2569}
2570
2571static DECLCALLBACK(int) vboxVDMAWorkerThread(RTTHREAD ThreadSelf, void *pvUser)
2572{
2573 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvUser;
2574 PVGASTATE pVGAState = pVdma->pVGAState;
2575 VBVAEXHOSTCONTEXT *pCmdVbva = &pVdma->CmdVbva;
2576 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
2577 uint8_t *pCmd;
2578 uint32_t cbCmd;
2579 int rc;
2580
2581 VBoxVDMAThreadNotifyConstructSucceeded(&pVdma->Thread, pvUser);
2582
2583 while (!VBoxVDMAThreadIsTerminating(&pVdma->Thread))
2584 {
2585 VBVAEXHOST_DATA_TYPE enmType = VBoxVBVAExHPDataGet(pCmdVbva, &pCmd, &cbCmd);
2586 switch (enmType)
2587 {
2588 case VBVAEXHOST_DATA_TYPE_CMD:
2589 vboxVDMACrCmdProcess(pVdma, pCmd, cbCmd);
2590 VBoxVBVAExHPDataCompleteCmd(pCmdVbva, cbCmd);
2591 VBVARaiseIrqNoWait(pVGAState, 0);
2592 break;
2593 case VBVAEXHOST_DATA_TYPE_GUESTCTL:
2594 rc = vboxVDMACrGuestCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd);
2595 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
2596 break;
2597 case VBVAEXHOST_DATA_TYPE_HOSTCTL:
2598 {
2599 bool fContinue = true;
2600 rc = vboxVDMACrHostCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd, &fContinue);
2601 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
2602 if (fContinue)
2603 break;
2604 }
2605 case VBVAEXHOST_DATA_TYPE_NO_DATA:
2606 rc = VBoxVDMAThreadEventWait(&pVdma->Thread, RT_INDEFINITE_WAIT);
2607 AssertRC(rc);
2608 break;
2609 default:
2610 WARN(("unexpected type %d\n", enmType));
2611 break;
2612 }
2613 }
2614
2615 VBoxVDMAThreadNotifyTerminatingSucceeded(&pVdma->Thread, pvUser);
2616
2617 return VINF_SUCCESS;
2618}
2619
static void vboxVDMACommandProcess(PVBOXVDMAHOST pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    /* Resolves the command buffer location (inline after the descriptor, a VRAM
     * offset, or a single guest-physical page), executes it, and completes the
     * descriptor back to the guest with the resulting status. */
    PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
    const uint8_t * pvBuf;
    PGMPAGEMAPLOCK Lock;
    int rc;
    bool bReleaseLocked = false; /* set when pvBuf points into a locked guest page */

    do
    {
        PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;

        if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
            pvBuf = VBOXVDMACBUF_DR_TAIL(pCmd, const uint8_t);
        else if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
        {
            uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
            pvBuf = pvRam + pCmd->Location.offVramBuf;
        }
        else
        {
            /* Guest-physical buffer: must fit within a single page because only
             * one page mapping is taken below. */
            RTGCPHYS phPage = pCmd->Location.phBuf & ~0xfffULL;
            uint32_t offset = pCmd->Location.phBuf & 0xfff;
            Assert(offset + pCmd->cbBuf <= 0x1000);
            if (offset + pCmd->cbBuf > 0x1000)
            {
                /* @todo: more advanced mechanism of command buffer proc is actually needed */
                rc = VERR_INVALID_PARAMETER;
                break;
            }

            const void * pvPageBuf;
            rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPageBuf, &Lock);
            AssertRC(rc);
            if (!RT_SUCCESS(rc))
            {
                /* @todo: if (rc == VERR_PGM_PHYS_PAGE_RESERVED) -> fall back on using PGMPhysRead ?? */
                break;
            }

            pvBuf = (const uint8_t *)pvPageBuf;
            pvBuf += offset;

            bReleaseLocked = true;
        }

        rc = vboxVDMACmdExec(pVdma, pvBuf, pCmd->cbBuf);
        AssertRC(rc);

        /* The page mapping lock is held only for the duration of execution. */
        if (bReleaseLocked)
            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
    } while (0);

    pCmd->rc = rc;

    rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
    AssertRC(rc);
}
2678
2679static void vboxVDMAControlProcess(PVBOXVDMAHOST pVdma, PVBOXVDMA_CTL pCmd)
2680{
2681 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
2682 pCmd->i32Result = VINF_SUCCESS;
2683 int rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
2684 AssertRC(rc);
2685}
2686
2687#endif /* #ifdef VBOX_WITH_CRHGSMI */
2688
2689#ifdef VBOX_VDMA_WITH_WATCHDOG
2690static DECLCALLBACK(void) vboxVDMAWatchDogTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
2691{
2692 VBOXVDMAHOST *pVdma = (VBOXVDMAHOST *)pvUser;
2693 PVGASTATE pVGAState = pVdma->pVGAState;
2694 VBVARaiseIrq(pVGAState, HGSMIHOSTFLAGS_WATCHDOG);
2695}
2696
2697static int vboxVDMAWatchDogCtl(struct VBOXVDMAHOST *pVdma, uint32_t cMillis)
2698{
2699 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
2700 if (cMillis)
2701 TMTimerSetMillies(pVdma->WatchDogTimer, cMillis);
2702 else
2703 TMTimerStop(pVdma->WatchDogTimer);
2704 return VINF_SUCCESS;
2705}
2706#endif
2707
int vboxVDMAConstruct(PVGASTATE pVGAState, uint32_t cPipeElements)
{
    /*
     * Allocates and initializes the VDMA host state and wires it into pVGAState.
     * NOTE(review): cPipeElements is currently unused - presumably a leftover
     * from an older pipe-based implementation; confirm before removing.
     * @returns VBox status code.
     */
    int rc;
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)RTMemAllocZ(sizeof(*pVdma));
    Assert(pVdma);
    if (pVdma)
    {
        pVdma->pHgsmi = pVGAState->pHGSMI;
        pVdma->pVGAState = pVGAState;

#ifdef VBOX_VDMA_WITH_WATCHDOG
        rc = PDMDevHlpTMTimerCreate(pVGAState->pDevInsR3, TMCLOCK_REAL, vboxVDMAWatchDogTimer,
                                        pVdma, TMTIMER_FLAGS_NO_CRIT_SECT,
                                        "VDMA WatchDog Timer", &pVdma->WatchDogTimer);
        AssertRC(rc);
#endif

#ifdef VBOX_WITH_CRHGSMI
        VBoxVDMAThreadInit(&pVdma->Thread);

        rc = RTSemEventMultiCreate(&pVdma->HostCrCtlCompleteEvent);
        if (RT_SUCCESS(rc))
        {
            rc = VBoxVBVAExHSInit(&pVdma->CmdVbva);
            if (RT_SUCCESS(rc))
            {
                rc = RTCritSectInit(&pVdma->CalloutCritSect);
                if (RT_SUCCESS(rc))
                {
                    pVGAState->pVdma = pVdma;
                    int rcIgnored = vboxVDMACrCtlHgsmiSetup(pVdma); NOREF(rcIgnored); /** @todo is this ignoring intentional? */
                    return VINF_SUCCESS;

                    /* NOTE(review): unreachable (after the return above) - kept
                     * only for unwind symmetry with the error paths below. */
                    RTCritSectDelete(&pVdma->CalloutCritSect);
                }
                else
                    WARN(("RTCritSectInit failed %d\n", rc));

                VBoxVBVAExHSTerm(&pVdma->CmdVbva);
            }
            else
                WARN(("VBoxVBVAExHSInit failed %d\n", rc));

            RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
        }
        else
            WARN(("RTSemEventMultiCreate failed %d\n", rc));


        RTMemFree(pVdma);
#else
        pVGAState->pVdma = pVdma;
        return VINF_SUCCESS;
#endif
    }
    else
        rc = VERR_OUT_OF_RESOURCES;

    return rc;
}
2768
int vboxVDMAReset(struct VBOXVDMAHOST *pVdma)
{
    /* Device reset: synchronously disable VBVA command processing.
     * @returns VINF_SUCCESS always. */
#ifdef VBOX_WITH_CRHGSMI
    vdmaVBVACtlDisableSync(pVdma);
#endif
    return VINF_SUCCESS;
}
2776
2777int vboxVDMADestruct(struct VBOXVDMAHOST *pVdma)
2778{
2779 if (!pVdma)
2780 return VINF_SUCCESS;
2781#ifdef VBOX_WITH_CRHGSMI
2782 vdmaVBVACtlDisableSync(pVdma);
2783 VBoxVDMAThreadCleanup(&pVdma->Thread);
2784 VBoxVBVAExHSTerm(&pVdma->CmdVbva);
2785 RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
2786 RTCritSectDelete(&pVdma->CalloutCritSect);
2787#endif
2788 RTMemFree(pVdma);
2789 return VINF_SUCCESS;
2790}
2791
2792void vboxVDMAControl(struct VBOXVDMAHOST *pVdma, PVBOXVDMA_CTL pCmd, uint32_t cbCmd)
2793{
2794 PHGSMIINSTANCE pIns = pVdma->pHgsmi;
2795
2796 switch (pCmd->enmCtl)
2797 {
2798 case VBOXVDMA_CTL_TYPE_ENABLE:
2799 pCmd->i32Result = VINF_SUCCESS;
2800 break;
2801 case VBOXVDMA_CTL_TYPE_DISABLE:
2802 pCmd->i32Result = VINF_SUCCESS;
2803 break;
2804 case VBOXVDMA_CTL_TYPE_FLUSH:
2805 pCmd->i32Result = VINF_SUCCESS;
2806 break;
2807#ifdef VBOX_VDMA_WITH_WATCHDOG
2808 case VBOXVDMA_CTL_TYPE_WATCHDOG:
2809 pCmd->i32Result = vboxVDMAWatchDogCtl(pVdma, pCmd->u32Offset);
2810 break;
2811#endif
2812 default:
2813 WARN(("cmd not supported"));
2814 pCmd->i32Result = VERR_NOT_SUPPORTED;
2815 }
2816
2817 int rc = VBoxSHGSMICommandComplete (pIns, pCmd);
2818 AssertRC(rc);
2819}
2820
void vboxVDMACommand(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    /* Entry point for guest-submitted VDMA command descriptors. */
    int rc = VERR_NOT_IMPLEMENTED;

#ifdef VBOX_WITH_CRHGSMI
    /* chromium commands are processed by the chromium HGCM thread independently from our internal cmd processing pipeline
     * this is why we process them specially */
    rc = vboxVDMACmdCheckCrCmd(pVdma, pCmd, cbCmd);
    if (rc == VINF_SUCCESS)
        return; /* consumed: completed, or handed off for asynchronous completion */

    if (RT_FAILURE(rc))
    {
        /* Malformed command: complete it immediately with the error. */
        pCmd->rc = rc;
        rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
        AssertRC(rc);
        return;
    }

    /* VINF_NOT_SUPPORTED: a regular command, process it synchronously here. */
    vboxVDMACommandProcess(pVdma, pCmd, cbCmd);
#else
    /* Without CRHGSMI the command is completed with VERR_NOT_IMPLEMENTED. */
    pCmd->rc = rc;
    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
    AssertRC(rc);
#endif
}
2847
2848/**/
2849#ifdef VBOX_WITH_CRHGSMI
2850
2851static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext);
2852
2853static int vdmaVBVACtlSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2854{
2855 int rc = VBoxVBVAExHCtlSubmit(&pVdma->CmdVbva, pCtl, enmSource, pfnComplete, pvComplete);
2856 if (RT_SUCCESS(rc))
2857 {
2858 if (rc == VINF_SUCCESS)
2859 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
2860 else
2861 Assert(rc == VINF_ALREADY_INITIALIZED);
2862 }
2863 else
2864 Log(("VBoxVBVAExHCtlSubmit failed %d\n", rc));
2865
2866 return rc;
2867}
2868
2869static DECLCALLBACK(void) vboxCmdVBVACmdCtlGuestCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext)
2870{
2871 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvContext;
2872 VBOXCMDVBVA_CTL *pGCtl = (VBOXCMDVBVA_CTL*)(pCtl->u.cmd.pu8Cmd - sizeof (VBOXCMDVBVA_CTL));
2873 AssertRC(rc);
2874 pGCtl->i32Result = rc;
2875
2876 Assert(pVdma->pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
2877 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pGCtl);
2878 AssertRC(rc);
2879
2880 VBoxVBVAExHCtlFree(pVbva, pCtl);
2881}
2882
2883static int vdmaVBVACtlGenericSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_SOURCE enmSource, VBVAEXHOSTCTL_TYPE enmType, uint8_t* pu8Cmd, uint32_t cbCmd, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2884{
2885 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, enmType);
2886 if (!pHCtl)
2887 {
2888 WARN(("VBoxVBVAExHCtlCreate failed\n"));
2889 return VERR_NO_MEMORY;
2890 }
2891
2892 pHCtl->u.cmd.pu8Cmd = pu8Cmd;
2893 pHCtl->u.cmd.cbCmd = cbCmd;
2894 int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, enmSource, pfnComplete, pvComplete);
2895 if (RT_FAILURE(rc))
2896 {
2897 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
2898 Log(("vdmaVBVACtlSubmit failed rc %d\n", rc));
2899 return rc;;
2900 }
2901 return VINF_SUCCESS;
2902}
2903
2904static int vdmaVBVACtlGenericGuestSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_TYPE enmType, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
2905{
2906 Assert(cbCtl >= sizeof (VBOXCMDVBVA_CTL));
2907 VBoxSHGSMICommandMarkAsynchCompletion(pCtl);
2908 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_GUEST, enmType, (uint8_t*)(pCtl+1), cbCtl - sizeof (VBOXCMDVBVA_CTL), vboxCmdVBVACmdCtlGuestCompletion, pVdma);
2909 if (RT_SUCCESS(rc))
2910 return VINF_SUCCESS;
2911
2912 WARN(("vdmaVBVACtlGenericSubmit failed %d\n", rc));
2913 pCtl->i32Result = rc;
2914 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
2915 AssertRC(rc);
2916 return VINF_SUCCESS;
2917}
2918
2919static DECLCALLBACK(void) vboxCmdVBVACmdCtlHostCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvCompletion)
2920{
2921 VBOXCRCMDCTL* pVboxCtl = (VBOXCRCMDCTL*)pCtl->u.cmd.pu8Cmd;
2922 if (pVboxCtl->u.pfnInternal)
2923 ((PFNCRCTLCOMPLETION)pVboxCtl->u.pfnInternal)(pVboxCtl, pCtl->u.cmd.cbCmd, rc, pvCompletion);
2924 VBoxVBVAExHCtlFree(pVbva, pCtl);
2925}
2926
2927static int vdmaVBVACtlOpaqueHostSubmit(PVBOXVDMAHOST pVdma, struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
2928 PFNCRCTLCOMPLETION pfnCompletion,
2929 void *pvCompletion)
2930{
2931 pCmd->u.pfnInternal = (void(*)())pfnCompletion;
2932 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_HOST, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, (uint8_t*)pCmd, cbCmd, vboxCmdVBVACmdCtlHostCompletion, pvCompletion);
2933 if (RT_FAILURE(rc))
2934 {
2935 if (rc == VERR_INVALID_STATE)
2936 {
2937 pCmd->u.pfnInternal = NULL;
2938 PVGASTATE pVGAState = pVdma->pVGAState;
2939 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCmd, cbCmd, pfnCompletion, pvCompletion);
2940 if (!RT_SUCCESS(rc))
2941 WARN(("pfnCrHgsmiControlProcess failed %d\n", rc));
2942
2943 return rc;
2944 }
2945 WARN(("vdmaVBVACtlGenericSubmit failed %d\n", rc));
2946 return rc;
2947 }
2948
2949 return VINF_SUCCESS;
2950}
2951
2952static DECLCALLBACK(int) vdmaVBVANotifyEnable(PVGASTATE pVGAState)
2953{
2954 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
2955 {
2956 int rc = pVGAState->pDrv->pfnVBVAEnable (pVGAState->pDrv, i, NULL, true);
2957 if (!RT_SUCCESS(rc))
2958 {
2959 WARN(("pfnVBVAEnable failed %d\n", rc));
2960 for (uint32_t j = 0; j < i; j++)
2961 {
2962 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, j);
2963 }
2964
2965 return rc;
2966 }
2967 }
2968 return VINF_SUCCESS;
2969}
2970
2971static DECLCALLBACK(int) vdmaVBVANotifyDisable(PVGASTATE pVGAState)
2972{
2973 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
2974 {
2975 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, i);
2976 }
2977 return VINF_SUCCESS;
2978}
2979
static DECLCALLBACK(void) vdmaVBVACtlThreadCreatedEnable(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvContext)
{
    /* Worker-thread creation callback for the enable path: processes the pending
     * enable/disable control and, when an actual state change happened, notifies
     * Main about the new VBVA state.  Always completes the control at the end. */
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvThreadContext;
    VBVAEXHOSTCTL* pHCtl = (VBVAEXHOSTCTL*)pvContext;

    if (RT_SUCCESS(rc))
    {
        rc = vboxVDMACrGuestCtlProcess(pVdma, pHCtl);
        /* rc == VINF_SUCCESS would mean the actual state change has occurred */
        if (rc == VINF_SUCCESS)
        {
            /* we need to inform Main about VBVA enable/disable
             * main expects notifications to be done from the main thread
             * submit it there */
            PVGASTATE pVGAState = pVdma->pVGAState;

            if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
                vdmaVBVANotifyEnable(pVGAState);
            else
                vdmaVBVANotifyDisable(pVGAState);
        }
        else if (RT_FAILURE(rc))
            WARN(("vboxVDMACrGuestCtlProcess failed %d\n", rc));
    }
    else
        WARN(("vdmaVBVACtlThreadCreatedEnable is passed %d\n", rc));

    VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pHCtl, rc);
}
3009
3010static int vdmaVBVACtlEnableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, bool fPaused, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3011{
3012 int rc;
3013 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, fPaused ? VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED : VBVAEXHOSTCTL_TYPE_GHH_ENABLE);
3014 if (pHCtl)
3015 {
3016 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
3017 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
3018 pHCtl->pfnComplete = pfnComplete;
3019 pHCtl->pvComplete = pvComplete;
3020
3021 rc = VBoxVDMAThreadCreate(&pVdma->Thread, vboxVDMAWorkerThread, pVdma, vdmaVBVACtlThreadCreatedEnable, pHCtl);
3022 if (RT_SUCCESS(rc))
3023 return VINF_SUCCESS;
3024 else
3025 WARN(("VBoxVDMAThreadCreate failed %d\n", rc));
3026
3027 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3028 }
3029 else
3030 {
3031 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3032 rc = VERR_NO_MEMORY;
3033 }
3034
3035 return rc;
3036}
3037
3038static int vdmaVBVACtlEnableSubmitSync(PVBOXVDMAHOST pVdma, uint32_t offVram, bool fPaused)
3039{
3040 VBVAENABLE Enable = {0};
3041 Enable.u32Flags = VBVA_F_ENABLE;
3042 Enable.u32Offset = offVram;
3043
3044 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
3045 Data.rc = VERR_NOT_IMPLEMENTED;
3046 int rc = RTSemEventCreate(&Data.hEvent);
3047 if (!RT_SUCCESS(rc))
3048 {
3049 WARN(("RTSemEventCreate failed %d\n", rc));
3050 return rc;
3051 }
3052
3053 rc = vdmaVBVACtlEnableSubmitInternal(pVdma, &Enable, fPaused, vdmaVBVACtlSubmitSyncCompletion, &Data);
3054 if (RT_SUCCESS(rc))
3055 {
3056 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
3057 if (RT_SUCCESS(rc))
3058 {
3059 rc = Data.rc;
3060 if (!RT_SUCCESS(rc))
3061 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
3062 }
3063 else
3064 WARN(("RTSemEventWait failed %d\n", rc));
3065 }
3066 else
3067 WARN(("vdmaVBVACtlSubmit failed %d\n", rc));
3068
3069 RTSemEventDestroy(Data.hEvent);
3070
3071 return rc;
3072}
3073
3074static int vdmaVBVACtlDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3075{
3076 int rc;
3077 VBVAEXHOSTCTL* pHCtl;
3078 if (VBoxVBVAExHSIsDisabled(&pVdma->CmdVbva))
3079 {
3080 WARN(("VBoxVBVAExHSIsDisabled: disabled"));
3081 return VINF_SUCCESS;
3082 }
3083
3084 pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_GHH_DISABLE);
3085 if (!pHCtl)
3086 {
3087 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3088 return VERR_NO_MEMORY;
3089 }
3090
3091 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
3092 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
3093 rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_GUEST, pfnComplete, pvComplete);
3094 if (RT_SUCCESS(rc))
3095 return VINF_SUCCESS;
3096
3097 WARN(("vdmaVBVACtlSubmit failed rc %d\n", rc));
3098 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3099 return rc;
3100}
3101
3102static int vdmaVBVACtlEnableDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3103{
3104 bool fEnable = ((pEnable->u32Flags & (VBVA_F_ENABLE | VBVA_F_DISABLE)) == VBVA_F_ENABLE);
3105 if (fEnable)
3106 return vdmaVBVACtlEnableSubmitInternal(pVdma, pEnable, false, pfnComplete, pvComplete);
3107 return vdmaVBVACtlDisableSubmitInternal(pVdma, pEnable, pfnComplete, pvComplete);
3108}
3109
3110static int vdmaVBVACtlEnableDisableSubmit(PVBOXVDMAHOST pVdma, VBOXCMDVBVA_CTL_ENABLE *pEnable)
3111{
3112 VBoxSHGSMICommandMarkAsynchCompletion(&pEnable->Hdr);
3113 int rc = vdmaVBVACtlEnableDisableSubmitInternal(pVdma, &pEnable->Enable, vboxCmdVBVACmdCtlGuestCompletion, pVdma);
3114 if (RT_SUCCESS(rc))
3115 return VINF_SUCCESS;
3116
3117 WARN(("vdmaVBVACtlEnableDisableSubmitInternal failed %d\n", rc));
3118 pEnable->Hdr.i32Result = rc;
3119 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, &pEnable->Hdr);
3120 AssertRC(rc);
3121 return VINF_SUCCESS;
3122}
3123
3124static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext)
3125{
3126 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvContext;
3127 pData->rc = rc;
3128 rc = RTSemEventSignal(pData->hEvent);
3129 if (!RT_SUCCESS(rc))
3130 WARN(("RTSemEventSignal failed %d\n", rc));
3131}
3132
3133static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource)
3134{
3135 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
3136 Data.rc = VERR_NOT_IMPLEMENTED;
3137 int rc = RTSemEventCreate(&Data.hEvent);
3138 if (!RT_SUCCESS(rc))
3139 {
3140 WARN(("RTSemEventCreate failed %d\n", rc));
3141 return rc;
3142 }
3143
3144 rc = vdmaVBVACtlSubmit(pVdma, pCtl, enmSource, vdmaVBVACtlSubmitSyncCompletion, &Data);
3145 if (RT_SUCCESS(rc))
3146 {
3147 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
3148 if (RT_SUCCESS(rc))
3149 {
3150 rc = Data.rc;
3151 if (!RT_SUCCESS(rc))
3152 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
3153 }
3154 else
3155 WARN(("RTSemEventWait failed %d\n", rc));
3156 }
3157 else
3158 Log(("vdmaVBVACtlSubmit failed %d\n", rc));
3159
3160 RTSemEventDestroy(Data.hEvent);
3161
3162 return rc;
3163}
3164
3165static int vdmaVBVAPause(PVBOXVDMAHOST pVdma)
3166{
3167 VBVAEXHOSTCTL Ctl;
3168 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE;
3169 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
3170}
3171
3172static int vdmaVBVAResume(PVBOXVDMAHOST pVdma)
3173{
3174 VBVAEXHOSTCTL Ctl;
3175 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME;
3176 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
3177}
3178
3179static int vboxVDMACmdSubmitPerform(struct VBOXVDMAHOST *pVdma)
3180{
3181 int rc = VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva);
3182 switch (rc)
3183 {
3184 case VINF_SUCCESS:
3185 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
3186 case VINF_ALREADY_INITIALIZED:
3187 case VINF_EOF:
3188 case VERR_INVALID_STATE:
3189 return VINF_SUCCESS;
3190 default:
3191 Assert(!RT_FAILURE(rc));
3192 return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
3193 }
3194}
3195
3196
3197int vboxCmdVBVACmdHostCtl(PPDMIDISPLAYVBVACALLBACKS pInterface,
3198 struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
3199 PFNCRCTLCOMPLETION pfnCompletion,
3200 void *pvCompletion)
3201{
3202 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
3203 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3204 pCmd->CalloutList.List.pNext = NULL;
3205 return vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, pfnCompletion, pvCompletion);
3206}
3207
/**
 * Stack-allocated completion context shared between vboxCmdVBVACmdHostCtlSync
 * (the waiter) and vboxCmdVBVACmdHostCtlSyncCb (the completion callback).
 */
typedef struct VBOXCMDVBVA_CMDHOSTCTL_SYNC
{
    struct VBOXVDMAHOST *pVdma;   /* The owning VDMA host instance. */
    uint32_t fProcessing;         /* Non-zero while the command is in flight; cleared by the callback. */
    int rc;                       /* Completion status written by the callback. */
} VBOXCMDVBVA_CMDHOSTCTL_SYNC;
3214
/**
 * Completion callback for vboxCmdVBVACmdHostCtlSync: records the result,
 * bumps the completed-control counter and wakes the polling waiter.
 *
 * The statement order matters: rc must be stored and the counter incremented
 * before fProcessing is cleared, because the waiter may stop polling as soon
 * as fProcessing becomes zero (pvCompletion points into the waiter's stack).
 *
 * @param   pCmd            The completed host control (unused here).
 * @param   cbCmd           Size of the command (unused here).
 * @param   rc              Completion status of the control.
 * @param   pvCompletion    Pointer to the waiter's VBOXCMDVBVA_CMDHOSTCTL_SYNC.
 */
static DECLCALLBACK(void) vboxCmdVBVACmdHostCtlSyncCb(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
{
    VBOXCMDVBVA_CMDHOSTCTL_SYNC *pData = (VBOXCMDVBVA_CMDHOSTCTL_SYNC*)pvCompletion;

    pData->rc = rc;

    struct VBOXVDMAHOST *pVdma = pData->pVdma;

    /* Track how many host controls have completed since the event was last reset. */
    ASMAtomicIncS32(&pVdma->i32cHostCrCtlCompleted);

    /* Must be the last touch of pData: once cleared, the waiter may return
     * and the structure (on its stack) goes away. */
    pData->fProcessing = 0;

    RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
}
3229
3230static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd, VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb)
3231{
3232 pEntry->pfnCb = pfnCb;
3233 int rc = RTCritSectEnter(&pVdma->CalloutCritSect);
3234 if (RT_SUCCESS(rc))
3235 {
3236 RTListAppend(&pCmd->CalloutList.List, &pEntry->Node);
3237 RTCritSectLeave(&pVdma->CalloutCritSect);
3238
3239 RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
3240 }
3241 else
3242 WARN(("RTCritSectEnter failed %d\n", rc));
3243
3244 return rc;
3245}
3246
3247
3248static int vboxCmdVBVACmdCalloutProcess(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd)
3249{
3250 int rc = VINF_SUCCESS;
3251 for(;;)
3252 {
3253 rc = RTCritSectEnter(&pVdma->CalloutCritSect);
3254 if (RT_SUCCESS(rc))
3255 {
3256 VBOXCRCMDCTL_CALLOUT_LISTENTRY* pEntry = RTListGetFirst(&pCmd->CalloutList.List, VBOXCRCMDCTL_CALLOUT_LISTENTRY, Node);
3257 if (pEntry)
3258 RTListNodeRemove(&pEntry->Node);
3259 RTCritSectLeave(&pVdma->CalloutCritSect);
3260
3261 if (!pEntry)
3262 break;
3263
3264 pEntry->pfnCb(pEntry);
3265 }
3266 else
3267 {
3268 WARN(("RTCritSectEnter failed %d\n", rc));
3269 break;
3270 }
3271 }
3272
3273 return rc;
3274}
3275
/**
 * Submits a host 3D control command and blocks until it completes, servicing
 * any callouts the backend queues on the command while waiting.
 *
 * The wait is a timed poll rather than an indefinite block: the completion
 * event is shared by all host controls, so this thread must both re-check
 * its own fProcessing flag and drain callouts each time it wakes (or times
 * out).
 *
 * @returns Completion status of the control, or the submission error.
 * @param   pInterface  The display VBVA callbacks interface.
 * @param   pCmd        The host control command.
 * @param   cbCmd       Size of the command in bytes.
 */
int vboxCmdVBVACmdHostCtlSync(PPDMIDISPLAYVBVACALLBACKS pInterface,
        struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd)
{
    PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
    struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
    /* Stack-based completion context handed to the async completion callback. */
    VBOXCMDVBVA_CMDHOSTCTL_SYNC Data;
    Data.pVdma = pVdma;
    Data.fProcessing = 1;
    Data.rc = VERR_INTERNAL_ERROR;
    RTListInit(&pCmd->CalloutList.List);
    int rc = vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, vboxCmdVBVACmdHostCtlSyncCb, &Data);
    if (!RT_SUCCESS(rc))
    {
        WARN(("vdmaVBVACtlOpaqueHostSubmit failed %d", rc));
        return rc;
    }

    /* NOTE(review): fProcessing is a plain uint32_t polled across threads;
     * presumably the 500ms timeout plus the event signal make this safe in
     * practice — confirm memory-ordering assumptions before changing. */
    while (Data.fProcessing)
    {
        /* Poll infrequently to make sure no completed message has been missed. */
        RTSemEventMultiWait(pVdma->HostCrCtlCompleteEvent, 500);

        /* Service any callouts the backend queued for this command. */
        vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);

        if (Data.fProcessing)
            RTThreadYield();
    }

    /* extra check callouts */
    vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);

    /* 'Our' message has been processed, so should reset the semaphore.
     * There is still possible that another message has been processed
     * and the semaphore has been signalled again.
     * Reset only if there are no other messages completed.
     */
    int32_t c = ASMAtomicDecS32(&pVdma->i32cHostCrCtlCompleted);
    Assert(c >= 0);
    if (!c)
        RTSemEventMultiReset(pVdma->HostCrCtlCompleteEvent);

    /* Adopt the status the completion callback recorded. */
    rc = Data.rc;
    if (!RT_SUCCESS(rc))
        WARN(("host call failed %d", rc));

    return rc;
}
3323
3324int vboxCmdVBVACmdCtl(PVGASTATE pVGAState, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
3325{
3326 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3327 int rc = VINF_SUCCESS;
3328 switch (pCtl->u32Type)
3329 {
3330 case VBOXCMDVBVACTL_TYPE_3DCTL:
3331 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, pCtl, cbCtl);
3332 case VBOXCMDVBVACTL_TYPE_RESIZE:
3333 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_RESIZE, pCtl, cbCtl);
3334 case VBOXCMDVBVACTL_TYPE_ENABLE:
3335 if (cbCtl != sizeof (VBOXCMDVBVA_CTL_ENABLE))
3336 {
3337 WARN(("incorrect enable size\n"));
3338 rc = VERR_INVALID_PARAMETER;
3339 break;
3340 }
3341 return vdmaVBVACtlEnableDisableSubmit(pVdma, (VBOXCMDVBVA_CTL_ENABLE*)pCtl);
3342 default:
3343 WARN(("unsupported type\n"));
3344 rc = VERR_INVALID_PARAMETER;
3345 break;
3346 }
3347
3348 pCtl->i32Result = rc;
3349 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
3350 AssertRC(rc);
3351 return VINF_SUCCESS;
3352}
3353
3354int vboxCmdVBVACmdSubmit(PVGASTATE pVGAState)
3355{
3356 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3357 {
3358 WARN(("vdma VBVA is disabled\n"));
3359 return VERR_INVALID_STATE;
3360 }
3361
3362 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3363}
3364
3365int vboxCmdVBVACmdFlush(PVGASTATE pVGAState)
3366{
3367 WARN(("flush\n"));
3368 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3369 {
3370 WARN(("vdma VBVA is disabled\n"));
3371 return VERR_INVALID_STATE;
3372 }
3373 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3374}
3375
3376void vboxCmdVBVACmdTimer(PVGASTATE pVGAState)
3377{
3378 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3379 return;
3380 vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3381}
3382
3383bool vboxCmdVBVAIsEnabled(PVGASTATE pVGAState)
3384{
3385 return VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva);
3386}
3387#endif
3388
3389int vboxVDMASaveStateExecPrep(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
3390{
3391#ifdef VBOX_WITH_CRHGSMI
3392 int rc = vdmaVBVAPause(pVdma);
3393 if (RT_SUCCESS(rc))
3394 return VINF_SUCCESS;
3395
3396 if (rc != VERR_INVALID_STATE)
3397 {
3398 WARN(("vdmaVBVAPause failed %d\n", rc));
3399 return rc;
3400 }
3401
3402#ifdef DEBUG_misha
3403 WARN(("debug prep"));
3404#endif
3405
3406 PVGASTATE pVGAState = pVdma->pVGAState;
3407 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
3408 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_BEGIN, sizeof (*pCmd));
3409 Assert(pCmd);
3410 if (pCmd)
3411 {
3412 rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
3413 AssertRC(rc);
3414 if (RT_SUCCESS(rc))
3415 {
3416 rc = vboxVDMACrCtlGetRc(pCmd);
3417 }
3418 vboxVDMACrCtlRelease(pCmd);
3419 return rc;
3420 }
3421 return VERR_NO_MEMORY;
3422#else
3423 return VINF_SUCCESS;
3424#endif
3425}
3426
3427int vboxVDMASaveStateExecDone(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
3428{
3429#ifdef VBOX_WITH_CRHGSMI
3430 int rc = vdmaVBVAResume(pVdma);
3431 if (RT_SUCCESS(rc))
3432 return VINF_SUCCESS;
3433
3434 if (rc != VERR_INVALID_STATE)
3435 {
3436 WARN(("vdmaVBVAResume failed %d\n", rc));
3437 return rc;
3438 }
3439
3440#ifdef DEBUG_misha
3441 WARN(("debug done"));
3442#endif
3443
3444 PVGASTATE pVGAState = pVdma->pVGAState;
3445 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
3446 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_END, sizeof (*pCmd));
3447 Assert(pCmd);
3448 if (pCmd)
3449 {
3450 rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
3451 AssertRC(rc);
3452 if (RT_SUCCESS(rc))
3453 {
3454 rc = vboxVDMACrCtlGetRc(pCmd);
3455 }
3456 vboxVDMACrCtlRelease(pCmd);
3457 return rc;
3458 }
3459 return VERR_NO_MEMORY;
3460#else
3461 return VINF_SUCCESS;
3462#endif
3463}
3464
/**
 * Writes the command-VBVA portion of the saved state.
 *
 * Saves the marker 0xffffffff when command VBVA is not enabled (or CRHGSMI
 * support is compiled out); otherwise saves the VBVA buffer's VRAM offset and
 * delegates the rest to a synchronous HH_SAVESTATE control.
 *
 * Note the #ifdef-interleaved control flow: without VBOX_WITH_CRHGSMI the
 * braced block below is unconditional and always returns early.
 *
 * @returns VBox status code.
 * @param   pVdma   The VDMA host instance.
 * @param   pSSM    The saved-state handle to write to.
 */
int vboxVDMASaveStateExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
{
    int rc;

#ifdef VBOX_WITH_CRHGSMI
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
#endif
    {
        /* Disabled marker: load side recognizes 0xffffffff as "no CmdVbva state". */
        rc = SSMR3PutU32(pSSM, 0xffffffff);
        AssertRCReturn(rc, rc);
        return VINF_SUCCESS;
    }

#ifdef VBOX_WITH_CRHGSMI
    PVGASTATE pVGAState = pVdma->pVGAState;
    uint8_t * pu8VramBase = pVGAState->vram_ptrR3;

    /* Save the VBVA buffer location as an offset into VRAM. */
    rc = SSMR3PutU32(pSSM, (uint32_t)(((uint8_t*)pVdma->CmdVbva.pVBVA) - pu8VramBase));
    AssertRCReturn(rc, rc);

    /* Let the worker serialize the rest via a synchronous host control. */
    VBVAEXHOSTCTL HCtl;
    HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_SAVESTATE;
    HCtl.u.state.pSSM = pSSM;
    HCtl.u.state.u32Version = 0;
    return vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
#endif
}
3492
/**
 * Restores the command-VBVA portion of the saved state.
 *
 * Reads the marker written by vboxVDMASaveStateExecPerform: 0xffffffff means
 * command VBVA was disabled at save time (nothing more to do); any other
 * value is the VBVA buffer's VRAM offset, in which case command VBVA is
 * re-enabled in paused state, its state is loaded via a synchronous
 * HH_LOADSTATE control, and processing is resumed.
 *
 * @returns VBox status code (VERR_VERSION_MISMATCH if the state contains
 *          CmdVbva data but CRHGSMI support is compiled out).
 * @param   pVdma       The VDMA host instance.
 * @param   pSSM        The saved-state handle to read from.
 * @param   u32Version  Saved-state version, forwarded to the load control.
 */
int vboxVDMASaveLoadExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32;
    int rc = SSMR3GetU32(pSSM, &u32);
    AssertRCReturn(rc, rc);

    if (u32 != 0xffffffff)
    {
#ifdef VBOX_WITH_CRHGSMI
        /* u32 is the VBVA buffer's VRAM offset; re-enable in paused state. */
        rc = vdmaVBVACtlEnableSubmitSync(pVdma, u32, true);
        AssertRCReturn(rc, rc);

        Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_ESTATE_PAUSED);

        /* Load the remaining worker state synchronously. */
        VBVAEXHOSTCTL HCtl;
        HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_LOADSTATE;
        HCtl.u.state.pSSM = pSSM;
        HCtl.u.state.u32Version = u32Version;
        rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
        AssertRCReturn(rc, rc);

        rc = vdmaVBVAResume(pVdma);
        AssertRCReturn(rc, rc);

        return VINF_SUCCESS;
#else
        WARN(("Unsupported VBVACtl info!\n"));
        return VERR_VERSION_MISMATCH;
#endif
    }

    return VINF_SUCCESS;
}
3526
3527int vboxVDMASaveLoadDone(struct VBOXVDMAHOST *pVdma)
3528{
3529#ifdef VBOX_WITH_CRHGSMI
3530 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
3531 return VINF_SUCCESS;
3532
3533/** @todo r=bird: BTW. would be great if you put in a couple of comments here and there explaining what
3534 * the purpose of this code is. */
3535 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE);
3536 if (!pHCtl)
3537 {
3538 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3539 return VERR_NO_MEMORY;
3540 }
3541
3542 /* sanity */
3543 pHCtl->u.cmd.pu8Cmd = NULL;
3544 pHCtl->u.cmd.cbCmd = 0;
3545
3546 /* NULL completion will just free the ctl up */
3547 int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_HOST, NULL, NULL);
3548 if (RT_FAILURE(rc))
3549 {
3550 Log(("vdmaVBVACtlSubmit failed rc %d\n", rc));
3551 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3552 return rc;
3553 }
3554#endif
3555 return VINF_SUCCESS;
3556}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette