VirtualBox

source: vbox/trunk/src/VBox/Devices/Graphics/DevVGA_VDMA.cpp@ 51013

Last change on this file since 51013 was 51013, checked in by vboxsync, 11 years ago

Main/crOpenGL/DevVGA: synchronization, bugfixes, cleanup

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 97.4 KB
Line 
1/** @file
2 * Video DMA (VDMA) support.
3 */
4
5/*
6 * Copyright (C) 2006-2012 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 */
16//#include <VBox/VMMDev.h>
17#include <VBox/vmm/pdmdev.h>
18#include <VBox/VBoxVideo.h>
19#include <iprt/semaphore.h>
20#include <iprt/thread.h>
21#include <iprt/mem.h>
22#include <iprt/asm.h>
23#include <iprt/list.h>
24#include <iprt/param.h>
25
26#include "DevVGA.h"
27#include "HGSMI/SHGSMIHost.h"
28
29#include <VBox/VBoxVideo3D.h>
30#include <VBox/VBoxVideoHost3D.h>
31
32#ifdef DEBUG_misha
33# define VBOXVDBG_MEMCACHE_DISABLE
34#endif
35
36#ifndef VBOXVDBG_MEMCACHE_DISABLE
37# include <iprt/memcache.h>
38#endif
39
40#ifdef DEBUG_misha
41#define WARN_BP() do { AssertFailed(); } while (0)
42#else
43#define WARN_BP() do { } while (0)
44#endif
45#define WARN(_msg) do { \
46 LogRel(_msg); \
47 WARN_BP(); \
48 } while (0)
49
50#define VBOXVDMATHREAD_STATE_TERMINATED 0
51#define VBOXVDMATHREAD_STATE_CREATING 1
52#define VBOXVDMATHREAD_STATE_CREATED 3
53#define VBOXVDMATHREAD_STATE_TERMINATING 4
54
55struct VBOXVDMATHREAD;
56
57typedef DECLCALLBACKPTR(void, PFNVBOXVDMATHREAD_CHANGED)(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvChangeContext);
58
typedef struct VBOXVDMATHREAD
{
    RTTHREAD hWorkerThread;               /* worker thread handle, valid in CREATED/TERMINATING states */
    RTSEMEVENT hEvent;                    /* event the worker blocks on / is kicked with */
    volatile uint32_t u32State;           /* VBOXVDMATHREAD_STATE_* */
    PFNVBOXVDMATHREAD_CHANGED pfnChanged; /* one-shot callback fired on the next state transition */
    void *pvChanged;                      /* user context passed to pfnChanged */
} VBOXVDMATHREAD, *PVBOXVDMATHREAD;
67
68
69/* state transformations:
70 *
71 * submitter | processor
72 *
73 * LISTENING ---> PROCESSING
74 *
75 * */
76#define VBVAEXHOSTCONTEXT_STATE_LISTENING 0
77#define VBVAEXHOSTCONTEXT_STATE_PROCESSING 1
78
79#define VBVAEXHOSTCONTEXT_ESTATE_DISABLED -1
80#define VBVAEXHOSTCONTEXT_ESTATE_PAUSED 0
81#define VBVAEXHOSTCONTEXT_ESTATE_ENABLED 1
82
typedef struct VBVAEXHOSTCONTEXT
{
    VBVABUFFER *pVBVA;                /* guest-shared VBVA ring buffer; set by VBoxVBVAExHSEnable */
    volatile int32_t i32State;        /* VBVAEXHOSTCONTEXT_STATE_LISTENING or _PROCESSING */
    volatile int32_t i32EnableState;  /* VBVAEXHOSTCONTEXT_ESTATE_DISABLED/_PAUSED/_ENABLED */
    volatile uint32_t u32cCtls;       /* total number of ctl commands queued on both lists below */
    /* critical section for accessing ctl lists */
    RTCRITSECT CltCritSect;
    RTLISTANCHOR GuestCtlList;        /* guest-submitted ctl commands */
    RTLISTANCHOR HostCtlList;         /* host-submitted ctl commands (processed with priority) */
#ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMEMCACHE CtlCache;              /* allocator cache for VBVAEXHOSTCTL nodes */
#endif
} VBVAEXHOSTCONTEXT;
97
98typedef enum
99{
100 VBVAEXHOSTCTL_TYPE_UNDEFINED = 0,
101 VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE,
102 VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME,
103 VBVAEXHOSTCTL_TYPE_HH_SAVESTATE,
104 VBVAEXHOSTCTL_TYPE_HH_LOADSTATE,
105 VBVAEXHOSTCTL_TYPE_HH_BE_OPAQUE,
106 VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD,
107 VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE,
108 VBVAEXHOSTCTL_TYPE_GHH_ENABLE,
109 VBVAEXHOSTCTL_TYPE_GHH_DISABLE,
110 VBVAEXHOSTCTL_TYPE_GH_MIN = VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE,
111 VBVAEXHOSTCTL_TYPE_GH_MAX = VBVAEXHOSTCTL_TYPE_GHH_DISABLE
112} VBVAEXHOSTCTL_TYPE;
113
114struct VBVAEXHOSTCTL;
115
116typedef DECLCALLBACKPTR(void, PFNVBVAEXHOSTCTL_COMPLETE)(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvComplete);
117
typedef struct VBVAEXHOSTCTL
{
    RTLISTNODE Node;            /* linkage on GuestCtlList/HostCtlList */
    VBVAEXHOSTCTL_TYPE enmType;
    union
    {
        struct
        {
            uint8_t * pu8Cmd;   /* command payload; for guest ctls this points into VRAM */
            uint32_t cbCmd;     /* payload size in bytes */
        } cmd;

        struct
        {
            PSSMHANDLE pSSM;    /* saved-state handle for HH_SAVESTATE/HH_LOADSTATE ctls */
            uint32_t u32Version;
        } state;
    } u;
    PFNVBVAEXHOSTCTL_COMPLETE pfnComplete; /* completion callback; when NULL the ctl is freed on completion */
    void *pvComplete;                      /* user context for pfnComplete */
} VBVAEXHOSTCTL;
139
/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
 * but can be called with other VBoxVBVAExS** (submitter) functions except Init/Start/Term apparently.
 * Can only be called by the processor, i.e. the entity that acquired the processor state by a direct or indirect call to VBoxVBVAExHSCheckCommands;
 * see more detailed comments in headers for function definitions */
144typedef enum
145{
146 VBVAEXHOST_DATA_TYPE_NO_DATA = 0,
147 VBVAEXHOST_DATA_TYPE_CMD,
148 VBVAEXHOST_DATA_TYPE_HOSTCTL,
149 VBVAEXHOST_DATA_TYPE_GUESTCTL
150} VBVAEXHOST_DATA_TYPE;
151static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd);
152
153static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd);
154static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc);
155
/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
 * but can be called concurrently with itself as well as with other VBoxVBVAEx** functions except Init/Start/Term apparently */
158static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva);
159
160static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva);
161static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA);
162static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva);
163static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva);
164static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM);
165static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version);
166
/* Allocates a ctl node from the per-context cache (or plain heap when the cache is compiled out). */
static VBVAEXHOSTCTL* VBoxVBVAExHCtlAlloc(VBVAEXHOSTCONTEXT *pCmdVbva)
{
#ifndef VBOXVDBG_MEMCACHE_DISABLE
    return (VBVAEXHOSTCTL*)RTMemCacheAlloc(pCmdVbva->CtlCache);
#else
    return (VBVAEXHOSTCTL*)RTMemAlloc(sizeof (VBVAEXHOSTCTL));
#endif
}
175
/* Returns a ctl node to the cache (or heap) it was allocated from by VBoxVBVAExHCtlAlloc. */
static void VBoxVBVAExHCtlFree(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl)
{
#ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMemCacheFree(pCmdVbva->CtlCache, pCtl);
#else
    RTMemFree(pCtl);
#endif
}
184
185static VBVAEXHOSTCTL* VBoxVBVAExHCtlCreate(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL_TYPE enmType)
186{
187 VBVAEXHOSTCTL* pCtl = VBoxVBVAExHCtlAlloc(pCmdVbva);
188 if (!pCtl)
189 {
190 WARN(("VBoxVBVAExHCtlAlloc failed\n"));
191 return NULL;
192 }
193
194 pCtl->enmType = enmType;
195 return pCtl;
196}
197
/* Attempts to become the (single) command processor by atomically flipping the
 * state from LISTENING to PROCESSING.
 * @returns VINF_SUCCESS when this thread is now the processor,
 *          VERR_SEM_BUSY when another thread is already processing. */
static int vboxVBVAExHSProcessorAcquire(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);

    if (ASMAtomicCmpXchgS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_PROCESSING, VBVAEXHOSTCONTEXT_STATE_LISTENING))
        return VINF_SUCCESS;
    return VERR_SEM_BUSY;
}
206
/* Dequeues the next pending ctl command, host ctls taking priority over guest ones.
 * Must be called by the processor (i32State == PROCESSING).
 * @param pfHostCtl     set to true if the returned ctl came from the host list, false otherwise.
 * @param fHostOnlyMode when true only host ctls are considered (used on the disable path)
 *                      and the lock-free u32cCtls fast path is skipped.
 * @returns the detached ctl node, or NULL when nothing (suitable) is queued. */
static VBVAEXHOSTCTL* vboxVBVAExHPCheckCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, bool *pfHostCtl, bool fHostOnlyMode)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);

    /* lock-free fast path: nothing queued at all */
    if(!fHostOnlyMode && !ASMAtomicUoReadU32(&pCmdVbva->u32cCtls))
        return NULL;

    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
        VBVAEXHOSTCTL* pCtl = RTListGetFirst(&pCmdVbva->HostCtlList, VBVAEXHOSTCTL, Node);
        if (pCtl)
            *pfHostCtl = true;
        else if (!fHostOnlyMode)
        {
            /* guest ctls are only handed out while not paused */
            if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
            {
                pCtl = RTListGetFirst(&pCmdVbva->GuestCtlList, VBVAEXHOSTCTL, Node);
                /* pCtl can not be null here since pCmdVbva->u32cCtls is not null,
                 * and there are no HostCtl commands*/
                Assert(pCtl);
                *pfHostCtl = false;
            }
        }

        if (pCtl)
        {
            /* detach the node and keep the pending counter in sync with the lists */
            RTListNodeRemove(&pCtl->Node);
            ASMAtomicDecU32(&pCmdVbva->u32cCtls);
        }

        RTCritSectLeave(&pCmdVbva->CltCritSect);

        return pCtl;
    }
    else
        WARN(("RTCritSectEnter failed %d\n", rc));

    return NULL;
}
247
248static VBVAEXHOSTCTL* VBoxVBVAExHPCheckHostCtlOnDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
249{
250 bool fHostCtl = false;
251 VBVAEXHOSTCTL* pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostCtl, true);
252 Assert(!pCtl || fHostCtl);
253 return pCtl;
254}
255
256static bool vboxVBVAExHPCheckProcessCtlInternal(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl)
257{
258 switch (pCtl->enmType)
259 {
260 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE:
261 if (pCmdVbva->i32EnableState > VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
262 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
263 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
264 return true;
265 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME:
266 if (pCmdVbva->i32EnableState == VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
267 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
268 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
269 return true;
270 default:
271 return false;
272 }
273}
274
/* Gives up the processor role acquired by vboxVBVAExHSProcessorAcquire. */
static void vboxVBVAExHPProcessorRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);

    ASMAtomicWriteS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
}
281
/* Advertises VBVA_F_STATE_PROCESSING in the guest-visible host-event flags. */
static void vboxVBVAExHPHgEventSet(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    if (pCmdVbva->pVBVA)
        ASMAtomicOrU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, VBVA_F_STATE_PROCESSING);
}
288
/* Clears VBVA_F_STATE_PROCESSING in the guest-visible host-event flags. */
static void vboxVBVAExHPHgEventClear(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    if (pCmdVbva->pVBVA)
        ASMAtomicAndU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, ~VBVA_F_STATE_PROCESSING);
}
295
/* Peeks at the next guest command record in the VBVA ring buffer.
 * Must be called by the processor with the context not paused/disabled.
 * @returns VINF_SUCCESS with *ppCmd/*pcbCmd set to a complete in-buffer record,
 *          VINF_EOF when there are no records,
 *          VINF_TRY_AGAIN when the guest is still writing the record,
 *          VERR_INVALID_STATE for records crossing the ring boundary (unsupported). */
static int vboxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pCmdVbva->i32EnableState > VBVAEXHOSTCONTEXT_ESTATE_PAUSED);

    VBVABUFFER *pVBVA = pCmdVbva->pVBVA;

    uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
    uint32_t indexRecordFree = pVBVA->indexRecordFree;

    Log(("first = %d, free = %d\n",
         indexRecordFirst, indexRecordFree));

    if (indexRecordFirst == indexRecordFree)
    {
        /* No records to process. Return without assigning output variables. */
        return VINF_EOF;
    }

    uint32_t cbRecordCurrent = ASMAtomicReadU32(&pVBVA->aRecords[indexRecordFirst].cbRecord);

    /* A new record need to be processed. */
    if (cbRecordCurrent & VBVA_F_RECORD_PARTIAL)
    {
        /* the record is being recorded, try again */
        return VINF_TRY_AGAIN;
    }

    uint32_t cbRecord = cbRecordCurrent & ~VBVA_F_RECORD_PARTIAL;

    if (!cbRecord)
    {
        /* the record is being recorded, try again */
        return VINF_TRY_AGAIN;
    }

    /* we should not get partial commands here actually */
    Assert(cbRecord);

    /* The size of largest contiguous chunk in the ring buffer. */
    uint32_t u32BytesTillBoundary = pVBVA->cbData - pVBVA->off32Data;

    /* The pointer to data in the ring buffer. */
    uint8_t *pSrc = &pVBVA->au8Data[pVBVA->off32Data];

    /* Fetch or point the data. */
    if (u32BytesTillBoundary >= cbRecord)
    {
        /* The command does not cross buffer boundary. Return address in the buffer. */
        *ppCmd = pSrc;
        *pcbCmd = cbRecord;
        return VINF_SUCCESS;
    }

    LogRel(("CmdVbva: cross-bound writes unsupported\n"));
    return VERR_INVALID_STATE;
}
353
/* Retires a completed guest command: consumes cbCmd bytes of ring data and
 * advances the record index.  Must only be called by the current processor. */
static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd)
{
    VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
    pVBVA->off32Data = (pVBVA->off32Data + cbCmd) % pVBVA->cbData;

    pVBVA->indexRecordFirst = (pVBVA->indexRecordFirst + 1) % RT_ELEMENTS(pVBVA->aRecords);
}
361
362static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc)
363{
364 if (pCtl->pfnComplete)
365 pCtl->pfnComplete(pCmdVbva, pCtl, rc, pCtl->pvComplete);
366 else
367 VBoxVBVAExHCtlFree(pCmdVbva, pCtl);
368}
369
/* Processor-side fetch of the next work item.  Internal pause/resume ctls are
 * handled inline; everything else is handed to the caller along with its type.
 * @returns VBVAEXHOST_DATA_TYPE_* describing what *ppCmd points to. */
static VBVAEXHOST_DATA_TYPE vboxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    VBVAEXHOSTCTL*pCtl;
    bool fHostClt;

    for(;;)
    {
        /* ctl commands first, then the guest ring buffer */
        pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostClt, false);
        if (pCtl)
        {
            if (fHostClt)
            {
                /* pause/resume are consumed here; other host ctls go to the caller */
                if (!vboxVBVAExHPCheckProcessCtlInternal(pCmdVbva, pCtl))
                {
                    *ppCmd = (uint8_t*)pCtl;
                    *pcbCmd = sizeof (*pCtl);
                    return VBVAEXHOST_DATA_TYPE_HOSTCTL;
                }
                continue;
            }
            else
            {
                *ppCmd = (uint8_t*)pCtl;
                *pcbCmd = sizeof (*pCtl);
                return VBVAEXHOST_DATA_TYPE_GUESTCTL;
            }
        }

        /* guest commands are only fetched while fully enabled */
        if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) <= VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
            return VBVAEXHOST_DATA_TYPE_NO_DATA;

        int rc = vboxVBVAExHPCmdGet(pCmdVbva, ppCmd, pcbCmd);
        switch (rc)
        {
            case VINF_SUCCESS:
                return VBVAEXHOST_DATA_TYPE_CMD;
            case VINF_EOF:
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
            case VINF_TRY_AGAIN:
                /* the guest is still writing the record; back off briefly and retry */
                RTThreadSleep(1);
                continue;
            default:
                /* this is something really unexpected, i.e. most likely guest has written something incorrect to the VBVA buffer */
                WARN(("Warning: vboxVBVAExHCmdGet returned unexpected status %d\n", rc));
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
        }
    }

    /* not reachable: the loop above only exits via return */
    WARN(("Warning: VBoxVBVAExHCmdGet unexpected state\n"));
    return VBVAEXHOST_DATA_TYPE_NO_DATA;
}
422
/* Public processor fetch: wraps vboxVBVAExHPDataGet and, when the queue runs dry,
 * releases the processor role while carefully avoiding a lost-notification race
 * with concurrent submitters (see the numbered comment below). */
static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    VBVAEXHOST_DATA_TYPE enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
    if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
    {
        vboxVBVAExHPHgEventClear(pCmdVbva);
        vboxVBVAExHPProcessorRelease(pCmdVbva);
        /* we need to prevent racing between us clearing the flag and command check/submission thread, i.e.
         * 1. we check the queue -> and it is empty
         * 2. submitter adds command to the queue
         * 3. submitter checks the "processing" -> and it is true , thus it does not submit a notification
         * 4. we clear the "processing" state
         * 5. ->here we need to re-check the queue state to ensure we do not leak the notification of the above command
         * 6. if the queue appears to be not-empty set the "processing" state back to "true"
         **/
        int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
        if (RT_SUCCESS(rc))
        {
            /* we are the processor now */
            enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
            if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
            {
                /* still nothing: release again and report empty */
                vboxVBVAExHPProcessorRelease(pCmdVbva);
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
            }

            /* something was queued after all; keep the processing flag visible to the guest */
            vboxVBVAExHPHgEventSet(pCmdVbva);
        }
    }

    return enmType;
}
455
456DECLINLINE(bool) vboxVBVAExHSHasCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
457{
458 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
459
460 if (pVBVA)
461 {
462 uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
463 uint32_t indexRecordFree = pVBVA->indexRecordFree;
464
465 if (indexRecordFirst != indexRecordFree)
466 return true;
467 }
468
469 return !!ASMAtomicReadU32(&pCmdVbva->u32cCtls);
470}
471
/* Checks whether new commands are ready for processing
 * @returns
 * VINF_SUCCESS - there are commands in the queue, and the given thread is now the processor (i.e. typically it would delegate processing to a worker thread)
 * VINF_EOF - no commands in the queue
 * VINF_ALREADY_INITIALIZED - another thread is already processing the commands
 * VERR_INVALID_STATE - the VBVA is paused or pausing */
static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
    if (RT_SUCCESS(rc))
    {
        /* we are the processor now */
        if (vboxVBVAExHSHasCommands(pCmdVbva))
        {
            /* keep the processor role and tell the guest we are busy */
            vboxVBVAExHPHgEventSet(pCmdVbva);
            return VINF_SUCCESS;
        }

        /* nothing to do: hand the processor role back */
        vboxVBVAExHPProcessorRelease(pCmdVbva);
        return VINF_EOF;
    }
    if (rc == VERR_SEM_BUSY)
        return VINF_ALREADY_INITIALIZED;
    /* NOTE(review): acquire only returns success or VERR_SEM_BUSY today, so this
     * path looks unreachable unless vboxVBVAExHSProcessorAcquire changes. */
    return VERR_INVALID_STATE;
}
497
/* One-time initialization of a VBVAEXHOSTCONTEXT: critical section, ctl cache and lists.
 * Starts with i32State == PROCESSING and the context disabled.
 * @returns VINF_SUCCESS or the failure status of the underlying IPRT call. */
static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
    int rc = RTCritSectInit(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
#ifndef VBOXVDBG_MEMCACHE_DISABLE
        rc = RTMemCacheCreate(&pCmdVbva->CtlCache, sizeof (VBVAEXHOSTCTL),
                                0, /* size_t cbAlignment */
                                UINT32_MAX, /* uint32_t cMaxObjects */
                                NULL, /* PFNMEMCACHECTOR pfnCtor*/
                                NULL, /* PFNMEMCACHEDTOR pfnDtor*/
                                NULL, /* void *pvUser*/
                                0 /* uint32_t fFlags*/
                                );
        if (RT_SUCCESS(rc))
#endif
        {
            RTListInit(&pCmdVbva->GuestCtlList);
            RTListInit(&pCmdVbva->HostCtlList);
            pCmdVbva->i32State = VBVAEXHOSTCONTEXT_STATE_PROCESSING;
            pCmdVbva->i32EnableState = VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
            return VINF_SUCCESS;
        }
#ifndef VBOXVDBG_MEMCACHE_DISABLE
        else
            WARN(("RTMemCacheCreate failed %d\n", rc));
#endif
    }
    else
        WARN(("RTCritSectInit failed %d\n", rc));

    return rc;
}
532
533DECLINLINE(bool) VBoxVBVAExHSIsEnabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
534{
535 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) >= VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
536}
537
538DECLINLINE(bool) VBoxVBVAExHSIsDisabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
539{
540 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) == VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
541}
542
543static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA)
544{
545 if (VBoxVBVAExHSIsEnabled(pCmdVbva))
546 {
547 WARN(("VBVAEx is enabled already\n"));
548 return VERR_INVALID_STATE;
549 }
550
551 pCmdVbva->pVBVA = pVBVA;
552 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
553 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
554 return VINF_SUCCESS;
555}
556
557static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
558{
559 if (VBoxVBVAExHSIsDisabled(pCmdVbva))
560 return VINF_SUCCESS;
561
562 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
563 return VINF_SUCCESS;
564}
565
/* Tears down a context initialized by VBoxVBVAExHSInit.  Both ctl lists must be
 * empty and no processor may be active. */
static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    /* ensure the processor is stopped */
    Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);

    /* ensure no one tries to submit the command */
    if (pCmdVbva->pVBVA)
        pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;

    Assert(RTListIsEmpty(&pCmdVbva->GuestCtlList));
    Assert(RTListIsEmpty(&pCmdVbva->HostCtlList));

    RTCritSectDelete(&pCmdVbva->CltCritSect);

#ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMemCacheDestroy(pCmdVbva->CtlCache);
#endif

    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
}
586
/* Serializes one queued guest ctl to the saved-state stream: type, size and the
 * payload pointer as an offset relative to the VRAM base. */
static int vboxVBVAExHSSaveGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
{
    /* only guest-originated (GHH_*) ctls are expected on the guest list */
    if (VBVAEXHOSTCTL_TYPE_GH_MIN > pCtl->enmType || VBVAEXHOSTCTL_TYPE_GH_MAX < pCtl->enmType)
    {
        WARN(("unexpected command type!\n"));
        return VERR_INTERNAL_ERROR;
    }

    int rc = SSMR3PutU32(pSSM, pCtl->enmType);
    AssertRCReturn(rc, rc);
    rc = SSMR3PutU32(pSSM, pCtl->u.cmd.cbCmd);
    AssertRCReturn(rc, rc);
    /* store the payload pointer as a VRAM offset so it survives relocation */
    rc = SSMR3PutU32(pSSM, (uint32_t)(pCtl->u.cmd.pu8Cmd - pu8VramBase));
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}
604
/* Saves all queued guest ctls (caller holds CltCritSect); the list is terminated
 * in the stream by a zero type word.  The context must be paused. */
static int vboxVBVAExHSSaveStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
{
    if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    {
        WARN(("vbva not paused\n"));
        return VERR_INVALID_STATE;
    }

    VBVAEXHOSTCTL* pCtl;
    int rc;
    RTListForEach(&pCmdVbva->GuestCtlList, pCtl, VBVAEXHOSTCTL, Node)
    {
        rc = vboxVBVAExHSSaveGuestCtl(pCmdVbva, pCtl, pu8VramBase, pSSM);
        AssertRCReturn(rc, rc);
    }

    /* zero type terminates the ctl list in the stream */
    rc = SSMR3PutU32(pSSM, 0);
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}
/* Saves state
 * @returns VINF_SUCCESS on success, or a failure status if saving the state failed
 */
629static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
630{
631 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
632 if (RT_FAILURE(rc))
633 {
634 WARN(("RTCritSectEnter failed %d\n", rc));
635 return rc;
636 }
637
638 rc = vboxVBVAExHSSaveStateLocked(pCmdVbva, pu8VramBase, pSSM);
639 if (RT_FAILURE(rc))
640 WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
641
642 RTCritSectLeave(&pCmdVbva->CltCritSect);
643
644 return rc;
645}
646
647static int vboxVBVAExHSLoadGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
648{
649 uint32_t u32;
650 int rc = SSMR3GetU32(pSSM, &u32);
651 AssertRCReturn(rc, rc);
652
653 if (!u32)
654 return VINF_EOF;
655
656 if (VBVAEXHOSTCTL_TYPE_GH_MIN > u32 || VBVAEXHOSTCTL_TYPE_GH_MAX < u32)
657 {
658 WARN(("unexpected command type!\n"));
659 return VERR_INTERNAL_ERROR;
660 }
661
662 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(pCmdVbva, (VBVAEXHOSTCTL_TYPE)u32);
663 if (!pHCtl)
664 {
665 WARN(("VBoxVBVAExHCtlCreate failed\n"));
666 return VERR_NO_MEMORY;
667 }
668
669 rc = SSMR3GetU32(pSSM, &u32);
670 AssertRCReturn(rc, rc);
671 pHCtl->u.cmd.cbCmd = u32;
672
673 rc = SSMR3GetU32(pSSM, &u32);
674 AssertRCReturn(rc, rc);
675 pHCtl->u.cmd.pu8Cmd = pu8VramBase + u32;
676
677 RTListAppend(&pCmdVbva->GuestCtlList, &pHCtl->Node);
678 ++pCmdVbva->u32cCtls;
679
680 return VINF_SUCCESS;
681}
682
683
/* Loads all saved guest ctls (caller holds CltCritSect) until the VINF_EOF
 * terminator record is reached.  The context must be paused. */
static int vboxVBVAExHSLoadStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
{
    if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    {
        WARN(("vbva not stopped\n"));
        return VERR_INVALID_STATE;
    }

    int rc;

    do {
        rc = vboxVBVAExHSLoadGuestCtl(pCmdVbva, pu8VramBase, pSSM, u32Version);
        AssertRCReturn(rc, rc);
    } while (VINF_EOF != rc);

    return VINF_SUCCESS;
}
701
/* Loads state
 * @returns VINF_SUCCESS on success, or a failure status if loading the state failed
 */
705static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
706{
707 Assert(VGA_SAVEDSTATE_VERSION_3D <= u32Version);
708 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
709 if (RT_FAILURE(rc))
710 {
711 WARN(("RTCritSectEnter failed %d\n", rc));
712 return rc;
713 }
714
715 rc = vboxVBVAExHSLoadStateLocked(pCmdVbva, pu8VramBase, pSSM, u32Version);
716 if (RT_FAILURE(rc))
717 WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
718
719 RTCritSectLeave(&pCmdVbva->CltCritSect);
720
721 return rc;
722}
723
724typedef enum
725{
726 VBVAEXHOSTCTL_SOURCE_GUEST = 0,
727 VBVAEXHOSTCTL_SOURCE_HOST
728} VBVAEXHOSTCTL_SOURCE;
729
730
/* Queues a ctl command on the host or guest list (per enmSource) and kicks
 * command processing.
 * @returns same as VBoxVBVAExHSCheckCommands on success, failure status otherwise. */
static int VBoxVBVAExHCtlSubmit(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
{
    if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
    {
        Log(("cmd vbva not enabled\n"));
        return VERR_INVALID_STATE;
    }

    pCtl->pfnComplete = pfnComplete;
    pCtl->pvComplete = pvComplete;

    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
        /* re-check under the lock: the context may have been disabled meanwhile */
        if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
        {
            Log(("cmd vbva not enabled\n"));
            RTCritSectLeave(&pCmdVbva->CltCritSect);
            return VERR_INVALID_STATE;
        }

        if (enmSource > VBVAEXHOSTCTL_SOURCE_GUEST)
        {
            RTListAppend(&pCmdVbva->HostCtlList, &pCtl->Node);
        }
        else
            RTListAppend(&pCmdVbva->GuestCtlList, &pCtl->Node);

        ASMAtomicIncU32(&pCmdVbva->u32cCtls);

        RTCritSectLeave(&pCmdVbva->CltCritSect);

        /* wake up processing (or learn someone is already processing) */
        rc = VBoxVBVAExHSCheckCommands(pCmdVbva);
    }
    else
        WARN(("RTCritSectEnter failed %d\n", rc));

    return rc;
}
770
typedef struct VBOXVDMAHOST
{
    PHGSMIINSTANCE pHgsmi;                   /* HGSMI instance used for command completion */
    PVGASTATE pVGAState;                     /* owning VGA device state */
#ifdef VBOX_WITH_CRHGSMI
    VBVAEXHOSTCONTEXT CmdVbva;               /* command VBVA context */
    VBOXVDMATHREAD Thread;                   /* worker thread state */
    VBOXCRCMD_SVRINFO CrSrvInfo;             /* chromium command server callbacks/info */
    VBVAEXHOSTCTL* pCurRemainingHostCtl;     /* host ctl currently being drained */
    RTSEMEVENTMULTI HostCrCtlCompleteEvent;  /* signalled when host chromium ctls complete */
    int32_t volatile i32cHostCrCtlCompleted; /* completion counter for the above */
#endif
#ifdef VBOX_VDMA_WITH_WATCHDOG
    PTMTIMERR3 WatchDogTimer;                /* optional watchdog timer */
#endif
} VBOXVDMAHOST, *PVBOXVDMAHOST;
787
788#ifdef VBOX_WITH_CRHGSMI
789
/* Called by the worker thread once it is up: flips CREATING -> CREATED and fires
 * the one-shot pfnChanged callback registered by VBoxVDMAThreadCreate. */
void VBoxVDMAThreadNotifyConstructSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
{
    Assert(pThread->u32State == VBOXVDMATHREAD_STATE_CREATING);
    PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
    void *pvChanged = pThread->pvChanged;

    /* consume the one-shot callback before publishing the new state */
    pThread->pfnChanged = NULL;
    pThread->pvChanged = NULL;

    ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_CREATED);

    if (pfnChanged)
        pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
}
804
/* Called by the worker thread when it acknowledges termination: fires the one-shot
 * pfnChanged callback registered by VBoxVDMAThreadTerm (state stays TERMINATING;
 * VBoxVDMAThreadCleanup moves it to TERMINATED after the thread is joined). */
void VBoxVDMAThreadNotifyTerminatingSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
{
    Assert(pThread->u32State == VBOXVDMATHREAD_STATE_TERMINATING);
    PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
    void *pvChanged = pThread->pvChanged;

    /* consume the one-shot callback */
    pThread->pfnChanged = NULL;
    pThread->pvChanged = NULL;

    if (pfnChanged)
        pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
}
817
818DECLINLINE(bool) VBoxVDMAThreadIsTerminating(PVBOXVDMATHREAD pThread)
819{
820 return ASMAtomicUoReadU32(&pThread->u32State) == VBOXVDMATHREAD_STATE_TERMINATING;
821}
822
823void VBoxVDMAThreadInit(PVBOXVDMATHREAD pThread)
824{
825 memset(pThread, 0, sizeof (*pThread));
826 pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
827}
828
/* Joins a terminating worker thread and releases its event semaphore.
 * @returns VINF_SUCCESS when already terminated or successfully joined,
 *          VERR_INVALID_STATE when the thread is still creating/created. */
int VBoxVDMAThreadCleanup(PVBOXVDMATHREAD pThread)
{
    uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
    switch (u32State)
    {
        case VBOXVDMATHREAD_STATE_TERMINATED:
            /* nothing left to clean up */
            return VINF_SUCCESS;
        case VBOXVDMATHREAD_STATE_TERMINATING:
        {
            int rc = RTThreadWait(pThread->hWorkerThread, RT_INDEFINITE_WAIT, NULL);
            if (!RT_SUCCESS(rc))
            {
                WARN(("RTThreadWait failed %d\n", rc));
                return rc;
            }

            RTSemEventDestroy(pThread->hEvent);

            ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATED);
            return VINF_SUCCESS;
        }
        default:
            WARN(("invalid state"));
            return VERR_INVALID_STATE;
    }
}
855
/* Creates the worker thread, first cleaning up any previous (terminating) instance.
 * pfnCreated/pvCreated are fired from VBoxVDMAThreadNotifyConstructSucceeded once
 * the thread reports in.  On failure the state is reset to TERMINATED. */
int VBoxVDMAThreadCreate(PVBOXVDMATHREAD pThread, PFNRTTHREAD pfnThread, void *pvThread, PFNVBOXVDMATHREAD_CHANGED pfnCreated, void*pvCreated)
{
    int rc = VBoxVDMAThreadCleanup(pThread);
    if (RT_FAILURE(rc))
    {
        WARN(("VBoxVDMAThreadCleanup failed %d\n", rc));
        return rc;
    }

    rc = RTSemEventCreate(&pThread->hEvent);
    if (RT_SUCCESS(rc))
    {
        pThread->u32State = VBOXVDMATHREAD_STATE_CREATING;
        pThread->pfnChanged = pfnCreated;
        pThread->pvChanged = pvCreated;
        rc = RTThreadCreate(&pThread->hWorkerThread, pfnThread, pvThread, 0, RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "VDMA");
        if (RT_SUCCESS(rc))
            return VINF_SUCCESS;
        else
            WARN(("RTThreadCreate failed %d\n", rc));

        RTSemEventDestroy(pThread->hEvent);
    }
    else
        WARN(("RTSemEventCreate failed %d\n", rc));

    pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;

    return rc;
}
886
/* Wakes the worker thread blocked in VBoxVDMAThreadEventWait. */
DECLINLINE(int) VBoxVDMAThreadEventNotify(PVBOXVDMATHREAD pThread)
{
    int rc = RTSemEventSignal(pThread->hEvent);
    AssertRC(rc);
    return rc;
}
893
/* Blocks the worker thread until notified or until cMillies elapse. */
DECLINLINE(int) VBoxVDMAThreadEventWait(PVBOXVDMATHREAD pThread, RTMSINTERVAL cMillies)
{
    int rc = RTSemEventWait(pThread->hEvent, cMillies);
    AssertRC(rc);
    return rc;
}
900
901int VBoxVDMAThreadTerm(PVBOXVDMATHREAD pThread, PFNVBOXVDMATHREAD_CHANGED pfnTerminated, void*pvTerminated, bool fNotify)
902{
903 int rc;
904 do
905 {
906 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
907 switch (u32State)
908 {
909 case VBOXVDMATHREAD_STATE_CREATED:
910 pThread->pfnChanged = pfnTerminated;
911 pThread->pvChanged = pvTerminated;
912 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATING);
913 if (fNotify)
914 {
915 rc = VBoxVDMAThreadEventNotify(pThread);
916 AssertRC(rc);
917 }
918 return VINF_SUCCESS;
919 case VBOXVDMATHREAD_STATE_TERMINATING:
920 case VBOXVDMATHREAD_STATE_TERMINATED:
921 {
922 WARN(("thread is marked to termination or terminated\nn"));
923 return VERR_INVALID_STATE;
924 }
925 case VBOXVDMATHREAD_STATE_CREATING:
926 {
927 /* wait till the thread creation is completed */
928 WARN(("concurrent thread create/destron\n"));
929 RTThreadYield();
930 continue;
931 }
932 default:
933 WARN(("invalid state"));
934 return VERR_INVALID_STATE;
935 }
936 } while (1);
937
938 WARN(("should never be here\n"));
939 return VERR_INTERNAL_ERROR;
940}
941
942static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource);
943
944typedef DECLCALLBACK(void) FNVBOXVDMACRCTL_CALLBACK(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext);
945typedef FNVBOXVDMACRCTL_CALLBACK *PFNVBOXVDMACRCTL_CALLBACK;
946
/* Reference-counted private header prepended to every chromium ctl command;
 * clients only ever see the embedded Cmd member. */
typedef struct VBOXVDMACMD_CHROMIUM_CTL_PRIVATE
{
    uint32_t cRefs;                          /* reference count; the header is freed when it hits 0 */
    int32_t rc;                              /* completion status; VERR_NOT_IMPLEMENTED until completed */
    PFNVBOXVDMACRCTL_CALLBACK pfnCompletion; /* optional completion callback */
    void *pvCompletion;                      /* user context for pfnCompletion */
    VBOXVDMACMD_CHROMIUM_CTL Cmd;            /* public part; must stay last (payload follows, variable size) */
} VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, *PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE;
955
956#define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(_p) ((PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)(((uint8_t*)(_p)) - RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd)))
957
958static PVBOXVDMACMD_CHROMIUM_CTL vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE enmCmd, uint32_t cbCmd)
959{
960 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)RTMemAllocZ(cbCmd + RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd));
961 Assert(pHdr);
962 if (pHdr)
963 {
964 pHdr->cRefs = 1;
965 pHdr->rc = VERR_NOT_IMPLEMENTED;
966 pHdr->Cmd.enmType = enmCmd;
967 pHdr->Cmd.cbCmd = cbCmd;
968 return &pHdr->Cmd;
969 }
970
971 return NULL;
972}
973
974DECLINLINE(void) vboxVDMACrCtlRelease (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
975{
976 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
977 uint32_t cRefs = ASMAtomicDecU32(&pHdr->cRefs);
978 if(!cRefs)
979 {
980 RTMemFree(pHdr);
981 }
982}
983
984DECLINLINE(void) vboxVDMACrCtlRetain (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
985{
986 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
987 ASMAtomicIncU32(&pHdr->cRefs);
988}
989
990DECLINLINE(int) vboxVDMACrCtlGetRc (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
991{
992 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
993 return pHdr->rc;
994}
995
/* Completion callback used by vboxVDMACrCtlPost: pvContext is the event to wake the waiter. */
static DECLCALLBACK(void) vboxVDMACrCtlCbSetEvent(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
{
    RTSemEventSignal((RTSEMEVENT)pvContext);
}
1000
/* Fire-and-forget completion callback: simply drops the command reference. */
static DECLCALLBACK(void) vboxVDMACrCtlCbReleaseCmd(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
{
    vboxVDMACrCtlRelease(pCmd);
}
1005
1006
/* Submits a chromium ctl command to the display connector without waiting; the
 * optional pfnCompletion is stored in the command's private header and invoked
 * when the driver completes it.
 * @returns VERR_NOT_SUPPORTED when no 3D-capable connector is attached. */
static int vboxVDMACrCtlPostAsync (PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd, PFNVBOXVDMACRCTL_CALLBACK pfnCompletion, void *pvCompletion)
{
    if ( pVGAState->pDrv
            && pVGAState->pDrv->pfnCrHgsmiControlProcess)
    {
        PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
        pHdr->pfnCompletion = pfnCompletion;
        pHdr->pvCompletion = pvCompletion;
        pVGAState->pDrv->pfnCrHgsmiControlProcess(pVGAState->pDrv, pCmd, cbCmd);
        return VINF_SUCCESS;
    }
#ifdef DEBUG_misha
    Assert(0);
#endif
    return VERR_NOT_SUPPORTED;
}
1023
/* Submit a chromium control command and block until it completes.
 * Returns the submission/wait status; the command's own result must be
 * fetched with vboxVDMACrCtlGetRc() afterwards. */
static int vboxVDMACrCtlPost(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd)
{
    RTSEMEVENT hComplEvent;
    int rc = RTSemEventCreate(&hComplEvent);
    AssertRC(rc);
    if(RT_SUCCESS(rc))
    {
        /* The completion callback signals hComplEvent from the backend thread. */
        rc = vboxVDMACrCtlPostAsync(pVGAState, pCmd, cbCmd, vboxVDMACrCtlCbSetEvent, (void*)hComplEvent);
#ifdef DEBUG_misha
        AssertRC(rc);
#endif
        if (RT_SUCCESS(rc))
        {
            rc = RTSemEventWaitNoResume(hComplEvent, RT_INDEFINITE_WAIT);
            AssertRC(rc);
            if(RT_SUCCESS(rc))
            {
                RTSemEventDestroy(hComplEvent);
            }
            /* NOTE(review): when the wait is interrupted the event is left
             * undestroyed on purpose - the completion callback may still
             * signal it later, so destroying here would risk use-after-free.
             * This leaks the semaphore; confirm this is acceptable. */
        }
        else
        {
            /* the command is completed */
            RTSemEventDestroy(hComplEvent);
        }
    }
    return rc;
}
1052
/* Context for a synchronous HGCM ctl submission: carries the command result
 * and the event the submitting thread blocks on. ("CYNC" is a historic typo
 * for "SYNC" kept for identifier stability.) */
typedef struct VDMA_VBVA_CTL_CYNC_COMPLETION
{
    int rc;             /* completion status reported by the HGCM service */
    RTSEMEVENT hEvent;  /* signalled by the completion callback */
} VDMA_VBVA_CTL_CYNC_COMPLETION;
1058
1059static DECLCALLBACK(void) vboxVDMACrHgcmSubmitSyncCompletion(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
1060{
1061 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvCompletion;
1062 pData->rc = rc;
1063 rc = RTSemEventSignal(pData->hEvent);
1064 if (!RT_SUCCESS(rc))
1065 WARN(("RTSemEventSignal failed %d\n", rc));
1066}
1067
/* Submit a VBOXCRCMDCTL to the chromium HGCM service and block until the
 * service reports completion. Returns the submission status or, when
 * submission succeeded, the command's own completion status. */
static int vboxVDMACrHgcmSubmitSync(struct VBOXVDMAHOST *pVdma, VBOXCRCMDCTL* pCtl, uint32_t cbCtl)
{
    VDMA_VBVA_CTL_CYNC_COMPLETION Data;
    Data.rc = VERR_NOT_IMPLEMENTED;
    int rc = RTSemEventCreate(&Data.hEvent);
    if (!RT_SUCCESS(rc))
    {
        WARN(("RTSemEventCreate failed %d\n", rc));
        return rc;
    }

    PVGASTATE pVGAState = pVdma->pVGAState;
    rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCtl, cbCtl, vboxVDMACrHgcmSubmitSyncCompletion, &Data);
    if (RT_SUCCESS(rc))
    {
        rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
        if (RT_SUCCESS(rc))
        {
            /* Propagate the status the completion callback stored. */
            rc = Data.rc;
            if (!RT_SUCCESS(rc))
            {
                WARN(("pfnCrHgcmCtlSubmit command failed %d\n", rc));
            }

        }
        else
            WARN(("RTSemEventWait failed %d\n", rc));
        /* NOTE(review): on a failed wait we fall through and destroy the
         * event while the completion callback may still signal it - confirm
         * RTSemEventWait with RT_INDEFINITE_WAIT cannot fail here. */
    }
    else
        WARN(("pfnCrHgcmCtlSubmit failed %d\n", rc));


    RTSemEventDestroy(Data.hEvent);

    return rc;
}
1104
1105static int vdmaVBVACtlDisableSync(PVBOXVDMAHOST pVdma)
1106{
1107 VBVAEXHOSTCTL HCtl;
1108 HCtl.enmType = VBVAEXHOSTCTL_TYPE_GHH_DISABLE;
1109 return vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1110}
1111
/* VBOXCRCMDCTL_ENABLE "remaining host command" iterator.
 *
 * Called repeatedly by the chromium backend while switching host-command
 * delivery to HGCM: each call completes the previously returned ctl command
 * (with prevCmdRc) and hands out the next ctl command still queued in VBVA.
 * Returns NULL (and *pcbCtl = 0) when the queue is drained, at which point
 * VBVA itself has been disabled. */
static DECLCALLBACK(uint8_t*) vboxVDMACrHgcmHandleEnableRemainingHostCommand(HVBOXCRCMDCTL_REMAINING_HOST_COMMAND hClient, uint32_t *pcbCtl, int prevCmdRc)
{
    struct VBOXVDMAHOST *pVdma = hClient;
    if (!pVdma->pCurRemainingHostCtl)
    {
        /* disable VBVA, all subsequent host commands will go HGCM way */
        VBoxVBVAExHSDisable(&pVdma->CmdVbva);
    }
    else
    {
        /* complete the ctl command handed out on the previous iteration */
        VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pVdma->pCurRemainingHostCtl, prevCmdRc);
    }

    pVdma->pCurRemainingHostCtl = VBoxVBVAExHPCheckHostCtlOnDisable(&pVdma->CmdVbva);
    if (pVdma->pCurRemainingHostCtl)
    {
        *pcbCtl = pVdma->pCurRemainingHostCtl->u.cmd.cbCmd;
        return pVdma->pCurRemainingHostCtl->u.cmd.pu8Cmd;
    }

    *pcbCtl = 0;
    return NULL;
}
1135
/* Invoked by the chromium backend once HGCM-unload termination handling is
 * finished; only sanity-checks the expected VBVA/thread states. */
static DECLCALLBACK(void) vboxVDMACrHgcmNotifyTerminatingDoneCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient)
{
    struct VBOXVDMAHOST *pVdma = hClient;
    Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pVdma->Thread.u32State == VBOXVDMATHREAD_STATE_TERMINATING);
}
1142
1143static DECLCALLBACK(int) vboxVDMACrHgcmNotifyTerminatingCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient, VBOXCRCMDCTL_HGCMENABLE_DATA *pHgcmEnableData)
1144{
1145 struct VBOXVDMAHOST *pVdma = hClient;
1146 VBVAEXHOSTCTL HCtl;
1147 HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD;
1148 int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1149
1150 pHgcmEnableData->hRHCmd = pVdma;
1151 pHgcmEnableData->pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1152
1153 if (RT_FAILURE(rc))
1154 {
1155 if (rc == VERR_INVALID_STATE)
1156 rc = VINF_SUCCESS;
1157 else
1158 WARN(("vdmaVBVACtlSubmitSync failed %d\n", rc));
1159 }
1160
1161 return rc;
1162}
1163
1164static int vboxVDMACrHgcmHandleEnable(struct VBOXVDMAHOST *pVdma)
1165{
1166 VBOXCRCMDCTL_ENABLE Enable;
1167 Enable.Hdr.enmType = VBOXCRCMDCTL_TYPE_ENABLE;
1168 Enable.Data.hRHCmd = pVdma;
1169 Enable.Data.pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1170
1171 int rc = vboxVDMACrHgcmSubmitSync(pVdma, &Enable.Hdr, sizeof (Enable));
1172 Assert(!pVdma->pCurRemainingHostCtl);
1173 if (RT_SUCCESS(rc))
1174 {
1175 Assert(!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1176 return VINF_SUCCESS;
1177 }
1178
1179 Assert(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1180 WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));
1181
1182 return rc;
1183}
1184
/* Enable command-VBVA processing: map the guest VBVA buffer at u32Offset,
 * switch chromium host-ctl delivery from HGCM to VBVA, and enable the
 * chromium command server. Rolls everything back on failure. */
static int vdmaVBVAEnableProcess(struct VBOXVDMAHOST *pVdma, uint32_t u32Offset)
{
    if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    {
        WARN(("vdma VBVA is already enabled\n"));
        return VERR_INVALID_STATE;
    }

    VBVABUFFER *pVBVA = (VBVABUFFER *)HGSMIOffsetToPointerHost(pVdma->pHgsmi, u32Offset);
    if (!pVBVA)
    {
        WARN(("invalid offset %d\n", u32Offset));
        return VERR_INVALID_PARAMETER;
    }

    if (!pVdma->CrSrvInfo.pfnEnable)
    {
        /* NOTE(review): this NULL check only takes effect in DEBUG_misha
         * builds; other builds would call a NULL pfnEnable below. Confirm
         * pfnEnable is guaranteed non-NULL after successful HGSMI setup. */
#ifdef DEBUG_misha
        WARN(("pfnEnable is NULL\n"));
        return VERR_NOT_SUPPORTED;
#endif
    }

    int rc = VBoxVBVAExHSEnable(&pVdma->CmdVbva, pVBVA);
    if (RT_SUCCESS(rc))
    {
        /* Tell the chromium backend to stop delivering host commands via HGCM. */
        VBOXCRCMDCTL_DISABLE Disable;
        Disable.Hdr.enmType = VBOXCRCMDCTL_TYPE_DISABLE;
        Disable.Data.hNotifyTerm = pVdma;
        Disable.Data.pfnNotifyTerm = vboxVDMACrHgcmNotifyTerminatingCb;
        Disable.Data.pfnNotifyTermDone = vboxVDMACrHgcmNotifyTerminatingDoneCb;
        rc = vboxVDMACrHgcmSubmitSync(pVdma, &Disable.Hdr, sizeof (Disable));
        if (RT_SUCCESS(rc))
        {
            /* Hand the screen-update entry points to the chromium server. */
            PVGASTATE pVGAState = pVdma->pVGAState;
            VBOXCRCMD_SVRENABLE_INFO Info;
            Info.hCltScr = pVGAState->pDrv;
            Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
            Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
            Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
            rc = pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
            if (RT_SUCCESS(rc))
                return VINF_SUCCESS;
            else
                WARN(("pfnEnable failed %d\n", rc));

            /* roll back: re-enable HGCM host-command delivery */
            vboxVDMACrHgcmHandleEnable(pVdma);
        }
        else
            WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));

        VBoxVBVAExHSDisable(&pVdma->CmdVbva);
    }
    else
        WARN(("VBoxVBVAExHSEnable failed %d\n", rc));

    return rc;
}
1243
/* Disable command-VBVA processing. When fDoHgcmEnable is set, host-ctl
 * delivery is switched back to HGCM (draining commands still queued in
 * VBVA); the chromium server is re-enabled if that switch fails. */
static int vdmaVBVADisableProcess(struct VBOXVDMAHOST *pVdma, bool fDoHgcmEnable)
{
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    {
        Log(("vdma VBVA is already disabled\n"));
        return VINF_SUCCESS;
    }

    int rc = pVdma->CrSrvInfo.pfnDisable(pVdma->CrSrvInfo.hSvr);
    if (RT_SUCCESS(rc))
    {
        if (fDoHgcmEnable)
        {
            /* disable is a bit tricky
             * we need to ensure the host ctl commands do not come out of order
             * and do not come over HGCM channel until after it is enabled */
            rc = vboxVDMACrHgcmHandleEnable(pVdma);
            if (RT_SUCCESS(rc))
                return rc;

            /* switching to HGCM failed - bring the chromium server back up */
            PVGASTATE pVGAState = pVdma->pVGAState;
            VBOXCRCMD_SVRENABLE_INFO Info;
            Info.hCltScr = pVGAState->pDrv;
            Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
            Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
            Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
            pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
        }
    }
    else
        WARN(("pfnDisable failed %d\n", rc));

    return rc;
}
1278
1279static int vboxVDMACrHostCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd, bool *pfContinue)
1280{
1281 *pfContinue = true;
1282
1283 switch (pCmd->enmType)
1284 {
1285 case VBVAEXHOSTCTL_TYPE_HH_SAVESTATE:
1286 {
1287 PVGASTATE pVGAState = pVdma->pVGAState;
1288 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1289 int rc = VBoxVBVAExHSSaveState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM);
1290 if (RT_FAILURE(rc))
1291 {
1292 WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
1293 return rc;
1294 }
1295 return pVdma->CrSrvInfo.pfnSaveState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM);
1296 }
1297 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE:
1298 {
1299 PVGASTATE pVGAState = pVdma->pVGAState;
1300 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1301 int rc = VBoxVBVAExHSLoadState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1302 if (RT_FAILURE(rc))
1303 {
1304 WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
1305 return rc;
1306 }
1307 return pVdma->CrSrvInfo.pfnLoadState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1308 }
1309 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1310 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1311 {
1312 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1313 return VERR_INVALID_STATE;
1314 }
1315 return pVdma->CrSrvInfo.pfnHostCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1316 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1317 {
1318 int rc = vdmaVBVADisableProcess(pVdma, true);
1319 if (RT_FAILURE(rc))
1320 {
1321 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1322 return rc;
1323 }
1324
1325 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
1326 }
1327 case VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD:
1328 {
1329 int rc = vdmaVBVADisableProcess(pVdma, false);
1330 if (RT_FAILURE(rc))
1331 {
1332 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1333 return rc;
1334 }
1335
1336 rc = VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, true);
1337 if (RT_FAILURE(rc))
1338 {
1339 WARN(("VBoxVDMAThreadTerm failed %d\n", rc));
1340 return rc;
1341 }
1342
1343 *pfContinue = false;
1344 return VINF_SUCCESS;
1345 }
1346 default:
1347 WARN(("unexpected host ctl type %d\n", pCmd->enmType));
1348 return VERR_INVALID_PARAMETER;
1349 }
1350}
1351
1352static int vboxVDMACrGuestCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd)
1353{
1354 switch (pCmd->enmType)
1355 {
1356 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1357 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1358 {
1359 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1360 return VERR_INVALID_STATE;
1361 }
1362 return pVdma->CrSrvInfo.pfnGuestCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1363 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE:
1364 {
1365 VBVAENABLE *pEnable = (VBVAENABLE *)pCmd->u.cmd.pu8Cmd;
1366 Assert(pCmd->u.cmd.cbCmd == sizeof (VBVAENABLE));
1367 uint32_t u32Offset = pEnable->u32Offset;
1368 return vdmaVBVAEnableProcess(pVdma, u32Offset);
1369 }
1370 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1371 {
1372 int rc = vdmaVBVADisableProcess(pVdma, true);
1373 if (RT_FAILURE(rc))
1374 {
1375 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1376 return rc;
1377 }
1378
1379 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
1380 }
1381 default:
1382 WARN(("unexpected ctl type %d\n", pCmd->enmType));
1383 return VERR_INVALID_PARAMETER;
1384 }
1385}
1386
1387/**
1388 * @param fIn - whether this is a page in or out op.
1389 * the direction is VRA#M - related, so fIn == true - transfer to VRAM); false - transfer from VRAM
1390 */
1391static int vboxVDMACrCmdVbvaProcessPagingEl(PPDMDEVINS pDevIns, VBOXCMDVBVAPAGEIDX iPage, uint8_t *pu8Vram, bool fIn)
1392{
1393 RTGCPHYS phPage = (RTGCPHYS)(iPage << PAGE_SHIFT);
1394 PGMPAGEMAPLOCK Lock;
1395 int rc;
1396
1397 if (fIn)
1398 {
1399 const void * pvPage;
1400 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPage, &Lock);
1401 if (!RT_SUCCESS(rc))
1402 {
1403 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d", rc));
1404 return rc;
1405 }
1406
1407 memcpy(pu8Vram, pvPage, PAGE_SIZE);
1408
1409 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1410 }
1411 else
1412 {
1413 void * pvPage;
1414 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvPage, &Lock);
1415 if (!RT_SUCCESS(rc))
1416 {
1417 WARN(("PDMDevHlpPhysGCPhys2CCPtr failed %d", rc));
1418 return rc;
1419 }
1420
1421 memcpy(pvPage, pu8Vram, PAGE_SIZE);
1422
1423 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1424 }
1425
1426 return VINF_SUCCESS;
1427}
1428
1429static int vboxVDMACrCmdVbvaProcessPagingEls(PPDMDEVINS pDevIns, const VBOXCMDVBVAPAGEIDX *piPages, uint32_t cPages, uint8_t *pu8Vram, bool fIn)
1430{
1431 for (uint32_t i = 0; i < cPages; ++i, pu8Vram += PAGE_SIZE)
1432 {
1433 int rc = vboxVDMACrCmdVbvaProcessPagingEl(pDevIns, piPages[i], pu8Vram, fIn);
1434 if (!RT_SUCCESS(rc))
1435 {
1436 WARN(("vboxVDMACrCmdVbvaProcessPagingEl failed %d", rc));
1437 return rc;
1438 }
1439 }
1440
1441 return VINF_SUCCESS;
1442}
1443
1444static int8_t vboxVDMACrCmdVbvaPagingDataInit(PVGASTATE pVGAState, const VBOXCMDVBVA_HDR *pHdr, const VBOXCMDVBVA_PAGING_TRANSFER_DATA *pData, uint32_t cbCmd,
1445 const VBOXCMDVBVAPAGEIDX **ppPages, VBOXCMDVBVAPAGEIDX *pcPages,
1446 uint8_t **ppu8Vram, bool *pfIn)
1447{
1448 if (cbCmd < sizeof (VBOXCMDVBVA_PAGING_TRANSFER))
1449 {
1450 WARN(("cmd too small"));
1451 return -1;
1452 }
1453
1454 VBOXCMDVBVAPAGEIDX cPages = cbCmd - RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers);
1455 if (cPages % sizeof (VBOXCMDVBVAPAGEIDX))
1456 {
1457 WARN(("invalid cmd size"));
1458 return -1;
1459 }
1460 cPages /= sizeof (VBOXCMDVBVAPAGEIDX);
1461
1462 VBOXCMDVBVAOFFSET offVRAM = pData->Alloc.u.offVRAM;
1463 if (offVRAM & PAGE_OFFSET_MASK)
1464 {
1465 WARN(("offVRAM address is not on page boundary\n"));
1466 return -1;
1467 }
1468 const VBOXCMDVBVAPAGEIDX *pPages = pData->aPageNumbers;
1469
1470 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1471 uint8_t *pu8VramMax = pu8VramBase + pVGAState->vram_size;
1472 if (offVRAM >= pVGAState->vram_size)
1473 {
1474 WARN(("invalid vram offset"));
1475 return -1;
1476 }
1477
1478 if (offVRAM + (cPages << PAGE_SHIFT) >= pVGAState->vram_size)
1479 {
1480 WARN(("invalid cPages"));
1481 return -1;
1482 }
1483
1484 uint8_t *pu8Vram = pu8VramBase + offVRAM;
1485 bool fIn = !!(pHdr->u8Flags & VBOXCMDVBVA_OPF_PAGING_TRANSFER_IN);
1486
1487 *ppPages = pPages;
1488 *pcPages = cPages;
1489 *ppu8Vram = pu8Vram;
1490 *pfIn = fIn;
1491 return 0;
1492}
1493
1494static int8_t vboxVDMACrCmdVbvaProcessCmdData(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
1495{
1496 switch (pCmd->u8OpCode)
1497 {
1498 case VBOXCMDVBVA_OPTYPE_NOPCMD:
1499 return 0;
1500 case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
1501 {
1502 PVGASTATE pVGAState = pVdma->pVGAState;
1503 const VBOXCMDVBVAPAGEIDX *pPages;
1504 uint32_t cPages;
1505 uint8_t *pu8Vram;
1506 bool fIn;
1507 int8_t i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pCmd, &((VBOXCMDVBVA_PAGING_TRANSFER*)pCmd)->Data, cbCmd,
1508 &pPages, &cPages,
1509 &pu8Vram, &fIn);
1510 if (i8Result < 0)
1511 {
1512 WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
1513 return i8Result;
1514 }
1515
1516 PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
1517 int rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pPages, cPages, pu8Vram, fIn);
1518 if (!RT_SUCCESS(rc))
1519 {
1520 WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
1521 return -1;
1522 }
1523
1524 return 0;
1525 }
1526 case VBOXCMDVBVA_OPTYPE_PAGING_FILL:
1527 WARN(("VBOXCMDVBVA_OPTYPE_PAGING_FILL not implemented"));
1528 return -1;
1529 default:
1530 return pVdma->CrSrvInfo.pfnCmd(pVdma->CrSrvInfo.hSvr, pCmd, cbCmd);
1531 }
1532}
1533
#if 0
/* Historic layout of VBOXCMDVBVA_PAGING_TRANSFER kept for reference only;
 * the live declaration comes from the VBoxVideo headers. */
typedef struct VBOXCMDVBVA_PAGING_TRANSFER
{
    VBOXCMDVBVA_HDR Hdr;
    /* for now can only contain offVRAM.
     * paging transfer can NOT be initiated for allocations having host 3D object (hostID) associated */
    VBOXCMDVBVA_ALLOCINFO Alloc;
    uint32_t u32Reserved;
    VBOXCMDVBVA_SYSMEMEL aSysMem[1];
} VBOXCMDVBVA_PAGING_TRANSFER;
#endif

/* Compile-time layout assumptions the paging-transfer parsing code relies on. */
AssertCompile(sizeof (VBOXCMDVBVA_HDR) == 8);
AssertCompile(sizeof (VBOXCMDVBVA_ALLOCINFO) == 4);
AssertCompile(sizeof (VBOXCMDVBVAPAGEIDX) == 4);
AssertCompile(!(PAGE_SIZE % sizeof (VBOXCMDVBVAPAGEIDX)));

/* Number of sysmem elements fitting into one guest page. */
#define VBOXCMDVBVA_NUM_SYSMEMEL_PER_PAGE (PAGE_SIZE / sizeof (VBOXCMDVBVA_SYSMEMEL))
1552
1553static int8_t vboxVDMACrCmdVbvaProcess(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
1554{
1555 switch (pCmd->u8OpCode)
1556 {
1557 case VBOXCMDVBVA_OPTYPE_SYSMEMCMD:
1558 {
1559 VBOXCMDVBVA_SYSMEMCMD *pSysmemCmd = (VBOXCMDVBVA_SYSMEMCMD*)pCmd;
1560 const VBOXCMDVBVA_HDR *pRealCmdHdr;
1561 uint32_t cbRealCmd = pCmd->u8Flags;
1562 cbRealCmd |= (pCmd->u.u8PrimaryID << 8);
1563 if (cbRealCmd < sizeof (VBOXCMDVBVA_HDR))
1564 {
1565 WARN(("invalid sysmem cmd size"));
1566 return -1;
1567 }
1568
1569 RTGCPHYS phCmd = (RTGCPHYS)pSysmemCmd->phCmd;
1570
1571 PGMPAGEMAPLOCK Lock;
1572 PVGASTATE pVGAState = pVdma->pVGAState;
1573 PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
1574 const void * pvCmd;
1575 int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
1576 if (!RT_SUCCESS(rc))
1577 {
1578 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
1579 return -1;
1580 }
1581
1582 Assert((phCmd & PAGE_OFFSET_MASK) == (((uintptr_t)pvCmd) & PAGE_OFFSET_MASK));
1583
1584 uint32_t cbCmdPart = PAGE_SIZE - (((uintptr_t)pvCmd) & PAGE_OFFSET_MASK);
1585
1586 if (cbRealCmd <= cbCmdPart)
1587 {
1588 pRealCmdHdr = (const VBOXCMDVBVA_HDR *)pvCmd;
1589 uint8_t i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pRealCmdHdr, cbRealCmd);
1590 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1591 return i8Result;
1592 }
1593
1594 VBOXCMDVBVA_HDR Hdr;
1595 const void *pvCurCmdTail;
1596 uint32_t cbCurCmdTail;
1597 if (cbCmdPart >= sizeof (*pRealCmdHdr))
1598 {
1599 pRealCmdHdr = (const VBOXCMDVBVA_HDR *)pvCmd;
1600 pvCurCmdTail = (const void*)(pRealCmdHdr + 1);
1601 cbCurCmdTail = cbCmdPart - sizeof (*pRealCmdHdr);
1602 }
1603 else
1604 {
1605 memcpy(&Hdr, pvCmd, cbCmdPart);
1606 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1607 phCmd += cbCmdPart;
1608 Assert(!(phCmd & PAGE_OFFSET_MASK));
1609 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
1610 if (!RT_SUCCESS(rc))
1611 {
1612 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
1613 return -1;
1614 }
1615
1616 cbCmdPart = sizeof (*pRealCmdHdr) - cbCmdPart;
1617 memcpy(((uint8_t*)(&Hdr)) + cbCmdPart, pvCmd, cbCmdPart);
1618 pvCurCmdTail = (const void*)(((uint8_t*)pvCmd) + cbCmdPart);
1619 cbCurCmdTail = PAGE_SIZE - cbCmdPart;
1620 }
1621
1622 if (cbCurCmdTail > cbRealCmd - sizeof (*pRealCmdHdr))
1623 cbCurCmdTail = cbRealCmd - sizeof (*pRealCmdHdr);
1624
1625 int8_t i8Result = 0;
1626
1627 switch (pRealCmdHdr->u8OpCode)
1628 {
1629 case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
1630 {
1631 const uint32_t *pPages;
1632 uint32_t cPages;
1633 uint8_t *pu8Vram;
1634 bool fIn;
1635 i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pRealCmdHdr, (const VBOXCMDVBVA_PAGING_TRANSFER_DATA*)pvCurCmdTail, cbRealCmd,
1636 &pPages, &cPages,
1637 &pu8Vram, &fIn);
1638 if (i8Result < 0)
1639 {
1640 WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
1641 /* we need to break, not return, to ensure currently locked page is released */
1642 break;
1643 }
1644
1645 if (cbCurCmdTail & 3)
1646 {
1647 WARN(("command is not alligned properly %d", cbCurCmdTail));
1648 i8Result = -1;
1649 /* we need to break, not return, to ensure currently locked page is released */
1650 break;
1651 }
1652
1653 uint32_t cCurPages = cbCurCmdTail / sizeof (VBOXCMDVBVAPAGEIDX);
1654 Assert(cCurPages < cPages);
1655
1656 do
1657 {
1658 rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pPages, cCurPages, pu8Vram, fIn);
1659 if (!RT_SUCCESS(rc))
1660 {
1661 WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
1662 i8Result = -1;
1663 /* we need to break, not return, to ensure currently locked page is released */
1664 break;
1665 }
1666
1667 Assert(cPages >= cCurPages);
1668 cPages -= cCurPages;
1669
1670 if (!cPages)
1671 break;
1672
1673 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1674
1675 Assert(!(phCmd & PAGE_OFFSET_MASK));
1676
1677 phCmd += PAGE_SIZE;
1678 pu8Vram += (cCurPages << PAGE_SHIFT);
1679
1680 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
1681 if (!RT_SUCCESS(rc))
1682 {
1683 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
1684 /* the page is not locked, return */
1685 return -1;
1686 }
1687
1688 cCurPages = PAGE_SIZE / sizeof (VBOXCMDVBVAPAGEIDX);
1689 if (cCurPages > cPages)
1690 cCurPages = cPages;
1691 } while (1);
1692 break;
1693 }
1694 default:
1695 WARN(("command can not be splitted"));
1696 i8Result = -1;
1697 break;
1698 }
1699
1700 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1701 return i8Result;
1702 }
1703 default:
1704 return vboxVDMACrCmdVbvaProcessCmdData(pVdma, pCmd, cbCmd);
1705 }
1706}
1707
1708static void vboxVDMACrCmdProcess(struct VBOXVDMAHOST *pVdma, uint8_t* pu8Cmd, uint32_t cbCmd)
1709{
1710 if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
1711 return;
1712
1713 if (cbCmd < sizeof (VBOXCMDVBVA_HDR))
1714 {
1715 WARN(("invalid command size"));
1716 return;
1717 }
1718
1719 PVBOXCMDVBVA_HDR pCmd = (PVBOXCMDVBVA_HDR)pu8Cmd;
1720
1721 /* check if the command is cancelled */
1722 if (!ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_IN_PROGRESS, VBOXCMDVBVA_STATE_SUBMITTED))
1723 {
1724 Assert(pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED);
1725 return;
1726 }
1727
1728 pCmd->u.i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCmd);
1729}
1730
1731static int vboxVDMACrCtlHgsmiSetup(struct VBOXVDMAHOST *pVdma)
1732{
1733 PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP pCmd = (PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP)
1734 vboxVDMACrCtlCreate (VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP, sizeof (*pCmd));
1735 int rc = VERR_NO_MEMORY;
1736 if (pCmd)
1737 {
1738 PVGASTATE pVGAState = pVdma->pVGAState;
1739 pCmd->pvVRamBase = pVGAState->vram_ptrR3;
1740 pCmd->cbVRam = pVGAState->vram_size;
1741 rc = vboxVDMACrCtlPost(pVGAState, &pCmd->Hdr, sizeof (*pCmd));
1742 if (RT_SUCCESS(rc))
1743 {
1744 rc = vboxVDMACrCtlGetRc(&pCmd->Hdr);
1745 if (RT_SUCCESS(rc))
1746 pVdma->CrSrvInfo = pCmd->CrCmdServerInfo;
1747 else if (rc != VERR_NOT_SUPPORTED)
1748 WARN(("vboxVDMACrCtlGetRc returned %d\n", rc));
1749 }
1750 else
1751 WARN(("vboxVDMACrCtlPost failed %d\n", rc));
1752
1753 vboxVDMACrCtlRelease(&pCmd->Hdr);
1754 }
1755
1756 if (!RT_SUCCESS(rc))
1757 memset(&pVdma->CrSrvInfo, 0, sizeof (pVdma->CrSrvInfo));
1758
1759 return rc;
1760}
1761
1762static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer);
1763
1764/* check if this is external cmd to be passed to chromium backend */
/* Inspect a descriptor for commands handled outside the generic VDMA path
 * (chromium commands and BPB transfers).
 *
 * Returns VINF_NOT_SUPPORTED when the command is not one of those and should
 * be processed normally; VINF_SUCCESS when it was consumed here (possibly
 * with asynchronous completion); or a VERR_xxx on validation failure. */
static int vboxVDMACmdCheckCrCmd(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmdDr, uint32_t cbCmdDr)
{
    PVBOXVDMACMD pDmaCmd = NULL;
    uint32_t cbDmaCmd = 0;
    uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
    int rc = VINF_NOT_SUPPORTED;

    cbDmaCmd = pCmdDr->cbBuf;

    if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
    {
        /* command body is embedded directly after the descriptor */
        if (cbCmdDr < sizeof (*pCmdDr) + VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        if (cbDmaCmd < cbCmdDr - sizeof (*pCmdDr) - VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid command buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        pDmaCmd = VBOXVDMACBUF_DR_TAIL(pCmdDr, VBOXVDMACMD);
    }
    else if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
    {
        /* command body lives in VRAM at the given offset */
        VBOXVIDEOOFFSET offBuf = pCmdDr->Location.offVramBuf;
        if (offBuf + cbDmaCmd > pVdma->pVGAState->vram_size)
        {
            AssertMsgFailed(("invalid command buffer data from offset!"));
            return VERR_INVALID_PARAMETER;
        }
        pDmaCmd = (VBOXVDMACMD*)(pvRam + offBuf);
    }

    if (pDmaCmd)
    {
        Assert(cbDmaCmd >= VBOXVDMACMD_HEADER_SIZE());
        uint32_t cbBody = VBOXVDMACMD_BODY_SIZE(cbDmaCmd);

        switch (pDmaCmd->enmType)
        {
            case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
            {
                PVBOXVDMACMD_CHROMIUM_CMD pCrCmd = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_CHROMIUM_CMD);
                if (cbBody < sizeof (*pCrCmd))
                {
                    AssertMsgFailed(("invalid chromium command buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }
                PVGASTATE pVGAState = pVdma->pVGAState;
                rc = VINF_SUCCESS;
                if (pVGAState->pDrv->pfnCrHgsmiCommandProcess)
                {
                    /* completion will be reported asynchronously via
                     * vboxVDMACrHgsmiCommandCompleteAsync() */
                    VBoxSHGSMICommandMarkAsynchCompletion(pCmdDr);
                    pVGAState->pDrv->pfnCrHgsmiCommandProcess(pVGAState->pDrv, pCrCmd, cbBody);
                    break;
                }
                else
                {
                    Assert(0);
                }

                /* no chromium connector - complete the descriptor right away */
                int tmpRc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                AssertRC(tmpRc);
                break;
            }
            case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
            {
                PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
                if (cbBody < sizeof (*pTransfer))
                {
                    AssertMsgFailed(("invalid bpb transfer buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }

                /* executed synchronously; complete the descriptor here */
                rc = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, sizeof (*pTransfer));
                AssertRC(rc);
                if (RT_SUCCESS(rc))
                {
                    pCmdDr->rc = VINF_SUCCESS;
                    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                    AssertRC(rc);
                    rc = VINF_SUCCESS;
                }
                break;
            }
            default:
                break;
        }
    }
    return rc;
}
1859
1860int vboxVDMACrHgsmiCommandCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc)
1861{
1862 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
1863 PHGSMIINSTANCE pIns = pVGAState->pHGSMI;
1864 VBOXVDMACMD *pDmaHdr = VBOXVDMACMD_FROM_BODY(pCmd);
1865 VBOXVDMACBUF_DR *pDr = VBOXVDMACBUF_DR_FROM_TAIL(pDmaHdr);
1866 AssertRC(rc);
1867 pDr->rc = rc;
1868
1869 Assert(pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
1870 rc = VBoxSHGSMICommandComplete(pIns, pDr);
1871 AssertRC(rc);
1872 return rc;
1873}
1874
1875int vboxVDMACrHgsmiControlCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CTL pCmd, int rc)
1876{
1877 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
1878 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pCmdPrivate = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1879 pCmdPrivate->rc = rc;
1880 if (pCmdPrivate->pfnCompletion)
1881 {
1882 pCmdPrivate->pfnCompletion(pVGAState, pCmd, pCmdPrivate->pvCompletion);
1883 }
1884 return VINF_SUCCESS;
1885}
1886
/* Copy the pixel data of one blit rectangle between two VRAM surfaces.
 * Color conversion and stretching are not supported; both rectangles must
 * have equal dimensions and both surfaces the same format. */
static int vboxVDMACmdExecBltPerform(PVBOXVDMAHOST pVdma,
        uint8_t *pvDstSurf, const uint8_t *pvSrcSurf,
        const PVBOXVDMA_SURF_DESC pDstDesc, const PVBOXVDMA_SURF_DESC pSrcDesc,
        const VBOXVDMA_RECTL * pDstRectl, const VBOXVDMA_RECTL * pSrcRectl)
{
    /* we do not support color conversion */
    Assert(pDstDesc->format == pSrcDesc->format);
    /* we do not support stretching */
    Assert(pDstRectl->height == pSrcRectl->height);
    Assert(pDstRectl->width == pSrcRectl->width);
    if (pDstDesc->format != pSrcDesc->format)
        return VERR_INVALID_FUNCTION;
    if (pDstDesc->width == pDstRectl->width
            && pSrcDesc->width == pSrcRectl->width
            && pSrcDesc->width == pDstDesc->width)
    {
        /* full-width rows: the rectangle is one contiguous byte range */
        Assert(!pDstRectl->left);
        Assert(!pSrcRectl->left);
        uint32_t cbOff = pDstDesc->pitch * pDstRectl->top;
        uint32_t cbSize = pDstDesc->pitch * pDstRectl->height;
        memcpy(pvDstSurf + cbOff, pvSrcSurf + cbOff, cbSize);
    }
    else
    {
        /* partial rows: copy line by line; offsets are computed in bits
         * (bpp may be < 8) and rounded out to whole bytes */
        uint32_t offDstLineStart = pDstRectl->left * pDstDesc->bpp >> 3;
        uint32_t offDstLineEnd = ((pDstRectl->left * pDstDesc->bpp + 7) >> 3) + ((pDstDesc->bpp * pDstRectl->width + 7) >> 3);
        uint32_t cbDstLine = offDstLineEnd - offDstLineStart;
        uint32_t offDstStart = pDstDesc->pitch * pDstRectl->top + offDstLineStart;
        Assert(cbDstLine <= pDstDesc->pitch);
        uint32_t cbDstSkip = pDstDesc->pitch;
        uint8_t * pvDstStart = pvDstSurf + offDstStart;

        uint32_t offSrcLineStart = pSrcRectl->left * pSrcDesc->bpp >> 3;
        uint32_t offSrcLineEnd = ((pSrcRectl->left * pSrcDesc->bpp + 7) >> 3) + ((pSrcDesc->bpp * pSrcRectl->width + 7) >> 3);
        uint32_t cbSrcLine = offSrcLineEnd - offSrcLineStart;
        uint32_t offSrcStart = pSrcDesc->pitch * pSrcRectl->top + offSrcLineStart;
        Assert(cbSrcLine <= pSrcDesc->pitch);
        uint32_t cbSrcSkip = pSrcDesc->pitch;
        const uint8_t * pvSrcStart = pvSrcSurf + offSrcStart;

        Assert(cbDstLine == cbSrcLine);

        /* NOTE(review): this loop copies height + 1 lines (memcpy runs before
         * the i == height exit check), whereas the contiguous path above
         * copies pitch * height bytes, i.e. height lines. One of the two is
         * off by one - confirm the intended rectangle-height convention. */
        for (uint32_t i = 0; ; ++i)
        {
            memcpy (pvDstStart, pvSrcStart, cbDstLine);
            if (i == pDstRectl->height)
                break;
            pvDstStart += cbDstSkip;
            pvSrcStart += cbSrcSkip;
        }
    }
    return VINF_SUCCESS;
}
1940
1941static void vboxVDMARectlUnite(VBOXVDMA_RECTL * pRectl1, const VBOXVDMA_RECTL * pRectl2)
1942{
1943 if (!pRectl1->width)
1944 *pRectl1 = *pRectl2;
1945 else
1946 {
1947 int16_t x21 = pRectl1->left + pRectl1->width;
1948 int16_t x22 = pRectl2->left + pRectl2->width;
1949 if (pRectl1->left > pRectl2->left)
1950 {
1951 pRectl1->left = pRectl2->left;
1952 pRectl1->width = x21 < x22 ? x22 - pRectl1->left : x21 - pRectl1->left;
1953 }
1954 else if (x21 < x22)
1955 pRectl1->width = x22 - pRectl1->left;
1956
1957 x21 = pRectl1->top + pRectl1->height;
1958 x22 = pRectl2->top + pRectl2->height;
1959 if (pRectl1->top > pRectl2->top)
1960 {
1961 pRectl1->top = pRectl2->top;
1962 pRectl1->height = x21 < x22 ? x22 - pRectl1->top : x21 - pRectl1->top;
1963 }
1964 else if (x21 < x22)
1965 pRectl1->height = x22 - pRectl1->top;
1966 }
1967}
1968
1969/*
1970 * @return on success the number of bytes the command contained, otherwise - VERR_xxx error code
1971 */
static int vboxVDMACmdExecBlt(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt, uint32_t cbBuffer)
{
    /* the command is variable-sized: verify the sub-rectangle array fits */
    const uint32_t cbBlt = VBOXVDMACMD_BODY_FIELD_OFFSET(uint32_t, VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects[pBlt->cDstSubRects]);
    Assert(cbBlt <= cbBuffer);
    if (cbBuffer < cbBlt)
        return VERR_INVALID_FUNCTION;

    /* we do not support stretching for now */
    Assert(pBlt->srcRectl.width == pBlt->dstRectl.width);
    Assert(pBlt->srcRectl.height == pBlt->dstRectl.height);
    if (pBlt->srcRectl.width != pBlt->dstRectl.width)
        return VERR_INVALID_FUNCTION;
    if (pBlt->srcRectl.height != pBlt->dstRectl.height)
        return VERR_INVALID_FUNCTION;
    Assert(pBlt->cDstSubRects);

    /* NOTE(review): offDst/offSrc are used below without checking against
     * vram_size - confirm the caller validates them. */
    uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
    VBOXVDMA_RECTL updateRectl = {0, 0, 0, 0};

    if (pBlt->cDstSubRects)
    {
        VBOXVDMA_RECTL dstRectl, srcRectl;
        const VBOXVDMA_RECTL *pDstRectl, *pSrcRectl;
        for (uint32_t i = 0; i < pBlt->cDstSubRects; ++i)
        {
            /* translate the sub-rect by the destination rect origin */
            pDstRectl = &pBlt->aDstSubRects[i];
            if (pBlt->dstRectl.left || pBlt->dstRectl.top)
            {
                dstRectl.left = pDstRectl->left + pBlt->dstRectl.left;
                dstRectl.top = pDstRectl->top + pBlt->dstRectl.top;
                dstRectl.width = pDstRectl->width;
                dstRectl.height = pDstRectl->height;
                pDstRectl = &dstRectl;
            }

            /* the destination sub-rect is applied to the source as well
             * (presumably src and dst sub-layouts are identical - verify) */
            pSrcRectl = &pBlt->aDstSubRects[i];
            if (pBlt->srcRectl.left || pBlt->srcRectl.top)
            {
                srcRectl.left = pSrcRectl->left + pBlt->srcRectl.left;
                srcRectl.top = pSrcRectl->top + pBlt->srcRectl.top;
                srcRectl.width = pSrcRectl->width;
                srcRectl.height = pSrcRectl->height;
                pSrcRectl = &srcRectl;
            }

            int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
                    &pBlt->dstDesc, &pBlt->srcDesc,
                    pDstRectl,
                    pSrcRectl);
            AssertRC(rc);
            if (!RT_SUCCESS(rc))
                return rc;

            /* accumulate the overall updated region */
            vboxVDMARectlUnite(&updateRectl, pDstRectl);
        }
    }
    else
    {
        int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
                &pBlt->dstDesc, &pBlt->srcDesc,
                &pBlt->dstRectl,
                &pBlt->srcRectl);
        AssertRC(rc);
        if (!RT_SUCCESS(rc))
            return rc;

        vboxVDMARectlUnite(&updateRectl, &pBlt->dstRectl);
    }

    return cbBlt;
}
2043
2044static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer)
2045{
2046 if (cbBuffer < sizeof (*pTransfer))
2047 return VERR_INVALID_PARAMETER;
2048
2049 PVGASTATE pVGAState = pVdma->pVGAState;
2050 uint8_t * pvRam = pVGAState->vram_ptrR3;
2051 PGMPAGEMAPLOCK SrcLock;
2052 PGMPAGEMAPLOCK DstLock;
2053 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
2054 const void * pvSrc;
2055 void * pvDst;
2056 int rc = VINF_SUCCESS;
2057 uint32_t cbTransfer = pTransfer->cbTransferSize;
2058 uint32_t cbTransfered = 0;
2059 bool bSrcLocked = false;
2060 bool bDstLocked = false;
2061 do
2062 {
2063 uint32_t cbSubTransfer = cbTransfer;
2064 if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
2065 {
2066 pvSrc = pvRam + pTransfer->Src.offVramBuf + cbTransfered;
2067 }
2068 else
2069 {
2070 RTGCPHYS phPage = pTransfer->Src.phBuf;
2071 phPage += cbTransfered;
2072 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvSrc, &SrcLock);
2073 AssertRC(rc);
2074 if (RT_SUCCESS(rc))
2075 {
2076 bSrcLocked = true;
2077 cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
2078 }
2079 else
2080 {
2081 break;
2082 }
2083 }
2084
2085 if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
2086 {
2087 pvDst = pvRam + pTransfer->Dst.offVramBuf + cbTransfered;
2088 }
2089 else
2090 {
2091 RTGCPHYS phPage = pTransfer->Dst.phBuf;
2092 phPage += cbTransfered;
2093 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvDst, &DstLock);
2094 AssertRC(rc);
2095 if (RT_SUCCESS(rc))
2096 {
2097 bDstLocked = true;
2098 cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
2099 }
2100 else
2101 {
2102 break;
2103 }
2104 }
2105
2106 if (RT_SUCCESS(rc))
2107 {
2108 memcpy(pvDst, pvSrc, cbSubTransfer);
2109 cbTransfer -= cbSubTransfer;
2110 cbTransfered += cbSubTransfer;
2111 }
2112 else
2113 {
2114 cbTransfer = 0; /* to break */
2115 }
2116
2117 if (bSrcLocked)
2118 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &SrcLock);
2119 if (bDstLocked)
2120 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &DstLock);
2121 } while (cbTransfer);
2122
2123 if (RT_SUCCESS(rc))
2124 return sizeof (*pTransfer);
2125 return rc;
2126}
2127
2128static int vboxVDMACmdExec(PVBOXVDMAHOST pVdma, const uint8_t *pvBuffer, uint32_t cbBuffer)
2129{
2130 do
2131 {
2132 Assert(pvBuffer);
2133 Assert(cbBuffer >= VBOXVDMACMD_HEADER_SIZE());
2134
2135 if (!pvBuffer)
2136 return VERR_INVALID_PARAMETER;
2137 if (cbBuffer < VBOXVDMACMD_HEADER_SIZE())
2138 return VERR_INVALID_PARAMETER;
2139
2140 PVBOXVDMACMD pCmd = (PVBOXVDMACMD)pvBuffer;
2141 uint32_t cbCmd = 0;
2142 switch (pCmd->enmType)
2143 {
2144 case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
2145 {
2146#ifdef VBOXWDDM_TEST_UHGSMI
2147 static int count = 0;
2148 static uint64_t start, end;
2149 if (count==0)
2150 {
2151 start = RTTimeNanoTS();
2152 }
2153 ++count;
2154 if (count==100000)
2155 {
2156 end = RTTimeNanoTS();
2157 float ems = (end-start)/1000000.f;
2158 LogRel(("100000 calls took %i ms, %i cps\n", (int)ems, (int)(100000.f*1000.f/ems) ));
2159 }
2160#endif
2161 /* todo: post the buffer to chromium */
2162 return VINF_SUCCESS;
2163 }
2164 case VBOXVDMACMD_TYPE_DMA_PRESENT_BLT:
2165 {
2166 const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_PRESENT_BLT);
2167 int cbBlt = vboxVDMACmdExecBlt(pVdma, pBlt, cbBuffer);
2168 Assert(cbBlt >= 0);
2169 Assert((uint32_t)cbBlt <= cbBuffer);
2170 if (cbBlt >= 0)
2171 {
2172 if ((uint32_t)cbBlt == cbBuffer)
2173 return VINF_SUCCESS;
2174 else
2175 {
2176 cbBuffer -= (uint32_t)cbBlt;
2177 pvBuffer -= cbBlt;
2178 }
2179 }
2180 else
2181 return cbBlt; /* error */
2182 break;
2183 }
2184 case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
2185 {
2186 const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
2187 int cbTransfer = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, cbBuffer);
2188 Assert(cbTransfer >= 0);
2189 Assert((uint32_t)cbTransfer <= cbBuffer);
2190 if (cbTransfer >= 0)
2191 {
2192 if ((uint32_t)cbTransfer == cbBuffer)
2193 return VINF_SUCCESS;
2194 else
2195 {
2196 cbBuffer -= (uint32_t)cbTransfer;
2197 pvBuffer -= cbTransfer;
2198 }
2199 }
2200 else
2201 return cbTransfer; /* error */
2202 break;
2203 }
2204 case VBOXVDMACMD_TYPE_DMA_NOP:
2205 return VINF_SUCCESS;
2206 case VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ:
2207 return VINF_SUCCESS;
2208 default:
2209 AssertBreakpoint();
2210 return VERR_INVALID_FUNCTION;
2211 }
2212 } while (1);
2213
2214 /* we should not be here */
2215 AssertBreakpoint();
2216 return VERR_INVALID_STATE;
2217}
2218
2219static DECLCALLBACK(int) vboxVDMAWorkerThread(RTTHREAD ThreadSelf, void *pvUser)
2220{
2221 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvUser;
2222 PVGASTATE pVGAState = pVdma->pVGAState;
2223 VBVAEXHOSTCONTEXT *pCmdVbva = &pVdma->CmdVbva;
2224 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
2225 uint8_t *pCmd;
2226 uint32_t cbCmd;
2227 int rc;
2228
2229 VBoxVDMAThreadNotifyConstructSucceeded(&pVdma->Thread, pvUser);
2230
2231 while (!VBoxVDMAThreadIsTerminating(&pVdma->Thread))
2232 {
2233 VBVAEXHOST_DATA_TYPE enmType = VBoxVBVAExHPDataGet(pCmdVbva, &pCmd, &cbCmd);
2234 switch (enmType)
2235 {
2236 case VBVAEXHOST_DATA_TYPE_CMD:
2237 vboxVDMACrCmdProcess(pVdma, pCmd, cbCmd);
2238 VBoxVBVAExHPDataCompleteCmd(pCmdVbva, cbCmd);
2239 VBVARaiseIrqNoWait(pVGAState, 0);
2240 break;
2241 case VBVAEXHOST_DATA_TYPE_GUESTCTL:
2242 rc = vboxVDMACrGuestCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd);
2243 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
2244 break;
2245 case VBVAEXHOST_DATA_TYPE_HOSTCTL:
2246 {
2247 bool fContinue = true;
2248 rc = vboxVDMACrHostCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd, &fContinue);
2249 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
2250 if (fContinue)
2251 break;
2252 }
2253 case VBVAEXHOST_DATA_TYPE_NO_DATA:
2254 rc = VBoxVDMAThreadEventWait(&pVdma->Thread, RT_INDEFINITE_WAIT);
2255 AssertRC(rc);
2256 break;
2257 default:
2258 WARN(("unexpected type %d\n", enmType));
2259 break;
2260 }
2261 }
2262
2263 VBoxVDMAThreadNotifyTerminatingSucceeded(&pVdma->Thread, pvUser);
2264
2265 return VINF_SUCCESS;
2266}
2267
/**
 * Processes a single VDMA command descriptor (DR): locates the command buffer
 * (inline, VRAM offset, or guest physical page), executes it, stores the
 * status in pCmd->rc and completes the SHGSMI command.
 *
 * @param   pVdma  The VDMA host state.
 * @param   pCmd   The command descriptor (guest supplied, completed here).
 * @param   cbCmd  Size of the descriptor (currently unused in the body).
 */
static void vboxVDMACommandProcess(PVBOXVDMAHOST pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
    const uint8_t * pvBuf;
    PGMPAGEMAPLOCK Lock;   /* only valid while bReleaseLocked is true */
    int rc;
    bool bReleaseLocked = false;

    do
    {
        PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;

        /* Locate the actual command buffer. */
        if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
            pvBuf = VBOXVDMACBUF_DR_TAIL(pCmd, const uint8_t);  /* inline, right after the DR */
        else if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
        {
            uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
            pvBuf = pvRam + pCmd->Location.offVramBuf;
        }
        else
        {
            /* Guest physical buffer: map the containing page read-only.
             * Only a single-page buffer is supported on this path. */
            RTGCPHYS phPage = pCmd->Location.phBuf & ~0xfffULL;
            uint32_t offset = pCmd->Location.phBuf & 0xfff;
            Assert(offset + pCmd->cbBuf <= 0x1000);
            if (offset + pCmd->cbBuf > 0x1000)
            {
                /* @todo: more advanced mechanism of command buffer proc is actually needed */
                rc = VERR_INVALID_PARAMETER;
                break;
            }

            const void * pvPageBuf;
            rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPageBuf, &Lock);
            AssertRC(rc);
            if (!RT_SUCCESS(rc))
            {
                /* @todo: if (rc == VERR_PGM_PHYS_PAGE_RESERVED) -> fall back on using PGMPhysRead ?? */
                break;
            }

            pvBuf = (const uint8_t *)pvPageBuf;
            pvBuf += offset;

            bReleaseLocked = true;
        }

        rc = vboxVDMACmdExec(pVdma, pvBuf, pCmd->cbBuf);
        AssertRC(rc);

        /* Drop the page mapping lock acquired on the physical-buffer path. */
        if (bReleaseLocked)
            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
    } while (0);

    /* Report the status to the guest and complete the command. */
    pCmd->rc = rc;

    rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
    AssertRC(rc);
}
2326
2327static void vboxVDMAControlProcess(PVBOXVDMAHOST pVdma, PVBOXVDMA_CTL pCmd)
2328{
2329 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
2330 pCmd->i32Result = VINF_SUCCESS;
2331 int rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
2332 AssertRC(rc);
2333}
2334
2335#endif /* #ifdef VBOX_WITH_CRHGSMI */
2336
2337#ifdef VBOX_VDMA_WITH_WATCHDOG
2338static DECLCALLBACK(void) vboxVDMAWatchDogTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
2339{
2340 VBOXVDMAHOST *pVdma = (VBOXVDMAHOST *)pvUser;
2341 PVGASTATE pVGAState = pVdma->pVGAState;
2342 VBVARaiseIrq(pVGAState, HGSMIHOSTFLAGS_WATCHDOG);
2343}
2344
2345static int vboxVDMAWatchDogCtl(struct VBOXVDMAHOST *pVdma, uint32_t cMillis)
2346{
2347 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
2348 if (cMillis)
2349 TMTimerSetMillies(pVdma->WatchDogTimer, cMillis);
2350 else
2351 TMTimerStop(pVdma->WatchDogTimer);
2352 return VINF_SUCCESS;
2353}
2354#endif
2355
/**
 * Constructs the VDMA host state and wires it into the VGA state.
 *
 * @returns VBox status code.
 * @param   pVGAState      The VGA device state to attach to.
 * @param   cPipeElements  Pipe element count (currently unused in this body).
 */
int vboxVDMAConstruct(PVGASTATE pVGAState, uint32_t cPipeElements)
{
    int rc;
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)RTMemAllocZ(sizeof(*pVdma));
    Assert(pVdma);
    if (pVdma)
    {
        pVdma->pHgsmi = pVGAState->pHGSMI;
        pVdma->pVGAState = pVGAState;

#ifdef VBOX_VDMA_WITH_WATCHDOG
        /* Failure to create the watchdog timer is not treated as fatal. */
        rc = PDMDevHlpTMTimerCreate(pVGAState->pDevInsR3, TMCLOCK_REAL, vboxVDMAWatchDogTimer,
                                    pVdma, TMTIMER_FLAGS_NO_CRIT_SECT,
                                    "VDMA WatchDog Timer", &pVdma->WatchDogTimer);
        AssertRC(rc);
#endif

#ifdef VBOX_WITH_CRHGSMI
        VBoxVDMAThreadInit(&pVdma->Thread);

        rc = RTSemEventMultiCreate(&pVdma->HostCrCtlCompleteEvent);
        if (RT_SUCCESS(rc))
        {
            rc = VBoxVBVAExHSInit(&pVdma->CmdVbva);
            if (RT_SUCCESS(rc))
            {
                pVGAState->pVdma = pVdma;
                int rcIgnored = vboxVDMACrCtlHgsmiSetup(pVdma); NOREF(rcIgnored); /** @todo is this ignoring intentional? */
                return VINF_SUCCESS;

                /* NOTE(review): unreachable — left over from a removed error
                 * path after the return above. */
                VBoxVBVAExHSTerm(&pVdma->CmdVbva);
            }
            else
                WARN(("VBoxVBVAExHSInit failed %d\n", rc));

            RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
        }
        else
            WARN(("RTSemEventMultiCreate failed %d\n", rc));


        RTMemFree(pVdma);
#else
        pVGAState->pVdma = pVdma;
        return VINF_SUCCESS;
#endif
    }
    else
        rc = VERR_OUT_OF_RESOURCES;

    return rc;
}
2408
/**
 * Resets the VDMA state on device reset by synchronously disabling the
 * command VBVA (when CrHgsmi support is compiled in).
 *
 * @returns VINF_SUCCESS.
 * @param   pVdma  The VDMA host state.
 */
int vboxVDMAReset(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    vdmaVBVACtlDisableSync(pVdma);
#endif
    return VINF_SUCCESS;
}
2416
/**
 * Destroys the VDMA host state: disables the command VBVA, stops the worker
 * thread, tears down the VBVA context and the completion event, then frees
 * the instance.  The teardown order mirrors the construction order in
 * vboxVDMAConstruct.
 *
 * @returns VINF_SUCCESS.
 * @param   pVdma  The VDMA host state to destroy (freed on return).
 */
int vboxVDMADestruct(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    vdmaVBVACtlDisableSync(pVdma);
    VBoxVDMAThreadCleanup(&pVdma->Thread);
    VBoxVBVAExHSTerm(&pVdma->CmdVbva);
    RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
#endif
    RTMemFree(pVdma);
    return VINF_SUCCESS;
}
2428
2429void vboxVDMAControl(struct VBOXVDMAHOST *pVdma, PVBOXVDMA_CTL pCmd, uint32_t cbCmd)
2430{
2431 PHGSMIINSTANCE pIns = pVdma->pHgsmi;
2432
2433 switch (pCmd->enmCtl)
2434 {
2435 case VBOXVDMA_CTL_TYPE_ENABLE:
2436 pCmd->i32Result = VINF_SUCCESS;
2437 break;
2438 case VBOXVDMA_CTL_TYPE_DISABLE:
2439 pCmd->i32Result = VINF_SUCCESS;
2440 break;
2441 case VBOXVDMA_CTL_TYPE_FLUSH:
2442 pCmd->i32Result = VINF_SUCCESS;
2443 break;
2444#ifdef VBOX_VDMA_WITH_WATCHDOG
2445 case VBOXVDMA_CTL_TYPE_WATCHDOG:
2446 pCmd->i32Result = vboxVDMAWatchDogCtl(pVdma, pCmd->u32Offset);
2447 break;
2448#endif
2449 default:
2450 WARN(("cmd not supported"));
2451 pCmd->i32Result = VERR_NOT_SUPPORTED;
2452 }
2453
2454 int rc = VBoxSHGSMICommandComplete (pIns, pCmd);
2455 AssertRC(rc);
2456}
2457
/**
 * Entry point for VDMA command descriptors submitted by the guest.
 * Chromium commands are routed to the HGCM thread first; everything else is
 * processed inline.  On failure the command is completed immediately with the
 * error stored in pCmd->rc.
 *
 * @param   pVdma  The VDMA host state.
 * @param   pCmd   The command descriptor.
 * @param   cbCmd  Size of the descriptor.
 */
void vboxVDMACommand(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    int rc = VERR_NOT_IMPLEMENTED;

#ifdef VBOX_WITH_CRHGSMI
    /* chromium commands are processed by crhomium hgcm thread independently from our internal cmd processing pipeline
     * this is why we process them specially */
    rc = vboxVDMACmdCheckCrCmd(pVdma, pCmd, cbCmd);
    if (rc == VINF_SUCCESS)
        return;  /* handed off to chromium; completion happens there */

    if (RT_FAILURE(rc))
    {
        pCmd->rc = rc;
        rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
        AssertRC(rc);
        return;
    }

    /* Not a chromium command (informational status): process it here. */
    vboxVDMACommandProcess(pVdma, pCmd, cbCmd);
#else
    /* Without CrHgsmi support every command fails with VERR_NOT_IMPLEMENTED. */
    pCmd->rc = rc;
    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
    AssertRC(rc);
#endif
}
2484
2485/**/
2486#ifdef VBOX_WITH_CRHGSMI
2487
2488static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext);
2489
2490static int vdmaVBVACtlSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2491{
2492 int rc = VBoxVBVAExHCtlSubmit(&pVdma->CmdVbva, pCtl, enmSource, pfnComplete, pvComplete);
2493 if (RT_SUCCESS(rc))
2494 {
2495 if (rc == VINF_SUCCESS)
2496 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
2497 else
2498 Assert(rc == VINF_ALREADY_INITIALIZED);
2499 }
2500 else
2501 Log(("VBoxVBVAExHCtlSubmit failed %d\n", rc));
2502
2503 return rc;
2504}
2505
/**
 * Completion callback for guest-originated VBVA control requests: recovers
 * the original guest VBOXCMDVBVA_CTL header, stores the result, completes the
 * SHGSMI command and frees the host-side ctl wrapper.
 *
 * @param   pVbva      The VBVA context the ctl belongs to.
 * @param   pCtl       The host-side ctl wrapper (freed here).
 * @param   rc         Status of the processed request.
 * @param   pvContext  Pointer to the VBOXVDMAHOST instance.
 */
static DECLCALLBACK(void) vboxCmdVBVACmdCtlGuestCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext)
{
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvContext;
    /* pu8Cmd was set to the payload right after the guest ctl header (see
     * vdmaVBVACtlOpaqueGuestSubmit), so stepping back one header recovers it. */
    VBOXCMDVBVA_CTL *pGCtl = (VBOXCMDVBVA_CTL*)(pCtl->u.cmd.pu8Cmd - sizeof (VBOXCMDVBVA_CTL));
    AssertRC(rc);
    pGCtl->i32Result = rc;

    Assert(pVdma->pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
    rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pGCtl);
    AssertRC(rc);

    VBoxVBVAExHCtlFree(pVbva, pCtl);
}
2519
2520static int vdmaVBVACtlOpaqueSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_SOURCE enmSource, uint8_t* pu8Cmd, uint32_t cbCmd, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2521{
2522 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE);
2523 if (!pHCtl)
2524 {
2525 WARN(("VBoxVBVAExHCtlCreate failed\n"));
2526 return VERR_NO_MEMORY;
2527 }
2528
2529 pHCtl->u.cmd.pu8Cmd = pu8Cmd;
2530 pHCtl->u.cmd.cbCmd = cbCmd;
2531 int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, enmSource, pfnComplete, pvComplete);
2532 if (RT_FAILURE(rc))
2533 {
2534 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
2535 Log(("vdmaVBVACtlSubmit failed rc %d\n", rc));
2536 return rc;;
2537 }
2538 return VINF_SUCCESS;
2539}
2540
2541static int vdmaVBVACtlOpaqueGuestSubmit(PVBOXVDMAHOST pVdma, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
2542{
2543 Assert(cbCtl >= sizeof (VBOXCMDVBVA_CTL));
2544 VBoxSHGSMICommandMarkAsynchCompletion(pCtl);
2545 int rc = vdmaVBVACtlOpaqueSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_GUEST, (uint8_t*)(pCtl+1), cbCtl - sizeof (VBOXCMDVBVA_CTL), vboxCmdVBVACmdCtlGuestCompletion, pVdma);
2546 if (RT_SUCCESS(rc))
2547 return VINF_SUCCESS;
2548
2549 WARN(("vdmaVBVACtlOpaqueSubmit failed %d\n", rc));
2550 pCtl->i32Result = rc;
2551 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
2552 AssertRC(rc);
2553 return VINF_SUCCESS;
2554}
2555
2556static DECLCALLBACK(void) vboxCmdVBVACmdCtlHostCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvCompletion)
2557{
2558 VBOXCRCMDCTL* pVboxCtl = (VBOXCRCMDCTL*)pCtl->u.cmd.pu8Cmd;
2559 if (pVboxCtl->u.pfnInternal)
2560 ((PFNCRCTLCOMPLETION)pVboxCtl->u.pfnInternal)(pVboxCtl, pCtl->u.cmd.cbCmd, rc, pvCompletion);
2561 VBoxVBVAExHCtlFree(pVbva, pCtl);
2562}
2563
2564static int vdmaVBVACtlOpaqueHostSubmit(PVBOXVDMAHOST pVdma, struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
2565 PFNCRCTLCOMPLETION pfnCompletion,
2566 void *pvCompletion)
2567{
2568 pCmd->u.pfnInternal = (void(*)())pfnCompletion;
2569 int rc = vdmaVBVACtlOpaqueSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_HOST, (uint8_t*)pCmd, cbCmd, vboxCmdVBVACmdCtlHostCompletion, pvCompletion);
2570 if (RT_FAILURE(rc))
2571 {
2572 if (rc == VERR_INVALID_STATE)
2573 {
2574 pCmd->u.pfnInternal = NULL;
2575 PVGASTATE pVGAState = pVdma->pVGAState;
2576 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCmd, cbCmd, pfnCompletion, pvCompletion);
2577 if (!RT_SUCCESS(rc))
2578 WARN(("pfnCrHgsmiControlProcess failed %d\n", rc));
2579
2580 return rc;
2581 }
2582 WARN(("vdmaVBVACtlOpaqueSubmit failed %d\n", rc));
2583 return rc;
2584 }
2585
2586 return VINF_SUCCESS;
2587}
2588
2589static DECLCALLBACK(void) vdmaVBVACtlThreadCreatedEnable(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvContext)
2590{
2591 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvThreadContext;
2592 VBVAEXHOSTCTL* pHCtl = (VBVAEXHOSTCTL*)pvContext;
2593
2594 if (RT_SUCCESS(rc))
2595 {
2596 rc = vboxVDMACrGuestCtlProcess(pVdma, pHCtl);
2597 if (RT_FAILURE(rc))
2598 WARN(("vboxVDMACrGuestCtlProcess failed %d\n", rc));
2599 }
2600 else
2601 WARN(("vdmaVBVACtlThreadCreatedEnable is passed %d\n", rc));
2602
2603 VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pHCtl, rc);
2604}
2605
2606static int vdmaVBVACtlEnableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2607{
2608 int rc;
2609 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_GHH_ENABLE);
2610 if (pHCtl)
2611 {
2612 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
2613 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
2614 pHCtl->pfnComplete = pfnComplete;
2615 pHCtl->pvComplete = pvComplete;
2616
2617 rc = VBoxVDMAThreadCreate(&pVdma->Thread, vboxVDMAWorkerThread, pVdma, vdmaVBVACtlThreadCreatedEnable, pHCtl);
2618 if (RT_SUCCESS(rc))
2619 return VINF_SUCCESS;
2620 else
2621 WARN(("VBoxVDMAThreadCreate failed %d\n", rc));
2622
2623 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
2624 }
2625 else
2626 {
2627 WARN(("VBoxVBVAExHCtlCreate failed\n"));
2628 rc = VERR_NO_MEMORY;
2629 }
2630
2631 return rc;
2632}
2633
2634static int vdmaVBVACtlEnableSubmitSync(PVBOXVDMAHOST pVdma, uint32_t offVram)
2635{
2636 VBVAENABLE Enable = {0};
2637 Enable.u32Flags = VBVA_F_ENABLE;
2638 Enable.u32Offset = offVram;
2639
2640 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
2641 Data.rc = VERR_NOT_IMPLEMENTED;
2642 int rc = RTSemEventCreate(&Data.hEvent);
2643 if (!RT_SUCCESS(rc))
2644 {
2645 WARN(("RTSemEventCreate failed %d\n", rc));
2646 return rc;
2647 }
2648
2649 rc = vdmaVBVACtlEnableSubmitInternal(pVdma, &Enable, vdmaVBVACtlSubmitSyncCompletion, &Data);
2650 if (RT_SUCCESS(rc))
2651 {
2652 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
2653 if (RT_SUCCESS(rc))
2654 {
2655 rc = Data.rc;
2656 if (!RT_SUCCESS(rc))
2657 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
2658 }
2659 else
2660 WARN(("RTSemEventWait failed %d\n", rc));
2661 }
2662 else
2663 WARN(("vdmaVBVACtlSubmit failed %d\n", rc));
2664
2665 RTSemEventDestroy(Data.hEvent);
2666
2667 return rc;
2668}
2669
2670static int vdmaVBVACtlDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2671{
2672 int rc;
2673 VBVAEXHOSTCTL* pHCtl;
2674 if (VBoxVBVAExHSIsDisabled(&pVdma->CmdVbva))
2675 {
2676 WARN(("VBoxVBVAExHSIsDisabled: disabled"));
2677 return VINF_SUCCESS;
2678 }
2679
2680 pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_GHH_DISABLE);
2681 if (!pHCtl)
2682 {
2683 WARN(("VBoxVBVAExHCtlCreate failed\n"));
2684 return VERR_NO_MEMORY;
2685 }
2686
2687 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
2688 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
2689 rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_GUEST, pfnComplete, pvComplete);
2690 if (RT_SUCCESS(rc))
2691 return VINF_SUCCESS;
2692
2693 WARN(("vdmaVBVACtlSubmit failed rc %d\n", rc));
2694 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
2695 return rc;
2696}
2697
2698static int vdmaVBVACtlEnableDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2699{
2700 bool fEnable = ((pEnable->u32Flags & (VBVA_F_ENABLE | VBVA_F_DISABLE)) == VBVA_F_ENABLE);
2701 if (fEnable)
2702 return vdmaVBVACtlEnableSubmitInternal(pVdma, pEnable, pfnComplete, pvComplete);
2703 return vdmaVBVACtlDisableSubmitInternal(pVdma, pEnable, pfnComplete, pvComplete);
2704}
2705
2706static int vdmaVBVACtlEnableDisableSubmit(PVBOXVDMAHOST pVdma, VBOXCMDVBVA_CTL_ENABLE *pEnable)
2707{
2708 VBoxSHGSMICommandMarkAsynchCompletion(&pEnable->Hdr);
2709 int rc = vdmaVBVACtlEnableDisableSubmitInternal(pVdma, &pEnable->Enable, vboxCmdVBVACmdCtlGuestCompletion, pVdma);
2710 if (RT_SUCCESS(rc))
2711 return VINF_SUCCESS;
2712
2713 WARN(("vdmaVBVACtlEnableDisableSubmitInternal failed %d\n", rc));
2714 pEnable->Hdr.i32Result = rc;
2715 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, &pEnable->Hdr);
2716 AssertRC(rc);
2717 return VINF_SUCCESS;
2718}
2719
2720static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext)
2721{
2722 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvContext;
2723 pData->rc = rc;
2724 rc = RTSemEventSignal(pData->hEvent);
2725 if (!RT_SUCCESS(rc))
2726 WARN(("RTSemEventSignal failed %d\n", rc));
2727}
2728
2729static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource)
2730{
2731 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
2732 Data.rc = VERR_NOT_IMPLEMENTED;
2733 int rc = RTSemEventCreate(&Data.hEvent);
2734 if (!RT_SUCCESS(rc))
2735 {
2736 WARN(("RTSemEventCreate failed %d\n", rc));
2737 return rc;
2738 }
2739
2740 rc = vdmaVBVACtlSubmit(pVdma, pCtl, enmSource, vdmaVBVACtlSubmitSyncCompletion, &Data);
2741 if (RT_SUCCESS(rc))
2742 {
2743 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
2744 if (RT_SUCCESS(rc))
2745 {
2746 rc = Data.rc;
2747 if (!RT_SUCCESS(rc))
2748 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
2749 }
2750 else
2751 WARN(("RTSemEventWait failed %d\n", rc));
2752 }
2753 else
2754 Log(("vdmaVBVACtlSubmit failed %d\n", rc));
2755
2756 RTSemEventDestroy(Data.hEvent);
2757
2758 return rc;
2759}
2760
/**
 * Synchronously pauses command VBVA processing (used around save-state).
 *
 * @returns VBox status code from the synchronous submission.
 * @param   pVdma  The VDMA host state.
 */
static int vdmaVBVAPause(PVBOXVDMAHOST pVdma)
{
    /* Only enmType is initialized; the remaining Ctl fields are presumably
     * unused for the internal pause request — TODO confirm. */
    VBVAEXHOSTCTL Ctl;
    Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE;
    return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
}
2767
/**
 * Synchronously resumes command VBVA processing after a pause.
 *
 * @returns VBox status code from the synchronous submission.
 * @param   pVdma  The VDMA host state.
 */
static int vdmaVBVAResume(PVBOXVDMAHOST pVdma)
{
    /* Only enmType is initialized; mirrors vdmaVBVAPause. */
    VBVAEXHOSTCTL Ctl;
    Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME;
    return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
}
2774
2775static int vboxVDMACmdSubmitPerform(struct VBOXVDMAHOST *pVdma)
2776{
2777 int rc = VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva);
2778 switch (rc)
2779 {
2780 case VINF_SUCCESS:
2781 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
2782 case VINF_ALREADY_INITIALIZED:
2783 case VINF_EOF:
2784 case VERR_INVALID_STATE:
2785 return VINF_SUCCESS;
2786 default:
2787 Assert(!RT_FAILURE(rc));
2788 return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
2789 }
2790}
2791
2792
2793int vboxCmdVBVACmdHostCtl(PPDMIDISPLAYVBVACALLBACKS pInterface,
2794 struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
2795 PFNCRCTLCOMPLETION pfnCompletion,
2796 void *pvCompletion)
2797{
2798 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2799 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
2800 return vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, pfnCompletion, pvCompletion);
2801}
2802
/** Shared state between vboxCmdVBVACmdHostCtlSync and its completion
 * callback vboxCmdVBVACmdHostCtlSyncCb. */
typedef struct VBOXCMDVBVA_CMDHOSTCTL_SYNC
{
    struct VBOXVDMAHOST *pVdma;  /* owning VDMA host; provides the completion event/counter */
    uint32_t fProcessing;        /* 1 while the ctl is in flight; cleared by the callback */
    int rc;                      /* status reported by the completion callback */
} VBOXCMDVBVA_CMDHOSTCTL_SYNC;
2809
/**
 * Completion callback for vboxCmdVBVACmdHostCtlSync: records the status,
 * clears the in-flight flag, bumps the global completion counter and signals
 * the multi-event the submitter polls on.
 *
 * NOTE(review): fProcessing is a plain uint32_t cleared before the signal;
 * the submitter additionally polls with a timeout, which appears to be the
 * guard against a missed wakeup — confirm the intended memory-ordering model.
 */
static DECLCALLBACK(void) vboxCmdVBVACmdHostCtlSyncCb(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
{
    VBOXCMDVBVA_CMDHOSTCTL_SYNC *pData = (VBOXCMDVBVA_CMDHOSTCTL_SYNC*)pvCompletion;

    pData->rc = rc;
    pData->fProcessing = 0;

    struct VBOXVDMAHOST *pVdma = pData->pVdma;

    /* Count completions so the waiter knows whether it may reset the event. */
    ASMAtomicIncS32(&pVdma->i32cHostCrCtlCompleted);

    RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
}
2823
/**
 * Submits a host control command and synchronously waits for its completion,
 * polling a shared multi-event semaphore.
 *
 * @returns Status reported by the completion callback, or the submission
 *          error.
 * @param   pInterface  The display VBVA callbacks interface.
 * @param   pCmd        The host control command.
 * @param   cbCmd       Size of the command.
 */
int vboxCmdVBVACmdHostCtlSync(PPDMIDISPLAYVBVACALLBACKS pInterface,
        struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd)
{
    PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
    struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
    VBOXCMDVBVA_CMDHOSTCTL_SYNC Data;
    Data.pVdma = pVdma;
    Data.fProcessing = 1;
    Data.rc = VERR_INTERNAL_ERROR;
    int rc = vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, vboxCmdVBVACmdHostCtlSyncCb, &Data);
    if (!RT_SUCCESS(rc))
    {
        WARN(("vdmaVBVACtlOpaqueHostSubmit failed %d", rc));
        return rc;
    }

    /* The event is shared among all sync waiters, so wait with a timeout and
     * re-check our own in-flight flag each round. */
    while (Data.fProcessing)
    {
        /* Poll infrequently to make sure no completed message has been missed. */
        RTSemEventMultiWait(pVdma->HostCrCtlCompleteEvent, 500);

        if (Data.fProcessing)
            RTThreadYield();
    }

    /* 'Our' message has been processed, so should reset the semaphore.
     * There is still possible that another message has been processed
     * and the semaphore has been signalled again.
     * Reset only if there are no other messages completed.
     */
    int32_t c = ASMAtomicDecS32(&pVdma->i32cHostCrCtlCompleted);
    Assert(c >= 0);
    if (!c)
        RTSemEventMultiReset(pVdma->HostCrCtlCompleteEvent);

    rc = Data.rc;
    if (!RT_SUCCESS(rc))
        WARN(("host call failed %d", rc));

    return rc;
}
2865
2866int vboxCmdVBVACmdCtl(PVGASTATE pVGAState, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
2867{
2868 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
2869 int rc = VINF_SUCCESS;
2870 switch (pCtl->u32Type)
2871 {
2872 case VBOXCMDVBVACTL_TYPE_3DCTL:
2873 return vdmaVBVACtlOpaqueGuestSubmit(pVdma, pCtl, cbCtl);
2874 case VBOXCMDVBVACTL_TYPE_ENABLE:
2875 if (cbCtl != sizeof (VBOXCMDVBVA_CTL_ENABLE))
2876 {
2877 WARN(("incorrect enable size\n"));
2878 rc = VERR_INVALID_PARAMETER;
2879 break;
2880 }
2881 return vdmaVBVACtlEnableDisableSubmit(pVdma, (VBOXCMDVBVA_CTL_ENABLE*)pCtl);
2882 default:
2883 WARN(("unsupported type\n"));
2884 rc = VERR_INVALID_PARAMETER;
2885 break;
2886 }
2887
2888 pCtl->i32Result = rc;
2889 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
2890 AssertRC(rc);
2891 return VINF_SUCCESS;
2892}
2893
2894int vboxCmdVBVACmdSubmit(PVGASTATE pVGAState)
2895{
2896 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
2897 {
2898 WARN(("vdma VBVA is disabled\n"));
2899 return VERR_INVALID_STATE;
2900 }
2901
2902 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
2903}
2904
2905int vboxCmdVBVACmdFlush(PVGASTATE pVGAState)
2906{
2907 WARN(("flush\n"));
2908 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
2909 {
2910 WARN(("vdma VBVA is disabled\n"));
2911 return VERR_INVALID_STATE;
2912 }
2913 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
2914}
2915
2916void vboxCmdVBVACmdTimer(PVGASTATE pVGAState)
2917{
2918 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
2919 return;
2920 vboxVDMACmdSubmitPerform(pVGAState->pVdma);
2921}
2922
2923#endif
2924
/**
 * Save-state preparation: pauses command VBVA processing; when the VBVA is
 * inactive (VERR_INVALID_STATE) falls back to notifying chromium via a
 * SAVESTATE_BEGIN control.
 *
 * @returns VBox status code.
 * @param   pVdma  The VDMA host state.
 * @param   pSSM   The saved-state handle (unused here).
 */
int vboxVDMASaveStateExecPrep(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
{
#ifdef VBOX_WITH_CRHGSMI
    int rc = vdmaVBVAPause(pVdma);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    /* Any error other than "VBVA not active" is fatal. */
    if (rc != VERR_INVALID_STATE)
    {
        WARN(("vdmaVBVAPause failed %d\n", rc));
        return rc;
    }

#ifdef DEBUG_misha
    WARN(("debug prep"));
#endif

    /* Legacy path: tell chromium a save-state is starting. */
    PVGASTATE pVGAState = pVdma->pVGAState;
    PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
            VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_BEGIN, sizeof (*pCmd));
    Assert(pCmd);
    if (pCmd)
    {
        rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
        AssertRC(rc);
        if (RT_SUCCESS(rc))
        {
            rc = vboxVDMACrCtlGetRc(pCmd);
        }
        vboxVDMACrCtlRelease(pCmd);
        return rc;
    }
    return VERR_NO_MEMORY;
#else
    return VINF_SUCCESS;
#endif
}
2962
/**
 * Save-state completion: resumes command VBVA processing; when the VBVA is
 * inactive (VERR_INVALID_STATE) falls back to notifying chromium via a
 * SAVESTATE_END control.  Mirrors vboxVDMASaveStateExecPrep.
 *
 * @returns VBox status code.
 * @param   pVdma  The VDMA host state.
 * @param   pSSM   The saved-state handle (unused here).
 */
int vboxVDMASaveStateExecDone(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
{
#ifdef VBOX_WITH_CRHGSMI
    int rc = vdmaVBVAResume(pVdma);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    /* Any error other than "VBVA not active" is fatal. */
    if (rc != VERR_INVALID_STATE)
    {
        WARN(("vdmaVBVAResume failed %d\n", rc));
        return rc;
    }

#ifdef DEBUG_misha
    WARN(("debug done"));
#endif

    /* Legacy path: tell chromium the save-state has finished. */
    PVGASTATE pVGAState = pVdma->pVGAState;
    PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
            VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_END, sizeof (*pCmd));
    Assert(pCmd);
    if (pCmd)
    {
        rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
        AssertRC(rc);
        if (RT_SUCCESS(rc))
        {
            rc = vboxVDMACrCtlGetRc(pCmd);
        }
        vboxVDMACrCtlRelease(pCmd);
        return rc;
    }
    return VERR_NO_MEMORY;
#else
    return VINF_SUCCESS;
#endif
}
3000
/**
 * Saves the command VBVA state: writes 0xffffffff when the VBVA is disabled
 * (or CrHgsmi support is absent), otherwise writes the VBVA buffer's VRAM
 * offset followed by the VBVA state serialized on the worker thread.
 *
 * @returns VBox status code.
 * @param   pVdma  The VDMA host state.
 * @param   pSSM   The saved-state handle to write to.
 */
int vboxVDMASaveStateExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
{
    int rc;

#ifdef VBOX_WITH_CRHGSMI
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
#endif
    {
        /* Disabled (or no CrHgsmi at all): store the 0xffffffff marker only. */
        rc = SSMR3PutU32(pSSM, 0xffffffff);
        AssertRCReturn(rc, rc);
        return VINF_SUCCESS;
    }

#ifdef VBOX_WITH_CRHGSMI
    /* Store the VBVA buffer's offset into VRAM so load can re-enable it. */
    PVGASTATE pVGAState = pVdma->pVGAState;
    uint8_t * pu8VramBase = pVGAState->vram_ptrR3;

    rc = SSMR3PutU32(pSSM, (uint32_t)(((uint8_t*)pVdma->CmdVbva.pVBVA) - pu8VramBase));
    AssertRCReturn(rc, rc);

    /* Serialize the VBVA state synchronously on the worker thread. */
    VBVAEXHOSTCTL HCtl;
    HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_SAVESTATE;
    HCtl.u.state.pSSM = pSSM;
    HCtl.u.state.u32Version = 0;
    return vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
#endif
}
3028
/**
 * Loads the command VBVA state saved by vboxVDMASaveStateExecPerform:
 * 0xffffffff means the VBVA was disabled; otherwise the value is the VBVA
 * buffer's VRAM offset, and the VBVA is re-enabled, paused for the state
 * transfer, loaded on the worker thread, then resumed.
 *
 * @returns VBox status code; VERR_VERSION_MISMATCH when VBVA state is present
 *          but CrHgsmi support is not compiled in.
 * @param   pVdma       The VDMA host state.
 * @param   pSSM        The saved-state handle to read from.
 * @param   u32Version  Saved-state version, forwarded to the load ctl.
 */
int vboxVDMASaveLoadExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32;
    int rc = SSMR3GetU32(pSSM, &u32);
    AssertRCReturn(rc, rc);

    if (u32 != 0xffffffff)
    {
#ifdef VBOX_WITH_CRHGSMI
        /* Re-enable the VBVA at the saved VRAM offset. */
        rc = vdmaVBVACtlEnableSubmitSync(pVdma, u32);
        AssertRCReturn(rc, rc);

        /* Keep the worker quiescent while the state is being restored. */
        rc = vdmaVBVAPause(pVdma);
        AssertRCReturn(rc, rc);

        VBVAEXHOSTCTL HCtl;
        HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_LOADSTATE;
        HCtl.u.state.pSSM = pSSM;
        HCtl.u.state.u32Version = u32Version;
        rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
        AssertRCReturn(rc, rc);

        rc = vdmaVBVAResume(pVdma);
        AssertRCReturn(rc, rc);

        return VINF_SUCCESS;
#else
        WARN(("Unsupported VBVACtl info!\n"));
        return VERR_VERSION_MISMATCH;
#endif
    }

    return VINF_SUCCESS;
}
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette