VirtualBox

source: vbox/trunk/src/VBox/Devices/Graphics/DevVGA_VDMA.cpp@ 51161

Last change on this file since 51161 was 51161, checked in by vboxsync, 11 years ago

gcc warnings

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 105.1 KB
Line 
1/** @file
2 * Video DMA (VDMA) support.
3 */
4
5/*
6 * Copyright (C) 2006-2012 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 */
16#include <VBox/VMMDev.h>
17#include <VBox/vmm/pdmdev.h>
18#include <VBox/VBoxVideo.h>
19#include <iprt/semaphore.h>
20#include <iprt/thread.h>
21#include <iprt/mem.h>
22#include <iprt/asm.h>
23#include <iprt/list.h>
24#include <iprt/param.h>
25
26#include "DevVGA.h"
27#include "HGSMI/SHGSMIHost.h"
28
29#include <VBox/VBoxVideo3D.h>
30#include <VBox/VBoxVideoHost3D.h>
31
32#ifdef DEBUG_misha
33# define VBOXVDBG_MEMCACHE_DISABLE
34#endif
35
36#ifndef VBOXVDBG_MEMCACHE_DISABLE
37# include <iprt/memcache.h>
38#endif
39
40#ifdef DEBUG_misha
41#define WARN_BP() do { AssertFailed(); } while (0)
42#else
43#define WARN_BP() do { } while (0)
44#endif
45#define WARN(_msg) do { \
46 LogRel(_msg); \
47 WARN_BP(); \
48 } while (0)
49
50#define VBOXVDMATHREAD_STATE_TERMINATED 0
51#define VBOXVDMATHREAD_STATE_CREATING 1
52#define VBOXVDMATHREAD_STATE_CREATED 3
53#define VBOXVDMATHREAD_STATE_TERMINATING 4
54
55struct VBOXVDMATHREAD;
56
57typedef DECLCALLBACKPTR(void, PFNVBOXVDMATHREAD_CHANGED)(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvChangeContext);
58
59typedef struct VBOXVDMATHREAD
60{
61 RTTHREAD hWorkerThread;
62 RTSEMEVENT hEvent;
63 volatile uint32_t u32State;
64 PFNVBOXVDMATHREAD_CHANGED pfnChanged;
65 void *pvChanged;
66} VBOXVDMATHREAD, *PVBOXVDMATHREAD;
67
68
69/* state transformations:
70 *
71 * submitter | processor
72 *
73 * LISTENING ---> PROCESSING
74 *
75 * */
76#define VBVAEXHOSTCONTEXT_STATE_LISTENING 0
77#define VBVAEXHOSTCONTEXT_STATE_PROCESSING 1
78
79#define VBVAEXHOSTCONTEXT_ESTATE_DISABLED -1
80#define VBVAEXHOSTCONTEXT_ESTATE_PAUSED 0
81#define VBVAEXHOSTCONTEXT_ESTATE_ENABLED 1
82
83typedef struct VBVAEXHOSTCONTEXT
84{
85 VBVABUFFER *pVBVA;
86 volatile int32_t i32State;
87 volatile int32_t i32EnableState;
88 volatile uint32_t u32cCtls;
89 /* critical section for accessing ctl lists */
90 RTCRITSECT CltCritSect;
91 RTLISTANCHOR GuestCtlList;
92 RTLISTANCHOR HostCtlList;
93#ifndef VBOXVDBG_MEMCACHE_DISABLE
94 RTMEMCACHE CtlCache;
95#endif
96} VBVAEXHOSTCONTEXT;
97
98typedef enum
99{
100 VBVAEXHOSTCTL_TYPE_UNDEFINED = 0,
101 VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE,
102 VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME,
103 VBVAEXHOSTCTL_TYPE_HH_SAVESTATE,
104 VBVAEXHOSTCTL_TYPE_HH_LOADSTATE,
105 VBVAEXHOSTCTL_TYPE_HH_BE_OPAQUE,
106 VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD,
107 VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE,
108 VBVAEXHOSTCTL_TYPE_GHH_ENABLE,
109 VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED,
110 VBVAEXHOSTCTL_TYPE_GHH_DISABLE,
111 VBVAEXHOSTCTL_TYPE_GHH_RESIZE
112} VBVAEXHOSTCTL_TYPE;
113
114struct VBVAEXHOSTCTL;
115
116typedef DECLCALLBACKPTR(void, PFNVBVAEXHOSTCTL_COMPLETE)(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvComplete);
117
118typedef struct VBVAEXHOSTCTL
119{
120 RTLISTNODE Node;
121 VBVAEXHOSTCTL_TYPE enmType;
122 union
123 {
124 struct
125 {
126 uint8_t * pu8Cmd;
127 uint32_t cbCmd;
128 } cmd;
129
130 struct
131 {
132 PSSMHANDLE pSSM;
133 uint32_t u32Version;
134 } state;
135 } u;
136 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete;
137 void *pvComplete;
138} VBVAEXHOSTCTL;
139
/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
 * but can be called with other VBoxVBVAExS** (submitter) functions except Init/Start/Term apparently.
 * Can only be called by the processor, i.e. the entity that acquired the processor state by direct or indirect call to the VBoxVBVAExHSCheckCommands.
 * See more detailed comments in headers for function definitions. */
144typedef enum
145{
146 VBVAEXHOST_DATA_TYPE_NO_DATA = 0,
147 VBVAEXHOST_DATA_TYPE_CMD,
148 VBVAEXHOST_DATA_TYPE_HOSTCTL,
149 VBVAEXHOST_DATA_TYPE_GUESTCTL
150} VBVAEXHOST_DATA_TYPE;
151static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd);
152
153static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd);
154static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc);
155
/* VBoxVBVAExHS**, i.e. submitter functions, can be called concurrently with itself
 * as well as with other VBoxVBVAEx** functions except Init/Start/Term apparently. */
158static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva);
159
160static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva);
161static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA);
162static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva);
163static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva);
164static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM);
165static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version);
166
167static VBVAEXHOSTCTL* VBoxVBVAExHCtlAlloc(VBVAEXHOSTCONTEXT *pCmdVbva)
168{
169#ifndef VBOXVDBG_MEMCACHE_DISABLE
170 return (VBVAEXHOSTCTL*)RTMemCacheAlloc(pCmdVbva->CtlCache);
171#else
172 return (VBVAEXHOSTCTL*)RTMemAlloc(sizeof (VBVAEXHOSTCTL));
173#endif
174}
175
176static void VBoxVBVAExHCtlFree(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl)
177{
178#ifndef VBOXVDBG_MEMCACHE_DISABLE
179 RTMemCacheFree(pCmdVbva->CtlCache, pCtl);
180#else
181 RTMemFree(pCtl);
182#endif
183}
184
185static VBVAEXHOSTCTL* VBoxVBVAExHCtlCreate(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL_TYPE enmType)
186{
187 VBVAEXHOSTCTL* pCtl = VBoxVBVAExHCtlAlloc(pCmdVbva);
188 if (!pCtl)
189 {
190 WARN(("VBoxVBVAExHCtlAlloc failed\n"));
191 return NULL;
192 }
193
194 pCtl->enmType = enmType;
195 return pCtl;
196}
197
198static int vboxVBVAExHSProcessorAcquire(struct VBVAEXHOSTCONTEXT *pCmdVbva)
199{
200 Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);
201
202 if (ASMAtomicCmpXchgS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_PROCESSING, VBVAEXHOSTCONTEXT_STATE_LISTENING))
203 return VINF_SUCCESS;
204 return VERR_SEM_BUSY;
205}
206
207static VBVAEXHOSTCTL* vboxVBVAExHPCheckCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, bool *pfHostCtl, bool fHostOnlyMode)
208{
209 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
210
211 if(!fHostOnlyMode && !ASMAtomicUoReadU32(&pCmdVbva->u32cCtls))
212 return NULL;
213
214 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
215 if (RT_SUCCESS(rc))
216 {
217 VBVAEXHOSTCTL* pCtl = RTListGetFirst(&pCmdVbva->HostCtlList, VBVAEXHOSTCTL, Node);
218 if (pCtl)
219 *pfHostCtl = true;
220 else if (!fHostOnlyMode)
221 {
222 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
223 {
224 pCtl = RTListGetFirst(&pCmdVbva->GuestCtlList, VBVAEXHOSTCTL, Node);
225 /* pCtl can not be null here since pCmdVbva->u32cCtls is not null,
226 * and there are no HostCtl commands*/
227 Assert(pCtl);
228 *pfHostCtl = false;
229 }
230 }
231
232 if (pCtl)
233 {
234 RTListNodeRemove(&pCtl->Node);
235 ASMAtomicDecU32(&pCmdVbva->u32cCtls);
236 }
237
238 RTCritSectLeave(&pCmdVbva->CltCritSect);
239
240 return pCtl;
241 }
242 else
243 WARN(("RTCritSectEnter failed %d\n", rc));
244
245 return NULL;
246}
247
248static VBVAEXHOSTCTL* VBoxVBVAExHPCheckHostCtlOnDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
249{
250 bool fHostCtl = false;
251 VBVAEXHOSTCTL* pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostCtl, true);
252 Assert(!pCtl || fHostCtl);
253 return pCtl;
254}
255
256static int VBoxVBVAExHPPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
257{
258 if (pCmdVbva->i32EnableState < VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
259 {
260 WARN(("Invalid state\n"));
261 return VERR_INVALID_STATE;
262 }
263
264 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
265 return VINF_SUCCESS;
266}
267
268static int VBoxVBVAExHPResume(struct VBVAEXHOSTCONTEXT *pCmdVbva)
269{
270 if (pCmdVbva->i32EnableState != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
271 {
272 WARN(("Invalid state\n"));
273 return VERR_INVALID_STATE;
274 }
275
276 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
277 return VINF_SUCCESS;
278}
279
280
281static bool vboxVBVAExHPCheckProcessCtlInternal(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl)
282{
283 switch (pCtl->enmType)
284 {
285 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE:
286 {
287 int rc = VBoxVBVAExHPPause(pCmdVbva);
288 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
289 return true;
290 }
291 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME:
292 {
293 int rc = VBoxVBVAExHPResume(pCmdVbva);
294 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
295 return true;
296 }
297 default:
298 return false;
299 }
300}
301
302static void vboxVBVAExHPProcessorRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
303{
304 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
305
306 ASMAtomicWriteS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
307}
308
309static void vboxVBVAExHPHgEventSet(struct VBVAEXHOSTCONTEXT *pCmdVbva)
310{
311 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
312 if (pCmdVbva->pVBVA)
313 ASMAtomicOrU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, VBVA_F_STATE_PROCESSING);
314}
315
316static void vboxVBVAExHPHgEventClear(struct VBVAEXHOSTCONTEXT *pCmdVbva)
317{
318 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
319 if (pCmdVbva->pVBVA)
320 ASMAtomicAndU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, ~VBVA_F_STATE_PROCESSING);
321}
322
323static int vboxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
324{
325 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
326 Assert(pCmdVbva->i32EnableState > VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
327
328 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
329
330 uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
331 uint32_t indexRecordFree = pVBVA->indexRecordFree;
332
333 Log(("first = %d, free = %d\n",
334 indexRecordFirst, indexRecordFree));
335
336 if (indexRecordFirst == indexRecordFree)
337 {
338 /* No records to process. Return without assigning output variables. */
339 return VINF_EOF;
340 }
341
342 uint32_t cbRecordCurrent = ASMAtomicReadU32(&pVBVA->aRecords[indexRecordFirst].cbRecord);
343
344 /* A new record need to be processed. */
345 if (cbRecordCurrent & VBVA_F_RECORD_PARTIAL)
346 {
347 /* the record is being recorded, try again */
348 return VINF_TRY_AGAIN;
349 }
350
351 uint32_t cbRecord = cbRecordCurrent & ~VBVA_F_RECORD_PARTIAL;
352
353 if (!cbRecord)
354 {
355 /* the record is being recorded, try again */
356 return VINF_TRY_AGAIN;
357 }
358
359 /* we should not get partial commands here actually */
360 Assert(cbRecord);
361
362 /* The size of largest contiguous chunk in the ring biffer. */
363 uint32_t u32BytesTillBoundary = pVBVA->cbData - pVBVA->off32Data;
364
365 /* The pointer to data in the ring buffer. */
366 uint8_t *pSrc = &pVBVA->au8Data[pVBVA->off32Data];
367
368 /* Fetch or point the data. */
369 if (u32BytesTillBoundary >= cbRecord)
370 {
371 /* The command does not cross buffer boundary. Return address in the buffer. */
372 *ppCmd = pSrc;
373 *pcbCmd = cbRecord;
374 return VINF_SUCCESS;
375 }
376
377 LogRel(("CmdVbva: cross-bound writes unsupported\n"));
378 return VERR_INVALID_STATE;
379}
380
381static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd)
382{
383 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
384 pVBVA->off32Data = (pVBVA->off32Data + cbCmd) % pVBVA->cbData;
385
386 pVBVA->indexRecordFirst = (pVBVA->indexRecordFirst + 1) % RT_ELEMENTS(pVBVA->aRecords);
387}
388
389static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc)
390{
391 if (pCtl->pfnComplete)
392 pCtl->pfnComplete(pCmdVbva, pCtl, rc, pCtl->pvComplete);
393 else
394 VBoxVBVAExHCtlFree(pCmdVbva, pCtl);
395}
396
397static VBVAEXHOST_DATA_TYPE vboxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
398{
399 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
400 VBVAEXHOSTCTL*pCtl;
401 bool fHostClt;
402
403 for(;;)
404 {
405 pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostClt, false);
406 if (pCtl)
407 {
408 if (fHostClt)
409 {
410 if (!vboxVBVAExHPCheckProcessCtlInternal(pCmdVbva, pCtl))
411 {
412 *ppCmd = (uint8_t*)pCtl;
413 *pcbCmd = sizeof (*pCtl);
414 return VBVAEXHOST_DATA_TYPE_HOSTCTL;
415 }
416 continue;
417 }
418 else
419 {
420 *ppCmd = (uint8_t*)pCtl;
421 *pcbCmd = sizeof (*pCtl);
422 return VBVAEXHOST_DATA_TYPE_GUESTCTL;
423 }
424 }
425
426 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) <= VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
427 return VBVAEXHOST_DATA_TYPE_NO_DATA;
428
429 int rc = vboxVBVAExHPCmdGet(pCmdVbva, ppCmd, pcbCmd);
430 switch (rc)
431 {
432 case VINF_SUCCESS:
433 return VBVAEXHOST_DATA_TYPE_CMD;
434 case VINF_EOF:
435 return VBVAEXHOST_DATA_TYPE_NO_DATA;
436 case VINF_TRY_AGAIN:
437 RTThreadSleep(1);
438 continue;
439 default:
440 /* this is something really unexpected, i.e. most likely guest has written something incorrect to the VBVA buffer */
441 WARN(("Warning: vboxVBVAExHCmdGet returned unexpected status %d\n", rc));
442 return VBVAEXHOST_DATA_TYPE_NO_DATA;
443 }
444 }
445
446 WARN(("Warning: VBoxVBVAExHCmdGet unexpected state\n"));
447 return VBVAEXHOST_DATA_TYPE_NO_DATA;
448}
449
450static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
451{
452 VBVAEXHOST_DATA_TYPE enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
453 if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
454 {
455 vboxVBVAExHPHgEventClear(pCmdVbva);
456 vboxVBVAExHPProcessorRelease(pCmdVbva);
457 /* we need to prevent racing between us clearing the flag and command check/submission thread, i.e.
458 * 1. we check the queue -> and it is empty
459 * 2. submitter adds command to the queue
460 * 3. submitter checks the "processing" -> and it is true , thus it does not submit a notification
461 * 4. we clear the "processing" state
462 * 5. ->here we need to re-check the queue state to ensure we do not leak the notification of the above command
463 * 6. if the queue appears to be not-empty set the "processing" state back to "true"
464 **/
465 int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
466 if (RT_SUCCESS(rc))
467 {
468 /* we are the processor now */
469 enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
470 if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
471 {
472 vboxVBVAExHPProcessorRelease(pCmdVbva);
473 return VBVAEXHOST_DATA_TYPE_NO_DATA;
474 }
475
476 vboxVBVAExHPHgEventSet(pCmdVbva);
477 }
478 }
479
480 return enmType;
481}
482
483DECLINLINE(bool) vboxVBVAExHSHasCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
484{
485 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
486
487 if (pVBVA)
488 {
489 uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
490 uint32_t indexRecordFree = pVBVA->indexRecordFree;
491
492 if (indexRecordFirst != indexRecordFree)
493 return true;
494 }
495
496 return !!ASMAtomicReadU32(&pCmdVbva->u32cCtls);
497}
498
/* Checks whether new commands are ready for processing.
 * @returns
 * VINF_SUCCESS - there are commands in the queue, and the given thread is now the processor (i.e. typically it would delegate processing to a worker thread)
 * VINF_EOF - no commands in the queue
 * VINF_ALREADY_INITIALIZED - another thread is already processing the commands
 * VERR_INVALID_STATE - the VBVA is paused or pausing */
505static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
506{
507 int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
508 if (RT_SUCCESS(rc))
509 {
510 /* we are the processor now */
511 if (vboxVBVAExHSHasCommands(pCmdVbva))
512 {
513 vboxVBVAExHPHgEventSet(pCmdVbva);
514 return VINF_SUCCESS;
515 }
516
517 vboxVBVAExHPProcessorRelease(pCmdVbva);
518 return VINF_EOF;
519 }
520 if (rc == VERR_SEM_BUSY)
521 return VINF_ALREADY_INITIALIZED;
522 return VERR_INVALID_STATE;
523}
524
525static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva)
526{
527 memset(pCmdVbva, 0, sizeof (*pCmdVbva));
528 int rc = RTCritSectInit(&pCmdVbva->CltCritSect);
529 if (RT_SUCCESS(rc))
530 {
531#ifndef VBOXVDBG_MEMCACHE_DISABLE
532 rc = RTMemCacheCreate(&pCmdVbva->CtlCache, sizeof (VBVAEXHOSTCTL),
533 0, /* size_t cbAlignment */
534 UINT32_MAX, /* uint32_t cMaxObjects */
535 NULL, /* PFNMEMCACHECTOR pfnCtor*/
536 NULL, /* PFNMEMCACHEDTOR pfnDtor*/
537 NULL, /* void *pvUser*/
538 0 /* uint32_t fFlags*/
539 );
540 if (RT_SUCCESS(rc))
541#endif
542 {
543 RTListInit(&pCmdVbva->GuestCtlList);
544 RTListInit(&pCmdVbva->HostCtlList);
545 pCmdVbva->i32State = VBVAEXHOSTCONTEXT_STATE_PROCESSING;
546 pCmdVbva->i32EnableState = VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
547 return VINF_SUCCESS;
548 }
549#ifndef VBOXVDBG_MEMCACHE_DISABLE
550 else
551 WARN(("RTMemCacheCreate failed %d\n", rc));
552#endif
553 }
554 else
555 WARN(("RTCritSectInit failed %d\n", rc));
556
557 return rc;
558}
559
560DECLINLINE(bool) VBoxVBVAExHSIsEnabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
561{
562 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) >= VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
563}
564
565DECLINLINE(bool) VBoxVBVAExHSIsDisabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
566{
567 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) == VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
568}
569
570static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA)
571{
572 if (VBoxVBVAExHSIsEnabled(pCmdVbva))
573 {
574 WARN(("VBVAEx is enabled already\n"));
575 return VERR_INVALID_STATE;
576 }
577
578 pCmdVbva->pVBVA = pVBVA;
579 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
580 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
581 return VINF_SUCCESS;
582}
583
584static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
585{
586 if (VBoxVBVAExHSIsDisabled(pCmdVbva))
587 return VINF_SUCCESS;
588
589 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
590 return VINF_SUCCESS;
591}
592
593static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva)
594{
595 /* ensure the processor is stopped */
596 Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);
597
598 /* ensure no one tries to submit the command */
599 if (pCmdVbva->pVBVA)
600 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
601
602 Assert(RTListIsEmpty(&pCmdVbva->GuestCtlList));
603 Assert(RTListIsEmpty(&pCmdVbva->HostCtlList));
604
605 RTCritSectDelete(&pCmdVbva->CltCritSect);
606
607#ifndef VBOXVDBG_MEMCACHE_DISABLE
608 RTMemCacheDestroy(pCmdVbva->CtlCache);
609#endif
610
611 memset(pCmdVbva, 0, sizeof (*pCmdVbva));
612}
613
614static int vboxVBVAExHSSaveGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
615{
616 int rc = SSMR3PutU32(pSSM, pCtl->enmType);
617 AssertRCReturn(rc, rc);
618 rc = SSMR3PutU32(pSSM, pCtl->u.cmd.cbCmd);
619 AssertRCReturn(rc, rc);
620 rc = SSMR3PutU32(pSSM, (uint32_t)(pCtl->u.cmd.pu8Cmd - pu8VramBase));
621 AssertRCReturn(rc, rc);
622
623 return VINF_SUCCESS;
624}
625
626static int vboxVBVAExHSSaveStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
627{
628 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
629 {
630 WARN(("vbva not paused\n"));
631 return VERR_INVALID_STATE;
632 }
633
634 VBVAEXHOSTCTL* pCtl;
635 int rc;
636 RTListForEach(&pCmdVbva->GuestCtlList, pCtl, VBVAEXHOSTCTL, Node)
637 {
638 rc = vboxVBVAExHSSaveGuestCtl(pCmdVbva, pCtl, pu8VramBase, pSSM);
639 AssertRCReturn(rc, rc);
640 }
641
642 rc = SSMR3PutU32(pSSM, 0);
643 AssertRCReturn(rc, rc);
644
645 return VINF_SUCCESS;
646}
/* Saves state.
 * @returns VINF_SUCCESS on success, or a failure status if saving the state fails.
 */
650static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
651{
652 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
653 if (RT_FAILURE(rc))
654 {
655 WARN(("RTCritSectEnter failed %d\n", rc));
656 return rc;
657 }
658
659 rc = vboxVBVAExHSSaveStateLocked(pCmdVbva, pu8VramBase, pSSM);
660 if (RT_FAILURE(rc))
661 WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
662
663 RTCritSectLeave(&pCmdVbva->CltCritSect);
664
665 return rc;
666}
667
668static int vboxVBVAExHSLoadGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
669{
670 uint32_t u32;
671 int rc = SSMR3GetU32(pSSM, &u32);
672 AssertRCReturn(rc, rc);
673
674 if (!u32)
675 return VINF_EOF;
676
677 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(pCmdVbva, (VBVAEXHOSTCTL_TYPE)u32);
678 if (!pHCtl)
679 {
680 WARN(("VBoxVBVAExHCtlCreate failed\n"));
681 return VERR_NO_MEMORY;
682 }
683
684 rc = SSMR3GetU32(pSSM, &u32);
685 AssertRCReturn(rc, rc);
686 pHCtl->u.cmd.cbCmd = u32;
687
688 rc = SSMR3GetU32(pSSM, &u32);
689 AssertRCReturn(rc, rc);
690 pHCtl->u.cmd.pu8Cmd = pu8VramBase + u32;
691
692 RTListAppend(&pCmdVbva->GuestCtlList, &pHCtl->Node);
693 ++pCmdVbva->u32cCtls;
694
695 return VINF_SUCCESS;
696}
697
698
699static int vboxVBVAExHSLoadStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
700{
701 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
702 {
703 WARN(("vbva not stopped\n"));
704 return VERR_INVALID_STATE;
705 }
706
707 int rc;
708
709 do {
710 rc = vboxVBVAExHSLoadGuestCtl(pCmdVbva, pu8VramBase, pSSM, u32Version);
711 AssertRCReturn(rc, rc);
712 } while (VINF_EOF != rc);
713
714 return VINF_SUCCESS;
715}
716
717/* Loads state
718 * @returns - same as VBoxVBVAExHSCheckCommands, or failure on load state fail
719 */
720static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
721{
722 Assert(VGA_SAVEDSTATE_VERSION_3D <= u32Version);
723 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
724 if (RT_FAILURE(rc))
725 {
726 WARN(("RTCritSectEnter failed %d\n", rc));
727 return rc;
728 }
729
730 rc = vboxVBVAExHSLoadStateLocked(pCmdVbva, pu8VramBase, pSSM, u32Version);
731 if (RT_FAILURE(rc))
732 WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
733
734 RTCritSectLeave(&pCmdVbva->CltCritSect);
735
736 return rc;
737}
738
739typedef enum
740{
741 VBVAEXHOSTCTL_SOURCE_GUEST = 0,
742 VBVAEXHOSTCTL_SOURCE_HOST
743} VBVAEXHOSTCTL_SOURCE;
744
745
746static int VBoxVBVAExHCtlSubmit(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
747{
748 if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
749 {
750 Log(("cmd vbva not enabled\n"));
751 return VERR_INVALID_STATE;
752 }
753
754 pCtl->pfnComplete = pfnComplete;
755 pCtl->pvComplete = pvComplete;
756
757 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
758 if (RT_SUCCESS(rc))
759 {
760 if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
761 {
762 Log(("cmd vbva not enabled\n"));
763 RTCritSectLeave(&pCmdVbva->CltCritSect);
764 return VERR_INVALID_STATE;
765 }
766
767 if (enmSource > VBVAEXHOSTCTL_SOURCE_GUEST)
768 {
769 RTListAppend(&pCmdVbva->HostCtlList, &pCtl->Node);
770 }
771 else
772 RTListAppend(&pCmdVbva->GuestCtlList, &pCtl->Node);
773
774 ASMAtomicIncU32(&pCmdVbva->u32cCtls);
775
776 RTCritSectLeave(&pCmdVbva->CltCritSect);
777
778 rc = VBoxVBVAExHSCheckCommands(pCmdVbva);
779 }
780 else
781 WARN(("RTCritSectEnter failed %d\n", rc));
782
783 return rc;
784}
785
786typedef struct VBOXVDMAHOST
787{
788 PHGSMIINSTANCE pHgsmi;
789 PVGASTATE pVGAState;
790#ifdef VBOX_WITH_CRHGSMI
791 VBVAEXHOSTCONTEXT CmdVbva;
792 VBOXVDMATHREAD Thread;
793 VBOXCRCMD_SVRINFO CrSrvInfo;
794 VBVAEXHOSTCTL* pCurRemainingHostCtl;
795 RTSEMEVENTMULTI HostCrCtlCompleteEvent;
796 int32_t volatile i32cHostCrCtlCompleted;
797#endif
798#ifdef VBOX_VDMA_WITH_WATCHDOG
799 PTMTIMERR3 WatchDogTimer;
800#endif
801} VBOXVDMAHOST, *PVBOXVDMAHOST;
802
803#ifdef VBOX_WITH_CRHGSMI
804
805void VBoxVDMAThreadNotifyConstructSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
806{
807 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_CREATING);
808 PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
809 void *pvChanged = pThread->pvChanged;
810
811 pThread->pfnChanged = NULL;
812 pThread->pvChanged = NULL;
813
814 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_CREATED);
815
816 if (pfnChanged)
817 pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
818}
819
820void VBoxVDMAThreadNotifyTerminatingSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
821{
822 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_TERMINATING);
823 PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
824 void *pvChanged = pThread->pvChanged;
825
826 pThread->pfnChanged = NULL;
827 pThread->pvChanged = NULL;
828
829 if (pfnChanged)
830 pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
831}
832
833DECLINLINE(bool) VBoxVDMAThreadIsTerminating(PVBOXVDMATHREAD pThread)
834{
835 return ASMAtomicUoReadU32(&pThread->u32State) == VBOXVDMATHREAD_STATE_TERMINATING;
836}
837
838void VBoxVDMAThreadInit(PVBOXVDMATHREAD pThread)
839{
840 memset(pThread, 0, sizeof (*pThread));
841 pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
842}
843
844int VBoxVDMAThreadCleanup(PVBOXVDMATHREAD pThread)
845{
846 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
847 switch (u32State)
848 {
849 case VBOXVDMATHREAD_STATE_TERMINATED:
850 return VINF_SUCCESS;
851 case VBOXVDMATHREAD_STATE_TERMINATING:
852 {
853 int rc = RTThreadWait(pThread->hWorkerThread, RT_INDEFINITE_WAIT, NULL);
854 if (!RT_SUCCESS(rc))
855 {
856 WARN(("RTThreadWait failed %d\n", rc));
857 return rc;
858 }
859
860 RTSemEventDestroy(pThread->hEvent);
861
862 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATED);
863 return VINF_SUCCESS;
864 }
865 default:
866 WARN(("invalid state"));
867 return VERR_INVALID_STATE;
868 }
869}
870
871int VBoxVDMAThreadCreate(PVBOXVDMATHREAD pThread, PFNRTTHREAD pfnThread, void *pvThread, PFNVBOXVDMATHREAD_CHANGED pfnCreated, void*pvCreated)
872{
873 int rc = VBoxVDMAThreadCleanup(pThread);
874 if (RT_FAILURE(rc))
875 {
876 WARN(("VBoxVDMAThreadCleanup failed %d\n", rc));
877 return rc;
878 }
879
880 rc = RTSemEventCreate(&pThread->hEvent);
881 if (RT_SUCCESS(rc))
882 {
883 pThread->u32State = VBOXVDMATHREAD_STATE_CREATING;
884 pThread->pfnChanged = pfnCreated;
885 pThread->pvChanged = pvCreated;
886 rc = RTThreadCreate(&pThread->hWorkerThread, pfnThread, pvThread, 0, RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "VDMA");
887 if (RT_SUCCESS(rc))
888 return VINF_SUCCESS;
889 else
890 WARN(("RTThreadCreate failed %d\n", rc));
891
892 RTSemEventDestroy(pThread->hEvent);
893 }
894 else
895 WARN(("RTSemEventCreate failed %d\n", rc));
896
897 pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
898
899 return rc;
900}
901
902DECLINLINE(int) VBoxVDMAThreadEventNotify(PVBOXVDMATHREAD pThread)
903{
904 int rc = RTSemEventSignal(pThread->hEvent);
905 AssertRC(rc);
906 return rc;
907}
908
909DECLINLINE(int) VBoxVDMAThreadEventWait(PVBOXVDMATHREAD pThread, RTMSINTERVAL cMillies)
910{
911 int rc = RTSemEventWait(pThread->hEvent, cMillies);
912 AssertRC(rc);
913 return rc;
914}
915
916int VBoxVDMAThreadTerm(PVBOXVDMATHREAD pThread, PFNVBOXVDMATHREAD_CHANGED pfnTerminated, void*pvTerminated, bool fNotify)
917{
918 int rc;
919 do
920 {
921 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
922 switch (u32State)
923 {
924 case VBOXVDMATHREAD_STATE_CREATED:
925 pThread->pfnChanged = pfnTerminated;
926 pThread->pvChanged = pvTerminated;
927 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATING);
928 if (fNotify)
929 {
930 rc = VBoxVDMAThreadEventNotify(pThread);
931 AssertRC(rc);
932 }
933 return VINF_SUCCESS;
934 case VBOXVDMATHREAD_STATE_TERMINATING:
935 case VBOXVDMATHREAD_STATE_TERMINATED:
936 {
937 WARN(("thread is marked to termination or terminated\nn"));
938 return VERR_INVALID_STATE;
939 }
940 case VBOXVDMATHREAD_STATE_CREATING:
941 {
942 /* wait till the thread creation is completed */
943 WARN(("concurrent thread create/destron\n"));
944 RTThreadYield();
945 continue;
946 }
947 default:
948 WARN(("invalid state"));
949 return VERR_INVALID_STATE;
950 }
951 } while (1);
952
953 WARN(("should never be here\n"));
954 return VERR_INTERNAL_ERROR;
955}
956
957static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource);
958
959typedef DECLCALLBACK(void) FNVBOXVDMACRCTL_CALLBACK(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext);
960typedef FNVBOXVDMACRCTL_CALLBACK *PFNVBOXVDMACRCTL_CALLBACK;
961
962typedef struct VBOXVDMACMD_CHROMIUM_CTL_PRIVATE
963{
964 uint32_t cRefs;
965 int32_t rc;
966 PFNVBOXVDMACRCTL_CALLBACK pfnCompletion;
967 void *pvCompletion;
968 VBOXVDMACMD_CHROMIUM_CTL Cmd;
969} VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, *PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE;
970
971#define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(_p) ((PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)(((uint8_t*)(_p)) - RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd)))
972
973static PVBOXVDMACMD_CHROMIUM_CTL vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE enmCmd, uint32_t cbCmd)
974{
975 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)RTMemAllocZ(cbCmd + RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd));
976 Assert(pHdr);
977 if (pHdr)
978 {
979 pHdr->cRefs = 1;
980 pHdr->rc = VERR_NOT_IMPLEMENTED;
981 pHdr->Cmd.enmType = enmCmd;
982 pHdr->Cmd.cbCmd = cbCmd;
983 return &pHdr->Cmd;
984 }
985
986 return NULL;
987}
988
989DECLINLINE(void) vboxVDMACrCtlRelease (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
990{
991 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
992 uint32_t cRefs = ASMAtomicDecU32(&pHdr->cRefs);
993 if(!cRefs)
994 {
995 RTMemFree(pHdr);
996 }
997}
998
999DECLINLINE(void) vboxVDMACrCtlRetain (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1000{
1001 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1002 ASMAtomicIncU32(&pHdr->cRefs);
1003}
1004
1005DECLINLINE(int) vboxVDMACrCtlGetRc (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1006{
1007 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1008 return pHdr->rc;
1009}
1010
1011static DECLCALLBACK(void) vboxVDMACrCtlCbSetEvent(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
1012{
1013 RTSemEventSignal((RTSEMEVENT)pvContext);
1014}
1015
1016static DECLCALLBACK(void) vboxVDMACrCtlCbReleaseCmd(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
1017{
1018 vboxVDMACrCtlRelease(pCmd);
1019}
1020
1021
1022static int vboxVDMACrCtlPostAsync (PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd, PFNVBOXVDMACRCTL_CALLBACK pfnCompletion, void *pvCompletion)
1023{
1024 if ( pVGAState->pDrv
1025 && pVGAState->pDrv->pfnCrHgsmiControlProcess)
1026 {
1027 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1028 pHdr->pfnCompletion = pfnCompletion;
1029 pHdr->pvCompletion = pvCompletion;
1030 pVGAState->pDrv->pfnCrHgsmiControlProcess(pVGAState->pDrv, pCmd, cbCmd);
1031 return VINF_SUCCESS;
1032 }
1033#ifdef DEBUG_misha
1034 Assert(0);
1035#endif
1036 return VERR_NOT_SUPPORTED;
1037}
1038
/**
 * Submits a chromium control command and blocks until the backend completes
 * it (the completion callback signals the local event semaphore).
 *
 * NOTE(review): when RTSemEventWaitNoResume fails (e.g. VERR_INTERRUPTED) the
 * semaphore is intentionally NOT destroyed, because the completion callback
 * may still signal it later; the handle leaks rather than risking a
 * use-after-free — confirm this is the intended policy.
 *
 * @returns Semaphore/submission status (use vboxVDMACrCtlGetRc for the
 *          command's own completion status).
 */
static int vboxVDMACrCtlPost(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd)
{
    RTSEMEVENT hComplEvent;
    int rc = RTSemEventCreate(&hComplEvent);
    AssertRC(rc);
    if(RT_SUCCESS(rc))
    {
        rc = vboxVDMACrCtlPostAsync(pVGAState, pCmd, cbCmd, vboxVDMACrCtlCbSetEvent, (void*)hComplEvent);
#ifdef DEBUG_misha
        AssertRC(rc);
#endif
        if (RT_SUCCESS(rc))
        {
            rc = RTSemEventWaitNoResume(hComplEvent, RT_INDEFINITE_WAIT);
            AssertRC(rc);
            if(RT_SUCCESS(rc))
            {
                /* callback has fired; safe to destroy the event */
                RTSemEventDestroy(hComplEvent);
            }
        }
        else
        {
            /* the command is completed */
            RTSemEventDestroy(hComplEvent);
        }
    }
    return rc;
}
1067
/* Context for synchronous HGCM control submission: the completion callback
 * stores the command status in rc and signals hEvent to wake the submitter.
 * ("CYNC" is presumably a historical typo for "SYNC"; name kept as-is.) */
typedef struct VDMA_VBVA_CTL_CYNC_COMPLETION
{
    int rc;             /* command completion status, set by the callback */
    RTSEMEVENT hEvent;  /* signalled by the callback when the command completes */
} VDMA_VBVA_CTL_CYNC_COMPLETION;
1073
1074static DECLCALLBACK(void) vboxVDMACrHgcmSubmitSyncCompletion(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
1075{
1076 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvCompletion;
1077 pData->rc = rc;
1078 rc = RTSemEventSignal(pData->hEvent);
1079 if (!RT_SUCCESS(rc))
1080 WARN(("RTSemEventSignal failed %d\n", rc));
1081}
1082
1083static int vboxVDMACrHgcmSubmitSync(struct VBOXVDMAHOST *pVdma, VBOXCRCMDCTL* pCtl, uint32_t cbCtl)
1084{
1085 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
1086 Data.rc = VERR_NOT_IMPLEMENTED;
1087 int rc = RTSemEventCreate(&Data.hEvent);
1088 if (!RT_SUCCESS(rc))
1089 {
1090 WARN(("RTSemEventCreate failed %d\n", rc));
1091 return rc;
1092 }
1093
1094 PVGASTATE pVGAState = pVdma->pVGAState;
1095 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCtl, cbCtl, vboxVDMACrHgcmSubmitSyncCompletion, &Data);
1096 if (RT_SUCCESS(rc))
1097 {
1098 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
1099 if (RT_SUCCESS(rc))
1100 {
1101 rc = Data.rc;
1102 if (!RT_SUCCESS(rc))
1103 {
1104 WARN(("pfnCrHgcmCtlSubmit command failed %d\n", rc));
1105 }
1106
1107 }
1108 else
1109 WARN(("RTSemEventWait failed %d\n", rc));
1110 }
1111 else
1112 WARN(("pfnCrHgcmCtlSubmit failed %d\n", rc));
1113
1114
1115 RTSemEventDestroy(Data.hEvent);
1116
1117 return rc;
1118}
1119
1120static int vdmaVBVACtlDisableSync(PVBOXVDMAHOST pVdma)
1121{
1122 VBVAEXHOSTCTL HCtl;
1123 HCtl.enmType = VBVAEXHOSTCTL_TYPE_GHH_DISABLE;
1124 return vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1125}
1126
/**
 * HGCM "remaining host command" iterator, called by the backend after the
 * ENABLE control: completes the host control handed out by the previous
 * iteration (if any) with @a prevCmdRc, then returns the next pending one.
 * Once the queue is drained (first call with no pending control), VBVA is
 * disabled so all subsequent host commands go the HGCM way.
 *
 * @returns Buffer of the next pending host control (its size in *pcbCtl),
 *          or NULL (*pcbCtl = 0) when none remain.
 */
static DECLCALLBACK(uint8_t*) vboxVDMACrHgcmHandleEnableRemainingHostCommand(HVBOXCRCMDCTL_REMAINING_HOST_COMMAND hClient, uint32_t *pcbCtl, int prevCmdRc)
{
    struct VBOXVDMAHOST *pVdma = hClient;
    if (!pVdma->pCurRemainingHostCtl)
    {
        /* disable VBVA, all subsequent host commands will go HGCM way */
        VBoxVBVAExHSDisable(&pVdma->CmdVbva);
    }
    else
    {
        /* complete the control returned on the previous iteration */
        VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pVdma->pCurRemainingHostCtl, prevCmdRc);
    }

    /* fetch the next pending host control, remembering it for the next call */
    pVdma->pCurRemainingHostCtl = VBoxVBVAExHPCheckHostCtlOnDisable(&pVdma->CmdVbva);
    if (pVdma->pCurRemainingHostCtl)
    {
        *pcbCtl = pVdma->pCurRemainingHostCtl->u.cmd.cbCmd;
        return pVdma->pCurRemainingHostCtl->u.cmd.pu8Cmd;
    }

    *pcbCtl = 0;
    return NULL;
}
1150
/* Invoked by the HGCM backend when its termination notification completes;
 * merely sanity-checks that VBVA is in the processing state and the worker
 * thread is already terminating. */
static DECLCALLBACK(void) vboxVDMACrHgcmNotifyTerminatingDoneCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient)
{
    struct VBOXVDMAHOST *pVdma = hClient;
    Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pVdma->Thread.u32State == VBOXVDMATHREAD_STATE_TERMINATING);
}
1157
1158static DECLCALLBACK(int) vboxVDMACrHgcmNotifyTerminatingCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient, VBOXCRCMDCTL_HGCMENABLE_DATA *pHgcmEnableData)
1159{
1160 struct VBOXVDMAHOST *pVdma = hClient;
1161 VBVAEXHOSTCTL HCtl;
1162 HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD;
1163 int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1164
1165 pHgcmEnableData->hRHCmd = pVdma;
1166 pHgcmEnableData->pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1167
1168 if (RT_FAILURE(rc))
1169 {
1170 if (rc == VERR_INVALID_STATE)
1171 rc = VINF_SUCCESS;
1172 else
1173 WARN(("vdmaVBVACtlSubmitSync failed %d\n", rc));
1174 }
1175
1176 return rc;
1177}
1178
1179static int vboxVDMACrHgcmHandleEnable(struct VBOXVDMAHOST *pVdma)
1180{
1181 VBOXCRCMDCTL_ENABLE Enable;
1182 Enable.Hdr.enmType = VBOXCRCMDCTL_TYPE_ENABLE;
1183 Enable.Data.hRHCmd = pVdma;
1184 Enable.Data.pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1185
1186 int rc = vboxVDMACrHgcmSubmitSync(pVdma, &Enable.Hdr, sizeof (Enable));
1187 Assert(!pVdma->pCurRemainingHostCtl);
1188 if (RT_SUCCESS(rc))
1189 {
1190 Assert(!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1191 return VINF_SUCCESS;
1192 }
1193
1194 Assert(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1195 WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));
1196
1197 return rc;
1198}
1199
1200static int vdmaVBVAEnableProcess(struct VBOXVDMAHOST *pVdma, uint32_t u32Offset)
1201{
1202 if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1203 {
1204 WARN(("vdma VBVA is already enabled\n"));
1205 return VERR_INVALID_STATE;
1206 }
1207
1208 VBVABUFFER *pVBVA = (VBVABUFFER *)HGSMIOffsetToPointerHost(pVdma->pHgsmi, u32Offset);
1209 if (!pVBVA)
1210 {
1211 WARN(("invalid offset %d\n", u32Offset));
1212 return VERR_INVALID_PARAMETER;
1213 }
1214
1215 if (!pVdma->CrSrvInfo.pfnEnable)
1216 {
1217#ifdef DEBUG_misha
1218 WARN(("pfnEnable is NULL\n"));
1219 return VERR_NOT_SUPPORTED;
1220#endif
1221 }
1222
1223 int rc = VBoxVBVAExHSEnable(&pVdma->CmdVbva, pVBVA);
1224 if (RT_SUCCESS(rc))
1225 {
1226 VBOXCRCMDCTL_DISABLE Disable;
1227 Disable.Hdr.enmType = VBOXCRCMDCTL_TYPE_DISABLE;
1228 Disable.Data.hNotifyTerm = pVdma;
1229 Disable.Data.pfnNotifyTerm = vboxVDMACrHgcmNotifyTerminatingCb;
1230 Disable.Data.pfnNotifyTermDone = vboxVDMACrHgcmNotifyTerminatingDoneCb;
1231 rc = vboxVDMACrHgcmSubmitSync(pVdma, &Disable.Hdr, sizeof (Disable));
1232 if (RT_SUCCESS(rc))
1233 {
1234 PVGASTATE pVGAState = pVdma->pVGAState;
1235 VBOXCRCMD_SVRENABLE_INFO Info;
1236 Info.hCltScr = pVGAState->pDrv;
1237 Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
1238 Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
1239 Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
1240 rc = pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
1241 if (RT_SUCCESS(rc))
1242 return VINF_SUCCESS;
1243 else
1244 WARN(("pfnEnable failed %d\n", rc));
1245
1246 vboxVDMACrHgcmHandleEnable(pVdma);
1247 }
1248 else
1249 WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));
1250
1251 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1252 }
1253 else
1254 WARN(("VBoxVBVAExHSEnable failed %d\n", rc));
1255
1256 return rc;
1257}
1258
/**
 * Disables command VBVA processing, switching the chromium server off the
 * VBVA path.
 *
 * @param pVdma         The VDMA host state.
 * @param fDoHgcmEnable When true, re-enable the HGCM command path so host
 *                      controls keep flowing in order (see comment below).
 * @returns VBox status code.
 *
 * NOTE(review): pfnDisable is invoked without a NULL check here, while
 * vdmaVBVAEnableProcess checks pfnEnable — confirm pfnDisable can never be
 * NULL once VBVA is enabled.
 */
static int vdmaVBVADisableProcess(struct VBOXVDMAHOST *pVdma, bool fDoHgcmEnable)
{
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    {
        Log(("vdma VBVA is already disabled\n"));
        return VINF_SUCCESS;
    }

    int rc = pVdma->CrSrvInfo.pfnDisable(pVdma->CrSrvInfo.hSvr);
    if (RT_SUCCESS(rc))
    {
        if (fDoHgcmEnable)
        {
            /* disable is a bit tricky
             * we need to ensure the host ctl commands do not come out of order
             * and do not come over HGCM channel until after it is enabled */
            rc = vboxVDMACrHgcmHandleEnable(pVdma);
            if (RT_SUCCESS(rc))
                return rc;

            /* HGCM hand-over failed: best-effort rollback, re-enable the
             * server on the VBVA path (its status is deliberately ignored) */
            PVGASTATE pVGAState = pVdma->pVGAState;
            VBOXCRCMD_SVRENABLE_INFO Info;
            Info.hCltScr = pVGAState->pDrv;
            Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
            Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
            Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
            pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
        }
    }
    else
        WARN(("pfnDisable failed %d\n", rc));

    return rc;
}
1293
1294static int vboxVDMACrHostCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd, bool *pfContinue)
1295{
1296 *pfContinue = true;
1297
1298 switch (pCmd->enmType)
1299 {
1300 case VBVAEXHOSTCTL_TYPE_HH_SAVESTATE:
1301 {
1302 PVGASTATE pVGAState = pVdma->pVGAState;
1303 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1304 int rc = VBoxVBVAExHSSaveState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM);
1305 if (RT_FAILURE(rc))
1306 {
1307 WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
1308 return rc;
1309 }
1310 return pVdma->CrSrvInfo.pfnSaveState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM);
1311 }
1312 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE:
1313 {
1314 PVGASTATE pVGAState = pVdma->pVGAState;
1315 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1316 int rc = VBoxVBVAExHSLoadState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1317 if (RT_FAILURE(rc))
1318 {
1319 WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
1320 return rc;
1321 }
1322 return pVdma->CrSrvInfo.pfnLoadState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1323 }
1324 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1325 {
1326 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1327 {
1328 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1329 return VERR_INVALID_STATE;
1330 }
1331 return pVdma->CrSrvInfo.pfnHostCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1332 }
1333 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1334 {
1335 int rc = vdmaVBVADisableProcess(pVdma, true);
1336 if (RT_FAILURE(rc))
1337 {
1338 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1339 return rc;
1340 }
1341
1342 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
1343 }
1344 case VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD:
1345 {
1346 int rc = vdmaVBVADisableProcess(pVdma, false);
1347 if (RT_FAILURE(rc))
1348 {
1349 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1350 return rc;
1351 }
1352
1353 rc = VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, true);
1354 if (RT_FAILURE(rc))
1355 {
1356 WARN(("VBoxVDMAThreadTerm failed %d\n", rc));
1357 return rc;
1358 }
1359
1360 *pfContinue = false;
1361 return VINF_SUCCESS;
1362 }
1363 default:
1364 WARN(("unexpected host ctl type %d\n", pCmd->enmType));
1365 return VERR_INVALID_PARAMETER;
1366 }
1367}
1368
1369static int vboxVDMACrGuestCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd)
1370{
1371 VBVAEXHOSTCTL_TYPE enmType = pCmd->enmType;
1372 switch (enmType)
1373 {
1374 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1375 {
1376 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1377 {
1378 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1379 return VERR_INVALID_STATE;
1380 }
1381 return pVdma->CrSrvInfo.pfnGuestCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1382 }
1383 case VBVAEXHOSTCTL_TYPE_GHH_RESIZE:
1384 {
1385 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1386 {
1387 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1388 return VERR_INVALID_STATE;
1389 }
1390
1391 uint32_t cbCmd = pCmd->u.cmd.cbCmd;
1392
1393 if (cbCmd % sizeof (VBOXCMDVBVA_RESIZE_ENTRY))
1394 {
1395 WARN(("invalid buffer size\n"));
1396 return VERR_INVALID_PARAMETER;
1397 }
1398
1399 uint32_t cElements = cbCmd / sizeof (VBOXCMDVBVA_RESIZE_ENTRY);
1400 if (!cElements)
1401 {
1402 WARN(("invalid buffer size\n"));
1403 return VERR_INVALID_PARAMETER;
1404 }
1405
1406 VBOXCMDVBVA_RESIZE *pResize = (VBOXCMDVBVA_RESIZE*)pCmd->u.cmd.pu8Cmd;
1407 PVGASTATE pVGAState = pVdma->pVGAState;
1408 int rc = VINF_SUCCESS;
1409
1410 for (uint32_t i = 0; i < cElements; ++i)
1411 {
1412 VBOXCMDVBVA_RESIZE_ENTRY *pEntry = &pResize->aEntries[i];
1413 VBVAINFOSCREEN Screen = pEntry->Screen;
1414 VBVAINFOVIEW View;
1415 uint32_t u32StartOffsetPreserve = 0;
1416 if (Screen.u32StartOffset == 0xffffffff)
1417 {
1418 if (Screen.u16Flags & VBVA_SCREEN_F_DISABLED)
1419 {
1420 u32StartOffsetPreserve = 0xffffffff;
1421 Screen.u32StartOffset = 0;
1422 }
1423 else
1424 {
1425 WARN(("invalid parameter\n"));
1426 rc = VERR_INVALID_PARAMETER;
1427 break;
1428 }
1429 }
1430
1431
1432 View.u32ViewIndex = Screen.u32ViewIndex;
1433 View.u32ViewOffset = 0;
1434 View.u32ViewSize = Screen.u32LineSize * Screen.u32Height + Screen.u32StartOffset;
1435 View.u32MaxScreenSize = View.u32ViewSize + Screen.u32Width + 1; /* <- make VBVAInfoScreen logic (offEnd < pView->u32MaxScreenSize) happy */
1436
1437 rc = VBVAInfoView(pVGAState, &View);
1438 if (RT_SUCCESS(rc))
1439 {
1440
1441 rc = VBVAInfoScreen(pVGAState, &Screen);
1442 if (RT_SUCCESS(rc))
1443 {
1444 if (u32StartOffsetPreserve)
1445 Screen.u32StartOffset = u32StartOffsetPreserve;
1446
1447 rc = pVdma->CrSrvInfo.pfnResize(pVdma->CrSrvInfo.hSvr, &Screen, u32StartOffsetPreserve ? NULL : pVGAState->vram_ptrR3 + Screen.u32StartOffset);
1448 if (RT_SUCCESS(rc))
1449 continue;
1450 else
1451 {
1452 WARN(("pfnResize failed %d\n", rc));
1453 break;
1454 }
1455 }
1456 else
1457 {
1458 WARN(("VBVAInfoScreen failed %d\n", rc));
1459 break;
1460 }
1461 }
1462 else
1463 {
1464 WARN(("VBVAInfoView failed %d\n", rc));
1465 break;
1466 }
1467 }
1468 return rc;
1469 }
1470 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE:
1471 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED:
1472 {
1473 VBVAENABLE *pEnable = (VBVAENABLE *)pCmd->u.cmd.pu8Cmd;
1474 Assert(pCmd->u.cmd.cbCmd == sizeof (VBVAENABLE));
1475 uint32_t u32Offset = pEnable->u32Offset;
1476 int rc = vdmaVBVAEnableProcess(pVdma, u32Offset);
1477 if (!RT_SUCCESS(rc))
1478 {
1479 WARN(("vdmaVBVAEnableProcess failed %d\n", rc));
1480 return rc;
1481 }
1482
1483 if (enmType == VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED)
1484 {
1485 rc = VBoxVBVAExHPPause(&pVdma->CmdVbva);
1486 if (!RT_SUCCESS(rc))
1487 {
1488 WARN(("VBoxVBVAExHPPause failed %d\n", rc));
1489 return rc;
1490 }
1491 }
1492
1493 return VINF_SUCCESS;
1494 }
1495 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1496 {
1497 int rc = vdmaVBVADisableProcess(pVdma, true);
1498 if (RT_FAILURE(rc))
1499 {
1500 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1501 return rc;
1502 }
1503
1504 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
1505 }
1506 default:
1507 WARN(("unexpected ctl type %d\n", pCmd->enmType));
1508 return VERR_INVALID_PARAMETER;
1509 }
1510}
1511
1512/**
1513 * @param fIn - whether this is a page in or out op.
1514 * the direction is VRA#M - related, so fIn == true - transfer to VRAM); false - transfer from VRAM
1515 */
1516static int vboxVDMACrCmdVbvaProcessPagingEl(PPDMDEVINS pDevIns, VBOXCMDVBVAPAGEIDX iPage, uint8_t *pu8Vram, bool fIn)
1517{
1518 RTGCPHYS phPage = (RTGCPHYS)(iPage << PAGE_SHIFT);
1519 PGMPAGEMAPLOCK Lock;
1520 int rc;
1521
1522 if (fIn)
1523 {
1524 const void * pvPage;
1525 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPage, &Lock);
1526 if (!RT_SUCCESS(rc))
1527 {
1528 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d", rc));
1529 return rc;
1530 }
1531
1532 memcpy(pu8Vram, pvPage, PAGE_SIZE);
1533
1534 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1535 }
1536 else
1537 {
1538 void * pvPage;
1539 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvPage, &Lock);
1540 if (!RT_SUCCESS(rc))
1541 {
1542 WARN(("PDMDevHlpPhysGCPhys2CCPtr failed %d", rc));
1543 return rc;
1544 }
1545
1546 memcpy(pvPage, pu8Vram, PAGE_SIZE);
1547
1548 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1549 }
1550
1551 return VINF_SUCCESS;
1552}
1553
1554static int vboxVDMACrCmdVbvaProcessPagingEls(PPDMDEVINS pDevIns, const VBOXCMDVBVAPAGEIDX *piPages, uint32_t cPages, uint8_t *pu8Vram, bool fIn)
1555{
1556 for (uint32_t i = 0; i < cPages; ++i, pu8Vram += PAGE_SIZE)
1557 {
1558 int rc = vboxVDMACrCmdVbvaProcessPagingEl(pDevIns, piPages[i], pu8Vram, fIn);
1559 if (!RT_SUCCESS(rc))
1560 {
1561 WARN(("vboxVDMACrCmdVbvaProcessPagingEl failed %d", rc));
1562 return rc;
1563 }
1564 }
1565
1566 return VINF_SUCCESS;
1567}
1568
1569static int8_t vboxVDMACrCmdVbvaPagingDataInit(PVGASTATE pVGAState, const VBOXCMDVBVA_HDR *pHdr, const VBOXCMDVBVA_PAGING_TRANSFER_DATA *pData, uint32_t cbCmd,
1570 const VBOXCMDVBVAPAGEIDX **ppPages, VBOXCMDVBVAPAGEIDX *pcPages,
1571 uint8_t **ppu8Vram, bool *pfIn)
1572{
1573 if (cbCmd < sizeof (VBOXCMDVBVA_PAGING_TRANSFER))
1574 {
1575 WARN(("cmd too small"));
1576 return -1;
1577 }
1578
1579 VBOXCMDVBVAPAGEIDX cPages = cbCmd - RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers);
1580 if (cPages % sizeof (VBOXCMDVBVAPAGEIDX))
1581 {
1582 WARN(("invalid cmd size"));
1583 return -1;
1584 }
1585 cPages /= sizeof (VBOXCMDVBVAPAGEIDX);
1586
1587 VBOXCMDVBVAOFFSET offVRAM = pData->Alloc.u.offVRAM;
1588 if (offVRAM & PAGE_OFFSET_MASK)
1589 {
1590 WARN(("offVRAM address is not on page boundary\n"));
1591 return -1;
1592 }
1593 const VBOXCMDVBVAPAGEIDX *pPages = pData->aPageNumbers;
1594
1595 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1596 uint8_t *pu8VramMax = pu8VramBase + pVGAState->vram_size;
1597 if (offVRAM >= pVGAState->vram_size)
1598 {
1599 WARN(("invalid vram offset"));
1600 return -1;
1601 }
1602
1603 if (offVRAM + (cPages << PAGE_SHIFT) >= pVGAState->vram_size)
1604 {
1605 WARN(("invalid cPages"));
1606 return -1;
1607 }
1608
1609 uint8_t *pu8Vram = pu8VramBase + offVRAM;
1610 bool fIn = !!(pHdr->u8Flags & VBOXCMDVBVA_OPF_PAGING_TRANSFER_IN);
1611
1612 *ppPages = pPages;
1613 *pcPages = cPages;
1614 *ppu8Vram = pu8Vram;
1615 *pfIn = fIn;
1616 return 0;
1617}
1618
1619static int8_t vboxVDMACrCmdVbvaPagingFill(PVGASTATE pVGAState, VBOXCMDVBVA_PAGING_FILL *pFill)
1620{
1621 VBOXCMDVBVAOFFSET offVRAM = pFill->offVRAM;
1622 if (offVRAM & PAGE_OFFSET_MASK)
1623 {
1624 WARN(("offVRAM address is not on page boundary\n"));
1625 return -1;
1626 }
1627
1628 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1629 uint8_t *pu8VramMax = pu8VramBase + pVGAState->vram_size;
1630 if (offVRAM >= pVGAState->vram_size)
1631 {
1632 WARN(("invalid vram offset"));
1633 return -1;
1634 }
1635
1636 uint32_t cbFill = pFill->u32CbFill;
1637
1638 if (offVRAM + cbFill >= pVGAState->vram_size)
1639 {
1640 WARN(("invalid cPages"));
1641 return -1;
1642 }
1643
1644 uint32_t *pu32Vram = (uint32_t*)(pu8VramBase + offVRAM);
1645 uint32_t u32Color = pFill->u32Pattern;
1646
1647 Assert(!(cbFill % 4));
1648 for (uint32_t i = 0; i < cbFill / 4; ++i)
1649 {
1650 pu32Vram[i] = u32Color;
1651 }
1652
1653 return 0;
1654}
1655
/**
 * Dispatches a fully-assembled command VBVA command body: NOPs, paging
 * transfers and fills are handled here, everything else is forwarded to the
 * chromium server's pfnCmd.
 *
 * @returns 0 on success, -1 on failure, or the backend's own int8 result for
 *          forwarded commands.
 */
static int8_t vboxVDMACrCmdVbvaProcessCmdData(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
{
    switch (pCmd->u8OpCode)
    {
        case VBOXCMDVBVA_OPTYPE_NOPCMD:
            return 0;
        case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
        {
            PVGASTATE pVGAState = pVdma->pVGAState;
            const VBOXCMDVBVAPAGEIDX *pPages;
            uint32_t cPages;
            uint8_t *pu8Vram;
            bool fIn;
            /* validate the command and pull out page list/target/direction */
            int8_t i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pCmd, &((VBOXCMDVBVA_PAGING_TRANSFER*)pCmd)->Data, cbCmd,
                                                                &pPages, &cPages,
                                                                &pu8Vram, &fIn);
            if (i8Result < 0)
            {
                WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
                return i8Result;
            }

            PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
            int rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pPages, cPages, pu8Vram, fIn);
            if (!RT_SUCCESS(rc))
            {
                WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
                return -1;
            }

            return 0;
        }
        case VBOXCMDVBVA_OPTYPE_PAGING_FILL:
        {
            PVGASTATE pVGAState = pVdma->pVGAState;
            if (cbCmd != sizeof (VBOXCMDVBVA_PAGING_FILL))
            {
                WARN(("cmd too small"));
                return -1;
            }

            return vboxVDMACrCmdVbvaPagingFill(pVGAState, (VBOXCMDVBVA_PAGING_FILL*)pCmd);
        }
        default:
            /* everything else is interpreted by the chromium backend */
            return pVdma->CrSrvInfo.pfnCmd(pVdma->CrSrvInfo.hSvr, pCmd, cbCmd);
    }
}
1703
1704#if 0
1705typedef struct VBOXCMDVBVA_PAGING_TRANSFER
1706{
1707 VBOXCMDVBVA_HDR Hdr;
1708 /* for now can only contain offVRAM.
1709 * paging transfer can NOT be initiated for allocations having host 3D object (hostID) associated */
1710 VBOXCMDVBVA_ALLOCINFO Alloc;
1711 uint32_t u32Reserved;
1712 VBOXCMDVBVA_SYSMEMEL aSysMem[1];
1713} VBOXCMDVBVA_PAGING_TRANSFER;
1714#endif
1715
1716AssertCompile(sizeof (VBOXCMDVBVA_HDR) == 8);
1717AssertCompile(sizeof (VBOXCMDVBVA_ALLOCINFO) == 4);
1718AssertCompile(sizeof (VBOXCMDVBVAPAGEIDX) == 4);
1719AssertCompile(!(PAGE_SIZE % sizeof (VBOXCMDVBVAPAGEIDX)));
1720
1721#define VBOXCMDVBVA_NUM_SYSMEMEL_PER_PAGE (PAGE_SIZE / sizeof (VBOXCMDVBVA_SYSMEMEL))
1722
1723static int8_t vboxVDMACrCmdVbvaProcess(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
1724{
1725 switch (pCmd->u8OpCode)
1726 {
1727 case VBOXCMDVBVA_OPTYPE_SYSMEMCMD:
1728 {
1729 if (cbCmd < sizeof (VBOXCMDVBVA_SYSMEMCMD))
1730 {
1731 WARN(("invalid command size"));
1732 return -1;
1733 }
1734 VBOXCMDVBVA_SYSMEMCMD *pSysmemCmd = (VBOXCMDVBVA_SYSMEMCMD*)pCmd;
1735 const VBOXCMDVBVA_HDR *pRealCmdHdr;
1736 uint32_t cbRealCmd = pCmd->u8Flags;
1737 cbRealCmd |= (pCmd->u.u8PrimaryID << 8);
1738 if (cbRealCmd < sizeof (VBOXCMDVBVA_HDR))
1739 {
1740 WARN(("invalid sysmem cmd size"));
1741 return -1;
1742 }
1743
1744 RTGCPHYS phCmd = (RTGCPHYS)pSysmemCmd->phCmd;
1745
1746 PGMPAGEMAPLOCK Lock;
1747 PVGASTATE pVGAState = pVdma->pVGAState;
1748 PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
1749 const void * pvCmd;
1750 int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
1751 if (!RT_SUCCESS(rc))
1752 {
1753 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
1754 return -1;
1755 }
1756
1757 Assert((phCmd & PAGE_OFFSET_MASK) == (((uintptr_t)pvCmd) & PAGE_OFFSET_MASK));
1758
1759 uint32_t cbCmdPart = PAGE_SIZE - (((uintptr_t)pvCmd) & PAGE_OFFSET_MASK);
1760
1761 if (cbRealCmd <= cbCmdPart)
1762 {
1763 pRealCmdHdr = (const VBOXCMDVBVA_HDR *)pvCmd;
1764 uint8_t i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pRealCmdHdr, cbRealCmd);
1765 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1766 return i8Result;
1767 }
1768
1769 VBOXCMDVBVA_HDR Hdr;
1770 const void *pvCurCmdTail;
1771 uint32_t cbCurCmdTail;
1772 if (cbCmdPart >= sizeof (*pRealCmdHdr))
1773 {
1774 pRealCmdHdr = (const VBOXCMDVBVA_HDR *)pvCmd;
1775 pvCurCmdTail = (const void*)(pRealCmdHdr + 1);
1776 cbCurCmdTail = cbCmdPart - sizeof (*pRealCmdHdr);
1777 }
1778 else
1779 {
1780 memcpy(&Hdr, pvCmd, cbCmdPart);
1781 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1782 phCmd += cbCmdPart;
1783 Assert(!(phCmd & PAGE_OFFSET_MASK));
1784 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
1785 if (!RT_SUCCESS(rc))
1786 {
1787 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
1788 return -1;
1789 }
1790
1791 cbCmdPart = sizeof (*pRealCmdHdr) - cbCmdPart;
1792 memcpy(((uint8_t*)(&Hdr)) + cbCmdPart, pvCmd, cbCmdPart);
1793 pvCurCmdTail = (const void*)(((uint8_t*)pvCmd) + cbCmdPart);
1794 cbCurCmdTail = PAGE_SIZE - cbCmdPart;
1795 }
1796
1797 if (cbCurCmdTail > cbRealCmd - sizeof (*pRealCmdHdr))
1798 cbCurCmdTail = cbRealCmd - sizeof (*pRealCmdHdr);
1799
1800 int8_t i8Result = 0;
1801
1802 switch (pRealCmdHdr->u8OpCode)
1803 {
1804 case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
1805 {
1806 const uint32_t *pPages;
1807 uint32_t cPages;
1808 uint8_t *pu8Vram;
1809 bool fIn;
1810 i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pRealCmdHdr, (const VBOXCMDVBVA_PAGING_TRANSFER_DATA*)pvCurCmdTail, cbRealCmd,
1811 &pPages, &cPages,
1812 &pu8Vram, &fIn);
1813 if (i8Result < 0)
1814 {
1815 WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
1816 /* we need to break, not return, to ensure currently locked page is released */
1817 break;
1818 }
1819
1820 if (cbCurCmdTail & 3)
1821 {
1822 WARN(("command is not alligned properly %d", cbCurCmdTail));
1823 i8Result = -1;
1824 /* we need to break, not return, to ensure currently locked page is released */
1825 break;
1826 }
1827
1828 uint32_t cCurPages = cbCurCmdTail / sizeof (VBOXCMDVBVAPAGEIDX);
1829 Assert(cCurPages < cPages);
1830
1831 do
1832 {
1833 rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pPages, cCurPages, pu8Vram, fIn);
1834 if (!RT_SUCCESS(rc))
1835 {
1836 WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
1837 i8Result = -1;
1838 /* we need to break, not return, to ensure currently locked page is released */
1839 break;
1840 }
1841
1842 Assert(cPages >= cCurPages);
1843 cPages -= cCurPages;
1844
1845 if (!cPages)
1846 break;
1847
1848 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1849
1850 Assert(!(phCmd & PAGE_OFFSET_MASK));
1851
1852 phCmd += PAGE_SIZE;
1853 pu8Vram += (cCurPages << PAGE_SHIFT);
1854
1855 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
1856 if (!RT_SUCCESS(rc))
1857 {
1858 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
1859 /* the page is not locked, return */
1860 return -1;
1861 }
1862
1863 cCurPages = PAGE_SIZE / sizeof (VBOXCMDVBVAPAGEIDX);
1864 if (cCurPages > cPages)
1865 cCurPages = cPages;
1866 } while (1);
1867 break;
1868 }
1869 default:
1870 WARN(("command can not be splitted"));
1871 i8Result = -1;
1872 break;
1873 }
1874
1875 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1876 return i8Result;
1877 }
1878 case VBOXCMDVBVA_OPTYPE_COMPLEXCMD:
1879 {
1880 Assert(cbCmd >= sizeof (VBOXCMDVBVA_HDR));
1881 ++pCmd;
1882 cbCmd -= sizeof (*pCmd);
1883 uint32_t cbCurCmd = 0;
1884 for ( ; cbCmd; cbCmd -= cbCurCmd, pCmd = (VBOXCMDVBVA_HDR*)(((uint8_t*)pCmd) + cbCurCmd))
1885 {
1886 if (cbCmd < sizeof (VBOXCMDVBVA_HDR))
1887 {
1888 WARN(("invalid command size"));
1889 return -1;
1890 }
1891
1892 cbCurCmd = pCmd->u2.complexCmdEl.u16CbCmdHost;
1893 if (cbCmd < cbCurCmd)
1894 {
1895 WARN(("invalid command size"));
1896 return -1;
1897 }
1898
1899 int8_t i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCurCmd);
1900 if (i8Result < 0)
1901 {
1902 WARN(("vboxVDMACrCmdVbvaProcess failed"));
1903 return i8Result;
1904 }
1905 }
1906 return 0;
1907 }
1908 default:
1909 return vboxVDMACrCmdVbvaProcessCmdData(pVdma, pCmd, cbCmd);
1910 }
1911}
1912
/**
 * Processes one guest command from the command VBVA ring: skips NOPs,
 * atomically claims the command (SUBMITTED -> IN_PROGRESS, bailing out if the
 * guest cancelled it meanwhile) and stores the processing result back into
 * the command header for the guest to pick up.
 */
static void vboxVDMACrCmdProcess(struct VBOXVDMAHOST *pVdma, uint8_t* pu8Cmd, uint32_t cbCmd)
{
    if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
        return;

    if (cbCmd < sizeof (VBOXCMDVBVA_HDR))
    {
        WARN(("invalid command size"));
        return;
    }

    PVBOXCMDVBVA_HDR pCmd = (PVBOXCMDVBVA_HDR)pu8Cmd;

    /* check if the command is cancelled */
    if (!ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_IN_PROGRESS, VBOXCMDVBVA_STATE_SUBMITTED))
    {
        Assert(pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED);
        return;
    }

    pCmd->u.i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCmd);
}
1935
/**
 * Sends the CRHGSMI_SETUP control to the chromium backend, passing the VRAM
 * location/size and receiving the server's command entry points (CrSrvInfo)
 * in return. On any failure CrSrvInfo is zeroed.
 *
 * @returns VBox status code (VERR_NOT_SUPPORTED is a valid "no 3D" outcome).
 */
static int vboxVDMACrCtlHgsmiSetup(struct VBOXVDMAHOST *pVdma)
{
    PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP pCmd = (PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP)
            vboxVDMACrCtlCreate (VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP, sizeof (*pCmd));
    int rc = VERR_NO_MEMORY;
    if (pCmd)
    {
        PVGASTATE pVGAState = pVdma->pVGAState;
        pCmd->pvVRamBase = pVGAState->vram_ptrR3;
        pCmd->cbVRam = pVGAState->vram_size;
        rc = vboxVDMACrCtlPost(pVGAState, &pCmd->Hdr, sizeof (*pCmd));
        if (RT_SUCCESS(rc))
        {
            /* submission succeeded; now check the command's own status */
            rc = vboxVDMACrCtlGetRc(&pCmd->Hdr);
            if (RT_SUCCESS(rc))
                pVdma->CrSrvInfo = pCmd->CrCmdServerInfo;
            else if (rc != VERR_NOT_SUPPORTED)
                WARN(("vboxVDMACrCtlGetRc returned %d\n", rc));
        }
        else
            WARN(("vboxVDMACrCtlPost failed %d\n", rc));

        vboxVDMACrCtlRelease(&pCmd->Hdr);
    }

    /* no backend entry points on failure: make sure nothing stale is left */
    if (!RT_SUCCESS(rc))
        memset(&pVdma->CrSrvInfo, 0, sizeof (pVdma->CrSrvInfo));

    return rc;
}
1966
1967static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer);
1968
1969/* check if this is external cmd to be passed to chromium backend */
/* check if this is external cmd to be passed to chromium backend */
/**
 * Inspects a VDMA command buffer descriptor and, for chromium commands,
 * forwards them to the 3D backend (completed asynchronously); BPB transfers
 * are executed inline and completed here. Anything else returns
 * VINF_NOT_SUPPORTED so the regular VDMA path handles it.
 *
 * @returns VINF_SUCCESS when the command was consumed here (or handed to the
 *          backend), VINF_NOT_SUPPORTED to fall through to regular handling,
 *          VERR_INVALID_PARAMETER on malformed buffers.
 */
static int vboxVDMACmdCheckCrCmd(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmdDr, uint32_t cbCmdDr)
{
    PVBOXVDMACMD pDmaCmd = NULL;
    uint32_t cbDmaCmd = 0;
    uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
    int rc = VINF_NOT_SUPPORTED;

    cbDmaCmd = pCmdDr->cbBuf;

    if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
    {
        /* command body is appended right after the descriptor */
        if (cbCmdDr < sizeof (*pCmdDr) + VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        if (cbDmaCmd < cbCmdDr - sizeof (*pCmdDr) - VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid command buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        pDmaCmd = VBOXVDMACBUF_DR_TAIL(pCmdDr, VBOXVDMACMD);
    }
    else if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
    {
        /* command body lives in VRAM at the given offset */
        VBOXVIDEOOFFSET offBuf = pCmdDr->Location.offVramBuf;
        if (offBuf + cbDmaCmd > pVdma->pVGAState->vram_size)
        {
            AssertMsgFailed(("invalid command buffer data from offset!"));
            return VERR_INVALID_PARAMETER;
        }
        pDmaCmd = (VBOXVDMACMD*)(pvRam + offBuf);
    }

    if (pDmaCmd)
    {
        Assert(cbDmaCmd >= VBOXVDMACMD_HEADER_SIZE());
        uint32_t cbBody = VBOXVDMACMD_BODY_SIZE(cbDmaCmd);

        switch (pDmaCmd->enmType)
        {
            case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
            {
                PVBOXVDMACMD_CHROMIUM_CMD pCrCmd = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_CHROMIUM_CMD);
                if (cbBody < sizeof (*pCrCmd))
                {
                    AssertMsgFailed(("invalid chromium command buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }
                PVGASTATE pVGAState = pVdma->pVGAState;
                rc = VINF_SUCCESS;
                if (pVGAState->pDrv->pfnCrHgsmiCommandProcess)
                {
                    /* backend completes the command asynchronously via
                     * vboxVDMACrHgsmiCommandCompleteAsync */
                    VBoxSHGSMICommandMarkAsynchCompletion(pCmdDr);
                    pVGAState->pDrv->pfnCrHgsmiCommandProcess(pVGAState->pDrv, pCrCmd, cbBody);
                    break;
                }
                else
                {
                    Assert(0);
                }

                /* no backend: complete the command immediately */
                int tmpRc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                AssertRC(tmpRc);
                break;
            }
            case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
            {
                PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
                if (cbBody < sizeof (*pTransfer))
                {
                    AssertMsgFailed(("invalid bpb transfer buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }

                /* executed synchronously; complete right away on success */
                rc = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, sizeof (*pTransfer));
                AssertRC(rc);
                if (RT_SUCCESS(rc))
                {
                    pCmdDr->rc = VINF_SUCCESS;
                    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                    AssertRC(rc);
                    rc = VINF_SUCCESS;
                }
                break;
            }
            default:
                break;
        }
    }
    return rc;
}
2064
/**
 * Completion entry point used by the 3D backend for chromium commands handed
 * out by vboxVDMACmdCheckCrCmd: walks back from the command body to the
 * enclosing SHGSMI descriptor, records the status and completes it.
 *
 * @returns Status of the SHGSMI completion.
 */
int vboxVDMACrHgsmiCommandCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc)
{
    PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
    PHGSMIINSTANCE pIns = pVGAState->pHGSMI;
    /* recover the enclosing DMA command header and its buffer descriptor */
    VBOXVDMACMD *pDmaHdr = VBOXVDMACMD_FROM_BODY(pCmd);
    VBOXVDMACBUF_DR *pDr = VBOXVDMACBUF_DR_FROM_TAIL(pDmaHdr);
    AssertRC(rc);
    pDr->rc = rc;

    Assert(pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
    rc = VBoxSHGSMICommandComplete(pIns, pDr);
    AssertRC(rc);
    return rc;
}
2079
2080int vboxVDMACrHgsmiControlCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CTL pCmd, int rc)
2081{
2082 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2083 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pCmdPrivate = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
2084 pCmdPrivate->rc = rc;
2085 if (pCmdPrivate->pfnCompletion)
2086 {
2087 pCmdPrivate->pfnCompletion(pVGAState, pCmd, pCmdPrivate->pvCompletion);
2088 }
2089 return VINF_SUCCESS;
2090}
2091
2092static int vboxVDMACmdExecBltPerform(PVBOXVDMAHOST pVdma,
2093 uint8_t *pvDstSurf, const uint8_t *pvSrcSurf,
2094 const PVBOXVDMA_SURF_DESC pDstDesc, const PVBOXVDMA_SURF_DESC pSrcDesc,
2095 const VBOXVDMA_RECTL * pDstRectl, const VBOXVDMA_RECTL * pSrcRectl)
2096{
2097 /* we do not support color conversion */
2098 Assert(pDstDesc->format == pSrcDesc->format);
2099 /* we do not support stretching */
2100 Assert(pDstRectl->height == pSrcRectl->height);
2101 Assert(pDstRectl->width == pSrcRectl->width);
2102 if (pDstDesc->format != pSrcDesc->format)
2103 return VERR_INVALID_FUNCTION;
2104 if (pDstDesc->width == pDstRectl->width
2105 && pSrcDesc->width == pSrcRectl->width
2106 && pSrcDesc->width == pDstDesc->width)
2107 {
2108 Assert(!pDstRectl->left);
2109 Assert(!pSrcRectl->left);
2110 uint32_t cbOff = pDstDesc->pitch * pDstRectl->top;
2111 uint32_t cbSize = pDstDesc->pitch * pDstRectl->height;
2112 memcpy(pvDstSurf + cbOff, pvSrcSurf + cbOff, cbSize);
2113 }
2114 else
2115 {
2116 uint32_t offDstLineStart = pDstRectl->left * pDstDesc->bpp >> 3;
2117 uint32_t offDstLineEnd = ((pDstRectl->left * pDstDesc->bpp + 7) >> 3) + ((pDstDesc->bpp * pDstRectl->width + 7) >> 3);
2118 uint32_t cbDstLine = offDstLineEnd - offDstLineStart;
2119 uint32_t offDstStart = pDstDesc->pitch * pDstRectl->top + offDstLineStart;
2120 Assert(cbDstLine <= pDstDesc->pitch);
2121 uint32_t cbDstSkip = pDstDesc->pitch;
2122 uint8_t * pvDstStart = pvDstSurf + offDstStart;
2123
2124 uint32_t offSrcLineStart = pSrcRectl->left * pSrcDesc->bpp >> 3;
2125 uint32_t offSrcLineEnd = ((pSrcRectl->left * pSrcDesc->bpp + 7) >> 3) + ((pSrcDesc->bpp * pSrcRectl->width + 7) >> 3);
2126 uint32_t cbSrcLine = offSrcLineEnd - offSrcLineStart;
2127 uint32_t offSrcStart = pSrcDesc->pitch * pSrcRectl->top + offSrcLineStart;
2128 Assert(cbSrcLine <= pSrcDesc->pitch);
2129 uint32_t cbSrcSkip = pSrcDesc->pitch;
2130 const uint8_t * pvSrcStart = pvSrcSurf + offSrcStart;
2131
2132 Assert(cbDstLine == cbSrcLine);
2133
2134 for (uint32_t i = 0; ; ++i)
2135 {
2136 memcpy (pvDstStart, pvSrcStart, cbDstLine);
2137 if (i == pDstRectl->height)
2138 break;
2139 pvDstStart += cbDstSkip;
2140 pvSrcStart += cbSrcSkip;
2141 }
2142 }
2143 return VINF_SUCCESS;
2144}
2145
2146static void vboxVDMARectlUnite(VBOXVDMA_RECTL * pRectl1, const VBOXVDMA_RECTL * pRectl2)
2147{
2148 if (!pRectl1->width)
2149 *pRectl1 = *pRectl2;
2150 else
2151 {
2152 int16_t x21 = pRectl1->left + pRectl1->width;
2153 int16_t x22 = pRectl2->left + pRectl2->width;
2154 if (pRectl1->left > pRectl2->left)
2155 {
2156 pRectl1->left = pRectl2->left;
2157 pRectl1->width = x21 < x22 ? x22 - pRectl1->left : x21 - pRectl1->left;
2158 }
2159 else if (x21 < x22)
2160 pRectl1->width = x22 - pRectl1->left;
2161
2162 x21 = pRectl1->top + pRectl1->height;
2163 x22 = pRectl2->top + pRectl2->height;
2164 if (pRectl1->top > pRectl2->top)
2165 {
2166 pRectl1->top = pRectl2->top;
2167 pRectl1->height = x21 < x22 ? x22 - pRectl1->top : x21 - pRectl1->top;
2168 }
2169 else if (x21 < x22)
2170 pRectl1->height = x22 - pRectl1->top;
2171 }
2172}
2173
2174/*
2175 * @return on success the number of bytes the command contained, otherwise - VERR_xxx error code
2176 */
2177static int vboxVDMACmdExecBlt(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt, uint32_t cbBuffer)
2178{
2179 const uint32_t cbBlt = VBOXVDMACMD_BODY_FIELD_OFFSET(uint32_t, VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects[pBlt->cDstSubRects]);
2180 Assert(cbBlt <= cbBuffer);
2181 if (cbBuffer < cbBlt)
2182 return VERR_INVALID_FUNCTION;
2183
2184 /* we do not support stretching for now */
2185 Assert(pBlt->srcRectl.width == pBlt->dstRectl.width);
2186 Assert(pBlt->srcRectl.height == pBlt->dstRectl.height);
2187 if (pBlt->srcRectl.width != pBlt->dstRectl.width)
2188 return VERR_INVALID_FUNCTION;
2189 if (pBlt->srcRectl.height != pBlt->dstRectl.height)
2190 return VERR_INVALID_FUNCTION;
2191 Assert(pBlt->cDstSubRects);
2192
2193 uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
2194 VBOXVDMA_RECTL updateRectl = {0, 0, 0, 0};
2195
2196 if (pBlt->cDstSubRects)
2197 {
2198 VBOXVDMA_RECTL dstRectl, srcRectl;
2199 const VBOXVDMA_RECTL *pDstRectl, *pSrcRectl;
2200 for (uint32_t i = 0; i < pBlt->cDstSubRects; ++i)
2201 {
2202 pDstRectl = &pBlt->aDstSubRects[i];
2203 if (pBlt->dstRectl.left || pBlt->dstRectl.top)
2204 {
2205 dstRectl.left = pDstRectl->left + pBlt->dstRectl.left;
2206 dstRectl.top = pDstRectl->top + pBlt->dstRectl.top;
2207 dstRectl.width = pDstRectl->width;
2208 dstRectl.height = pDstRectl->height;
2209 pDstRectl = &dstRectl;
2210 }
2211
2212 pSrcRectl = &pBlt->aDstSubRects[i];
2213 if (pBlt->srcRectl.left || pBlt->srcRectl.top)
2214 {
2215 srcRectl.left = pSrcRectl->left + pBlt->srcRectl.left;
2216 srcRectl.top = pSrcRectl->top + pBlt->srcRectl.top;
2217 srcRectl.width = pSrcRectl->width;
2218 srcRectl.height = pSrcRectl->height;
2219 pSrcRectl = &srcRectl;
2220 }
2221
2222 int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
2223 &pBlt->dstDesc, &pBlt->srcDesc,
2224 pDstRectl,
2225 pSrcRectl);
2226 AssertRC(rc);
2227 if (!RT_SUCCESS(rc))
2228 return rc;
2229
2230 vboxVDMARectlUnite(&updateRectl, pDstRectl);
2231 }
2232 }
2233 else
2234 {
2235 int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
2236 &pBlt->dstDesc, &pBlt->srcDesc,
2237 &pBlt->dstRectl,
2238 &pBlt->srcRectl);
2239 AssertRC(rc);
2240 if (!RT_SUCCESS(rc))
2241 return rc;
2242
2243 vboxVDMARectlUnite(&updateRectl, &pBlt->dstRectl);
2244 }
2245
2246 return cbBlt;
2247}
2248
2249static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer)
2250{
2251 if (cbBuffer < sizeof (*pTransfer))
2252 return VERR_INVALID_PARAMETER;
2253
2254 PVGASTATE pVGAState = pVdma->pVGAState;
2255 uint8_t * pvRam = pVGAState->vram_ptrR3;
2256 PGMPAGEMAPLOCK SrcLock;
2257 PGMPAGEMAPLOCK DstLock;
2258 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
2259 const void * pvSrc;
2260 void * pvDst;
2261 int rc = VINF_SUCCESS;
2262 uint32_t cbTransfer = pTransfer->cbTransferSize;
2263 uint32_t cbTransfered = 0;
2264 bool bSrcLocked = false;
2265 bool bDstLocked = false;
2266 do
2267 {
2268 uint32_t cbSubTransfer = cbTransfer;
2269 if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
2270 {
2271 pvSrc = pvRam + pTransfer->Src.offVramBuf + cbTransfered;
2272 }
2273 else
2274 {
2275 RTGCPHYS phPage = pTransfer->Src.phBuf;
2276 phPage += cbTransfered;
2277 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvSrc, &SrcLock);
2278 AssertRC(rc);
2279 if (RT_SUCCESS(rc))
2280 {
2281 bSrcLocked = true;
2282 cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
2283 }
2284 else
2285 {
2286 break;
2287 }
2288 }
2289
2290 if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
2291 {
2292 pvDst = pvRam + pTransfer->Dst.offVramBuf + cbTransfered;
2293 }
2294 else
2295 {
2296 RTGCPHYS phPage = pTransfer->Dst.phBuf;
2297 phPage += cbTransfered;
2298 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvDst, &DstLock);
2299 AssertRC(rc);
2300 if (RT_SUCCESS(rc))
2301 {
2302 bDstLocked = true;
2303 cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
2304 }
2305 else
2306 {
2307 break;
2308 }
2309 }
2310
2311 if (RT_SUCCESS(rc))
2312 {
2313 memcpy(pvDst, pvSrc, cbSubTransfer);
2314 cbTransfer -= cbSubTransfer;
2315 cbTransfered += cbSubTransfer;
2316 }
2317 else
2318 {
2319 cbTransfer = 0; /* to break */
2320 }
2321
2322 if (bSrcLocked)
2323 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &SrcLock);
2324 if (bDstLocked)
2325 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &DstLock);
2326 } while (cbTransfer);
2327
2328 if (RT_SUCCESS(rc))
2329 return sizeof (*pTransfer);
2330 return rc;
2331}
2332
2333static int vboxVDMACmdExec(PVBOXVDMAHOST pVdma, const uint8_t *pvBuffer, uint32_t cbBuffer)
2334{
2335 do
2336 {
2337 Assert(pvBuffer);
2338 Assert(cbBuffer >= VBOXVDMACMD_HEADER_SIZE());
2339
2340 if (!pvBuffer)
2341 return VERR_INVALID_PARAMETER;
2342 if (cbBuffer < VBOXVDMACMD_HEADER_SIZE())
2343 return VERR_INVALID_PARAMETER;
2344
2345 PVBOXVDMACMD pCmd = (PVBOXVDMACMD)pvBuffer;
2346 uint32_t cbCmd = 0;
2347 switch (pCmd->enmType)
2348 {
2349 case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
2350 {
2351#ifdef VBOXWDDM_TEST_UHGSMI
2352 static int count = 0;
2353 static uint64_t start, end;
2354 if (count==0)
2355 {
2356 start = RTTimeNanoTS();
2357 }
2358 ++count;
2359 if (count==100000)
2360 {
2361 end = RTTimeNanoTS();
2362 float ems = (end-start)/1000000.f;
2363 LogRel(("100000 calls took %i ms, %i cps\n", (int)ems, (int)(100000.f*1000.f/ems) ));
2364 }
2365#endif
2366 /* todo: post the buffer to chromium */
2367 return VINF_SUCCESS;
2368 }
2369 case VBOXVDMACMD_TYPE_DMA_PRESENT_BLT:
2370 {
2371 const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_PRESENT_BLT);
2372 int cbBlt = vboxVDMACmdExecBlt(pVdma, pBlt, cbBuffer);
2373 Assert(cbBlt >= 0);
2374 Assert((uint32_t)cbBlt <= cbBuffer);
2375 if (cbBlt >= 0)
2376 {
2377 if ((uint32_t)cbBlt == cbBuffer)
2378 return VINF_SUCCESS;
2379 else
2380 {
2381 cbBuffer -= (uint32_t)cbBlt;
2382 pvBuffer -= cbBlt;
2383 }
2384 }
2385 else
2386 return cbBlt; /* error */
2387 break;
2388 }
2389 case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
2390 {
2391 const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
2392 int cbTransfer = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, cbBuffer);
2393 Assert(cbTransfer >= 0);
2394 Assert((uint32_t)cbTransfer <= cbBuffer);
2395 if (cbTransfer >= 0)
2396 {
2397 if ((uint32_t)cbTransfer == cbBuffer)
2398 return VINF_SUCCESS;
2399 else
2400 {
2401 cbBuffer -= (uint32_t)cbTransfer;
2402 pvBuffer -= cbTransfer;
2403 }
2404 }
2405 else
2406 return cbTransfer; /* error */
2407 break;
2408 }
2409 case VBOXVDMACMD_TYPE_DMA_NOP:
2410 return VINF_SUCCESS;
2411 case VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ:
2412 return VINF_SUCCESS;
2413 default:
2414 AssertBreakpoint();
2415 return VERR_INVALID_FUNCTION;
2416 }
2417 } while (1);
2418
2419 /* we should not be here */
2420 AssertBreakpoint();
2421 return VERR_INVALID_STATE;
2422}
2423
/**
 * The command-VBVA worker thread: drains guest commands and host/guest
 * control requests from the shared VBVA context and dispatches them until
 * termination is requested.
 */
static DECLCALLBACK(int) vboxVDMAWorkerThread(RTTHREAD ThreadSelf, void *pvUser)
{
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvUser;
    PVGASTATE pVGAState = pVdma->pVGAState;
    VBVAEXHOSTCONTEXT *pCmdVbva = &pVdma->CmdVbva;
    PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi; /* NOTE(review): unused in this function. */
    uint8_t *pCmd;
    uint32_t cbCmd;
    int rc;

    /* Report successful startup to the thread creator. */
    VBoxVDMAThreadNotifyConstructSucceeded(&pVdma->Thread, pvUser);

    while (!VBoxVDMAThreadIsTerminating(&pVdma->Thread))
    {
        VBVAEXHOST_DATA_TYPE enmType = VBoxVBVAExHPDataGet(pCmdVbva, &pCmd, &cbCmd);
        switch (enmType)
        {
            case VBVAEXHOST_DATA_TYPE_CMD:
                /* Guest command: process, complete, signal the guest. */
                vboxVDMACrCmdProcess(pVdma, pCmd, cbCmd);
                VBoxVBVAExHPDataCompleteCmd(pCmdVbva, cbCmd);
                VBVARaiseIrqNoWait(pVGAState, 0);
                break;
            case VBVAEXHOST_DATA_TYPE_GUESTCTL:
                rc = vboxVDMACrGuestCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd);
                VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
                break;
            case VBVAEXHOST_DATA_TYPE_HOSTCTL:
            {
                bool fContinue = true;
                rc = vboxVDMACrHostCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd, &fContinue);
                VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
                if (fContinue)
                    break;
                /* fall through when !fContinue: wait for the next event like
                 * the no-data case; presumably intentional - the while
                 * condition above handles actual termination. Confirm. */
            }
            case VBVAEXHOST_DATA_TYPE_NO_DATA:
                rc = VBoxVDMAThreadEventWait(&pVdma->Thread, RT_INDEFINITE_WAIT);
                AssertRC(rc);
                break;
            default:
                WARN(("unexpected type %d\n", enmType));
                break;
        }
    }

    /* Acknowledge the termination request. */
    VBoxVDMAThreadNotifyTerminatingSucceeded(&pVdma->Thread, pvUser);

    return VINF_SUCCESS;
}
2472
/**
 * Resolves the location of a generic VDMA command buffer (inline after the
 * descriptor, at a VRAM offset, or in a guest-physical page), executes it
 * via vboxVDMACmdExec and completes the SHGSMI command with the result.
 */
static void vboxVDMACommandProcess(PVBOXVDMAHOST pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
    const uint8_t * pvBuf;
    PGMPAGEMAPLOCK Lock;
    int rc;
    bool bReleaseLocked = false;

    do
    {
        PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;

        if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
            pvBuf = VBOXVDMACBUF_DR_TAIL(pCmd, const uint8_t);
        else if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
        {
            uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
            pvBuf = pvRam + pCmd->Location.offVramBuf;
        }
        else
        {
            /* Guest-physical buffer: only a buffer fully contained in a
             * single page is supported here. */
            RTGCPHYS phPage = pCmd->Location.phBuf & ~0xfffULL;
            uint32_t offset = pCmd->Location.phBuf & 0xfff;
            Assert(offset + pCmd->cbBuf <= 0x1000);
            if (offset + pCmd->cbBuf > 0x1000)
            {
                /* @todo: more advanced mechanism of command buffer proc is actually needed */
                rc = VERR_INVALID_PARAMETER;
                break;
            }

            const void * pvPageBuf;
            rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPageBuf, &Lock);
            AssertRC(rc);
            if (!RT_SUCCESS(rc))
            {
                /* @todo: if (rc == VERR_PGM_PHYS_PAGE_RESERVED) -> fall back on using PGMPhysRead ?? */
                break;
            }

            pvBuf = (const uint8_t *)pvPageBuf;
            pvBuf += offset;

            bReleaseLocked = true;
        }

        rc = vboxVDMACmdExec(pVdma, pvBuf, pCmd->cbBuf);
        AssertRC(rc);

        /* Drop the page-mapping lock taken for the guest-physical case. */
        if (bReleaseLocked)
            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
    } while (0);

    /* Report the execution status back to the guest. */
    pCmd->rc = rc;

    rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
    AssertRC(rc);
}
2531
2532static void vboxVDMAControlProcess(PVBOXVDMAHOST pVdma, PVBOXVDMA_CTL pCmd)
2533{
2534 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
2535 pCmd->i32Result = VINF_SUCCESS;
2536 int rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
2537 AssertRC(rc);
2538}
2539
2540#endif /* #ifdef VBOX_WITH_CRHGSMI */
2541
2542#ifdef VBOX_VDMA_WITH_WATCHDOG
2543static DECLCALLBACK(void) vboxVDMAWatchDogTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
2544{
2545 VBOXVDMAHOST *pVdma = (VBOXVDMAHOST *)pvUser;
2546 PVGASTATE pVGAState = pVdma->pVGAState;
2547 VBVARaiseIrq(pVGAState, HGSMIHOSTFLAGS_WATCHDOG);
2548}
2549
2550static int vboxVDMAWatchDogCtl(struct VBOXVDMAHOST *pVdma, uint32_t cMillis)
2551{
2552 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
2553 if (cMillis)
2554 TMTimerSetMillies(pVdma->WatchDogTimer, cMillis);
2555 else
2556 TMTimerStop(pVdma->WatchDogTimer);
2557 return VINF_SUCCESS;
2558}
2559#endif
2560
/**
 * Constructs the VDMA host state and attaches it to the VGA state.
 *
 * @returns VBox status code.
 * @param   pVGAState     The VGA device state; on success pVGAState->pVdma
 *                        points at the new instance.
 * @param   cPipeElements NOTE(review): unused in this revision; presumably a
 *                        leftover of the old pipe-based implementation -
 *                        confirm before removing.
 */
int vboxVDMAConstruct(PVGASTATE pVGAState, uint32_t cPipeElements)
{
    int rc;
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)RTMemAllocZ(sizeof(*pVdma));
    Assert(pVdma);
    if (pVdma)
    {
        pVdma->pHgsmi = pVGAState->pHGSMI;
        pVdma->pVGAState = pVGAState;

#ifdef VBOX_VDMA_WITH_WATCHDOG
        rc = PDMDevHlpTMTimerCreate(pVGAState->pDevInsR3, TMCLOCK_REAL, vboxVDMAWatchDogTimer,
                                        pVdma, TMTIMER_FLAGS_NO_CRIT_SECT,
                                        "VDMA WatchDog Timer", &pVdma->WatchDogTimer);
        AssertRC(rc);
#endif

#ifdef VBOX_WITH_CRHGSMI
        VBoxVDMAThreadInit(&pVdma->Thread);

        rc = RTSemEventMultiCreate(&pVdma->HostCrCtlCompleteEvent);
        if (RT_SUCCESS(rc))
        {
            rc = VBoxVBVAExHSInit(&pVdma->CmdVbva);
            if (RT_SUCCESS(rc))
            {
                pVGAState->pVdma = pVdma;
                int rcIgnored = vboxVDMACrCtlHgsmiSetup(pVdma); NOREF(rcIgnored); /** @todo is this ignoring intentional? */
                return VINF_SUCCESS;

                /* NOTE(review): unreachable - kept as the unwind step that
                 * would be needed were the success return above removed. */
                VBoxVBVAExHSTerm(&pVdma->CmdVbva);
            }
            else
                WARN(("VBoxVBVAExHSInit failed %d\n", rc));

            RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
        }
        else
            WARN(("RTSemEventMultiCreate failed %d\n", rc));


        RTMemFree(pVdma);
#else
        pVGAState->pVdma = pVdma;
        return VINF_SUCCESS;
#endif
    }
    else
        rc = VERR_OUT_OF_RESOURCES;

    return rc;
}
2613
/**
 * Device reset handler: synchronously disables command VBVA processing (when
 * CrHgsmi support is compiled in).
 */
int vboxVDMAReset(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    vdmaVBVACtlDisableSync(pVdma);
#endif
    return VINF_SUCCESS;
}
2621
/**
 * Destroys the VDMA host state: disables command VBVA, cleans up the worker
 * thread and releases all resources (teardown mirrors vboxVDMAConstruct in
 * reverse order).
 */
int vboxVDMADestruct(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    vdmaVBVACtlDisableSync(pVdma);
    VBoxVDMAThreadCleanup(&pVdma->Thread);
    VBoxVBVAExHSTerm(&pVdma->CmdVbva);
    RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
#endif
    RTMemFree(pVdma);
    return VINF_SUCCESS;
}
2633
2634void vboxVDMAControl(struct VBOXVDMAHOST *pVdma, PVBOXVDMA_CTL pCmd, uint32_t cbCmd)
2635{
2636 PHGSMIINSTANCE pIns = pVdma->pHgsmi;
2637
2638 switch (pCmd->enmCtl)
2639 {
2640 case VBOXVDMA_CTL_TYPE_ENABLE:
2641 pCmd->i32Result = VINF_SUCCESS;
2642 break;
2643 case VBOXVDMA_CTL_TYPE_DISABLE:
2644 pCmd->i32Result = VINF_SUCCESS;
2645 break;
2646 case VBOXVDMA_CTL_TYPE_FLUSH:
2647 pCmd->i32Result = VINF_SUCCESS;
2648 break;
2649#ifdef VBOX_VDMA_WITH_WATCHDOG
2650 case VBOXVDMA_CTL_TYPE_WATCHDOG:
2651 pCmd->i32Result = vboxVDMAWatchDogCtl(pVdma, pCmd->u32Offset);
2652 break;
2653#endif
2654 default:
2655 WARN(("cmd not supported"));
2656 pCmd->i32Result = VERR_NOT_SUPPORTED;
2657 }
2658
2659 int rc = VBoxSHGSMICommandComplete (pIns, pCmd);
2660 AssertRC(rc);
2661}
2662
/**
 * Entry point for a guest-submitted VDMA command buffer descriptor.
 *
 * Chromium commands are handed off for asynchronous processing (in which
 * case this returns without completing the command); everything else is
 * executed synchronously and completed here.
 */
void vboxVDMACommand(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    int rc = VERR_NOT_IMPLEMENTED;

#ifdef VBOX_WITH_CRHGSMI
    /* chromium commands are processed by crhomium hgcm thread independently from our internal cmd processing pipeline
     * this is why we process them specially */
    rc = vboxVDMACmdCheckCrCmd(pVdma, pCmd, cbCmd);
    if (rc == VINF_SUCCESS)
        return; /* handed off; completion happens asynchronously */

    if (RT_FAILURE(rc))
    {
        pCmd->rc = rc;
        rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
        AssertRC(rc);
        return;
    }

    /* Other VINF_* status: not a chromium command, process it inline. */
    vboxVDMACommandProcess(pVdma, pCmd, cbCmd);
#else
    pCmd->rc = rc;
    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
    AssertRC(rc);
#endif
}
2689
2690/**/
2691#ifdef VBOX_WITH_CRHGSMI
2692
2693static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext);
2694
2695static int vdmaVBVACtlSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2696{
2697 int rc = VBoxVBVAExHCtlSubmit(&pVdma->CmdVbva, pCtl, enmSource, pfnComplete, pvComplete);
2698 if (RT_SUCCESS(rc))
2699 {
2700 if (rc == VINF_SUCCESS)
2701 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
2702 else
2703 Assert(rc == VINF_ALREADY_INITIALIZED);
2704 }
2705 else
2706 Log(("VBoxVBVAExHCtlSubmit failed %d\n", rc));
2707
2708 return rc;
2709}
2710
2711static DECLCALLBACK(void) vboxCmdVBVACmdCtlGuestCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext)
2712{
2713 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvContext;
2714 VBOXCMDVBVA_CTL *pGCtl = (VBOXCMDVBVA_CTL*)(pCtl->u.cmd.pu8Cmd - sizeof (VBOXCMDVBVA_CTL));
2715 AssertRC(rc);
2716 pGCtl->i32Result = rc;
2717
2718 Assert(pVdma->pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
2719 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pGCtl);
2720 AssertRC(rc);
2721
2722 VBoxVBVAExHCtlFree(pVbva, pCtl);
2723}
2724
2725static int vdmaVBVACtlGenericSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_SOURCE enmSource, VBVAEXHOSTCTL_TYPE enmType, uint8_t* pu8Cmd, uint32_t cbCmd, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2726{
2727 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, enmType);
2728 if (!pHCtl)
2729 {
2730 WARN(("VBoxVBVAExHCtlCreate failed\n"));
2731 return VERR_NO_MEMORY;
2732 }
2733
2734 pHCtl->u.cmd.pu8Cmd = pu8Cmd;
2735 pHCtl->u.cmd.cbCmd = cbCmd;
2736 int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, enmSource, pfnComplete, pvComplete);
2737 if (RT_FAILURE(rc))
2738 {
2739 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
2740 Log(("vdmaVBVACtlSubmit failed rc %d\n", rc));
2741 return rc;;
2742 }
2743 return VINF_SUCCESS;
2744}
2745
2746static int vdmaVBVACtlGenericGuestSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_TYPE enmType, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
2747{
2748 Assert(cbCtl >= sizeof (VBOXCMDVBVA_CTL));
2749 VBoxSHGSMICommandMarkAsynchCompletion(pCtl);
2750 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_GUEST, enmType, (uint8_t*)(pCtl+1), cbCtl - sizeof (VBOXCMDVBVA_CTL), vboxCmdVBVACmdCtlGuestCompletion, pVdma);
2751 if (RT_SUCCESS(rc))
2752 return VINF_SUCCESS;
2753
2754 WARN(("vdmaVBVACtlGenericSubmit failed %d\n", rc));
2755 pCtl->i32Result = rc;
2756 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
2757 AssertRC(rc);
2758 return VINF_SUCCESS;
2759}
2760
2761static DECLCALLBACK(void) vboxCmdVBVACmdCtlHostCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvCompletion)
2762{
2763 VBOXCRCMDCTL* pVboxCtl = (VBOXCRCMDCTL*)pCtl->u.cmd.pu8Cmd;
2764 if (pVboxCtl->u.pfnInternal)
2765 ((PFNCRCTLCOMPLETION)pVboxCtl->u.pfnInternal)(pVboxCtl, pCtl->u.cmd.cbCmd, rc, pvCompletion);
2766 VBoxVBVAExHCtlFree(pVbva, pCtl);
2767}
2768
2769static int vdmaVBVACtlOpaqueHostSubmit(PVBOXVDMAHOST pVdma, struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
2770 PFNCRCTLCOMPLETION pfnCompletion,
2771 void *pvCompletion)
2772{
2773 pCmd->u.pfnInternal = (void(*)())pfnCompletion;
2774 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_HOST, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, (uint8_t*)pCmd, cbCmd, vboxCmdVBVACmdCtlHostCompletion, pvCompletion);
2775 if (RT_FAILURE(rc))
2776 {
2777 if (rc == VERR_INVALID_STATE)
2778 {
2779 pCmd->u.pfnInternal = NULL;
2780 PVGASTATE pVGAState = pVdma->pVGAState;
2781 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCmd, cbCmd, pfnCompletion, pvCompletion);
2782 if (!RT_SUCCESS(rc))
2783 WARN(("pfnCrHgsmiControlProcess failed %d\n", rc));
2784
2785 return rc;
2786 }
2787 WARN(("vdmaVBVACtlGenericSubmit failed %d\n", rc));
2788 return rc;
2789 }
2790
2791 return VINF_SUCCESS;
2792}
2793
2794static DECLCALLBACK(int) vdmaVBVANotifyEnable(PVGASTATE pVGAState)
2795{
2796 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
2797 {
2798 int rc = pVGAState->pDrv->pfnVBVAEnable (pVGAState->pDrv, i, NULL, true);
2799 if (!RT_SUCCESS(rc))
2800 {
2801 WARN(("pfnVBVAEnable failed %d\n", rc));
2802 for (uint32_t j = 0; j < i; j++)
2803 {
2804 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, j);
2805 }
2806
2807 return rc;
2808 }
2809 }
2810 return VINF_SUCCESS;
2811}
2812
2813static DECLCALLBACK(int) vdmaVBVANotifyDisable(PVGASTATE pVGAState)
2814{
2815 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
2816 {
2817 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, i);
2818 }
2819 return VINF_SUCCESS;
2820}
2821
/**
 * Invoked on the worker thread right after it was created (or failed to be
 * created): executes the pending enable/disable control and notifies Main
 * about the resulting VBVA state.
 */
static DECLCALLBACK(void) vdmaVBVACtlThreadCreatedEnable(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvContext)
{
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvThreadContext;
    VBVAEXHOSTCTL* pHCtl = (VBVAEXHOSTCTL*)pvContext;

    if (RT_SUCCESS(rc))
    {
        rc = vboxVDMACrGuestCtlProcess(pVdma, pHCtl);
        /* rc == VINF_SUCCESS would mean the actual state change has occcured */
        if (rc == VINF_SUCCESS)
        {
            /* we need to inform Main about VBVA enable/disable
             * main expects notifications to be done from the main thread
             * submit it there */
            PVGASTATE pVGAState = pVdma->pVGAState;

            if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
                vdmaVBVANotifyEnable(pVGAState);
            else
                vdmaVBVANotifyDisable(pVGAState);
        }
        else if (RT_FAILURE(rc))
            WARN(("vboxVDMACrGuestCtlProcess failed %d\n", rc));
    }
    else
        WARN(("vdmaVBVACtlThreadCreatedEnable is passed %d\n", rc));

    /* Complete the control with the final status in all cases. */
    VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pHCtl, rc);
}
2851
2852static int vdmaVBVACtlEnableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, bool fPaused, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2853{
2854 int rc;
2855 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, fPaused ? VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED : VBVAEXHOSTCTL_TYPE_GHH_ENABLE);
2856 if (pHCtl)
2857 {
2858 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
2859 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
2860 pHCtl->pfnComplete = pfnComplete;
2861 pHCtl->pvComplete = pvComplete;
2862
2863 rc = VBoxVDMAThreadCreate(&pVdma->Thread, vboxVDMAWorkerThread, pVdma, vdmaVBVACtlThreadCreatedEnable, pHCtl);
2864 if (RT_SUCCESS(rc))
2865 return VINF_SUCCESS;
2866 else
2867 WARN(("VBoxVDMAThreadCreate failed %d\n", rc));
2868
2869 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
2870 }
2871 else
2872 {
2873 WARN(("VBoxVBVAExHCtlCreate failed\n"));
2874 rc = VERR_NO_MEMORY;
2875 }
2876
2877 return rc;
2878}
2879
2880static int vdmaVBVACtlEnableSubmitSync(PVBOXVDMAHOST pVdma, uint32_t offVram, bool fPaused)
2881{
2882 VBVAENABLE Enable = {0};
2883 Enable.u32Flags = VBVA_F_ENABLE;
2884 Enable.u32Offset = offVram;
2885
2886 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
2887 Data.rc = VERR_NOT_IMPLEMENTED;
2888 int rc = RTSemEventCreate(&Data.hEvent);
2889 if (!RT_SUCCESS(rc))
2890 {
2891 WARN(("RTSemEventCreate failed %d\n", rc));
2892 return rc;
2893 }
2894
2895 rc = vdmaVBVACtlEnableSubmitInternal(pVdma, &Enable, fPaused, vdmaVBVACtlSubmitSyncCompletion, &Data);
2896 if (RT_SUCCESS(rc))
2897 {
2898 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
2899 if (RT_SUCCESS(rc))
2900 {
2901 rc = Data.rc;
2902 if (!RT_SUCCESS(rc))
2903 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
2904 }
2905 else
2906 WARN(("RTSemEventWait failed %d\n", rc));
2907 }
2908 else
2909 WARN(("vdmaVBVACtlSubmit failed %d\n", rc));
2910
2911 RTSemEventDestroy(Data.hEvent);
2912
2913 return rc;
2914}
2915
2916static int vdmaVBVACtlDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2917{
2918 int rc;
2919 VBVAEXHOSTCTL* pHCtl;
2920 if (VBoxVBVAExHSIsDisabled(&pVdma->CmdVbva))
2921 {
2922 WARN(("VBoxVBVAExHSIsDisabled: disabled"));
2923 return VINF_SUCCESS;
2924 }
2925
2926 pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_GHH_DISABLE);
2927 if (!pHCtl)
2928 {
2929 WARN(("VBoxVBVAExHCtlCreate failed\n"));
2930 return VERR_NO_MEMORY;
2931 }
2932
2933 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
2934 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
2935 rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_GUEST, pfnComplete, pvComplete);
2936 if (RT_SUCCESS(rc))
2937 return VINF_SUCCESS;
2938
2939 WARN(("vdmaVBVACtlSubmit failed rc %d\n", rc));
2940 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
2941 return rc;
2942}
2943
2944static int vdmaVBVACtlEnableDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2945{
2946 bool fEnable = ((pEnable->u32Flags & (VBVA_F_ENABLE | VBVA_F_DISABLE)) == VBVA_F_ENABLE);
2947 if (fEnable)
2948 return vdmaVBVACtlEnableSubmitInternal(pVdma, pEnable, false, pfnComplete, pvComplete);
2949 return vdmaVBVACtlDisableSubmitInternal(pVdma, pEnable, pfnComplete, pvComplete);
2950}
2951
2952static int vdmaVBVACtlEnableDisableSubmit(PVBOXVDMAHOST pVdma, VBOXCMDVBVA_CTL_ENABLE *pEnable)
2953{
2954 VBoxSHGSMICommandMarkAsynchCompletion(&pEnable->Hdr);
2955 int rc = vdmaVBVACtlEnableDisableSubmitInternal(pVdma, &pEnable->Enable, vboxCmdVBVACmdCtlGuestCompletion, pVdma);
2956 if (RT_SUCCESS(rc))
2957 return VINF_SUCCESS;
2958
2959 WARN(("vdmaVBVACtlEnableDisableSubmitInternal failed %d\n", rc));
2960 pEnable->Hdr.i32Result = rc;
2961 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, &pEnable->Hdr);
2962 AssertRC(rc);
2963 return VINF_SUCCESS;
2964}
2965
2966static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext)
2967{
2968 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvContext;
2969 pData->rc = rc;
2970 rc = RTSemEventSignal(pData->hEvent);
2971 if (!RT_SUCCESS(rc))
2972 WARN(("RTSemEventSignal failed %d\n", rc));
2973}
2974
2975static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource)
2976{
2977 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
2978 Data.rc = VERR_NOT_IMPLEMENTED;
2979 int rc = RTSemEventCreate(&Data.hEvent);
2980 if (!RT_SUCCESS(rc))
2981 {
2982 WARN(("RTSemEventCreate failed %d\n", rc));
2983 return rc;
2984 }
2985
2986 rc = vdmaVBVACtlSubmit(pVdma, pCtl, enmSource, vdmaVBVACtlSubmitSyncCompletion, &Data);
2987 if (RT_SUCCESS(rc))
2988 {
2989 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
2990 if (RT_SUCCESS(rc))
2991 {
2992 rc = Data.rc;
2993 if (!RT_SUCCESS(rc))
2994 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
2995 }
2996 else
2997 WARN(("RTSemEventWait failed %d\n", rc));
2998 }
2999 else
3000 Log(("vdmaVBVACtlSubmit failed %d\n", rc));
3001
3002 RTSemEventDestroy(Data.hEvent);
3003
3004 return rc;
3005}
3006
3007static int vdmaVBVAPause(PVBOXVDMAHOST pVdma)
3008{
3009 VBVAEXHOSTCTL Ctl;
3010 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE;
3011 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
3012}
3013
3014static int vdmaVBVAResume(PVBOXVDMAHOST pVdma)
3015{
3016 VBVAEXHOSTCTL Ctl;
3017 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME;
3018 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
3019}
3020
3021static int vboxVDMACmdSubmitPerform(struct VBOXVDMAHOST *pVdma)
3022{
3023 int rc = VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva);
3024 switch (rc)
3025 {
3026 case VINF_SUCCESS:
3027 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
3028 case VINF_ALREADY_INITIALIZED:
3029 case VINF_EOF:
3030 case VERR_INVALID_STATE:
3031 return VINF_SUCCESS;
3032 default:
3033 Assert(!RT_FAILURE(rc));
3034 return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
3035 }
3036}
3037
3038
3039int vboxCmdVBVACmdHostCtl(PPDMIDISPLAYVBVACALLBACKS pInterface,
3040 struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
3041 PFNCRCTLCOMPLETION pfnCompletion,
3042 void *pvCompletion)
3043{
3044 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
3045 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3046 return vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, pfnCompletion, pvCompletion);
3047}
3048
/** Context handed to vboxCmdVBVACmdHostCtlSyncCb for synchronous host
 *  control submission; lives on the stack of vboxCmdVBVACmdHostCtlSync. */
typedef struct VBOXCMDVBVA_CMDHOSTCTL_SYNC
{
    struct VBOXVDMAHOST *pVdma; /**< The VDMA instance the command was submitted to. */
    uint32_t fProcessing;       /**< 1 while the command is in flight; cleared by the completion callback. */
    int rc;                     /**< Completion status, valid once fProcessing is 0. */
} VBOXCMDVBVA_CMDHOSTCTL_SYNC;
3055
3056static DECLCALLBACK(void) vboxCmdVBVACmdHostCtlSyncCb(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
3057{
3058 VBOXCMDVBVA_CMDHOSTCTL_SYNC *pData = (VBOXCMDVBVA_CMDHOSTCTL_SYNC*)pvCompletion;
3059
3060 pData->rc = rc;
3061 pData->fProcessing = 0;
3062
3063 struct VBOXVDMAHOST *pVdma = pData->pVdma;
3064
3065 ASMAtomicIncS32(&pVdma->i32cHostCrCtlCompleted);
3066
3067 RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
3068}
3069
/**
 * Submits a host chromium control command and synchronously waits for its
 * completion (signalled by vboxCmdVBVACmdHostCtlSyncCb).
 *
 * @returns The command's completion status, or the submission error.
 */
int vboxCmdVBVACmdHostCtlSync(PPDMIDISPLAYVBVACALLBACKS pInterface,
                              struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd)
{
    PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
    struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
    VBOXCMDVBVA_CMDHOSTCTL_SYNC Data;
    Data.pVdma = pVdma;
    Data.fProcessing = 1;
    Data.rc = VERR_INTERNAL_ERROR;
    int rc = vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, vboxCmdVBVACmdHostCtlSyncCb, &Data);
    if (!RT_SUCCESS(rc))
    {
        WARN(("vdmaVBVACtlOpaqueHostSubmit failed %d", rc));
        return rc;
    }

    /* NOTE(review): fProcessing is a plain uint32_t polled across threads;
     * this relies on the semaphore wait below acting as a compiler/memory
     * barrier - confirm, or consider an atomic read. */
    while (Data.fProcessing)
    {
        /* Poll infrequently to make sure no completed message has been missed. */
        RTSemEventMultiWait(pVdma->HostCrCtlCompleteEvent, 500);

        if (Data.fProcessing)
            RTThreadYield();
    }

    /* 'Our' message has been processed, so should reset the semaphore.
     * There is still possible that another message has been processed
     * and the semaphore has been signalled again.
     * Reset only if there are no other messages completed.
     */
    int32_t c = ASMAtomicDecS32(&pVdma->i32cHostCrCtlCompleted);
    Assert(c >= 0);
    if (!c)
        RTSemEventMultiReset(pVdma->HostCrCtlCompleteEvent);

    rc = Data.rc;
    if (!RT_SUCCESS(rc))
        WARN(("host call failed %d", rc));

    return rc;
}
3111
3112int vboxCmdVBVACmdCtl(PVGASTATE pVGAState, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
3113{
3114 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3115 int rc = VINF_SUCCESS;
3116 switch (pCtl->u32Type)
3117 {
3118 case VBOXCMDVBVACTL_TYPE_3DCTL:
3119 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, pCtl, cbCtl);
3120 case VBOXCMDVBVACTL_TYPE_RESIZE:
3121 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_RESIZE, pCtl, cbCtl);
3122 case VBOXCMDVBVACTL_TYPE_ENABLE:
3123 if (cbCtl != sizeof (VBOXCMDVBVA_CTL_ENABLE))
3124 {
3125 WARN(("incorrect enable size\n"));
3126 rc = VERR_INVALID_PARAMETER;
3127 break;
3128 }
3129 return vdmaVBVACtlEnableDisableSubmit(pVdma, (VBOXCMDVBVA_CTL_ENABLE*)pCtl);
3130 default:
3131 WARN(("unsupported type\n"));
3132 rc = VERR_INVALID_PARAMETER;
3133 break;
3134 }
3135
3136 pCtl->i32Result = rc;
3137 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
3138 AssertRC(rc);
3139 return VINF_SUCCESS;
3140}
3141
3142int vboxCmdVBVACmdSubmit(PVGASTATE pVGAState)
3143{
3144 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3145 {
3146 WARN(("vdma VBVA is disabled\n"));
3147 return VERR_INVALID_STATE;
3148 }
3149
3150 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3151}
3152
3153int vboxCmdVBVACmdFlush(PVGASTATE pVGAState)
3154{
3155 WARN(("flush\n"));
3156 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3157 {
3158 WARN(("vdma VBVA is disabled\n"));
3159 return VERR_INVALID_STATE;
3160 }
3161 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3162}
3163
3164void vboxCmdVBVACmdTimer(PVGASTATE pVGAState)
3165{
3166 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3167 return;
3168 vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3169}
3170
3171#endif
3172
3173int vboxVDMASaveStateExecPrep(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
3174{
3175#ifdef VBOX_WITH_CRHGSMI
3176 int rc = vdmaVBVAPause(pVdma);
3177 if (RT_SUCCESS(rc))
3178 return VINF_SUCCESS;
3179
3180 if (rc != VERR_INVALID_STATE)
3181 {
3182 WARN(("vdmaVBVAPause failed %d\n", rc));
3183 return rc;
3184 }
3185
3186#ifdef DEBUG_misha
3187 WARN(("debug prep"));
3188#endif
3189
3190 PVGASTATE pVGAState = pVdma->pVGAState;
3191 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
3192 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_BEGIN, sizeof (*pCmd));
3193 Assert(pCmd);
3194 if (pCmd)
3195 {
3196 rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
3197 AssertRC(rc);
3198 if (RT_SUCCESS(rc))
3199 {
3200 rc = vboxVDMACrCtlGetRc(pCmd);
3201 }
3202 vboxVDMACrCtlRelease(pCmd);
3203 return rc;
3204 }
3205 return VERR_NO_MEMORY;
3206#else
3207 return VINF_SUCCESS;
3208#endif
3209}
3210
3211int vboxVDMASaveStateExecDone(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
3212{
3213#ifdef VBOX_WITH_CRHGSMI
3214 int rc = vdmaVBVAResume(pVdma);
3215 if (RT_SUCCESS(rc))
3216 return VINF_SUCCESS;
3217
3218 if (rc != VERR_INVALID_STATE)
3219 {
3220 WARN(("vdmaVBVAResume failed %d\n", rc));
3221 return rc;
3222 }
3223
3224#ifdef DEBUG_misha
3225 WARN(("debug done"));
3226#endif
3227
3228 PVGASTATE pVGAState = pVdma->pVGAState;
3229 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
3230 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_END, sizeof (*pCmd));
3231 Assert(pCmd);
3232 if (pCmd)
3233 {
3234 rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
3235 AssertRC(rc);
3236 if (RT_SUCCESS(rc))
3237 {
3238 rc = vboxVDMACrCtlGetRc(pCmd);
3239 }
3240 vboxVDMACrCtlRelease(pCmd);
3241 return rc;
3242 }
3243 return VERR_NO_MEMORY;
3244#else
3245 return VINF_SUCCESS;
3246#endif
3247}
3248
/**
 * Writes the command VBVA state to the saved-state stream.
 *
 * Writes either the 0xffffffff "nothing saved" marker, or the VRAM offset of
 * the guest VBVA buffer followed by the state written by the worker thread via
 * a synchronous VBVAEXHOSTCTL_TYPE_HH_SAVESTATE control.
 *
 * @returns VBox status code.
 * @param   pVdma   The VDMA host state.
 * @param   pSSM    The saved-state handle.
 *
 * @note    The braces below are deliberately shared between the conditional
 *          and unconditional compilation variants: with VBOX_WITH_CRHGSMI the
 *          block runs only when command VBVA is disabled; without it, the
 *          block always runs and the function ends there.
 */
int vboxVDMASaveStateExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
{
    int rc;

#ifdef VBOX_WITH_CRHGSMI
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
#endif
    {
        /* Nothing to save: emit the marker the loader recognizes as "no state". */
        rc = SSMR3PutU32(pSSM, 0xffffffff);
        AssertRCReturn(rc, rc);
        return VINF_SUCCESS;
    }

#ifdef VBOX_WITH_CRHGSMI
    PVGASTATE pVGAState = pVdma->pVGAState;
    uint8_t * pu8VramBase = pVGAState->vram_ptrR3;

    /* Save the guest VBVA buffer location as an offset into VRAM. */
    rc = SSMR3PutU32(pSSM, (uint32_t)(((uint8_t*)pVdma->CmdVbva.pVBVA) - pu8VramBase));
    AssertRCReturn(rc, rc);

    /* Let the worker thread serialize the rest synchronously. */
    VBVAEXHOSTCTL HCtl;
    HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_SAVESTATE;
    HCtl.u.state.pSSM = pSSM;
    HCtl.u.state.u32Version = 0;
    return vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
#endif
}
3276
3277int vboxVDMASaveLoadExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM, uint32_t u32Version)
3278{
3279 uint32_t u32;
3280 int rc = SSMR3GetU32(pSSM, &u32);
3281 AssertRCReturn(rc, rc);
3282
3283 if (u32 != 0xffffffff)
3284 {
3285#ifdef VBOX_WITH_CRHGSMI
3286 rc = vdmaVBVACtlEnableSubmitSync(pVdma, u32, true);
3287 AssertRCReturn(rc, rc);
3288
3289 Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
3290
3291 VBVAEXHOSTCTL HCtl;
3292 HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_LOADSTATE;
3293 HCtl.u.state.pSSM = pSSM;
3294 HCtl.u.state.u32Version = u32Version;
3295 rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
3296 AssertRCReturn(rc, rc);
3297
3298 rc = vdmaVBVAResume(pVdma);
3299 AssertRCReturn(rc, rc);
3300
3301 return VINF_SUCCESS;
3302#else
3303 WARN(("Unsupported VBVACtl info!\n"));
3304 return VERR_VERSION_MISMATCH;
3305#endif
3306 }
3307
3308 return VINF_SUCCESS;
3309}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette