VirtualBox

source: vbox/trunk/src/VBox/Devices/Graphics/DevVGA_VDMA.cpp@ 51326

Last change on this file since 51326 was 51260, checked in by vboxsync, 11 years ago

wddm: resize enhancements & fixes

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 106.0 KB
Line 
1/** @file
2 * Video DMA (VDMA) support.
3 */
4
5/*
6 * Copyright (C) 2006-2012 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 */
16#include <VBox/VMMDev.h>
17#include <VBox/vmm/pdmdev.h>
18#include <VBox/VBoxVideo.h>
19#include <iprt/semaphore.h>
20#include <iprt/thread.h>
21#include <iprt/mem.h>
22#include <iprt/asm.h>
23#include <iprt/list.h>
24#include <iprt/param.h>
25
26#include "DevVGA.h"
27#include "HGSMI/SHGSMIHost.h"
28
29#include <VBox/VBoxVideo3D.h>
30#include <VBox/VBoxVideoHost3D.h>
31
32#ifdef DEBUG_misha
33# define VBOXVDBG_MEMCACHE_DISABLE
34#endif
35
36#ifndef VBOXVDBG_MEMCACHE_DISABLE
37# include <iprt/memcache.h>
38#endif
39
#ifdef DEBUG_misha
/** In misha's debug builds WARN also trips an assertion breakpoint. */
#define WARN_BP() do { AssertFailed(); } while (0)
#else
#define WARN_BP() do { } while (0)
#endif
/** Logs a release-log warning and optionally breaks into the debugger. */
#define WARN(_msg) do { \
        LogRel(_msg); \
        WARN_BP(); \
    } while (0)

/** @name VDMA worker thread states (VBOXVDMATHREAD::u32State).
 *  NOTE(review): value 2 is unused here — presumably a removed state; confirm
 *  before renumbering. */
#define VBOXVDMATHREAD_STATE_TERMINATED 0
#define VBOXVDMATHREAD_STATE_CREATING 1
#define VBOXVDMATHREAD_STATE_CREATED 3
#define VBOXVDMATHREAD_STATE_TERMINATING 4
54
struct VBOXVDMATHREAD;

/** Callback invoked when a VDMA thread state change (create/terminate) completes. */
typedef DECLCALLBACKPTR(void, PFNVBOXVDMATHREAD_CHANGED)(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvChangeContext);

/** VDMA worker thread bookkeeping. */
typedef struct VBOXVDMATHREAD
{
    /** The worker thread handle. */
    RTTHREAD hWorkerThread;
    /** Event semaphore used to poke the worker. */
    RTSEMEVENT hEvent;
    /** One of the VBOXVDMATHREAD_STATE_XXX values; accessed atomically. */
    volatile uint32_t u32State;
    /** One-shot state-change callback; cleared before it is invoked. */
    PFNVBOXVDMATHREAD_CHANGED pfnChanged;
    /** User context for pfnChanged. */
    void *pvChanged;
} VBOXVDMATHREAD, *PVBOXVDMATHREAD;
67
68
/* state transformations:
 *
 *   submitter    |    processor
 *
 *   LISTENING   --->  PROCESSING
 *
 * */
#define VBVAEXHOSTCONTEXT_STATE_LISTENING 0
#define VBVAEXHOSTCONTEXT_STATE_PROCESSING 1

/** @name Enable states, ordered: DISABLED < PAUSED < ENABLED (the code relies
 *  on this ordering in comparisons). */
#define VBVAEXHOSTCONTEXT_ESTATE_DISABLED -1
#define VBVAEXHOSTCONTEXT_ESTATE_PAUSED 0
#define VBVAEXHOSTCONTEXT_ESTATE_ENABLED 1
82
/** Extended VBVA host context: guest-shared buffer plus control-command queues. */
typedef struct VBVAEXHOSTCONTEXT
{
    /** The guest-shared VBVA buffer; NULL until enabled. */
    VBVABUFFER *pVBVA;
    /** LISTENING/PROCESSING; accessed atomically (processor ownership flag). */
    volatile int32_t i32State;
    /** VBVAEXHOSTCONTEXT_ESTATE_XXX; accessed atomically. */
    volatile int32_t i32EnableState;
    /** Number of queued control commands (guest + host lists combined). */
    volatile uint32_t u32cCtls;
    /* critical section for accessing ctl lists */
    RTCRITSECT CltCritSect;
    /** Queue of guest-submitted controls, protected by CltCritSect. */
    RTLISTANCHOR GuestCtlList;
    /** Queue of host-submitted controls, protected by CltCritSect. */
    RTLISTANCHOR HostCtlList;
#ifndef VBOXVDBG_MEMCACHE_DISABLE
    /** Allocation cache for VBVAEXHOSTCTL structures. */
    RTMEMCACHE CtlCache;
#endif
} VBVAEXHOSTCONTEXT;
97
/** Control command types; HH_* originate on the host, GHH_* on the guest. */
typedef enum
{
    VBVAEXHOSTCTL_TYPE_UNDEFINED = 0,
    VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE,
    VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME,
    VBVAEXHOSTCTL_TYPE_HH_SAVESTATE,
    VBVAEXHOSTCTL_TYPE_HH_LOADSTATE,
    VBVAEXHOSTCTL_TYPE_HH_BE_OPAQUE,
    VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD,
    VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE,
    VBVAEXHOSTCTL_TYPE_GHH_ENABLE,
    VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED,
    VBVAEXHOSTCTL_TYPE_GHH_DISABLE,
    VBVAEXHOSTCTL_TYPE_GHH_RESIZE
} VBVAEXHOSTCTL_TYPE;
113
struct VBVAEXHOSTCTL;

/** Completion callback for a control command. */
typedef DECLCALLBACKPTR(void, PFNVBVAEXHOSTCTL_COMPLETE)(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvComplete);

/** A queued control command. */
typedef struct VBVAEXHOSTCTL
{
    /** List linkage (GuestCtlList/HostCtlList). */
    RTLISTNODE Node;
    /** Command type. */
    VBVAEXHOSTCTL_TYPE enmType;
    union
    {
        /** Opaque command payload. */
        struct
        {
            uint8_t * pu8Cmd;
            uint32_t cbCmd;
        } cmd;

        /** Save/load-state payload. */
        struct
        {
            PSSMHANDLE pSSM;
            uint32_t u32Version;
        } state;
    } u;
    /** Completion callback; when NULL the command is simply freed on completion. */
    PFNVBVAEXHOSTCTL_COMPLETE pfnComplete;
    /** User context for pfnComplete. */
    void *pvComplete;
} VBVAEXHOSTCTL;
139
/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
 * but can be called with other VBoxVBVAExS** (submitter) functions except Init/Start/Term apparently.
 * Can only be called by the processor, i.e. the entity that acquired the processor state by direct or indirect call to the VBoxVBVAExHSCheckCommands
 * see more detailed comments in headers for function definitions */
typedef enum
{
    VBVAEXHOST_DATA_TYPE_NO_DATA = 0,
    VBVAEXHOST_DATA_TYPE_CMD,
    VBVAEXHOST_DATA_TYPE_HOSTCTL,
    VBVAEXHOST_DATA_TYPE_GUESTCTL
} VBVAEXHOST_DATA_TYPE;
static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd);

static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd);
static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc);

/* NOTE(review): the comment below likely means the VBoxVBVAExHS** (submitter)
 * functions — it contradicts the HP comment above otherwise; confirm.
 * VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
 * can be called concurrently with itself as well as with other VBoxVBVAEx** functions except Init/Start/Term apparently */
static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva);

static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva);
static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA);
static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva);
static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva);
static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM);
static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version);
166
167static VBVAEXHOSTCTL* VBoxVBVAExHCtlAlloc(VBVAEXHOSTCONTEXT *pCmdVbva)
168{
169#ifndef VBOXVDBG_MEMCACHE_DISABLE
170 return (VBVAEXHOSTCTL*)RTMemCacheAlloc(pCmdVbva->CtlCache);
171#else
172 return (VBVAEXHOSTCTL*)RTMemAlloc(sizeof (VBVAEXHOSTCTL));
173#endif
174}
175
/** Returns a control command to the cache (or heap when caching is disabled). */
static void VBoxVBVAExHCtlFree(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl)
{
#ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMemCacheFree(pCmdVbva->CtlCache, pCtl);
#else
    RTMemFree(pCtl);
#endif
}
184
185static VBVAEXHOSTCTL* VBoxVBVAExHCtlCreate(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL_TYPE enmType)
186{
187 VBVAEXHOSTCTL* pCtl = VBoxVBVAExHCtlAlloc(pCmdVbva);
188 if (!pCtl)
189 {
190 WARN(("VBoxVBVAExHCtlAlloc failed\n"));
191 return NULL;
192 }
193
194 pCtl->enmType = enmType;
195 return pCtl;
196}
197
198static int vboxVBVAExHSProcessorAcquire(struct VBVAEXHOSTCONTEXT *pCmdVbva)
199{
200 Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);
201
202 if (ASMAtomicCmpXchgS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_PROCESSING, VBVAEXHOSTCONTEXT_STATE_LISTENING))
203 return VINF_SUCCESS;
204 return VERR_SEM_BUSY;
205}
206
/**
 * Dequeues the next pending control command, host controls first.
 *
 * Caller must own the processor state.  Guest controls are only handed out
 * when not paused and @a fHostOnlyMode is false.
 *
 * @returns The dequeued control or NULL; *pfHostCtl tells which list it came from.
 */
static VBVAEXHOSTCTL* vboxVBVAExHPCheckCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, bool *pfHostCtl, bool fHostOnlyMode)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);

    /* Fast path: nothing queued at all (host-only mode must still take the lock). */
    if(!fHostOnlyMode && !ASMAtomicUoReadU32(&pCmdVbva->u32cCtls))
        return NULL;

    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
        VBVAEXHOSTCTL* pCtl = RTListGetFirst(&pCmdVbva->HostCtlList, VBVAEXHOSTCTL, Node);
        if (pCtl)
            *pfHostCtl = true;
        else if (!fHostOnlyMode)
        {
            if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
            {
                pCtl = RTListGetFirst(&pCmdVbva->GuestCtlList, VBVAEXHOSTCTL, Node);
                /* pCtl can not be null here since pCmdVbva->u32cCtls is not null,
                 * and there are no HostCtl commands*/
                Assert(pCtl);
                *pfHostCtl = false;
            }
        }

        if (pCtl)
        {
            RTListNodeRemove(&pCtl->Node);
            ASMAtomicDecU32(&pCmdVbva->u32cCtls);
        }

        RTCritSectLeave(&pCmdVbva->CltCritSect);

        return pCtl;
    }
    else
        WARN(("RTCritSectEnter failed %d\n", rc));

    return NULL;
}
247
248static VBVAEXHOSTCTL* VBoxVBVAExHPCheckHostCtlOnDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
249{
250 bool fHostCtl = false;
251 VBVAEXHOSTCTL* pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostCtl, true);
252 Assert(!pCtl || fHostCtl);
253 return pCtl;
254}
255
256static int VBoxVBVAExHPPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
257{
258 if (pCmdVbva->i32EnableState < VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
259 {
260 WARN(("Invalid state\n"));
261 return VERR_INVALID_STATE;
262 }
263
264 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
265 return VINF_SUCCESS;
266}
267
268static int VBoxVBVAExHPResume(struct VBVAEXHOSTCONTEXT *pCmdVbva)
269{
270 if (pCmdVbva->i32EnableState != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
271 {
272 WARN(("Invalid state\n"));
273 return VERR_INVALID_STATE;
274 }
275
276 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
277 return VINF_SUCCESS;
278}
279
280
281static bool vboxVBVAExHPCheckProcessCtlInternal(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl)
282{
283 switch (pCtl->enmType)
284 {
285 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE:
286 {
287 int rc = VBoxVBVAExHPPause(pCmdVbva);
288 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
289 return true;
290 }
291 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME:
292 {
293 int rc = VBoxVBVAExHPResume(pCmdVbva);
294 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
295 return true;
296 }
297 default:
298 return false;
299 }
300}
301
/** Releases processor ownership: PROCESSING -> LISTENING. */
static void vboxVBVAExHPProcessorRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);

    ASMAtomicWriteS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
}
308
/** Advertises "host is processing" to the guest via the shared VBVA flags. */
static void vboxVBVAExHPHgEventSet(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    if (pCmdVbva->pVBVA)
        ASMAtomicOrU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, VBVA_F_STATE_PROCESSING);
}
315
/** Clears the "host is processing" flag in the shared VBVA flags. */
static void vboxVBVAExHPHgEventClear(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    if (pCmdVbva->pVBVA)
        ASMAtomicAndU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, ~VBVA_F_STATE_PROCESSING);
}
322
/**
 * Peeks at the next guest command record in the VBVA ring buffer.
 *
 * Does not consume the record — VBoxVBVAExHPDataCompleteCmd does that.
 *
 * @returns VINF_SUCCESS with *ppCmd/*pcbCmd set; VINF_EOF when empty;
 *          VINF_TRY_AGAIN while the guest is still writing the record;
 *          VERR_INVALID_STATE for commands wrapping the buffer boundary.
 */
static int vboxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pCmdVbva->i32EnableState > VBVAEXHOSTCONTEXT_ESTATE_PAUSED);

    VBVABUFFER *pVBVA = pCmdVbva->pVBVA;

    uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
    uint32_t indexRecordFree = pVBVA->indexRecordFree;

    Log(("first = %d, free = %d\n",
         indexRecordFirst, indexRecordFree));

    if (indexRecordFirst == indexRecordFree)
    {
        /* No records to process. Return without assigning output variables. */
        return VINF_EOF;
    }

    uint32_t cbRecordCurrent = ASMAtomicReadU32(&pVBVA->aRecords[indexRecordFirst].cbRecord);

    /* A new record need to be processed. */
    if (cbRecordCurrent & VBVA_F_RECORD_PARTIAL)
    {
        /* the record is being recorded, try again */
        return VINF_TRY_AGAIN;
    }

    uint32_t cbRecord = cbRecordCurrent & ~VBVA_F_RECORD_PARTIAL;

    if (!cbRecord)
    {
        /* the record is being recorded, try again */
        return VINF_TRY_AGAIN;
    }

    /* we should not get partial commands here actually */
    Assert(cbRecord);

    /* The size of largest contiguous chunk in the ring buffer. */
    uint32_t u32BytesTillBoundary = pVBVA->cbData - pVBVA->off32Data;

    /* The pointer to data in the ring buffer. */
    uint8_t *pSrc = &pVBVA->au8Data[pVBVA->off32Data];

    /* Fetch or point the data. */
    if (u32BytesTillBoundary >= cbRecord)
    {
        /* The command does not cross buffer boundary. Return address in the buffer. */
        *ppCmd = pSrc;
        *pcbCmd = cbRecord;
        return VINF_SUCCESS;
    }

    LogRel(("CmdVbva: cross-bound writes unsupported\n"));
    return VERR_INVALID_STATE;
}
380
/** Consumes the current guest command: advances data offset and record index. */
static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd)
{
    VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
    pVBVA->off32Data = (pVBVA->off32Data + cbCmd) % pVBVA->cbData;

    pVBVA->indexRecordFirst = (pVBVA->indexRecordFirst + 1) % RT_ELEMENTS(pVBVA->aRecords);
}
388
389static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc)
390{
391 if (pCtl->pfnComplete)
392 pCtl->pfnComplete(pCmdVbva, pCtl, rc, pCtl->pvComplete);
393 else
394 VBoxVBVAExHCtlFree(pCmdVbva, pCtl);
395}
396
/**
 * Worker for VBoxVBVAExHPDataGet: fetches the next item for the processor.
 *
 * Order of precedence: queued controls (internal pause/resume handled in
 * place), then guest command records.  Spins with a 1ms sleep while the guest
 * is mid-write on a record.
 *
 * @returns the item type; *ppCmd/*pcbCmd are only valid for non-NO_DATA types.
 */
static VBVAEXHOST_DATA_TYPE vboxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    VBVAEXHOSTCTL*pCtl;
    bool fHostClt;

    for(;;)
    {
        pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostClt, false);
        if (pCtl)
        {
            if (fHostClt)
            {
                if (!vboxVBVAExHPCheckProcessCtlInternal(pCmdVbva, pCtl))
                {
                    /* not an internal control — hand it to the caller */
                    *ppCmd = (uint8_t*)pCtl;
                    *pcbCmd = sizeof (*pCtl);
                    return VBVAEXHOST_DATA_TYPE_HOSTCTL;
                }
                continue;
            }
            else
            {
                *ppCmd = (uint8_t*)pCtl;
                *pcbCmd = sizeof (*pCtl);
                return VBVAEXHOST_DATA_TYPE_GUESTCTL;
            }
        }

        /* guest commands are only served when fully enabled */
        if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) <= VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
            return VBVAEXHOST_DATA_TYPE_NO_DATA;

        int rc = vboxVBVAExHPCmdGet(pCmdVbva, ppCmd, pcbCmd);
        switch (rc)
        {
            case VINF_SUCCESS:
                return VBVAEXHOST_DATA_TYPE_CMD;
            case VINF_EOF:
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
            case VINF_TRY_AGAIN:
                RTThreadSleep(1);
                continue;
            default:
                /* this is something really unexpected, i.e. most likely guest has written something incorrect to the VBVA buffer */
                WARN(("Warning: vboxVBVAExHCmdGet returned unexpected status %d\n", rc));
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
        }
    }

    /* unreachable: every loop path above returns or continues */
    WARN(("Warning: VBoxVBVAExHCmdGet unexpected state\n"));
    return VBVAEXHOST_DATA_TYPE_NO_DATA;
}
449
/**
 * Fetches the next item for the processor, managing the "processing" flag and
 * processor ownership when the queue drains.
 *
 * On NO_DATA the processor state is released; a re-acquire/re-check closes the
 * race window with submitters (see the numbered comment below).
 */
static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    VBVAEXHOST_DATA_TYPE enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
    if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
    {
        vboxVBVAExHPHgEventClear(pCmdVbva);
        vboxVBVAExHPProcessorRelease(pCmdVbva);
        /* we need to prevent racing between us clearing the flag and command check/submission thread, i.e.
         * 1. we check the queue -> and it is empty
         * 2. submitter adds command to the queue
         * 3. submitter checks the "processing" -> and it is true , thus it does not submit a notification
         * 4. we clear the "processing" state
         * 5. ->here we need to re-check the queue state to ensure we do not leak the notification of the above command
         * 6. if the queue appears to be not-empty set the "processing" state back to "true"
         **/
        int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
        if (RT_SUCCESS(rc))
        {
            /* we are the processor now */
            enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
            if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
            {
                vboxVBVAExHPProcessorRelease(pCmdVbva);
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
            }

            vboxVBVAExHPHgEventSet(pCmdVbva);
        }
    }

    return enmType;
}
482
483DECLINLINE(bool) vboxVBVAExHSHasCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
484{
485 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
486
487 if (pVBVA)
488 {
489 uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
490 uint32_t indexRecordFree = pVBVA->indexRecordFree;
491
492 if (indexRecordFirst != indexRecordFree)
493 return true;
494 }
495
496 return !!ASMAtomicReadU32(&pCmdVbva->u32cCtls);
497}
498
/* Checks whether the new commands are ready for processing
 * @returns
 *   VINF_SUCCESS - there are commands are in a queue, and the given thread is now the processor (i.e. typically it would delegate processing to a worker thread)
 *   VINF_EOF - no commands in a queue
 *   VINF_ALREADY_INITIALIZED - another thread already processing the commands
 *   VERR_INVALID_STATE - the VBVA is paused or pausing */
static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
    if (RT_SUCCESS(rc))
    {
        /* we are the processor now */
        if (vboxVBVAExHSHasCommands(pCmdVbva))
        {
            vboxVBVAExHPHgEventSet(pCmdVbva);
            return VINF_SUCCESS;
        }

        /* nothing to do — give up processor ownership again */
        vboxVBVAExHPProcessorRelease(pCmdVbva);
        return VINF_EOF;
    }
    if (rc == VERR_SEM_BUSY)
        return VINF_ALREADY_INITIALIZED;
    return VERR_INVALID_STATE;
}
524
/**
 * One-time init of a VBVAEXHOSTCONTEXT: critsect, ctl cache and lists.
 *
 * Leaves the context DISABLED; note i32State starts as PROCESSING
 * (the initializing thread implicitly owns the processor state).
 *
 * @returns IPRT status code.
 */
static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
    int rc = RTCritSectInit(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
#ifndef VBOXVDBG_MEMCACHE_DISABLE
        rc = RTMemCacheCreate(&pCmdVbva->CtlCache, sizeof (VBVAEXHOSTCTL),
                                0, /* size_t cbAlignment */
                                UINT32_MAX, /* uint32_t cMaxObjects */
                                NULL, /* PFNMEMCACHECTOR pfnCtor*/
                                NULL, /* PFNMEMCACHEDTOR pfnDtor*/
                                NULL, /* void *pvUser*/
                                0 /* uint32_t fFlags*/
                                );
        if (RT_SUCCESS(rc))
#endif
        {
            RTListInit(&pCmdVbva->GuestCtlList);
            RTListInit(&pCmdVbva->HostCtlList);
            pCmdVbva->i32State = VBVAEXHOSTCONTEXT_STATE_PROCESSING;
            pCmdVbva->i32EnableState = VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
            return VINF_SUCCESS;
        }
#ifndef VBOXVDBG_MEMCACHE_DISABLE
        else
            WARN(("RTMemCacheCreate failed %d\n", rc));
#endif
    }
    else
        WARN(("RTCritSectInit failed %d\n", rc));

    return rc;
}
559
/** True when the context is PAUSED or ENABLED (i.e. not disabled). */
DECLINLINE(bool) VBoxVBVAExHSIsEnabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) >= VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
}
564
/** True when the context is fully DISABLED. */
DECLINLINE(bool) VBoxVBVAExHSIsDisabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) == VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
}
569
570static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA)
571{
572 if (VBoxVBVAExHSIsEnabled(pCmdVbva))
573 {
574 WARN(("VBVAEx is enabled already\n"));
575 return VERR_INVALID_STATE;
576 }
577
578 pCmdVbva->pVBVA = pVBVA;
579 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
580 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
581 return VINF_SUCCESS;
582}
583
584static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
585{
586 if (VBoxVBVAExHSIsDisabled(pCmdVbva))
587 return VINF_SUCCESS;
588
589 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
590 return VINF_SUCCESS;
591}
592
/**
 * Tears the context down; both ctl lists must already be empty and the
 * processor must be stopped.
 */
static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    /* ensure the processor is stopped */
    Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);

    /* ensure no one tries to submit the command */
    if (pCmdVbva->pVBVA)
        pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;

    Assert(RTListIsEmpty(&pCmdVbva->GuestCtlList));
    Assert(RTListIsEmpty(&pCmdVbva->HostCtlList));

    RTCritSectDelete(&pCmdVbva->CltCritSect);

#ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMemCacheDestroy(pCmdVbva->CtlCache);
#endif

    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
}
613
/**
 * Saves one guest control to the saved-state stream: type, size and the
 * command's offset relative to VRAM base (so it can be relocated on load).
 */
static int vboxVBVAExHSSaveGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
{
    int rc = SSMR3PutU32(pSSM, pCtl->enmType);
    AssertRCReturn(rc, rc);
    rc = SSMR3PutU32(pSSM, pCtl->u.cmd.cbCmd);
    AssertRCReturn(rc, rc);
    rc = SSMR3PutU32(pSSM, (uint32_t)(pCtl->u.cmd.pu8Cmd - pu8VramBase));
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}
625
/**
 * Saves all queued guest controls; caller holds CltCritSect.
 * The stream is terminated with a zero type marker.  Requires PAUSED state.
 */
static int vboxVBVAExHSSaveStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
{
    if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    {
        WARN(("vbva not paused\n"));
        return VERR_INVALID_STATE;
    }

    VBVAEXHOSTCTL* pCtl;
    int rc;
    RTListForEach(&pCmdVbva->GuestCtlList, pCtl, VBVAEXHOSTCTL, Node)
    {
        rc = vboxVBVAExHSSaveGuestCtl(pCmdVbva, pCtl, pu8VramBase, pSSM);
        AssertRCReturn(rc, rc);
    }

    /* zero type == end-of-list marker, matched by vboxVBVAExHSLoadGuestCtl */
    rc = SSMR3PutU32(pSSM, 0);
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}
/* Saves state
 * @returns - same as VBoxVBVAExHSCheckCommands, or failure on save state fail
 */
static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
{
    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    if (RT_FAILURE(rc))
    {
        WARN(("RTCritSectEnter failed %d\n", rc));
        return rc;
    }

    rc = vboxVBVAExHSSaveStateLocked(pCmdVbva, pu8VramBase, pSSM);
    if (RT_FAILURE(rc))
        WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));

    RTCritSectLeave(&pCmdVbva->CltCritSect);

    return rc;
}
667
668static int vboxVBVAExHSLoadGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
669{
670 uint32_t u32;
671 int rc = SSMR3GetU32(pSSM, &u32);
672 AssertRCReturn(rc, rc);
673
674 if (!u32)
675 return VINF_EOF;
676
677 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(pCmdVbva, (VBVAEXHOSTCTL_TYPE)u32);
678 if (!pHCtl)
679 {
680 WARN(("VBoxVBVAExHCtlCreate failed\n"));
681 return VERR_NO_MEMORY;
682 }
683
684 rc = SSMR3GetU32(pSSM, &u32);
685 AssertRCReturn(rc, rc);
686 pHCtl->u.cmd.cbCmd = u32;
687
688 rc = SSMR3GetU32(pSSM, &u32);
689 AssertRCReturn(rc, rc);
690 pHCtl->u.cmd.pu8Cmd = pu8VramBase + u32;
691
692 RTListAppend(&pCmdVbva->GuestCtlList, &pHCtl->Node);
693 ++pCmdVbva->u32cCtls;
694
695 return VINF_SUCCESS;
696}
697
698
/**
 * Loads guest controls until the end-of-list marker; caller holds CltCritSect.
 * Requires PAUSED state.
 */
static int vboxVBVAExHSLoadStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
{
    if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    {
        WARN(("vbva not stopped\n"));
        return VERR_INVALID_STATE;
    }

    int rc;

    do {
        rc = vboxVBVAExHSLoadGuestCtl(pCmdVbva, pu8VramBase, pSSM, u32Version);
        AssertRCReturn(rc, rc);
    } while (VINF_EOF != rc);

    return VINF_SUCCESS;
}
716
717/* Loads state
718 * @returns - same as VBoxVBVAExHSCheckCommands, or failure on load state fail
719 */
720static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
721{
722 Assert(VGA_SAVEDSTATE_VERSION_3D <= u32Version);
723 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
724 if (RT_FAILURE(rc))
725 {
726 WARN(("RTCritSectEnter failed %d\n", rc));
727 return rc;
728 }
729
730 rc = vboxVBVAExHSLoadStateLocked(pCmdVbva, pu8VramBase, pSSM, u32Version);
731 if (RT_FAILURE(rc))
732 WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
733
734 RTCritSectLeave(&pCmdVbva->CltCritSect);
735
736 return rc;
737}
738
/** Who submitted a control command (selects the target queue). */
typedef enum
{
    VBVAEXHOSTCTL_SOURCE_GUEST = 0,
    VBVAEXHOSTCTL_SOURCE_HOST
} VBVAEXHOSTCTL_SOURCE;
744
745
/**
 * Queues a control command on the host or guest list and kicks command
 * processing.
 *
 * The enabled check is repeated under the critsect to close the race with a
 * concurrent disable.
 *
 * @returns same as VBoxVBVAExHSCheckCommands on success paths.
 */
static int VBoxVBVAExHCtlSubmit(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
{
    if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
    {
        Log(("cmd vbva not enabled\n"));
        return VERR_INVALID_STATE;
    }

    pCtl->pfnComplete = pfnComplete;
    pCtl->pvComplete = pvComplete;

    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
        if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
        {
            Log(("cmd vbva not enabled\n"));
            RTCritSectLeave(&pCmdVbva->CltCritSect);
            return VERR_INVALID_STATE;
        }

        if (enmSource > VBVAEXHOSTCTL_SOURCE_GUEST)
        {
            RTListAppend(&pCmdVbva->HostCtlList, &pCtl->Node);
        }
        else
            RTListAppend(&pCmdVbva->GuestCtlList, &pCtl->Node);

        ASMAtomicIncU32(&pCmdVbva->u32cCtls);

        RTCritSectLeave(&pCmdVbva->CltCritSect);

        rc = VBoxVBVAExHSCheckCommands(pCmdVbva);
    }
    else
        WARN(("RTCritSectEnter failed %d\n", rc));

    return rc;
}
785
#ifdef VBOX_WITH_CRHGSMI
/** Per-source screen info plus the set of targets it maps to. */
typedef struct VBOXVDMA_SOURCE
{
    VBVAINFOSCREEN Screen;
    VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
} VBOXVDMA_SOURCE;
#endif
793
/** The VDMA host instance data. */
typedef struct VBOXVDMAHOST
{
    /** The HGSMI instance used for guest communication. */
    PHGSMIINSTANCE pHgsmi;
    /** The owning VGA device state. */
    PVGASTATE pVGAState;
#ifdef VBOX_WITH_CRHGSMI
    /** Extended VBVA command context. */
    VBVAEXHOSTCONTEXT CmdVbva;
    /** The VDMA worker thread. */
    VBOXVDMATHREAD Thread;
    VBOXCRCMD_SVRINFO CrSrvInfo;
    /** Host control being re-submitted across enable/disable, if any. */
    VBVAEXHOSTCTL* pCurRemainingHostCtl;
    RTSEMEVENTMULTI HostCrCtlCompleteEvent;
    int32_t volatile i32cHostCrCtlCompleted;
//    VBOXVDMA_SOURCE aSources[VBOX_VIDEO_MAX_SCREENS];
#endif
#ifdef VBOX_VDMA_WITH_WATCHDOG
    PTMTIMERR3 WatchDogTimer;
#endif
} VBOXVDMAHOST, *PVBOXVDMAHOST;
811
812#ifdef VBOX_WITH_CRHGSMI
813
814void VBoxVDMAThreadNotifyConstructSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
815{
816 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_CREATING);
817 PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
818 void *pvChanged = pThread->pvChanged;
819
820 pThread->pfnChanged = NULL;
821 pThread->pvChanged = NULL;
822
823 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_CREATED);
824
825 if (pfnChanged)
826 pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
827}
828
829void VBoxVDMAThreadNotifyTerminatingSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
830{
831 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_TERMINATING);
832 PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
833 void *pvChanged = pThread->pvChanged;
834
835 pThread->pfnChanged = NULL;
836 pThread->pvChanged = NULL;
837
838 if (pfnChanged)
839 pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
840}
841
/** True when the worker thread has been asked to terminate. */
DECLINLINE(bool) VBoxVDMAThreadIsTerminating(PVBOXVDMATHREAD pThread)
{
    return ASMAtomicUoReadU32(&pThread->u32State) == VBOXVDMATHREAD_STATE_TERMINATING;
}
846
/** Initializes the thread structure to the TERMINATED (idle) state. */
void VBoxVDMAThreadInit(PVBOXVDMATHREAD pThread)
{
    memset(pThread, 0, sizeof (*pThread));
    pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
}
852
/**
 * Finishes thread shutdown: waits for a TERMINATING worker, destroys its event
 * semaphore and marks the structure TERMINATED.  No-op when already terminated.
 *
 * @returns IPRT status code; VERR_INVALID_STATE for CREATING/CREATED states.
 */
int VBoxVDMAThreadCleanup(PVBOXVDMATHREAD pThread)
{
    uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
    switch (u32State)
    {
        case VBOXVDMATHREAD_STATE_TERMINATED:
            return VINF_SUCCESS;
        case VBOXVDMATHREAD_STATE_TERMINATING:
        {
            int rc = RTThreadWait(pThread->hWorkerThread, RT_INDEFINITE_WAIT, NULL);
            if (!RT_SUCCESS(rc))
            {
                WARN(("RTThreadWait failed %d\n", rc));
                return rc;
            }

            RTSemEventDestroy(pThread->hEvent);

            ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATED);
            return VINF_SUCCESS;
        }
        default:
            WARN(("invalid state"));
            return VERR_INVALID_STATE;
    }
}
879
/**
 * Creates the worker thread (after cleaning up any previous incarnation).
 *
 * @param pfnCreated  Optional callback fired by the worker once construction
 *                    succeeded (via VBoxVDMAThreadNotifyConstructSucceeded).
 * @returns IPRT status code; state is TERMINATED again on failure.
 */
int VBoxVDMAThreadCreate(PVBOXVDMATHREAD pThread, PFNRTTHREAD pfnThread, void *pvThread, PFNVBOXVDMATHREAD_CHANGED pfnCreated, void*pvCreated)
{
    int rc = VBoxVDMAThreadCleanup(pThread);
    if (RT_FAILURE(rc))
    {
        WARN(("VBoxVDMAThreadCleanup failed %d\n", rc));
        return rc;
    }

    rc = RTSemEventCreate(&pThread->hEvent);
    if (RT_SUCCESS(rc))
    {
        pThread->u32State = VBOXVDMATHREAD_STATE_CREATING;
        pThread->pfnChanged = pfnCreated;
        pThread->pvChanged = pvCreated;
        rc = RTThreadCreate(&pThread->hWorkerThread, pfnThread, pvThread, 0, RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "VDMA");
        if (RT_SUCCESS(rc))
            return VINF_SUCCESS;
        else
            WARN(("RTThreadCreate failed %d\n", rc));

        RTSemEventDestroy(pThread->hEvent);
    }
    else
        WARN(("RTSemEventCreate failed %d\n", rc));

    pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;

    return rc;
}
910
/** Signals the worker thread's event semaphore. */
DECLINLINE(int) VBoxVDMAThreadEventNotify(PVBOXVDMATHREAD pThread)
{
    int rc = RTSemEventSignal(pThread->hEvent);
    AssertRC(rc);
    return rc;
}
917
/** Waits on the worker thread's event semaphore for up to @a cMillies ms. */
DECLINLINE(int) VBoxVDMAThreadEventWait(PVBOXVDMATHREAD pThread, RTMSINTERVAL cMillies)
{
    int rc = RTSemEventWait(pThread->hEvent, cMillies);
    AssertRC(rc);
    return rc;
}
924
925int VBoxVDMAThreadTerm(PVBOXVDMATHREAD pThread, PFNVBOXVDMATHREAD_CHANGED pfnTerminated, void*pvTerminated, bool fNotify)
926{
927 int rc;
928 do
929 {
930 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
931 switch (u32State)
932 {
933 case VBOXVDMATHREAD_STATE_CREATED:
934 pThread->pfnChanged = pfnTerminated;
935 pThread->pvChanged = pvTerminated;
936 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATING);
937 if (fNotify)
938 {
939 rc = VBoxVDMAThreadEventNotify(pThread);
940 AssertRC(rc);
941 }
942 return VINF_SUCCESS;
943 case VBOXVDMATHREAD_STATE_TERMINATING:
944 case VBOXVDMATHREAD_STATE_TERMINATED:
945 {
946 WARN(("thread is marked to termination or terminated\nn"));
947 return VERR_INVALID_STATE;
948 }
949 case VBOXVDMATHREAD_STATE_CREATING:
950 {
951 /* wait till the thread creation is completed */
952 WARN(("concurrent thread create/destron\n"));
953 RTThreadYield();
954 continue;
955 }
956 default:
957 WARN(("invalid state"));
958 return VERR_INVALID_STATE;
959 }
960 } while (1);
961
962 WARN(("should never be here\n"));
963 return VERR_INTERNAL_ERROR;
964}
965
static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource);

/** Completion callback type for chromium control commands. */
typedef DECLCALLBACK(void) FNVBOXVDMACRCTL_CALLBACK(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext);
typedef FNVBOXVDMACRCTL_CALLBACK *PFNVBOXVDMACRCTL_CALLBACK;

/** Reference-counted private header placed in front of a chromium control. */
typedef struct VBOXVDMACMD_CHROMIUM_CTL_PRIVATE
{
    /** Reference count; freed when it drops to zero. */
    uint32_t cRefs;
    /** Command status, set on completion. */
    int32_t rc;
    /** Completion callback and its context. */
    PFNVBOXVDMACRCTL_CALLBACK pfnCompletion;
    void *pvCompletion;
    /** The public command, embedded last (variable-size payload follows). */
    VBOXVDMACMD_CHROMIUM_CTL Cmd;
} VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, *PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE;

/** Recovers the private header from a public command pointer. */
#define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(_p) ((PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)(((uint8_t*)(_p)) - RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd)))
981
/**
 * Allocates a chromium control with one reference and @a cbCmd payload bytes.
 * @returns the public command pointer, or NULL on allocation failure.
 */
static PVBOXVDMACMD_CHROMIUM_CTL vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE enmCmd, uint32_t cbCmd)
{
    PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)RTMemAllocZ(cbCmd + RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd));
    Assert(pHdr);
    if (pHdr)
    {
        pHdr->cRefs = 1;
        pHdr->rc = VERR_NOT_IMPLEMENTED;
        pHdr->Cmd.enmType = enmCmd;
        pHdr->Cmd.cbCmd = cbCmd;
        return &pHdr->Cmd;
    }

    return NULL;
}
997
998DECLINLINE(void) vboxVDMACrCtlRelease (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
999{
1000 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1001 uint32_t cRefs = ASMAtomicDecU32(&pHdr->cRefs);
1002 if(!cRefs)
1003 {
1004 RTMemFree(pHdr);
1005 }
1006}
1007
/** Adds a reference to the command. */
DECLINLINE(void) vboxVDMACrCtlRetain (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
{
    PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
    ASMAtomicIncU32(&pHdr->cRefs);
}
1013
/** Returns the command's completion status (VERR_NOT_IMPLEMENTED until set). */
DECLINLINE(int) vboxVDMACrCtlGetRc (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
{
    PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
    return pHdr->rc;
}
1019
/** Completion callback: signals the event semaphore passed as context. */
static DECLCALLBACK(void) vboxVDMACrCtlCbSetEvent(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
{
    RTSemEventSignal((RTSEMEVENT)pvContext);
}
1024
/** Completion callback: simply drops the submitter's command reference. */
static DECLCALLBACK(void) vboxVDMACrCtlCbReleaseCmd(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
{
    vboxVDMACrCtlRelease(pCmd);
}
1029
1030
/**
 * Submits a chromium control to the display connector asynchronously.
 * @a pfnCompletion is stored in the private header and invoked on completion.
 * @returns VERR_NOT_SUPPORTED when the connector has no CrHgsmi support.
 */
static int vboxVDMACrCtlPostAsync (PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd, PFNVBOXVDMACRCTL_CALLBACK pfnCompletion, void *pvCompletion)
{
    if (   pVGAState->pDrv
        && pVGAState->pDrv->pfnCrHgsmiControlProcess)
    {
        PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
        pHdr->pfnCompletion = pfnCompletion;
        pHdr->pvCompletion = pvCompletion;
        pVGAState->pDrv->pfnCrHgsmiControlProcess(pVGAState->pDrv, pCmd, cbCmd);
        return VINF_SUCCESS;
    }
#ifdef DEBUG_misha
    Assert(0);
#endif
    return VERR_NOT_SUPPORTED;
}
1047
/**
 * Submits a chromium control command and blocks until it completes.
 *
 * A temporary event semaphore serves as the completion context; the
 * vboxVDMACrCtlCbSetEvent callback signals it from the completion path.
 *
 * @returns IPRT status of creation, submission or wait; the command's own
 *          result must be queried separately via vboxVDMACrCtlGetRc().
 */
static int vboxVDMACrCtlPost(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd)
{
    RTSEMEVENT hComplEvent;
    int rc = RTSemEventCreate(&hComplEvent);
    AssertRC(rc);
    if(RT_SUCCESS(rc))
    {
        rc = vboxVDMACrCtlPostAsync(pVGAState, pCmd, cbCmd, vboxVDMACrCtlCbSetEvent, (void*)hComplEvent);
#ifdef DEBUG_misha
        AssertRC(rc);
#endif
        if (RT_SUCCESS(rc))
        {
            rc = RTSemEventWaitNoResume(hComplEvent, RT_INDEFINITE_WAIT);
            AssertRC(rc);
            if(RT_SUCCESS(rc))
            {
                RTSemEventDestroy(hComplEvent);
            }
            /* NOTE(review): when the wait is interrupted the semaphore is
             * intentionally NOT destroyed - the pending completion callback
             * would otherwise signal a dead handle; the handle leaks. Confirm. */
        }
        else
        {
            /* the command is completed */
            RTSemEventDestroy(hComplEvent);
        }
    }
    return rc;
}
1076
/** Completion context for the synchronous HGCM control submission below.
 *  ("CYNC" is a historical typo for "SYNC"; the identifier is kept as-is.) */
typedef struct VDMA_VBVA_CTL_CYNC_COMPLETION
{
    int rc;            /**< Result delivered by the completion callback. */
    RTSEMEVENT hEvent; /**< Signalled once the control completes. */
} VDMA_VBVA_CTL_CYNC_COMPLETION;
1082
1083static DECLCALLBACK(void) vboxVDMACrHgcmSubmitSyncCompletion(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
1084{
1085 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvCompletion;
1086 pData->rc = rc;
1087 rc = RTSemEventSignal(pData->hEvent);
1088 if (!RT_SUCCESS(rc))
1089 WARN(("RTSemEventSignal failed %d\n", rc));
1090}
1091
1092static int vboxVDMACrHgcmSubmitSync(struct VBOXVDMAHOST *pVdma, VBOXCRCMDCTL* pCtl, uint32_t cbCtl)
1093{
1094 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
1095 Data.rc = VERR_NOT_IMPLEMENTED;
1096 int rc = RTSemEventCreate(&Data.hEvent);
1097 if (!RT_SUCCESS(rc))
1098 {
1099 WARN(("RTSemEventCreate failed %d\n", rc));
1100 return rc;
1101 }
1102
1103 PVGASTATE pVGAState = pVdma->pVGAState;
1104 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCtl, cbCtl, vboxVDMACrHgcmSubmitSyncCompletion, &Data);
1105 if (RT_SUCCESS(rc))
1106 {
1107 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
1108 if (RT_SUCCESS(rc))
1109 {
1110 rc = Data.rc;
1111 if (!RT_SUCCESS(rc))
1112 {
1113 WARN(("pfnCrHgcmCtlSubmit command failed %d\n", rc));
1114 }
1115
1116 }
1117 else
1118 WARN(("RTSemEventWait failed %d\n", rc));
1119 }
1120 else
1121 WARN(("pfnCrHgcmCtlSubmit failed %d\n", rc));
1122
1123
1124 RTSemEventDestroy(Data.hEvent);
1125
1126 return rc;
1127}
1128
/** Synchronously submits a GHH_DISABLE control on behalf of the host.
 *  Only enmType of the stack-allocated control is initialized; presumably the
 *  remaining fields are ignored for this control type -
 *  TODO(review): confirm vdmaVBVACtlSubmitSync does not read them. */
static int vdmaVBVACtlDisableSync(PVBOXVDMAHOST pVdma)
{
    VBVAEXHOSTCTL HCtl;
    HCtl.enmType = VBVAEXHOSTCTL_TYPE_GHH_DISABLE;
    return vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
}
1135
/**
 * Iterator callback handing the HGCM service the host controls still queued
 * in the command VBVA, one control per invocation.
 *
 * Each call first completes the control returned by the previous call (with
 * prevCmdRc); the very first call (pCurRemainingHostCtl == NULL) instead
 * disables the command VBVA so all subsequent host controls travel over HGCM.
 *
 * @returns Pointer to the next pending control body and its size in *pcbCtl,
 *          or NULL (*pcbCtl = 0) once the queue is drained.
 */
static DECLCALLBACK(uint8_t*) vboxVDMACrHgcmHandleEnableRemainingHostCommand(HVBOXCRCMDCTL_REMAINING_HOST_COMMAND hClient, uint32_t *pcbCtl, int prevCmdRc)
{
    struct VBOXVDMAHOST *pVdma = hClient;
    if (!pVdma->pCurRemainingHostCtl)
    {
        /* disable VBVA, all subsequent host commands will go HGCM way */
        VBoxVBVAExHSDisable(&pVdma->CmdVbva);
    }
    else
    {
        /* Complete the control handed out on the previous iteration. */
        VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pVdma->pCurRemainingHostCtl, prevCmdRc);
    }

    pVdma->pCurRemainingHostCtl = VBoxVBVAExHPCheckHostCtlOnDisable(&pVdma->CmdVbva);
    if (pVdma->pCurRemainingHostCtl)
    {
        *pcbCtl = pVdma->pCurRemainingHostCtl->u.cmd.cbCmd;
        return pVdma->pCurRemainingHostCtl->u.cmd.pu8Cmd;
    }

    *pcbCtl = 0;
    return NULL;
}
1159
/** Invoked by the HGCM service once termination preparations are finished;
 *  only sanity-checks that the worker is in the expected terminating state. */
static DECLCALLBACK(void) vboxVDMACrHgcmNotifyTerminatingDoneCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient)
{
    struct VBOXVDMAHOST *pVdma = hClient;
    Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pVdma->Thread.u32State == VBOXVDMATHREAD_STATE_TERMINATING);
}
1166
/**
 * Invoked when the HGCM service is about to unload: synchronously flushes the
 * worker thread via an HH_ON_HGCM_UNLOAD control, then hands back the
 * "remaining host command" iterator used on the subsequent re-enable.
 *
 * VERR_INVALID_STATE (command VBVA not enabled) is mapped to success.
 */
static DECLCALLBACK(int) vboxVDMACrHgcmNotifyTerminatingCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient, VBOXCRCMDCTL_HGCMENABLE_DATA *pHgcmEnableData)
{
    struct VBOXVDMAHOST *pVdma = hClient;
    VBVAEXHOSTCTL HCtl;
    HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD;
    int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);

    /* Always report the iterator, even on failure. */
    pHgcmEnableData->hRHCmd = pVdma;
    pHgcmEnableData->pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;

    if (RT_FAILURE(rc))
    {
        if (rc == VERR_INVALID_STATE)
            rc = VINF_SUCCESS;
        else
            WARN(("vdmaVBVACtlSubmitSync failed %d\n", rc));
    }

    return rc;
}
1187
1188static int vboxVDMACrHgcmHandleEnable(struct VBOXVDMAHOST *pVdma)
1189{
1190 VBOXCRCMDCTL_ENABLE Enable;
1191 Enable.Hdr.enmType = VBOXCRCMDCTL_TYPE_ENABLE;
1192 Enable.Data.hRHCmd = pVdma;
1193 Enable.Data.pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1194
1195 int rc = vboxVDMACrHgcmSubmitSync(pVdma, &Enable.Hdr, sizeof (Enable));
1196 Assert(!pVdma->pCurRemainingHostCtl);
1197 if (RT_SUCCESS(rc))
1198 {
1199 Assert(!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1200 return VINF_SUCCESS;
1201 }
1202
1203 Assert(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1204 WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));
1205
1206 return rc;
1207}
1208
1209static int vdmaVBVAEnableProcess(struct VBOXVDMAHOST *pVdma, uint32_t u32Offset)
1210{
1211 if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1212 {
1213 WARN(("vdma VBVA is already enabled\n"));
1214 return VERR_INVALID_STATE;
1215 }
1216
1217 VBVABUFFER *pVBVA = (VBVABUFFER *)HGSMIOffsetToPointerHost(pVdma->pHgsmi, u32Offset);
1218 if (!pVBVA)
1219 {
1220 WARN(("invalid offset %d\n", u32Offset));
1221 return VERR_INVALID_PARAMETER;
1222 }
1223
1224 if (!pVdma->CrSrvInfo.pfnEnable)
1225 {
1226#ifdef DEBUG_misha
1227 WARN(("pfnEnable is NULL\n"));
1228 return VERR_NOT_SUPPORTED;
1229#endif
1230 }
1231
1232 int rc = VBoxVBVAExHSEnable(&pVdma->CmdVbva, pVBVA);
1233 if (RT_SUCCESS(rc))
1234 {
1235 VBOXCRCMDCTL_DISABLE Disable;
1236 Disable.Hdr.enmType = VBOXCRCMDCTL_TYPE_DISABLE;
1237 Disable.Data.hNotifyTerm = pVdma;
1238 Disable.Data.pfnNotifyTerm = vboxVDMACrHgcmNotifyTerminatingCb;
1239 Disable.Data.pfnNotifyTermDone = vboxVDMACrHgcmNotifyTerminatingDoneCb;
1240 rc = vboxVDMACrHgcmSubmitSync(pVdma, &Disable.Hdr, sizeof (Disable));
1241 if (RT_SUCCESS(rc))
1242 {
1243 PVGASTATE pVGAState = pVdma->pVGAState;
1244 VBOXCRCMD_SVRENABLE_INFO Info;
1245 Info.hCltScr = pVGAState->pDrv;
1246 Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
1247 Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
1248 Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
1249 rc = pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
1250 if (RT_SUCCESS(rc))
1251 return VINF_SUCCESS;
1252 else
1253 WARN(("pfnEnable failed %d\n", rc));
1254
1255 vboxVDMACrHgcmHandleEnable(pVdma);
1256 }
1257 else
1258 WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));
1259
1260 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1261 }
1262 else
1263 WARN(("VBoxVBVAExHSEnable failed %d\n", rc));
1264
1265 return rc;
1266}
1267
/**
 * Disables command-VBVA processing.
 *
 * @param pVdma         The VDMA host state.
 * @param fDoHgcmEnable Whether to switch host controls back to the HGCM
 *                      channel (draining queued ones first, see
 *                      vboxVDMACrHgcmHandleEnable).
 * @returns VBox status; success if the VBVA was already disabled.
 */
static int vdmaVBVADisableProcess(struct VBOXVDMAHOST *pVdma, bool fDoHgcmEnable)
{
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    {
        Log(("vdma VBVA is already disabled\n"));
        return VINF_SUCCESS;
    }

    /* NOTE(review): pfnDisable is invoked unconditionally - assumes the 3D
     * backend populated CrSrvInfo on this path; confirm. */
    int rc = pVdma->CrSrvInfo.pfnDisable(pVdma->CrSrvInfo.hSvr);
    if (RT_SUCCESS(rc))
    {
        if (fDoHgcmEnable)
        {
            /* disable is a bit tricky
             * we need to ensure the host ctl commands do not come out of order
             * and do not come over HGCM channel until after it is enabled */
            rc = vboxVDMACrHgcmHandleEnable(pVdma);
            if (RT_SUCCESS(rc))
                return rc;

            /* Rollback: re-enable the 3D server; its status is ignored here. */
            PVGASTATE pVGAState = pVdma->pVGAState;
            VBOXCRCMD_SVRENABLE_INFO Info;
            Info.hCltScr = pVGAState->pDrv;
            Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
            Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
            Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
            pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
        }
    }
    else
        WARN(("pfnDisable failed %d\n", rc));

    return rc;
}
1302
1303static int vboxVDMACrHostCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd, bool *pfContinue)
1304{
1305 *pfContinue = true;
1306
1307 switch (pCmd->enmType)
1308 {
1309 case VBVAEXHOSTCTL_TYPE_HH_SAVESTATE:
1310 {
1311 PVGASTATE pVGAState = pVdma->pVGAState;
1312 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1313 int rc = VBoxVBVAExHSSaveState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM);
1314 if (RT_FAILURE(rc))
1315 {
1316 WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
1317 return rc;
1318 }
1319 return pVdma->CrSrvInfo.pfnSaveState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM);
1320 }
1321 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE:
1322 {
1323 PVGASTATE pVGAState = pVdma->pVGAState;
1324 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1325 int rc = VBoxVBVAExHSLoadState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1326 if (RT_FAILURE(rc))
1327 {
1328 WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
1329 return rc;
1330 }
1331 return pVdma->CrSrvInfo.pfnLoadState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1332 }
1333 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1334 {
1335 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1336 {
1337 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1338 return VERR_INVALID_STATE;
1339 }
1340 return pVdma->CrSrvInfo.pfnHostCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1341 }
1342 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1343 {
1344 int rc = vdmaVBVADisableProcess(pVdma, true);
1345 if (RT_FAILURE(rc))
1346 {
1347 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1348 return rc;
1349 }
1350
1351 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
1352 }
1353 case VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD:
1354 {
1355 int rc = vdmaVBVADisableProcess(pVdma, false);
1356 if (RT_FAILURE(rc))
1357 {
1358 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1359 return rc;
1360 }
1361
1362 rc = VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, true);
1363 if (RT_FAILURE(rc))
1364 {
1365 WARN(("VBoxVDMAThreadTerm failed %d\n", rc));
1366 return rc;
1367 }
1368
1369 *pfContinue = false;
1370 return VINF_SUCCESS;
1371 }
1372 default:
1373 WARN(("unexpected host ctl type %d\n", pCmd->enmType));
1374 return VERR_INVALID_PARAMETER;
1375 }
1376}
1377
/**
 * Applies one guest resize request entry to every screen selected in its
 * target map, then informs the 3D server.
 *
 * A VBVA_SCREEN_F_DISABLED entry zeroes the screen info (keeping the view
 * index) before applying it.
 *
 * @returns VBox status code; stops at the first failing screen.
 */
static int vboxVDMACrGuestCtlResizeEntryProcess(struct VBOXVDMAHOST *pVdma, VBOXCMDVBVA_RESIZE_ENTRY *pEntry)
{
    PVGASTATE pVGAState = pVdma->pVGAState;
    VBVAINFOSCREEN Screen = pEntry->Screen;
    VBVAINFOVIEW View;
    VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
    uint32_t u32ViewIndex = Screen.u32ViewIndex;
    uint16_t u16Flags = Screen.u16Flags;
    bool fDisable = false;

    memcpy(aTargetMap, pEntry->aTargetMap, sizeof (aTargetMap));

    /* Ignore bits beyond the configured monitor count. */
    ASMBitClearRange(aTargetMap, pVGAState->cMonitors, VBOX_VIDEO_MAX_SCREENS);

    if (u16Flags & VBVA_SCREEN_F_DISABLED)
    {
        fDisable = true;
        memset(&Screen, 0, sizeof (Screen));
        Screen.u32ViewIndex = u32ViewIndex;
        Screen.u16Flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED;
    }

    /* NOTE(review): '>' admits u32ViewIndex == cMonitors; looks like it should
     * be '>=' for a 0-based index - confirm against VBVAInfoView/VBVAInfoScreen
     * validation before changing. */
    if (u32ViewIndex > pVGAState->cMonitors)
    {
        if (u32ViewIndex != 0xffffffff)
        {
            WARN(("invalid view index\n"));
            return VERR_INVALID_PARAMETER;
        }
        else if (!fDisable)
        {
            WARN(("0xffffffff view index only valid for disable requests\n"));
            return VERR_INVALID_PARAMETER;
        }
    }

    View.u32ViewOffset = 0;
    View.u32ViewSize = Screen.u32LineSize * Screen.u32Height + Screen.u32StartOffset;
    View.u32MaxScreenSize = View.u32ViewSize + Screen.u32Width + 1; /* <- make VBVAInfoScreen logic (offEnd < pView->u32MaxScreenSize) happy */

    int rc = VINF_SUCCESS;

    /* Walk all monitors selected in the target bitmap. */
    for (int i = ASMBitFirstSet(aTargetMap, pVGAState->cMonitors);
            i >= 0;
            i = ASMBitNextSet(aTargetMap, pVGAState->cMonitors, i))
    {
        Screen.u32ViewIndex = i;

        VBVAINFOSCREEN CurScreen;
        VBVAINFOVIEW CurView;

        rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
        AssertRC(rc);

        /* Skip screens that already match the requested configuration. */
        if (!memcmp(&Screen, &CurScreen, sizeof (CurScreen)))
            continue;

        if (!fDisable || !CurView.u32ViewSize)
        {
            View.u32ViewIndex = Screen.u32ViewIndex;

            rc = VBVAInfoView(pVGAState, &View);
            if (RT_FAILURE(rc))
            {
                WARN(("VBVAInfoView failed %d\n", rc));
                break;
            }
        }

        rc = VBVAInfoScreen(pVGAState, &Screen);
        if (RT_FAILURE(rc))
        {
            WARN(("VBVAInfoScreen failed %d\n", rc));
            break;
        }
    }

    if (RT_FAILURE(rc))
        return rc;

    /* Report the resize to the 3D server with the original view index. */
    Screen.u32ViewIndex = u32ViewIndex;

    rc = pVdma->CrSrvInfo.pfnResize(pVdma->CrSrvInfo.hSvr, &Screen, aTargetMap);
    if (RT_FAILURE(rc))
        WARN(("pfnResize failed %d\n", rc));

    return rc;
}
1466
1467static int vboxVDMACrGuestCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd)
1468{
1469 VBVAEXHOSTCTL_TYPE enmType = pCmd->enmType;
1470 switch (enmType)
1471 {
1472 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1473 {
1474 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1475 {
1476 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1477 return VERR_INVALID_STATE;
1478 }
1479 return pVdma->CrSrvInfo.pfnGuestCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1480 }
1481 case VBVAEXHOSTCTL_TYPE_GHH_RESIZE:
1482 {
1483 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1484 {
1485 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1486 return VERR_INVALID_STATE;
1487 }
1488
1489 uint32_t cbCmd = pCmd->u.cmd.cbCmd;
1490
1491 if (cbCmd % sizeof (VBOXCMDVBVA_RESIZE_ENTRY))
1492 {
1493 WARN(("invalid buffer size\n"));
1494 return VERR_INVALID_PARAMETER;
1495 }
1496
1497 uint32_t cElements = cbCmd / sizeof (VBOXCMDVBVA_RESIZE_ENTRY);
1498 if (!cElements)
1499 {
1500 WARN(("invalid buffer size\n"));
1501 return VERR_INVALID_PARAMETER;
1502 }
1503
1504 VBOXCMDVBVA_RESIZE *pResize = (VBOXCMDVBVA_RESIZE*)pCmd->u.cmd.pu8Cmd;
1505
1506 int rc = VINF_SUCCESS;
1507
1508 for (uint32_t i = 0; i < cElements; ++i)
1509 {
1510 VBOXCMDVBVA_RESIZE_ENTRY *pEntry = &pResize->aEntries[i];
1511 rc = vboxVDMACrGuestCtlResizeEntryProcess(pVdma, pEntry);
1512 if (RT_FAILURE(rc))
1513 {
1514 WARN(("vboxVDMACrGuestCtlResizeEntryProcess failed %d\n", rc));
1515 break;
1516 }
1517 }
1518 return rc;
1519 }
1520 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE:
1521 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED:
1522 {
1523 VBVAENABLE *pEnable = (VBVAENABLE *)pCmd->u.cmd.pu8Cmd;
1524 Assert(pCmd->u.cmd.cbCmd == sizeof (VBVAENABLE));
1525 uint32_t u32Offset = pEnable->u32Offset;
1526 int rc = vdmaVBVAEnableProcess(pVdma, u32Offset);
1527 if (!RT_SUCCESS(rc))
1528 {
1529 WARN(("vdmaVBVAEnableProcess failed %d\n", rc));
1530 return rc;
1531 }
1532
1533 if (enmType == VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED)
1534 {
1535 rc = VBoxVBVAExHPPause(&pVdma->CmdVbva);
1536 if (!RT_SUCCESS(rc))
1537 {
1538 WARN(("VBoxVBVAExHPPause failed %d\n", rc));
1539 return rc;
1540 }
1541 }
1542
1543 return VINF_SUCCESS;
1544 }
1545 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1546 {
1547 int rc = vdmaVBVADisableProcess(pVdma, true);
1548 if (RT_FAILURE(rc))
1549 {
1550 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1551 return rc;
1552 }
1553
1554 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
1555 }
1556 default:
1557 WARN(("unexpected ctl type %d\n", pCmd->enmType));
1558 return VERR_INVALID_PARAMETER;
1559 }
1560}
1561
1562/**
1563 * @param fIn - whether this is a page in or out op.
1564 * the direction is VRA#M - related, so fIn == true - transfer to VRAM); false - transfer from VRAM
1565 */
1566static int vboxVDMACrCmdVbvaProcessPagingEl(PPDMDEVINS pDevIns, VBOXCMDVBVAPAGEIDX iPage, uint8_t *pu8Vram, bool fIn)
1567{
1568 RTGCPHYS phPage = (RTGCPHYS)(iPage << PAGE_SHIFT);
1569 PGMPAGEMAPLOCK Lock;
1570 int rc;
1571
1572 if (fIn)
1573 {
1574 const void * pvPage;
1575 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPage, &Lock);
1576 if (!RT_SUCCESS(rc))
1577 {
1578 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d", rc));
1579 return rc;
1580 }
1581
1582 memcpy(pu8Vram, pvPage, PAGE_SIZE);
1583
1584 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1585 }
1586 else
1587 {
1588 void * pvPage;
1589 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvPage, &Lock);
1590 if (!RT_SUCCESS(rc))
1591 {
1592 WARN(("PDMDevHlpPhysGCPhys2CCPtr failed %d", rc));
1593 return rc;
1594 }
1595
1596 memcpy(pvPage, pu8Vram, PAGE_SIZE);
1597
1598 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1599 }
1600
1601 return VINF_SUCCESS;
1602}
1603
1604static int vboxVDMACrCmdVbvaProcessPagingEls(PPDMDEVINS pDevIns, const VBOXCMDVBVAPAGEIDX *piPages, uint32_t cPages, uint8_t *pu8Vram, bool fIn)
1605{
1606 for (uint32_t i = 0; i < cPages; ++i, pu8Vram += PAGE_SIZE)
1607 {
1608 int rc = vboxVDMACrCmdVbvaProcessPagingEl(pDevIns, piPages[i], pu8Vram, fIn);
1609 if (!RT_SUCCESS(rc))
1610 {
1611 WARN(("vboxVDMACrCmdVbvaProcessPagingEl failed %d", rc));
1612 return rc;
1613 }
1614 }
1615
1616 return VINF_SUCCESS;
1617}
1618
1619static int8_t vboxVDMACrCmdVbvaPagingDataInit(PVGASTATE pVGAState, const VBOXCMDVBVA_HDR *pHdr, const VBOXCMDVBVA_PAGING_TRANSFER_DATA *pData, uint32_t cbCmd,
1620 const VBOXCMDVBVAPAGEIDX **ppPages, VBOXCMDVBVAPAGEIDX *pcPages,
1621 uint8_t **ppu8Vram, bool *pfIn)
1622{
1623 if (cbCmd < sizeof (VBOXCMDVBVA_PAGING_TRANSFER))
1624 {
1625 WARN(("cmd too small"));
1626 return -1;
1627 }
1628
1629 VBOXCMDVBVAPAGEIDX cPages = cbCmd - RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers);
1630 if (cPages % sizeof (VBOXCMDVBVAPAGEIDX))
1631 {
1632 WARN(("invalid cmd size"));
1633 return -1;
1634 }
1635 cPages /= sizeof (VBOXCMDVBVAPAGEIDX);
1636
1637 VBOXCMDVBVAOFFSET offVRAM = pData->Alloc.u.offVRAM;
1638 if (offVRAM & PAGE_OFFSET_MASK)
1639 {
1640 WARN(("offVRAM address is not on page boundary\n"));
1641 return -1;
1642 }
1643 const VBOXCMDVBVAPAGEIDX *pPages = pData->aPageNumbers;
1644
1645 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1646 uint8_t *pu8VramMax = pu8VramBase + pVGAState->vram_size;
1647 if (offVRAM >= pVGAState->vram_size)
1648 {
1649 WARN(("invalid vram offset"));
1650 return -1;
1651 }
1652
1653 if (offVRAM + (cPages << PAGE_SHIFT) >= pVGAState->vram_size)
1654 {
1655 WARN(("invalid cPages"));
1656 return -1;
1657 }
1658
1659 uint8_t *pu8Vram = pu8VramBase + offVRAM;
1660 bool fIn = !!(pHdr->u8Flags & VBOXCMDVBVA_OPF_PAGING_TRANSFER_IN);
1661
1662 *ppPages = pPages;
1663 *pcPages = cPages;
1664 *ppu8Vram = pu8Vram;
1665 *pfIn = fIn;
1666 return 0;
1667}
1668
1669static int8_t vboxVDMACrCmdVbvaPagingFill(PVGASTATE pVGAState, VBOXCMDVBVA_PAGING_FILL *pFill)
1670{
1671 VBOXCMDVBVAOFFSET offVRAM = pFill->offVRAM;
1672 if (offVRAM & PAGE_OFFSET_MASK)
1673 {
1674 WARN(("offVRAM address is not on page boundary\n"));
1675 return -1;
1676 }
1677
1678 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1679 uint8_t *pu8VramMax = pu8VramBase + pVGAState->vram_size;
1680 if (offVRAM >= pVGAState->vram_size)
1681 {
1682 WARN(("invalid vram offset"));
1683 return -1;
1684 }
1685
1686 uint32_t cbFill = pFill->u32CbFill;
1687
1688 if (offVRAM + cbFill >= pVGAState->vram_size)
1689 {
1690 WARN(("invalid cPages"));
1691 return -1;
1692 }
1693
1694 uint32_t *pu32Vram = (uint32_t*)(pu8VramBase + offVRAM);
1695 uint32_t u32Color = pFill->u32Pattern;
1696
1697 Assert(!(cbFill % 4));
1698 for (uint32_t i = 0; i < cbFill / 4; ++i)
1699 {
1700 pu32Vram[i] = u32Color;
1701 }
1702
1703 return 0;
1704}
1705
1706static int8_t vboxVDMACrCmdVbvaProcessCmdData(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
1707{
1708 switch (pCmd->u8OpCode)
1709 {
1710 case VBOXCMDVBVA_OPTYPE_NOPCMD:
1711 return 0;
1712 case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
1713 {
1714 PVGASTATE pVGAState = pVdma->pVGAState;
1715 const VBOXCMDVBVAPAGEIDX *pPages;
1716 uint32_t cPages;
1717 uint8_t *pu8Vram;
1718 bool fIn;
1719 int8_t i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pCmd, &((VBOXCMDVBVA_PAGING_TRANSFER*)pCmd)->Data, cbCmd,
1720 &pPages, &cPages,
1721 &pu8Vram, &fIn);
1722 if (i8Result < 0)
1723 {
1724 WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
1725 return i8Result;
1726 }
1727
1728 PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
1729 int rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pPages, cPages, pu8Vram, fIn);
1730 if (!RT_SUCCESS(rc))
1731 {
1732 WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
1733 return -1;
1734 }
1735
1736 return 0;
1737 }
1738 case VBOXCMDVBVA_OPTYPE_PAGING_FILL:
1739 {
1740 PVGASTATE pVGAState = pVdma->pVGAState;
1741 if (cbCmd != sizeof (VBOXCMDVBVA_PAGING_FILL))
1742 {
1743 WARN(("cmd too small"));
1744 return -1;
1745 }
1746
1747 return vboxVDMACrCmdVbvaPagingFill(pVGAState, (VBOXCMDVBVA_PAGING_FILL*)pCmd);
1748 }
1749 default:
1750 return pVdma->CrSrvInfo.pfnCmd(pVdma->CrSrvInfo.hSvr, pCmd, cbCmd);
1751 }
1752}
1753
1754#if 0
1755typedef struct VBOXCMDVBVA_PAGING_TRANSFER
1756{
1757 VBOXCMDVBVA_HDR Hdr;
1758 /* for now can only contain offVRAM.
1759 * paging transfer can NOT be initiated for allocations having host 3D object (hostID) associated */
1760 VBOXCMDVBVA_ALLOCINFO Alloc;
1761 uint32_t u32Reserved;
1762 VBOXCMDVBVA_SYSMEMEL aSysMem[1];
1763} VBOXCMDVBVA_PAGING_TRANSFER;
1764#endif
1765
/* Compile-time layout assumptions relied on by the sysmem/paging command
 * parsing code below (fixed header size, 4-byte page indices, page-aligned
 * index arrays). */
AssertCompile(sizeof (VBOXCMDVBVA_HDR) == 8);
AssertCompile(sizeof (VBOXCMDVBVA_ALLOCINFO) == 4);
AssertCompile(sizeof (VBOXCMDVBVAPAGEIDX) == 4);
AssertCompile(!(PAGE_SIZE % sizeof (VBOXCMDVBVAPAGEIDX)));

/** Number of sysmem elements fitting into a single guest page. */
#define VBOXCMDVBVA_NUM_SYSMEMEL_PER_PAGE (PAGE_SIZE / sizeof (VBOXCMDVBVA_SYSMEMEL))
1772
1773static int8_t vboxVDMACrCmdVbvaProcess(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
1774{
1775 switch (pCmd->u8OpCode)
1776 {
1777 case VBOXCMDVBVA_OPTYPE_SYSMEMCMD:
1778 {
1779 if (cbCmd < sizeof (VBOXCMDVBVA_SYSMEMCMD))
1780 {
1781 WARN(("invalid command size"));
1782 return -1;
1783 }
1784 VBOXCMDVBVA_SYSMEMCMD *pSysmemCmd = (VBOXCMDVBVA_SYSMEMCMD*)pCmd;
1785 const VBOXCMDVBVA_HDR *pRealCmdHdr;
1786 uint32_t cbRealCmd = pCmd->u8Flags;
1787 cbRealCmd |= (pCmd->u.u8PrimaryID << 8);
1788 if (cbRealCmd < sizeof (VBOXCMDVBVA_HDR))
1789 {
1790 WARN(("invalid sysmem cmd size"));
1791 return -1;
1792 }
1793
1794 RTGCPHYS phCmd = (RTGCPHYS)pSysmemCmd->phCmd;
1795
1796 PGMPAGEMAPLOCK Lock;
1797 PVGASTATE pVGAState = pVdma->pVGAState;
1798 PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
1799 const void * pvCmd;
1800 int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
1801 if (!RT_SUCCESS(rc))
1802 {
1803 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
1804 return -1;
1805 }
1806
1807 Assert((phCmd & PAGE_OFFSET_MASK) == (((uintptr_t)pvCmd) & PAGE_OFFSET_MASK));
1808
1809 uint32_t cbCmdPart = PAGE_SIZE - (((uintptr_t)pvCmd) & PAGE_OFFSET_MASK);
1810
1811 if (cbRealCmd <= cbCmdPart)
1812 {
1813 pRealCmdHdr = (const VBOXCMDVBVA_HDR *)pvCmd;
1814 uint8_t i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pRealCmdHdr, cbRealCmd);
1815 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1816 return i8Result;
1817 }
1818
1819 VBOXCMDVBVA_HDR Hdr;
1820 const void *pvCurCmdTail;
1821 uint32_t cbCurCmdTail;
1822 if (cbCmdPart >= sizeof (*pRealCmdHdr))
1823 {
1824 pRealCmdHdr = (const VBOXCMDVBVA_HDR *)pvCmd;
1825 pvCurCmdTail = (const void*)(pRealCmdHdr + 1);
1826 cbCurCmdTail = cbCmdPart - sizeof (*pRealCmdHdr);
1827 }
1828 else
1829 {
1830 memcpy(&Hdr, pvCmd, cbCmdPart);
1831 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1832 phCmd += cbCmdPart;
1833 Assert(!(phCmd & PAGE_OFFSET_MASK));
1834 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
1835 if (!RT_SUCCESS(rc))
1836 {
1837 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
1838 return -1;
1839 }
1840
1841 cbCmdPart = sizeof (*pRealCmdHdr) - cbCmdPart;
1842 memcpy(((uint8_t*)(&Hdr)) + cbCmdPart, pvCmd, cbCmdPart);
1843 pvCurCmdTail = (const void*)(((uint8_t*)pvCmd) + cbCmdPart);
1844 cbCurCmdTail = PAGE_SIZE - cbCmdPart;
1845 }
1846
1847 if (cbCurCmdTail > cbRealCmd - sizeof (*pRealCmdHdr))
1848 cbCurCmdTail = cbRealCmd - sizeof (*pRealCmdHdr);
1849
1850 int8_t i8Result = 0;
1851
1852 switch (pRealCmdHdr->u8OpCode)
1853 {
1854 case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
1855 {
1856 const uint32_t *pPages;
1857 uint32_t cPages;
1858 uint8_t *pu8Vram;
1859 bool fIn;
1860 i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pRealCmdHdr, (const VBOXCMDVBVA_PAGING_TRANSFER_DATA*)pvCurCmdTail, cbRealCmd,
1861 &pPages, &cPages,
1862 &pu8Vram, &fIn);
1863 if (i8Result < 0)
1864 {
1865 WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
1866 /* we need to break, not return, to ensure currently locked page is released */
1867 break;
1868 }
1869
1870 if (cbCurCmdTail & 3)
1871 {
1872 WARN(("command is not alligned properly %d", cbCurCmdTail));
1873 i8Result = -1;
1874 /* we need to break, not return, to ensure currently locked page is released */
1875 break;
1876 }
1877
1878 uint32_t cCurPages = cbCurCmdTail / sizeof (VBOXCMDVBVAPAGEIDX);
1879 Assert(cCurPages < cPages);
1880
1881 do
1882 {
1883 rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pPages, cCurPages, pu8Vram, fIn);
1884 if (!RT_SUCCESS(rc))
1885 {
1886 WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
1887 i8Result = -1;
1888 /* we need to break, not return, to ensure currently locked page is released */
1889 break;
1890 }
1891
1892 Assert(cPages >= cCurPages);
1893 cPages -= cCurPages;
1894
1895 if (!cPages)
1896 break;
1897
1898 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1899
1900 Assert(!(phCmd & PAGE_OFFSET_MASK));
1901
1902 phCmd += PAGE_SIZE;
1903 pu8Vram += (cCurPages << PAGE_SHIFT);
1904
1905 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
1906 if (!RT_SUCCESS(rc))
1907 {
1908 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
1909 /* the page is not locked, return */
1910 return -1;
1911 }
1912
1913 cCurPages = PAGE_SIZE / sizeof (VBOXCMDVBVAPAGEIDX);
1914 if (cCurPages > cPages)
1915 cCurPages = cPages;
1916 } while (1);
1917 break;
1918 }
1919 default:
1920 WARN(("command can not be splitted"));
1921 i8Result = -1;
1922 break;
1923 }
1924
1925 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1926 return i8Result;
1927 }
1928 case VBOXCMDVBVA_OPTYPE_COMPLEXCMD:
1929 {
1930 Assert(cbCmd >= sizeof (VBOXCMDVBVA_HDR));
1931 ++pCmd;
1932 cbCmd -= sizeof (*pCmd);
1933 uint32_t cbCurCmd = 0;
1934 for ( ; cbCmd; cbCmd -= cbCurCmd, pCmd = (VBOXCMDVBVA_HDR*)(((uint8_t*)pCmd) + cbCurCmd))
1935 {
1936 if (cbCmd < sizeof (VBOXCMDVBVA_HDR))
1937 {
1938 WARN(("invalid command size"));
1939 return -1;
1940 }
1941
1942 cbCurCmd = pCmd->u2.complexCmdEl.u16CbCmdHost;
1943 if (cbCmd < cbCurCmd)
1944 {
1945 WARN(("invalid command size"));
1946 return -1;
1947 }
1948
1949 int8_t i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCurCmd);
1950 if (i8Result < 0)
1951 {
1952 WARN(("vboxVDMACrCmdVbvaProcess failed"));
1953 return i8Result;
1954 }
1955 }
1956 return 0;
1957 }
1958 default:
1959 return vboxVDMACrCmdVbvaProcessCmdData(pVdma, pCmd, cbCmd);
1960 }
1961}
1962
1963static void vboxVDMACrCmdProcess(struct VBOXVDMAHOST *pVdma, uint8_t* pu8Cmd, uint32_t cbCmd)
1964{
1965 if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
1966 return;
1967
1968 if (cbCmd < sizeof (VBOXCMDVBVA_HDR))
1969 {
1970 WARN(("invalid command size"));
1971 return;
1972 }
1973
1974 PVBOXCMDVBVA_HDR pCmd = (PVBOXCMDVBVA_HDR)pu8Cmd;
1975
1976 /* check if the command is cancelled */
1977 if (!ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_IN_PROGRESS, VBOXCMDVBVA_STATE_SUBMITTED))
1978 {
1979 Assert(pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED);
1980 return;
1981 }
1982
1983 pCmd->u.i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCmd);
1984}
1985
1986static int vboxVDMACrCtlHgsmiSetup(struct VBOXVDMAHOST *pVdma)
1987{
1988 PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP pCmd = (PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP)
1989 vboxVDMACrCtlCreate (VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP, sizeof (*pCmd));
1990 int rc = VERR_NO_MEMORY;
1991 if (pCmd)
1992 {
1993 PVGASTATE pVGAState = pVdma->pVGAState;
1994 pCmd->pvVRamBase = pVGAState->vram_ptrR3;
1995 pCmd->cbVRam = pVGAState->vram_size;
1996 pCmd->pLed = &pVGAState->Led3D;
1997 rc = vboxVDMACrCtlPost(pVGAState, &pCmd->Hdr, sizeof (*pCmd));
1998 if (RT_SUCCESS(rc))
1999 {
2000 rc = vboxVDMACrCtlGetRc(&pCmd->Hdr);
2001 if (RT_SUCCESS(rc))
2002 pVdma->CrSrvInfo = pCmd->CrCmdServerInfo;
2003 else if (rc != VERR_NOT_SUPPORTED)
2004 WARN(("vboxVDMACrCtlGetRc returned %d\n", rc));
2005 }
2006 else
2007 WARN(("vboxVDMACrCtlPost failed %d\n", rc));
2008
2009 vboxVDMACrCtlRelease(&pCmd->Hdr);
2010 }
2011
2012 if (!RT_SUCCESS(rc))
2013 memset(&pVdma->CrSrvInfo, 0, sizeof (pVdma->CrSrvInfo));
2014
2015 return rc;
2016}
2017
2018static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer);
2019
2020/* check if this is external cmd to be passed to chromium backend */
/**
 * Checks whether a DMA command buffer carries a command that must be routed to
 * the chromium backend (or handled inline), and if so processes it.
 *
 * @returns VINF_NOT_SUPPORTED when the command is not one of ours (caller
 *          handles it), VINF_SUCCESS when consumed, or a failure status.
 */
static int vboxVDMACmdCheckCrCmd(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmdDr, uint32_t cbCmdDr)
{
    PVBOXVDMACMD pDmaCmd = NULL;
    uint32_t cbDmaCmd = 0;
    uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
    int rc = VINF_NOT_SUPPORTED;

    cbDmaCmd = pCmdDr->cbBuf;

    /* Locate the embedded VBOXVDMACMD: either directly after the descriptor
     * or at a guest-supplied VRAM offset. */
    if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
    {
        if (cbCmdDr < sizeof (*pCmdDr) + VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        if (cbDmaCmd < cbCmdDr - sizeof (*pCmdDr) - VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid command buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        pDmaCmd = VBOXVDMACBUF_DR_TAIL(pCmdDr, VBOXVDMACMD);
    }
    else if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
    {
        VBOXVIDEOOFFSET offBuf = pCmdDr->Location.offVramBuf;
        if (offBuf + cbDmaCmd > pVdma->pVGAState->vram_size)
        {
            AssertMsgFailed(("invalid command buffer data from offset!"));
            return VERR_INVALID_PARAMETER;
        }
        pDmaCmd = (VBOXVDMACMD*)(pvRam + offBuf);
    }

    if (pDmaCmd)
    {
        Assert(cbDmaCmd >= VBOXVDMACMD_HEADER_SIZE());
        uint32_t cbBody = VBOXVDMACMD_BODY_SIZE(cbDmaCmd);

        switch (pDmaCmd->enmType)
        {
            case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
            {
                PVBOXVDMACMD_CHROMIUM_CMD pCrCmd = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_CHROMIUM_CMD);
                if (cbBody < sizeof (*pCrCmd))
                {
                    AssertMsgFailed(("invalid chromium command buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }
                PVGASTATE pVGAState = pVdma->pVGAState;
                rc = VINF_SUCCESS;
                if (pVGAState->pDrv->pfnCrHgsmiCommandProcess)
                {
                    /* Completion happens asynchronously from the backend. */
                    VBoxSHGSMICommandMarkAsynchCompletion(pCmdDr);
                    pVGAState->pDrv->pfnCrHgsmiCommandProcess(pVGAState->pDrv, pCrCmd, cbBody);
                    break;
                }
                else
                {
                    Assert(0);
                }

                /* No backend: complete the command immediately. */
                int tmpRc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                AssertRC(tmpRc);
                break;
            }
            case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
            {
                PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
                if (cbBody < sizeof (*pTransfer))
                {
                    AssertMsgFailed(("invalid bpb transfer buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }

                rc = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, sizeof (*pTransfer));
                AssertRC(rc);
                if (RT_SUCCESS(rc))
                {
                    pCmdDr->rc = VINF_SUCCESS;
                    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                    AssertRC(rc);
                    rc = VINF_SUCCESS;
                }
                break;
            }
            default:
                break;
        }
    }
    return rc;
}
2115
2116int vboxVDMACrHgsmiCommandCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc)
2117{
2118 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2119 PHGSMIINSTANCE pIns = pVGAState->pHGSMI;
2120 VBOXVDMACMD *pDmaHdr = VBOXVDMACMD_FROM_BODY(pCmd);
2121 VBOXVDMACBUF_DR *pDr = VBOXVDMACBUF_DR_FROM_TAIL(pDmaHdr);
2122 AssertRC(rc);
2123 pDr->rc = rc;
2124
2125 Assert(pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
2126 rc = VBoxSHGSMICommandComplete(pIns, pDr);
2127 AssertRC(rc);
2128 return rc;
2129}
2130
2131int vboxVDMACrHgsmiControlCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CTL pCmd, int rc)
2132{
2133 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2134 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pCmdPrivate = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
2135 pCmdPrivate->rc = rc;
2136 if (pCmdPrivate->pfnCompletion)
2137 {
2138 pCmdPrivate->pfnCompletion(pVGAState, pCmd, pCmdPrivate->pvCompletion);
2139 }
2140 return VINF_SUCCESS;
2141}
2142
2143static int vboxVDMACmdExecBltPerform(PVBOXVDMAHOST pVdma,
2144 uint8_t *pvDstSurf, const uint8_t *pvSrcSurf,
2145 const PVBOXVDMA_SURF_DESC pDstDesc, const PVBOXVDMA_SURF_DESC pSrcDesc,
2146 const VBOXVDMA_RECTL * pDstRectl, const VBOXVDMA_RECTL * pSrcRectl)
2147{
2148 /* we do not support color conversion */
2149 Assert(pDstDesc->format == pSrcDesc->format);
2150 /* we do not support stretching */
2151 Assert(pDstRectl->height == pSrcRectl->height);
2152 Assert(pDstRectl->width == pSrcRectl->width);
2153 if (pDstDesc->format != pSrcDesc->format)
2154 return VERR_INVALID_FUNCTION;
2155 if (pDstDesc->width == pDstRectl->width
2156 && pSrcDesc->width == pSrcRectl->width
2157 && pSrcDesc->width == pDstDesc->width)
2158 {
2159 Assert(!pDstRectl->left);
2160 Assert(!pSrcRectl->left);
2161 uint32_t cbOff = pDstDesc->pitch * pDstRectl->top;
2162 uint32_t cbSize = pDstDesc->pitch * pDstRectl->height;
2163 memcpy(pvDstSurf + cbOff, pvSrcSurf + cbOff, cbSize);
2164 }
2165 else
2166 {
2167 uint32_t offDstLineStart = pDstRectl->left * pDstDesc->bpp >> 3;
2168 uint32_t offDstLineEnd = ((pDstRectl->left * pDstDesc->bpp + 7) >> 3) + ((pDstDesc->bpp * pDstRectl->width + 7) >> 3);
2169 uint32_t cbDstLine = offDstLineEnd - offDstLineStart;
2170 uint32_t offDstStart = pDstDesc->pitch * pDstRectl->top + offDstLineStart;
2171 Assert(cbDstLine <= pDstDesc->pitch);
2172 uint32_t cbDstSkip = pDstDesc->pitch;
2173 uint8_t * pvDstStart = pvDstSurf + offDstStart;
2174
2175 uint32_t offSrcLineStart = pSrcRectl->left * pSrcDesc->bpp >> 3;
2176 uint32_t offSrcLineEnd = ((pSrcRectl->left * pSrcDesc->bpp + 7) >> 3) + ((pSrcDesc->bpp * pSrcRectl->width + 7) >> 3);
2177 uint32_t cbSrcLine = offSrcLineEnd - offSrcLineStart;
2178 uint32_t offSrcStart = pSrcDesc->pitch * pSrcRectl->top + offSrcLineStart;
2179 Assert(cbSrcLine <= pSrcDesc->pitch);
2180 uint32_t cbSrcSkip = pSrcDesc->pitch;
2181 const uint8_t * pvSrcStart = pvSrcSurf + offSrcStart;
2182
2183 Assert(cbDstLine == cbSrcLine);
2184
2185 for (uint32_t i = 0; ; ++i)
2186 {
2187 memcpy (pvDstStart, pvSrcStart, cbDstLine);
2188 if (i == pDstRectl->height)
2189 break;
2190 pvDstStart += cbDstSkip;
2191 pvSrcStart += cbSrcSkip;
2192 }
2193 }
2194 return VINF_SUCCESS;
2195}
2196
2197static void vboxVDMARectlUnite(VBOXVDMA_RECTL * pRectl1, const VBOXVDMA_RECTL * pRectl2)
2198{
2199 if (!pRectl1->width)
2200 *pRectl1 = *pRectl2;
2201 else
2202 {
2203 int16_t x21 = pRectl1->left + pRectl1->width;
2204 int16_t x22 = pRectl2->left + pRectl2->width;
2205 if (pRectl1->left > pRectl2->left)
2206 {
2207 pRectl1->left = pRectl2->left;
2208 pRectl1->width = x21 < x22 ? x22 - pRectl1->left : x21 - pRectl1->left;
2209 }
2210 else if (x21 < x22)
2211 pRectl1->width = x22 - pRectl1->left;
2212
2213 x21 = pRectl1->top + pRectl1->height;
2214 x22 = pRectl2->top + pRectl2->height;
2215 if (pRectl1->top > pRectl2->top)
2216 {
2217 pRectl1->top = pRectl2->top;
2218 pRectl1->height = x21 < x22 ? x22 - pRectl1->top : x21 - pRectl1->top;
2219 }
2220 else if (x21 < x22)
2221 pRectl1->height = x22 - pRectl1->top;
2222 }
2223}
2224
2225/*
2226 * @return on success the number of bytes the command contained, otherwise - VERR_xxx error code
2227 */
static int vboxVDMACmdExecBlt(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt, uint32_t cbBuffer)
{
    /* Total command size: fixed part plus the variable-length sub-rectangle array. */
    const uint32_t cbBlt = VBOXVDMACMD_BODY_FIELD_OFFSET(uint32_t, VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects[pBlt->cDstSubRects]);
    Assert(cbBlt <= cbBuffer);
    if (cbBuffer < cbBlt)
        return VERR_INVALID_FUNCTION;

    /* we do not support stretching for now */
    Assert(pBlt->srcRectl.width == pBlt->dstRectl.width);
    Assert(pBlt->srcRectl.height == pBlt->dstRectl.height);
    if (pBlt->srcRectl.width != pBlt->dstRectl.width)
        return VERR_INVALID_FUNCTION;
    if (pBlt->srcRectl.height != pBlt->dstRectl.height)
        return VERR_INVALID_FUNCTION;
    Assert(pBlt->cDstSubRects);

    uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
    /* Union of all blitted destination rectangles; accumulated but not
     * consumed within this function. */
    VBOXVDMA_RECTL updateRectl = {0, 0, 0, 0};

    if (pBlt->cDstSubRects)
    {
        /* Blit each sub-rectangle, translating it by the top-left corner of the
         * enclosing dst/src rectangle when that corner is not at the origin. */
        VBOXVDMA_RECTL dstRectl, srcRectl;
        const VBOXVDMA_RECTL *pDstRectl, *pSrcRectl;
        for (uint32_t i = 0; i < pBlt->cDstSubRects; ++i)
        {
            pDstRectl = &pBlt->aDstSubRects[i];
            if (pBlt->dstRectl.left || pBlt->dstRectl.top)
            {
                dstRectl.left = pDstRectl->left + pBlt->dstRectl.left;
                dstRectl.top = pDstRectl->top + pBlt->dstRectl.top;
                dstRectl.width = pDstRectl->width;
                dstRectl.height = pDstRectl->height;
                pDstRectl = &dstRectl;
            }

            /* NOTE(review): the source deliberately(?) reuses the DESTINATION
             * sub-rectangle list - plausible since stretching is rejected above,
             * but confirm a separate source list was not intended. */
            pSrcRectl = &pBlt->aDstSubRects[i];
            if (pBlt->srcRectl.left || pBlt->srcRectl.top)
            {
                srcRectl.left = pSrcRectl->left + pBlt->srcRectl.left;
                srcRectl.top = pSrcRectl->top + pBlt->srcRectl.top;
                srcRectl.width = pSrcRectl->width;
                srcRectl.height = pSrcRectl->height;
                pSrcRectl = &srcRectl;
            }

            int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
                    &pBlt->dstDesc, &pBlt->srcDesc,
                    pDstRectl,
                    pSrcRectl);
            AssertRC(rc);
            if (!RT_SUCCESS(rc))
                return rc;

            vboxVDMARectlUnite(&updateRectl, pDstRectl);
        }
    }
    else
    {
        /* No sub-rectangles (only reachable in release builds, see the Assert
         * above): blit the whole dst/src rectangle pair in one go. */
        int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
                &pBlt->dstDesc, &pBlt->srcDesc,
                &pBlt->dstRectl,
                &pBlt->srcRectl);
        AssertRC(rc);
        if (!RT_SUCCESS(rc))
            return rc;

        vboxVDMARectlUnite(&updateRectl, &pBlt->dstRectl);
    }

    /* Bytes consumed, so the caller can advance to the next packed command. */
    return cbBlt;
}
2299
2300static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer)
2301{
2302 if (cbBuffer < sizeof (*pTransfer))
2303 return VERR_INVALID_PARAMETER;
2304
2305 PVGASTATE pVGAState = pVdma->pVGAState;
2306 uint8_t * pvRam = pVGAState->vram_ptrR3;
2307 PGMPAGEMAPLOCK SrcLock;
2308 PGMPAGEMAPLOCK DstLock;
2309 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
2310 const void * pvSrc;
2311 void * pvDst;
2312 int rc = VINF_SUCCESS;
2313 uint32_t cbTransfer = pTransfer->cbTransferSize;
2314 uint32_t cbTransfered = 0;
2315 bool bSrcLocked = false;
2316 bool bDstLocked = false;
2317 do
2318 {
2319 uint32_t cbSubTransfer = cbTransfer;
2320 if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
2321 {
2322 pvSrc = pvRam + pTransfer->Src.offVramBuf + cbTransfered;
2323 }
2324 else
2325 {
2326 RTGCPHYS phPage = pTransfer->Src.phBuf;
2327 phPage += cbTransfered;
2328 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvSrc, &SrcLock);
2329 AssertRC(rc);
2330 if (RT_SUCCESS(rc))
2331 {
2332 bSrcLocked = true;
2333 cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
2334 }
2335 else
2336 {
2337 break;
2338 }
2339 }
2340
2341 if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
2342 {
2343 pvDst = pvRam + pTransfer->Dst.offVramBuf + cbTransfered;
2344 }
2345 else
2346 {
2347 RTGCPHYS phPage = pTransfer->Dst.phBuf;
2348 phPage += cbTransfered;
2349 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvDst, &DstLock);
2350 AssertRC(rc);
2351 if (RT_SUCCESS(rc))
2352 {
2353 bDstLocked = true;
2354 cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
2355 }
2356 else
2357 {
2358 break;
2359 }
2360 }
2361
2362 if (RT_SUCCESS(rc))
2363 {
2364 memcpy(pvDst, pvSrc, cbSubTransfer);
2365 cbTransfer -= cbSubTransfer;
2366 cbTransfered += cbSubTransfer;
2367 }
2368 else
2369 {
2370 cbTransfer = 0; /* to break */
2371 }
2372
2373 if (bSrcLocked)
2374 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &SrcLock);
2375 if (bDstLocked)
2376 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &DstLock);
2377 } while (cbTransfer);
2378
2379 if (RT_SUCCESS(rc))
2380 return sizeof (*pTransfer);
2381 return rc;
2382}
2383
2384static int vboxVDMACmdExec(PVBOXVDMAHOST pVdma, const uint8_t *pvBuffer, uint32_t cbBuffer)
2385{
2386 do
2387 {
2388 Assert(pvBuffer);
2389 Assert(cbBuffer >= VBOXVDMACMD_HEADER_SIZE());
2390
2391 if (!pvBuffer)
2392 return VERR_INVALID_PARAMETER;
2393 if (cbBuffer < VBOXVDMACMD_HEADER_SIZE())
2394 return VERR_INVALID_PARAMETER;
2395
2396 PVBOXVDMACMD pCmd = (PVBOXVDMACMD)pvBuffer;
2397 uint32_t cbCmd = 0;
2398 switch (pCmd->enmType)
2399 {
2400 case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
2401 {
2402#ifdef VBOXWDDM_TEST_UHGSMI
2403 static int count = 0;
2404 static uint64_t start, end;
2405 if (count==0)
2406 {
2407 start = RTTimeNanoTS();
2408 }
2409 ++count;
2410 if (count==100000)
2411 {
2412 end = RTTimeNanoTS();
2413 float ems = (end-start)/1000000.f;
2414 LogRel(("100000 calls took %i ms, %i cps\n", (int)ems, (int)(100000.f*1000.f/ems) ));
2415 }
2416#endif
2417 /* todo: post the buffer to chromium */
2418 return VINF_SUCCESS;
2419 }
2420 case VBOXVDMACMD_TYPE_DMA_PRESENT_BLT:
2421 {
2422 const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_PRESENT_BLT);
2423 int cbBlt = vboxVDMACmdExecBlt(pVdma, pBlt, cbBuffer);
2424 Assert(cbBlt >= 0);
2425 Assert((uint32_t)cbBlt <= cbBuffer);
2426 if (cbBlt >= 0)
2427 {
2428 if ((uint32_t)cbBlt == cbBuffer)
2429 return VINF_SUCCESS;
2430 else
2431 {
2432 cbBuffer -= (uint32_t)cbBlt;
2433 pvBuffer -= cbBlt;
2434 }
2435 }
2436 else
2437 return cbBlt; /* error */
2438 break;
2439 }
2440 case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
2441 {
2442 const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
2443 int cbTransfer = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, cbBuffer);
2444 Assert(cbTransfer >= 0);
2445 Assert((uint32_t)cbTransfer <= cbBuffer);
2446 if (cbTransfer >= 0)
2447 {
2448 if ((uint32_t)cbTransfer == cbBuffer)
2449 return VINF_SUCCESS;
2450 else
2451 {
2452 cbBuffer -= (uint32_t)cbTransfer;
2453 pvBuffer -= cbTransfer;
2454 }
2455 }
2456 else
2457 return cbTransfer; /* error */
2458 break;
2459 }
2460 case VBOXVDMACMD_TYPE_DMA_NOP:
2461 return VINF_SUCCESS;
2462 case VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ:
2463 return VINF_SUCCESS;
2464 default:
2465 AssertBreakpoint();
2466 return VERR_INVALID_FUNCTION;
2467 }
2468 } while (1);
2469
2470 /* we should not be here */
2471 AssertBreakpoint();
2472 return VERR_INVALID_STATE;
2473}
2474
2475static DECLCALLBACK(int) vboxVDMAWorkerThread(RTTHREAD ThreadSelf, void *pvUser)
2476{
2477 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvUser;
2478 PVGASTATE pVGAState = pVdma->pVGAState;
2479 VBVAEXHOSTCONTEXT *pCmdVbva = &pVdma->CmdVbva;
2480 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
2481 uint8_t *pCmd;
2482 uint32_t cbCmd;
2483 int rc;
2484
2485 VBoxVDMAThreadNotifyConstructSucceeded(&pVdma->Thread, pvUser);
2486
2487 while (!VBoxVDMAThreadIsTerminating(&pVdma->Thread))
2488 {
2489 VBVAEXHOST_DATA_TYPE enmType = VBoxVBVAExHPDataGet(pCmdVbva, &pCmd, &cbCmd);
2490 switch (enmType)
2491 {
2492 case VBVAEXHOST_DATA_TYPE_CMD:
2493 vboxVDMACrCmdProcess(pVdma, pCmd, cbCmd);
2494 VBoxVBVAExHPDataCompleteCmd(pCmdVbva, cbCmd);
2495 VBVARaiseIrqNoWait(pVGAState, 0);
2496 break;
2497 case VBVAEXHOST_DATA_TYPE_GUESTCTL:
2498 rc = vboxVDMACrGuestCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd);
2499 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
2500 break;
2501 case VBVAEXHOST_DATA_TYPE_HOSTCTL:
2502 {
2503 bool fContinue = true;
2504 rc = vboxVDMACrHostCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd, &fContinue);
2505 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
2506 if (fContinue)
2507 break;
2508 }
2509 case VBVAEXHOST_DATA_TYPE_NO_DATA:
2510 rc = VBoxVDMAThreadEventWait(&pVdma->Thread, RT_INDEFINITE_WAIT);
2511 AssertRC(rc);
2512 break;
2513 default:
2514 WARN(("unexpected type %d\n", enmType));
2515 break;
2516 }
2517 }
2518
2519 VBoxVDMAThreadNotifyTerminatingSucceeded(&pVdma->Thread, pvUser);
2520
2521 return VINF_SUCCESS;
2522}
2523
/* Resolves the command buffer referenced by the descriptor (inline tail, VRAM
 * offset, or a locked guest physical page), executes it, and completes the
 * descriptor back to the guest via SHGSMI. */
static void vboxVDMACommandProcess(PVBOXVDMAHOST pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
    const uint8_t * pvBuf;
    PGMPAGEMAPLOCK Lock;
    int rc;
    bool bReleaseLocked = false;

    do
    {
        PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;

        if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
            /* The buffer is appended directly after the descriptor. */
            pvBuf = VBOXVDMACBUF_DR_TAIL(pCmd, const uint8_t);
        else if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
        {
            /* The buffer lives in VRAM at the given offset. */
            uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
            pvBuf = pvRam + pCmd->Location.offVramBuf;
        }
        else
        {
            /* The buffer is in guest physical memory; map its (single) page. */
            RTGCPHYS phPage = pCmd->Location.phBuf & ~0xfffULL;
            uint32_t offset = pCmd->Location.phBuf & 0xfff;
            Assert(offset + pCmd->cbBuf <= 0x1000);
            if (offset + pCmd->cbBuf > 0x1000)
            {
                /* @todo: more advanced mechanism of command buffer proc is actually needed */
                rc = VERR_INVALID_PARAMETER;
                break;
            }

            const void * pvPageBuf;
            rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPageBuf, &Lock);
            AssertRC(rc);
            if (!RT_SUCCESS(rc))
            {
                /* @todo: if (rc == VERR_PGM_PHYS_PAGE_RESERVED) -> fall back on using PGMPhysRead ?? */
                break;
            }

            pvBuf = (const uint8_t *)pvPageBuf;
            pvBuf += offset;

            /* Remember to drop the page mapping lock after execution. */
            bReleaseLocked = true;
        }

        rc = vboxVDMACmdExec(pVdma, pvBuf, pCmd->cbBuf);
        AssertRC(rc);

        if (bReleaseLocked)
            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
    } while (0);

    /* Report the execution status and complete the descriptor to the guest. */
    pCmd->rc = rc;

    rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
    AssertRC(rc);
}
2582
2583static void vboxVDMAControlProcess(PVBOXVDMAHOST pVdma, PVBOXVDMA_CTL pCmd)
2584{
2585 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
2586 pCmd->i32Result = VINF_SUCCESS;
2587 int rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
2588 AssertRC(rc);
2589}
2590
2591#endif /* #ifdef VBOX_WITH_CRHGSMI */
2592
2593#ifdef VBOX_VDMA_WITH_WATCHDOG
2594static DECLCALLBACK(void) vboxVDMAWatchDogTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
2595{
2596 VBOXVDMAHOST *pVdma = (VBOXVDMAHOST *)pvUser;
2597 PVGASTATE pVGAState = pVdma->pVGAState;
2598 VBVARaiseIrq(pVGAState, HGSMIHOSTFLAGS_WATCHDOG);
2599}
2600
2601static int vboxVDMAWatchDogCtl(struct VBOXVDMAHOST *pVdma, uint32_t cMillis)
2602{
2603 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
2604 if (cMillis)
2605 TMTimerSetMillies(pVdma->WatchDogTimer, cMillis);
2606 else
2607 TMTimerStop(pVdma->WatchDogTimer);
2608 return VINF_SUCCESS;
2609}
2610#endif
2611
/* Allocates and initializes the VDMA host state and attaches it to the VGA
 * state. NOTE(review): cPipeElements is currently unused. */
int vboxVDMAConstruct(PVGASTATE pVGAState, uint32_t cPipeElements)
{
    int rc;
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)RTMemAllocZ(sizeof(*pVdma));
    Assert(pVdma);
    if (pVdma)
    {
        pVdma->pHgsmi = pVGAState->pHGSMI;
        pVdma->pVGAState = pVGAState;

#ifdef VBOX_VDMA_WITH_WATCHDOG
        /* Watchdog timer failure is tolerated (only AssertRC'd). */
        rc = PDMDevHlpTMTimerCreate(pVGAState->pDevInsR3, TMCLOCK_REAL, vboxVDMAWatchDogTimer,
                                    pVdma, TMTIMER_FLAGS_NO_CRIT_SECT,
                                    "VDMA WatchDog Timer", &pVdma->WatchDogTimer);
        AssertRC(rc);
#endif

#ifdef VBOX_WITH_CRHGSMI
        VBoxVDMAThreadInit(&pVdma->Thread);

        rc = RTSemEventMultiCreate(&pVdma->HostCrCtlCompleteEvent);
        if (RT_SUCCESS(rc))
        {
            rc = VBoxVBVAExHSInit(&pVdma->CmdVbva);
            if (RT_SUCCESS(rc))
            {
                pVGAState->pVdma = pVdma;
                int rcIgnored = vboxVDMACrCtlHgsmiSetup(pVdma); NOREF(rcIgnored); /** @todo is this ignoring intentional? */
                return VINF_SUCCESS;

                /* NOTE(review): unreachable after the return above; appears to
                 * document the intended unwind order - confirm before removing. */
                VBoxVBVAExHSTerm(&pVdma->CmdVbva);
            }
            else
                WARN(("VBoxVBVAExHSInit failed %d\n", rc));

            RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
        }
        else
            WARN(("RTSemEventMultiCreate failed %d\n", rc));


        /* Error unwind: free the partially constructed instance. */
        RTMemFree(pVdma);
#else
        pVGAState->pVdma = pVdma;
        return VINF_SUCCESS;
#endif
    }
    else
        rc = VERR_OUT_OF_RESOURCES;

    return rc;
}
2664
/* Resets VDMA on VM reset: synchronously disables the command VBVA pipeline
 * (no-op without VBOX_WITH_CRHGSMI). */
int vboxVDMAReset(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    vdmaVBVACtlDisableSync(pVdma);
#endif
    return VINF_SUCCESS;
}
2672
/* Tears down the VDMA host state: disables the pipeline, stops the worker
 * thread, terminates the VBVA host context and frees the instance. */
int vboxVDMADestruct(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    vdmaVBVACtlDisableSync(pVdma);
    VBoxVDMAThreadCleanup(&pVdma->Thread);
    VBoxVBVAExHSTerm(&pVdma->CmdVbva);
    RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
#endif
    RTMemFree(pVdma);
    return VINF_SUCCESS;
}
2684
2685void vboxVDMAControl(struct VBOXVDMAHOST *pVdma, PVBOXVDMA_CTL pCmd, uint32_t cbCmd)
2686{
2687 PHGSMIINSTANCE pIns = pVdma->pHgsmi;
2688
2689 switch (pCmd->enmCtl)
2690 {
2691 case VBOXVDMA_CTL_TYPE_ENABLE:
2692 pCmd->i32Result = VINF_SUCCESS;
2693 break;
2694 case VBOXVDMA_CTL_TYPE_DISABLE:
2695 pCmd->i32Result = VINF_SUCCESS;
2696 break;
2697 case VBOXVDMA_CTL_TYPE_FLUSH:
2698 pCmd->i32Result = VINF_SUCCESS;
2699 break;
2700#ifdef VBOX_VDMA_WITH_WATCHDOG
2701 case VBOXVDMA_CTL_TYPE_WATCHDOG:
2702 pCmd->i32Result = vboxVDMAWatchDogCtl(pVdma, pCmd->u32Offset);
2703 break;
2704#endif
2705 default:
2706 WARN(("cmd not supported"));
2707 pCmd->i32Result = VERR_NOT_SUPPORTED;
2708 }
2709
2710 int rc = VBoxSHGSMICommandComplete (pIns, pCmd);
2711 AssertRC(rc);
2712}
2713
/* Entry point for a guest-submitted VDMA command buffer descriptor. */
void vboxVDMACommand(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    int rc = VERR_NOT_IMPLEMENTED;

#ifdef VBOX_WITH_CRHGSMI
    /* chromium commands are processed by crhomium hgcm thread independently from our internal cmd processing pipeline
     * this is why we process them specially */
    rc = vboxVDMACmdCheckCrCmd(pVdma, pCmd, cbCmd);
    if (rc == VINF_SUCCESS)
        return;   /* handled (or handed off) as a chromium command */

    if (RT_FAILURE(rc))
    {
        /* Invalid command: complete it immediately with the error. */
        pCmd->rc = rc;
        rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
        AssertRC(rc);
        return;
    }

    /* Not a chromium command: run it through the regular exec path. */
    vboxVDMACommandProcess(pVdma, pCmd, cbCmd);
#else
    /* VDMA is not functional without CRHGSMI support. */
    pCmd->rc = rc;
    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
    AssertRC(rc);
#endif
}
2740
2741/**/
2742#ifdef VBOX_WITH_CRHGSMI
2743
2744static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext);
2745
2746static int vdmaVBVACtlSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2747{
2748 int rc = VBoxVBVAExHCtlSubmit(&pVdma->CmdVbva, pCtl, enmSource, pfnComplete, pvComplete);
2749 if (RT_SUCCESS(rc))
2750 {
2751 if (rc == VINF_SUCCESS)
2752 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
2753 else
2754 Assert(rc == VINF_ALREADY_INITIALIZED);
2755 }
2756 else
2757 Log(("VBoxVBVAExHCtlSubmit failed %d\n", rc));
2758
2759 return rc;
2760}
2761
2762static DECLCALLBACK(void) vboxCmdVBVACmdCtlGuestCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext)
2763{
2764 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvContext;
2765 VBOXCMDVBVA_CTL *pGCtl = (VBOXCMDVBVA_CTL*)(pCtl->u.cmd.pu8Cmd - sizeof (VBOXCMDVBVA_CTL));
2766 AssertRC(rc);
2767 pGCtl->i32Result = rc;
2768
2769 Assert(pVdma->pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
2770 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pGCtl);
2771 AssertRC(rc);
2772
2773 VBoxVBVAExHCtlFree(pVbva, pCtl);
2774}
2775
2776static int vdmaVBVACtlGenericSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_SOURCE enmSource, VBVAEXHOSTCTL_TYPE enmType, uint8_t* pu8Cmd, uint32_t cbCmd, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2777{
2778 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, enmType);
2779 if (!pHCtl)
2780 {
2781 WARN(("VBoxVBVAExHCtlCreate failed\n"));
2782 return VERR_NO_MEMORY;
2783 }
2784
2785 pHCtl->u.cmd.pu8Cmd = pu8Cmd;
2786 pHCtl->u.cmd.cbCmd = cbCmd;
2787 int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, enmSource, pfnComplete, pvComplete);
2788 if (RT_FAILURE(rc))
2789 {
2790 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
2791 Log(("vdmaVBVACtlSubmit failed rc %d\n", rc));
2792 return rc;;
2793 }
2794 return VINF_SUCCESS;
2795}
2796
2797static int vdmaVBVACtlGenericGuestSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_TYPE enmType, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
2798{
2799 Assert(cbCtl >= sizeof (VBOXCMDVBVA_CTL));
2800 VBoxSHGSMICommandMarkAsynchCompletion(pCtl);
2801 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_GUEST, enmType, (uint8_t*)(pCtl+1), cbCtl - sizeof (VBOXCMDVBVA_CTL), vboxCmdVBVACmdCtlGuestCompletion, pVdma);
2802 if (RT_SUCCESS(rc))
2803 return VINF_SUCCESS;
2804
2805 WARN(("vdmaVBVACtlGenericSubmit failed %d\n", rc));
2806 pCtl->i32Result = rc;
2807 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
2808 AssertRC(rc);
2809 return VINF_SUCCESS;
2810}
2811
2812static DECLCALLBACK(void) vboxCmdVBVACmdCtlHostCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvCompletion)
2813{
2814 VBOXCRCMDCTL* pVboxCtl = (VBOXCRCMDCTL*)pCtl->u.cmd.pu8Cmd;
2815 if (pVboxCtl->u.pfnInternal)
2816 ((PFNCRCTLCOMPLETION)pVboxCtl->u.pfnInternal)(pVboxCtl, pCtl->u.cmd.cbCmd, rc, pvCompletion);
2817 VBoxVBVAExHCtlFree(pVbva, pCtl);
2818}
2819
2820static int vdmaVBVACtlOpaqueHostSubmit(PVBOXVDMAHOST pVdma, struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
2821 PFNCRCTLCOMPLETION pfnCompletion,
2822 void *pvCompletion)
2823{
2824 pCmd->u.pfnInternal = (void(*)())pfnCompletion;
2825 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_HOST, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, (uint8_t*)pCmd, cbCmd, vboxCmdVBVACmdCtlHostCompletion, pvCompletion);
2826 if (RT_FAILURE(rc))
2827 {
2828 if (rc == VERR_INVALID_STATE)
2829 {
2830 pCmd->u.pfnInternal = NULL;
2831 PVGASTATE pVGAState = pVdma->pVGAState;
2832 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCmd, cbCmd, pfnCompletion, pvCompletion);
2833 if (!RT_SUCCESS(rc))
2834 WARN(("pfnCrHgsmiControlProcess failed %d\n", rc));
2835
2836 return rc;
2837 }
2838 WARN(("vdmaVBVACtlGenericSubmit failed %d\n", rc));
2839 return rc;
2840 }
2841
2842 return VINF_SUCCESS;
2843}
2844
2845static DECLCALLBACK(int) vdmaVBVANotifyEnable(PVGASTATE pVGAState)
2846{
2847 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
2848 {
2849 int rc = pVGAState->pDrv->pfnVBVAEnable (pVGAState->pDrv, i, NULL, true);
2850 if (!RT_SUCCESS(rc))
2851 {
2852 WARN(("pfnVBVAEnable failed %d\n", rc));
2853 for (uint32_t j = 0; j < i; j++)
2854 {
2855 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, j);
2856 }
2857
2858 return rc;
2859 }
2860 }
2861 return VINF_SUCCESS;
2862}
2863
2864static DECLCALLBACK(int) vdmaVBVANotifyDisable(PVGASTATE pVGAState)
2865{
2866 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
2867 {
2868 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, i);
2869 }
2870 return VINF_SUCCESS;
2871}
2872
/* Worker-thread-creation callback for the enable path: processes the pending
 * enable control on the freshly created thread's behalf and, on an actual
 * state change, notifies Main about the new VBVA enable/disable state. */
static DECLCALLBACK(void) vdmaVBVACtlThreadCreatedEnable(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvContext)
{
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvThreadContext;
    VBVAEXHOSTCTL* pHCtl = (VBVAEXHOSTCTL*)pvContext;

    if (RT_SUCCESS(rc))
    {
        rc = vboxVDMACrGuestCtlProcess(pVdma, pHCtl);
        /* rc == VINF_SUCCESS would mean the actual state change has occurred */
        if (rc == VINF_SUCCESS)
        {
            /* we need to inform Main about VBVA enable/disable
             * main expects notifications to be done from the main thread
             * submit it there */
            PVGASTATE pVGAState = pVdma->pVGAState;

            if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
                vdmaVBVANotifyEnable(pVGAState);
            else
                vdmaVBVANotifyDisable(pVGAState);
        }
        else if (RT_FAILURE(rc))
            WARN(("vboxVDMACrGuestCtlProcess failed %d\n", rc));
    }
    else
        WARN(("vdmaVBVACtlThreadCreatedEnable is passed %d\n", rc));

    /* Complete the control with whatever status we ended up with. */
    VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pHCtl, rc);
}
2902
2903static int vdmaVBVACtlEnableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, bool fPaused, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2904{
2905 int rc;
2906 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, fPaused ? VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED : VBVAEXHOSTCTL_TYPE_GHH_ENABLE);
2907 if (pHCtl)
2908 {
2909 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
2910 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
2911 pHCtl->pfnComplete = pfnComplete;
2912 pHCtl->pvComplete = pvComplete;
2913
2914 rc = VBoxVDMAThreadCreate(&pVdma->Thread, vboxVDMAWorkerThread, pVdma, vdmaVBVACtlThreadCreatedEnable, pHCtl);
2915 if (RT_SUCCESS(rc))
2916 return VINF_SUCCESS;
2917 else
2918 WARN(("VBoxVDMAThreadCreate failed %d\n", rc));
2919
2920 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
2921 }
2922 else
2923 {
2924 WARN(("VBoxVBVAExHCtlCreate failed\n"));
2925 rc = VERR_NO_MEMORY;
2926 }
2927
2928 return rc;
2929}
2930
2931static int vdmaVBVACtlEnableSubmitSync(PVBOXVDMAHOST pVdma, uint32_t offVram, bool fPaused)
2932{
2933 VBVAENABLE Enable = {0};
2934 Enable.u32Flags = VBVA_F_ENABLE;
2935 Enable.u32Offset = offVram;
2936
2937 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
2938 Data.rc = VERR_NOT_IMPLEMENTED;
2939 int rc = RTSemEventCreate(&Data.hEvent);
2940 if (!RT_SUCCESS(rc))
2941 {
2942 WARN(("RTSemEventCreate failed %d\n", rc));
2943 return rc;
2944 }
2945
2946 rc = vdmaVBVACtlEnableSubmitInternal(pVdma, &Enable, fPaused, vdmaVBVACtlSubmitSyncCompletion, &Data);
2947 if (RT_SUCCESS(rc))
2948 {
2949 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
2950 if (RT_SUCCESS(rc))
2951 {
2952 rc = Data.rc;
2953 if (!RT_SUCCESS(rc))
2954 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
2955 }
2956 else
2957 WARN(("RTSemEventWait failed %d\n", rc));
2958 }
2959 else
2960 WARN(("vdmaVBVACtlSubmit failed %d\n", rc));
2961
2962 RTSemEventDestroy(Data.hEvent);
2963
2964 return rc;
2965}
2966
2967static int vdmaVBVACtlDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2968{
2969 int rc;
2970 VBVAEXHOSTCTL* pHCtl;
2971 if (VBoxVBVAExHSIsDisabled(&pVdma->CmdVbva))
2972 {
2973 WARN(("VBoxVBVAExHSIsDisabled: disabled"));
2974 return VINF_SUCCESS;
2975 }
2976
2977 pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_GHH_DISABLE);
2978 if (!pHCtl)
2979 {
2980 WARN(("VBoxVBVAExHCtlCreate failed\n"));
2981 return VERR_NO_MEMORY;
2982 }
2983
2984 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
2985 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
2986 rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_GUEST, pfnComplete, pvComplete);
2987 if (RT_SUCCESS(rc))
2988 return VINF_SUCCESS;
2989
2990 WARN(("vdmaVBVACtlSubmit failed rc %d\n", rc));
2991 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
2992 return rc;
2993}
2994
2995static int vdmaVBVACtlEnableDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2996{
2997 bool fEnable = ((pEnable->u32Flags & (VBVA_F_ENABLE | VBVA_F_DISABLE)) == VBVA_F_ENABLE);
2998 if (fEnable)
2999 return vdmaVBVACtlEnableSubmitInternal(pVdma, pEnable, false, pfnComplete, pvComplete);
3000 return vdmaVBVACtlDisableSubmitInternal(pVdma, pEnable, pfnComplete, pvComplete);
3001}
3002
3003static int vdmaVBVACtlEnableDisableSubmit(PVBOXVDMAHOST pVdma, VBOXCMDVBVA_CTL_ENABLE *pEnable)
3004{
3005 VBoxSHGSMICommandMarkAsynchCompletion(&pEnable->Hdr);
3006 int rc = vdmaVBVACtlEnableDisableSubmitInternal(pVdma, &pEnable->Enable, vboxCmdVBVACmdCtlGuestCompletion, pVdma);
3007 if (RT_SUCCESS(rc))
3008 return VINF_SUCCESS;
3009
3010 WARN(("vdmaVBVACtlEnableDisableSubmitInternal failed %d\n", rc));
3011 pEnable->Hdr.i32Result = rc;
3012 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, &pEnable->Hdr);
3013 AssertRC(rc);
3014 return VINF_SUCCESS;
3015}
3016
3017static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext)
3018{
3019 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvContext;
3020 pData->rc = rc;
3021 rc = RTSemEventSignal(pData->hEvent);
3022 if (!RT_SUCCESS(rc))
3023 WARN(("RTSemEventSignal failed %d\n", rc));
3024}
3025
3026static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource)
3027{
3028 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
3029 Data.rc = VERR_NOT_IMPLEMENTED;
3030 int rc = RTSemEventCreate(&Data.hEvent);
3031 if (!RT_SUCCESS(rc))
3032 {
3033 WARN(("RTSemEventCreate failed %d\n", rc));
3034 return rc;
3035 }
3036
3037 rc = vdmaVBVACtlSubmit(pVdma, pCtl, enmSource, vdmaVBVACtlSubmitSyncCompletion, &Data);
3038 if (RT_SUCCESS(rc))
3039 {
3040 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
3041 if (RT_SUCCESS(rc))
3042 {
3043 rc = Data.rc;
3044 if (!RT_SUCCESS(rc))
3045 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
3046 }
3047 else
3048 WARN(("RTSemEventWait failed %d\n", rc));
3049 }
3050 else
3051 Log(("vdmaVBVACtlSubmit failed %d\n", rc));
3052
3053 RTSemEventDestroy(Data.hEvent);
3054
3055 return rc;
3056}
3057
3058static int vdmaVBVAPause(PVBOXVDMAHOST pVdma)
3059{
3060 VBVAEXHOSTCTL Ctl;
3061 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE;
3062 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
3063}
3064
3065static int vdmaVBVAResume(PVBOXVDMAHOST pVdma)
3066{
3067 VBVAEXHOSTCTL Ctl;
3068 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME;
3069 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
3070}
3071
3072static int vboxVDMACmdSubmitPerform(struct VBOXVDMAHOST *pVdma)
3073{
3074 int rc = VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva);
3075 switch (rc)
3076 {
3077 case VINF_SUCCESS:
3078 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
3079 case VINF_ALREADY_INITIALIZED:
3080 case VINF_EOF:
3081 case VERR_INVALID_STATE:
3082 return VINF_SUCCESS;
3083 default:
3084 Assert(!RT_FAILURE(rc));
3085 return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
3086 }
3087}
3088
3089
3090int vboxCmdVBVACmdHostCtl(PPDMIDISPLAYVBVACALLBACKS pInterface,
3091 struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
3092 PFNCRCTLCOMPLETION pfnCompletion,
3093 void *pvCompletion)
3094{
3095 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
3096 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3097 return vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, pfnCompletion, pvCompletion);
3098}
3099
/** Stack context shared between vboxCmdVBVACmdHostCtlSync and its
 *  completion callback vboxCmdVBVACmdHostCtlSyncCb. */
typedef struct VBOXCMDVBVA_CMDHOSTCTL_SYNC
{
    struct VBOXVDMAHOST *pVdma;   /* the VDMA instance the control was submitted to */
    uint32_t fProcessing;         /* 1 while in flight; cleared by the completion callback */
    int rc;                       /* completion status; valid once fProcessing is 0 */
} VBOXCMDVBVA_CMDHOSTCTL_SYNC;
3106
/* Completion callback for vboxCmdVBVACmdHostCtlSync: publishes the result,
 * clears the in-flight flag and wakes the waiting thread. */
static DECLCALLBACK(void) vboxCmdVBVACmdHostCtlSyncCb(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
{
    VBOXCMDVBVA_CMDHOSTCTL_SYNC *pData = (VBOXCMDVBVA_CMDHOSTCTL_SYNC*)pvCompletion;

    /* rc is stored before fProcessing is cleared; the waiter polls fProcessing
     * and then reads rc. NOTE(review): these are plain (non-atomic) stores -
     * confirm the semaphore signal below provides sufficient ordering on
     * weakly-ordered hosts. */
    pData->rc = rc;
    pData->fProcessing = 0;

    struct VBOXVDMAHOST *pVdma = pData->pVdma;

    /* Count completions so the waiter knows whether it may reset the event. */
    ASMAtomicIncS32(&pVdma->i32cHostCrCtlCompleted);

    RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
}
3120
/* Submits a host control and busy-waits (with a polled multi-event) until its
 * completion callback has run, returning the control's completion status. */
int vboxCmdVBVACmdHostCtlSync(PPDMIDISPLAYVBVACALLBACKS pInterface,
        struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd)
{
    PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
    struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
    /* Stack context shared with vboxCmdVBVACmdHostCtlSyncCb. */
    VBOXCMDVBVA_CMDHOSTCTL_SYNC Data;
    Data.pVdma = pVdma;
    Data.fProcessing = 1;
    Data.rc = VERR_INTERNAL_ERROR;
    int rc = vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, vboxCmdVBVACmdHostCtlSyncCb, &Data);
    if (!RT_SUCCESS(rc))
    {
        WARN(("vdmaVBVACtlOpaqueHostSubmit failed %d", rc));
        return rc;
    }

    while (Data.fProcessing)
    {
        /* Poll infrequently to make sure no completed message has been missed. */
        RTSemEventMultiWait(pVdma->HostCrCtlCompleteEvent, 500);

        if (Data.fProcessing)
            RTThreadYield();
    }

    /* 'Our' message has been processed, so should reset the semaphore.
     * There is still possible that another message has been processed
     * and the semaphore has been signalled again.
     * Reset only if there are no other messages completed.
     */
    int32_t c = ASMAtomicDecS32(&pVdma->i32cHostCrCtlCompleted);
    Assert(c >= 0);
    if (!c)
        RTSemEventMultiReset(pVdma->HostCrCtlCompleteEvent);

    /* Data.rc was set by the completion callback before fProcessing cleared. */
    rc = Data.rc;
    if (!RT_SUCCESS(rc))
        WARN(("host call failed %d", rc));

    return rc;
}
3162
3163int vboxCmdVBVACmdCtl(PVGASTATE pVGAState, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
3164{
3165 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3166 int rc = VINF_SUCCESS;
3167 switch (pCtl->u32Type)
3168 {
3169 case VBOXCMDVBVACTL_TYPE_3DCTL:
3170 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, pCtl, cbCtl);
3171 case VBOXCMDVBVACTL_TYPE_RESIZE:
3172 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_RESIZE, pCtl, cbCtl);
3173 case VBOXCMDVBVACTL_TYPE_ENABLE:
3174 if (cbCtl != sizeof (VBOXCMDVBVA_CTL_ENABLE))
3175 {
3176 WARN(("incorrect enable size\n"));
3177 rc = VERR_INVALID_PARAMETER;
3178 break;
3179 }
3180 return vdmaVBVACtlEnableDisableSubmit(pVdma, (VBOXCMDVBVA_CTL_ENABLE*)pCtl);
3181 default:
3182 WARN(("unsupported type\n"));
3183 rc = VERR_INVALID_PARAMETER;
3184 break;
3185 }
3186
3187 pCtl->i32Result = rc;
3188 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
3189 AssertRC(rc);
3190 return VINF_SUCCESS;
3191}
3192
3193int vboxCmdVBVACmdSubmit(PVGASTATE pVGAState)
3194{
3195 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3196 {
3197 WARN(("vdma VBVA is disabled\n"));
3198 return VERR_INVALID_STATE;
3199 }
3200
3201 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3202}
3203
3204int vboxCmdVBVACmdFlush(PVGASTATE pVGAState)
3205{
3206 WARN(("flush\n"));
3207 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3208 {
3209 WARN(("vdma VBVA is disabled\n"));
3210 return VERR_INVALID_STATE;
3211 }
3212 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3213}
3214
3215void vboxCmdVBVACmdTimer(PVGASTATE pVGAState)
3216{
3217 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3218 return;
3219 vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3220}
3221
3222#endif
3223
3224int vboxVDMASaveStateExecPrep(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
3225{
3226#ifdef VBOX_WITH_CRHGSMI
3227 int rc = vdmaVBVAPause(pVdma);
3228 if (RT_SUCCESS(rc))
3229 return VINF_SUCCESS;
3230
3231 if (rc != VERR_INVALID_STATE)
3232 {
3233 WARN(("vdmaVBVAPause failed %d\n", rc));
3234 return rc;
3235 }
3236
3237#ifdef DEBUG_misha
3238 WARN(("debug prep"));
3239#endif
3240
3241 PVGASTATE pVGAState = pVdma->pVGAState;
3242 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
3243 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_BEGIN, sizeof (*pCmd));
3244 Assert(pCmd);
3245 if (pCmd)
3246 {
3247 rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
3248 AssertRC(rc);
3249 if (RT_SUCCESS(rc))
3250 {
3251 rc = vboxVDMACrCtlGetRc(pCmd);
3252 }
3253 vboxVDMACrCtlRelease(pCmd);
3254 return rc;
3255 }
3256 return VERR_NO_MEMORY;
3257#else
3258 return VINF_SUCCESS;
3259#endif
3260}
3261
3262int vboxVDMASaveStateExecDone(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
3263{
3264#ifdef VBOX_WITH_CRHGSMI
3265 int rc = vdmaVBVAResume(pVdma);
3266 if (RT_SUCCESS(rc))
3267 return VINF_SUCCESS;
3268
3269 if (rc != VERR_INVALID_STATE)
3270 {
3271 WARN(("vdmaVBVAResume failed %d\n", rc));
3272 return rc;
3273 }
3274
3275#ifdef DEBUG_misha
3276 WARN(("debug done"));
3277#endif
3278
3279 PVGASTATE pVGAState = pVdma->pVGAState;
3280 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
3281 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_END, sizeof (*pCmd));
3282 Assert(pCmd);
3283 if (pCmd)
3284 {
3285 rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
3286 AssertRC(rc);
3287 if (RT_SUCCESS(rc))
3288 {
3289 rc = vboxVDMACrCtlGetRc(pCmd);
3290 }
3291 vboxVDMACrCtlRelease(pCmd);
3292 return rc;
3293 }
3294 return VERR_NO_MEMORY;
3295#else
3296 return VINF_SUCCESS;
3297#endif
3298}
3299
3300int vboxVDMASaveStateExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
3301{
3302 int rc;
3303
3304#ifdef VBOX_WITH_CRHGSMI
3305 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
3306#endif
3307 {
3308 rc = SSMR3PutU32(pSSM, 0xffffffff);
3309 AssertRCReturn(rc, rc);
3310 return VINF_SUCCESS;
3311 }
3312
3313#ifdef VBOX_WITH_CRHGSMI
3314 PVGASTATE pVGAState = pVdma->pVGAState;
3315 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
3316
3317 rc = SSMR3PutU32(pSSM, (uint32_t)(((uint8_t*)pVdma->CmdVbva.pVBVA) - pu8VramBase));
3318 AssertRCReturn(rc, rc);
3319
3320 VBVAEXHOSTCTL HCtl;
3321 HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_SAVESTATE;
3322 HCtl.u.state.pSSM = pSSM;
3323 HCtl.u.state.u32Version = 0;
3324 return vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
3325#endif
3326}
3327
3328int vboxVDMASaveLoadExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM, uint32_t u32Version)
3329{
3330 uint32_t u32;
3331 int rc = SSMR3GetU32(pSSM, &u32);
3332 AssertRCReturn(rc, rc);
3333
3334 if (u32 != 0xffffffff)
3335 {
3336#ifdef VBOX_WITH_CRHGSMI
3337 rc = vdmaVBVACtlEnableSubmitSync(pVdma, u32, true);
3338 AssertRCReturn(rc, rc);
3339
3340 Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
3341
3342 VBVAEXHOSTCTL HCtl;
3343 HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_LOADSTATE;
3344 HCtl.u.state.pSSM = pSSM;
3345 HCtl.u.state.u32Version = u32Version;
3346 rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
3347 AssertRCReturn(rc, rc);
3348
3349 rc = vdmaVBVAResume(pVdma);
3350 AssertRCReturn(rc, rc);
3351
3352 return VINF_SUCCESS;
3353#else
3354 WARN(("Unsupported VBVACtl info!\n"));
3355 return VERR_VERSION_MISMATCH;
3356#endif
3357 }
3358
3359 return VINF_SUCCESS;
3360}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette