VirtualBox

source: vbox/trunk/src/VBox/Devices/Graphics/DevVGA_VDMA.cpp@ 51476

Last change on this file since 51476 was 51462, checked in by vboxsync, 11 years ago

Graphics: logging

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 108.5 KB
Line 
1/** @file
2 * Video DMA (VDMA) support.
3 */
4
5/*
6 * Copyright (C) 2006-2012 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 */
16#include <VBox/VMMDev.h>
17#include <VBox/vmm/pdmdev.h>
18#include <VBox/VBoxVideo.h>
19#include <iprt/semaphore.h>
20#include <iprt/thread.h>
21#include <iprt/mem.h>
22#include <iprt/asm.h>
23#include <iprt/list.h>
24#include <iprt/param.h>
25
26#include "DevVGA.h"
27#include "HGSMI/SHGSMIHost.h"
28
29#include <VBox/VBoxVideo3D.h>
30#include <VBox/VBoxVideoHost3D.h>
31
32#ifdef DEBUG_misha
33# define VBOXVDBG_MEMCACHE_DISABLE
34#endif
35
36#ifndef VBOXVDBG_MEMCACHE_DISABLE
37# include <iprt/memcache.h>
38#endif
39
#ifdef DEBUG_misha
/* In misha's debug builds WARN also raises an assertion so the developer
 * breaks into the debugger at the point of failure. */
#define WARN_BP() do { AssertFailed(); } while (0)
#else
#define WARN_BP() do { } while (0)
#endif
/* Log a release-log message and (debug builds only) breakpoint. */
#define WARN(_msg) do { \
        LogRel(_msg); \
        WARN_BP(); \
    } while (0)

/* Life-cycle states of the VDMA worker thread (VBOXVDMATHREAD::u32State).
 * NOTE: the value 2 is unused (reason not visible in this file). */
#define VBOXVDMATHREAD_STATE_TERMINATED 0
#define VBOXVDMATHREAD_STATE_CREATING 1
#define VBOXVDMATHREAD_STATE_CREATED 3
#define VBOXVDMATHREAD_STATE_TERMINATING 4
struct VBOXVDMATHREAD;

/* One-shot callback fired when the VDMA worker thread changes state
 * (creation finished / termination acknowledged). */
typedef DECLCALLBACKPTR(void, PFNVBOXVDMATHREAD_CHANGED)(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvChangeContext);

typedef struct VBOXVDMATHREAD
{
    RTTHREAD hWorkerThread;               /* worker thread handle */
    RTSEMEVENT hEvent;                    /* event the worker waits on / is poked with */
    volatile uint32_t u32State;           /* VBOXVDMATHREAD_STATE_XXX */
    PFNVBOXVDMATHREAD_CHANGED pfnChanged; /* one-shot state-change notification */
    void *pvChanged;                      /* user context for pfnChanged */
} VBOXVDMATHREAD, *PVBOXVDMATHREAD;
67
68
/* state transformations:
 *
 * submitter | processor
 *
 * LISTENING ---> PROCESSING
 *
 * */
#define VBVAEXHOSTCONTEXT_STATE_LISTENING 0
#define VBVAEXHOSTCONTEXT_STATE_PROCESSING 1

/* Enable states. The numeric ordering DISABLED < PAUSED < ENABLED is relied
 * upon by the >=, <= and > comparisons throughout this file. */
#define VBVAEXHOSTCONTEXT_ESTATE_DISABLED -1
#define VBVAEXHOSTCONTEXT_ESTATE_PAUSED 0
#define VBVAEXHOSTCONTEXT_ESTATE_ENABLED 1

/* Host-side context for the extended VBVA command channel. */
typedef struct VBVAEXHOSTCONTEXT
{
    VBVABUFFER *pVBVA;               /* guest-shared ring buffer; NULL until enabled */
    volatile int32_t i32State;       /* VBVAEXHOSTCONTEXT_STATE_XXX (processor ownership) */
    volatile int32_t i32EnableState; /* VBVAEXHOSTCONTEXT_ESTATE_XXX */
    volatile uint32_t u32cCtls;      /* number of controls queued on both lists */
    /* critical section for accessing ctl lists */
    RTCRITSECT CltCritSect;
    RTLISTANCHOR GuestCtlList;       /* controls submitted by the guest */
    RTLISTANCHOR HostCtlList;        /* controls submitted by the host */
#ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMEMCACHE CtlCache;             /* allocator for VBVAEXHOSTCTL records */
#endif
} VBVAEXHOSTCONTEXT;
97
/* Control types. The HH_*/GHH_* prefixes presumably distinguish host- vs
 * guest-submitted controls — confirm against the handlers elsewhere. */
typedef enum
{
    VBVAEXHOSTCTL_TYPE_UNDEFINED = 0,
    VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE,  /* consumed inside the processor loop */
    VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME, /* consumed inside the processor loop */
    VBVAEXHOSTCTL_TYPE_HH_SAVESTATE,
    VBVAEXHOSTCTL_TYPE_HH_LOADSTATE,
    VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE,
    VBVAEXHOSTCTL_TYPE_HH_BE_OPAQUE,
    VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD,
    VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE,
    VBVAEXHOSTCTL_TYPE_GHH_ENABLE,
    VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED,
    VBVAEXHOSTCTL_TYPE_GHH_DISABLE,
    VBVAEXHOSTCTL_TYPE_GHH_RESIZE
} VBVAEXHOSTCTL_TYPE;
114
struct VBVAEXHOSTCTL;

/* Completion callback for a submitted control. */
typedef DECLCALLBACKPTR(void, PFNVBVAEXHOSTCTL_COMPLETE)(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvComplete);

/* A queued control; lives on GuestCtlList or HostCtlList. */
typedef struct VBVAEXHOSTCTL
{
    RTLISTNODE Node;        /* list linkage */
    VBVAEXHOSTCTL_TYPE enmType;
    union
    {
        struct
        {
            uint8_t * pu8Cmd;   /* command payload; for guest ctls it points into VRAM
                                 * (save/load serialize it as an offset from the VRAM base) */
            uint32_t cbCmd;     /* payload size in bytes */
        } cmd;

        struct
        {
            PSSMHANDLE pSSM;    /* saved-state handle for HH_SAVESTATE/HH_LOADSTATE */
            uint32_t u32Version;/* saved-state version */
        } state;
    } u;
    PFNVBVAEXHOSTCTL_COMPLETE pfnComplete;  /* NULL => the record is simply freed on completion */
    void *pvComplete;                       /* user context for pfnComplete */
} VBVAEXHOSTCTL;
140
/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
 * but can be called with other VBoxVBVAExS** (submitter) functions except Init/Start/Term apparently.
 * Can only be called by the processor, i.e. the entity that acquired the processor state by direct or indirect call to the VBoxVBVAExHSCheckCommands
 * see more detailed comments in headers for function definitions */
typedef enum
{
    VBVAEXHOST_DATA_TYPE_NO_DATA = 0, /* nothing to process */
    VBVAEXHOST_DATA_TYPE_CMD,         /* guest command from the VBVA ring */
    VBVAEXHOST_DATA_TYPE_HOSTCTL,     /* control from the host list */
    VBVAEXHOST_DATA_TYPE_GUESTCTL     /* control from the guest list */
} VBVAEXHOST_DATA_TYPE;
152
153static DECLCALLBACK(int) vdmaVBVANotifyDisable(PVGASTATE pVGAState);
154
155
156static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd);
157
158static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd);
159static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc);
160
161/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
162 * can be called concurrently with istelf as well as with other VBoxVBVAEx** functions except Init/Start/Term aparently */
163static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva);
164
165static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva);
166static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA);
167static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva);
168static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva);
169static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM);
170static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version);
171
172static VBVAEXHOSTCTL* VBoxVBVAExHCtlAlloc(VBVAEXHOSTCONTEXT *pCmdVbva)
173{
174#ifndef VBOXVDBG_MEMCACHE_DISABLE
175 return (VBVAEXHOSTCTL*)RTMemCacheAlloc(pCmdVbva->CtlCache);
176#else
177 return (VBVAEXHOSTCTL*)RTMemAlloc(sizeof (VBVAEXHOSTCTL));
178#endif
179}
180
/* Return a control record to the cache (or the heap when the cache is compiled out). */
static void VBoxVBVAExHCtlFree(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl)
{
#ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMemCacheFree(pCmdVbva->CtlCache, pCtl);
#else
    RTMemFree(pCtl);
#endif
}
189
190static VBVAEXHOSTCTL* VBoxVBVAExHCtlCreate(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL_TYPE enmType)
191{
192 VBVAEXHOSTCTL* pCtl = VBoxVBVAExHCtlAlloc(pCmdVbva);
193 if (!pCtl)
194 {
195 WARN(("VBoxVBVAExHCtlAlloc failed\n"));
196 return NULL;
197 }
198
199 pCtl->enmType = enmType;
200 return pCtl;
201}
202
/* Try to become the (single) processor by atomically flipping
 * LISTENING -> PROCESSING.
 * @returns VINF_SUCCESS when this thread now owns the processor role,
 *          VERR_SEM_BUSY when another thread already holds it. */
static int vboxVBVAExHSProcessorAcquire(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);

    if (ASMAtomicCmpXchgS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_PROCESSING, VBVAEXHOSTCONTEXT_STATE_LISTENING))
        return VINF_SUCCESS;
    return VERR_SEM_BUSY;
}
211
/* Dequeue the next pending control, host controls taking priority.
 * Must only be called by the current processor.
 * @param pfHostCtl     set to true when the returned control came from the host list
 * @param fHostOnlyMode when true, guest controls are ignored (used on disable)
 * @returns the dequeued control, or NULL when nothing is pending or the lock
 *          could not be entered. */
static VBVAEXHOSTCTL* vboxVBVAExHPCheckCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, bool *pfHostCtl, bool fHostOnlyMode)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);

    /* cheap lock-free early-out: u32cCtls covers both lists */
    if(!fHostOnlyMode && !ASMAtomicUoReadU32(&pCmdVbva->u32cCtls))
        return NULL;

    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
        VBVAEXHOSTCTL* pCtl = RTListGetFirst(&pCmdVbva->HostCtlList, VBVAEXHOSTCTL, Node);
        if (pCtl)
            *pfHostCtl = true;
        else if (!fHostOnlyMode)
        {
            /* guest controls are not served while paused */
            if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
            {
                pCtl = RTListGetFirst(&pCmdVbva->GuestCtlList, VBVAEXHOSTCTL, Node);
                /* pCtl can not be null here since pCmdVbva->u32cCtls is not null,
                 * and there are no HostCtl commands*/
                Assert(pCtl);
                *pfHostCtl = false;
            }
        }

        if (pCtl)
        {
            RTListNodeRemove(&pCtl->Node);
            ASMAtomicDecU32(&pCmdVbva->u32cCtls);
        }

        RTCritSectLeave(&pCmdVbva->CltCritSect);

        return pCtl;
    }
    else
        WARN(("RTCritSectEnter failed %d\n", rc));

    return NULL;
}
252
253static VBVAEXHOSTCTL* VBoxVBVAExHPCheckHostCtlOnDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
254{
255 bool fHostCtl = false;
256 VBVAEXHOSTCTL* pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostCtl, true);
257 Assert(!pCtl || fHostCtl);
258 return pCtl;
259}
260
261static int VBoxVBVAExHPPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
262{
263 if (pCmdVbva->i32EnableState < VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
264 {
265 WARN(("Invalid state\n"));
266 return VERR_INVALID_STATE;
267 }
268
269 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
270 return VINF_SUCCESS;
271}
272
273static int VBoxVBVAExHPResume(struct VBVAEXHOSTCONTEXT *pCmdVbva)
274{
275 if (pCmdVbva->i32EnableState != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
276 {
277 WARN(("Invalid state\n"));
278 return VERR_INVALID_STATE;
279 }
280
281 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
282 return VINF_SUCCESS;
283}
284
285
286static bool vboxVBVAExHPCheckProcessCtlInternal(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl)
287{
288 switch (pCtl->enmType)
289 {
290 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE:
291 {
292 int rc = VBoxVBVAExHPPause(pCmdVbva);
293 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
294 return true;
295 }
296 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME:
297 {
298 int rc = VBoxVBVAExHPResume(pCmdVbva);
299 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
300 return true;
301 }
302 default:
303 return false;
304 }
305}
306
/* Give up the processor role: PROCESSING -> LISTENING. Processor-only. */
static void vboxVBVAExHPProcessorRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);

    ASMAtomicWriteS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
}
313
/* Advertise "host is processing" to the guest via the shared host flags. */
static void vboxVBVAExHPHgEventSet(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    if (pCmdVbva->pVBVA)
        ASMAtomicOrU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, VBVA_F_STATE_PROCESSING);
}
320
/* Clear the guest-visible "host is processing" flag. */
static void vboxVBVAExHPHgEventClear(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    if (pCmdVbva->pVBVA)
        ASMAtomicAndU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, ~VBVA_F_STATE_PROCESSING);
}
327
/* Peek at the next guest command record in the VBVA ring.
 * @returns VINF_SUCCESS       - *ppCmd/*pcbCmd describe a complete command
 *          VINF_EOF           - no records pending (outputs untouched)
 *          VINF_TRY_AGAIN     - the guest is still writing the record
 *          VERR_INVALID_STATE - the record crosses the ring boundary (unsupported) */
static int vboxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pCmdVbva->i32EnableState > VBVAEXHOSTCONTEXT_ESTATE_PAUSED);

    VBVABUFFER *pVBVA = pCmdVbva->pVBVA;

    uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
    uint32_t indexRecordFree = pVBVA->indexRecordFree;

    Log(("first = %d, free = %d\n",
                   indexRecordFirst, indexRecordFree));

    if (indexRecordFirst == indexRecordFree)
    {
        /* No records to process. Return without assigning output variables. */
        return VINF_EOF;
    }

    uint32_t cbRecordCurrent = ASMAtomicReadU32(&pVBVA->aRecords[indexRecordFirst].cbRecord);

    /* A new record need to be processed. */
    if (cbRecordCurrent & VBVA_F_RECORD_PARTIAL)
    {
        /* the record is being recorded, try again */
        return VINF_TRY_AGAIN;
    }

    uint32_t cbRecord = cbRecordCurrent & ~VBVA_F_RECORD_PARTIAL;

    if (!cbRecord)
    {
        /* the record is being recorded, try again */
        return VINF_TRY_AGAIN;
    }

    /* we should not get partial commands here actually */
    Assert(cbRecord);

    /* The size of largest contiguous chunk in the ring buffer. */
    uint32_t u32BytesTillBoundary = pVBVA->cbData - pVBVA->off32Data;

    /* The pointer to data in the ring buffer. */
    uint8_t *pSrc = &pVBVA->au8Data[pVBVA->off32Data];

    /* Fetch or point the data. */
    if (u32BytesTillBoundary >= cbRecord)
    {
        /* The command does not cross buffer boundary. Return address in the buffer. */
        *ppCmd = pSrc;
        *pcbCmd = cbRecord;
        return VINF_SUCCESS;
    }

    /* Commands wrapping around the ring boundary are not supported here. */
    LogRel(("CmdVbva: cross-bound writes unsupported\n"));
    return VERR_INVALID_STATE;
}
385
/* Retire the current guest command: advance the data offset past cbCmd bytes
 * (with ring wrap-around) and move on to the next record. Processor-only. */
static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd)
{
    VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
    pVBVA->off32Data = (pVBVA->off32Data + cbCmd) % pVBVA->cbData;

    pVBVA->indexRecordFirst = (pVBVA->indexRecordFirst + 1) % RT_ELEMENTS(pVBVA->aRecords);
}
393
394static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc)
395{
396 if (pCtl->pfnComplete)
397 pCtl->pfnComplete(pCmdVbva, pCtl, rc, pCtl->pvComplete);
398 else
399 VBoxVBVAExHCtlFree(pCmdVbva, pCtl);
400}
401
/* Fetch the next work item for the processor: queued controls first (host
 * before guest), then guest commands from the VBVA ring. Internal controls
 * (pause/resume) are executed here and never returned to the caller. */
static VBVAEXHOST_DATA_TYPE vboxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    VBVAEXHOSTCTL*pCtl;
    bool fHostClt;

    for(;;)
    {
        pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostClt, false);
        if (pCtl)
        {
            if (fHostClt)
            {
                if (!vboxVBVAExHPCheckProcessCtlInternal(pCmdVbva, pCtl))
                {
                    /* not an internal control - hand it out to the caller */
                    *ppCmd = (uint8_t*)pCtl;
                    *pcbCmd = sizeof (*pCtl);
                    return VBVAEXHOST_DATA_TYPE_HOSTCTL;
                }
                continue; /* internal control consumed, look for more work */
            }
            else
            {
                *ppCmd = (uint8_t*)pCtl;
                *pcbCmd = sizeof (*pCtl);
                return VBVAEXHOST_DATA_TYPE_GUESTCTL;
            }
        }

        /* no guest commands are served while paused or disabled */
        if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) <= VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
            return VBVAEXHOST_DATA_TYPE_NO_DATA;

        int rc = vboxVBVAExHPCmdGet(pCmdVbva, ppCmd, pcbCmd);
        switch (rc)
        {
            case VINF_SUCCESS:
                return VBVAEXHOST_DATA_TYPE_CMD;
            case VINF_EOF:
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
            case VINF_TRY_AGAIN:
                RTThreadSleep(1);
                continue;
            default:
                /* this is something really unexpected, i.e. most likely guest has written something incorrect to the VBVA buffer */
                WARN(("Warning: vboxVBVAExHCmdGet returned unexpected status %d\n", rc));
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
        }
    }

    /* not reachable: every path in the loop returns or continues */
    WARN(("Warning: VBoxVBVAExHCmdGet unexpected state\n"));
    return VBVAEXHOST_DATA_TYPE_NO_DATA;
}
454
/* Public processor entry point: fetch the next work item, and when the queue
 * drains, drop the guest-visible "processing" flag and the processor role —
 * with a re-check to close the race described below. */
static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    VBVAEXHOST_DATA_TYPE enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
    if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
    {
        vboxVBVAExHPHgEventClear(pCmdVbva);
        vboxVBVAExHPProcessorRelease(pCmdVbva);
        /* we need to prevent racing between us clearing the flag and command check/submission thread, i.e.
         * 1. we check the queue -> and it is empty
         * 2. submitter adds command to the queue
         * 3. submitter checks the "processing" -> and it is true , thus it does not submit a notification
         * 4. we clear the "processing" state
         * 5. ->here we need to re-check the queue state to ensure we do not leak the notification of the above command
         * 6. if the queue appears to be not-empty set the "processing" state back to "true"
         **/
        int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
        if (RT_SUCCESS(rc))
        {
            /* we are the processor now */
            enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
            if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
            {
                vboxVBVAExHPProcessorRelease(pCmdVbva);
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
            }

            vboxVBVAExHPHgEventSet(pCmdVbva);
        }
    }

    return enmType;
}
487
488DECLINLINE(bool) vboxVBVAExHSHasCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
489{
490 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
491
492 if (pVBVA)
493 {
494 uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
495 uint32_t indexRecordFree = pVBVA->indexRecordFree;
496
497 if (indexRecordFirst != indexRecordFree)
498 return true;
499 }
500
501 return !!ASMAtomicReadU32(&pCmdVbva->u32cCtls);
502}
503
/* Checks whether the new commands are ready for processing
 * @returns
 *   VINF_SUCCESS - there are commands are in a queue, and the given thread is now the processor (i.e. typically it would delegate processing to a worker thread)
 *   VINF_EOF - no commands in a queue
 *   VINF_ALREADY_INITIALIZED - another thread already processing the commands
 *   VERR_INVALID_STATE - the VBVA is paused or pausing */
static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
    if (RT_SUCCESS(rc))
    {
        /* we are the processor now */
        if (vboxVBVAExHSHasCommands(pCmdVbva))
        {
            /* keep the processor role and tell the guest we are busy */
            vboxVBVAExHPHgEventSet(pCmdVbva);
            return VINF_SUCCESS;
        }

        vboxVBVAExHPProcessorRelease(pCmdVbva);
        return VINF_EOF;
    }
    if (rc == VERR_SEM_BUSY)
        return VINF_ALREADY_INITIALIZED;
    return VERR_INVALID_STATE;
}
529
/* One-time construction of the context: critical section, control cache, lists.
 * NOTE(review): starts with i32State = PROCESSING and estate DISABLED —
 * presumably the creator finishes initialization as the processor and releases
 * the role afterwards; confirm with the callers (outside this chunk). */
static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
    int rc = RTCritSectInit(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
#ifndef VBOXVDBG_MEMCACHE_DISABLE
        rc = RTMemCacheCreate(&pCmdVbva->CtlCache, sizeof (VBVAEXHOSTCTL),
                                0, /* size_t cbAlignment */
                                UINT32_MAX, /* uint32_t cMaxObjects */
                                NULL, /* PFNMEMCACHECTOR pfnCtor*/
                                NULL, /* PFNMEMCACHEDTOR pfnDtor*/
                                NULL, /* void *pvUser*/
                                0 /* uint32_t fFlags*/
                                );
        if (RT_SUCCESS(rc))
#endif
        {
            RTListInit(&pCmdVbva->GuestCtlList);
            RTListInit(&pCmdVbva->HostCtlList);
            pCmdVbva->i32State = VBVAEXHOSTCONTEXT_STATE_PROCESSING;
            pCmdVbva->i32EnableState = VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
            return VINF_SUCCESS;
        }
#ifndef VBOXVDBG_MEMCACHE_DISABLE
        else
            WARN(("RTMemCacheCreate failed %d\n", rc));
#endif
    }
    else
        WARN(("RTCritSectInit failed %d\n", rc));

    return rc;
}
564
565DECLINLINE(bool) VBoxVBVAExHSIsEnabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
566{
567 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) >= VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
568}
569
570DECLINLINE(bool) VBoxVBVAExHSIsDisabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
571{
572 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) == VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
573}
574
575static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA)
576{
577 if (VBoxVBVAExHSIsEnabled(pCmdVbva))
578 {
579 WARN(("VBVAEx is enabled already\n"));
580 return VERR_INVALID_STATE;
581 }
582
583 pCmdVbva->pVBVA = pVBVA;
584 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
585 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
586 return VINF_SUCCESS;
587}
588
589static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
590{
591 if (VBoxVBVAExHSIsDisabled(pCmdVbva))
592 return VINF_SUCCESS;
593
594 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
595 return VINF_SUCCESS;
596}
597
/* Destruction counterpart of VBoxVBVAExHSInit; both control lists must be empty. */
static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    /* ensure the processor is stopped */
    Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);

    /* ensure no one tries to submit the command */
    if (pCmdVbva->pVBVA)
        pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;

    Assert(RTListIsEmpty(&pCmdVbva->GuestCtlList));
    Assert(RTListIsEmpty(&pCmdVbva->HostCtlList));

    RTCritSectDelete(&pCmdVbva->CltCritSect);

#ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMemCacheDestroy(pCmdVbva->CtlCache);
#endif

    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
}
618
619static int vboxVBVAExHSSaveGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
620{
621 int rc = SSMR3PutU32(pSSM, pCtl->enmType);
622 AssertRCReturn(rc, rc);
623 rc = SSMR3PutU32(pSSM, pCtl->u.cmd.cbCmd);
624 AssertRCReturn(rc, rc);
625 rc = SSMR3PutU32(pSSM, (uint32_t)(pCtl->u.cmd.pu8Cmd - pu8VramBase));
626 AssertRCReturn(rc, rc);
627
628 return VINF_SUCCESS;
629}
630
/* Serialize all pending guest controls. Caller holds CltCritSect; the VBVA
 * must be paused so the list is stable. */
static int vboxVBVAExHSSaveStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
{
    if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    {
        WARN(("vbva not paused\n"));
        return VERR_INVALID_STATE;
    }

    VBVAEXHOSTCTL* pCtl;
    int rc;
    RTListForEach(&pCmdVbva->GuestCtlList, pCtl, VBVAEXHOSTCTL, Node)
    {
        rc = vboxVBVAExHSSaveGuestCtl(pCmdVbva, pCtl, pu8VramBase, pSSM);
        AssertRCReturn(rc, rc);
    }

    /* a zero type terminates the list in the saved stream */
    rc = SSMR3PutU32(pSSM, 0);
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}
/* Saves state
 * @returns - VINF_SUCCESS, or failure on save state fail
 */
static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
{
    /* serialize the pending guest controls under the list lock */
    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    if (RT_FAILURE(rc))
    {
        WARN(("RTCritSectEnter failed %d\n", rc));
        return rc;
    }

    rc = vboxVBVAExHSSaveStateLocked(pCmdVbva, pu8VramBase, pSSM);
    if (RT_FAILURE(rc))
        WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));

    RTCritSectLeave(&pCmdVbva->CltCritSect);

    return rc;
}
672
673static int vboxVBVAExHSLoadGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
674{
675 uint32_t u32;
676 int rc = SSMR3GetU32(pSSM, &u32);
677 AssertRCReturn(rc, rc);
678
679 if (!u32)
680 return VINF_EOF;
681
682 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(pCmdVbva, (VBVAEXHOSTCTL_TYPE)u32);
683 if (!pHCtl)
684 {
685 WARN(("VBoxVBVAExHCtlCreate failed\n"));
686 return VERR_NO_MEMORY;
687 }
688
689 rc = SSMR3GetU32(pSSM, &u32);
690 AssertRCReturn(rc, rc);
691 pHCtl->u.cmd.cbCmd = u32;
692
693 rc = SSMR3GetU32(pSSM, &u32);
694 AssertRCReturn(rc, rc);
695 pHCtl->u.cmd.pu8Cmd = pu8VramBase + u32;
696
697 RTListAppend(&pCmdVbva->GuestCtlList, &pHCtl->Node);
698 ++pCmdVbva->u32cCtls;
699
700 return VINF_SUCCESS;
701}
702
703
/* Deserialize guest controls until the zero terminator. Caller holds
 * CltCritSect. AssertRCReturn only triggers on failure statuses;
 * VINF_EOF is a success status and simply ends the loop. */
static int vboxVBVAExHSLoadStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
{
    if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    {
        WARN(("vbva not stopped\n"));
        return VERR_INVALID_STATE;
    }

    int rc;

    do {
        rc = vboxVBVAExHSLoadGuestCtl(pCmdVbva, pu8VramBase, pSSM, u32Version);
        AssertRCReturn(rc, rc);
    } while (VINF_EOF != rc);

    return VINF_SUCCESS;
}
721
722/* Loads state
723 * @returns - same as VBoxVBVAExHSCheckCommands, or failure on load state fail
724 */
725static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
726{
727 Assert(VGA_SAVEDSTATE_VERSION_3D <= u32Version);
728 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
729 if (RT_FAILURE(rc))
730 {
731 WARN(("RTCritSectEnter failed %d\n", rc));
732 return rc;
733 }
734
735 rc = vboxVBVAExHSLoadStateLocked(pCmdVbva, pu8VramBase, pSSM, u32Version);
736 if (RT_FAILURE(rc))
737 WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
738
739 RTCritSectLeave(&pCmdVbva->CltCritSect);
740
741 return rc;
742}
743
/* Who submitted a control - determines which list it is queued on. */
typedef enum
{
    VBVAEXHOSTCTL_SOURCE_GUEST = 0,
    VBVAEXHOSTCTL_SOURCE_HOST
} VBVAEXHOSTCTL_SOURCE;
749
750
751static int VBoxVBVAExHCtlSubmit(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
752{
753 if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
754 {
755 Log(("cmd vbva not enabled\n"));
756 return VERR_INVALID_STATE;
757 }
758
759 pCtl->pfnComplete = pfnComplete;
760 pCtl->pvComplete = pvComplete;
761
762 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
763 if (RT_SUCCESS(rc))
764 {
765 if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
766 {
767 Log(("cmd vbva not enabled\n"));
768 RTCritSectLeave(&pCmdVbva->CltCritSect);
769 return VERR_INVALID_STATE;
770 }
771
772 if (enmSource > VBVAEXHOSTCTL_SOURCE_GUEST)
773 {
774 RTListAppend(&pCmdVbva->HostCtlList, &pCtl->Node);
775 }
776 else
777 RTListAppend(&pCmdVbva->GuestCtlList, &pCtl->Node);
778
779 ASMAtomicIncU32(&pCmdVbva->u32cCtls);
780
781 RTCritSectLeave(&pCmdVbva->CltCritSect);
782
783 rc = VBoxVBVAExHSCheckCommands(pCmdVbva);
784 }
785 else
786 WARN(("RTCritSectEnter failed %d\n", rc));
787
788 return rc;
789}
790
#ifdef VBOX_WITH_CRHGSMI
/* Per-source screen info plus the map of target screens. */
typedef struct VBOXVDMA_SOURCE
{
    VBVAINFOSCREEN Screen;
    VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
} VBOXVDMA_SOURCE;
#endif

/* The VDMA host instance data. */
typedef struct VBOXVDMAHOST
{
    PHGSMIINSTANCE pHgsmi;   /* HGSMI instance used for command transport */
    PVGASTATE pVGAState;     /* owning VGA device state */
#ifdef VBOX_WITH_CRHGSMI
    VBVAEXHOSTCONTEXT CmdVbva;              /* extended VBVA command context */
    VBOXVDMATHREAD Thread;                  /* worker thread */
    VBOXCRCMD_SVRINFO CrSrvInfo;            /* chromium command server info */
    VBVAEXHOSTCTL* pCurRemainingHostCtl;    /* current/remaining host control (used outside this chunk) */
    RTSEMEVENTMULTI HostCrCtlCompleteEvent; /* signalled on host Cr ctl completion */
    int32_t volatile i32cHostCrCtlCompleted;/* completion counter for the above */
//    VBOXVDMA_SOURCE aSources[VBOX_VIDEO_MAX_SCREENS];
#endif
#ifdef VBOX_VDMA_WITH_WATCHDOG
    PTMTIMERR3 WatchDogTimer;               /* watchdog timer handle */
#endif
} VBOXVDMAHOST, *PVBOXVDMAHOST;
816
817#ifdef VBOX_WITH_CRHGSMI
818
/* Called on the worker thread once startup has finished: publish the CREATED
 * state and fire the one-shot pfnChanged callback registered by ThreadCreate.
 * The callback fields are snapshotted and cleared before the state write. */
void VBoxVDMAThreadNotifyConstructSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
{
    Assert(pThread->u32State == VBOXVDMATHREAD_STATE_CREATING);
    PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
    void *pvChanged = pThread->pvChanged;

    pThread->pfnChanged = NULL;
    pThread->pvChanged = NULL;

    ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_CREATED);

    if (pfnChanged)
        pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
}
833
/* Called on the worker when it has acknowledged the termination request:
 * snapshot and clear the one-shot callback, then notify. Unlike the construct
 * path, u32State stays TERMINATING here; VBoxVDMAThreadCleanup moves it to
 * TERMINATED after RTThreadWait. */
void VBoxVDMAThreadNotifyTerminatingSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
{
    Assert(pThread->u32State == VBOXVDMATHREAD_STATE_TERMINATING);
    PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
    void *pvChanged = pThread->pvChanged;

    pThread->pfnChanged = NULL;
    pThread->pvChanged = NULL;

    if (pfnChanged)
        pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
}
846
847DECLINLINE(bool) VBoxVDMAThreadIsTerminating(PVBOXVDMATHREAD pThread)
848{
849 return ASMAtomicUoReadU32(&pThread->u32State) == VBOXVDMATHREAD_STATE_TERMINATING;
850}
851
/* Reset the thread structure to a clean, fully-terminated state. */
void VBoxVDMAThreadInit(PVBOXVDMATHREAD pThread)
{
    memset(pThread, 0, sizeof (*pThread));
    pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
}
857
858int VBoxVDMAThreadCleanup(PVBOXVDMATHREAD pThread)
859{
860 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
861 switch (u32State)
862 {
863 case VBOXVDMATHREAD_STATE_TERMINATED:
864 return VINF_SUCCESS;
865 case VBOXVDMATHREAD_STATE_TERMINATING:
866 {
867 int rc = RTThreadWait(pThread->hWorkerThread, RT_INDEFINITE_WAIT, NULL);
868 if (!RT_SUCCESS(rc))
869 {
870 WARN(("RTThreadWait failed %d\n", rc));
871 return rc;
872 }
873
874 RTSemEventDestroy(pThread->hEvent);
875
876 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATED);
877 return VINF_SUCCESS;
878 }
879 default:
880 WARN(("invalid state"));
881 return VERR_INVALID_STATE;
882 }
883}
884
/* Create and start the worker thread.
 * pfnCreated/pvCreated form a one-shot notification fired from the worker
 * (via VBoxVDMAThreadNotifyConstructSucceeded) once startup completes.
 * Any previous terminating worker is reaped first via VBoxVDMAThreadCleanup. */
int VBoxVDMAThreadCreate(PVBOXVDMATHREAD pThread, PFNRTTHREAD pfnThread, void *pvThread, PFNVBOXVDMATHREAD_CHANGED pfnCreated, void*pvCreated)
{
    int rc = VBoxVDMAThreadCleanup(pThread);
    if (RT_FAILURE(rc))
    {
        WARN(("VBoxVDMAThreadCleanup failed %d\n", rc));
        return rc;
    }

    rc = RTSemEventCreate(&pThread->hEvent);
    if (RT_SUCCESS(rc))
    {
        pThread->u32State = VBOXVDMATHREAD_STATE_CREATING;
        pThread->pfnChanged = pfnCreated;
        pThread->pvChanged = pvCreated;
        rc = RTThreadCreate(&pThread->hWorkerThread, pfnThread, pvThread, 0, RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "VDMA");
        if (RT_SUCCESS(rc))
            return VINF_SUCCESS;
        else
            WARN(("RTThreadCreate failed %d\n", rc));

        RTSemEventDestroy(pThread->hEvent);
    }
    else
        WARN(("RTSemEventCreate failed %d\n", rc));

    /* roll back to a clean terminated state on any failure */
    pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;

    return rc;
}
915
916DECLINLINE(int) VBoxVDMAThreadEventNotify(PVBOXVDMATHREAD pThread)
917{
918 int rc = RTSemEventSignal(pThread->hEvent);
919 AssertRC(rc);
920 return rc;
921}
922
923DECLINLINE(int) VBoxVDMAThreadEventWait(PVBOXVDMATHREAD pThread, RTMSINTERVAL cMillies)
924{
925 int rc = RTSemEventWait(pThread->hEvent, cMillies);
926 AssertRC(rc);
927 return rc;
928}
929
930int VBoxVDMAThreadTerm(PVBOXVDMATHREAD pThread, PFNVBOXVDMATHREAD_CHANGED pfnTerminated, void*pvTerminated, bool fNotify)
931{
932 int rc;
933 do
934 {
935 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
936 switch (u32State)
937 {
938 case VBOXVDMATHREAD_STATE_CREATED:
939 pThread->pfnChanged = pfnTerminated;
940 pThread->pvChanged = pvTerminated;
941 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATING);
942 if (fNotify)
943 {
944 rc = VBoxVDMAThreadEventNotify(pThread);
945 AssertRC(rc);
946 }
947 return VINF_SUCCESS;
948 case VBOXVDMATHREAD_STATE_TERMINATING:
949 case VBOXVDMATHREAD_STATE_TERMINATED:
950 {
951 WARN(("thread is marked to termination or terminated\nn"));
952 return VERR_INVALID_STATE;
953 }
954 case VBOXVDMATHREAD_STATE_CREATING:
955 {
956 /* wait till the thread creation is completed */
957 WARN(("concurrent thread create/destron\n"));
958 RTThreadYield();
959 continue;
960 }
961 default:
962 WARN(("invalid state"));
963 return VERR_INVALID_STATE;
964 }
965 } while (1);
966
967 WARN(("should never be here\n"));
968 return VERR_INTERNAL_ERROR;
969}
970
971static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource);
972
/* Completion callback type for chromium control commands. */
typedef DECLCALLBACK(void) FNVBOXVDMACRCTL_CALLBACK(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext);
typedef FNVBOXVDMACRCTL_CALLBACK *PFNVBOXVDMACRCTL_CALLBACK;

/* Private, refcounted header allocated in front of every chromium control. */
typedef struct VBOXVDMACMD_CHROMIUM_CTL_PRIVATE
{
    uint32_t cRefs;                          /* reference count; allocation freed when it hits 0 */
    int32_t rc;                              /* completion status of the command */
    PFNVBOXVDMACRCTL_CALLBACK pfnCompletion; /* invoked when the driver completes the command */
    void *pvCompletion;                      /* user context for pfnCompletion */
    VBOXVDMACMD_CHROMIUM_CTL Cmd;            /* the public command handed to the driver */
} VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, *PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE;

/* Map a public command pointer back to its private header. */
#define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(_p) ((PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)(((uint8_t*)(_p)) - RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd)))
986
987static PVBOXVDMACMD_CHROMIUM_CTL vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE enmCmd, uint32_t cbCmd)
988{
989 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)RTMemAllocZ(cbCmd + RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd));
990 Assert(pHdr);
991 if (pHdr)
992 {
993 pHdr->cRefs = 1;
994 pHdr->rc = VERR_NOT_IMPLEMENTED;
995 pHdr->Cmd.enmType = enmCmd;
996 pHdr->Cmd.cbCmd = cbCmd;
997 return &pHdr->Cmd;
998 }
999
1000 return NULL;
1001}
1002
1003DECLINLINE(void) vboxVDMACrCtlRelease (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1004{
1005 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1006 uint32_t cRefs = ASMAtomicDecU32(&pHdr->cRefs);
1007 if(!cRefs)
1008 {
1009 RTMemFree(pHdr);
1010 }
1011}
1012
1013DECLINLINE(void) vboxVDMACrCtlRetain (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1014{
1015 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1016 ASMAtomicIncU32(&pHdr->cRefs);
1017}
1018
1019DECLINLINE(int) vboxVDMACrCtlGetRc (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1020{
1021 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1022 return pHdr->rc;
1023}
1024
1025static DECLCALLBACK(void) vboxVDMACrCtlCbSetEvent(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
1026{
1027 RTSemEventSignal((RTSEMEVENT)pvContext);
1028}
1029
1030static DECLCALLBACK(void) vboxVDMACrCtlCbReleaseCmd(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
1031{
1032 vboxVDMACrCtlRelease(pCmd);
1033}
1034
1035
/* Hand a chromium control to the display driver for asynchronous processing;
 * pfnCompletion is stored in the private header and invoked on completion.
 * @returns VERR_NOT_SUPPORTED when no capable driver connector is attached. */
static int vboxVDMACrCtlPostAsync (PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd, PFNVBOXVDMACRCTL_CALLBACK pfnCompletion, void *pvCompletion)
{
    if (   pVGAState->pDrv
        && pVGAState->pDrv->pfnCrHgsmiControlProcess)
    {
        PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
        pHdr->pfnCompletion = pfnCompletion;
        pHdr->pvCompletion = pvCompletion;
        pVGAState->pDrv->pfnCrHgsmiControlProcess(pVGAState->pDrv, pCmd, cbCmd);
        return VINF_SUCCESS;
    }
#ifdef DEBUG_misha
    Assert(0);
#endif
    return VERR_NOT_SUPPORTED;
}
1052
/**
 * Submits a chromium control command and blocks until the backend completes it.
 *
 * @returns VBox status code of the submission/wait (the command's own status
 *          must be fetched separately via vboxVDMACrCtlGetRc).
 */
static int vboxVDMACrCtlPost(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd)
{
    RTSEMEVENT hComplEvent;
    int rc = RTSemEventCreate(&hComplEvent);
    AssertRC(rc);
    if(RT_SUCCESS(rc))
    {
        /* completion callback signals the event */
        rc = vboxVDMACrCtlPostAsync(pVGAState, pCmd, cbCmd, vboxVDMACrCtlCbSetEvent, (void*)hComplEvent);
#ifdef DEBUG_misha
        AssertRC(rc);
#endif
        if (RT_SUCCESS(rc))
        {
            rc = RTSemEventWaitNoResume(hComplEvent, RT_INDEFINITE_WAIT);
            AssertRC(rc);
            if(RT_SUCCESS(rc))
            {
                RTSemEventDestroy(hComplEvent);
            }
            /* NOTE(review): if the wait fails (e.g. interrupted) the event is NOT
             * destroyed — the completion callback may still signal it later, so
             * destroying here would be unsafe; the handle is leaked instead.
             * Confirm this trade-off is intended. */
        }
        else
        {
            /* the command is completed */
            RTSemEventDestroy(hComplEvent);
        }
    }
    return rc;
}
1081
/** Completion context for synchronous HGCM control submission
 *  (sic: "CYNC" typo is kept — the name may be referenced elsewhere). */
typedef struct VDMA_VBVA_CTL_CYNC_COMPLETION
{
    int rc;            /**< Status reported by the completion callback. */
    RTSEMEVENT hEvent; /**< Signalled by the callback to wake the submitter. */
} VDMA_VBVA_CTL_CYNC_COMPLETION;
1087
1088static DECLCALLBACK(void) vboxVDMACrHgcmSubmitSyncCompletion(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
1089{
1090 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvCompletion;
1091 pData->rc = rc;
1092 rc = RTSemEventSignal(pData->hEvent);
1093 if (!RT_SUCCESS(rc))
1094 WARN(("RTSemEventSignal failed %d\n", rc));
1095}
1096
1097static int vboxVDMACrHgcmSubmitSync(struct VBOXVDMAHOST *pVdma, VBOXCRCMDCTL* pCtl, uint32_t cbCtl)
1098{
1099 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
1100 Data.rc = VERR_NOT_IMPLEMENTED;
1101 int rc = RTSemEventCreate(&Data.hEvent);
1102 if (!RT_SUCCESS(rc))
1103 {
1104 WARN(("RTSemEventCreate failed %d\n", rc));
1105 return rc;
1106 }
1107
1108 PVGASTATE pVGAState = pVdma->pVGAState;
1109 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCtl, cbCtl, vboxVDMACrHgcmSubmitSyncCompletion, &Data);
1110 if (RT_SUCCESS(rc))
1111 {
1112 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
1113 if (RT_SUCCESS(rc))
1114 {
1115 rc = Data.rc;
1116 if (!RT_SUCCESS(rc))
1117 {
1118 WARN(("pfnCrHgcmCtlSubmit command failed %d\n", rc));
1119 }
1120
1121 }
1122 else
1123 WARN(("RTSemEventWait failed %d\n", rc));
1124 }
1125 else
1126 WARN(("pfnCrHgcmCtlSubmit failed %d\n", rc));
1127
1128
1129 RTSemEventDestroy(Data.hEvent);
1130
1131 return rc;
1132}
1133
1134static int vdmaVBVACtlDisableSync(PVBOXVDMAHOST pVdma)
1135{
1136 VBVAEXHOSTCTL HCtl;
1137 HCtl.enmType = VBVAEXHOSTCTL_TYPE_GHH_DISABLE;
1138 int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1139 if (RT_FAILURE(rc))
1140 {
1141 Log(("vdmaVBVACtlSubmitSync failed %d\n", rc));
1142 return rc;
1143 }
1144
1145 vgaUpdateDisplayAll(pVdma->pVGAState);
1146
1147 return VINF_SUCCESS;
1148}
1149
1150static DECLCALLBACK(uint8_t*) vboxVDMACrHgcmHandleEnableRemainingHostCommand(HVBOXCRCMDCTL_REMAINING_HOST_COMMAND hClient, uint32_t *pcbCtl, int prevCmdRc)
1151{
1152 struct VBOXVDMAHOST *pVdma = hClient;
1153 if (!pVdma->pCurRemainingHostCtl)
1154 {
1155 /* disable VBVA, all subsequent host commands will go HGCM way */
1156 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1157 }
1158 else
1159 {
1160 VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pVdma->pCurRemainingHostCtl, prevCmdRc);
1161 }
1162
1163 pVdma->pCurRemainingHostCtl = VBoxVBVAExHPCheckHostCtlOnDisable(&pVdma->CmdVbva);
1164 if (pVdma->pCurRemainingHostCtl)
1165 {
1166 *pcbCtl = pVdma->pCurRemainingHostCtl->u.cmd.cbCmd;
1167 return pVdma->pCurRemainingHostCtl->u.cmd.pu8Cmd;
1168 }
1169
1170 *pcbCtl = 0;
1171 return NULL;
1172}
1173
/** Callback invoked when HGCM termination notification has been processed.
 *  Body is sanity checks only: the VBVA context must still be in the
 *  processing state and the worker thread must be terminating. */
static DECLCALLBACK(void) vboxVDMACrHgcmNotifyTerminatingDoneCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient)
{
    struct VBOXVDMAHOST *pVdma = hClient;
    Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pVdma->Thread.u32State == VBOXVDMATHREAD_STATE_TERMINATING);
}
1180
1181static DECLCALLBACK(int) vboxVDMACrHgcmNotifyTerminatingCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient, VBOXCRCMDCTL_HGCMENABLE_DATA *pHgcmEnableData)
1182{
1183 struct VBOXVDMAHOST *pVdma = hClient;
1184 VBVAEXHOSTCTL HCtl;
1185 HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD;
1186 int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1187
1188 pHgcmEnableData->hRHCmd = pVdma;
1189 pHgcmEnableData->pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1190
1191 if (RT_FAILURE(rc))
1192 {
1193 if (rc == VERR_INVALID_STATE)
1194 rc = VINF_SUCCESS;
1195 else
1196 WARN(("vdmaVBVACtlSubmitSync failed %d\n", rc));
1197 }
1198
1199 return rc;
1200}
1201
1202static int vboxVDMACrHgcmHandleEnable(struct VBOXVDMAHOST *pVdma)
1203{
1204 VBOXCRCMDCTL_ENABLE Enable;
1205 Enable.Hdr.enmType = VBOXCRCMDCTL_TYPE_ENABLE;
1206 Enable.Data.hRHCmd = pVdma;
1207 Enable.Data.pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1208
1209 int rc = vboxVDMACrHgcmSubmitSync(pVdma, &Enable.Hdr, sizeof (Enable));
1210 Assert(!pVdma->pCurRemainingHostCtl);
1211 if (RT_SUCCESS(rc))
1212 {
1213 Assert(!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1214 return VINF_SUCCESS;
1215 }
1216
1217 Assert(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1218 WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));
1219
1220 return rc;
1221}
1222
1223static int vdmaVBVAEnableProcess(struct VBOXVDMAHOST *pVdma, uint32_t u32Offset)
1224{
1225 if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1226 {
1227 WARN(("vdma VBVA is already enabled\n"));
1228 return VERR_INVALID_STATE;
1229 }
1230
1231 VBVABUFFER *pVBVA = (VBVABUFFER *)HGSMIOffsetToPointerHost(pVdma->pHgsmi, u32Offset);
1232 if (!pVBVA)
1233 {
1234 WARN(("invalid offset %d\n", u32Offset));
1235 return VERR_INVALID_PARAMETER;
1236 }
1237
1238 if (!pVdma->CrSrvInfo.pfnEnable)
1239 {
1240#ifdef DEBUG_misha
1241 WARN(("pfnEnable is NULL\n"));
1242 return VERR_NOT_SUPPORTED;
1243#endif
1244 }
1245
1246 int rc = VBoxVBVAExHSEnable(&pVdma->CmdVbva, pVBVA);
1247 if (RT_SUCCESS(rc))
1248 {
1249 VBOXCRCMDCTL_DISABLE Disable;
1250 Disable.Hdr.enmType = VBOXCRCMDCTL_TYPE_DISABLE;
1251 Disable.Data.hNotifyTerm = pVdma;
1252 Disable.Data.pfnNotifyTerm = vboxVDMACrHgcmNotifyTerminatingCb;
1253 Disable.Data.pfnNotifyTermDone = vboxVDMACrHgcmNotifyTerminatingDoneCb;
1254 rc = vboxVDMACrHgcmSubmitSync(pVdma, &Disable.Hdr, sizeof (Disable));
1255 if (RT_SUCCESS(rc))
1256 {
1257 PVGASTATE pVGAState = pVdma->pVGAState;
1258 VBOXCRCMD_SVRENABLE_INFO Info;
1259 Info.hCltScr = pVGAState->pDrv;
1260 Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
1261 Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
1262 Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
1263 rc = pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
1264 if (RT_SUCCESS(rc))
1265 return VINF_SUCCESS;
1266 else
1267 WARN(("pfnEnable failed %d\n", rc));
1268
1269 vboxVDMACrHgcmHandleEnable(pVdma);
1270 }
1271 else
1272 WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));
1273
1274 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1275 }
1276 else
1277 WARN(("VBoxVBVAExHSEnable failed %d\n", rc));
1278
1279 return rc;
1280}
1281
/**
 * Disables command VBVA processing.
 *
 * @returns VBox status code (already-disabled is success).
 * @param   pVdma          The VDMA host state.
 * @param   fDoHgcmEnable  When true, also switch the host ctl channel back to
 *                         HGCM and notify the frontend; false is used on HGCM
 *                         unload where the channel is going away anyway.
 */
static int vdmaVBVADisableProcess(struct VBOXVDMAHOST *pVdma, bool fDoHgcmEnable)
{
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    {
        Log(("vdma VBVA is already disabled\n"));
        return VINF_SUCCESS;
    }

    /* NOTE(review): pfnDisable is called unguarded; presumably it cannot be NULL
     * once VBVA was successfully enabled — confirm against vdmaVBVAEnableProcess. */
    int rc = pVdma->CrSrvInfo.pfnDisable(pVdma->CrSrvInfo.hSvr);
    if (RT_SUCCESS(rc))
    {
        if (fDoHgcmEnable)
        {
            PVGASTATE pVGAState = pVdma->pVGAState;

            /* disable is a bit tricky
             * we need to ensure the host ctl commands do not come out of order
             * and do not come over HGCM channel until after it is enabled */
            rc = vboxVDMACrHgcmHandleEnable(pVdma);
            if (RT_SUCCESS(rc))
            {
                vdmaVBVANotifyDisable(pVGAState);
                return VINF_SUCCESS;
            }

            /* roll back: re-enable the chromium server since the HGCM switch
             * failed; its status is deliberately ignored here */
            VBOXCRCMD_SVRENABLE_INFO Info;
            Info.hCltScr = pVGAState->pDrv;
            Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
            Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
            Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
            pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
        }
    }
    else
        WARN(("pfnDisable failed %d\n", rc));

    return rc;
}
1320
1321static int vboxVDMACrHostCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd, bool *pfContinue)
1322{
1323 *pfContinue = true;
1324
1325 switch (pCmd->enmType)
1326 {
1327 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1328 {
1329 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1330 {
1331 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1332 return VERR_INVALID_STATE;
1333 }
1334 return pVdma->CrSrvInfo.pfnHostCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1335 }
1336 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1337 {
1338 int rc = vdmaVBVADisableProcess(pVdma, true);
1339 if (RT_FAILURE(rc))
1340 {
1341 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1342 return rc;
1343 }
1344
1345 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
1346 }
1347 case VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD:
1348 {
1349 int rc = vdmaVBVADisableProcess(pVdma, false);
1350 if (RT_FAILURE(rc))
1351 {
1352 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1353 return rc;
1354 }
1355
1356 rc = VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, true);
1357 if (RT_FAILURE(rc))
1358 {
1359 WARN(("VBoxVDMAThreadTerm failed %d\n", rc));
1360 return rc;
1361 }
1362
1363 *pfContinue = false;
1364 return VINF_SUCCESS;
1365 }
1366 case VBVAEXHOSTCTL_TYPE_HH_SAVESTATE:
1367 {
1368 PVGASTATE pVGAState = pVdma->pVGAState;
1369 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1370 int rc = VBoxVBVAExHSSaveState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM);
1371 if (RT_FAILURE(rc))
1372 {
1373 WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
1374 return rc;
1375 }
1376 return pVdma->CrSrvInfo.pfnSaveState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM);
1377 }
1378 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE:
1379 {
1380 PVGASTATE pVGAState = pVdma->pVGAState;
1381 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1382
1383 int rc = VBoxVBVAExHSLoadState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1384 if (RT_FAILURE(rc))
1385 {
1386 WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
1387 return rc;
1388 }
1389
1390 rc = pVdma->CrSrvInfo.pfnLoadState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1391 if (RT_FAILURE(rc))
1392 {
1393 WARN(("pfnLoadState failed %d\n", rc));
1394 return rc;
1395 }
1396
1397 return VINF_SUCCESS;
1398 }
1399 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE:
1400 {
1401 PVGASTATE pVGAState = pVdma->pVGAState;
1402
1403 for (uint32_t i = 0; i < pVGAState->cMonitors; ++i)
1404 {
1405 VBVAINFOSCREEN CurScreen;
1406 VBVAINFOVIEW CurView;
1407
1408 int rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
1409 if (RT_FAILURE(rc))
1410 {
1411 WARN(("VBVAGetInfoViewAndScreen failed %d\n", rc));
1412 return rc;
1413 }
1414
1415 rc = VBVAInfoScreen(pVGAState, &CurScreen);
1416 if (RT_FAILURE(rc))
1417 {
1418 WARN(("VBVAInfoScreen failed %d\n", rc));
1419 return rc;
1420 }
1421 }
1422
1423 return VINF_SUCCESS;
1424 }
1425 default:
1426 WARN(("unexpected host ctl type %d\n", pCmd->enmType));
1427 return VERR_INVALID_PARAMETER;
1428 }
1429}
1430
/**
 * Applies one guest resize entry: updates view/screen info for every monitor
 * in the entry's target map and then notifies the chromium server's pfnResize.
 *
 * @returns VBox status code.
 */
static int vboxVDMACrGuestCtlResizeEntryProcess(struct VBOXVDMAHOST *pVdma, VBOXCMDVBVA_RESIZE_ENTRY *pEntry)
{
    PVGASTATE pVGAState = pVdma->pVGAState;
    VBVAINFOSCREEN Screen = pEntry->Screen; /* local copy; mutated per target below */
    VBVAINFOVIEW View;
    VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
    uint32_t u32ViewIndex = Screen.u32ViewIndex;
    uint16_t u16Flags = Screen.u16Flags;
    bool fDisable = false;

    memcpy(aTargetMap, pEntry->aTargetMap, sizeof (aTargetMap));

    /* mask out target bits beyond the configured monitor count */
    ASMBitClearRange(aTargetMap, pVGAState->cMonitors, VBOX_VIDEO_MAX_SCREENS);

    if (u16Flags & VBVA_SCREEN_F_DISABLED)
    {
        /* disable request: replace the screen data with a zeroed, disabled screen */
        fDisable = true;
        memset(&Screen, 0, sizeof (Screen));
        Screen.u32ViewIndex = u32ViewIndex;
        Screen.u16Flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED;
    }

    /* NOTE(review): '>' admits u32ViewIndex == cMonitors while valid indices
     * elsewhere in this file are 0..cMonitors-1 — confirm whether '>=' was
     * intended (0xffffffff is the explicit "all/disable" wildcard). */
    if (u32ViewIndex > pVGAState->cMonitors)
    {
        if (u32ViewIndex != 0xffffffff)
        {
            WARN(("invalid view index\n"));
            return VERR_INVALID_PARAMETER;
        }
        else if (!fDisable)
        {
            WARN(("0xffffffff view index only valid for disable requests\n"));
            return VERR_INVALID_PARAMETER;
        }
    }

    View.u32ViewOffset = 0;
    View.u32ViewSize = Screen.u32LineSize * Screen.u32Height + Screen.u32StartOffset;
    View.u32MaxScreenSize = View.u32ViewSize + Screen.u32Width + 1; /* <- make VBVAInfoScreen logic (offEnd < pView->u32MaxScreenSize) happy */

    int rc = VINF_SUCCESS;

    /* walk every monitor selected in the (already clipped) target bitmap */
    for (int i = ASMBitFirstSet(aTargetMap, pVGAState->cMonitors);
            i >= 0;
            i = ASMBitNextSet(aTargetMap, pVGAState->cMonitors, i))
    {
        Screen.u32ViewIndex = i;

        VBVAINFOSCREEN CurScreen;
        VBVAINFOVIEW CurView;

        rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
        AssertRC(rc);

        /* skip monitors whose configuration already matches */
        if (!memcmp(&Screen, &CurScreen, sizeof (CurScreen)))
            continue;

        /* do not touch the view when disabling an already-sized view */
        if (!fDisable || !CurView.u32ViewSize)
        {
            View.u32ViewIndex = Screen.u32ViewIndex;

            rc = VBVAInfoView(pVGAState, &View);
            if (RT_FAILURE(rc))
            {
                WARN(("VBVAInfoView failed %d\n", rc));
                break;
            }
        }

        rc = VBVAInfoScreen(pVGAState, &Screen);
        if (RT_FAILURE(rc))
        {
            WARN(("VBVAInfoScreen failed %d\n", rc));
            break;
        }
    }

    if (RT_FAILURE(rc))
        return rc;

    /* restore the original (possibly wildcard) index before telling the server */
    Screen.u32ViewIndex = u32ViewIndex;

    rc = pVdma->CrSrvInfo.pfnResize(pVdma->CrSrvInfo.hSvr, &Screen, aTargetMap);
    if (RT_FAILURE(rc))
        WARN(("pfnResize failed %d\n", rc));

    return rc;
}
1519
1520static int vboxVDMACrGuestCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd)
1521{
1522 VBVAEXHOSTCTL_TYPE enmType = pCmd->enmType;
1523 switch (enmType)
1524 {
1525 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1526 {
1527 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1528 {
1529 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1530 return VERR_INVALID_STATE;
1531 }
1532 return pVdma->CrSrvInfo.pfnGuestCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1533 }
1534 case VBVAEXHOSTCTL_TYPE_GHH_RESIZE:
1535 {
1536 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1537 {
1538 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1539 return VERR_INVALID_STATE;
1540 }
1541
1542 uint32_t cbCmd = pCmd->u.cmd.cbCmd;
1543
1544 if (cbCmd % sizeof (VBOXCMDVBVA_RESIZE_ENTRY))
1545 {
1546 WARN(("invalid buffer size\n"));
1547 return VERR_INVALID_PARAMETER;
1548 }
1549
1550 uint32_t cElements = cbCmd / sizeof (VBOXCMDVBVA_RESIZE_ENTRY);
1551 if (!cElements)
1552 {
1553 WARN(("invalid buffer size\n"));
1554 return VERR_INVALID_PARAMETER;
1555 }
1556
1557 VBOXCMDVBVA_RESIZE *pResize = (VBOXCMDVBVA_RESIZE*)pCmd->u.cmd.pu8Cmd;
1558
1559 int rc = VINF_SUCCESS;
1560
1561 for (uint32_t i = 0; i < cElements; ++i)
1562 {
1563 VBOXCMDVBVA_RESIZE_ENTRY *pEntry = &pResize->aEntries[i];
1564 rc = vboxVDMACrGuestCtlResizeEntryProcess(pVdma, pEntry);
1565 if (RT_FAILURE(rc))
1566 {
1567 WARN(("vboxVDMACrGuestCtlResizeEntryProcess failed %d\n", rc));
1568 break;
1569 }
1570 }
1571 return rc;
1572 }
1573 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE:
1574 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED:
1575 {
1576 VBVAENABLE *pEnable = (VBVAENABLE *)pCmd->u.cmd.pu8Cmd;
1577 Assert(pCmd->u.cmd.cbCmd == sizeof (VBVAENABLE));
1578 uint32_t u32Offset = pEnable->u32Offset;
1579 int rc = vdmaVBVAEnableProcess(pVdma, u32Offset);
1580 if (!RT_SUCCESS(rc))
1581 {
1582 WARN(("vdmaVBVAEnableProcess failed %d\n", rc));
1583 return rc;
1584 }
1585
1586 if (enmType == VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED)
1587 {
1588 rc = VBoxVBVAExHPPause(&pVdma->CmdVbva);
1589 if (!RT_SUCCESS(rc))
1590 {
1591 WARN(("VBoxVBVAExHPPause failed %d\n", rc));
1592 return rc;
1593 }
1594 }
1595
1596 return VINF_SUCCESS;
1597 }
1598 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1599 {
1600 int rc = vdmaVBVADisableProcess(pVdma, true);
1601 if (RT_FAILURE(rc))
1602 {
1603 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1604 return rc;
1605 }
1606
1607 /* do vgaUpdateDisplayAll right away */
1608 vgaUpdateDisplayAll(pVdma->pVGAState);
1609
1610 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
1611 }
1612 default:
1613 WARN(("unexpected ctl type %d\n", pCmd->enmType));
1614 return VERR_INVALID_PARAMETER;
1615 }
1616}
1617
1618/**
1619 * @param fIn - whether this is a page in or out op.
1620 * the direction is VRA#M - related, so fIn == true - transfer to VRAM); false - transfer from VRAM
1621 */
1622static int vboxVDMACrCmdVbvaProcessPagingEl(PPDMDEVINS pDevIns, VBOXCMDVBVAPAGEIDX iPage, uint8_t *pu8Vram, bool fIn)
1623{
1624 RTGCPHYS phPage = (RTGCPHYS)(iPage << PAGE_SHIFT);
1625 PGMPAGEMAPLOCK Lock;
1626 int rc;
1627
1628 if (fIn)
1629 {
1630 const void * pvPage;
1631 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPage, &Lock);
1632 if (!RT_SUCCESS(rc))
1633 {
1634 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d", rc));
1635 return rc;
1636 }
1637
1638 memcpy(pu8Vram, pvPage, PAGE_SIZE);
1639
1640 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1641 }
1642 else
1643 {
1644 void * pvPage;
1645 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvPage, &Lock);
1646 if (!RT_SUCCESS(rc))
1647 {
1648 WARN(("PDMDevHlpPhysGCPhys2CCPtr failed %d", rc));
1649 return rc;
1650 }
1651
1652 memcpy(pvPage, pu8Vram, PAGE_SIZE);
1653
1654 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1655 }
1656
1657 return VINF_SUCCESS;
1658}
1659
1660static int vboxVDMACrCmdVbvaProcessPagingEls(PPDMDEVINS pDevIns, const VBOXCMDVBVAPAGEIDX *piPages, uint32_t cPages, uint8_t *pu8Vram, bool fIn)
1661{
1662 for (uint32_t i = 0; i < cPages; ++i, pu8Vram += PAGE_SIZE)
1663 {
1664 int rc = vboxVDMACrCmdVbvaProcessPagingEl(pDevIns, piPages[i], pu8Vram, fIn);
1665 if (!RT_SUCCESS(rc))
1666 {
1667 WARN(("vboxVDMACrCmdVbvaProcessPagingEl failed %d", rc));
1668 return rc;
1669 }
1670 }
1671
1672 return VINF_SUCCESS;
1673}
1674
1675static int8_t vboxVDMACrCmdVbvaPagingDataInit(PVGASTATE pVGAState, const VBOXCMDVBVA_HDR *pHdr, const VBOXCMDVBVA_PAGING_TRANSFER_DATA *pData, uint32_t cbCmd,
1676 const VBOXCMDVBVAPAGEIDX **ppPages, VBOXCMDVBVAPAGEIDX *pcPages,
1677 uint8_t **ppu8Vram, bool *pfIn)
1678{
1679 if (cbCmd < sizeof (VBOXCMDVBVA_PAGING_TRANSFER))
1680 {
1681 WARN(("cmd too small"));
1682 return -1;
1683 }
1684
1685 VBOXCMDVBVAPAGEIDX cPages = cbCmd - RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers);
1686 if (cPages % sizeof (VBOXCMDVBVAPAGEIDX))
1687 {
1688 WARN(("invalid cmd size"));
1689 return -1;
1690 }
1691 cPages /= sizeof (VBOXCMDVBVAPAGEIDX);
1692
1693 VBOXCMDVBVAOFFSET offVRAM = pData->Alloc.u.offVRAM;
1694 if (offVRAM & PAGE_OFFSET_MASK)
1695 {
1696 WARN(("offVRAM address is not on page boundary\n"));
1697 return -1;
1698 }
1699 const VBOXCMDVBVAPAGEIDX *pPages = pData->aPageNumbers;
1700
1701 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1702 uint8_t *pu8VramMax = pu8VramBase + pVGAState->vram_size;
1703 if (offVRAM >= pVGAState->vram_size)
1704 {
1705 WARN(("invalid vram offset"));
1706 return -1;
1707 }
1708
1709 if (offVRAM + (cPages << PAGE_SHIFT) >= pVGAState->vram_size)
1710 {
1711 WARN(("invalid cPages"));
1712 return -1;
1713 }
1714
1715 uint8_t *pu8Vram = pu8VramBase + offVRAM;
1716 bool fIn = !!(pHdr->u8Flags & VBOXCMDVBVA_OPF_PAGING_TRANSFER_IN);
1717
1718 *ppPages = pPages;
1719 *pcPages = cPages;
1720 *ppu8Vram = pu8Vram;
1721 *pfIn = fIn;
1722 return 0;
1723}
1724
1725static int8_t vboxVDMACrCmdVbvaPagingFill(PVGASTATE pVGAState, VBOXCMDVBVA_PAGING_FILL *pFill)
1726{
1727 VBOXCMDVBVAOFFSET offVRAM = pFill->offVRAM;
1728 if (offVRAM & PAGE_OFFSET_MASK)
1729 {
1730 WARN(("offVRAM address is not on page boundary\n"));
1731 return -1;
1732 }
1733
1734 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1735 uint8_t *pu8VramMax = pu8VramBase + pVGAState->vram_size;
1736 if (offVRAM >= pVGAState->vram_size)
1737 {
1738 WARN(("invalid vram offset"));
1739 return -1;
1740 }
1741
1742 uint32_t cbFill = pFill->u32CbFill;
1743
1744 if (offVRAM + cbFill >= pVGAState->vram_size)
1745 {
1746 WARN(("invalid cPages"));
1747 return -1;
1748 }
1749
1750 uint32_t *pu32Vram = (uint32_t*)(pu8VramBase + offVRAM);
1751 uint32_t u32Color = pFill->u32Pattern;
1752
1753 Assert(!(cbFill % 4));
1754 for (uint32_t i = 0; i < cbFill / 4; ++i)
1755 {
1756 pu32Vram[i] = u32Color;
1757 }
1758
1759 return 0;
1760}
1761
1762static int8_t vboxVDMACrCmdVbvaProcessCmdData(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
1763{
1764 switch (pCmd->u8OpCode)
1765 {
1766 case VBOXCMDVBVA_OPTYPE_NOPCMD:
1767 return 0;
1768 case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
1769 {
1770 PVGASTATE pVGAState = pVdma->pVGAState;
1771 const VBOXCMDVBVAPAGEIDX *pPages;
1772 uint32_t cPages;
1773 uint8_t *pu8Vram;
1774 bool fIn;
1775 int8_t i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pCmd, &((VBOXCMDVBVA_PAGING_TRANSFER*)pCmd)->Data, cbCmd,
1776 &pPages, &cPages,
1777 &pu8Vram, &fIn);
1778 if (i8Result < 0)
1779 {
1780 WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
1781 return i8Result;
1782 }
1783
1784 PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
1785 int rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pPages, cPages, pu8Vram, fIn);
1786 if (!RT_SUCCESS(rc))
1787 {
1788 WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
1789 return -1;
1790 }
1791
1792 return 0;
1793 }
1794 case VBOXCMDVBVA_OPTYPE_PAGING_FILL:
1795 {
1796 PVGASTATE pVGAState = pVdma->pVGAState;
1797 if (cbCmd != sizeof (VBOXCMDVBVA_PAGING_FILL))
1798 {
1799 WARN(("cmd too small"));
1800 return -1;
1801 }
1802
1803 return vboxVDMACrCmdVbvaPagingFill(pVGAState, (VBOXCMDVBVA_PAGING_FILL*)pCmd);
1804 }
1805 default:
1806 return pVdma->CrSrvInfo.pfnCmd(pVdma->CrSrvInfo.hSvr, pCmd, cbCmd);
1807 }
1808}
1809
1810#if 0
1811typedef struct VBOXCMDVBVA_PAGING_TRANSFER
1812{
1813 VBOXCMDVBVA_HDR Hdr;
1814 /* for now can only contain offVRAM.
1815 * paging transfer can NOT be initiated for allocations having host 3D object (hostID) associated */
1816 VBOXCMDVBVA_ALLOCINFO Alloc;
1817 uint32_t u32Reserved;
1818 VBOXCMDVBVA_SYSMEMEL aSysMem[1];
1819} VBOXCMDVBVA_PAGING_TRANSFER;
1820#endif
1821
/* Compile-time layout checks: the SYSMEMCMD parsing and paging-transfer code
 * below rely on these exact sizes when splitting commands across guest pages. */
AssertCompile(sizeof (VBOXCMDVBVA_HDR) == 8);
AssertCompile(sizeof (VBOXCMDVBVA_ALLOCINFO) == 4);
AssertCompile(sizeof (VBOXCMDVBVAPAGEIDX) == 4);
AssertCompile(!(PAGE_SIZE % sizeof (VBOXCMDVBVAPAGEIDX)));

/* Number of sysmem elements fitting into one guest page. */
#define VBOXCMDVBVA_NUM_SYSMEMEL_PER_PAGE (PAGE_SIZE / sizeof (VBOXCMDVBVA_SYSMEMEL))
1828
1829static int8_t vboxVDMACrCmdVbvaProcess(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
1830{
1831 switch (pCmd->u8OpCode)
1832 {
1833 case VBOXCMDVBVA_OPTYPE_SYSMEMCMD:
1834 {
1835 if (cbCmd < sizeof (VBOXCMDVBVA_SYSMEMCMD))
1836 {
1837 WARN(("invalid command size"));
1838 return -1;
1839 }
1840 VBOXCMDVBVA_SYSMEMCMD *pSysmemCmd = (VBOXCMDVBVA_SYSMEMCMD*)pCmd;
1841 const VBOXCMDVBVA_HDR *pRealCmdHdr;
1842 uint32_t cbRealCmd = pCmd->u8Flags;
1843 cbRealCmd |= (pCmd->u.u8PrimaryID << 8);
1844 if (cbRealCmd < sizeof (VBOXCMDVBVA_HDR))
1845 {
1846 WARN(("invalid sysmem cmd size"));
1847 return -1;
1848 }
1849
1850 RTGCPHYS phCmd = (RTGCPHYS)pSysmemCmd->phCmd;
1851
1852 PGMPAGEMAPLOCK Lock;
1853 PVGASTATE pVGAState = pVdma->pVGAState;
1854 PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
1855 const void * pvCmd;
1856 int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
1857 if (!RT_SUCCESS(rc))
1858 {
1859 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
1860 return -1;
1861 }
1862
1863 Assert((phCmd & PAGE_OFFSET_MASK) == (((uintptr_t)pvCmd) & PAGE_OFFSET_MASK));
1864
1865 uint32_t cbCmdPart = PAGE_SIZE - (((uintptr_t)pvCmd) & PAGE_OFFSET_MASK);
1866
1867 if (cbRealCmd <= cbCmdPart)
1868 {
1869 pRealCmdHdr = (const VBOXCMDVBVA_HDR *)pvCmd;
1870 uint8_t i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pRealCmdHdr, cbRealCmd);
1871 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1872 return i8Result;
1873 }
1874
1875 VBOXCMDVBVA_HDR Hdr;
1876 const void *pvCurCmdTail;
1877 uint32_t cbCurCmdTail;
1878 if (cbCmdPart >= sizeof (*pRealCmdHdr))
1879 {
1880 pRealCmdHdr = (const VBOXCMDVBVA_HDR *)pvCmd;
1881 pvCurCmdTail = (const void*)(pRealCmdHdr + 1);
1882 cbCurCmdTail = cbCmdPart - sizeof (*pRealCmdHdr);
1883 }
1884 else
1885 {
1886 memcpy(&Hdr, pvCmd, cbCmdPart);
1887 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1888 phCmd += cbCmdPart;
1889 Assert(!(phCmd & PAGE_OFFSET_MASK));
1890 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
1891 if (!RT_SUCCESS(rc))
1892 {
1893 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
1894 return -1;
1895 }
1896
1897 cbCmdPart = sizeof (*pRealCmdHdr) - cbCmdPart;
1898 memcpy(((uint8_t*)(&Hdr)) + cbCmdPart, pvCmd, cbCmdPart);
1899 pvCurCmdTail = (const void*)(((uint8_t*)pvCmd) + cbCmdPart);
1900 cbCurCmdTail = PAGE_SIZE - cbCmdPart;
1901 }
1902
1903 if (cbCurCmdTail > cbRealCmd - sizeof (*pRealCmdHdr))
1904 cbCurCmdTail = cbRealCmd - sizeof (*pRealCmdHdr);
1905
1906 int8_t i8Result = 0;
1907
1908 switch (pRealCmdHdr->u8OpCode)
1909 {
1910 case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
1911 {
1912 const uint32_t *pPages;
1913 uint32_t cPages;
1914 uint8_t *pu8Vram;
1915 bool fIn;
1916 i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pRealCmdHdr, (const VBOXCMDVBVA_PAGING_TRANSFER_DATA*)pvCurCmdTail, cbRealCmd,
1917 &pPages, &cPages,
1918 &pu8Vram, &fIn);
1919 if (i8Result < 0)
1920 {
1921 WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
1922 /* we need to break, not return, to ensure currently locked page is released */
1923 break;
1924 }
1925
1926 if (cbCurCmdTail & 3)
1927 {
1928 WARN(("command is not alligned properly %d", cbCurCmdTail));
1929 i8Result = -1;
1930 /* we need to break, not return, to ensure currently locked page is released */
1931 break;
1932 }
1933
1934 uint32_t cCurPages = cbCurCmdTail / sizeof (VBOXCMDVBVAPAGEIDX);
1935 Assert(cCurPages < cPages);
1936
1937 do
1938 {
1939 rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pPages, cCurPages, pu8Vram, fIn);
1940 if (!RT_SUCCESS(rc))
1941 {
1942 WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
1943 i8Result = -1;
1944 /* we need to break, not return, to ensure currently locked page is released */
1945 break;
1946 }
1947
1948 Assert(cPages >= cCurPages);
1949 cPages -= cCurPages;
1950
1951 if (!cPages)
1952 break;
1953
1954 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1955
1956 Assert(!(phCmd & PAGE_OFFSET_MASK));
1957
1958 phCmd += PAGE_SIZE;
1959 pu8Vram += (cCurPages << PAGE_SHIFT);
1960
1961 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
1962 if (!RT_SUCCESS(rc))
1963 {
1964 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
1965 /* the page is not locked, return */
1966 return -1;
1967 }
1968
1969 cCurPages = PAGE_SIZE / sizeof (VBOXCMDVBVAPAGEIDX);
1970 if (cCurPages > cPages)
1971 cCurPages = cPages;
1972 } while (1);
1973 break;
1974 }
1975 default:
1976 WARN(("command can not be splitted"));
1977 i8Result = -1;
1978 break;
1979 }
1980
1981 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1982 return i8Result;
1983 }
1984 case VBOXCMDVBVA_OPTYPE_COMPLEXCMD:
1985 {
1986 Assert(cbCmd >= sizeof (VBOXCMDVBVA_HDR));
1987 ++pCmd;
1988 cbCmd -= sizeof (*pCmd);
1989 uint32_t cbCurCmd = 0;
1990 for ( ; cbCmd; cbCmd -= cbCurCmd, pCmd = (VBOXCMDVBVA_HDR*)(((uint8_t*)pCmd) + cbCurCmd))
1991 {
1992 if (cbCmd < sizeof (VBOXCMDVBVA_HDR))
1993 {
1994 WARN(("invalid command size"));
1995 return -1;
1996 }
1997
1998 cbCurCmd = pCmd->u2.complexCmdEl.u16CbCmdHost;
1999 if (cbCmd < cbCurCmd)
2000 {
2001 WARN(("invalid command size"));
2002 return -1;
2003 }
2004
2005 int8_t i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCurCmd);
2006 if (i8Result < 0)
2007 {
2008 WARN(("vboxVDMACrCmdVbvaProcess failed"));
2009 return i8Result;
2010 }
2011 }
2012 return 0;
2013 }
2014 default:
2015 return vboxVDMACrCmdVbvaProcessCmdData(pVdma, pCmd, cbCmd);
2016 }
2017}
2018
2019static void vboxVDMACrCmdProcess(struct VBOXVDMAHOST *pVdma, uint8_t* pu8Cmd, uint32_t cbCmd)
2020{
2021 if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
2022 return;
2023
2024 if (cbCmd < sizeof (VBOXCMDVBVA_HDR))
2025 {
2026 WARN(("invalid command size"));
2027 return;
2028 }
2029
2030 PVBOXCMDVBVA_HDR pCmd = (PVBOXCMDVBVA_HDR)pu8Cmd;
2031
2032 /* check if the command is cancelled */
2033 if (!ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_IN_PROGRESS, VBOXCMDVBVA_STATE_SUBMITTED))
2034 {
2035 Assert(pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED);
2036 return;
2037 }
2038
2039 pCmd->u.i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCmd);
2040}
2041
2042static int vboxVDMACrCtlHgsmiSetup(struct VBOXVDMAHOST *pVdma)
2043{
2044 PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP pCmd = (PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP)
2045 vboxVDMACrCtlCreate (VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP, sizeof (*pCmd));
2046 int rc = VERR_NO_MEMORY;
2047 if (pCmd)
2048 {
2049 PVGASTATE pVGAState = pVdma->pVGAState;
2050 pCmd->pvVRamBase = pVGAState->vram_ptrR3;
2051 pCmd->cbVRam = pVGAState->vram_size;
2052 pCmd->pLed = &pVGAState->Led3D;
2053 rc = vboxVDMACrCtlPost(pVGAState, &pCmd->Hdr, sizeof (*pCmd));
2054 if (RT_SUCCESS(rc))
2055 {
2056 rc = vboxVDMACrCtlGetRc(&pCmd->Hdr);
2057 if (RT_SUCCESS(rc))
2058 pVdma->CrSrvInfo = pCmd->CrCmdServerInfo;
2059 else if (rc != VERR_NOT_SUPPORTED)
2060 WARN(("vboxVDMACrCtlGetRc returned %d\n", rc));
2061 }
2062 else
2063 WARN(("vboxVDMACrCtlPost failed %d\n", rc));
2064
2065 vboxVDMACrCtlRelease(&pCmd->Hdr);
2066 }
2067
2068 if (!RT_SUCCESS(rc))
2069 memset(&pVdma->CrSrvInfo, 0, sizeof (pVdma->CrSrvInfo));
2070
2071 return rc;
2072}
2073
2074static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer);
2075
2076/* check if this is external cmd to be passed to chromium backend */
/* check if this is external cmd to be passed to chromium backend */
static int vboxVDMACmdCheckCrCmd(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmdDr, uint32_t cbCmdDr)
{
    PVBOXVDMACMD pDmaCmd = NULL;
    uint32_t cbDmaCmd = 0;
    uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
    /* VINF_NOT_SUPPORTED means "not a chromium/BPB command, caller handles it" */
    int rc = VINF_NOT_SUPPORTED;

    cbDmaCmd = pCmdDr->cbBuf;

    if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
    {
        /* the DMA command is embedded right after the descriptor */
        if (cbCmdDr < sizeof (*pCmdDr) + VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        /* NOTE(review): this rejects cbDmaCmd SMALLER than the trailing space;
         * one would expect 'larger than' to be the invalid case — confirm the
         * intended direction of this size check. */
        if (cbDmaCmd < cbCmdDr - sizeof (*pCmdDr) - VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid command buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        pDmaCmd = VBOXVDMACBUF_DR_TAIL(pCmdDr, VBOXVDMACMD);
    }
    else if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
    {
        /* the DMA command lives in VRAM at the given offset */
        VBOXVIDEOOFFSET offBuf = pCmdDr->Location.offVramBuf;
        /* NOTE(review): offBuf + cbDmaCmd could wrap if VBOXVIDEOOFFSET is
         * 32-bit — verify the type width or widen the check. */
        if (offBuf + cbDmaCmd > pVdma->pVGAState->vram_size)
        {
            AssertMsgFailed(("invalid command buffer data from offset!"));
            return VERR_INVALID_PARAMETER;
        }
        pDmaCmd = (VBOXVDMACMD*)(pvRam + offBuf);
    }

    if (pDmaCmd)
    {
        Assert(cbDmaCmd >= VBOXVDMACMD_HEADER_SIZE());
        uint32_t cbBody = VBOXVDMACMD_BODY_SIZE(cbDmaCmd);

        switch (pDmaCmd->enmType)
        {
            case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
            {
                PVBOXVDMACMD_CHROMIUM_CMD pCrCmd = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_CHROMIUM_CMD);
                if (cbBody < sizeof (*pCrCmd))
                {
                    AssertMsgFailed(("invalid chromium command buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }
                PVGASTATE pVGAState = pVdma->pVGAState;
                rc = VINF_SUCCESS;
                if (pVGAState->pDrv->pfnCrHgsmiCommandProcess)
                {
                    /* hand off asynchronously; completion comes via
                     * vboxVDMACrHgsmiCommandCompleteAsync */
                    VBoxSHGSMICommandMarkAsynchCompletion(pCmdDr);
                    pVGAState->pDrv->pfnCrHgsmiCommandProcess(pVGAState->pDrv, pCrCmd, cbBody);
                    break;
                }
                else
                {
                    Assert(0);
                }

                /* no backend: complete the command immediately */
                int tmpRc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                AssertRC(tmpRc);
                break;
            }
            case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
            {
                PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
                if (cbBody < sizeof (*pTransfer))
                {
                    AssertMsgFailed(("invalid bpb transfer buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }

                /* execute synchronously and complete on success */
                rc = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, sizeof (*pTransfer));
                AssertRC(rc);
                if (RT_SUCCESS(rc))
                {
                    pCmdDr->rc = VINF_SUCCESS;
                    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                    AssertRC(rc);
                    rc = VINF_SUCCESS;
                }
                break;
            }
            default:
                break;
        }
    }
    return rc;
}
2171
2172int vboxVDMACrHgsmiCommandCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc)
2173{
2174 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2175 PHGSMIINSTANCE pIns = pVGAState->pHGSMI;
2176 VBOXVDMACMD *pDmaHdr = VBOXVDMACMD_FROM_BODY(pCmd);
2177 VBOXVDMACBUF_DR *pDr = VBOXVDMACBUF_DR_FROM_TAIL(pDmaHdr);
2178 AssertRC(rc);
2179 pDr->rc = rc;
2180
2181 Assert(pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
2182 rc = VBoxSHGSMICommandComplete(pIns, pDr);
2183 AssertRC(rc);
2184 return rc;
2185}
2186
2187int vboxVDMACrHgsmiControlCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CTL pCmd, int rc)
2188{
2189 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2190 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pCmdPrivate = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
2191 pCmdPrivate->rc = rc;
2192 if (pCmdPrivate->pfnCompletion)
2193 {
2194 pCmdPrivate->pfnCompletion(pVGAState, pCmd, pCmdPrivate->pvCompletion);
2195 }
2196 return VINF_SUCCESS;
2197}
2198
/**
 * Copies one rectangle from a source surface to a destination surface.
 * No color conversion and no stretching are supported (asserted below).
 *
 * @param pVdma     The VDMA host state (unused in the copy itself).
 * @param pvDstSurf Destination surface base pointer.
 * @param pvSrcSurf Source surface base pointer.
 * @param pDstDesc  Destination surface description (format/pitch/bpp).
 * @param pSrcDesc  Source surface description.
 * @param pDstRectl Destination rectangle.
 * @param pSrcRectl Source rectangle (same dimensions as destination).
 * @return VINF_SUCCESS, or VERR_INVALID_FUNCTION on a format mismatch.
 */
static int vboxVDMACmdExecBltPerform(PVBOXVDMAHOST pVdma,
        uint8_t *pvDstSurf, const uint8_t *pvSrcSurf,
        const PVBOXVDMA_SURF_DESC pDstDesc, const PVBOXVDMA_SURF_DESC pSrcDesc,
        const VBOXVDMA_RECTL * pDstRectl, const VBOXVDMA_RECTL * pSrcRectl)
{
    /* we do not support color conversion */
    Assert(pDstDesc->format == pSrcDesc->format);
    /* we do not support stretching */
    Assert(pDstRectl->height == pSrcRectl->height);
    Assert(pDstRectl->width == pSrcRectl->width);
    if (pDstDesc->format != pSrcDesc->format)
        return VERR_INVALID_FUNCTION;
    if (pDstDesc->width == pDstRectl->width
            && pSrcDesc->width == pSrcRectl->width
            && pSrcDesc->width == pDstDesc->width)
    {
        /* Fast path: full-width copy, both surfaces share row layout, so the
         * whole rectangle is one contiguous span of pitch * height bytes. */
        Assert(!pDstRectl->left);
        Assert(!pSrcRectl->left);
        uint32_t cbOff = pDstDesc->pitch * pDstRectl->top;
        uint32_t cbSize = pDstDesc->pitch * pDstRectl->height;
        memcpy(pvDstSurf + cbOff, pvSrcSurf + cbOff, cbSize);
    }
    else
    {
        /* Row-by-row copy; line extents are computed in bytes from bpp. */
        uint32_t offDstLineStart = pDstRectl->left * pDstDesc->bpp >> 3;
        uint32_t offDstLineEnd = ((pDstRectl->left * pDstDesc->bpp + 7) >> 3) + ((pDstDesc->bpp * pDstRectl->width + 7) >> 3);
        uint32_t cbDstLine = offDstLineEnd - offDstLineStart;
        uint32_t offDstStart = pDstDesc->pitch * pDstRectl->top + offDstLineStart;
        Assert(cbDstLine <= pDstDesc->pitch);
        uint32_t cbDstSkip = pDstDesc->pitch;
        uint8_t * pvDstStart = pvDstSurf + offDstStart;

        uint32_t offSrcLineStart = pSrcRectl->left * pSrcDesc->bpp >> 3;
        uint32_t offSrcLineEnd = ((pSrcRectl->left * pSrcDesc->bpp + 7) >> 3) + ((pSrcDesc->bpp * pSrcRectl->width + 7) >> 3);
        uint32_t cbSrcLine = offSrcLineEnd - offSrcLineStart;
        uint32_t offSrcStart = pSrcDesc->pitch * pSrcRectl->top + offSrcLineStart;
        Assert(cbSrcLine <= pSrcDesc->pitch);
        uint32_t cbSrcSkip = pSrcDesc->pitch;
        const uint8_t * pvSrcStart = pvSrcSurf + offSrcStart;

        Assert(cbDstLine == cbSrcLine);

        /* NOTE(review): this loop copies rows i = 0..height inclusive, i.e.
         * height+1 rows, whereas the fast path above copies exactly
         * pitch*height bytes (height rows). Looks like an off-by-one in one
         * of the two branches — confirm the intended RECTL height semantics. */
        for (uint32_t i = 0; ; ++i)
        {
            memcpy (pvDstStart, pvSrcStart, cbDstLine);
            if (i == pDstRectl->height)
                break;
            pvDstStart += cbDstSkip;
            pvSrcStart += cbSrcSkip;
        }
    }
    return VINF_SUCCESS;
}
2252
2253static void vboxVDMARectlUnite(VBOXVDMA_RECTL * pRectl1, const VBOXVDMA_RECTL * pRectl2)
2254{
2255 if (!pRectl1->width)
2256 *pRectl1 = *pRectl2;
2257 else
2258 {
2259 int16_t x21 = pRectl1->left + pRectl1->width;
2260 int16_t x22 = pRectl2->left + pRectl2->width;
2261 if (pRectl1->left > pRectl2->left)
2262 {
2263 pRectl1->left = pRectl2->left;
2264 pRectl1->width = x21 < x22 ? x22 - pRectl1->left : x21 - pRectl1->left;
2265 }
2266 else if (x21 < x22)
2267 pRectl1->width = x22 - pRectl1->left;
2268
2269 x21 = pRectl1->top + pRectl1->height;
2270 x22 = pRectl2->top + pRectl2->height;
2271 if (pRectl1->top > pRectl2->top)
2272 {
2273 pRectl1->top = pRectl2->top;
2274 pRectl1->height = x21 < x22 ? x22 - pRectl1->top : x21 - pRectl1->top;
2275 }
2276 else if (x21 < x22)
2277 pRectl1->height = x22 - pRectl1->top;
2278 }
2279}
2280
2281/*
2282 * @return on success the number of bytes the command contained, otherwise - VERR_xxx error code
2283 */
2284static int vboxVDMACmdExecBlt(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt, uint32_t cbBuffer)
2285{
2286 const uint32_t cbBlt = VBOXVDMACMD_BODY_FIELD_OFFSET(uint32_t, VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects[pBlt->cDstSubRects]);
2287 Assert(cbBlt <= cbBuffer);
2288 if (cbBuffer < cbBlt)
2289 return VERR_INVALID_FUNCTION;
2290
2291 /* we do not support stretching for now */
2292 Assert(pBlt->srcRectl.width == pBlt->dstRectl.width);
2293 Assert(pBlt->srcRectl.height == pBlt->dstRectl.height);
2294 if (pBlt->srcRectl.width != pBlt->dstRectl.width)
2295 return VERR_INVALID_FUNCTION;
2296 if (pBlt->srcRectl.height != pBlt->dstRectl.height)
2297 return VERR_INVALID_FUNCTION;
2298 Assert(pBlt->cDstSubRects);
2299
2300 uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
2301 VBOXVDMA_RECTL updateRectl = {0, 0, 0, 0};
2302
2303 if (pBlt->cDstSubRects)
2304 {
2305 VBOXVDMA_RECTL dstRectl, srcRectl;
2306 const VBOXVDMA_RECTL *pDstRectl, *pSrcRectl;
2307 for (uint32_t i = 0; i < pBlt->cDstSubRects; ++i)
2308 {
2309 pDstRectl = &pBlt->aDstSubRects[i];
2310 if (pBlt->dstRectl.left || pBlt->dstRectl.top)
2311 {
2312 dstRectl.left = pDstRectl->left + pBlt->dstRectl.left;
2313 dstRectl.top = pDstRectl->top + pBlt->dstRectl.top;
2314 dstRectl.width = pDstRectl->width;
2315 dstRectl.height = pDstRectl->height;
2316 pDstRectl = &dstRectl;
2317 }
2318
2319 pSrcRectl = &pBlt->aDstSubRects[i];
2320 if (pBlt->srcRectl.left || pBlt->srcRectl.top)
2321 {
2322 srcRectl.left = pSrcRectl->left + pBlt->srcRectl.left;
2323 srcRectl.top = pSrcRectl->top + pBlt->srcRectl.top;
2324 srcRectl.width = pSrcRectl->width;
2325 srcRectl.height = pSrcRectl->height;
2326 pSrcRectl = &srcRectl;
2327 }
2328
2329 int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
2330 &pBlt->dstDesc, &pBlt->srcDesc,
2331 pDstRectl,
2332 pSrcRectl);
2333 AssertRC(rc);
2334 if (!RT_SUCCESS(rc))
2335 return rc;
2336
2337 vboxVDMARectlUnite(&updateRectl, pDstRectl);
2338 }
2339 }
2340 else
2341 {
2342 int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
2343 &pBlt->dstDesc, &pBlt->srcDesc,
2344 &pBlt->dstRectl,
2345 &pBlt->srcRectl);
2346 AssertRC(rc);
2347 if (!RT_SUCCESS(rc))
2348 return rc;
2349
2350 vboxVDMARectlUnite(&updateRectl, &pBlt->dstRectl);
2351 }
2352
2353 return cbBlt;
2354}
2355
2356static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer)
2357{
2358 if (cbBuffer < sizeof (*pTransfer))
2359 return VERR_INVALID_PARAMETER;
2360
2361 PVGASTATE pVGAState = pVdma->pVGAState;
2362 uint8_t * pvRam = pVGAState->vram_ptrR3;
2363 PGMPAGEMAPLOCK SrcLock;
2364 PGMPAGEMAPLOCK DstLock;
2365 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
2366 const void * pvSrc;
2367 void * pvDst;
2368 int rc = VINF_SUCCESS;
2369 uint32_t cbTransfer = pTransfer->cbTransferSize;
2370 uint32_t cbTransfered = 0;
2371 bool bSrcLocked = false;
2372 bool bDstLocked = false;
2373 do
2374 {
2375 uint32_t cbSubTransfer = cbTransfer;
2376 if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
2377 {
2378 pvSrc = pvRam + pTransfer->Src.offVramBuf + cbTransfered;
2379 }
2380 else
2381 {
2382 RTGCPHYS phPage = pTransfer->Src.phBuf;
2383 phPage += cbTransfered;
2384 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvSrc, &SrcLock);
2385 AssertRC(rc);
2386 if (RT_SUCCESS(rc))
2387 {
2388 bSrcLocked = true;
2389 cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
2390 }
2391 else
2392 {
2393 break;
2394 }
2395 }
2396
2397 if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
2398 {
2399 pvDst = pvRam + pTransfer->Dst.offVramBuf + cbTransfered;
2400 }
2401 else
2402 {
2403 RTGCPHYS phPage = pTransfer->Dst.phBuf;
2404 phPage += cbTransfered;
2405 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvDst, &DstLock);
2406 AssertRC(rc);
2407 if (RT_SUCCESS(rc))
2408 {
2409 bDstLocked = true;
2410 cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
2411 }
2412 else
2413 {
2414 break;
2415 }
2416 }
2417
2418 if (RT_SUCCESS(rc))
2419 {
2420 memcpy(pvDst, pvSrc, cbSubTransfer);
2421 cbTransfer -= cbSubTransfer;
2422 cbTransfered += cbSubTransfer;
2423 }
2424 else
2425 {
2426 cbTransfer = 0; /* to break */
2427 }
2428
2429 if (bSrcLocked)
2430 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &SrcLock);
2431 if (bDstLocked)
2432 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &DstLock);
2433 } while (cbTransfer);
2434
2435 if (RT_SUCCESS(rc))
2436 return sizeof (*pTransfer);
2437 return rc;
2438}
2439
2440static int vboxVDMACmdExec(PVBOXVDMAHOST pVdma, const uint8_t *pvBuffer, uint32_t cbBuffer)
2441{
2442 do
2443 {
2444 Assert(pvBuffer);
2445 Assert(cbBuffer >= VBOXVDMACMD_HEADER_SIZE());
2446
2447 if (!pvBuffer)
2448 return VERR_INVALID_PARAMETER;
2449 if (cbBuffer < VBOXVDMACMD_HEADER_SIZE())
2450 return VERR_INVALID_PARAMETER;
2451
2452 PVBOXVDMACMD pCmd = (PVBOXVDMACMD)pvBuffer;
2453 uint32_t cbCmd = 0;
2454 switch (pCmd->enmType)
2455 {
2456 case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
2457 {
2458#ifdef VBOXWDDM_TEST_UHGSMI
2459 static int count = 0;
2460 static uint64_t start, end;
2461 if (count==0)
2462 {
2463 start = RTTimeNanoTS();
2464 }
2465 ++count;
2466 if (count==100000)
2467 {
2468 end = RTTimeNanoTS();
2469 float ems = (end-start)/1000000.f;
2470 LogRel(("100000 calls took %i ms, %i cps\n", (int)ems, (int)(100000.f*1000.f/ems) ));
2471 }
2472#endif
2473 /* todo: post the buffer to chromium */
2474 return VINF_SUCCESS;
2475 }
2476 case VBOXVDMACMD_TYPE_DMA_PRESENT_BLT:
2477 {
2478 const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_PRESENT_BLT);
2479 int cbBlt = vboxVDMACmdExecBlt(pVdma, pBlt, cbBuffer);
2480 Assert(cbBlt >= 0);
2481 Assert((uint32_t)cbBlt <= cbBuffer);
2482 if (cbBlt >= 0)
2483 {
2484 if ((uint32_t)cbBlt == cbBuffer)
2485 return VINF_SUCCESS;
2486 else
2487 {
2488 cbBuffer -= (uint32_t)cbBlt;
2489 pvBuffer -= cbBlt;
2490 }
2491 }
2492 else
2493 return cbBlt; /* error */
2494 break;
2495 }
2496 case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
2497 {
2498 const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
2499 int cbTransfer = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, cbBuffer);
2500 Assert(cbTransfer >= 0);
2501 Assert((uint32_t)cbTransfer <= cbBuffer);
2502 if (cbTransfer >= 0)
2503 {
2504 if ((uint32_t)cbTransfer == cbBuffer)
2505 return VINF_SUCCESS;
2506 else
2507 {
2508 cbBuffer -= (uint32_t)cbTransfer;
2509 pvBuffer -= cbTransfer;
2510 }
2511 }
2512 else
2513 return cbTransfer; /* error */
2514 break;
2515 }
2516 case VBOXVDMACMD_TYPE_DMA_NOP:
2517 return VINF_SUCCESS;
2518 case VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ:
2519 return VINF_SUCCESS;
2520 default:
2521 AssertBreakpoint();
2522 return VERR_INVALID_FUNCTION;
2523 }
2524 } while (1);
2525
2526 /* we should not be here */
2527 AssertBreakpoint();
2528 return VERR_INVALID_STATE;
2529}
2530
/**
 * The VDMA worker thread: pulls commands and controls from the command VBVA
 * queue and dispatches them until asked to terminate.
 *
 * @param ThreadSelf The IPRT thread handle (unused directly).
 * @param pvUser     The VBOXVDMAHOST instance.
 * @return VINF_SUCCESS.
 */
static DECLCALLBACK(int) vboxVDMAWorkerThread(RTTHREAD ThreadSelf, void *pvUser)
{
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvUser;
    PVGASTATE pVGAState = pVdma->pVGAState;
    VBVAEXHOSTCONTEXT *pCmdVbva = &pVdma->CmdVbva;
    PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi; /* NOTE(review): not referenced below — presumably kept for debugging; confirm */
    uint8_t *pCmd;
    uint32_t cbCmd;
    int rc;

    /* Report successful construction back to VBoxVDMAThreadCreate. */
    VBoxVDMAThreadNotifyConstructSucceeded(&pVdma->Thread, pvUser);

    while (!VBoxVDMAThreadIsTerminating(&pVdma->Thread))
    {
        VBVAEXHOST_DATA_TYPE enmType = VBoxVBVAExHPDataGet(pCmdVbva, &pCmd, &cbCmd);
        switch (enmType)
        {
            case VBVAEXHOST_DATA_TYPE_CMD:
                /* Guest command: process, complete, and raise the guest IRQ. */
                vboxVDMACrCmdProcess(pVdma, pCmd, cbCmd);
                VBoxVBVAExHPDataCompleteCmd(pCmdVbva, cbCmd);
                VBVARaiseIrqNoWait(pVGAState, 0);
                break;
            case VBVAEXHOST_DATA_TYPE_GUESTCTL:
                rc = vboxVDMACrGuestCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd);
                VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
                break;
            case VBVAEXHOST_DATA_TYPE_HOSTCTL:
            {
                bool fContinue = true;
                rc = vboxVDMACrHostCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd, &fContinue);
                VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
                if (fContinue)
                    break;
            }
            /* fall through: host ctl asked us to pause — wait like NO_DATA */
            case VBVAEXHOST_DATA_TYPE_NO_DATA:
                rc = VBoxVDMAThreadEventWait(&pVdma->Thread, RT_INDEFINITE_WAIT);
                AssertRC(rc);
                break;
            default:
                WARN(("unexpected type %d\n", enmType));
                break;
        }
    }

    /* Report clean termination back to the thread machinery. */
    VBoxVDMAThreadNotifyTerminatingSucceeded(&pVdma->Thread, pvUser);

    return VINF_SUCCESS;
}
2579
/**
 * Resolves the command buffer referenced by a descriptor (embedded, VRAM
 * offset, or guest physical page), executes the command chain, and completes
 * the descriptor with the result.
 *
 * @param pVdma The VDMA host state.
 * @param pCmd  The guest command descriptor.
 * @param cbCmd Size of the descriptor (unused here).
 */
static void vboxVDMACommandProcess(PVBOXVDMAHOST pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
    const uint8_t * pvBuf;
    PGMPAGEMAPLOCK Lock;
    int rc;
    bool bReleaseLocked = false; /* set when pvBuf comes from a mapped guest page */

    do
    {
        PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;

        if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
            pvBuf = VBOXVDMACBUF_DR_TAIL(pCmd, const uint8_t);
        else if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
        {
            uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
            pvBuf = pvRam + pCmd->Location.offVramBuf;
        }
        else
        {
            /* Buffer is at a guest physical address; split into page + offset. */
            RTGCPHYS phPage = pCmd->Location.phBuf & ~0xfffULL;
            uint32_t offset = pCmd->Location.phBuf & 0xfff;
            Assert(offset + pCmd->cbBuf <= 0x1000);
            if (offset + pCmd->cbBuf > 0x1000)
            {
                /* @todo: more advanced mechanism of command buffer proc is actually needed */
                rc = VERR_INVALID_PARAMETER;
                break;
            }

            const void * pvPageBuf;
            rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPageBuf, &Lock);
            AssertRC(rc);
            if (!RT_SUCCESS(rc))
            {
                /* @todo: if (rc == VERR_PGM_PHYS_PAGE_RESERVED) -> fall back on using PGMPhysRead ?? */
                break;
            }

            pvBuf = (const uint8_t *)pvPageBuf;
            pvBuf += offset;

            bReleaseLocked = true;
        }

        rc = vboxVDMACmdExec(pVdma, pvBuf, pCmd->cbBuf);
        AssertRC(rc);

        /* Release the page mapping acquired above, if any. */
        if (bReleaseLocked)
            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
    } while (0);

    pCmd->rc = rc;

    rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
    AssertRC(rc);
}
2638
2639static void vboxVDMAControlProcess(PVBOXVDMAHOST pVdma, PVBOXVDMA_CTL pCmd)
2640{
2641 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
2642 pCmd->i32Result = VINF_SUCCESS;
2643 int rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
2644 AssertRC(rc);
2645}
2646
2647#endif /* #ifdef VBOX_WITH_CRHGSMI */
2648
2649#ifdef VBOX_VDMA_WITH_WATCHDOG
2650static DECLCALLBACK(void) vboxVDMAWatchDogTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
2651{
2652 VBOXVDMAHOST *pVdma = (VBOXVDMAHOST *)pvUser;
2653 PVGASTATE pVGAState = pVdma->pVGAState;
2654 VBVARaiseIrq(pVGAState, HGSMIHOSTFLAGS_WATCHDOG);
2655}
2656
2657static int vboxVDMAWatchDogCtl(struct VBOXVDMAHOST *pVdma, uint32_t cMillis)
2658{
2659 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
2660 if (cMillis)
2661 TMTimerSetMillies(pVdma->WatchDogTimer, cMillis);
2662 else
2663 TMTimerStop(pVdma->WatchDogTimer);
2664 return VINF_SUCCESS;
2665}
2666#endif
2667
/**
 * Constructs the VDMA host state and attaches it to the VGA state.
 *
 * @param pVGAState     The VGA device state the VDMA host belongs to.
 * @param cPipeElements Unused in this configuration.
 * @return VBox status code; on failure pVGAState->pVdma is left untouched.
 */
int vboxVDMAConstruct(PVGASTATE pVGAState, uint32_t cPipeElements)
{
    int rc;
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)RTMemAllocZ(sizeof(*pVdma));
    Assert(pVdma);
    if (pVdma)
    {
        pVdma->pHgsmi = pVGAState->pHGSMI;
        pVdma->pVGAState = pVGAState;

#ifdef VBOX_VDMA_WITH_WATCHDOG
        rc = PDMDevHlpTMTimerCreate(pVGAState->pDevInsR3, TMCLOCK_REAL, vboxVDMAWatchDogTimer,
                                        pVdma, TMTIMER_FLAGS_NO_CRIT_SECT,
                                        "VDMA WatchDog Timer", &pVdma->WatchDogTimer);
        AssertRC(rc);
#endif

#ifdef VBOX_WITH_CRHGSMI
        VBoxVDMAThreadInit(&pVdma->Thread);

        rc = RTSemEventMultiCreate(&pVdma->HostCrCtlCompleteEvent);
        if (RT_SUCCESS(rc))
        {
            rc = VBoxVBVAExHSInit(&pVdma->CmdVbva);
            if (RT_SUCCESS(rc))
            {
                pVGAState->pVdma = pVdma;
                int rcIgnored = vboxVDMACrCtlHgsmiSetup(pVdma); NOREF(rcIgnored); /** @todo is this ignoring intentional? */
                return VINF_SUCCESS;

                /* Unreachable cleanup kept to document the teardown order for
                 * the success path's resources. */
                VBoxVBVAExHSTerm(&pVdma->CmdVbva);
            }
            else
                WARN(("VBoxVBVAExHSInit failed %d\n", rc));

            RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
        }
        else
            WARN(("RTSemEventMultiCreate failed %d\n", rc));


        RTMemFree(pVdma);
#else
        pVGAState->pVdma = pVdma;
        return VINF_SUCCESS;
#endif
    }
    else
        rc = VERR_OUT_OF_RESOURCES;

    return rc;
}
2720
/**
 * Resets the VDMA host: synchronously disables command VBVA processing.
 * @return VINF_SUCCESS.
 */
int vboxVDMAReset(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    vdmaVBVACtlDisableSync(pVdma);
#endif
    return VINF_SUCCESS;
}
2728
/**
 * Destroys the VDMA host: disables command VBVA, tears down the worker
 * thread and VBVA state, and frees the instance.
 * @return VINF_SUCCESS.
 */
int vboxVDMADestruct(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    vdmaVBVACtlDisableSync(pVdma);
    VBoxVDMAThreadCleanup(&pVdma->Thread);
    VBoxVBVAExHSTerm(&pVdma->CmdVbva);
    RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
#endif
    RTMemFree(pVdma);
    return VINF_SUCCESS;
}
2740
/**
 * Handles a VDMA control request from the guest and completes it.
 *
 * Enable/disable/flush are no-ops here beyond reporting success; the
 * watchdog control (when built in) arms or stops the watchdog timer.
 *
 * @param pVdma The VDMA host state.
 * @param pCmd  The control command; i32Result receives the outcome.
 * @param cbCmd Size of the command (unused).
 */
void vboxVDMAControl(struct VBOXVDMAHOST *pVdma, PVBOXVDMA_CTL pCmd, uint32_t cbCmd)
{
    PHGSMIINSTANCE pIns = pVdma->pHgsmi;

    switch (pCmd->enmCtl)
    {
        case VBOXVDMA_CTL_TYPE_ENABLE:
            pCmd->i32Result = VINF_SUCCESS;
            break;
        case VBOXVDMA_CTL_TYPE_DISABLE:
            pCmd->i32Result = VINF_SUCCESS;
            break;
        case VBOXVDMA_CTL_TYPE_FLUSH:
            pCmd->i32Result = VINF_SUCCESS;
            break;
#ifdef VBOX_VDMA_WITH_WATCHDOG
        case VBOXVDMA_CTL_TYPE_WATCHDOG:
            /* u32Offset carries the watchdog timeout in milliseconds. */
            pCmd->i32Result = vboxVDMAWatchDogCtl(pVdma, pCmd->u32Offset);
            break;
#endif
        default:
            WARN(("cmd not supported"));
            pCmd->i32Result = VERR_NOT_SUPPORTED;
    }

    int rc = VBoxSHGSMICommandComplete (pIns, pCmd);
    AssertRC(rc);
}
2769
/**
 * Entry point for a guest VDMA command descriptor.
 *
 * Chromium commands are routed to the CR backend first; everything else is
 * processed synchronously (or rejected as not implemented when CRHGSMI is
 * not built in). The descriptor is always completed on all paths.
 *
 * @param pVdma The VDMA host state.
 * @param pCmd  The guest command descriptor.
 * @param cbCmd Size of the descriptor in bytes.
 */
void vboxVDMACommand(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    int rc = VERR_NOT_IMPLEMENTED;

#ifdef VBOX_WITH_CRHGSMI
    /* chromium commands are processed by crhomium hgcm thread independently from our internal cmd processing pipeline
     * this is why we process them specially */
    rc = vboxVDMACmdCheckCrCmd(pVdma, pCmd, cbCmd);
    if (rc == VINF_SUCCESS)
        return; /* handled (completion already arranged) */

    if (RT_FAILURE(rc))
    {
        /* Malformed command: complete it with the error. */
        pCmd->rc = rc;
        rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
        AssertRC(rc);
        return;
    }

    /* VINF_NOT_SUPPORTED: run through the regular processing pipeline. */
    vboxVDMACommandProcess(pVdma, pCmd, cbCmd);
#else
    pCmd->rc = rc;
    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
    AssertRC(rc);
#endif
}
2796
2797/**/
2798#ifdef VBOX_WITH_CRHGSMI
2799
2800static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext);
2801
2802static int vdmaVBVACtlSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2803{
2804 int rc = VBoxVBVAExHCtlSubmit(&pVdma->CmdVbva, pCtl, enmSource, pfnComplete, pvComplete);
2805 if (RT_SUCCESS(rc))
2806 {
2807 if (rc == VINF_SUCCESS)
2808 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
2809 else
2810 Assert(rc == VINF_ALREADY_INITIALIZED);
2811 }
2812 else
2813 Log(("VBoxVBVAExHCtlSubmit failed %d\n", rc));
2814
2815 return rc;
2816}
2817
/**
 * Completion callback for guest-submitted controls: writes the result back
 * into the guest VBOXCMDVBVA_CTL header, completes the HGSMI command, and
 * frees the host-side control wrapper.
 */
static DECLCALLBACK(void) vboxCmdVBVACmdCtlGuestCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext)
{
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvContext;
    /* The guest VBOXCMDVBVA_CTL header immediately precedes the payload
     * (see vdmaVBVACtlGenericGuestSubmit, which submits pCtl+1). */
    VBOXCMDVBVA_CTL *pGCtl = (VBOXCMDVBVA_CTL*)(pCtl->u.cmd.pu8Cmd - sizeof (VBOXCMDVBVA_CTL));
    AssertRC(rc);
    pGCtl->i32Result = rc;

    Assert(pVdma->pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
    rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pGCtl);
    AssertRC(rc);

    VBoxVBVAExHCtlFree(pVbva, pCtl);
}
2831
2832static int vdmaVBVACtlGenericSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_SOURCE enmSource, VBVAEXHOSTCTL_TYPE enmType, uint8_t* pu8Cmd, uint32_t cbCmd, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2833{
2834 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, enmType);
2835 if (!pHCtl)
2836 {
2837 WARN(("VBoxVBVAExHCtlCreate failed\n"));
2838 return VERR_NO_MEMORY;
2839 }
2840
2841 pHCtl->u.cmd.pu8Cmd = pu8Cmd;
2842 pHCtl->u.cmd.cbCmd = cbCmd;
2843 int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, enmSource, pfnComplete, pvComplete);
2844 if (RT_FAILURE(rc))
2845 {
2846 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
2847 Log(("vdmaVBVACtlSubmit failed rc %d\n", rc));
2848 return rc;;
2849 }
2850 return VINF_SUCCESS;
2851}
2852
2853static int vdmaVBVACtlGenericGuestSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_TYPE enmType, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
2854{
2855 Assert(cbCtl >= sizeof (VBOXCMDVBVA_CTL));
2856 VBoxSHGSMICommandMarkAsynchCompletion(pCtl);
2857 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_GUEST, enmType, (uint8_t*)(pCtl+1), cbCtl - sizeof (VBOXCMDVBVA_CTL), vboxCmdVBVACmdCtlGuestCompletion, pVdma);
2858 if (RT_SUCCESS(rc))
2859 return VINF_SUCCESS;
2860
2861 WARN(("vdmaVBVACtlGenericSubmit failed %d\n", rc));
2862 pCtl->i32Result = rc;
2863 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
2864 AssertRC(rc);
2865 return VINF_SUCCESS;
2866}
2867
2868static DECLCALLBACK(void) vboxCmdVBVACmdCtlHostCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvCompletion)
2869{
2870 VBOXCRCMDCTL* pVboxCtl = (VBOXCRCMDCTL*)pCtl->u.cmd.pu8Cmd;
2871 if (pVboxCtl->u.pfnInternal)
2872 ((PFNCRCTLCOMPLETION)pVboxCtl->u.pfnInternal)(pVboxCtl, pCtl->u.cmd.cbCmd, rc, pvCompletion);
2873 VBoxVBVAExHCtlFree(pVbva, pCtl);
2874}
2875
2876static int vdmaVBVACtlOpaqueHostSubmit(PVBOXVDMAHOST pVdma, struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
2877 PFNCRCTLCOMPLETION pfnCompletion,
2878 void *pvCompletion)
2879{
2880 pCmd->u.pfnInternal = (void(*)())pfnCompletion;
2881 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_HOST, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, (uint8_t*)pCmd, cbCmd, vboxCmdVBVACmdCtlHostCompletion, pvCompletion);
2882 if (RT_FAILURE(rc))
2883 {
2884 if (rc == VERR_INVALID_STATE)
2885 {
2886 pCmd->u.pfnInternal = NULL;
2887 PVGASTATE pVGAState = pVdma->pVGAState;
2888 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCmd, cbCmd, pfnCompletion, pvCompletion);
2889 if (!RT_SUCCESS(rc))
2890 WARN(("pfnCrHgsmiControlProcess failed %d\n", rc));
2891
2892 return rc;
2893 }
2894 WARN(("vdmaVBVACtlGenericSubmit failed %d\n", rc));
2895 return rc;
2896 }
2897
2898 return VINF_SUCCESS;
2899}
2900
2901static DECLCALLBACK(int) vdmaVBVANotifyEnable(PVGASTATE pVGAState)
2902{
2903 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
2904 {
2905 int rc = pVGAState->pDrv->pfnVBVAEnable (pVGAState->pDrv, i, NULL, true);
2906 if (!RT_SUCCESS(rc))
2907 {
2908 WARN(("pfnVBVAEnable failed %d\n", rc));
2909 for (uint32_t j = 0; j < i; j++)
2910 {
2911 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, j);
2912 }
2913
2914 return rc;
2915 }
2916 }
2917 return VINF_SUCCESS;
2918}
2919
2920static DECLCALLBACK(int) vdmaVBVANotifyDisable(PVGASTATE pVGAState)
2921{
2922 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
2923 {
2924 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, i);
2925 }
2926 return VINF_SUCCESS;
2927}
2928
/**
 * Worker-thread-created callback used by the enable path: runs the pending
 * enable/disable control on the fresh thread, notifies Main of the VBVA
 * state change, and completes the control.
 *
 * @param pThread         The worker thread (unused directly).
 * @param rc              Thread creation status.
 * @param pvThreadContext The VBOXVDMAHOST instance.
 * @param pvContext       The pending VBVAEXHOSTCTL to process.
 */
static DECLCALLBACK(void) vdmaVBVACtlThreadCreatedEnable(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvContext)
{
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvThreadContext;
    VBVAEXHOSTCTL* pHCtl = (VBVAEXHOSTCTL*)pvContext;

    if (RT_SUCCESS(rc))
    {
        rc = vboxVDMACrGuestCtlProcess(pVdma, pHCtl);
        /* rc == VINF_SUCCESS would mean the actual state change has occcured */
        if (rc == VINF_SUCCESS)
        {
            /* we need to inform Main about VBVA enable/disable
             * main expects notifications to be done from the main thread
             * submit it there */
            PVGASTATE pVGAState = pVdma->pVGAState;

            if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
                vdmaVBVANotifyEnable(pVGAState);
            else
                vdmaVBVANotifyDisable(pVGAState);
        }
        else if (RT_FAILURE(rc))
            WARN(("vboxVDMACrGuestCtlProcess failed %d\n", rc));
    }
    else
        WARN(("vdmaVBVACtlThreadCreatedEnable is passed %d\n", rc));

    /* Complete the control with the final status (success or failure). */
    VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pHCtl, rc);
}
2958
2959static int vdmaVBVACtlEnableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, bool fPaused, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2960{
2961 int rc;
2962 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, fPaused ? VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED : VBVAEXHOSTCTL_TYPE_GHH_ENABLE);
2963 if (pHCtl)
2964 {
2965 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
2966 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
2967 pHCtl->pfnComplete = pfnComplete;
2968 pHCtl->pvComplete = pvComplete;
2969
2970 rc = VBoxVDMAThreadCreate(&pVdma->Thread, vboxVDMAWorkerThread, pVdma, vdmaVBVACtlThreadCreatedEnable, pHCtl);
2971 if (RT_SUCCESS(rc))
2972 return VINF_SUCCESS;
2973 else
2974 WARN(("VBoxVDMAThreadCreate failed %d\n", rc));
2975
2976 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
2977 }
2978 else
2979 {
2980 WARN(("VBoxVBVAExHCtlCreate failed\n"));
2981 rc = VERR_NO_MEMORY;
2982 }
2983
2984 return rc;
2985}
2986
2987static int vdmaVBVACtlEnableSubmitSync(PVBOXVDMAHOST pVdma, uint32_t offVram, bool fPaused)
2988{
2989 VBVAENABLE Enable = {0};
2990 Enable.u32Flags = VBVA_F_ENABLE;
2991 Enable.u32Offset = offVram;
2992
2993 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
2994 Data.rc = VERR_NOT_IMPLEMENTED;
2995 int rc = RTSemEventCreate(&Data.hEvent);
2996 if (!RT_SUCCESS(rc))
2997 {
2998 WARN(("RTSemEventCreate failed %d\n", rc));
2999 return rc;
3000 }
3001
3002 rc = vdmaVBVACtlEnableSubmitInternal(pVdma, &Enable, fPaused, vdmaVBVACtlSubmitSyncCompletion, &Data);
3003 if (RT_SUCCESS(rc))
3004 {
3005 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
3006 if (RT_SUCCESS(rc))
3007 {
3008 rc = Data.rc;
3009 if (!RT_SUCCESS(rc))
3010 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
3011 }
3012 else
3013 WARN(("RTSemEventWait failed %d\n", rc));
3014 }
3015 else
3016 WARN(("vdmaVBVACtlSubmit failed %d\n", rc));
3017
3018 RTSemEventDestroy(Data.hEvent);
3019
3020 return rc;
3021}
3022
3023static int vdmaVBVACtlDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3024{
3025 int rc;
3026 VBVAEXHOSTCTL* pHCtl;
3027 if (VBoxVBVAExHSIsDisabled(&pVdma->CmdVbva))
3028 {
3029 WARN(("VBoxVBVAExHSIsDisabled: disabled"));
3030 return VINF_SUCCESS;
3031 }
3032
3033 pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_GHH_DISABLE);
3034 if (!pHCtl)
3035 {
3036 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3037 return VERR_NO_MEMORY;
3038 }
3039
3040 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
3041 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
3042 rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_GUEST, pfnComplete, pvComplete);
3043 if (RT_SUCCESS(rc))
3044 return VINF_SUCCESS;
3045
3046 WARN(("vdmaVBVACtlSubmit failed rc %d\n", rc));
3047 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3048 return rc;
3049}
3050
3051static int vdmaVBVACtlEnableDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3052{
3053 bool fEnable = ((pEnable->u32Flags & (VBVA_F_ENABLE | VBVA_F_DISABLE)) == VBVA_F_ENABLE);
3054 if (fEnable)
3055 return vdmaVBVACtlEnableSubmitInternal(pVdma, pEnable, false, pfnComplete, pvComplete);
3056 return vdmaVBVACtlDisableSubmitInternal(pVdma, pEnable, pfnComplete, pvComplete);
3057}
3058
3059static int vdmaVBVACtlEnableDisableSubmit(PVBOXVDMAHOST pVdma, VBOXCMDVBVA_CTL_ENABLE *pEnable)
3060{
3061 VBoxSHGSMICommandMarkAsynchCompletion(&pEnable->Hdr);
3062 int rc = vdmaVBVACtlEnableDisableSubmitInternal(pVdma, &pEnable->Enable, vboxCmdVBVACmdCtlGuestCompletion, pVdma);
3063 if (RT_SUCCESS(rc))
3064 return VINF_SUCCESS;
3065
3066 WARN(("vdmaVBVACtlEnableDisableSubmitInternal failed %d\n", rc));
3067 pEnable->Hdr.i32Result = rc;
3068 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, &pEnable->Hdr);
3069 AssertRC(rc);
3070 return VINF_SUCCESS;
3071}
3072
3073static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext)
3074{
3075 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvContext;
3076 pData->rc = rc;
3077 rc = RTSemEventSignal(pData->hEvent);
3078 if (!RT_SUCCESS(rc))
3079 WARN(("RTSemEventSignal failed %d\n", rc));
3080}
3081
3082static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource)
3083{
3084 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
3085 Data.rc = VERR_NOT_IMPLEMENTED;
3086 int rc = RTSemEventCreate(&Data.hEvent);
3087 if (!RT_SUCCESS(rc))
3088 {
3089 WARN(("RTSemEventCreate failed %d\n", rc));
3090 return rc;
3091 }
3092
3093 rc = vdmaVBVACtlSubmit(pVdma, pCtl, enmSource, vdmaVBVACtlSubmitSyncCompletion, &Data);
3094 if (RT_SUCCESS(rc))
3095 {
3096 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
3097 if (RT_SUCCESS(rc))
3098 {
3099 rc = Data.rc;
3100 if (!RT_SUCCESS(rc))
3101 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
3102 }
3103 else
3104 WARN(("RTSemEventWait failed %d\n", rc));
3105 }
3106 else
3107 Log(("vdmaVBVACtlSubmit failed %d\n", rc));
3108
3109 RTSemEventDestroy(Data.hEvent);
3110
3111 return rc;
3112}
3113
/**
 * Synchronously pauses VBVA command processing on the worker thread.
 * NOTE(review): only enmType is initialized here; the remaining
 * VBVAEXHOSTCTL fields are presumably ignored for internal pause
 * controls — confirm against the control processing code.
 */
static int vdmaVBVAPause(PVBOXVDMAHOST pVdma)
{
    VBVAEXHOSTCTL Ctl;
    Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE;
    return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
}
3120
/**
 * Synchronously resumes VBVA command processing on the worker thread.
 * NOTE(review): like the pause path, only enmType is initialized —
 * confirm the remaining VBVAEXHOSTCTL fields are not read.
 */
static int vdmaVBVAResume(PVBOXVDMAHOST pVdma)
{
    VBVAEXHOSTCTL Ctl;
    Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME;
    return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
}
3127
3128static int vboxVDMACmdSubmitPerform(struct VBOXVDMAHOST *pVdma)
3129{
3130 int rc = VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva);
3131 switch (rc)
3132 {
3133 case VINF_SUCCESS:
3134 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
3135 case VINF_ALREADY_INITIALIZED:
3136 case VINF_EOF:
3137 case VERR_INVALID_STATE:
3138 return VINF_SUCCESS;
3139 default:
3140 Assert(!RT_FAILURE(rc));
3141 return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
3142 }
3143}
3144
3145
3146int vboxCmdVBVACmdHostCtl(PPDMIDISPLAYVBVACALLBACKS pInterface,
3147 struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
3148 PFNCRCTLCOMPLETION pfnCompletion,
3149 void *pvCompletion)
3150{
3151 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
3152 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3153 return vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, pfnCompletion, pvCompletion);
3154}
3155
/** Completion context shared between vboxCmdVBVACmdHostCtlSync (which polls it
 *  on its stack) and the completion callback vboxCmdVBVACmdHostCtlSyncCb. */
typedef struct VBOXCMDVBVA_CMDHOSTCTL_SYNC
{
    struct VBOXVDMAHOST *pVdma; /**< Owning VDMA instance; used by the callback for counting/signalling. */
    uint32_t fProcessing;       /**< 1 while the command is in flight; cleared by the callback. */
    int rc;                     /**< Completion status recorded by the callback. */
} VBOXCMDVBVA_CMDHOSTCTL_SYNC;
3162
3163static DECLCALLBACK(void) vboxCmdVBVACmdHostCtlSyncCb(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
3164{
3165 VBOXCMDVBVA_CMDHOSTCTL_SYNC *pData = (VBOXCMDVBVA_CMDHOSTCTL_SYNC*)pvCompletion;
3166
3167 pData->rc = rc;
3168 pData->fProcessing = 0;
3169
3170 struct VBOXVDMAHOST *pVdma = pData->pVdma;
3171
3172 ASMAtomicIncS32(&pVdma->i32cHostCrCtlCompleted);
3173
3174 RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
3175}
3176
/**
 * Submits a host chromium control command and waits for its completion.
 *
 * Submits via the asynchronous opaque host path with
 * vboxCmdVBVACmdHostCtlSyncCb as completion callback, then polls the
 * stack-allocated completion context until the callback clears fProcessing.
 *
 * @returns Submission failure status, or the completion status reported by
 *          the callback.
 */
int vboxCmdVBVACmdHostCtlSync(PPDMIDISPLAYVBVACALLBACKS pInterface,
                              struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd)
{
    PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
    struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
    VBOXCMDVBVA_CMDHOSTCTL_SYNC Data;
    Data.pVdma = pVdma;
    Data.fProcessing = 1;
    Data.rc = VERR_INTERNAL_ERROR;
    int rc = vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, vboxCmdVBVACmdHostCtlSyncCb, &Data);
    if (!RT_SUCCESS(rc))
    {
        WARN(("vdmaVBVACtlOpaqueHostSubmit failed %d", rc));
        return rc;
    }

    /* The multi-event semaphore is shared by all sync host controls, so a
     * signal may belong to another command; hence the timed wait + re-check
     * of our own fProcessing flag rather than a single indefinite wait. */
    while (Data.fProcessing)
    {
        /* Poll infrequently to make sure no completed message has been missed. */
        RTSemEventMultiWait(pVdma->HostCrCtlCompleteEvent, 500);

        if (Data.fProcessing)
            RTThreadYield();
    }

    /* 'Our' message has been processed, so should reset the semaphore.
     * There is still possible that another message has been processed
     * and the semaphore has been signalled again.
     * Reset only if there are no other messages completed.
     */
    int32_t c = ASMAtomicDecS32(&pVdma->i32cHostCrCtlCompleted);
    Assert(c >= 0);
    if (!c)
        RTSemEventMultiReset(pVdma->HostCrCtlCompleteEvent);

    rc = Data.rc;
    if (!RT_SUCCESS(rc))
        WARN(("host call failed %d", rc));

    return rc;
}
3218
3219int vboxCmdVBVACmdCtl(PVGASTATE pVGAState, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
3220{
3221 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3222 int rc = VINF_SUCCESS;
3223 switch (pCtl->u32Type)
3224 {
3225 case VBOXCMDVBVACTL_TYPE_3DCTL:
3226 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, pCtl, cbCtl);
3227 case VBOXCMDVBVACTL_TYPE_RESIZE:
3228 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_RESIZE, pCtl, cbCtl);
3229 case VBOXCMDVBVACTL_TYPE_ENABLE:
3230 if (cbCtl != sizeof (VBOXCMDVBVA_CTL_ENABLE))
3231 {
3232 WARN(("incorrect enable size\n"));
3233 rc = VERR_INVALID_PARAMETER;
3234 break;
3235 }
3236 return vdmaVBVACtlEnableDisableSubmit(pVdma, (VBOXCMDVBVA_CTL_ENABLE*)pCtl);
3237 default:
3238 WARN(("unsupported type\n"));
3239 rc = VERR_INVALID_PARAMETER;
3240 break;
3241 }
3242
3243 pCtl->i32Result = rc;
3244 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
3245 AssertRC(rc);
3246 return VINF_SUCCESS;
3247}
3248
3249int vboxCmdVBVACmdSubmit(PVGASTATE pVGAState)
3250{
3251 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3252 {
3253 WARN(("vdma VBVA is disabled\n"));
3254 return VERR_INVALID_STATE;
3255 }
3256
3257 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3258}
3259
3260int vboxCmdVBVACmdFlush(PVGASTATE pVGAState)
3261{
3262 WARN(("flush\n"));
3263 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3264 {
3265 WARN(("vdma VBVA is disabled\n"));
3266 return VERR_INVALID_STATE;
3267 }
3268 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3269}
3270
3271void vboxCmdVBVACmdTimer(PVGASTATE pVGAState)
3272{
3273 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3274 return;
3275 vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3276}
3277
3278bool vboxCmdVBVAIsEnabled(PVGASTATE pVGAState)
3279{
3280 return VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva);
3281}
3282#endif
3283
3284int vboxVDMASaveStateExecPrep(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
3285{
3286#ifdef VBOX_WITH_CRHGSMI
3287 int rc = vdmaVBVAPause(pVdma);
3288 if (RT_SUCCESS(rc))
3289 return VINF_SUCCESS;
3290
3291 if (rc != VERR_INVALID_STATE)
3292 {
3293 WARN(("vdmaVBVAPause failed %d\n", rc));
3294 return rc;
3295 }
3296
3297#ifdef DEBUG_misha
3298 WARN(("debug prep"));
3299#endif
3300
3301 PVGASTATE pVGAState = pVdma->pVGAState;
3302 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
3303 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_BEGIN, sizeof (*pCmd));
3304 Assert(pCmd);
3305 if (pCmd)
3306 {
3307 rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
3308 AssertRC(rc);
3309 if (RT_SUCCESS(rc))
3310 {
3311 rc = vboxVDMACrCtlGetRc(pCmd);
3312 }
3313 vboxVDMACrCtlRelease(pCmd);
3314 return rc;
3315 }
3316 return VERR_NO_MEMORY;
3317#else
3318 return VINF_SUCCESS;
3319#endif
3320}
3321
3322int vboxVDMASaveStateExecDone(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
3323{
3324#ifdef VBOX_WITH_CRHGSMI
3325 int rc = vdmaVBVAResume(pVdma);
3326 if (RT_SUCCESS(rc))
3327 return VINF_SUCCESS;
3328
3329 if (rc != VERR_INVALID_STATE)
3330 {
3331 WARN(("vdmaVBVAResume failed %d\n", rc));
3332 return rc;
3333 }
3334
3335#ifdef DEBUG_misha
3336 WARN(("debug done"));
3337#endif
3338
3339 PVGASTATE pVGAState = pVdma->pVGAState;
3340 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
3341 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_END, sizeof (*pCmd));
3342 Assert(pCmd);
3343 if (pCmd)
3344 {
3345 rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
3346 AssertRC(rc);
3347 if (RT_SUCCESS(rc))
3348 {
3349 rc = vboxVDMACrCtlGetRc(pCmd);
3350 }
3351 vboxVDMACrCtlRelease(pCmd);
3352 return rc;
3353 }
3354 return VERR_NO_MEMORY;
3355#else
3356 return VINF_SUCCESS;
3357#endif
3358}
3359
/**
 * Writes the vdma VBVA state to the saved-state stream.
 *
 * Writes 0xffffffff as a "no VBVA state" marker when vdma VBVA is disabled
 * (or CRHGSMI support is compiled out). Otherwise writes the VBVA buffer's
 * offset into VRAM, then delegates the remaining state to the worker thread
 * via a synchronous HH_SAVESTATE control.
 *
 * @returns VBox status code.
 */
int vboxVDMASaveStateExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
{
    int rc;

#ifdef VBOX_WITH_CRHGSMI
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
#endif
    {
        /* Marker for "no VBVA state follows"; checked by vboxVDMASaveLoadExecPerform. */
        rc = SSMR3PutU32(pSSM, 0xffffffff);
        AssertRCReturn(rc, rc);
        return VINF_SUCCESS;
    }

#ifdef VBOX_WITH_CRHGSMI
    PVGASTATE pVGAState = pVdma->pVGAState;
    uint8_t * pu8VramBase = pVGAState->vram_ptrR3;

    /* Save the VBVA buffer location as an offset relative to the VRAM base. */
    rc = SSMR3PutU32(pSSM, (uint32_t)(((uint8_t*)pVdma->CmdVbva.pVBVA) - pu8VramBase));
    AssertRCReturn(rc, rc);

    /* Let the worker thread serialize the rest of the state synchronously. */
    VBVAEXHOSTCTL HCtl;
    HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_SAVESTATE;
    HCtl.u.state.pSSM = pSSM;
    HCtl.u.state.u32Version = 0;
    return vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
#endif
}
3387
/**
 * Restores the vdma VBVA state from the saved-state stream.
 *
 * Reads the marker/offset written by vboxVDMASaveStateExecPerform: 0xffffffff
 * means no VBVA state was saved. Otherwise re-enables VBVA (paused) at the
 * saved VRAM offset and delegates the remaining state to the worker thread
 * via a synchronous HH_LOADSTATE control, then resumes it.
 *
 * @returns VBox status code.
 */
int vboxVDMASaveLoadExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32;
    int rc = SSMR3GetU32(pSSM, &u32);
    AssertRCReturn(rc, rc);

    if (u32 != 0xffffffff)
    {
#ifdef VBOX_WITH_CRHGSMI
        /* u32 is the VBVA buffer offset into VRAM; re-enable in paused state. */
        rc = vdmaVBVACtlEnableSubmitSync(pVdma, u32, true);
        AssertRCReturn(rc, rc);

        Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_ESTATE_PAUSED);

        /* Let the worker thread deserialize its state synchronously. */
        VBVAEXHOSTCTL HCtl;
        HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_LOADSTATE;
        HCtl.u.state.pSSM = pSSM;
        HCtl.u.state.u32Version = u32Version;
        rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
        AssertRCReturn(rc, rc);

        rc = vdmaVBVAResume(pVdma);
        AssertRCReturn(rc, rc);

        return VINF_SUCCESS;
#else
        /* Saved state contains VBVA data but this build cannot restore it. */
        WARN(("Unsupported VBVACtl info!\n"));
        return VERR_VERSION_MISMATCH;
#endif
    }

    return VINF_SUCCESS;
}
3421
/**
 * Post-load notification: tells the worker thread that state loading is done.
 *
 * Submits an asynchronous HH_LOADSTATE_DONE control to the worker thread,
 * presumably so it can perform any post-load processing — confirm against the
 * worker's handler for this control type.
 *
 * @returns VBox status code.
 */
int vboxVDMASaveLoadDone(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    /* Nothing to notify when vdma VBVA is not active. */
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
        return VINF_SUCCESS;

/** @todo r=bird: BTW. would be great if you put in a couple of comments here and there explaining what
 * the purpose of this code is. */
    VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE);
    if (!pHCtl)
    {
        WARN(("VBoxVBVAExHCtlCreate failed\n"));
        return VERR_NO_MEMORY;
    }

    /* sanity */
    pHCtl->u.cmd.pu8Cmd = NULL;
    pHCtl->u.cmd.cbCmd = 0;

    /* NULL completion will just free the ctl up */
    int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_HOST, NULL, NULL);
    if (RT_FAILURE(rc))
    {
        Log(("vdmaVBVACtlSubmit failed rc %d\n", rc));
        /* Submission failed, so the ctl was not handed off: free it here. */
        VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
        return rc;
    }
#endif
    return VINF_SUCCESS;
}
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette