VirtualBox

source: vbox/trunk/src/VBox/Devices/Graphics/DevVGA_VDMA.cpp@ 50754

Last change on this file since 50754 was 50754, checked in by vboxsync, 11 years ago

Dev/VGA/crOpenGL/wddm: command thread, more command processing

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 75.9 KB
Line 
1/** @file
2 * Video DMA (VDMA) support.
3 */
4
5/*
6 * Copyright (C) 2006-2012 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 */
16//#include <VBox/VMMDev.h>
17#include <VBox/vmm/pdmdev.h>
18#include <VBox/VBoxVideo.h>
19#include <iprt/semaphore.h>
20#include <iprt/thread.h>
21#include <iprt/mem.h>
22#include <iprt/asm.h>
23#include <iprt/list.h>
24
25#include "DevVGA.h"
26#include "HGSMI/SHGSMIHost.h"
27
28#include <VBox/VBoxVideo3D.h>
29#include <VBox/VBoxVideoHost3D.h>
30
#ifdef DEBUG_misha
/* Developer build: force plain heap allocation instead of the mem cache. */
# define VBOXVDBG_MEMCACHE_DISABLE
#endif

#ifndef VBOXVDBG_MEMCACHE_DISABLE
# include <iprt/memcache.h>
#endif

#ifdef DEBUG_misha
/* Break into the debugger on WARN in developer builds; no-op otherwise. */
#define WARN_BP() do { AssertFailed(); } while (0)
#else
#define WARN_BP() do { } while (0)
#endif
/* Emit a release-log message and (in developer builds) a breakpoint. */
#define WARN(_msg) do { \
        LogRel(_msg); \
        WARN_BP(); \
    } while (0)

/* Life-cycle states of the VDMA worker thread (VBOXVDMATHREAD::u32State). */
#define VBOXVDMATHREAD_STATE_TERMINATED 0
#define VBOXVDMATHREAD_STATE_CREATED 1
#define VBOXVDMATHREAD_STATE_TERMINATING 2
52
/* Book-keeping for the VDMA command worker thread. */
typedef struct VBOXVDMATHREAD
{
    RTTHREAD hWorkerThread;     /* worker thread handle (created waitable) */
    RTSEMEVENT hEvent;          /* wakes the worker (work pending / terminating) */
    RTSEMEVENT hClientEvent;    /* signals the creator that construction finished */
    volatile uint32_t u32State; /* VBOXVDMATHREAD_STATE_XXX */
} VBOXVDMATHREAD, *PVBOXVDMATHREAD;
60
61
/* state transformations:
 *
 *   submitter   |    processor
 *
 *  LISTENING   --->  PROCESSING
 *
 * */
#define VBVAEXHOSTCONTEXT_STATE_LISTENING 0
#define VBVAEXHOSTCONTEXT_STATE_PROCESSING 1

/* Enable sub-state, orthogonal to the processing state above. */
#define VBVAEXHOSTCONTEXT_ESTATE_DISABLED -1
#define VBVAEXHOSTCONTEXT_ESTATE_PAUSED 0
#define VBVAEXHOSTCONTEXT_ESTATE_ENABLED 1

/* Host-side context for the extended (command) VBVA channel. */
typedef struct VBVAEXHOSTCONTEXT
{
    VBVABUFFER *pVBVA;               /* shared guest ring buffer; NULL until enabled */
    volatile int32_t i32State;       /* VBVAEXHOSTCONTEXT_STATE_XXX (processor ownership) */
    volatile int32_t i32EnableState; /* VBVAEXHOSTCONTEXT_ESTATE_XXX */
    volatile uint32_t u32cCtls;      /* total entries in both ctl lists below */
    /* critical section for accessing ctl lists */
    RTCRITSECT CltCritSect;
    RTLISTNODE GuestCtlList;         /* pending guest-submitted ctl commands */
    RTLISTNODE HostCtlList;          /* pending host-submitted ctl commands */
#ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMEMCACHE CtlCache;             /* allocator for VBVAEXHOSTCTL entries */
#endif
} VBVAEXHOSTCONTEXT;
90
/* Ctl command types: HH_* are host-internal, GHH_*/GH_* originate from the guest path. */
typedef enum
{
    VBVAEXHOSTCTL_TYPE_UNDEFINED = 0,
    VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE,
    VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME,
    VBVAEXHOSTCTL_TYPE_HH_ENABLE,
    VBVAEXHOSTCTL_TYPE_HH_TERM,
    VBVAEXHOSTCTL_TYPE_HH_RESET,
    VBVAEXHOSTCTL_TYPE_HH_SAVESTATE,
    VBVAEXHOSTCTL_TYPE_HH_LOADSTATE,
    VBVAEXHOSTCTL_TYPE_HH_BE_OPAQUE,
    VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE,
    VBVAEXHOSTCTL_TYPE_GH_ENABLE_DISABLE
} VBVAEXHOSTCTL_TYPE;

struct VBVAEXHOSTCTL;

/* Completion callback invoked for a finished ctl (see VBoxVBVAExHPDataCompleteCtl). */
typedef DECLCALLBACKPTR(void, PFNVBVAEXHOSTCTL_COMPLETE)(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvComplete);

/* A queued control command; lives on GuestCtlList or HostCtlList. */
typedef struct VBVAEXHOSTCTL
{
    RTLISTNODE Node;
    VBVAEXHOSTCTL_TYPE enmType;
    union
    {
        struct
        {
            uint8_t * pu8Cmd; /* opaque command payload */
            uint32_t cbCmd;   /* payload size in bytes */
        } cmd;

        struct
        {
            PSSMHANDLE pSSM;     /* saved-state handle for SAVESTATE/LOADSTATE */
            uint32_t u32Version; /* saved-state version (load only) */
        } state;
    } u;
    PFNVBVAEXHOSTCTL_COMPLETE pfnComplete; /* NULL = fire-and-forget, auto-freed on completion */
    void *pvComplete;
} VBVAEXHOSTCTL;

/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
 * but can be called with other VBoxVBVAExS** (submitter) functions except Init/Start/Term apparently.
 * Can only be called by the processor, i.e. the entity that acquired the processor state by direct or indirect call to the VBoxVBVAExHSCheckCommands
 * see more detailed comments in headers for function definitions */
typedef enum
{
    VBVAEXHOST_DATA_TYPE_NO_DATA = 0,
    VBVAEXHOST_DATA_TYPE_CMD,
    VBVAEXHOST_DATA_TYPE_HOSTCTL,
    VBVAEXHOST_DATA_TYPE_GUESTCTL,
} VBVAEXHOST_DATA_TYPE;
static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd);

static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd);
static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc);

/* VBoxVBVAExHS**, i.e. submitter functions, can be called concurrently with themselves
 * as well as with other VBoxVBVAEx** functions except Init/Start/Term apparently */
static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva);

static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva);
static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA);
static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva);
static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva);
static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM);
static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version);
158
159static VBVAEXHOSTCTL* VBoxVBVAExHCtlAlloc(VBVAEXHOSTCONTEXT *pCmdVbva)
160{
161#ifndef VBOXVDBG_MEMCACHE_DISABLE
162 return (VBVAEXHOSTCONTEXT*)RTMemCacheAlloc(pCmdVbva->CtlCache);
163#else
164 return (VBVAEXHOSTCTL*)RTMemAlloc(sizeof (VBVAEXHOSTCTL));
165#endif
166}
167
/* Return a ctl command to wherever VBoxVBVAExHCtlAlloc obtained it. */
static void VBoxVBVAExHCtlFree(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl)
{
#ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMemCacheFree(pCmdVbva->CtlCache, pCtl);
#else
    RTMemFree(pCtl);
#endif
}
176
177static VBVAEXHOSTCTL* VBoxVBVAExHCtlCreate(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL_TYPE enmType)
178{
179 VBVAEXHOSTCTL* pCtl = VBoxVBVAExHCtlAlloc(pCmdVbva);
180 if (!pCtl)
181 {
182 WARN(("VBoxVBVAExHCtlAlloc failed\n"));
183 return NULL;
184 }
185
186 pCtl->enmType = enmType;
187 return pCtl;
188}
189
190static int vboxVBVAExHSProcessorAcquire(struct VBVAEXHOSTCONTEXT *pCmdVbva)
191{
192 Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);
193
194 if (ASMAtomicCmpXchgS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_PROCESSING, VBVAEXHOSTCONTEXT_STATE_LISTENING))
195 return VINF_SUCCESS;
196 return VERR_SEM_BUSY;
197}
198
/* Dequeue the next pending ctl command, host commands taking priority.
 * Must be called by the current processor (i32State == PROCESSING).
 *
 * @param pfHostCtl     [out] true when the returned ctl came from the host list.
 * @param fHostOnlyMode when true, only the host list is considered (used while
 *                      disabling, when guest ctls must no longer be served).
 * @returns the dequeued ctl (already removed from its list), or NULL when
 *          nothing eligible is pending or the critsect could not be entered. */
static VBVAEXHOSTCTL* vboxVBVAExHPCheckCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, bool *pfHostCtl, bool fHostOnlyMode)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);

    /* cheap lock-free early-out: u32cCtls counts entries of both lists */
    if(!fHostOnlyMode && !ASMAtomicUoReadU32(&pCmdVbva->u32cCtls))
        return NULL;

    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
        VBVAEXHOSTCTL* pCtl = RTListGetFirst(&pCmdVbva->HostCtlList, VBVAEXHOSTCTL, Node);
        if (pCtl)
            *pfHostCtl = true;
        else if (!fHostOnlyMode)
        {
            /* guest ctls are only served while not paused/disabled */
            if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) > VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
            {
                pCtl = RTListGetFirst(&pCmdVbva->GuestCtlList, VBVAEXHOSTCTL, Node);
                /* pCtl can not be null here since pCmdVbva->u32cCtls is not null,
                 * and there are no HostCtl commands*/
                Assert(pCtl);
                *pfHostCtl = false;
            }
        }

        if (pCtl)
        {
            /* dequeue under the lock; the counter mirrors the list lengths */
            RTListNodeRemove(&pCtl->Node);
            ASMAtomicDecU32(&pCmdVbva->u32cCtls);
        }

        RTCritSectLeave(&pCmdVbva->CltCritSect);

        return pCtl;
    }
    else
        WARN(("RTCritSectEnter failed %d\n", rc));

    return NULL;
}
239
240static VBVAEXHOSTCTL* VBoxVBVAExHPCheckHostCtlOnDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
241{
242 bool fHostCtl;
243 return vboxVBVAExHPCheckCtl(pCmdVbva, &fHostCtl, true);
244}
245
246
247static bool vboxVBVAExHPCheckProcessCtlInternal(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl)
248{
249 switch (pCtl->enmType)
250 {
251 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE:
252 if (pCmdVbva->i32EnableState > VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
253 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
254 return true;
255 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME:
256 if (pCmdVbva->i32EnableState == VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
257 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
258 return true;
259 default:
260 return false;
261 }
262}
263
264static void vboxVBVAExHPProcessorRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
265{
266 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
267
268 ASMAtomicWriteS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
269}
270
271static void vboxVBVAExHPHgEventSet(struct VBVAEXHOSTCONTEXT *pCmdVbva)
272{
273 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
274 if (pCmdVbva->pVBVA)
275 ASMAtomicOrU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, VBVA_F_STATE_PROCESSING);
276}
277
278static void vboxVBVAExHPHgEventClear(struct VBVAEXHOSTCONTEXT *pCmdVbva)
279{
280 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
281 if (pCmdVbva->pVBVA)
282 ASMAtomicAndU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, ~VBVA_F_STATE_PROCESSING);
283}
284
/* Peek at the next completed record in the guest ring buffer (does not consume it;
 * the caller retires it via VBoxVBVAExHPDataCompleteCmd).
 *
 * @returns VINF_SUCCESS       - *ppCmd/*pcbCmd describe a whole in-buffer command
 *          VINF_EOF           - no records pending
 *          VINF_TRY_AGAIN     - the guest is still writing the record
 *          VERR_INVALID_STATE - the command wraps the buffer end (unsupported) */
static int vboxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pCmdVbva->i32EnableState > VBVAEXHOSTCONTEXT_ESTATE_PAUSED);

    VBVABUFFER *pVBVA = pCmdVbva->pVBVA;

    uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
    uint32_t indexRecordFree = pVBVA->indexRecordFree;

    Log(("first = %d, free = %d\n",
         indexRecordFirst, indexRecordFree));

    if (indexRecordFirst == indexRecordFree)
    {
        /* No records to process. Return without assigning output variables. */
        return VINF_EOF;
    }

    uint32_t cbRecordCurrent = ASMAtomicReadU32(&pVBVA->aRecords[indexRecordFirst].cbRecord);

    /* A new record needs to be processed. */
    if (cbRecordCurrent & VBVA_F_RECORD_PARTIAL)
    {
        /* the record is being recorded, try again */
        return VINF_TRY_AGAIN;
    }

    uint32_t cbRecord = cbRecordCurrent & ~VBVA_F_RECORD_PARTIAL;

    if (!cbRecord)
    {
        /* the record is being recorded, try again */
        return VINF_TRY_AGAIN;
    }

    /* we should not get partial commands here actually */
    Assert(cbRecord);

    /* The size of largest contiguous chunk in the ring buffer. */
    uint32_t u32BytesTillBoundary = pVBVA->cbData - pVBVA->off32Data;

    /* The pointer to data in the ring buffer. */
    uint8_t *pSrc = &pVBVA->au8Data[pVBVA->off32Data];

    /* Fetch or point the data. */
    if (u32BytesTillBoundary >= cbRecord)
    {
        /* The command does not cross buffer boundary. Return address in the buffer. */
        *ppCmd = pSrc;
        *pcbCmd = cbRecord;
        return VINF_SUCCESS;
    }

    LogRel(("CmdVbva: cross-bound writes unsupported\n"));
    return VERR_INVALID_STATE;
}
342
343static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd)
344{
345 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
346 pVBVA->off32Data = (pVBVA->off32Data + cbCmd) % pVBVA->cbData;
347
348 pVBVA->indexRecordFirst = (pVBVA->indexRecordFirst + 1) % RT_ELEMENTS(pVBVA->aRecords);
349}
350
351static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc)
352{
353 if (pCtl->pfnComplete)
354 pCtl->pfnComplete(pCmdVbva, pCtl, rc, pCtl->pvComplete);
355 else
356 VBoxVBVAExHCtlFree(pCmdVbva, pCtl);
357}
358
/* Worker for VBoxVBVAExHPDataGet: fetch the next piece of work for the processor.
 * Ctl commands (host before guest) take priority over ring-buffer commands;
 * internal pause/resume ctls are executed inline and do not surface to the caller. */
static VBVAEXHOST_DATA_TYPE vboxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    VBVAEXHOSTCTL*pCtl;
    bool fHostClt;

    for(;;)
    {
        pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostClt, false);
        if (pCtl)
        {
            if (fHostClt)
            {
                if (!vboxVBVAExHPCheckProcessCtlInternal(pCmdVbva, pCtl))
                {
                    /* not an internal ctl: hand the ctl itself out as the data */
                    *ppCmd = (uint8_t*)pCtl;
                    *pcbCmd = sizeof (*pCtl);
                    return VBVAEXHOST_DATA_TYPE_HOSTCTL;
                }
            }
            else
            {
                *ppCmd = (uint8_t*)pCtl;
                *pcbCmd = sizeof (*pCtl);
                return VBVAEXHOST_DATA_TYPE_GUESTCTL;
            }
        }

        /* ring-buffer commands are only served while fully enabled */
        if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) <= VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
            return VBVAEXHOST_DATA_TYPE_NO_DATA;

        int rc = vboxVBVAExHPCmdGet(pCmdVbva, ppCmd, pcbCmd);
        switch (rc)
        {
            case VINF_SUCCESS:
                return VBVAEXHOST_DATA_TYPE_CMD;
            case VINF_EOF:
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
            case VINF_TRY_AGAIN:
                /* the guest is still writing the record; back off briefly */
                RTThreadSleep(1);
                continue;
            default:
                /* this is something really unexpected, i.e. most likely guest has written something incorrect to the VBVA buffer */
                WARN(("Warning: vboxVBVAExHCmdGet returned unexpected status %d\n", rc));
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
        }
    }

    /* not reached: every path above returns or continues */
    WARN(("Warning: VBoxVBVAExHCmdGet unexpected state\n"));
    return VBVAEXHOST_DATA_TYPE_NO_DATA;
}
410
/* Processor entry point: get the next command/ctl to process. On NO_DATA the
 * guest-visible "processing" flag and the processor role are released, with a
 * re-check to close the race against concurrent submitters (detailed below). */
static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    VBVAEXHOST_DATA_TYPE enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
    if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
    {
        vboxVBVAExHPHgEventClear(pCmdVbva);
        vboxVBVAExHPProcessorRelease(pCmdVbva);
        /* we need to prevent racing between us clearing the flag and command check/submission thread, i.e.
         * 1. we check the queue -> and it is empty
         * 2. submitter adds command to the queue
         * 3. submitter checks the "processing" -> and it is true , thus it does not submit a notification
         * 4. we clear the "processing" state
         * 5. ->here we need to re-check the queue state to ensure we do not leak the notification of the above command
         * 6. if the queue appears to be not-empty set the "processing" state back to "true"
         **/
        int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
        if (RT_SUCCESS(rc))
        {
            /* we are the processor now */
            enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
            if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
            {
                vboxVBVAExHPProcessorRelease(pCmdVbva);
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
            }

            vboxVBVAExHPHgEventSet(pCmdVbva);
        }
    }

    return enmType;
}
443
444DECLINLINE(bool) vboxVBVAExHSHasCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
445{
446 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
447
448 if (pVBVA)
449 {
450 uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
451 uint32_t indexRecordFree = pVBVA->indexRecordFree;
452
453 if (indexRecordFirst != indexRecordFree)
454 return true;
455 }
456
457 return !!ASMAtomicReadU32(&pCmdVbva->u32cCtls);
458}
459
/* Checks whether new commands are ready for processing
 * @returns
 *   VINF_SUCCESS - there are commands in the queue, and the given thread is now the processor (i.e. typically it would delegate processing to a worker thread)
 *   VINF_EOF - no commands in the queue
 *   VINF_ALREADY_INITIALIZED - another thread is already processing the commands
 *   VERR_INVALID_STATE - the VBVA is paused or pausing */
static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
    if (RT_SUCCESS(rc))
    {
        /* we are the processor now */
        if (vboxVBVAExHSHasCommands(pCmdVbva))
        {
            /* keep the processor role for the caller and tell the guest we are busy */
            vboxVBVAExHPHgEventSet(pCmdVbva);
            return VINF_SUCCESS;
        }

        vboxVBVAExHPProcessorRelease(pCmdVbva);
        return VINF_EOF;
    }
    if (rc == VERR_SEM_BUSY)
        return VINF_ALREADY_INITIALIZED;
    return VERR_INVALID_STATE;
}
485
/* One-time initialization of a command-VBVA context: critsect, ctl cache and
 * ctl lists. The context starts in PROCESSING state (the caller owns the
 * processor role) with VBVA disabled. */
static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
    int rc = RTCritSectInit(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
#ifndef VBOXVDBG_MEMCACHE_DISABLE
        rc = RTMemCacheCreate(&pCmdVbva->CtlCache, sizeof (VBVAEXHOSTCTL),
                                0, /* size_t cbAlignment */
                                UINT32_MAX, /* uint32_t cMaxObjects */
                                NULL, /* PFNMEMCACHECTOR pfnCtor*/
                                NULL, /* PFNMEMCACHEDTOR pfnDtor*/
                                NULL, /* void *pvUser*/
                                0 /* uint32_t fFlags*/
                                );
        if (RT_SUCCESS(rc))
#endif
        {
            RTListInit(&pCmdVbva->GuestCtlList);
            RTListInit(&pCmdVbva->HostCtlList);
            pCmdVbva->i32State = VBVAEXHOSTCONTEXT_STATE_PROCESSING;
            pCmdVbva->i32EnableState = VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
            return VINF_SUCCESS;
        }
#ifndef VBOXVDBG_MEMCACHE_DISABLE
        else
            WARN(("RTMemCacheCreate failed %d\n", rc));
#endif
    }
    else
        WARN(("RTCritSectInit failed %d\n", rc));

    return rc;
}
520
521DECLINLINE(bool) VBoxVBVAExHSIsEnabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
522{
523 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) >= VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
524}
525
526static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA)
527{
528 if (VBoxVBVAExHSIsEnabled(pCmdVbva))
529 return VINF_ALREADY_INITIALIZED;
530
531 pCmdVbva->pVBVA = pVBVA;
532 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
533 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
534 return VINF_SUCCESS;
535}
536
537static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
538{
539 if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
540 return VINF_SUCCESS;
541
542 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
543 return VINF_SUCCESS;
544}
545
/* Tear down a command-VBVA context. The processor must already be stopped and
 * both ctl lists drained; the guest-visible host flags are cleared first so
 * the guest stops submitting. */
static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    /* ensure the processor is stopped */
    Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);

    /* ensure no one tries to submit the command */
    if (pCmdVbva->pVBVA)
        pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;

    Assert(RTListIsEmpty(&pCmdVbva->GuestCtlList));
    Assert(RTListIsEmpty(&pCmdVbva->HostCtlList));

    RTCritSectDelete(&pCmdVbva->CltCritSect);

#ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMemCacheDestroy(pCmdVbva->CtlCache);
#endif

    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
}
566
/* Saves the command-VBVA state: the byte offset of the VBVA buffer within
 * VRAM, or 0xffffffff when disabled. Requires the VBVA to be PAUSED (not
 * running) when enabled.
 * @returns VINF_SUCCESS, VERR_INVALID_STATE when enabled but not paused,
 *          or the SSM put failure. */
static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
{
    int rc;

    int32_t i32EnableState = ASMAtomicUoReadS32(&pCmdVbva->i32EnableState);
    if (i32EnableState >= VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    {
        if (i32EnableState != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
        {
            WARN(("vbva not paused\n"));
            return VERR_INVALID_STATE;
        }

        /* store the byte offset of the shared buffer within VRAM */
        rc = SSMR3PutU32(pSSM, (uint32_t)(((uint8_t*)pCmdVbva->pVBVA) - pu8VramBase));
        AssertRCReturn(rc, rc);
        return VINF_SUCCESS;
    }

    /* disabled marker */
    rc = SSMR3PutU32(pSSM, 0xffffffff);
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}
593
/* Origin of a submitted ctl command; selects the target list and validation. */
typedef enum
{
    VBVAEXHOSTCTL_SOURCE_GUEST = 0,
    VBVAEXHOSTCTL_SOURCE_HOST_ANY,     /* host ctl, accepted even when VBVA is disabled */
    VBVAEXHOSTCTL_SOURCE_HOST_ENABLED  /* host ctl, only valid while VBVA is enabled */
} VBVAEXHOSTCTL_SOURCE;
600
601
/* Queue a ctl command on the guest or host list and kick command processing.
 * @param enmSource  origin of the ctl; HOST_ENABLED additionally requires the
 *                   VBVA to be enabled (re-checked under the critsect to close
 *                   the race with a concurrent disable).
 * @returns VBoxVBVAExHSCheckCommands status on the success path. */
static int VBoxVBVAExHCtlSubmit(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
{
    if ((enmSource == VBVAEXHOSTCTL_SOURCE_HOST_ENABLED) && !VBoxVBVAExHSIsEnabled(pCmdVbva))
    {
        WARN(("cmd vbva not enabled\n"));
        return VERR_INVALID_STATE;
    }

    pCtl->pfnComplete = pfnComplete;
    pCtl->pvComplete = pvComplete;

    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
        if (enmSource > VBVAEXHOSTCTL_SOURCE_GUEST)
        {
            /* re-check under the lock: the enable state may have changed
             * since the unlocked check above */
            if ((enmSource == VBVAEXHOSTCTL_SOURCE_HOST_ENABLED) && !VBoxVBVAExHSIsEnabled(pCmdVbva))
            {
                WARN(("cmd vbva not enabled\n"));
                RTCritSectLeave(&pCmdVbva->CltCritSect);
                return VERR_INVALID_STATE;
            }
            RTListAppend(&pCmdVbva->HostCtlList, &pCtl->Node);
        }
        else
            RTListAppend(&pCmdVbva->GuestCtlList, &pCtl->Node);

        ASMAtomicIncU32(&pCmdVbva->u32cCtls);

        RTCritSectLeave(&pCmdVbva->CltCritSect);

        rc = VBoxVBVAExHSCheckCommands(pCmdVbva);
    }
    else
        WARN(("RTCritSectEnter failed %d\n", rc));

    return rc;
}
640
641
642/* Loads state
643 * @returns - same as VBoxVBVAExHSCheckCommands, or failure on load state fail
644 */
645static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
646{
647 AssertMsgFailed(("implement!\n"));
648 uint32_t u32;
649 int rc = SSMR3GetU32(pSSM, &u32);
650 AssertRCReturn(rc, rc);
651 if (u32 != 0xffffffff)
652 {
653 VBVABUFFER *pVBVA = (VBVABUFFER*)pu8VramBase + u32;
654 rc = VBoxVBVAExHSEnable(pCmdVbva, pVBVA);
655 AssertRCReturn(rc, rc);
656 return VBoxVBVAExHSCheckCommands(pCmdVbva);
657 }
658
659 return VINF_SUCCESS;
660}
661
/* The VDMA host instance data. */
typedef struct VBOXVDMAHOST
{
    PHGSMIINSTANCE pHgsmi;               /* used to translate guest offsets to host pointers */
    PVGASTATE pVGAState;
    VBVAEXHOSTCONTEXT CmdVbva;           /* command-VBVA context */
    VBOXVDMATHREAD Thread;               /* command processing worker thread */
    VBOXCRCMD_SVRINFO CrSrvInfo;         /* chromium command server entry points */
    VBVAEXHOSTCTL* pCurRemainingHostCtl; /* host ctl currently being drained on disable */
#ifdef VBOX_VDMA_WITH_WATCHDOG
    PTMTIMERR3 WatchDogTimer;
#endif
} VBOXVDMAHOST, *PVBOXVDMAHOST;
674
675int VBoxVDMAThreadNotifyConstructSucceeded(PVBOXVDMATHREAD pThread)
676{
677 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_TERMINATED);
678 int rc = RTSemEventSignal(pThread->hClientEvent);
679 AssertRC(rc);
680 if (RT_SUCCESS(rc))
681 {
682 pThread->u32State = VBOXVDMATHREAD_STATE_CREATED;
683 return VINF_SUCCESS;
684 }
685 return rc;
686}
687
688int VBoxVDMAThreadNotifyConstructFailed(PVBOXVDMATHREAD pThread)
689{
690 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_TERMINATED);
691 int rc = RTSemEventSignal(pThread->hClientEvent);
692 AssertRC(rc);
693 if (RT_SUCCESS(rc))
694 return VINF_SUCCESS;
695 return rc;
696}
697
698DECLINLINE(bool) VBoxVDMAThreadIsTerminating(PVBOXVDMATHREAD pThread)
699{
700 return ASMAtomicUoReadU32(&pThread->u32State) == VBOXVDMATHREAD_STATE_TERMINATING;
701}
702
/* Create the worker thread and block (on hClientEvent) until the thread
 * routine reports construction success/failure through
 * VBoxVDMAThreadNotifyConstructSucceeded/Failed. On any failure everything
 * created here is torn down again.
 * @returns VINF_SUCCESS once the worker reached the CREATED state. */
int VBoxVDMAThreadCreate(PVBOXVDMATHREAD pThread, PFNRTTHREAD pfnThread, void *pvThread)
{
    int rc = RTSemEventCreate(&pThread->hEvent);
    if (RT_SUCCESS(rc))
    {
        rc = RTSemEventCreate(&pThread->hClientEvent);
        if (RT_SUCCESS(rc))
        {
            pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
            rc = RTThreadCreate(&pThread->hWorkerThread, pfnThread, pvThread, 0, RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "VDMA");
            if (RT_SUCCESS(rc))
            {
                /* wait for the thread routine to signal construction done */
                rc = RTSemEventWait(pThread->hClientEvent, RT_INDEFINITE_WAIT);
                if (RT_SUCCESS(rc))
                {
                    /* CREATED only if the routine reported success */
                    if (pThread->u32State == VBOXVDMATHREAD_STATE_CREATED)
                        return VINF_SUCCESS;
                    WARN(("thread routine failed the initialization\n"));
                    rc = VERR_INVALID_STATE;
                }
                else
                    WARN(("RTSemEventWait failed %d\n", rc));

                /* the thread is exiting (or failed to init); join it before cleanup */
                RTThreadWait(pThread->hWorkerThread, RT_INDEFINITE_WAIT, NULL);
            }
            else
                WARN(("RTThreadCreate failed %d\n", rc));

            RTSemEventDestroy(pThread->hClientEvent);
        }
        else
            WARN(("RTSemEventCreate failed %d\n", rc));

        RTSemEventDestroy(pThread->hEvent);
    }
    else
        WARN(("RTSemEventCreate failed %d\n", rc));

    return rc;
}
743
744DECLINLINE(int) VBoxVDMAThreadEventNotify(PVBOXVDMATHREAD pThread)
745{
746 int rc = RTSemEventSignal(pThread->hEvent);
747 AssertRC(rc);
748 return rc;
749}
750
751DECLINLINE(int) VBoxVDMAThreadEventWait(PVBOXVDMATHREAD pThread, RTMSINTERVAL cMillies)
752{
753 int rc = RTSemEventWait(pThread->hEvent, cMillies);
754 AssertRC(rc);
755 return rc;
756}
757
758void VBoxVDMAThreadMarkTerminating(PVBOXVDMATHREAD pThread)
759{
760 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_CREATED);
761 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATING);
762}
763
/* Terminate the worker: mark TERMINATING (unless already requested), wake it,
 * join it, then destroy both event semaphores. */
void VBoxVDMAThreadTerm(PVBOXVDMATHREAD pThread)
{
    int rc;
    if (ASMAtomicReadU32(&pThread->u32State) != VBOXVDMATHREAD_STATE_TERMINATING)
    {
        VBoxVDMAThreadMarkTerminating(pThread);
        rc = VBoxVDMAThreadEventNotify(pThread);
        AssertRC(rc);
    }
    rc = RTThreadWait(pThread->hWorkerThread, RT_INDEFINITE_WAIT, NULL);
    AssertRC(rc);
    RTSemEventDestroy(pThread->hClientEvent);
    RTSemEventDestroy(pThread->hEvent);
}
778
/* Forward declaration: synchronous ctl submission helper defined later. */
static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource);

#ifdef VBOX_WITH_CRHGSMI

/* Completion callback type for chromium ctl commands posted to the display driver. */
typedef DECLCALLBACK(void) FNVBOXVDMACRCTL_CALLBACK(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext);
typedef FNVBOXVDMACRCTL_CALLBACK *PFNVBOXVDMACRCTL_CALLBACK;

/* Refcounted private header prepended to every chromium ctl allocation
 * (see vboxVDMACrCtlCreate). */
typedef struct VBOXVDMACMD_CHROMIUM_CTL_PRIVATE
{
    uint32_t cRefs;                          /* reference count; freed when it hits 0 */
    int32_t rc;                              /* command result */
    PFNVBOXVDMACRCTL_CALLBACK pfnCompletion; /* set at submit time */
    void *pvCompletion;
    VBOXVDMACMD_CHROMIUM_CTL Cmd;            /* the public command; must stay last */
} VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, *PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE;

/* Recover the private header from a public command pointer. */
#define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(_p) ((PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)(((uint8_t*)(_p)) - RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd)))
796
797static PVBOXVDMACMD_CHROMIUM_CTL vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE enmCmd, uint32_t cbCmd)
798{
799 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)RTMemAllocZ(cbCmd + RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd));
800 Assert(pHdr);
801 if (pHdr)
802 {
803 pHdr->cRefs = 1;
804 pHdr->rc = VERR_NOT_IMPLEMENTED;
805 pHdr->Cmd.enmType = enmCmd;
806 pHdr->Cmd.cbCmd = cbCmd;
807 return &pHdr->Cmd;
808 }
809
810 return NULL;
811}
812
813DECLINLINE(void) vboxVDMACrCtlRelease (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
814{
815 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
816 uint32_t cRefs = ASMAtomicDecU32(&pHdr->cRefs);
817 if(!cRefs)
818 {
819 RTMemFree(pHdr);
820 }
821}
822
823DECLINLINE(void) vboxVDMACrCtlRetain (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
824{
825 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
826 ASMAtomicIncU32(&pHdr->cRefs);
827}
828
829DECLINLINE(int) vboxVDMACrCtlGetRc (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
830{
831 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
832 return pHdr->rc;
833}
834
835static DECLCALLBACK(void) vboxVDMACrCtlCbSetEvent(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
836{
837 RTSemEventSignal((RTSEMEVENT)pvContext);
838}
839
840static DECLCALLBACK(void) vboxVDMACrCtlCbReleaseCmd(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
841{
842 vboxVDMACrCtlRelease(pCmd);
843}
844
845
/* Hand a chromium ctl to the display connector for asynchronous processing;
 * pfnCompletion/pvCompletion are recorded in the private header.
 * @returns VERR_NOT_SUPPORTED when the driver exposes no CrHgsmi control hook. */
static int vboxVDMACrCtlPostAsync (PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd, PFNVBOXVDMACRCTL_CALLBACK pfnCompletion, void *pvCompletion)
{
    if ( pVGAState->pDrv
            && pVGAState->pDrv->pfnCrHgsmiControlProcess)
    {
        PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
        pHdr->pfnCompletion = pfnCompletion;
        pHdr->pvCompletion = pvCompletion;
        pVGAState->pDrv->pfnCrHgsmiControlProcess(pVGAState->pDrv, pCmd, cbCmd);
        return VINF_SUCCESS;
    }
#ifdef DEBUG_misha
    Assert(0);
#endif
    return VERR_NOT_SUPPORTED;
}
862
/* Post a chromium ctl and block until its completion callback fires.
 * NOTE(review): when RTSemEventWaitNoResume fails (e.g. interrupted) the
 * event is deliberately NOT destroyed - the completion callback may still
 * signal it later and would then touch a dead handle. That path leaks the
 * semaphore; confirm this trade-off is intended. */
static int vboxVDMACrCtlPost(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd)
{
    RTSEMEVENT hComplEvent;
    int rc = RTSemEventCreate(&hComplEvent);
    AssertRC(rc);
    if(RT_SUCCESS(rc))
    {
        rc = vboxVDMACrCtlPostAsync(pVGAState, pCmd, cbCmd, vboxVDMACrCtlCbSetEvent, (void*)hComplEvent);
#ifdef DEBUG_misha
        AssertRC(rc);
#endif
        if (RT_SUCCESS(rc))
        {
            rc = RTSemEventWaitNoResume(hComplEvent, RT_INDEFINITE_WAIT);
            AssertRC(rc);
            if(RT_SUCCESS(rc))
            {
                RTSemEventDestroy(hComplEvent);
            }
        }
        else
        {
            /* the command is completed */
            RTSemEventDestroy(hComplEvent);
        }
    }
    return rc;
}
891
/* Completion context for vboxVDMACrHgcmSubmitSync ("CYNC" is a historical
 * typo in the name, kept as-is since it is referenced throughout). */
typedef struct VDMA_VBVA_CTL_CYNC_COMPLETION
{
    int rc;            /* result reported by the completion callback */
    RTSEMEVENT hEvent; /* signalled when the command completes */
} VDMA_VBVA_CTL_CYNC_COMPLETION;
897
898static DECLCALLBACK(void) vboxVDMACrHgcmSubmitSyncCompletion(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
899{
900 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvCompletion;
901 pData->rc = rc;
902 rc = RTSemEventSignal(pData->hEvent);
903 if (!RT_SUCCESS(rc))
904 WARN(("RTSemEventSignal failed %d\n", rc));
905}
906
/* Submit a VBOXCRCMDCTL over the HGCM path and block until it completes.
 * @returns submission failure, wait failure, or the command's own result. */
static int vboxVDMACrHgcmSubmitSync(struct VBOXVDMAHOST *pVdma, VBOXCRCMDCTL* pCtl, uint32_t cbCtl)
{
    VDMA_VBVA_CTL_CYNC_COMPLETION Data;
    Data.rc = VERR_NOT_IMPLEMENTED;
    int rc = RTSemEventCreate(&Data.hEvent);
    if (!RT_SUCCESS(rc))
    {
        WARN(("RTSemEventCreate failed %d\n", rc));
        return rc;
    }

    PVGASTATE pVGAState = pVdma->pVGAState;
    rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCtl, cbCtl, vboxVDMACrHgcmSubmitSyncCompletion, &Data);
    if (RT_SUCCESS(rc))
    {
        rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
        if (RT_SUCCESS(rc))
        {
            /* propagate the result the completion callback recorded */
            rc = Data.rc;
            if (!RT_SUCCESS(rc))
            {
                WARN(("pfnCrHgcmCtlSubmit command failed %d\n", rc));
            }

        }
        else
            WARN(("RTSemEventWait failed %d\n", rc));
    }
    else
        WARN(("pfnCrHgcmCtlSubmit failed %d\n", rc));


    RTSemEventDestroy(Data.hEvent);

    return rc;
}
943
/* "Remaining host command" callback used while switching host ctl processing
 * to HGCM: completes the previously handed-out host ctl (if any), then returns
 * the next pending one; once none remain the command VBVA is finally disabled
 * and NULL/0 is returned. */
static DECLCALLBACK(VBOXCRCMDCTL*) vboxVDMACrHgcmHandleEnableRemainingHostCommand(HVBOXCRCMDCTL_REMAINING_HOST_COMMAND hClient, uint32_t *pcbCtl, int prevCmdRc)
{
    struct VBOXVDMAHOST *pVdma = hClient;
    if (!pVdma->pCurRemainingHostCtl)
    {
        /* disable VBVA, all subsequent host commands will go HGCM way */
        VBoxVBVAExHSDisable(&pVdma->CmdVbva);
    }
    else
    {
        /* complete the ctl returned on the previous invocation */
        VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pVdma->pCurRemainingHostCtl, prevCmdRc);
    }

    pVdma->pCurRemainingHostCtl = VBoxVBVAExHPCheckHostCtlOnDisable(&pVdma->CmdVbva);
    if (pVdma->pCurRemainingHostCtl)
    {
        *pcbCtl = pVdma->pCurRemainingHostCtl->u.cmd.cbCmd;
        return (VBOXCRCMDCTL*)pVdma->pCurRemainingHostCtl->u.cmd.pu8Cmd;
    }

    *pcbCtl = 0;
    return NULL;
}
967
/* Switch host ctl processing to the HGCM path: submit an ENABLE ctl whose
 * "remaining host command" callback drains the pending host ctls and finally
 * disables the command VBVA (see vboxVDMACrHgcmHandleEnableRemainingHostCommand). */
static int vboxVDMACrHgcmHandleEnable(struct VBOXVDMAHOST *pVdma)
{
    VBOXCRCMDCTL_ENABLE Enable;
    Enable.Hdr.enmType = VBOXCRCMDCTL_TYPE_ENABLE;
    Enable.hRHCmd = pVdma;
    Enable.pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;

    int rc = vboxVDMACrHgcmSubmitSync(pVdma, &Enable.Hdr, sizeof (Enable));
    Assert(!pVdma->pCurRemainingHostCtl);
    if (RT_SUCCESS(rc))
    {
        Assert(!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
        return VINF_SUCCESS;
    }

    Assert(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
    WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));

    return rc;
}
988
/* Enable the command VBVA: resolve the guest buffer at u32Offset, enable the
 * context, tell the HGCM side to stop (DISABLE ctl) and enable the chromium
 * command server. Each step is rolled back if a later one fails.
 * NOTE(review): the pfnEnable NULL check below only warns/returns inside
 * #ifdef DEBUG_misha; in other builds a NULL pfnEnable falls through and is
 * invoked later - confirm whether the check should be unconditional. */
static int vdmaVBVAEnableProcess(struct VBOXVDMAHOST *pVdma, uint32_t u32Offset)
{
    if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    {
        WARN(("vdma VBVA is already enabled\n"));
        return VERR_INVALID_STATE;
    }

    VBVABUFFER *pVBVA = (VBVABUFFER *)HGSMIOffsetToPointerHost(pVdma->pHgsmi, u32Offset);
    if (!pVBVA)
    {
        WARN(("invalid offset %d\n", u32Offset));
        return VERR_INVALID_PARAMETER;
    }

    if (!pVdma->CrSrvInfo.pfnEnable)
    {
#ifdef DEBUG_misha
        WARN(("pfnEnable is NULL\n"));
        return VERR_NOT_SUPPORTED;
#endif
    }

    int rc = VBoxVBVAExHSEnable(&pVdma->CmdVbva, pVBVA);
    if (RT_SUCCESS(rc))
    {
        /* route host commands away from HGCM while the command VBVA is active */
        VBOXCRCMDCTL Ctl;
        Ctl.enmType = VBOXCRCMDCTL_TYPE_DISABLE;
        rc = vboxVDMACrHgcmSubmitSync(pVdma, &Ctl, sizeof (Ctl));
        if (RT_SUCCESS(rc))
        {
            PVGASTATE pVGAState = pVdma->pVGAState;
            VBOXCRCMD_SVRENABLE_INFO Info;
            Info.hCltScr = pVGAState->pDrv;
            Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
            Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
            Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
            rc = pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
            if (RT_SUCCESS(rc))
                return VINF_SUCCESS;
            else
                WARN(("pfnEnable failed %d\n", rc));

            /* roll back: re-route host commands back over HGCM */
            vboxVDMACrHgcmHandleEnable(pVdma);
        }
        else
            WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));

        VBoxVBVAExHSDisable(&pVdma->CmdVbva);
    }
    else
        WARN(("VBoxVBVAExHSEnable failed %d\n", rc));

    return rc;
}
1044
/**
 * Switches guest command submission back from the VBVA ring to the HGCM channel.
 *
 * @param   pVdma   The VDMA host state.
 * @returns VBox status code; VINF_SUCCESS if VBVA was already disabled.
 */
static int vdmaVBVADisableProcess(struct VBOXVDMAHOST *pVdma)
{
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    {
        Log(("vdma VBVA is already disabled\n"));
        return VINF_SUCCESS;
    }

    /* NOTE(review): pfnDisable is called unchecked here; presumably it is always
     * set when VBVA is enabled (vdmaVBVAEnableProcess requires pfnEnable) — confirm. */
    int rc = pVdma->CrSrvInfo.pfnDisable(pVdma->CrSrvInfo.hSvr);
    if (RT_SUCCESS(rc))
    {
        /* disable is a bit tricky
         * we need to ensure the host ctl commands do not come out of order
         * and do not come over HGCM channel until after it is enabled */
        rc = vboxVDMACrHgcmHandleEnable(pVdma);
        if (RT_SUCCESS(rc))
            return rc;

        /* Re-enabling HGCM failed: best-effort rollback — re-enable the chromium
         * server; its status is intentionally ignored so rc keeps the real failure. */
        PVGASTATE pVGAState = pVdma->pVGAState;
        VBOXCRCMD_SVRENABLE_INFO Info;
        Info.hCltScr = pVGAState->pDrv;
        Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
        Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
        Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
        pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
    }
    else
        WARN(("pfnDisable failed %d\n", rc));

    return rc;
}
1076
1077static int vboxVDMACrHostCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd)
1078{
1079 switch (pCmd->enmType)
1080 {
1081 case VBVAEXHOSTCTL_TYPE_HH_SAVESTATE:
1082 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1083 {
1084 WARN(("VBVAEXHOSTCTL_TYPE_HH_SAVESTATE for disabled vdma VBVA\n"));
1085 return VERR_INVALID_STATE;
1086 }
1087 return pVdma->CrSrvInfo.pfnSaveState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM);
1088 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE:
1089 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1090 {
1091 WARN(("VBVAEXHOSTCTL_TYPE_HH_LOADSTATE for disabled vdma VBVA\n"));
1092 return VERR_INVALID_STATE;
1093 }
1094 return pVdma->CrSrvInfo.pfnLoadState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1095 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1096 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1097 {
1098 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1099 return VERR_INVALID_STATE;
1100 }
1101 return pVdma->CrSrvInfo.pfnHostCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1102 case VBVAEXHOSTCTL_TYPE_HH_TERM:
1103 {
1104 int rc = vdmaVBVADisableProcess(pVdma);
1105 if (!RT_SUCCESS(rc))
1106 {
1107 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1108 return rc;
1109 }
1110
1111 VBoxVDMAThreadMarkTerminating(&pVdma->Thread);
1112 return VINF_SUCCESS;
1113 }
1114 case VBVAEXHOSTCTL_TYPE_HH_RESET:
1115 {
1116 int rc = vdmaVBVADisableProcess(pVdma);
1117 if (!RT_SUCCESS(rc))
1118 {
1119 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1120 return rc;
1121 }
1122 return VINF_SUCCESS;
1123 }
1124 default:
1125 WARN(("unexpected host ctl type %d\n", pCmd->enmType));
1126 return VERR_INVALID_PARAMETER;
1127 }
1128}
1129
1130static int vboxVDMACrGuestCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd)
1131{
1132 switch (pCmd->enmType)
1133 {
1134 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1135 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1136 {
1137 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1138 return VERR_INVALID_STATE;
1139 }
1140 return pVdma->CrSrvInfo.pfnGuestCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1141 case VBVAEXHOSTCTL_TYPE_GH_ENABLE_DISABLE:
1142 {
1143 VBVAENABLE *pEnable = (VBVAENABLE *)pCmd->u.cmd.pu8Cmd;
1144 Assert(pCmd->u.cmd.cbCmd == sizeof (VBVAENABLE));
1145 if ((pEnable->u32Flags & (VBVA_F_ENABLE | VBVA_F_DISABLE)) == VBVA_F_ENABLE)
1146 {
1147 uint32_t u32Offset = pEnable->u32Offset;
1148 return vdmaVBVAEnableProcess(pVdma, u32Offset);
1149 }
1150
1151 return vdmaVBVADisableProcess(pVdma);
1152 }
1153 default:
1154 WARN(("unexpected ctl type %d\n", pCmd->enmType));
1155 return VERR_INVALID_PARAMETER;
1156 }
1157}
1158
1159
1160/*
1161 * @returns
1162 *
1163 */
1164static int vboxVDMACrCmdProcess(struct VBOXVDMAHOST *pVdma, uint8_t* pu8Cmd, uint32_t cbCmd)
1165{
1166 if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
1167 return VINF_EOF;
1168
1169 PVBOXCMDVBVA_HDR pCmd = (PVBOXCMDVBVA_HDR)pu8Cmd;
1170
1171 /* check if the command is cancelled */
1172 if (!ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_IN_PROGRESS, VBOXCMDVBVA_STATE_SUBMITTED))
1173 {
1174 Assert(pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED);
1175 return VINF_EOF;
1176 }
1177
1178 /* come commands can be handled right away? */
1179 switch (pCmd->u8OpCode)
1180 {
1181 case VBOXCMDVBVA_OPTYPE_NOPCMD:
1182 pCmd->i8Result = 0;
1183 return VINF_EOF;
1184 default:
1185 return VINF_SUCCESS;
1186 }
1187}
1188
/** Stub enable callback for the built-in chromium command server: nothing to do. */
static DECLCALLBACK(int) vboxVDMACrCmdEnable(HVBOXCRCMDSVR hSvr, VBOXCRCMD_SVRENABLE_INFO *pInfo)
{
    return VINF_SUCCESS;
}
1193
/** Stub disable callback for the built-in chromium command server: nothing to do. */
static DECLCALLBACK(void) vboxVDMACrCmdDisable(HVBOXCRCMDSVR hSvr)
{
}
1197
/** Control callback for the built-in chromium command server: no controls supported. */
static DECLCALLBACK(int) vboxVDMACrCmdCtl(HVBOXCRCMDSVR hSvr, uint8_t* pCmd, uint32_t cbCmd)
{
    return VERR_NOT_SUPPORTED;
}
1202
/**
 * Command callback for the built-in chromium command server.
 *
 * Currently no opcodes are handled (the BLT case is compiled out); anything
 * received is rejected by setting i8Result to -1 in the command header, while
 * the function itself still reports success to the dispatcher.
 */
static DECLCALLBACK(int) vboxVDMACrCmdCmd(HVBOXCRCMDSVR hSvr, PVBOXCMDVBVA_HDR pCmd, uint32_t cbCmd)
{
    switch (pCmd->u8OpCode)
    {
#if 0
        case VBOXCMDVBVA_OPTYPE_BLT_OFFPRIMSZFMT_OR_ID:
        {
            crVBoxServerCrCmdBltProcess(pCmd, cbCmd);
            break;
        }
#endif
        default:
            WARN(("unsupported command\n"));
            pCmd->i8Result = -1;
    }
    return VINF_SUCCESS;
}
1220
1221static int vboxVDMACrCtlHgsmiSetup(struct VBOXVDMAHOST *pVdma)
1222{
1223 PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP pCmd = (PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP)
1224 vboxVDMACrCtlCreate (VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP, sizeof (*pCmd));
1225 int rc = VERR_NO_MEMORY;
1226 if (pCmd)
1227 {
1228 PVGASTATE pVGAState = pVdma->pVGAState;
1229 pCmd->pvVRamBase = pVGAState->vram_ptrR3;
1230 pCmd->cbVRam = pVGAState->vram_size;
1231 rc = vboxVDMACrCtlPost(pVGAState, &pCmd->Hdr, sizeof (*pCmd));
1232 if (RT_SUCCESS(rc))
1233 {
1234 rc = vboxVDMACrCtlGetRc(&pCmd->Hdr);
1235 if (RT_SUCCESS(rc))
1236 pVdma->CrSrvInfo = pCmd->CrCmdServerInfo;
1237 else if (rc != VERR_NOT_SUPPORTED)
1238 WARN(("vboxVDMACrCtlGetRc returned %d\n", rc));
1239 }
1240 else
1241 WARN(("vboxVDMACrCtlPost failed %d\n", rc));
1242
1243 vboxVDMACrCtlRelease(&pCmd->Hdr);
1244 }
1245
1246 if (!RT_SUCCESS(rc))
1247 memset(&pVdma->CrSrvInfo, 0, sizeof (pVdma->CrSrvInfo));
1248
1249 return rc;
1250}
1251
1252static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer);
1253
/* check if this is external cmd to be passed to chromium backend */
/**
 * Examines a DMA command descriptor and, when it wraps a command the host can
 * handle directly (chromium command or BPB transfer), processes/forwards it.
 *
 * @param   pVdma   The VDMA host state.
 * @param   pCmdDr  The guest command descriptor.
 * @param   cbCmdDr Total size of the descriptor (including any trailing buffer).
 * @returns VINF_SUCCESS when the command was consumed here, VINF_NOT_SUPPORTED
 *          when the caller must process it, or VERR_xxx on invalid input.
 */
static int vboxVDMACmdCheckCrCmd(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmdDr, uint32_t cbCmdDr)
{
    PVBOXVDMACMD pDmaCmd = NULL;
    uint32_t cbDmaCmd = 0;
    uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
    int rc = VINF_NOT_SUPPORTED;

    cbDmaCmd = pCmdDr->cbBuf;

    if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
    {
        /* the command buffer is appended right after the descriptor */
        if (cbCmdDr < sizeof (*pCmdDr) + VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        /* NOTE(review): this comparison looks inverted — it requires the claimed
         * cbDmaCmd to be at least the space available after the descriptor, rather
         * than to fit within it; verify the intended invariant for guest input. */
        if (cbDmaCmd < cbCmdDr - sizeof (*pCmdDr) - VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid command buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        pDmaCmd = VBOXVDMACBUF_DR_TAIL(pCmdDr, VBOXVDMACMD);
    }
    else if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
    {
        /* the command buffer lives in guest VRAM at the given offset */
        /* NOTE(review): offBuf + cbDmaCmd could in principle wrap; assumes the
         * offset type is wide enough — confirm VBOXVIDEOOFFSET width. */
        VBOXVIDEOOFFSET offBuf = pCmdDr->Location.offVramBuf;
        if (offBuf + cbDmaCmd > pVdma->pVGAState->vram_size)
        {
            AssertMsgFailed(("invalid command buffer data from offset!"));
            return VERR_INVALID_PARAMETER;
        }
        pDmaCmd = (VBOXVDMACMD*)(pvRam + offBuf);
    }

    if (pDmaCmd)
    {
        Assert(cbDmaCmd >= VBOXVDMACMD_HEADER_SIZE());
        uint32_t cbBody = VBOXVDMACMD_BODY_SIZE(cbDmaCmd);

        switch (pDmaCmd->enmType)
        {
            case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
            {
                PVBOXVDMACMD_CHROMIUM_CMD pCrCmd = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_CHROMIUM_CMD);
                if (cbBody < sizeof (*pCrCmd))
                {
                    AssertMsgFailed(("invalid chromium command buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }
                PVGASTATE pVGAState = pVdma->pVGAState;
                rc = VINF_SUCCESS;
                if (pVGAState->pDrv->pfnCrHgsmiCommandProcess)
                {
                    /* completion is reported asynchronously by the HGCM thread */
                    VBoxSHGSMICommandMarkAsynchCompletion(pCmdDr);
                    pVGAState->pDrv->pfnCrHgsmiCommandProcess(pVGAState->pDrv, pCrCmd, cbBody);
                    break;
                }
                else
                {
                    Assert(0);
                }

                /* no processor hooked up: complete the command immediately */
                int tmpRc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                AssertRC(tmpRc);
                break;
            }
            case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
            {
                PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
                if (cbBody < sizeof (*pTransfer))
                {
                    AssertMsgFailed(("invalid bpb transfer buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }

                /* execute the transfer synchronously and complete the descriptor here */
                rc = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, sizeof (*pTransfer));
                AssertRC(rc);
                if (RT_SUCCESS(rc))
                {
                    pCmdDr->rc = VINF_SUCCESS;
                    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                    AssertRC(rc);
                    rc = VINF_SUCCESS;
                }
                break;
            }
            default:
                break;
        }
    }
    return rc;
}
1349
1350int vboxVDMACrHgsmiCommandCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc)
1351{
1352 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
1353 PHGSMIINSTANCE pIns = pVGAState->pHGSMI;
1354 VBOXVDMACMD *pDmaHdr = VBOXVDMACMD_FROM_BODY(pCmd);
1355 VBOXVDMACBUF_DR *pDr = VBOXVDMACBUF_DR_FROM_TAIL(pDmaHdr);
1356 AssertRC(rc);
1357 pDr->rc = rc;
1358
1359 Assert(pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
1360 rc = VBoxSHGSMICommandComplete(pIns, pDr);
1361 AssertRC(rc);
1362 return rc;
1363}
1364
1365int vboxVDMACrHgsmiControlCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CTL pCmd, int rc)
1366{
1367 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
1368 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pCmdPrivate = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1369 pCmdPrivate->rc = rc;
1370 if (pCmdPrivate->pfnCompletion)
1371 {
1372 pCmdPrivate->pfnCompletion(pVGAState, pCmd, pCmdPrivate->pvCompletion);
1373 }
1374 return VINF_SUCCESS;
1375}
1376
1377#endif
1378
1379#ifdef VBOX_VDMA_WITH_WORKERTHREAD
/* to simplify things and to avoid extra backend if modifications we assume the VBOXVDMA_RECTL is the same as VBVACMDHDR */
/* Compile-time proof of that assumption: sizes, member widths, and member
 * offsets of the two structures must all match so a VBOXVDMA_RECTL pointer can
 * be passed where a VBVACMDHDR is expected (see vboxVDMANotifyPrimaryUpdate). */
AssertCompile(sizeof(VBOXVDMA_RECTL) == sizeof(VBVACMDHDR));
AssertCompile(RT_SIZEOFMEMB(VBOXVDMA_RECTL, left) == RT_SIZEOFMEMB(VBVACMDHDR, x));
AssertCompile(RT_SIZEOFMEMB(VBOXVDMA_RECTL, top) == RT_SIZEOFMEMB(VBVACMDHDR, y));
AssertCompile(RT_SIZEOFMEMB(VBOXVDMA_RECTL, width) == RT_SIZEOFMEMB(VBVACMDHDR, w));
AssertCompile(RT_SIZEOFMEMB(VBOXVDMA_RECTL, height) == RT_SIZEOFMEMB(VBVACMDHDR, h));
AssertCompile(RT_OFFSETOF(VBOXVDMA_RECTL, left) == RT_OFFSETOF(VBVACMDHDR, x));
AssertCompile(RT_OFFSETOF(VBOXVDMA_RECTL, top) == RT_OFFSETOF(VBVACMDHDR, y));
AssertCompile(RT_OFFSETOF(VBOXVDMA_RECTL, width) == RT_OFFSETOF(VBVACMDHDR, w));
AssertCompile(RT_OFFSETOF(VBOXVDMA_RECTL, height) == RT_OFFSETOF(VBVACMDHDR, h));
1390
/**
 * Notifies the display connector (and through it the VRDP server) that a
 * rectangle of the primary screen was updated.
 *
 * @param   pVGAState   The VGA device state.
 * @param   uScreenId   Screen/view index to report the update on.
 * @param   pRectl      Updated rectangle; passed as a VBVACMDHDR, which is
 *                      layout-identical per the AssertCompile checks above.
 * @returns VINF_SUCCESS always.
 */
static int vboxVDMANotifyPrimaryUpdate (PVGASTATE pVGAState, unsigned uScreenId, const VBOXVDMA_RECTL * pRectl)
{
    pVGAState->pDrv->pfnVBVAUpdateBegin (pVGAState->pDrv, uScreenId);

    /* Updates the rectangle and sends the command to the VRDP server. */
    pVGAState->pDrv->pfnVBVAUpdateProcess (pVGAState->pDrv, uScreenId,
                                           (const PVBVACMDHDR)pRectl /* <- see above AssertCompile's and comments */,
                                           sizeof (VBOXVDMA_RECTL));

    pVGAState->pDrv->pfnVBVAUpdateEnd (pVGAState->pDrv, uScreenId, pRectl->left, pRectl->top,
                                       pRectl->width, pRectl->height);

    return VINF_SUCCESS;
}
1405#endif
1406
1407static int vboxVDMACmdExecBltPerform(PVBOXVDMAHOST pVdma,
1408 uint8_t *pvDstSurf, const uint8_t *pvSrcSurf,
1409 const PVBOXVDMA_SURF_DESC pDstDesc, const PVBOXVDMA_SURF_DESC pSrcDesc,
1410 const VBOXVDMA_RECTL * pDstRectl, const VBOXVDMA_RECTL * pSrcRectl)
1411{
1412 /* we do not support color conversion */
1413 Assert(pDstDesc->format == pSrcDesc->format);
1414 /* we do not support stretching */
1415 Assert(pDstRectl->height == pSrcRectl->height);
1416 Assert(pDstRectl->width == pSrcRectl->width);
1417 if (pDstDesc->format != pSrcDesc->format)
1418 return VERR_INVALID_FUNCTION;
1419 if (pDstDesc->width == pDstRectl->width
1420 && pSrcDesc->width == pSrcRectl->width
1421 && pSrcDesc->width == pDstDesc->width)
1422 {
1423 Assert(!pDstRectl->left);
1424 Assert(!pSrcRectl->left);
1425 uint32_t cbOff = pDstDesc->pitch * pDstRectl->top;
1426 uint32_t cbSize = pDstDesc->pitch * pDstRectl->height;
1427 memcpy(pvDstSurf + cbOff, pvSrcSurf + cbOff, cbSize);
1428 }
1429 else
1430 {
1431 uint32_t offDstLineStart = pDstRectl->left * pDstDesc->bpp >> 3;
1432 uint32_t offDstLineEnd = ((pDstRectl->left * pDstDesc->bpp + 7) >> 3) + ((pDstDesc->bpp * pDstRectl->width + 7) >> 3);
1433 uint32_t cbDstLine = offDstLineEnd - offDstLineStart;
1434 uint32_t offDstStart = pDstDesc->pitch * pDstRectl->top + offDstLineStart;
1435 Assert(cbDstLine <= pDstDesc->pitch);
1436 uint32_t cbDstSkip = pDstDesc->pitch;
1437 uint8_t * pvDstStart = pvDstSurf + offDstStart;
1438
1439 uint32_t offSrcLineStart = pSrcRectl->left * pSrcDesc->bpp >> 3;
1440 uint32_t offSrcLineEnd = ((pSrcRectl->left * pSrcDesc->bpp + 7) >> 3) + ((pSrcDesc->bpp * pSrcRectl->width + 7) >> 3);
1441 uint32_t cbSrcLine = offSrcLineEnd - offSrcLineStart;
1442 uint32_t offSrcStart = pSrcDesc->pitch * pSrcRectl->top + offSrcLineStart;
1443 Assert(cbSrcLine <= pSrcDesc->pitch);
1444 uint32_t cbSrcSkip = pSrcDesc->pitch;
1445 const uint8_t * pvSrcStart = pvSrcSurf + offSrcStart;
1446
1447 Assert(cbDstLine == cbSrcLine);
1448
1449 for (uint32_t i = 0; ; ++i)
1450 {
1451 memcpy (pvDstStart, pvSrcStart, cbDstLine);
1452 if (i == pDstRectl->height)
1453 break;
1454 pvDstStart += cbDstSkip;
1455 pvSrcStart += cbSrcSkip;
1456 }
1457 }
1458 return VINF_SUCCESS;
1459}
1460
1461static void vboxVDMARectlUnite(VBOXVDMA_RECTL * pRectl1, const VBOXVDMA_RECTL * pRectl2)
1462{
1463 if (!pRectl1->width)
1464 *pRectl1 = *pRectl2;
1465 else
1466 {
1467 int16_t x21 = pRectl1->left + pRectl1->width;
1468 int16_t x22 = pRectl2->left + pRectl2->width;
1469 if (pRectl1->left > pRectl2->left)
1470 {
1471 pRectl1->left = pRectl2->left;
1472 pRectl1->width = x21 < x22 ? x22 - pRectl1->left : x21 - pRectl1->left;
1473 }
1474 else if (x21 < x22)
1475 pRectl1->width = x22 - pRectl1->left;
1476
1477 x21 = pRectl1->top + pRectl1->height;
1478 x22 = pRectl2->top + pRectl2->height;
1479 if (pRectl1->top > pRectl2->top)
1480 {
1481 pRectl1->top = pRectl2->top;
1482 pRectl1->height = x21 < x22 ? x22 - pRectl1->top : x21 - pRectl1->top;
1483 }
1484 else if (x21 < x22)
1485 pRectl1->height = x22 - pRectl1->top;
1486 }
1487}
1488
1489/*
1490 * @return on success the number of bytes the command contained, otherwise - VERR_xxx error code
1491 */
1492static int vboxVDMACmdExecBlt(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt, uint32_t cbBuffer)
1493{
1494 const uint32_t cbBlt = VBOXVDMACMD_BODY_FIELD_OFFSET(uint32_t, VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects[pBlt->cDstSubRects]);
1495 Assert(cbBlt <= cbBuffer);
1496 if (cbBuffer < cbBlt)
1497 return VERR_INVALID_FUNCTION;
1498
1499 /* we do not support stretching for now */
1500 Assert(pBlt->srcRectl.width == pBlt->dstRectl.width);
1501 Assert(pBlt->srcRectl.height == pBlt->dstRectl.height);
1502 if (pBlt->srcRectl.width != pBlt->dstRectl.width)
1503 return VERR_INVALID_FUNCTION;
1504 if (pBlt->srcRectl.height != pBlt->dstRectl.height)
1505 return VERR_INVALID_FUNCTION;
1506 Assert(pBlt->cDstSubRects);
1507
1508 uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
1509 VBOXVDMA_RECTL updateRectl = {0, 0, 0, 0};
1510
1511 if (pBlt->cDstSubRects)
1512 {
1513 VBOXVDMA_RECTL dstRectl, srcRectl;
1514 const VBOXVDMA_RECTL *pDstRectl, *pSrcRectl;
1515 for (uint32_t i = 0; i < pBlt->cDstSubRects; ++i)
1516 {
1517 pDstRectl = &pBlt->aDstSubRects[i];
1518 if (pBlt->dstRectl.left || pBlt->dstRectl.top)
1519 {
1520 dstRectl.left = pDstRectl->left + pBlt->dstRectl.left;
1521 dstRectl.top = pDstRectl->top + pBlt->dstRectl.top;
1522 dstRectl.width = pDstRectl->width;
1523 dstRectl.height = pDstRectl->height;
1524 pDstRectl = &dstRectl;
1525 }
1526
1527 pSrcRectl = &pBlt->aDstSubRects[i];
1528 if (pBlt->srcRectl.left || pBlt->srcRectl.top)
1529 {
1530 srcRectl.left = pSrcRectl->left + pBlt->srcRectl.left;
1531 srcRectl.top = pSrcRectl->top + pBlt->srcRectl.top;
1532 srcRectl.width = pSrcRectl->width;
1533 srcRectl.height = pSrcRectl->height;
1534 pSrcRectl = &srcRectl;
1535 }
1536
1537 int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
1538 &pBlt->dstDesc, &pBlt->srcDesc,
1539 pDstRectl,
1540 pSrcRectl);
1541 AssertRC(rc);
1542 if (!RT_SUCCESS(rc))
1543 return rc;
1544
1545 vboxVDMARectlUnite(&updateRectl, pDstRectl);
1546 }
1547 }
1548 else
1549 {
1550 int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
1551 &pBlt->dstDesc, &pBlt->srcDesc,
1552 &pBlt->dstRectl,
1553 &pBlt->srcRectl);
1554 AssertRC(rc);
1555 if (!RT_SUCCESS(rc))
1556 return rc;
1557
1558 vboxVDMARectlUnite(&updateRectl, &pBlt->dstRectl);
1559 }
1560
1561#ifdef VBOX_VDMA_WITH_WORKERTHREAD
1562 int iView = 0;
1563 /* @todo: fixme: check if update is needed and get iView */
1564 vboxVDMANotifyPrimaryUpdate (pVdma->pVGAState, iView, &updateRectl);
1565#endif
1566
1567 return cbBlt;
1568}
1569
/**
 * Executes a BPB transfer command: copies cbTransferSize bytes where source and
 * destination are each either a VRAM offset or a guest-physical buffer.
 *
 * Guest-physical sides are mapped one page at a time (hence the loop capping
 * cbSubTransfer at 0x1000); VRAM sides are addressed directly.
 *
 * @param   pVdma     The VDMA host state.
 * @param   pTransfer The transfer command body.
 * @param   cbBuffer  Size of the buffer holding the command.
 * @returns sizeof(*pTransfer) (bytes consumed) on success, VERR_xxx on failure.
 */
static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer)
{
    if (cbBuffer < sizeof (*pTransfer))
        return VERR_INVALID_PARAMETER;

    PVGASTATE pVGAState = pVdma->pVGAState;
    uint8_t * pvRam = pVGAState->vram_ptrR3;
    PGMPAGEMAPLOCK SrcLock;
    PGMPAGEMAPLOCK DstLock;
    PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
    const void * pvSrc;
    void * pvDst;
    int rc = VINF_SUCCESS;
    uint32_t cbTransfer = pTransfer->cbTransferSize;
    uint32_t cbTransfered = 0;
    bool bSrcLocked = false;
    bool bDstLocked = false;
    do
    {
        uint32_t cbSubTransfer = cbTransfer;
        /* resolve the source pointer for this chunk */
        if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
        {
            pvSrc  = pvRam + pTransfer->Src.offVramBuf + cbTransfered;
        }
        else
        {
            RTGCPHYS phPage = pTransfer->Src.phBuf;
            phPage += cbTransfered;
            rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvSrc, &SrcLock);
            AssertRC(rc);
            if (RT_SUCCESS(rc))
            {
                bSrcLocked = true;
                /* mapped one page only — limit the chunk accordingly */
                cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
            }
            else
            {
                break;
            }
        }

        /* resolve the destination pointer for this chunk */
        if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
        {
            pvDst  = pvRam + pTransfer->Dst.offVramBuf + cbTransfered;
        }
        else
        {
            RTGCPHYS phPage = pTransfer->Dst.phBuf;
            phPage += cbTransfered;
            rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvDst, &DstLock);
            AssertRC(rc);
            if (RT_SUCCESS(rc))
            {
                bDstLocked = true;
                cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
            }
            else
            {
                break;
            }
        }

        if (RT_SUCCESS(rc))
        {
            memcpy(pvDst, pvSrc, cbSubTransfer);
            cbTransfer -= cbSubTransfer;
            cbTransfered += cbSubTransfer;
        }
        else
        {
            cbTransfer = 0; /* to break */
        }

        /* page mapping locks are only held for the duration of one chunk */
        if (bSrcLocked)
            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &SrcLock);
        if (bDstLocked)
            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &DstLock);
    } while (cbTransfer);

    if (RT_SUCCESS(rc))
        return sizeof (*pTransfer);
    return rc;
}
1653
1654static int vboxVDMACmdExec(PVBOXVDMAHOST pVdma, const uint8_t *pvBuffer, uint32_t cbBuffer)
1655{
1656 do
1657 {
1658 Assert(pvBuffer);
1659 Assert(cbBuffer >= VBOXVDMACMD_HEADER_SIZE());
1660
1661 if (!pvBuffer)
1662 return VERR_INVALID_PARAMETER;
1663 if (cbBuffer < VBOXVDMACMD_HEADER_SIZE())
1664 return VERR_INVALID_PARAMETER;
1665
1666 PVBOXVDMACMD pCmd = (PVBOXVDMACMD)pvBuffer;
1667 uint32_t cbCmd = 0;
1668 switch (pCmd->enmType)
1669 {
1670 case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
1671 {
1672#ifdef VBOXWDDM_TEST_UHGSMI
1673 static int count = 0;
1674 static uint64_t start, end;
1675 if (count==0)
1676 {
1677 start = RTTimeNanoTS();
1678 }
1679 ++count;
1680 if (count==100000)
1681 {
1682 end = RTTimeNanoTS();
1683 float ems = (end-start)/1000000.f;
1684 LogRel(("100000 calls took %i ms, %i cps\n", (int)ems, (int)(100000.f*1000.f/ems) ));
1685 }
1686#endif
1687 /* todo: post the buffer to chromium */
1688 return VINF_SUCCESS;
1689 }
1690 case VBOXVDMACMD_TYPE_DMA_PRESENT_BLT:
1691 {
1692 const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_PRESENT_BLT);
1693 int cbBlt = vboxVDMACmdExecBlt(pVdma, pBlt, cbBuffer);
1694 Assert(cbBlt >= 0);
1695 Assert((uint32_t)cbBlt <= cbBuffer);
1696 if (cbBlt >= 0)
1697 {
1698 if ((uint32_t)cbBlt == cbBuffer)
1699 return VINF_SUCCESS;
1700 else
1701 {
1702 cbBuffer -= (uint32_t)cbBlt;
1703 pvBuffer -= cbBlt;
1704 }
1705 }
1706 else
1707 return cbBlt; /* error */
1708 break;
1709 }
1710 case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
1711 {
1712 const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
1713 int cbTransfer = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, cbBuffer);
1714 Assert(cbTransfer >= 0);
1715 Assert((uint32_t)cbTransfer <= cbBuffer);
1716 if (cbTransfer >= 0)
1717 {
1718 if ((uint32_t)cbTransfer == cbBuffer)
1719 return VINF_SUCCESS;
1720 else
1721 {
1722 cbBuffer -= (uint32_t)cbTransfer;
1723 pvBuffer -= cbTransfer;
1724 }
1725 }
1726 else
1727 return cbTransfer; /* error */
1728 break;
1729 }
1730 case VBOXVDMACMD_TYPE_DMA_NOP:
1731 return VINF_SUCCESS;
1732 case VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ:
1733 return VINF_SUCCESS;
1734 default:
1735 AssertBreakpoint();
1736 return VERR_INVALID_FUNCTION;
1737 }
1738 } while (1);
1739
1740 /* we should not be here */
1741 AssertBreakpoint();
1742 return VERR_INVALID_STATE;
1743}
1744
1745static DECLCALLBACK(int) vboxVDMAWorkerThread(RTTHREAD ThreadSelf, void *pvUser)
1746{
1747 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvUser;
1748 PVGASTATE pVGAState = pVdma->pVGAState;
1749 VBVAEXHOSTCONTEXT *pCmdVbva = &pVdma->CmdVbva;
1750 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
1751 uint8_t *pCmd;
1752 uint32_t cbCmd;
1753
1754 int rc = VBoxVDMAThreadNotifyConstructSucceeded(&pVdma->Thread);
1755 if (!RT_SUCCESS(rc))
1756 {
1757 WARN(("VBoxVDMAThreadNotifyConstructSucceeded failed %d\n", rc));
1758 return rc;
1759 }
1760
1761 while (!VBoxVDMAThreadIsTerminating(&pVdma->Thread))
1762 {
1763 VBVAEXHOST_DATA_TYPE enmType = VBoxVBVAExHPDataGet(pCmdVbva, &pCmd, &cbCmd);
1764 switch (enmType)
1765 {
1766 case VBVAEXHOST_DATA_TYPE_CMD:
1767 vboxVDMACrCmdProcess(pVdma, pCmd, cbCmd);
1768 VBoxVBVAExHPDataCompleteCmd(pCmdVbva, cbCmd);
1769 VBVARaiseIrqNoWait(pVGAState, 0);
1770 break;
1771 case VBVAEXHOST_DATA_TYPE_HOSTCTL:
1772 rc = vboxVDMACrHostCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd);
1773 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
1774 break;
1775 case VBVAEXHOST_DATA_TYPE_GUESTCTL:
1776 rc = vboxVDMACrGuestCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd);
1777 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
1778 break;
1779 case VBVAEXHOST_DATA_TYPE_NO_DATA:
1780 rc = VBoxVDMAThreadEventWait(&pVdma->Thread, RT_INDEFINITE_WAIT);
1781 AssertRC(rc);
1782 break;
1783 default:
1784 WARN(("unexpected type %d\n", enmType));
1785 break;
1786 }
1787 }
1788
1789 return VINF_SUCCESS;
1790}
1791
/**
 * Resolves the command buffer referenced by a DMA command descriptor, executes
 * it via vboxVDMACmdExec, and completes the descriptor with the result.
 *
 * The buffer can follow the descriptor inline, live at a VRAM offset, or be a
 * guest-physical page (the latter limited to a single page for now).
 *
 * @param   pVdma   The VDMA host state.
 * @param   pCmd    The command descriptor to process (completed on return).
 * @param   cbCmd   Size of the descriptor (unused here).
 */
static void vboxVDMACommandProcess(PVBOXVDMAHOST pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
    const uint8_t * pvBuf;
    PGMPAGEMAPLOCK Lock;
    int rc;
    bool bReleaseLocked = false;

    do
    {
        PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;

        if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
            pvBuf = VBOXVDMACBUF_DR_TAIL(pCmd, const uint8_t);
        else if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
        {
            uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
            pvBuf = pvRam + pCmd->Location.offVramBuf;
        }
        else
        {
            /* guest-physical buffer: split the address into page and in-page offset */
            RTGCPHYS phPage = pCmd->Location.phBuf & ~0xfffULL;
            uint32_t offset = pCmd->Location.phBuf & 0xfff;
            Assert(offset + pCmd->cbBuf <= 0x1000);
            if (offset + pCmd->cbBuf > 0x1000)
            {
                /* @todo: more advanced mechanism of command buffer proc is actually needed */
                rc = VERR_INVALID_PARAMETER;
                break;
            }

            const void * pvPageBuf;
            rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPageBuf, &Lock);
            AssertRC(rc);
            if (!RT_SUCCESS(rc))
            {
                /* @todo: if (rc == VERR_PGM_PHYS_PAGE_RESERVED) -> fall back on using PGMPhysRead ?? */
                break;
            }

            pvBuf = (const uint8_t *)pvPageBuf;
            pvBuf += offset;

            /* Lock is only initialized on this path; remember to release it below. */
            bReleaseLocked = true;
        }

        rc = vboxVDMACmdExec(pVdma, pvBuf, pCmd->cbBuf);
        AssertRC(rc);

        if (bReleaseLocked)
            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
    } while (0);

    pCmd->rc = rc;

    rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
    AssertRC(rc);
}
1850
1851static void vboxVDMAControlProcess(PVBOXVDMAHOST pVdma, PVBOXVDMA_CTL pCmd)
1852{
1853 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
1854 pCmd->i32Result = VINF_SUCCESS;
1855 int rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
1856 AssertRC(rc);
1857}
1858
1859#ifdef VBOX_VDMA_WITH_WATCHDOG
1860static DECLCALLBACK(void) vboxVDMAWatchDogTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
1861{
1862 VBOXVDMAHOST *pVdma = (VBOXVDMAHOST *)pvUser;
1863 PVGASTATE pVGAState = pVdma->pVGAState;
1864 VBVARaiseIrq(pVGAState, HGSMIHOSTFLAGS_WATCHDOG);
1865}
1866
1867static int vboxVDMAWatchDogCtl(struct VBOXVDMAHOST *pVdma, uint32_t cMillis)
1868{
1869 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
1870 if (cMillis)
1871 TMTimerSetMillies(pVdma->WatchDogTimer, cMillis);
1872 else
1873 TMTimerStop(pVdma->WatchDogTimer);
1874 return VINF_SUCCESS;
1875}
1876#endif
1877
1878int vboxVDMAConstruct(PVGASTATE pVGAState, uint32_t cPipeElements)
1879{
1880 int rc;
1881#ifdef VBOX_VDMA_WITH_WORKERTHREAD
1882 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)RTMemAllocZ(RT_OFFSETOF(VBOXVDMAHOST, CmdPool.aCmds[cPipeElements]));
1883#else
1884 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)RTMemAllocZ(sizeof(*pVdma));
1885#endif
1886 Assert(pVdma);
1887 if (pVdma)
1888 {
1889 pVdma->pHgsmi = pVGAState->pHGSMI;
1890 pVdma->pVGAState = pVGAState;
1891
1892#ifdef VBOX_VDMA_WITH_WATCHDOG
1893 rc = PDMDevHlpTMTimerCreate(pVGAState->pDevInsR3, TMCLOCK_REAL, vboxVDMAWatchDogTimer,
1894 pVdma, TMTIMER_FLAGS_NO_CRIT_SECT,
1895 "VDMA WatchDog Timer", &pVdma->WatchDogTimer);
1896 AssertRC(rc);
1897#endif
1898 rc = VBoxVBVAExHSInit(&pVdma->CmdVbva);
1899 if (RT_SUCCESS(rc))
1900 {
1901 rc = VBoxVDMAThreadCreate(&pVdma->Thread, vboxVDMAWorkerThread, pVdma);
1902 if (RT_SUCCESS(rc))
1903 {
1904 pVGAState->pVdma = pVdma;
1905#ifdef VBOX_WITH_CRHGSMI
1906 int rcIgnored = vboxVDMACrCtlHgsmiSetup(pVdma); NOREF(rcIgnored); /** @todo is this ignoring intentional? */
1907#endif
1908 return VINF_SUCCESS;
1909 }
1910 else
1911 WARN(("VBoxVDMAThreadCreate faile %d\n", rc));
1912
1913 VBoxVBVAExHSTerm(&pVdma->CmdVbva);
1914 }
1915 else
1916 WARN(("VBoxVBVAExHSInit faile %d\n", rc));
1917
1918 RTMemFree(pVdma);
1919 }
1920 else
1921 rc = VERR_OUT_OF_RESOURCES;
1922
1923 return rc;
1924}
1925
1926int vboxVDMAReset(struct VBOXVDMAHOST *pVdma)
1927{
1928 VBVAEXHOSTCTL Ctl;
1929 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_RESET;
1930 int rc = vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST_ANY);
1931 if (!RT_SUCCESS(rc))
1932 {
1933 WARN(("vdmaVBVACtlSubmitSync failed %d\n", rc));
1934 return rc;
1935 }
1936 return VINF_SUCCESS;
1937}
1938
1939int vboxVDMADestruct(struct VBOXVDMAHOST *pVdma)
1940{
1941 VBVAEXHOSTCTL Ctl;
1942 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_TERM;
1943 int rc = vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST_ANY);
1944 if (!RT_SUCCESS(rc))
1945 {
1946 WARN(("vdmaVBVACtlSubmitSync failed %d\n", rc));
1947 return rc;
1948 }
1949 VBoxVDMAThreadTerm(&pVdma->Thread);
1950 VBoxVBVAExHSTerm(&pVdma->CmdVbva);
1951 RTMemFree(pVdma);
1952 return VINF_SUCCESS;
1953}
1954
1955int vboxVDMASaveStateExecPrep(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
1956{
1957#ifdef VBOX_WITH_CRHGSMI
1958 PVGASTATE pVGAState = pVdma->pVGAState;
1959 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
1960 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_BEGIN, sizeof (*pCmd));
1961 Assert(pCmd);
1962 if (pCmd)
1963 {
1964 int rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
1965 AssertRC(rc);
1966 if (RT_SUCCESS(rc))
1967 {
1968 rc = vboxVDMACrCtlGetRc(pCmd);
1969 }
1970 vboxVDMACrCtlRelease(pCmd);
1971 return rc;
1972 }
1973 return VERR_NO_MEMORY;
1974#else
1975 return VINF_SUCCESS;
1976#endif
1977}
1978
1979int vboxVDMASaveStateExecDone(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
1980{
1981#ifdef VBOX_WITH_CRHGSMI
1982 PVGASTATE pVGAState = pVdma->pVGAState;
1983 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
1984 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_END, sizeof (*pCmd));
1985 Assert(pCmd);
1986 if (pCmd)
1987 {
1988 int rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
1989 AssertRC(rc);
1990 if (RT_SUCCESS(rc))
1991 {
1992 rc = vboxVDMACrCtlGetRc(pCmd);
1993 }
1994 vboxVDMACrCtlRelease(pCmd);
1995 return rc;
1996 }
1997 return VERR_NO_MEMORY;
1998#else
1999 return VINF_SUCCESS;
2000#endif
2001}
2002
/**
 * Handles a VDMA control command from the guest and completes it synchronously.
 *
 * ENABLE/DISABLE/FLUSH are currently no-ops that simply report success; the
 * watchdog control re-arms/stops the watchdog timer (when compiled in).
 *
 * @param   pVdma   The VDMA host state.
 * @param   pCmd    The control command (completed before return).
 * @param   cbCmd   Size of the command (unused).
 */
void vboxVDMAControl(struct VBOXVDMAHOST *pVdma, PVBOXVDMA_CTL pCmd, uint32_t cbCmd)
{
#if 1
    PHGSMIINSTANCE pIns = pVdma->pHgsmi;

    switch (pCmd->enmCtl)
    {
        case VBOXVDMA_CTL_TYPE_ENABLE:
            pCmd->i32Result = VINF_SUCCESS;
            break;
        case VBOXVDMA_CTL_TYPE_DISABLE:
            pCmd->i32Result = VINF_SUCCESS;
            break;
        case VBOXVDMA_CTL_TYPE_FLUSH:
            pCmd->i32Result = VINF_SUCCESS;
            break;
#ifdef VBOX_VDMA_WITH_WATCHDOG
        case VBOXVDMA_CTL_TYPE_WATCHDOG:
            /* u32Offset doubles as the timeout in milliseconds for this control */
            pCmd->i32Result = vboxVDMAWatchDogCtl(pVdma, pCmd->u32Offset);
            break;
#endif
        default:
            AssertBreakpoint();
            pCmd->i32Result = VERR_NOT_SUPPORTED;
    }

    int rc = VBoxSHGSMICommandComplete (pIns, pCmd);
    AssertRC(rc);
#else
    /* test asynchronous completion */
    VBOXVDMACMD_SUBMIT_CONTEXT Context;
    Context.pVdma = pVdma;
    Context.Cmd.enmType = VBOXVDMAPIPE_CMD_TYPE_DMACTL;
    Context.Cmd.u.pCtl = pCmd;

    int rc = vboxVDMAPipeModifyClient(&pVdma->Pipe, vboxVDMACommandSubmitCb, &Context);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        Assert(Context.bQueued);
        if (Context.bQueued)
        {
            /* success */
            return;
        }
        rc = VERR_OUT_OF_RESOURCES;
    }

    /* failure */
    Assert(RT_FAILURE(rc));
    PHGSMIINSTANCE pIns = pVdma->pHgsmi;
    pCmd->i32Result = rc;
    int tmpRc = VBoxSHGSMICommandComplete (pIns, pCmd);
    AssertRC(tmpRc);

#endif
}
2060
/**
 * Entry point for a guest-submitted DMA command descriptor.
 *
 * Chromium commands are diverted to the HGCM backend first; everything else is
 * executed inline (or queued to the worker pipe in worker-thread builds). The
 * descriptor is always completed unless it was queued/marked asynchronous.
 *
 * @param   pVdma   The VDMA host state.
 * @param   pCmd    The command descriptor.
 * @param   cbCmd   Total size of the descriptor.
 */
void vboxVDMACommand(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    int rc = VERR_NOT_IMPLEMENTED;

#ifdef VBOX_WITH_CRHGSMI
    /* chromium commands are processed by the chromium HGCM thread independently from our internal cmd processing pipeline
     * this is why we process them specially */
    rc = vboxVDMACmdCheckCrCmd(pVdma, pCmd, cbCmd);
    if (rc == VINF_SUCCESS)
        return;

    if (RT_FAILURE(rc))
    {
        pCmd->rc = rc;
        rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
        AssertRC(rc);
        return;
    }
#endif

#ifndef VBOX_VDMA_WITH_WORKERTHREAD
    vboxVDMACommandProcess(pVdma, pCmd, cbCmd);
#else

# ifdef DEBUG_misha
    Assert(0);
# endif

    VBOXVDMACMD_SUBMIT_CONTEXT Context;
    Context.pVdma = pVdma;
    Context.Cmd.enmType = VBOXVDMAPIPE_CMD_TYPE_DMACMD;
    Context.Cmd.u.pDr = pCmd;

    rc = vboxVDMAPipeModifyClient(&pVdma->Pipe, vboxVDMACommandSubmitCb, &Context);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        Assert(Context.bQueued);
        if (Context.bQueued)
        {
            /* success */
            return;
        }
        rc = VERR_OUT_OF_RESOURCES;
    }
    /* failure */
    Assert(RT_FAILURE(rc));
    PHGSMIINSTANCE pIns = pVdma->pHgsmi;
    pCmd->rc = rc;
    int tmpRc = VBoxSHGSMICommandComplete (pIns, pCmd);
    AssertRC(tmpRc);
#endif
}
2114
2115/**/
2116
2117static int vdmaVBVACtlSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2118{
2119 int rc = VBoxVBVAExHCtlSubmit(&pVdma->CmdVbva, pCtl, enmSource, pfnComplete, pvComplete);
2120 if (RT_SUCCESS(rc))
2121 {
2122 if (rc == VINF_SUCCESS)
2123 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
2124 else
2125 Assert(rc == VINF_ALREADY_INITIALIZED);
2126 }
2127 else
2128 WARN(("VBoxVBVAExHCtlSubmit failed %d\n", rc));
2129
2130 return rc;
2131}
2132
2133static DECLCALLBACK(void) vboxCmdVBVACmdCtlGuestCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext)
2134{
2135 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvContext;
2136 VBOXCMDVBVA_CTL *pGCtl = (VBOXCMDVBVA_CTL*)(pCtl->u.cmd.pu8Cmd - sizeof (VBOXCMDVBVA_CTL));
2137 AssertRC(rc);
2138 pGCtl->i32Result = rc;
2139
2140 Assert(pVdma->pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
2141 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pGCtl);
2142 AssertRC(rc);
2143
2144 VBoxVBVAExHCtlFree(pVbva, pCtl);
2145}
2146
2147static int vdmaVBVACtlOpaqueSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_SOURCE enmSource, uint8_t* pu8Cmd, uint32_t cbCmd, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2148{
2149 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE);
2150 if (!pHCtl)
2151 {
2152 WARN(("VBoxVBVAExHCtlCreate failed\n"));
2153 return VERR_NO_MEMORY;
2154 }
2155
2156 pHCtl->u.cmd.pu8Cmd = pu8Cmd;
2157 pHCtl->u.cmd.cbCmd = cbCmd;
2158 int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, enmSource, pfnComplete, pvComplete);
2159 if (!RT_SUCCESS(rc))
2160 {
2161 WARN(("vdmaVBVACtlSubmit failed rc %d\n", rc));
2162 return rc;;
2163 }
2164 return VINF_SUCCESS;
2165}
2166
2167static int vdmaVBVACtlOpaqueGuestSubmit(PVBOXVDMAHOST pVdma, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
2168{
2169 Assert(cbCtl >= sizeof (VBOXCMDVBVA_CTL));
2170 VBoxSHGSMICommandMarkAsynchCompletion(pCtl);
2171 int rc = vdmaVBVACtlOpaqueSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_GUEST, (uint8_t*)(pCtl+1), cbCtl - sizeof (VBOXCMDVBVA_CTL), vboxCmdVBVACmdCtlGuestCompletion, pVdma);
2172 if (RT_SUCCESS(rc))
2173 return VINF_SUCCESS;
2174
2175 WARN(("vdmaVBVACtlOpaqueSubmit failed %d\n", rc));
2176 pCtl->i32Result = rc;
2177 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
2178 AssertRC(rc);
2179 return VINF_SUCCESS;
2180}
2181
2182static DECLCALLBACK(void) vboxCmdVBVACmdCtlHostCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvCompletion)
2183{
2184 VBOXCRCMDCTL* pVboxCtl = (VBOXCRCMDCTL*)pCtl->u.cmd.pu8Cmd;
2185 if (pVboxCtl->pfnInternal)
2186 ((PFNCRCTLCOMPLETION)pVboxCtl->pfnInternal)(pVboxCtl, pCtl->u.cmd.cbCmd, rc, pvCompletion);
2187 VBoxVBVAExHCtlFree(pVbva, pCtl);
2188}
2189
2190static int vdmaVBVACtlOpaqueHostSubmit(PVBOXVDMAHOST pVdma, struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
2191 PFNCRCTLCOMPLETION pfnCompletion,
2192 void *pvCompletion)
2193{
2194 pCmd->pfnInternal = (void(*)())pfnCompletion;
2195 int rc = vdmaVBVACtlOpaqueSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_HOST_ENABLED, (uint8_t*)pCmd, cbCmd, vboxCmdVBVACmdCtlHostCompletion, pvCompletion);
2196 if (!RT_SUCCESS(rc))
2197 {
2198 if (rc == VERR_INVALID_STATE)
2199 {
2200 pCmd->pfnInternal = NULL;
2201 PVGASTATE pVGAState = pVdma->pVGAState;
2202 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCmd, cbCmd, pfnCompletion, pvCompletion);
2203 if (!RT_SUCCESS(rc))
2204 WARN(("pfnCrHgsmiControlProcess failed %d\n", rc));
2205
2206 return rc;
2207 }
2208 WARN(("vdmaVBVACtlOpaqueSubmit failed %d\n", rc));
2209 return rc;
2210 }
2211
2212 return VINF_SUCCESS;
2213}
2214
2215static int vdmaVBVACtlEnableDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBOXCMDVBVA_CTL_ENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2216{
2217 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_GH_ENABLE_DISABLE);
2218 if (!pHCtl)
2219 {
2220 WARN(("VBoxVBVAExHCtlCreate failed\n"));
2221 return VERR_NO_MEMORY;
2222 }
2223
2224 pHCtl->u.cmd.pu8Cmd = (uint8_t*)&pEnable->Enable;
2225 pHCtl->u.cmd.cbCmd = sizeof (pEnable->Enable);
2226 int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_GUEST, pfnComplete, pvComplete);
2227 if (!RT_SUCCESS(rc))
2228 {
2229 WARN(("vdmaVBVACtlSubmit failed rc %d\n", rc));
2230 return rc;;
2231 }
2232 return VINF_SUCCESS;
2233}
2234
2235static int vdmaVBVACtlEnableDisableSubmit(PVBOXVDMAHOST pVdma, VBOXCMDVBVA_CTL_ENABLE *pEnable)
2236{
2237 VBoxSHGSMICommandMarkAsynchCompletion(&pEnable->Hdr);
2238 int rc = vdmaVBVACtlEnableDisableSubmitInternal(pVdma, pEnable, vboxCmdVBVACmdCtlGuestCompletion, pVdma);
2239 if (RT_SUCCESS(rc))
2240 return VINF_SUCCESS;
2241
2242 WARN(("vdmaVBVACtlEnableDisableSubmitInternal failed %d\n", rc));
2243 pEnable->Hdr.i32Result = rc;
2244 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, &pEnable->Hdr);
2245 AssertRC(rc);
2246 return VINF_SUCCESS;
2247}
2248
2249static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext)
2250{
2251 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvContext;
2252 pData->rc = rc;
2253 rc = RTSemEventSignal(pData->hEvent);
2254 if (!RT_SUCCESS(rc))
2255 WARN(("RTSemEventSignal failed %d\n", rc));
2256}
2257
2258static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource)
2259{
2260 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
2261 Data.rc = VERR_NOT_IMPLEMENTED;
2262 int rc = RTSemEventCreate(&Data.hEvent);
2263 if (!RT_SUCCESS(rc))
2264 {
2265 WARN(("RTSemEventCreate failed %d\n", rc));
2266 return rc;
2267 }
2268
2269 rc = vdmaVBVACtlSubmit(pVdma, pCtl, enmSource, vdmaVBVACtlSubmitSyncCompletion, &Data);
2270 if (RT_SUCCESS(rc))
2271 {
2272 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
2273 if (RT_SUCCESS(rc))
2274 {
2275 rc = Data.rc;
2276 if (!RT_SUCCESS(rc))
2277 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
2278 }
2279 else
2280 WARN(("RTSemEventWait failed %d\n", rc));
2281 }
2282 else
2283 WARN(("vdmaVBVACtlSubmit failed %d\n", rc));
2284
2285 RTSemEventDestroy(Data.hEvent);
2286
2287 return rc;
2288}
2289
2290static int vdmaVBVAPause(PVBOXVDMAHOST pVdma)
2291{
2292 VBVAEXHOSTCTL Ctl;
2293 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE;
2294 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST_ANY);
2295}
2296
2297static int vdmaVBVAResume(PVBOXVDMAHOST pVdma)
2298{
2299 VBVAEXHOSTCTL Ctl;
2300 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME;
2301 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST_ANY);
2302}
2303
2304static int vboxVDMACmdSubmitPerform(struct VBOXVDMAHOST *pVdma)
2305{
2306 int rc = VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva);
2307 switch (rc)
2308 {
2309 case VINF_SUCCESS:
2310 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
2311 case VINF_ALREADY_INITIALIZED:
2312 case VINF_EOF:
2313 case VERR_INVALID_STATE:
2314 return VINF_SUCCESS;
2315 default:
2316 Assert(!RT_FAILURE(rc));
2317 return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
2318 }
2319}
2320
2321
2322int vboxCmdVBVACmdHostCtl(PPDMIDISPLAYVBVACALLBACKS pInterface,
2323 struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
2324 PFNCRCTLCOMPLETION pfnCompletion,
2325 void *pvCompletion)
2326{
2327 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2328 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
2329 return vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, pfnCompletion, pvCompletion);
2330}
2331
2332int vboxCmdVBVACmdCtl(PVGASTATE pVGAState, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
2333{
2334 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
2335 int rc = VINF_SUCCESS;
2336 switch (pCtl->u32Type)
2337 {
2338 case VBOXCMDVBVACTL_TYPE_3DCTL:
2339 return vdmaVBVACtlOpaqueGuestSubmit(pVdma, pCtl, cbCtl);
2340 case VBOXCMDVBVACTL_TYPE_ENABLE:
2341 if (cbCtl != sizeof (VBOXCMDVBVA_CTL_ENABLE))
2342 {
2343 WARN(("incorrect enable size\n"));
2344 rc = VERR_INVALID_PARAMETER;
2345 break;
2346 }
2347 return vdmaVBVACtlEnableDisableSubmit(pVdma, (VBOXCMDVBVA_CTL_ENABLE*)pCtl);
2348 default:
2349 WARN(("unsupported type\n"));
2350 rc = VERR_INVALID_PARAMETER;
2351 break;
2352 }
2353
2354 pCtl->i32Result = rc;
2355 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
2356 AssertRC(rc);
2357 return VINF_SUCCESS;
2358}
2359
2360int vboxCmdVBVACmdSubmit(PVGASTATE pVGAState)
2361{
2362 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
2363 {
2364 WARN(("vdma VBVA is disabled\n"));
2365 return VERR_INVALID_STATE;
2366 }
2367
2368 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
2369}
2370
2371int vboxCmdVBVACmdFlush(PVGASTATE pVGAState)
2372{
2373 WARN(("flush\n"));
2374 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
2375 {
2376 WARN(("vdma VBVA is disabled\n"));
2377 return VERR_INVALID_STATE;
2378 }
2379 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
2380}
2381
2382void vboxCmdVBVACmdTimer(PVGASTATE pVGAState)
2383{
2384 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
2385 return;
2386 vboxVDMACmdSubmitPerform(pVGAState->pVdma);
2387}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette