VirtualBox

source: vbox/trunk/src/VBox/Additions/WINNT/Graphics/Video/mp/wddm/VBoxMPVbva.cpp@ 50928

Last change on this file since 50928 was 50928, checked in by vboxsync, 11 years ago

wddm/DevVga/crOpenGL: new command submission working for 3D

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 41.3 KB
Line 
1/* $Id: VBoxMPVbva.cpp 50928 2014-03-31 14:14:11Z vboxsync $ */
2
3/** @file
4 * VBox WDDM Miniport driver
5 */
6
7/*
8 * Copyright (C) 2011-2012 Oracle Corporation
9 *
10 * This file is part of VirtualBox Open Source Edition (OSE), as
11 * available from http://www.virtualbox.org. This file is free software;
12 * you can redistribute it and/or modify it under the terms of the GNU
13 * General Public License (GPL) as published by the Free Software
14 * Foundation, in version 2 as it comes in the "COPYING" file of the
15 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
16 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
17 */
18
19#include "VBoxMPWddm.h"
20#include "common/VBoxMPCommon.h"
21
22/*
23 * Public hardware buffer methods.
24 */
25int vboxVbvaEnable (PVBOXMP_DEVEXT pDevExt, VBOXVBVAINFO *pVbva)
26{
27 if (VBoxVBVAEnable(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx,
28 pVbva->Vbva.pVBVA, pVbva->srcId))
29 return VINF_SUCCESS;
30
31 WARN(("VBoxVBVAEnable failed!"));
32 return VERR_GENERAL_FAILURE;
33}
34
35int vboxVbvaDisable (PVBOXMP_DEVEXT pDevExt, VBOXVBVAINFO *pVbva)
36{
37 VBoxVBVADisable(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, pVbva->srcId);
38 return VINF_SUCCESS;
39}
40
41int vboxVbvaCreate(PVBOXMP_DEVEXT pDevExt, VBOXVBVAINFO *pVbva, ULONG offBuffer, ULONG cbBuffer, D3DDDI_VIDEO_PRESENT_SOURCE_ID srcId)
42{
43 memset(pVbva, 0, sizeof(VBOXVBVAINFO));
44
45 KeInitializeSpinLock(&pVbva->Lock);
46
47 int rc = VBoxMPCmnMapAdapterMemory(VBoxCommonFromDeviceExt(pDevExt),
48 (void**)&pVbva->Vbva.pVBVA,
49 offBuffer,
50 cbBuffer);
51 if (RT_SUCCESS(rc))
52 {
53 Assert(pVbva->Vbva.pVBVA);
54 VBoxVBVASetupBufferContext(&pVbva->Vbva, offBuffer, cbBuffer);
55 pVbva->srcId = srcId;
56 }
57 else
58 {
59 WARN(("VBoxMPCmnMapAdapterMemory failed rc %d", rc));
60 }
61
62
63 return rc;
64}
65
66int vboxVbvaDestroy(PVBOXMP_DEVEXT pDevExt, VBOXVBVAINFO *pVbva)
67{
68 int rc = VINF_SUCCESS;
69 VBoxMPCmnUnmapAdapterMemory(VBoxCommonFromDeviceExt(pDevExt), (void**)&pVbva->Vbva.pVBVA);
70 memset(pVbva, 0, sizeof (VBOXVBVAINFO));
71 return rc;
72}
73
74int vboxVbvaReportDirtyRect (PVBOXMP_DEVEXT pDevExt, PVBOXWDDM_SOURCE pSrc, RECT *pRectOrig)
75{
76 VBVACMDHDR hdr;
77
78 RECT rect = *pRectOrig;
79
80// if (rect.left < 0) rect.left = 0;
81// if (rect.top < 0) rect.top = 0;
82// if (rect.right > (int)ppdev->cxScreen) rect.right = ppdev->cxScreen;
83// if (rect.bottom > (int)ppdev->cyScreen) rect.bottom = ppdev->cyScreen;
84
85 hdr.x = (int16_t)rect.left;
86 hdr.y = (int16_t)rect.top;
87 hdr.w = (uint16_t)(rect.right - rect.left);
88 hdr.h = (uint16_t)(rect.bottom - rect.top);
89
90 hdr.x += (int16_t)pSrc->VScreenPos.x;
91 hdr.y += (int16_t)pSrc->VScreenPos.y;
92
93 if (VBoxVBVAWrite(&pSrc->Vbva.Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, &hdr, sizeof(hdr)))
94 return VINF_SUCCESS;
95
96 WARN(("VBoxVBVAWrite failed"));
97 return VERR_GENERAL_FAILURE;
98}
99
100/* command vbva ring buffer */
101
102/* customized VBVA implementation */
103
104/* Forward declarations of internal functions. */
105static void vboxHwBufferPlaceDataAt(PVBVAEXBUFFERCONTEXT pCtx, const void *p,
106 uint32_t cb, uint32_t offset);
107static bool vboxHwBufferWrite(PVBVAEXBUFFERCONTEXT pCtx,
108 PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
109 const void *p, uint32_t cb);
110
111DECLINLINE(void) vboxVBVAExFlush(struct VBVAEXBUFFERCONTEXT *pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx)
112{
113 pCtx->pfnFlush(pCtx, pHGSMICtx, pCtx->pvFlush);
114}
115
/** Notifies the host about a new command by writing its HGSMI offset to
 *  the adapter I/O port; the host processes it asynchronously. */
static int vboxCmdVbvaSubmitHgsmi(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, HGSMIOFFSET offDr)
{
    VBoxVideoCmnPortWriteUlong(pHGSMICtx->port, offDr);
    return VINF_SUCCESS;
}
/* HGSMI is currently the only submission backend. */
#define vboxCmdVbvaSubmit vboxCmdVbvaSubmitHgsmi
122
123static VBOXCMDVBVA_CTL * vboxCmdVbvaCtlCreate(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, uint32_t cbCtl)
124{
125 Assert(cbCtl >= sizeof (VBOXCMDVBVA_CTL));
126 return (VBOXCMDVBVA_CTL*)VBoxSHGSMICommandAlloc(&pHGSMICtx->heapCtx, cbCtl, HGSMI_CH_VBVA, VBVA_CMDVBVA_CTL);
127}
128
/** Returns a control command allocated by vboxCmdVbvaCtlCreate to the
 *  SHGSMI heap. */
static void vboxCmdVbvaCtlFree(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CTL * pCtl)
{
    VBoxSHGSMICommandFree(&pHGSMICtx->heapCtx, pCtl);
}
133
134static int vboxCmdVbvaCtlSubmitSync(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CTL * pCtl)
135{
136 const VBOXSHGSMIHEADER* pHdr = VBoxSHGSMICommandPrepSynch(&pHGSMICtx->heapCtx, pCtl);
137 if (!pHdr)
138 {
139 WARN(("VBoxSHGSMICommandPrepSynch returnd NULL"));
140 return VERR_INVALID_PARAMETER;
141 }
142
143 HGSMIOFFSET offCmd = VBoxSHGSMICommandOffset(&pHGSMICtx->heapCtx, pHdr);
144 if (offCmd == HGSMIOFFSET_VOID)
145 {
146 WARN(("VBoxSHGSMICommandOffset returnd NULL"));
147 VBoxSHGSMICommandCancelSynch(&pHGSMICtx->heapCtx, pHdr);
148 return VERR_INVALID_PARAMETER;
149 }
150
151 int rc = vboxCmdVbvaSubmit(pHGSMICtx, offCmd);
152 if (RT_SUCCESS(rc))
153 {
154 rc = VBoxSHGSMICommandDoneSynch(&pHGSMICtx->heapCtx, pHdr);
155 if (RT_SUCCESS(rc))
156 {
157 rc = pCtl->i32Result;
158 if (!RT_SUCCESS(rc))
159 WARN(("pCtl->i32Result %d", pCtl->i32Result));
160
161 return rc;
162 }
163 else
164 WARN(("VBoxSHGSMICommandDoneSynch returnd %d", rc));
165 }
166 else
167 WARN(("vboxCmdVbvaSubmit returnd %d", rc));
168
169 VBoxSHGSMICommandCancelSynch(&pHGSMICtx->heapCtx, pHdr);
170
171 return rc;
172}
173
174static int vboxCmdVbvaCtlSubmitAsync(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CTL * pCtl, FNVBOXSHGSMICMDCOMPLETION pfnCompletion, void *pvCompletion)
175{
176 const VBOXSHGSMIHEADER* pHdr = VBoxSHGSMICommandPrepAsynch(&pHGSMICtx->heapCtx, pCtl, pfnCompletion, pvCompletion, VBOXSHGSMI_FLAG_GH_ASYNCH_IRQ);
177 HGSMIOFFSET offCmd = VBoxSHGSMICommandOffset(&pHGSMICtx->heapCtx, pHdr);
178 if (offCmd == HGSMIOFFSET_VOID)
179 {
180 WARN(("VBoxSHGSMICommandOffset returnd NULL"));
181 VBoxSHGSMICommandCancelAsynch(&pHGSMICtx->heapCtx, pHdr);
182 return VERR_INVALID_PARAMETER;
183 }
184
185 int rc = vboxCmdVbvaSubmit(pHGSMICtx, offCmd);
186 if (RT_SUCCESS(rc))
187 {
188 VBoxSHGSMICommandDoneAsynch(&pHGSMICtx->heapCtx, pHdr);
189 return rc;
190 }
191 else
192 WARN(("vboxCmdVbvaSubmit returnd %d", rc));
193
194 VBoxSHGSMICommandCancelAsynch(&pHGSMICtx->heapCtx, pHdr);
195
196 return rc;
197}
198
199static int vboxVBVAExCtlSubmitEnableDisable(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, bool fEnable)
200{
201 VBOXCMDVBVA_CTL_ENABLE *pCtl = (VBOXCMDVBVA_CTL_ENABLE*)vboxCmdVbvaCtlCreate(pHGSMICtx, sizeof (*pCtl));
202 if (!pCtl)
203 {
204 WARN(("vboxCmdVbvaCtlCreate failed"));
205 return VERR_NO_MEMORY;
206 }
207
208 pCtl->Hdr.u32Type = VBOXCMDVBVACTL_TYPE_ENABLE;
209 pCtl->Hdr.i32Result = VERR_NOT_IMPLEMENTED;
210 memset(&pCtl->Enable, 0, sizeof (pCtl->Enable));
211 pCtl->Enable.u32Flags = fEnable? VBVA_F_ENABLE: VBVA_F_DISABLE;
212 pCtl->Enable.u32Offset = pCtx->offVRAMBuffer;
213 pCtl->Enable.i32Result = VERR_NOT_SUPPORTED;
214 pCtl->Enable.u32Flags |= VBVA_F_ABSOFFSET;
215
216 int rc = vboxCmdVbvaCtlSubmitSync(pHGSMICtx, &pCtl->Hdr);
217 if (RT_SUCCESS(rc))
218 {
219 rc = pCtl->Hdr.i32Result;
220 if (!RT_SUCCESS(rc))
221 WARN(("vboxCmdVbvaCtlSubmitSync Disable failed %d", rc));
222 }
223 else
224 WARN(("vboxCmdVbvaCtlSubmitSync returnd %d", rc));
225
226 vboxCmdVbvaCtlFree(pHGSMICtx, &pCtl->Hdr);
227
228 return rc;
229}
230
231/*
232 * Public hardware buffer methods.
233 */
234RTDECL(int) VBoxVBVAExEnable(PVBVAEXBUFFERCONTEXT pCtx,
235 PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
236 VBVABUFFER *pVBVA)
237{
238 int rc = VERR_GENERAL_FAILURE;
239
240 LogFlowFunc(("pVBVA %p\n", pVBVA));
241
242#if 0 /* All callers check this */
243 if (ppdev->bHGSMISupported)
244#endif
245 {
246 LogFunc(("pVBVA %p vbva off 0x%x\n", pVBVA, pCtx->offVRAMBuffer));
247
248 pVBVA->hostFlags.u32HostEvents = 0;
249 pVBVA->hostFlags.u32SupportedOrders = 0;
250 pVBVA->off32Data = 0;
251 pVBVA->off32Free = 0;
252 memset(pVBVA->aRecords, 0, sizeof (pVBVA->aRecords));
253 pVBVA->indexRecordFirst = 0;
254 pVBVA->indexRecordFree = 0;
255 pVBVA->cbPartialWriteThreshold = 256;
256 pVBVA->cbData = pCtx->cbBuffer - sizeof (VBVABUFFER) + sizeof (pVBVA->au8Data);
257
258 pCtx->fHwBufferOverflow = false;
259 pCtx->pRecord = NULL;
260 pCtx->pVBVA = pVBVA;
261
262 rc = vboxVBVAExCtlSubmitEnableDisable(pCtx, pHGSMICtx, true);
263 }
264
265 if (!RT_SUCCESS(rc))
266 {
267 WARN(("enable failed %d", rc));
268 VBoxVBVAExDisable(pCtx, pHGSMICtx);
269 }
270
271 return rc;
272}
273
274RTDECL(void) VBoxVBVAExDisable(PVBVAEXBUFFERCONTEXT pCtx,
275 PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx)
276{
277 LogFlowFunc(("\n"));
278
279 pCtx->fHwBufferOverflow = false;
280 pCtx->pRecord = NULL;
281 pCtx->pVBVA = NULL;
282
283 vboxVBVAExCtlSubmitEnableDisable(pCtx, pHGSMICtx, false);
284
285 return;
286}
287
288RTDECL(bool) VBoxVBVAExBufferBeginUpdate(PVBVAEXBUFFERCONTEXT pCtx,
289 PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx)
290{
291 bool bRc = false;
292
293 // LogFunc(("flags = 0x%08X\n", pCtx->pVBVA? pCtx->pVBVA->u32HostEvents: -1));
294
295 Assert(pCtx->pVBVA);
296 /* we do not use u32HostEvents & VBVA_F_MODE_ENABLED,
297 * VBVA stays enabled once ENABLE call succeeds, until it is disabled with DISABLED call */
298// if ( pCtx->pVBVA
299// && (pCtx->pVBVA->hostFlags.u32HostEvents & VBVA_F_MODE_ENABLED))
300 {
301 uint32_t indexRecordNext;
302
303 Assert(!pCtx->fHwBufferOverflow);
304 Assert(pCtx->pRecord == NULL);
305
306 indexRecordNext = (pCtx->pVBVA->indexRecordFree + 1) % VBVA_MAX_RECORDS;
307
308 if (indexRecordNext == pCtx->indexRecordFirstUncompleted)
309 {
310 /* All slots in the records queue are used. */
311 vboxVBVAExFlush (pCtx, pHGSMICtx);
312 }
313
314 if (indexRecordNext == pCtx->indexRecordFirstUncompleted)
315 {
316 /* Even after flush there is no place. Fail the request. */
317 LogFunc(("no space in the queue of records!!! first %d, last %d\n",
318 indexRecordNext, pCtx->pVBVA->indexRecordFree));
319 }
320 else
321 {
322 /* Initialize the record. */
323 VBVARECORD *pRecord = &pCtx->pVBVA->aRecords[pCtx->pVBVA->indexRecordFree];
324
325 pRecord->cbRecord = VBVA_F_RECORD_PARTIAL;
326
327 pCtx->pVBVA->indexRecordFree = indexRecordNext;
328
329 // LogFunc(("indexRecordNext = %d\n", indexRecordNext));
330
331 /* Remember which record we are using. */
332 pCtx->pRecord = pRecord;
333
334 bRc = true;
335 }
336 }
337
338 return bRc;
339}
340
341RTDECL(void) VBoxVBVAExBufferEndUpdate(PVBVAEXBUFFERCONTEXT pCtx)
342{
343 VBVARECORD *pRecord;
344
345 // LogFunc(("\n"));
346
347 Assert(pCtx->pVBVA);
348
349 pRecord = pCtx->pRecord;
350 Assert(pRecord && (pRecord->cbRecord & VBVA_F_RECORD_PARTIAL));
351
352 /* Mark the record completed. */
353 pRecord->cbRecord &= ~VBVA_F_RECORD_PARTIAL;
354
355 pCtx->fHwBufferOverflow = false;
356 pCtx->pRecord = NULL;
357
358 return;
359}
360
361DECLINLINE(bool) vboxVBVAExIsEntryInRange(uint32_t u32First, uint32_t u32Entry, uint32_t u32Free)
362{
363 return ( u32First != u32Free
364 && (
365 (u32First < u32Free && u32Entry >= u32First && u32Entry < u32Free)
366 || (u32First > u32Free && (u32Entry >= u32First || u32Entry < u32Free))
367 )
368 );
369}
370
371DECLINLINE(bool) vboxVBVAExIsEntryInRangeOrEmpty(uint32_t u32First, uint32_t u32Entry, uint32_t u32Free)
372{
373 return vboxVBVAExIsEntryInRange(u32First, u32Entry, u32Free)
374 || ( u32First == u32Entry
375 && u32Entry == u32Free);
376}
377#ifdef DEBUG
378
379DECLINLINE(void) vboxHwBufferVerifyCompleted(PVBVAEXBUFFERCONTEXT pCtx)
380{
381 VBVABUFFER *pVBVA = pCtx->pVBVA;
382 if (!vboxVBVAExIsEntryInRangeOrEmpty(pCtx->indexRecordFirstUncompleted, pVBVA->indexRecordFirst, pVBVA->indexRecordFree))
383 {
384 WARN(("invalid record set"));
385 }
386
387 if (!vboxVBVAExIsEntryInRangeOrEmpty(pCtx->off32DataUncompleted, pVBVA->off32Data, pVBVA->off32Free))
388 {
389 WARN(("invalid data set"));
390 }
391}
392#endif
393
394/*
395 * Private operations.
396 */
397static uint32_t vboxHwBufferAvail(PVBVAEXBUFFERCONTEXT pCtx, const VBVABUFFER *pVBVA)
398{
399 int32_t i32Diff = pCtx->off32DataUncompleted - pVBVA->off32Free;
400
401 return i32Diff > 0? i32Diff: pVBVA->cbData + i32Diff;
402}
403
404static uint32_t vboxHwBufferContiguousAvail(PVBVAEXBUFFERCONTEXT pCtx, const VBVABUFFER *pVBVA)
405{
406 int32_t i32Diff = pCtx->off32DataUncompleted - pVBVA->off32Free;
407
408 return i32Diff > 0 ? i32Diff: pVBVA->cbData - pVBVA->off32Free;
409}
410
411static void vboxHwBufferPlaceDataAt(PVBVAEXBUFFERCONTEXT pCtx, const void *p,
412 uint32_t cb, uint32_t offset)
413{
414 VBVABUFFER *pVBVA = pCtx->pVBVA;
415 uint32_t u32BytesTillBoundary = pVBVA->cbData - offset;
416 uint8_t *dst = &pVBVA->au8Data[offset];
417 int32_t i32Diff = cb - u32BytesTillBoundary;
418
419 if (i32Diff <= 0)
420 {
421 /* Chunk will not cross buffer boundary. */
422 memcpy (dst, p, cb);
423 }
424 else
425 {
426 /* Chunk crosses buffer boundary. */
427 memcpy (dst, p, u32BytesTillBoundary);
428 memcpy (&pVBVA->au8Data[0], (uint8_t *)p + u32BytesTillBoundary, i32Diff);
429 }
430
431 return;
432}
433
/**
 * Writes cb bytes into the data ring for the currently open record,
 * asking the host to flush and falling back to partial writes when the
 * ring is too full.
 *
 * @returns true on success; false when VBVA is not set up or the buffer
 *          overflowed (fHwBufferOverflow is then latched).
 */
static bool vboxHwBufferWrite(PVBVAEXBUFFERCONTEXT pCtx,
                              PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
                              const void *p, uint32_t cb)
{
    VBVARECORD *pRecord;
    uint32_t cbHwBufferAvail;

    uint32_t cbWritten = 0;

    VBVABUFFER *pVBVA = pCtx->pVBVA;
    Assert(pVBVA);

    if (!pVBVA || pCtx->fHwBufferOverflow)
    {
        return false;
    }

    /* A record must have been opened with VBoxVBVAExBufferBeginUpdate. */
    Assert(pVBVA->indexRecordFirst != pVBVA->indexRecordFree);
    Assert(pCtx->indexRecordFirstUncompleted != pVBVA->indexRecordFree);

    pRecord = pCtx->pRecord;
    Assert(pRecord && (pRecord->cbRecord & VBVA_F_RECORD_PARTIAL));

    cbHwBufferAvail = vboxHwBufferAvail(pCtx, pVBVA);

    while (cb > 0)
    {
        uint32_t cbChunk = cb;

        if (cbChunk >= cbHwBufferAvail)
        {
            /* Not enough room: ask the host to process (and thereby free)
             * ring space, then re-check. */
            LogFunc(("1) avail %d, chunk %d\n", cbHwBufferAvail, cbChunk));

            vboxVBVAExFlush(pCtx, pHGSMICtx);

            cbHwBufferAvail = vboxHwBufferAvail(pCtx, pVBVA);

            if (cbChunk >= cbHwBufferAvail)
            {
                WARN(("no place for %d bytes. Only %d bytes available after flush. Going to partial writes.\n",
                            cb, cbHwBufferAvail));

                if (cbHwBufferAvail <= pVBVA->cbPartialWriteThreshold)
                {
                    /* Below the partial-write threshold even after a
                     * flush: give up and latch the overflow state. */
                    WARN(("Buffer overflow!!!\n"));
                    pCtx->fHwBufferOverflow = true;
                    Assert(false);
                    return false;
                }

                /* Write what fits, keeping the threshold in reserve. */
                cbChunk = cbHwBufferAvail - pVBVA->cbPartialWriteThreshold;
            }
        }

        Assert(cbChunk <= cb);
        Assert(cbChunk <= vboxHwBufferAvail(pCtx, pVBVA));

        vboxHwBufferPlaceDataAt (pCtx, (uint8_t *)p + cbWritten, cbChunk, pVBVA->off32Free);

        /* Advance the free pointer (with wrap) and account the chunk to
         * the open record. */
        pVBVA->off32Free = (pVBVA->off32Free + cbChunk) % pVBVA->cbData;
        pRecord->cbRecord += cbChunk;
        cbHwBufferAvail -= cbChunk;

        cb -= cbChunk;
        cbWritten += cbChunk;
    }

    return true;
}
508
509/*
510 * Public writer to the hardware buffer.
511 */
512RTDECL(uint32_t) VBoxVBVAExGetFreeTail(PVBVAEXBUFFERCONTEXT pCtx)
513{
514 VBVABUFFER *pVBVA = pCtx->pVBVA;
515 if (pVBVA->off32Data <= pVBVA->off32Free)
516 return pVBVA->cbData - pVBVA->off32Free;
517 return 0;
518}
519
520RTDECL(void*) VBoxVBVAExAllocContiguous(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, uint32_t cb)
521{
522 VBVARECORD *pRecord;
523 uint32_t cbHwBufferContiguousAvail;
524 uint32_t offset;
525
526 VBVABUFFER *pVBVA = pCtx->pVBVA;
527 Assert(pVBVA);
528
529 if (!pVBVA || pCtx->fHwBufferOverflow)
530 {
531 return NULL;
532 }
533
534 Assert(pVBVA->indexRecordFirst != pVBVA->indexRecordFree);
535 Assert(pCtx->indexRecordFirstUncompleted != pVBVA->indexRecordFree);
536
537 pRecord = pCtx->pRecord;
538 Assert(pRecord && (pRecord->cbRecord & VBVA_F_RECORD_PARTIAL));
539
540 // LogFunc(("%d\n", cb));
541
542 if (pVBVA->cbData < cb)
543 {
544 WARN(("requested to allocate buffer of size %d bigger than the VBVA ring buffer size %d", cb, pVBVA->cbData));
545 return NULL;
546 }
547
548 cbHwBufferContiguousAvail = vboxHwBufferContiguousAvail(pCtx, pVBVA);
549
550 if (cbHwBufferContiguousAvail < cb)
551 {
552 if (cb < pVBVA->cbData - pVBVA->off32Free)
553 {
554 /* the entire contiguous part is smaller than the requested buffer */
555 return NULL;
556 }
557
558 vboxVBVAExFlush(pCtx, pHGSMICtx);
559
560 cbHwBufferContiguousAvail = vboxHwBufferContiguousAvail(pCtx, pVBVA);
561 if (cbHwBufferContiguousAvail < cb)
562 {
563 /* this is really bad - the host did not clean up buffer even after we requested it to flush */
564 WARN(("Host did not clean up the buffer!"));
565 return NULL;
566 }
567 }
568
569 offset = pVBVA->off32Free;
570
571 pVBVA->off32Free = (pVBVA->off32Free + cb) % pVBVA->cbData;
572 pRecord->cbRecord += cb;
573
574 return &pVBVA->au8Data[offset];
575}
576
577RTDECL(bool) VBoxVBVAExIsProcessing(PVBVAEXBUFFERCONTEXT pCtx)
578{
579 uint32_t u32HostEvents = pCtx->pVBVA->hostFlags.u32HostEvents;
580 return !!(u32HostEvents & VBVA_F_STATE_PROCESSING);
581}
582
583RTDECL(void) VBoxVBVAExCBufferCompleted(PVBVAEXBUFFERCONTEXT pCtx)
584{
585 VBVABUFFER *pVBVA = pCtx->pVBVA;
586 uint32_t cbBuffer = pVBVA->aRecords[pCtx->indexRecordFirstUncompleted].cbRecord;
587 pCtx->indexRecordFirstUncompleted = (pCtx->indexRecordFirstUncompleted + 1) % VBVA_MAX_RECORDS;
588 pCtx->off32DataUncompleted = (pCtx->off32DataUncompleted + cbBuffer) % pVBVA->cbData;
589}
590
/**
 * Public writer: appends cb bytes at pv to the record currently opened
 * with VBoxVBVAExBufferBeginUpdate, flushing to the host as needed.
 *
 * @returns true on success, false on buffer overflow (see
 *          vboxHwBufferWrite).
 */
RTDECL(bool) VBoxVBVAExWrite(PVBVAEXBUFFERCONTEXT pCtx,
                             PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
                             const void *pv, uint32_t cb)
{
    return vboxHwBufferWrite(pCtx, pHGSMICtx, pv, cb);
}
597
598RTDECL(bool) VBoxVBVAExOrderSupported(PVBVAEXBUFFERCONTEXT pCtx, unsigned code)
599{
600 VBVABUFFER *pVBVA = pCtx->pVBVA;
601
602 if (!pVBVA)
603 {
604 return false;
605 }
606
607 if (pVBVA->hostFlags.u32SupportedOrders & (1 << code))
608 {
609 return true;
610 }
611
612 return false;
613}
614
615RTDECL(void) VBoxVBVAExSetupBufferContext(PVBVAEXBUFFERCONTEXT pCtx,
616 uint32_t offVRAMBuffer,
617 uint32_t cbBuffer,
618 PFNVBVAEXBUFFERFLUSH pfnFlush,
619 void *pvFlush)
620{
621 memset(pCtx, 0, RT_OFFSETOF(VBVAEXBUFFERCONTEXT, pVBVA));
622 pCtx->offVRAMBuffer = offVRAMBuffer;
623 pCtx->cbBuffer = cbBuffer;
624 pCtx->pfnFlush = pfnFlush;
625 pCtx->pvFlush = pvFlush;
626}
627
628static void* vboxVBVAExIterCur(PVBVAEXBUFFERITERBASE pIter, struct VBVABUFFER *pVBVA, uint32_t *pcbBuffer, bool *pfProcessed)
629{
630 uint32_t cbRecord = pVBVA->aRecords[pIter->iCurRecord].cbRecord;
631 if (cbRecord == VBVA_F_RECORD_PARTIAL)
632 return NULL;
633 if (pcbBuffer)
634 *pcbBuffer = cbRecord;
635 if (pfProcessed)
636 *pfProcessed = !vboxVBVAExIsEntryInRange(pVBVA->indexRecordFirst, pIter->iCurRecord, pVBVA->indexRecordFree);
637 return &pVBVA->au8Data[pIter->off32CurCmd];
638}
639
640DECLINLINE(uint32_t) vboxVBVAExSubst(uint32_t x, uint32_t val, uint32_t maxVal)
641{
642 int32_t result = (int32_t)(x - val);
643 return result >= 0 ? (uint32_t)result : maxVal - (((uint32_t)(-result)) % maxVal);
644}
645
/**
 * Initializes a backward iterator positioned at the most recently
 * submitted, not yet completed record, for walking towards older records.
 * When there are no uncompleted records the iterator is parked at the free
 * position so the first Next call returns NULL.
 */
RTDECL(void) VBoxVBVAExBIterInit(PVBVAEXBUFFERCONTEXT pCtx, PVBVAEXBUFFERBACKWARDITER pIter)
{
    struct VBVABUFFER *pVBVA = pCtx->pVBVA;
    pIter->Base.pCtx = pCtx;
    /* The newest record is the one just before indexRecordFree. */
    uint32_t iCurRecord = vboxVBVAExSubst(pVBVA->indexRecordFree, 1, VBVA_MAX_RECORDS);
    if (vboxVBVAExIsEntryInRange(pCtx->indexRecordFirstUncompleted, iCurRecord, pVBVA->indexRecordFree))
    {
        /* even if the command gets completed by the time we're doing the pCtx->pVBVA->aRecords[iCurRecord].cbRecord below,
         * the pCtx->pVBVA->aRecords[iCurRecord].cbRecord will still be valid, as it can only be modified by a submitter,
         * and we are in a submitter context now */
        pIter->Base.iCurRecord = iCurRecord;
        pIter->Base.off32CurCmd = vboxVBVAExSubst(pVBVA->off32Free, pCtx->pVBVA->aRecords[iCurRecord].cbRecord, pVBVA->cbData);
    }
    else
    {
        /* no data */
        pIter->Base.iCurRecord = pVBVA->indexRecordFree;
        pIter->Base.off32CurCmd = pVBVA->off32Free;
    }
}
666
/**
 * Steps the backward iterator: returns the command it currently points at
 * (with optional size and processed flag) and moves one record towards
 * older submissions. Returns NULL once the iterator leaves the range of
 * uncompleted records.
 */
RTDECL(void*) VBoxVBVAExBIterNext(PVBVAEXBUFFERBACKWARDITER pIter, uint32_t *pcbBuffer, bool *pfProcessed)
{
    PVBVAEXBUFFERCONTEXT pCtx = pIter->Base.pCtx;
    struct VBVABUFFER *pVBVA = pCtx->pVBVA;
    uint32_t indexRecordFirstUncompleted = pCtx->indexRecordFirstUncompleted;
    if (!vboxVBVAExIsEntryInRange(indexRecordFirstUncompleted, pIter->Base.iCurRecord, pVBVA->indexRecordFree))
        return NULL;

    void *pvBuffer = vboxVBVAExIterCur(&pIter->Base, pVBVA, pcbBuffer, pfProcessed);
    AssertRelease(pvBuffer);

    /* even if the command gets completed by the time we're doing the pCtx->pVBVA->aRecords[pIter->Base.iCurRecord].cbRecord below,
     * the pCtx->pVBVA->aRecords[pIter->Base.iCurRecord].cbRecord will still be valid, as it can only be modified by a submitter,
     * and we are in a submitter context now */
    /* Step backwards: first to the previous record index, then rewind the
     * data offset by that (new) record's size. */
    pIter->Base.iCurRecord = vboxVBVAExSubst(pIter->Base.iCurRecord, 1, VBVA_MAX_RECORDS);
    pIter->Base.off32CurCmd = vboxVBVAExSubst(pIter->Base.off32CurCmd, pCtx->pVBVA->aRecords[pIter->Base.iCurRecord].cbRecord, pVBVA->cbData);

    return pvBuffer;
}
686
687RTDECL(void) VBoxVBVAExCFIterInit(PVBVAEXBUFFERCONTEXT pCtx, PVBVAEXBUFFERFORWARDITER pIter)
688{
689 pIter->Base.pCtx = pCtx;
690 pIter->Base.iCurRecord = pCtx->indexRecordFirstUncompleted;
691 pIter->Base.off32CurCmd = pCtx->off32DataUncompleted;
692}
693
694RTDECL(void*) VBoxVBVAExCFIterNext(PVBVAEXBUFFERFORWARDITER pIter, uint32_t *pcbBuffer, bool *pfProcessed)
695{
696 PVBVAEXBUFFERCONTEXT pCtx = pIter->Base.pCtx;
697 struct VBVABUFFER *pVBVA = pCtx->pVBVA;
698 uint32_t indexRecordFree = pVBVA->indexRecordFree;
699 if (!vboxVBVAExIsEntryInRange(pCtx->indexRecordFirstUncompleted, pIter->Base.iCurRecord, indexRecordFree))
700 return NULL;
701
702 uint32_t cbBuffer;
703 void *pvData = vboxVBVAExIterCur(&pIter->Base, pVBVA, &cbBuffer, pfProcessed);
704 if (!pvData)
705 return NULL;
706
707 pIter->Base.iCurRecord = (pIter->Base.iCurRecord + 1) % VBVA_MAX_RECORDS;
708 pIter->Base.off32CurCmd = (pIter->Base.off32CurCmd + cbBuffer) % pVBVA->cbData;
709
710 if (pcbBuffer)
711 *pcbBuffer = cbBuffer;
712
713 return pvData;
714}
715
716/**/
717
718int VBoxCmdVbvaEnable(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
719{
720 return VBoxVBVAExEnable(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, pVbva->Vbva.pVBVA);
721}
722
723int VBoxCmdVbvaDisable(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
724{
725 VBoxVBVAExDisable(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx);
726 return VINF_SUCCESS;
727}
728
729int VBoxCmdVbvaDestroy(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
730{
731 int rc = VINF_SUCCESS;
732 VBoxMPCmnUnmapAdapterMemory(VBoxCommonFromDeviceExt(pDevExt), (void**)&pVbva->Vbva.pVBVA);
733 memset(pVbva, 0, sizeof (*pVbva));
734 return rc;
735}
736
737static void vboxCmdVbvaDdiNotifyCompleteIrq(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, UINT u32FenceId, DXGK_INTERRUPT_TYPE enmComplType)
738{
739 DXGKARGCB_NOTIFY_INTERRUPT_DATA notify;
740 memset(&notify, 0, sizeof(DXGKARGCB_NOTIFY_INTERRUPT_DATA));
741 switch (enmComplType)
742 {
743 case DXGK_INTERRUPT_DMA_COMPLETED:
744 notify.InterruptType = DXGK_INTERRUPT_DMA_COMPLETED;
745 notify.DmaCompleted.SubmissionFenceId = u32FenceId;
746 notify.DmaCompleted.NodeOrdinal = pVbva->idNode;
747 break;
748
749 case DXGK_INTERRUPT_DMA_PREEMPTED:
750 notify.InterruptType = DXGK_INTERRUPT_DMA_PREEMPTED;
751 notify.DmaPreempted.PreemptionFenceId = u32FenceId;
752 notify.DmaPreempted.NodeOrdinal = pVbva->idNode;
753 notify.DmaPreempted.LastCompletedFenceId = pVbva->u32FenceCompleted;
754 break;
755
756 case DXGK_INTERRUPT_DMA_FAULTED:
757 Assert(0);
758 notify.InterruptType = DXGK_INTERRUPT_DMA_FAULTED;
759 notify.DmaFaulted.FaultedFenceId = u32FenceId;
760 notify.DmaFaulted.Status = STATUS_UNSUCCESSFUL; /* @todo: better status ? */
761 notify.DmaFaulted.NodeOrdinal = pVbva->idNode;
762 break;
763
764 default:
765 WARN(("unrecognized completion type %d", enmComplType));
766 break;
767 }
768
769 pDevExt->u.primary.DxgkInterface.DxgkCbNotifyInterrupt(pDevExt->u.primary.DxgkInterface.DeviceHandle, &notify);
770}
771
/** Context passed to vboxCmdVbvaDdiNotifyCompleteCb via
 *  DxgkCbSynchronizeExecution. */
typedef struct VBOXCMDVBVA_NOTIFYCOMPLETED_CB
{
    PVBOXMP_DEVEXT pDevExt;           /* device extension */
    VBOXCMDVBVA *pVbva;               /* command vbva state */
    volatile UINT *pu32FenceId;       /* in/out: fence to report; zeroed once reported */
    DXGK_INTERRUPT_TYPE enmComplType; /* notification type to raise */
} VBOXCMDVBVA_NOTIFYCOMPLETED_CB, *PVBOXCMDVBVA_NOTIFYCOMPLETED_CB;
779
780static BOOLEAN vboxCmdVbvaDdiNotifyCompleteCb(PVOID pvContext)
781{
782 PVBOXCMDVBVA_NOTIFYCOMPLETED_CB pData = (PVBOXCMDVBVA_NOTIFYCOMPLETED_CB)pvContext;
783 if (*pData->pu32FenceId)
784 {
785 UINT u32FenceId = *pData->pu32FenceId;
786 *pData->pu32FenceId = 0;
787
788 vboxCmdVbvaDdiNotifyCompleteIrq(pData->pDevExt, pData->pVbva, u32FenceId, pData->enmComplType);
789
790 pData->pDevExt->u.primary.DxgkInterface.DxgkCbQueueDpc(pData->pDevExt->u.primary.DxgkInterface.DeviceHandle);
791
792 return TRUE;
793 }
794
795 return FALSE;
796}
797
798static int vboxCmdVbvaDdiNotifyComplete(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, volatile UINT *pu32FenceId, DXGK_INTERRUPT_TYPE enmComplType)
799{
800 VBOXCMDVBVA_NOTIFYCOMPLETED_CB Data;
801 Data.pDevExt = pDevExt;
802 Data.pVbva = pVbva;
803 Data.pu32FenceId = pu32FenceId;
804 Data.enmComplType = enmComplType;
805 BOOLEAN bDummy;
806 NTSTATUS Status = pDevExt->u.primary.DxgkInterface.DxgkCbSynchronizeExecution(
807 pDevExt->u.primary.DxgkInterface.DeviceHandle,
808 vboxCmdVbvaDdiNotifyCompleteCb,
809 &Data,
810 0, /* IN ULONG MessageNumber */
811 &bDummy);
812 if (!NT_SUCCESS(Status))
813 {
814 WARN(("DxgkCbSynchronizeExecution failed Status %#x", Status));
815 return VERR_GENERAL_FAILURE;
816 }
817 return Status;
818}
819
820static int vboxCmdVbvaFlush(PVBOXMP_DEVEXT pDevExt, HGSMIGUESTCOMMANDCONTEXT *pCtx, bool fBufferOverflow)
821{
822 /* Issue the flush command. */
823 VBVACMDVBVAFLUSH *pFlush = (VBVACMDVBVAFLUSH*)VBoxHGSMIBufferAlloc(pCtx,
824 sizeof (VBVACMDVBVAFLUSH),
825 HGSMI_CH_VBVA,
826 VBVA_CMDVBVA_FLUSH);
827 if (!pFlush)
828 {
829 WARN(("VBoxHGSMIBufferAlloc failed\n"));
830 return VERR_OUT_OF_RESOURCES;
831 }
832
833 pFlush->u32Flags = fBufferOverflow ? VBVACMDVBVAFLUSH_F_GUEST_BUFFER_OVERFLOW : 0;
834
835 VBoxHGSMIBufferSubmit(pCtx, pFlush);
836
837 VBoxHGSMIBufferFree(pCtx, pFlush);
838
839 return VINF_SUCCESS;
840}
841
/** Context for vboxCmdVbvaCheckCompletedIrqCb. */
typedef struct VBOXCMDVBVA_CHECK_COMPLETED_CB
{
    PVBOXMP_DEVEXT pDevExt; /* device extension */
    VBOXCMDVBVA *pVbva;     /* may be NULL: then no fence is sampled */
    uint32_t u32FenceID;    /* out: last completed fence id (0 if pVbva is NULL) */
} VBOXCMDVBVA_CHECK_COMPLETED_CB;
848
849static BOOLEAN vboxCmdVbvaCheckCompletedIrqCb(PVOID pContext)
850{
851 VBOXCMDVBVA_CHECK_COMPLETED_CB *pCompleted = (VBOXCMDVBVA_CHECK_COMPLETED_CB*)pContext;
852 BOOLEAN bRc = DxgkDdiInterruptRoutineNew(pCompleted->pDevExt, 0);
853 if (pCompleted->pVbva)
854 pCompleted->u32FenceID = pCompleted->pVbva->u32FenceCompleted;
855 return bRc;
856}
857
858
859static uint32_t vboxCmdVbvaCheckCompleted(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, bool fPingHost, HGSMIGUESTCOMMANDCONTEXT *pCtx, bool fBufferOverflow)
860{
861 if (fPingHost)
862 vboxCmdVbvaFlush(pDevExt, pCtx, fBufferOverflow);
863
864 VBOXCMDVBVA_CHECK_COMPLETED_CB context;
865 context.pDevExt = pDevExt;
866 context.pVbva = pVbva;
867 context.u32FenceID = 0;
868 BOOLEAN bRet;
869 NTSTATUS Status = pDevExt->u.primary.DxgkInterface.DxgkCbSynchronizeExecution(
870 pDevExt->u.primary.DxgkInterface.DeviceHandle,
871 vboxCmdVbvaCheckCompletedIrqCb,
872 &context,
873 0, /* IN ULONG MessageNumber */
874 &bRet);
875 Assert(Status == STATUS_SUCCESS);
876
877 return context.u32FenceID;
878}
879
880DECLCALLBACK(void) voxCmdVbvaFlushCb(struct VBVAEXBUFFERCONTEXT *pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, void *pvFlush)
881{
882 PVBOXMP_DEVEXT pDevExt = (PVBOXMP_DEVEXT)pvFlush;
883
884 vboxCmdVbvaCheckCompleted(pDevExt, NULL, true /*fPingHost*/, pHGSMICtx, true /*fBufferOverflow*/);
885}
886
887int VBoxCmdVbvaCreate(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, ULONG offBuffer, ULONG cbBuffer)
888{
889 memset(pVbva, 0, sizeof (*pVbva));
890
891 int rc = VBoxMPCmnMapAdapterMemory(VBoxCommonFromDeviceExt(pDevExt),
892 (void**)&pVbva->Vbva.pVBVA,
893 offBuffer,
894 cbBuffer);
895 if (RT_SUCCESS(rc))
896 {
897 Assert(pVbva->Vbva.pVBVA);
898 VBoxVBVAExSetupBufferContext(&pVbva->Vbva, offBuffer, cbBuffer, voxCmdVbvaFlushCb, pDevExt);
899 }
900 else
901 {
902 WARN(("VBoxMPCmnMapAdapterMemory failed rc %d", rc));
903 }
904
905 return rc;
906}
907
/**
 * Writes a command into the command vbva ring and, if the host is not
 * already processing the ring, kicks it with a VBVA_CMDVBVA_SUBMIT
 * notification.
 *
 * Splitting a command across the ring boundary is not supported: when no
 * contiguous region of cbCmd bytes is available, the free tail of the ring
 * is consumed by a NOP record and the allocation is retried at the ring
 * start.
 *
 * @returns IPRT status code.
 */
int VBoxCmdVbvaSubmit(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, struct VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
{
    int rc = VINF_SUCCESS;

    Assert(pCmd->u32FenceID);

    pCmd->u8State = VBOXCMDVBVA_STATE_SUBMITTED;
    pVbva->u32FenceSubmitted = pCmd->u32FenceID;

    if (VBoxVBVAExGetSize(&pVbva->Vbva) < cbCmd)
    {
        WARN(("buffer does not fit the vbva buffer, we do not support splitting buffers"));
        return VERR_NOT_SUPPORTED;
    }

    if (!VBoxVBVAExBufferBeginUpdate(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx))
    {
        WARN(("VBoxVBVAExBufferBeginUpdate failed!"));
        return VERR_GENERAL_FAILURE;
    }

    void* pvBuffer = VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbCmd);
    if (!pvBuffer)
    {
        WARN(("failed to allocate contiguous buffer, trying nopping the tail"));
        uint32_t cbTail = VBoxVBVAExGetFreeTail(&pVbva->Vbva);
        if (!cbTail)
        {
            WARN(("this is not a free tail case, cbTail is NULL"));
            return VERR_BUFFER_OVERFLOW;
        }

        Assert(cbTail < cbCmd);

        /* Claim the whole free tail as one record and mark it NOP so the
         * host skips it. */
        pvBuffer = VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbTail);

        Assert(pvBuffer);

        *((uint8_t*)pvBuffer) = VBOXCMDVBVA_OPTYPE_NOP;

        VBoxVBVAExBufferEndUpdate(&pVbva->Vbva);

        /* Open a fresh record; the allocation now starts at the ring
         * start, where a contiguous region should be available. */
        if (!VBoxVBVAExBufferBeginUpdate(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx))
        {
            WARN(("VBoxVBVAExBufferBeginUpdate 2 failed!"));
            return VERR_GENERAL_FAILURE;
        }

        pvBuffer = VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbCmd);
        if (!pvBuffer)
        {
            WARN(("failed to allocate contiguous buffer, failing"));
            return VERR_GENERAL_FAILURE;
        }
    }

    Assert(pvBuffer);

    memcpy(pvBuffer, pCmd, cbCmd);

    VBoxVBVAExBufferEndUpdate(&pVbva->Vbva);

    if (!VBoxVBVAExIsProcessing(&pVbva->Vbva))
    {
        /* The host is idle: issue an explicit submit notification to wake
         * it up. */
        HGSMIGUESTCOMMANDCONTEXT *pCtx = &VBoxCommonFromDeviceExt(pDevExt)->guestCtx;
        VBVACMDVBVASUBMIT *pSubmit = (VBVACMDVBVASUBMIT*)VBoxHGSMIBufferAlloc(pCtx,
                                                                              sizeof (VBVACMDVBVASUBMIT),
                                                                              HGSMI_CH_VBVA,
                                                                              VBVA_CMDVBVA_SUBMIT);
        if (!pSubmit)
        {
            WARN(("VBoxHGSMIBufferAlloc failed\n"));
            return VERR_OUT_OF_RESOURCES;
        }

        pSubmit->u32Reserved = 0;

        VBoxHGSMIBufferSubmit(pCtx, pSubmit);

        VBoxHGSMIBufferFree(pCtx, pSubmit);
    }

    return VINF_SUCCESS;
}
993
/**
 * Attempts to cancel the not-yet-processed command with the given fence:
 * walks the submitted commands from newest to oldest and atomically flips
 * the matching command from SUBMITTED to CANCELLED.
 *
 * @returns true if the command was cancelled and a preemption notification
 *          raised; false when it was not found or the host already picked
 *          it up (IN_PROGRESS).
 */
bool VBoxCmdVbvaPreempt(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, uint32_t u32FenceID)
{
    VBVAEXBUFFERBACKWARDITER Iter;
    VBoxVBVAExBIterInit(&pVbva->Vbva, &Iter);

    uint32_t cbBuffer;
    bool fProcessed;
    uint8_t* pu8Cmd;

    while ((pu8Cmd = (uint8_t*)VBoxVBVAExBIterNext(&Iter, &cbBuffer, &fProcessed)) != NULL)
    {
        /* NOP fillers carry no fence and cannot be preempted. */
        if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
            continue;

        VBOXCMDVBVA_HDR *pCmd = (VBOXCMDVBVA_HDR*)pu8Cmd;

        if (pCmd->u32FenceID != u32FenceID)
            continue;

        if (!ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_CANCELLED, VBOXCMDVBVA_STATE_SUBMITTED))
        {
            /* The host already started processing; too late to cancel. */
            Assert(pCmd->u8State == VBOXCMDVBVA_STATE_IN_PROGRESS);
            break;
        }

        /* we have canceled the command successfully */
        vboxCmdVbvaDdiNotifyComplete(pDevExt, pVbva, &pCmd->u32FenceID, DXGK_INTERRUPT_DMA_PREEMPTED);
        return true;
    }

    return false;
}
1026
/**
 * IRQ-time scan of the VBVA ring: completes every command the host has
 * already processed, reporting either DMA completion or DMA preemption to
 * the DDI for each.
 *
 * Runs at device interrupt level (called from the ISR path); it must not
 * block.  Iterates forward from the oldest unconsumed entry and stops at the
 * first entry the host has not yet processed.
 *
 * @returns true if at least one command was completed or preempted.
 */
bool VBoxCmdVbvaCheckCompletedIrq(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
{
    VBVAEXBUFFERFORWARDITER Iter;
    VBoxVBVAExCFIterInit(&pVbva->Vbva, &Iter);

    bool fHasCommandsCompletedPreempted = false;
    bool fProcessed;
    uint8_t* pu8Cmd;


    while ((pu8Cmd = (uint8_t*)VBoxVBVAExCFIterNext(&Iter, NULL, &fProcessed)) != NULL)
    {
        /* Entries are consumed in submission order; the first unprocessed
         * one ends the scan. */
        if (!fProcessed)
            break;

        /* NOP entries are ring padding with no completion to report. */
        if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
            continue;

        VBOXCMDVBVA_HDR *pCmd = (VBOXCMDVBVA_HDR*)pu8Cmd;
        /* Snapshot state and fence BEFORE releasing the ring slot below:
         * once VBoxVBVAExCBufferCompleted runs the entry may be reused. */
        uint8_t u8State = pCmd->u8State;
        uint32_t u32FenceID = pCmd->u32FenceID;

        Assert(u8State == VBOXCMDVBVA_STATE_IN_PROGRESS
                || u8State == VBOXCMDVBVA_STATE_CANCELLED);
        Assert(u32FenceID);
        VBoxVBVAExCBufferCompleted(&pVbva->Vbva);
        DXGK_INTERRUPT_TYPE enmDdiNotify;

        if (u8State == VBOXCMDVBVA_STATE_IN_PROGRESS)
        {
            /* Track the most recent completed fence (guarded defensively
             * even though the assert above expects a non-zero fence). */
            if (u32FenceID)
                pVbva->u32FenceCompleted = u32FenceID;
            enmDdiNotify = DXGK_INTERRUPT_DMA_COMPLETED;
        }
        else
        {
            Assert(u8State == VBOXCMDVBVA_STATE_CANCELLED);
            enmDdiNotify = DXGK_INTERRUPT_DMA_PREEMPTED;
            /* to prevent concurrent notifications from DdiPreemptCommand */
            pCmd->u32FenceID = 0;
        }

        if (u32FenceID)
            vboxCmdVbvaDdiNotifyCompleteIrq(pDevExt, pVbva, u32FenceID, enmDdiNotify);

        fHasCommandsCompletedPreempted = true;
    }

#ifdef DEBUG
    vboxHwBufferVerifyCompleted(&pVbva->Vbva);
#endif

    return fHasCommandsCompletedPreempted;
}
1081
1082uint32_t VBoxCmdVbvaCheckCompleted(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, bool fPingHost)
1083{
1084 return vboxCmdVbvaCheckCompleted(pDevExt, pVbva, fPingHost, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, false /* fBufferOverflow */);
1085}
1086
/* NOTE(review): legacy, preprocessor-disabled implementation of the paging
 * transfer element builder based on packed VBOXCMDVBVA_SYSMEMEL run-length
 * entries.  Superseded by the plain page-index variant of
 * VBoxCVDdiPTransferVRamSysBuildEls below; kept for reference only. */
#if 0
/* Packs a run of physically contiguous pages from the MDL starting at iPfn
 * into one SYSMEMEL element (first page split across iPage1/iPage2, plus a
 * count of following contiguous pages).  Returns the number of pages still
 * left to store. */
static uint32_t vboxCVDdiSysMemElBuild(VBOXCMDVBVA_SYSMEMEL *pEl, PMDL pMdl, uint32_t iPfn, uint32_t cPages)
{
    PFN_NUMBER cur = MmGetMdlPfnArray(pMdl)[iPfn];
    uint32_t cbEl = sizeof (*pEl);
    uint32_t cStoredPages = 1;
    PFN_NUMBER next;
    /* The first PFN is split into a 20-bit low part and a high part. */
    pEl->iPage1 = (uint32_t)(cur & 0xfffff);
    pEl->iPage2 = (uint32_t)(cur >> 20);
    --cPages;
    /* Extend the run while successive PFNs stay contiguous, up to the
     * per-element maximum. */
    for ( ; cPages && cStoredPages < VBOXCMDVBVA_SYSMEMEL_CPAGES_MAX; --cPages, ++cStoredPages, cur = next)
    {
        next = MmGetMdlPfnArray(pMdl)[iPfn+cStoredPages];
        if (next != cur+1)
            break;
    }

    Assert(cStoredPages);
    pEl->cPagesAfterFirst = cStoredPages - 1;

    return cPages;
}

/* Fills pCmd->aSysMem with as many SYSMEMEL runs as fit in cbBuffer.
 * Returns the number of command bytes used; *pcPagesWritten receives the
 * number of MDL pages actually encoded. */
uint32_t VBoxCVDdiPTransferVRamSysBuildEls(VBOXCMDVBVA_PAGING_TRANSFER *pCmd, PMDL pMdl, uint32_t iPfn, uint32_t cPages, uint32_t cbBuffer, uint32_t *pcPagesWritten)
{
    uint32_t cInitPages = cPages;
    uint32_t cbInitBuffer = cbBuffer;
    uint32_t cEls = 0;
    VBOXCMDVBVA_SYSMEMEL *pEl = pCmd->aSysMem;

    Assert(cbBuffer >= sizeof (VBOXCMDVBVA_PAGING_TRANSFER));

    cbBuffer -= RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, aSysMem);

    for (; cPages && cbBuffer >= sizeof (VBOXCMDVBVA_SYSMEMEL); ++cEls, cbBuffer-=sizeof (VBOXCMDVBVA_SYSMEMEL), ++pEl)
    {
        cPages = vboxCVDdiSysMemElBuild(pEl, pMdl, iPfn + cInitPages - cPages, cPages);
    }

    *pcPagesWritten = cInitPages - cPages;
    return cbInitBuffer - cbBuffer;
}
#endif
1130
1131uint32_t VBoxCVDdiPTransferVRamSysBuildEls(VBOXCMDVBVA_PAGING_TRANSFER *pCmd, PMDL pMdl, uint32_t iPfn, uint32_t cPages, uint32_t cbBuffer, uint32_t *pcPagesWritten)
1132{
1133 uint32_t cbInitBuffer = cbBuffer;
1134 uint32_t i = 0;
1135 VBOXCMDVBVAPAGEIDX *pPageNumbers = pCmd->Data.aPageNumbers;
1136
1137 cbBuffer -= RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers);
1138
1139 for (; i < cPages && cbBuffer >= sizeof (*pPageNumbers); ++i, cbBuffer -= sizeof (*pPageNumbers))
1140 {
1141 pPageNumbers[i] = (VBOXCMDVBVAPAGEIDX)(MmGetMdlPfnArray(pMdl)[iPfn + i]);
1142 }
1143
1144 *pcPagesWritten = i;
1145 Assert(cbInitBuffer - cbBuffer == RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers[i]));
1146 return cbInitBuffer - cbBuffer;
1147}
1148
1149
1150int vboxCmdVbvaConConnect(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
1151 uint32_t crVersionMajor, uint32_t crVersionMinor,
1152 uint32_t *pu32ClientID)
1153{
1154 VBOXCMDVBVA_CTL_3DCTL_CONNECT *pConnect = (VBOXCMDVBVA_CTL_3DCTL_CONNECT*)vboxCmdVbvaCtlCreate(pHGSMICtx, sizeof (VBOXCMDVBVA_CTL_3DCTL_CONNECT));
1155 if (!pConnect)
1156 {
1157 WARN(("vboxCmdVbvaCtlCreate failed"));
1158 return VERR_OUT_OF_RESOURCES;
1159 }
1160 pConnect->Hdr.u32Type = VBOXCMDVBVACTL_TYPE_3DCTL;
1161 pConnect->Hdr.i32Result = VERR_NOT_SUPPORTED;
1162 pConnect->Connect.Hdr.u32Type = VBOXCMDVBVA3DCTL_TYPE_CONNECT;
1163 pConnect->Connect.Hdr.u32CmdClientId = 0;
1164 pConnect->Connect.u32MajorVersion = crVersionMajor;
1165 pConnect->Connect.u32MinorVersion = crVersionMinor;
1166 pConnect->Connect.u64Pid = (uint64_t)PsGetCurrentProcessId();
1167
1168 int rc = vboxCmdVbvaCtlSubmitSync(pHGSMICtx, &pConnect->Hdr);
1169 if (RT_SUCCESS(rc))
1170 {
1171 rc = pConnect->Hdr.i32Result;
1172 if (RT_SUCCESS(rc))
1173 {
1174 Assert(pConnect->Connect.Hdr.u32CmdClientId);
1175 *pu32ClientID = pConnect->Connect.Hdr.u32CmdClientId;
1176 }
1177 else
1178 WARN(("VBOXCMDVBVA3DCTL_TYPE_CONNECT Disable failed %d", rc));
1179 }
1180 else
1181 WARN(("vboxCmdVbvaCtlSubmitSync returnd %d", rc));
1182
1183 vboxCmdVbvaCtlFree(pHGSMICtx, &pConnect->Hdr);
1184
1185 return rc;
1186}
1187
1188int vboxCmdVbvaConDisconnect(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, uint32_t u32ClientID)
1189{
1190 VBOXCMDVBVA_CTL_3DCTL *pDisconnect = (VBOXCMDVBVA_CTL_3DCTL*)vboxCmdVbvaCtlCreate(pHGSMICtx, sizeof (VBOXCMDVBVA_CTL_3DCTL));
1191 if (!pDisconnect)
1192 {
1193 WARN(("vboxCmdVbvaCtlCreate failed"));
1194 return VERR_OUT_OF_RESOURCES;
1195 }
1196 pDisconnect->Hdr.u32Type = VBOXCMDVBVACTL_TYPE_3DCTL;
1197 pDisconnect->Hdr.i32Result = VERR_NOT_SUPPORTED;
1198 pDisconnect->Ctl.u32Type = VBOXCMDVBVA3DCTL_TYPE_DISCONNECT;
1199 pDisconnect->Ctl.u32CmdClientId = u32ClientID;
1200
1201 int rc = vboxCmdVbvaCtlSubmitSync(pHGSMICtx, &pDisconnect->Hdr);
1202 if (RT_SUCCESS(rc))
1203 {
1204 rc = pDisconnect->Hdr.i32Result;
1205 if (!RT_SUCCESS(rc))
1206 WARN(("VBOXCMDVBVA3DCTL_TYPE_DISCONNECT Disable failed %d", rc));
1207 }
1208 else
1209 WARN(("vboxCmdVbvaCtlSubmitSync returnd %d", rc));
1210
1211 vboxCmdVbvaCtlFree(pHGSMICtx, &pDisconnect->Hdr);
1212
1213 return rc;
1214}
1215
1216int VBoxCmdVbvaConConnect(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva,
1217 uint32_t crVersionMajor, uint32_t crVersionMinor,
1218 uint32_t *pu32ClientID)
1219{
1220 return vboxCmdVbvaConConnect(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, crVersionMajor, crVersionMinor, pu32ClientID);
1221}
1222
1223int VBoxCmdVbvaConDisconnect(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, uint32_t u32ClientID)
1224{
1225 return vboxCmdVbvaConDisconnect(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, u32ClientID);
1226}
1227
1228VBOXCMDVBVA_CRCMD_CMD* vboxCmdVbvaConCmdAlloc(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, uint32_t cbCmd)
1229{
1230 VBOXCMDVBVA_CTL_3DCTL_CMD *pCmd = (VBOXCMDVBVA_CTL_3DCTL_CMD*)vboxCmdVbvaCtlCreate(pHGSMICtx, sizeof (VBOXCMDVBVA_CTL_3DCTL_CMD) + cbCmd);
1231 if (!pCmd)
1232 {
1233 WARN(("vboxCmdVbvaCtlCreate failed"));
1234 return NULL;
1235 }
1236 pCmd->Hdr.u32Type = VBOXCMDVBVACTL_TYPE_3DCTL;
1237 pCmd->Hdr.i32Result = VERR_NOT_SUPPORTED;
1238 pCmd->Cmd.Hdr.u32Type = VBOXCMDVBVA3DCTL_TYPE_CMD;
1239 pCmd->Cmd.Hdr.u32CmdClientId = 0;
1240 pCmd->Cmd.Cmd.u8OpCode = VBOXCMDVBVA_OPTYPE_CRCMD;
1241 pCmd->Cmd.Cmd.u8Flags = 0;
1242 pCmd->Cmd.Cmd.u8State = VBOXCMDVBVA_STATE_SUBMITTED;
1243 pCmd->Cmd.Cmd.u.i8Result = -1;
1244 pCmd->Cmd.Cmd.u32FenceID = 0;
1245
1246 return (VBOXCMDVBVA_CRCMD_CMD*)(pCmd+1);
1247}
1248
1249void vboxCmdVbvaConCmdFree(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CRCMD_CMD *pCmd)
1250{
1251 VBOXCMDVBVA_CTL_3DCTL_CMD *pHdr = ((VBOXCMDVBVA_CTL_3DCTL_CMD*)pCmd)-1;
1252 vboxCmdVbvaCtlFree(pHGSMICtx, &pHdr->Hdr);
1253}
1254
1255int vboxCmdVbvaConSubmitAsync(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CRCMD_CMD* pCmd, FNVBOXSHGSMICMDCOMPLETION pfnCompletion, void *pvCompletion)
1256{
1257 VBOXCMDVBVA_CTL_3DCTL_CMD *pHdr = ((VBOXCMDVBVA_CTL_3DCTL_CMD*)pCmd)-1;
1258 return vboxCmdVbvaCtlSubmitAsync(pHGSMICtx, &pHdr->Hdr, pfnCompletion, pvCompletion);
1259}
1260
1261VBOXCMDVBVA_CRCMD_CMD* VBoxCmdVbvaConCmdAlloc(PVBOXMP_DEVEXT pDevExt, uint32_t cbCmd)
1262{
1263 return vboxCmdVbvaConCmdAlloc(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbCmd);
1264}
1265
1266void VBoxCmdVbvaConCmdFree(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA_CRCMD_CMD *pCmd)
1267{
1268 vboxCmdVbvaConCmdFree(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, pCmd);
1269}
1270
1271int VBoxCmdVbvaConCmdSubmitAsync(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA_CRCMD_CMD* pCmd, FNVBOXSHGSMICMDCOMPLETION pfnCompletion, void *pvCompletion)
1272{
1273 return vboxCmdVbvaConSubmitAsync(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, pCmd, pfnCompletion, pvCompletion);
1274}
1275
1276int VBoxCmdVbvaConCmdCompletionData(void *pvCmd, VBOXCMDVBVA_CRCMD_CMD **ppCmd)
1277{
1278 VBOXCMDVBVA_CTL_3DCTL_CMD *pCmd = (VBOXCMDVBVA_CTL_3DCTL_CMD*)pvCmd;
1279 if (ppCmd)
1280 *ppCmd = (VBOXCMDVBVA_CRCMD_CMD*)(pCmd+1);
1281 return pCmd->Hdr.i32Result;
1282}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette