VirtualBox

source: vbox/trunk/src/VBox/Additions/WINNT/Graphics/Video/mp/wddm/VBoxMPVbva.cpp@ 50913

Last change on this file since 50913 was 50913, checked in by vboxsync, 11 years ago

wddm/graphics: new command submission working for 2D, more testing needed

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 41.3 KB
Line 
1/* $Id: VBoxMPVbva.cpp 50913 2014-03-27 17:56:50Z vboxsync $ */
2
3/** @file
4 * VBox WDDM Miniport driver
5 */
6
7/*
8 * Copyright (C) 2011-2012 Oracle Corporation
9 *
10 * This file is part of VirtualBox Open Source Edition (OSE), as
11 * available from http://www.virtualbox.org. This file is free software;
12 * you can redistribute it and/or modify it under the terms of the GNU
13 * General Public License (GPL) as published by the Free Software
14 * Foundation, in version 2 as it comes in the "COPYING" file of the
15 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
16 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
17 */
18
19#include "VBoxMPWddm.h"
20#include "common/VBoxMPCommon.h"
21
22/*
23 * Public hardware buffer methods.
24 */
25int vboxVbvaEnable (PVBOXMP_DEVEXT pDevExt, VBOXVBVAINFO *pVbva)
26{
27 if (VBoxVBVAEnable(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx,
28 pVbva->Vbva.pVBVA, pVbva->srcId))
29 return VINF_SUCCESS;
30
31 WARN(("VBoxVBVAEnable failed!"));
32 return VERR_GENERAL_FAILURE;
33}
34
35int vboxVbvaDisable (PVBOXMP_DEVEXT pDevExt, VBOXVBVAINFO *pVbva)
36{
37 VBoxVBVADisable(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, pVbva->srcId);
38 return VINF_SUCCESS;
39}
40
41int vboxVbvaCreate(PVBOXMP_DEVEXT pDevExt, VBOXVBVAINFO *pVbva, ULONG offBuffer, ULONG cbBuffer, D3DDDI_VIDEO_PRESENT_SOURCE_ID srcId)
42{
43 memset(pVbva, 0, sizeof(VBOXVBVAINFO));
44
45 KeInitializeSpinLock(&pVbva->Lock);
46
47 int rc = VBoxMPCmnMapAdapterMemory(VBoxCommonFromDeviceExt(pDevExt),
48 (void**)&pVbva->Vbva.pVBVA,
49 offBuffer,
50 cbBuffer);
51 if (RT_SUCCESS(rc))
52 {
53 Assert(pVbva->Vbva.pVBVA);
54 VBoxVBVASetupBufferContext(&pVbva->Vbva, offBuffer, cbBuffer);
55 pVbva->srcId = srcId;
56 }
57 else
58 {
59 WARN(("VBoxMPCmnMapAdapterMemory failed rc %d", rc));
60 }
61
62
63 return rc;
64}
65
66int vboxVbvaDestroy(PVBOXMP_DEVEXT pDevExt, VBOXVBVAINFO *pVbva)
67{
68 int rc = VINF_SUCCESS;
69 VBoxMPCmnUnmapAdapterMemory(VBoxCommonFromDeviceExt(pDevExt), (void**)&pVbva->Vbva.pVBVA);
70 memset(pVbva, 0, sizeof (VBOXVBVAINFO));
71 return rc;
72}
73
74int vboxVbvaReportDirtyRect (PVBOXMP_DEVEXT pDevExt, PVBOXWDDM_SOURCE pSrc, RECT *pRectOrig)
75{
76 VBVACMDHDR hdr;
77
78 RECT rect = *pRectOrig;
79
80// if (rect.left < 0) rect.left = 0;
81// if (rect.top < 0) rect.top = 0;
82// if (rect.right > (int)ppdev->cxScreen) rect.right = ppdev->cxScreen;
83// if (rect.bottom > (int)ppdev->cyScreen) rect.bottom = ppdev->cyScreen;
84
85 hdr.x = (int16_t)rect.left;
86 hdr.y = (int16_t)rect.top;
87 hdr.w = (uint16_t)(rect.right - rect.left);
88 hdr.h = (uint16_t)(rect.bottom - rect.top);
89
90 hdr.x += (int16_t)pSrc->VScreenPos.x;
91 hdr.y += (int16_t)pSrc->VScreenPos.y;
92
93 if (VBoxVBVAWrite(&pSrc->Vbva.Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, &hdr, sizeof(hdr)))
94 return VINF_SUCCESS;
95
96 WARN(("VBoxVBVAWrite failed"));
97 return VERR_GENERAL_FAILURE;
98}
99
100/* command vbva ring buffer */
101
102/* customized VBVA implementation */
103
104/* Forward declarations of internal functions. */
105static void vboxHwBufferPlaceDataAt(PVBVAEXBUFFERCONTEXT pCtx, const void *p,
106 uint32_t cb, uint32_t offset);
107static bool vboxHwBufferWrite(PVBVAEXBUFFERCONTEXT pCtx,
108 PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
109 const void *p, uint32_t cb);
110
/** Invokes the flush callback registered for this buffer context. */
DECLINLINE(void) vboxVBVAExFlush(struct VBVAEXBUFFERCONTEXT *pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx)
{
    pCtx->pfnFlush(pCtx, pHGSMICtx, pCtx->pvFlush);
}
115
/**
 * Submits a prepared command to the host by writing its HGSMI offset to the
 * device I/O port.  The write itself cannot fail, so this always succeeds.
 */
static int vboxCmdVbvaSubmitHgsmi(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, HGSMIOFFSET offDr)
{
    VBoxVideoCmnPortWriteUlong(pHGSMICtx->port, offDr);
    return VINF_SUCCESS;
}
/* HGSMI is the only submission backend at the moment. */
#define vboxCmdVbvaSubmit vboxCmdVbvaSubmitHgsmi
122
123static VBOXCMDVBVA_CTL * vboxCmdVbvaCtlCreate(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, uint32_t cbCtl)
124{
125 Assert(cbCtl >= sizeof (VBOXCMDVBVA_CTL));
126 return (VBOXCMDVBVA_CTL*)VBoxSHGSMICommandAlloc(&pHGSMICtx->heapCtx, cbCtl, HGSMI_CH_VBVA, VBVA_CMDVBVA_CTL);
127}
128
/** Returns a control command allocated by vboxCmdVbvaCtlCreate() to the SHGSMI heap. */
static void vboxCmdVbvaCtlFree(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CTL * pCtl)
{
    VBoxSHGSMICommandFree(&pHGSMICtx->heapCtx, pCtl);
}
133
134static int vboxCmdVbvaCtlSubmitSync(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CTL * pCtl)
135{
136 const VBOXSHGSMIHEADER* pHdr = VBoxSHGSMICommandPrepSynch(&pHGSMICtx->heapCtx, pCtl);
137 if (!pHdr)
138 {
139 WARN(("VBoxSHGSMICommandPrepSynch returnd NULL"));
140 return VERR_INVALID_PARAMETER;
141 }
142
143 HGSMIOFFSET offCmd = VBoxSHGSMICommandOffset(&pHGSMICtx->heapCtx, pHdr);
144 if (offCmd == HGSMIOFFSET_VOID)
145 {
146 WARN(("VBoxSHGSMICommandOffset returnd NULL"));
147 VBoxSHGSMICommandCancelSynch(&pHGSMICtx->heapCtx, pHdr);
148 return VERR_INVALID_PARAMETER;
149 }
150
151 int rc = vboxCmdVbvaSubmit(pHGSMICtx, offCmd);
152 if (RT_SUCCESS(rc))
153 {
154 rc = VBoxSHGSMICommandDoneSynch(&pHGSMICtx->heapCtx, pHdr);
155 if (RT_SUCCESS(rc))
156 {
157 rc = pCtl->i32Result;
158 if (!RT_SUCCESS(rc))
159 WARN(("pCtl->i32Result %d", pCtl->i32Result));
160
161 return rc;
162 }
163 else
164 WARN(("VBoxSHGSMICommandDoneSynch returnd %d", rc));
165 }
166 else
167 WARN(("vboxCmdVbvaSubmit returnd %d", rc));
168
169 VBoxSHGSMICommandCancelSynch(&pHGSMICtx->heapCtx, pHdr);
170
171 return rc;
172}
173
174static int vboxCmdVbvaCtlSubmitAsync(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CTL * pCtl, FNVBOXSHGSMICMDCOMPLETION pfnCompletion, void *pvCompletion)
175{
176 const VBOXSHGSMIHEADER* pHdr = VBoxSHGSMICommandPrepAsynch(&pHGSMICtx->heapCtx, pCtl, pfnCompletion, pvCompletion, VBOXSHGSMI_FLAG_GH_ASYNCH_IRQ);
177 HGSMIOFFSET offCmd = VBoxSHGSMICommandOffset(&pHGSMICtx->heapCtx, pHdr);
178 if (offCmd == HGSMIOFFSET_VOID)
179 {
180 WARN(("VBoxSHGSMICommandOffset returnd NULL"));
181 VBoxSHGSMICommandCancelAsynch(&pHGSMICtx->heapCtx, pHdr);
182 return VERR_INVALID_PARAMETER;
183 }
184
185 int rc = vboxCmdVbvaSubmit(pHGSMICtx, offCmd);
186 if (RT_SUCCESS(rc))
187 {
188 VBoxSHGSMICommandDoneAsynch(&pHGSMICtx->heapCtx, pHdr);
189 return rc;
190 }
191 else
192 WARN(("vboxCmdVbvaSubmit returnd %d", rc));
193
194 VBoxSHGSMICommandCancelAsynch(&pHGSMICtx->heapCtx, pHdr);
195
196 return rc;
197}
198
/**
 * Sends a VBOXCMDVBVACTL_TYPE_ENABLE control to the host to switch the
 * command VBVA ring on or off.
 *
 * @param pCtx      Buffer context; offVRAMBuffer locates the ring in VRAM.
 * @param pHGSMICtx HGSMI guest command context used for submission.
 * @param fEnable   true to enable, false to disable.
 * @returns VBox status code (the host-reported result on successful submission).
 */
static int vboxVBVAExCtlSubmitEnableDisable(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, bool fEnable)
{
    VBOXCMDVBVA_CTL_ENABLE *pCtl = (VBOXCMDVBVA_CTL_ENABLE*)vboxCmdVbvaCtlCreate(pHGSMICtx, sizeof (*pCtl));
    if (!pCtl)
    {
        WARN(("vboxCmdVbvaCtlCreate failed"));
        return VERR_NO_MEMORY;
    }

    pCtl->Hdr.u32Type = VBOXCMDVBVACTL_TYPE_ENABLE;
    pCtl->Hdr.i32Result = VERR_NOT_IMPLEMENTED; /* pre-set; the host overwrites it */
    memset(&pCtl->Enable, 0, sizeof (pCtl->Enable));
    pCtl->Enable.u32Flags = fEnable? VBVA_F_ENABLE: VBVA_F_DISABLE;
    pCtl->Enable.u32Offset = pCtx->offVRAMBuffer;
    pCtl->Enable.i32Result = VERR_NOT_SUPPORTED;
    /* u32Offset is absolute within VRAM, not relative to the VBVA area */
    pCtl->Enable.u32Flags |= VBVA_F_ABSOFFSET;

    int rc = vboxCmdVbvaCtlSubmitSync(pHGSMICtx, &pCtl->Hdr);
    if (RT_SUCCESS(rc))
    {
        /* re-read the header result filled in by the host */
        rc = pCtl->Hdr.i32Result;
        if (!RT_SUCCESS(rc))
            WARN(("vboxCmdVbvaCtlSubmitSync Disable failed %d", rc));
    }
    else
        WARN(("vboxCmdVbvaCtlSubmitSync returnd %d", rc));

    vboxCmdVbvaCtlFree(pHGSMICtx, &pCtl->Hdr);

    return rc;
}
230
231/*
232 * Public hardware buffer methods.
233 */
/**
 * Resets the shared VBVABUFFER in VRAM to an empty state and asks the host to
 * enable the command VBVA ring.
 *
 * @param pCtx      Buffer context; offVRAMBuffer/cbBuffer must be set up
 *                  (see VBoxVBVAExSetupBufferContext).
 * @param pHGSMICtx HGSMI guest command context.
 * @param pVBVA     The mapped VBVABUFFER.
 * @returns VBox status code; on failure local state is cleared and the host
 *          is told to disable.
 */
RTDECL(int) VBoxVBVAExEnable(PVBVAEXBUFFERCONTEXT pCtx,
                             PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
                             VBVABUFFER *pVBVA)
{
    int rc = VERR_GENERAL_FAILURE;

    LogFlowFunc(("pVBVA %p\n", pVBVA));

#if 0 /* All callers check this */
    if (ppdev->bHGSMISupported)
#endif
    {
        LogFunc(("pVBVA %p vbva off 0x%x\n", pVBVA, pCtx->offVRAMBuffer));

        /* Bring the shared buffer to a pristine, empty state. */
        pVBVA->hostFlags.u32HostEvents = 0;
        pVBVA->hostFlags.u32SupportedOrders = 0;
        pVBVA->off32Data = 0;
        pVBVA->off32Free = 0;
        memset(pVBVA->aRecords, 0, sizeof (pVBVA->aRecords));
        pVBVA->indexRecordFirst = 0;
        pVBVA->indexRecordFree = 0;
        /* below this many free bytes, partial writes are considered overflow */
        pVBVA->cbPartialWriteThreshold = 256;
        /* usable payload bytes: total minus the header, with the au8Data
         * member (already included in sizeof (VBVABUFFER)) added back */
        pVBVA->cbData = pCtx->cbBuffer - sizeof (VBVABUFFER) + sizeof (pVBVA->au8Data);

        pCtx->fHwBufferOverflow = false;
        pCtx->pRecord = NULL;
        pCtx->pVBVA = pVBVA;

        rc = vboxVBVAExCtlSubmitEnableDisable(pCtx, pHGSMICtx, true);
    }

    if (!RT_SUCCESS(rc))
    {
        WARN(("enable failed %d", rc));
        /* roll back local state and make sure the host side is off */
        VBoxVBVAExDisable(pCtx, pHGSMICtx);
    }

    return rc;
}
273
274RTDECL(void) VBoxVBVAExDisable(PVBVAEXBUFFERCONTEXT pCtx,
275 PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx)
276{
277 LogFlowFunc(("\n"));
278
279 pCtx->fHwBufferOverflow = false;
280 pCtx->pRecord = NULL;
281 pCtx->pVBVA = NULL;
282
283 vboxVBVAExCtlSubmitEnableDisable(pCtx, pHGSMICtx, false);
284
285 return;
286}
287
/**
 * Begins a buffer update: claims the next record slot in the ring and marks
 * it partial.  Must be paired with VBoxVBVAExBufferEndUpdate().
 *
 * @returns true if a record slot was claimed, false if the record queue is
 *          still full even after asking the host to flush.
 */
RTDECL(bool) VBoxVBVAExBufferBeginUpdate(PVBVAEXBUFFERCONTEXT pCtx,
                                         PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx)
{
    bool bRc = false;

    // LogFunc(("flags = 0x%08X\n", pCtx->pVBVA? pCtx->pVBVA->u32HostEvents: -1));

    Assert(pCtx->pVBVA);
    /* we do not use u32HostEvents & VBVA_F_MODE_ENABLED,
     * VBVA stays enabled once ENABLE call succeeds, until it is disabled with DISABLED call */
//    if (   pCtx->pVBVA
//        && (pCtx->pVBVA->hostFlags.u32HostEvents & VBVA_F_MODE_ENABLED))
    {
        uint32_t indexRecordNext;

        Assert(!pCtx->fHwBufferOverflow);
        Assert(pCtx->pRecord == NULL);

        indexRecordNext = (pCtx->pVBVA->indexRecordFree + 1) % VBVA_MAX_RECORDS;

        if (indexRecordNext == pCtx->indexRecordFirstUncompleted)
        {
            /* All slots in the records queue are used. */
            vboxVBVAExFlush (pCtx, pHGSMICtx);
        }

        if (indexRecordNext == pCtx->indexRecordFirstUncompleted)
        {
            /* Even after flush there is no place. Fail the request. */
            LogFunc(("no space in the queue of records!!! first %d, last %d\n",
                     indexRecordNext, pCtx->pVBVA->indexRecordFree));
        }
        else
        {
            /* Initialize the record. */
            VBVARECORD *pRecord = &pCtx->pVBVA->aRecords[pCtx->pVBVA->indexRecordFree];

            /* PARTIAL tells the host the record is still being filled in */
            pRecord->cbRecord = VBVA_F_RECORD_PARTIAL;

            pCtx->pVBVA->indexRecordFree = indexRecordNext;

            // LogFunc(("indexRecordNext = %d\n", indexRecordNext));

            /* Remember which record we are using. */
            pCtx->pRecord = pRecord;

            bRc = true;
        }
    }

    return bRc;
}
340
341RTDECL(void) VBoxVBVAExBufferEndUpdate(PVBVAEXBUFFERCONTEXT pCtx)
342{
343 VBVARECORD *pRecord;
344
345 // LogFunc(("\n"));
346
347 Assert(pCtx->pVBVA);
348
349 pRecord = pCtx->pRecord;
350 Assert(pRecord && (pRecord->cbRecord & VBVA_F_RECORD_PARTIAL));
351
352 /* Mark the record completed. */
353 pRecord->cbRecord &= ~VBVA_F_RECORD_PARTIAL;
354
355 pCtx->fHwBufferOverflow = false;
356 pCtx->pRecord = NULL;
357
358 return;
359}
360
/**
 * Checks whether ring index u32Entry lies in the half-open range
 * [u32First, u32Free), taking wrap-around into account.
 * An empty range (u32First == u32Free) contains nothing.
 */
DECLINLINE(bool) vboxVBVAExIsEntryInRange(uint32_t u32First, uint32_t u32Entry, uint32_t u32Free)
{
    if (u32First == u32Free)
        return false; /* empty range */
    if (u32First < u32Free)
        return u32Entry >= u32First && u32Entry < u32Free; /* linear case */
    /* wrapped case: [u32First, max] U [0, u32Free) */
    return u32Entry >= u32First || u32Entry < u32Free;
}
370
/**
 * Like vboxVBVAExIsEntryInRange, but additionally reports true when all three
 * indices coincide (the fully empty case).
 */
DECLINLINE(bool) vboxVBVAExIsEntryInRangeOrEmpty(uint32_t u32First, uint32_t u32Entry, uint32_t u32Free)
{
    if (u32First == u32Entry && u32Entry == u32Free)
        return true; /* empty and pointing at the boundary */
    if (u32First == u32Free)
        return false;
    if (u32First < u32Free)
        return u32Entry >= u32First && u32Entry < u32Free;
    return u32Entry >= u32First || u32Entry < u32Free;
}
377#ifdef DEBUG
378
/**
 * Debug-only sanity check: the host's record/data cursors must lie within the
 * span of not-yet-completed entries tracked locally (or everything is empty).
 */
DECLINLINE(void) vboxHwBufferVerifyCompleted(PVBVAEXBUFFERCONTEXT pCtx)
{
    VBVABUFFER *pVBVA = pCtx->pVBVA;
    if (!vboxVBVAExIsEntryInRangeOrEmpty(pCtx->indexRecordFirstUncompleted, pVBVA->indexRecordFirst, pVBVA->indexRecordFree))
    {
        WARN(("invalid record set"));
    }

    if (!vboxVBVAExIsEntryInRangeOrEmpty(pCtx->off32DataUncompleted, pVBVA->off32Data, pVBVA->off32Free))
    {
        WARN(("invalid data set"));
    }
}
392#endif
393
394/*
395 * Private operations.
396 */
397static uint32_t vboxHwBufferAvail(PVBVAEXBUFFERCONTEXT pCtx, const VBVABUFFER *pVBVA)
398{
399 int32_t i32Diff = pCtx->off32DataUncompleted - pVBVA->off32Free;
400
401 return i32Diff > 0? i32Diff: pVBVA->cbData + i32Diff;
402}
403
404static uint32_t vboxHwBufferContiguousAvail(PVBVAEXBUFFERCONTEXT pCtx, const VBVABUFFER *pVBVA)
405{
406 int32_t i32Diff = pCtx->off32DataUncompleted - pVBVA->off32Free;
407
408 return i32Diff > 0 ? i32Diff: pVBVA->cbData - pVBVA->off32Free;
409}
410
411static void vboxHwBufferPlaceDataAt(PVBVAEXBUFFERCONTEXT pCtx, const void *p,
412 uint32_t cb, uint32_t offset)
413{
414 VBVABUFFER *pVBVA = pCtx->pVBVA;
415 uint32_t u32BytesTillBoundary = pVBVA->cbData - offset;
416 uint8_t *dst = &pVBVA->au8Data[offset];
417 int32_t i32Diff = cb - u32BytesTillBoundary;
418
419 if (i32Diff <= 0)
420 {
421 /* Chunk will not cross buffer boundary. */
422 memcpy (dst, p, cb);
423 }
424 else
425 {
426 /* Chunk crosses buffer boundary. */
427 memcpy (dst, p, u32BytesTillBoundary);
428 memcpy (&pVBVA->au8Data[0], (uint8_t *)p + u32BytesTillBoundary, i32Diff);
429 }
430
431 return;
432}
433
/**
 * Writes cb bytes from p into the data ring, asking the host to flush and
 * falling back to partial writes when space runs out.  Must be called between
 * BeginUpdate/EndUpdate; the current record's cbRecord grows as data lands.
 *
 * @returns true on success, false if VBVA is not set up or the ring
 *          overflowed (fHwBufferOverflow is then latched).
 */
static bool vboxHwBufferWrite(PVBVAEXBUFFERCONTEXT pCtx,
                              PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
                              const void *p, uint32_t cb)
{
    VBVARECORD *pRecord;
    uint32_t cbHwBufferAvail;

    uint32_t cbWritten = 0;

    VBVABUFFER *pVBVA = pCtx->pVBVA;
    Assert(pVBVA);

    if (!pVBVA || pCtx->fHwBufferOverflow)
    {
        return false;
    }

    /* a record must have been opened by BeginUpdate */
    Assert(pVBVA->indexRecordFirst != pVBVA->indexRecordFree);
    Assert(pCtx->indexRecordFirstUncompleted != pVBVA->indexRecordFree);

    pRecord = pCtx->pRecord;
    Assert(pRecord && (pRecord->cbRecord & VBVA_F_RECORD_PARTIAL));

    // LogFunc(("%d\n", cb));

    cbHwBufferAvail = vboxHwBufferAvail(pCtx, pVBVA);

    while (cb > 0)
    {
        uint32_t cbChunk = cb;

        // LogFunc(("pVBVA->off32Free %d, pRecord->cbRecord 0x%08X, cbHwBufferAvail %d, cb %d, cbWritten %d\n",
        //             pVBVA->off32Free, pRecord->cbRecord, cbHwBufferAvail, cb, cbWritten));

        if (cbChunk >= cbHwBufferAvail)
        {
            /* Not enough room: ask the host to consume what is queued. */
            LogFunc(("1) avail %d, chunk %d\n", cbHwBufferAvail, cbChunk));

            vboxVBVAExFlush(pCtx, pHGSMICtx);

            cbHwBufferAvail = vboxHwBufferAvail(pCtx, pVBVA);

            if (cbChunk >= cbHwBufferAvail)
            {
                WARN(("no place for %d bytes. Only %d bytes available after flush. Going to partial writes.\n",
                      cb, cbHwBufferAvail));

                if (cbHwBufferAvail <= pVBVA->cbPartialWriteThreshold)
                {
                    /* Too little room even for a useful partial write. */
                    WARN(("Buffer overflow!!!\n"));
                    pCtx->fHwBufferOverflow = true;
                    Assert(false);
                    return false;
                }

                /* Write what fits, keeping the threshold in reserve. */
                cbChunk = cbHwBufferAvail - pVBVA->cbPartialWriteThreshold;
            }
        }

        Assert(cbChunk <= cb);
        Assert(cbChunk <= vboxHwBufferAvail(pCtx, pVBVA));

        vboxHwBufferPlaceDataAt (pCtx, (uint8_t *)p + cbWritten, cbChunk, pVBVA->off32Free);

        pVBVA->off32Free   = (pVBVA->off32Free + cbChunk) % pVBVA->cbData;
        pRecord->cbRecord += cbChunk;
        cbHwBufferAvail -= cbChunk;

        cb        -= cbChunk;
        cbWritten += cbChunk;
    }

    return true;
}
508
509/*
510 * Public writer to the hardware buffer.
511 */
512RTDECL(uint32_t) VBoxVBVAExGetFreeTail(PVBVAEXBUFFERCONTEXT pCtx)
513{
514 VBVABUFFER *pVBVA = pCtx->pVBVA;
515 if (pVBVA->off32Data <= pVBVA->off32Free)
516 return pVBVA->cbData - pVBVA->off32Free;
517 return 0;
518}
519
/**
 * Allocates cb contiguous bytes in the data ring for the current record.
 * Must be called between BeginUpdate/EndUpdate.
 *
 * @returns pointer into the ring, or NULL when the request cannot be
 *          satisfied contiguously (the caller may NOP the tail and retry).
 */
RTDECL(void*) VBoxVBVAExAllocContiguous(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, uint32_t cb)
{
    VBVARECORD *pRecord;
    uint32_t cbHwBufferContiguousAvail;
    uint32_t offset;

    VBVABUFFER *pVBVA = pCtx->pVBVA;
    Assert(pVBVA);

    if (!pVBVA || pCtx->fHwBufferOverflow)
    {
        return NULL;
    }

    /* a record must have been opened by BeginUpdate */
    Assert(pVBVA->indexRecordFirst != pVBVA->indexRecordFree);
    Assert(pCtx->indexRecordFirstUncompleted != pVBVA->indexRecordFree);

    pRecord = pCtx->pRecord;
    Assert(pRecord && (pRecord->cbRecord & VBVA_F_RECORD_PARTIAL));

    // LogFunc(("%d\n", cb));

    if (pVBVA->cbData < cb)
    {
        WARN(("requested to allocate buffer of size %d bigger than the VBVA ring buffer size %d", cb, pVBVA->cbData));
        return NULL;
    }

    cbHwBufferContiguousAvail = vboxHwBufferContiguousAvail(pCtx, pVBVA);

    if (cbHwBufferContiguousAvail < cb)
    {
        /* NOTE(review): this bails out when the request would fit in the tail
         * (cb < cbData - off32Free) but uncompleted data is in the way, and
         * only flushes otherwise; the inline comment below reads as if the
         * opposite were intended — confirm against the host-side consumer. */
        if (cb < pVBVA->cbData - pVBVA->off32Free)
        {
            /* the entire contiguous part is smaller than the requested buffer */
            return NULL;
        }

        vboxVBVAExFlush(pCtx, pHGSMICtx);

        cbHwBufferContiguousAvail = vboxHwBufferContiguousAvail(pCtx, pVBVA);
        if (cbHwBufferContiguousAvail < cb)
        {
            /* this is really bad - the host did not clean up buffer even after we requested it to flush */
            WARN(("Host did not clean up the buffer!"));
            return NULL;
        }
    }

    offset = pVBVA->off32Free;

    pVBVA->off32Free = (pVBVA->off32Free + cb) % pVBVA->cbData;
    pRecord->cbRecord += cb;

    return &pVBVA->au8Data[offset];
}
576
577RTDECL(bool) VBoxVBVAExIsProcessing(PVBVAEXBUFFERCONTEXT pCtx)
578{
579 uint32_t u32HostEvents = pCtx->pVBVA->hostFlags.u32HostEvents;
580 return !!(u32HostEvents & VBVA_F_STATE_PROCESSING);
581}
582
/**
 * Retires the oldest uncompleted record: advances the uncompleted record
 * index and the uncompleted data offset by the record's size.
 */
RTDECL(void) VBoxVBVAExCBufferCompleted(PVBVAEXBUFFERCONTEXT pCtx)
{
    VBVABUFFER *pVBVA = pCtx->pVBVA;
    /* NOTE(review): assumes VBVA_F_RECORD_PARTIAL is clear in cbRecord here — confirm */
    uint32_t cbBuffer = pVBVA->aRecords[pCtx->indexRecordFirstUncompleted].cbRecord;
    pCtx->indexRecordFirstUncompleted = (pCtx->indexRecordFirstUncompleted + 1) % VBVA_MAX_RECORDS;
    pCtx->off32DataUncompleted = (pCtx->off32DataUncompleted + cbBuffer) % pVBVA->cbData;
#ifdef DEBUG
    vboxHwBufferVerifyCompleted(pCtx);
#endif
}
593
/** Public wrapper around vboxHwBufferWrite(); see that function for semantics. */
RTDECL(bool) VBoxVBVAExWrite(PVBVAEXBUFFERCONTEXT pCtx,
                             PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
                             const void *pv, uint32_t cb)
{
    return vboxHwBufferWrite(pCtx, pHGSMICtx, pv, cb);
}
600
601RTDECL(bool) VBoxVBVAExOrderSupported(PVBVAEXBUFFERCONTEXT pCtx, unsigned code)
602{
603 VBVABUFFER *pVBVA = pCtx->pVBVA;
604
605 if (!pVBVA)
606 {
607 return false;
608 }
609
610 if (pVBVA->hostFlags.u32SupportedOrders & (1 << code))
611 {
612 return true;
613 }
614
615 return false;
616}
617
618RTDECL(void) VBoxVBVAExSetupBufferContext(PVBVAEXBUFFERCONTEXT pCtx,
619 uint32_t offVRAMBuffer,
620 uint32_t cbBuffer,
621 PFNVBVAEXBUFFERFLUSH pfnFlush,
622 void *pvFlush)
623{
624 memset(pCtx, 0, RT_OFFSETOF(VBVAEXBUFFERCONTEXT, pVBVA));
625 pCtx->offVRAMBuffer = offVRAMBuffer;
626 pCtx->cbBuffer = cbBuffer;
627 pCtx->pfnFlush = pfnFlush;
628 pCtx->pvFlush = pvFlush;
629}
630
/**
 * Returns the payload of the record the iterator currently points at.
 *
 * @param pcbBuffer   Optional; receives the record size.
 * @param pfProcessed Optional; receives true when the host has already moved
 *                    past this record (it is outside [indexRecordFirst, indexRecordFree)).
 * @returns pointer into the data ring, or NULL if the record is still partial.
 *          NOTE(review): the check is cbRecord == VBVA_F_RECORD_PARTIAL
 *          (exact equality, i.e. a just-opened empty record), not a flag
 *          test — confirm this is intentional.
 */
static void* vboxVBVAExIterCur(PVBVAEXBUFFERITERBASE pIter, struct VBVABUFFER *pVBVA, uint32_t *pcbBuffer, bool *pfProcessed)
{
    uint32_t cbRecord = pVBVA->aRecords[pIter->iCurRecord].cbRecord;
    if (cbRecord == VBVA_F_RECORD_PARTIAL)
        return NULL;
    if (pcbBuffer)
        *pcbBuffer = cbRecord;
    if (pfProcessed)
        *pfProcessed = !vboxVBVAExIsEntryInRange(pVBVA->indexRecordFirst, pIter->iCurRecord, pVBVA->indexRecordFree);
    return &pVBVA->au8Data[pIter->off32CurCmd];
}
642
/**
 * Ring subtraction: returns (x - val) wrapped into a ring of size maxVal.
 * Note the preserved historical quirk: when the deficit is an exact multiple
 * of maxVal the result is maxVal, not 0.
 */
DECLINLINE(uint32_t) vboxVBVAExSubst(uint32_t x, uint32_t val, uint32_t maxVal)
{
    const int32_t iDiff = (int32_t)(x - val);
    if (iDiff >= 0)
        return (uint32_t)iDiff;
    return maxVal - ((uint32_t)-iDiff % maxVal);
}
648
/**
 * Positions a backward iterator at the most recently submitted record, or at
 * the "no data" sentinel (indexRecordFree/off32Free) when there are no
 * uncompleted records.
 */
RTDECL(void) VBoxVBVAExBIterInit(PVBVAEXBUFFERCONTEXT pCtx, PVBVAEXBUFFERBACKWARDITER pIter)
{
    struct VBVABUFFER *pVBVA = pCtx->pVBVA;
    pIter->Base.pCtx = pCtx;
    /* the newest record is one before the free index, ring-wise */
    uint32_t iCurRecord = vboxVBVAExSubst(pVBVA->indexRecordFree, 1, VBVA_MAX_RECORDS);
    if (vboxVBVAExIsEntryInRange(pCtx->indexRecordFirstUncompleted, iCurRecord, pVBVA->indexRecordFree))
    {
        /* even if the command gets completed by the time we're doing the pCtx->pVBVA->aRecords[iCurRecord].cbRecord below,
         * the pCtx->pVBVA->aRecords[iCurRecord].cbRecord will still be valid, as it can only be modified by a submitter,
         * and we are in a submitter context now */
        pIter->Base.iCurRecord = iCurRecord;
        pIter->Base.off32CurCmd = vboxVBVAExSubst(pVBVA->off32Free, pCtx->pVBVA->aRecords[iCurRecord].cbRecord, pVBVA->cbData);
    }
    else
    {
        /* no data */
        pIter->Base.iCurRecord = pVBVA->indexRecordFree;
        pIter->Base.off32CurCmd = pVBVA->off32Free;
    }
}
669
/**
 * Returns the record the backward iterator points at and steps the iterator
 * one record towards older entries.
 *
 * @param pcbBuffer   Optional; receives the record size.
 * @param pfProcessed Optional; receives true if the host already consumed it.
 * @returns record payload, or NULL when the iterator has left the range of
 *          uncompleted records.
 */
RTDECL(void*) VBoxVBVAExBIterNext(PVBVAEXBUFFERBACKWARDITER pIter, uint32_t *pcbBuffer, bool *pfProcessed)
{
    PVBVAEXBUFFERCONTEXT pCtx = pIter->Base.pCtx;
    struct VBVABUFFER *pVBVA = pCtx->pVBVA;
    uint32_t indexRecordFirstUncompleted = pCtx->indexRecordFirstUncompleted;
    if (!vboxVBVAExIsEntryInRange(indexRecordFirstUncompleted, pIter->Base.iCurRecord, pVBVA->indexRecordFree))
        return NULL;

    void *pvBuffer = vboxVBVAExIterCur(&pIter->Base, pVBVA, pcbBuffer, pfProcessed);
    AssertRelease(pvBuffer);

    /* even if the command gets completed by the time we're doing the pCtx->pVBVA->aRecords[pIter->Base.iCurRecord].cbRecord below,
     * the pCtx->pVBVA->aRecords[pIter->Base.iCurRecord].cbRecord will still be valid, as it can only be modified by a submitter,
     * and we are in a submitter context now */
    pIter->Base.iCurRecord = vboxVBVAExSubst(pIter->Base.iCurRecord, 1, VBVA_MAX_RECORDS);
    pIter->Base.off32CurCmd = vboxVBVAExSubst(pIter->Base.off32CurCmd, pCtx->pVBVA->aRecords[pIter->Base.iCurRecord].cbRecord, pVBVA->cbData);

    return pvBuffer;
}
689
690RTDECL(void) VBoxVBVAExCFIterInit(PVBVAEXBUFFERCONTEXT pCtx, PVBVAEXBUFFERFORWARDITER pIter)
691{
692 pIter->Base.pCtx = pCtx;
693 pIter->Base.iCurRecord = pCtx->indexRecordFirstUncompleted;
694 pIter->Base.off32CurCmd = pCtx->off32DataUncompleted;
695}
696
/**
 * Returns the record the forward iterator points at and steps the iterator
 * one record towards newer entries.
 *
 * @param pcbBuffer   Optional; receives the record size.
 * @param pfProcessed Optional; receives true if the host already consumed it.
 * @returns record payload, NULL when past the last submitted record or the
 *          current record is still partial.
 */
RTDECL(void*) VBoxVBVAExCFIterNext(PVBVAEXBUFFERFORWARDITER pIter, uint32_t *pcbBuffer, bool *pfProcessed)
{
    PVBVAEXBUFFERCONTEXT pCtx = pIter->Base.pCtx;
    struct VBVABUFFER *pVBVA = pCtx->pVBVA;
    uint32_t indexRecordFree = pVBVA->indexRecordFree;
    if (!vboxVBVAExIsEntryInRange(pCtx->indexRecordFirstUncompleted, pIter->Base.iCurRecord, indexRecordFree))
        return NULL;

    uint32_t cbBuffer;
    void *pvData = vboxVBVAExIterCur(&pIter->Base, pVBVA, &cbBuffer, pfProcessed);
    if (!pvData)
        return NULL;

    pIter->Base.iCurRecord = (pIter->Base.iCurRecord + 1) % VBVA_MAX_RECORDS;
    pIter->Base.off32CurCmd = (pIter->Base.off32CurCmd + cbBuffer) % pVBVA->cbData;

    if (pcbBuffer)
        *pcbBuffer = cbBuffer;

    return pvData;
}
718
719/**/
720
721int VBoxCmdVbvaEnable(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
722{
723 return VBoxVBVAExEnable(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, pVbva->Vbva.pVBVA);
724}
725
726int VBoxCmdVbvaDisable(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
727{
728 VBoxVBVAExDisable(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx);
729 return VINF_SUCCESS;
730}
731
732int VBoxCmdVbvaDestroy(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
733{
734 int rc = VINF_SUCCESS;
735 VBoxMPCmnUnmapAdapterMemory(VBoxCommonFromDeviceExt(pDevExt), (void**)&pVbva->Vbva.pVBVA);
736 memset(pVbva, 0, sizeof (*pVbva));
737 return rc;
738}
739
/**
 * Reports a DMA completion/preemption/fault for the given fence to dxgkrnl.
 * Must be called at device interrupt synchronization level (it invokes
 * DxgkCbNotifyInterrupt).
 */
static void vboxCmdVbvaDdiNotifyCompleteIrq(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, UINT u32FenceId, DXGK_INTERRUPT_TYPE enmComplType)
{
    DXGKARGCB_NOTIFY_INTERRUPT_DATA notify;
    memset(&notify, 0, sizeof(DXGKARGCB_NOTIFY_INTERRUPT_DATA));
    switch (enmComplType)
    {
        case DXGK_INTERRUPT_DMA_COMPLETED:
            notify.InterruptType = DXGK_INTERRUPT_DMA_COMPLETED;
            notify.DmaCompleted.SubmissionFenceId = u32FenceId;
            notify.DmaCompleted.NodeOrdinal = pVbva->idNode;
            break;

        case DXGK_INTERRUPT_DMA_PREEMPTED:
            notify.InterruptType = DXGK_INTERRUPT_DMA_PREEMPTED;
            notify.DmaPreempted.PreemptionFenceId = u32FenceId;
            notify.DmaPreempted.NodeOrdinal = pVbva->idNode;
            notify.DmaPreempted.LastCompletedFenceId = pVbva->u32FenceCompleted;
            break;

        case DXGK_INTERRUPT_DMA_FAULTED:
            /* not expected on this path */
            Assert(0);
            notify.InterruptType = DXGK_INTERRUPT_DMA_FAULTED;
            notify.DmaFaulted.FaultedFenceId = u32FenceId;
            notify.DmaFaulted.Status = STATUS_UNSUCCESSFUL; /* @todo: better status ? */
            notify.DmaFaulted.NodeOrdinal = pVbva->idNode;
            break;

        default:
            WARN(("unrecognized completion type %d", enmComplType));
            break;
    }

    pDevExt->u.primary.DxgkInterface.DxgkCbNotifyInterrupt(pDevExt->u.primary.DxgkInterface.DeviceHandle, &notify);
}
774
/** Context passed to vboxCmdVbvaDdiNotifyCompleteCb via DxgkCbSynchronizeExecution. */
typedef struct VBOXCMDVBVA_NOTIFYCOMPLETED_CB
{
    PVBOXMP_DEVEXT pDevExt;           /* device extension providing the DXGK callbacks */
    VBOXCMDVBVA *pVbva;               /* command vbva the fence belongs to */
    volatile UINT *pu32FenceId;       /* in/out: fence to report; zeroed once reported */
    DXGK_INTERRUPT_TYPE enmComplType; /* completion type to report to dxgkrnl */
} VBOXCMDVBVA_NOTIFYCOMPLETED_CB, *PVBOXCMDVBVA_NOTIFYCOMPLETED_CB;
782
/**
 * Synchronized callback: reports the fence completion exactly once.  The
 * shared fence id is cleared before notifying, so a concurrent notifier
 * reads 0 and skips; a DPC is queued after a successful notification.
 *
 * @returns TRUE if a notification was raised, FALSE if already reported.
 */
static BOOLEAN vboxCmdVbvaDdiNotifyCompleteCb(PVOID pvContext)
{
    PVBOXCMDVBVA_NOTIFYCOMPLETED_CB pData = (PVBOXCMDVBVA_NOTIFYCOMPLETED_CB)pvContext;
    if (*pData->pu32FenceId)
    {
        UINT u32FenceId = *pData->pu32FenceId;
        /* claim the fence: anyone else now sees 0 and does nothing */
        *pData->pu32FenceId = 0;

        vboxCmdVbvaDdiNotifyCompleteIrq(pData->pDevExt, pData->pVbva, u32FenceId, pData->enmComplType);

        pData->pDevExt->u.primary.DxgkInterface.DxgkCbQueueDpc(pData->pDevExt->u.primary.DxgkInterface.DeviceHandle);

        return TRUE;
    }

    return FALSE;
}
800
801static int vboxCmdVbvaDdiNotifyComplete(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, volatile UINT *pu32FenceId, DXGK_INTERRUPT_TYPE enmComplType)
802{
803 VBOXCMDVBVA_NOTIFYCOMPLETED_CB Data;
804 Data.pDevExt = pDevExt;
805 Data.pVbva = pVbva;
806 Data.pu32FenceId = pu32FenceId;
807 Data.enmComplType = enmComplType;
808 BOOLEAN bDummy;
809 NTSTATUS Status = pDevExt->u.primary.DxgkInterface.DxgkCbSynchronizeExecution(
810 pDevExt->u.primary.DxgkInterface.DeviceHandle,
811 vboxCmdVbvaDdiNotifyCompleteCb,
812 &Data,
813 0, /* IN ULONG MessageNumber */
814 &bDummy);
815 if (!NT_SUCCESS(Status))
816 {
817 WARN(("DxgkCbSynchronizeExecution failed Status %#x", Status));
818 return VERR_GENERAL_FAILURE;
819 }
820 return Status;
821}
822
823static int vboxCmdVbvaFlush(PVBOXMP_DEVEXT pDevExt, HGSMIGUESTCOMMANDCONTEXT *pCtx, bool fBufferOverflow)
824{
825 /* Issue the flush command. */
826 VBVACMDVBVAFLUSH *pFlush = (VBVACMDVBVAFLUSH*)VBoxHGSMIBufferAlloc(pCtx,
827 sizeof (VBVACMDVBVAFLUSH),
828 HGSMI_CH_VBVA,
829 VBVA_CMDVBVA_FLUSH);
830 if (!pFlush)
831 {
832 WARN(("VBoxHGSMIBufferAlloc failed\n"));
833 return VERR_OUT_OF_RESOURCES;
834 }
835
836 pFlush->u32Flags = fBufferOverflow ? VBVACMDVBVAFLUSH_F_GUEST_BUFFER_OVERFLOW : 0;
837
838 VBoxHGSMIBufferSubmit(pCtx, pFlush);
839
840 VBoxHGSMIBufferFree(pCtx, pFlush);
841
842 return VINF_SUCCESS;
843}
844
/** Context for vboxCmdVbvaCheckCompletedIrqCb. */
typedef struct VBOXCMDVBVA_CHECK_COMPLETED_CB
{
    PVBOXMP_DEVEXT pDevExt; /* device extension */
    VBOXCMDVBVA *pVbva;     /* optional; when set, u32FenceID is filled from it */
    uint32_t u32FenceID;    /* out: last completed fence id (0 when pVbva is NULL) */
} VBOXCMDVBVA_CHECK_COMPLETED_CB;
851
/**
 * Synchronized callback: runs the interrupt routine to process pending
 * completions, then snapshots the last completed fence id (if a vbva is given).
 *
 * @returns the interrupt routine's result.
 */
static BOOLEAN vboxCmdVbvaCheckCompletedIrqCb(PVOID pContext)
{
    VBOXCMDVBVA_CHECK_COMPLETED_CB *pCompleted = (VBOXCMDVBVA_CHECK_COMPLETED_CB*)pContext;
    BOOLEAN bRc = DxgkDdiInterruptRoutineNew(pCompleted->pDevExt, 0);
    if (pCompleted->pVbva)
        pCompleted->u32FenceID = pCompleted->pVbva->u32FenceCompleted;
    return bRc;
}
860
861
/**
 * Processes pending command completions (synchronized with the interrupt),
 * optionally pinging the host with a flush command first.
 *
 * @param pVbva            Optional; when given, the returned fence id is read from it.
 * @param fPingHost        Send a flush command before checking.
 * @param fBufferOverflow  Passed to the flush command when pinging.
 * @returns last completed fence id, or 0 when pVbva is NULL.
 */
static uint32_t vboxCmdVbvaCheckCompleted(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, bool fPingHost, HGSMIGUESTCOMMANDCONTEXT *pCtx, bool fBufferOverflow)
{
    if (fPingHost)
        vboxCmdVbvaFlush(pDevExt, pCtx, fBufferOverflow);

    VBOXCMDVBVA_CHECK_COMPLETED_CB context;
    context.pDevExt = pDevExt;
    context.pVbva = pVbva;
    context.u32FenceID = 0;
    BOOLEAN bRet;
    NTSTATUS Status = pDevExt->u.primary.DxgkInterface.DxgkCbSynchronizeExecution(
            pDevExt->u.primary.DxgkInterface.DeviceHandle,
            vboxCmdVbvaCheckCompletedIrqCb,
            &context,
            0, /* IN ULONG MessageNumber */
            &bRet);
    Assert(Status == STATUS_SUCCESS);

    return context.u32FenceID;
}
882
/**
 * Flush callback for the command VBVA buffer context: pings the host with an
 * overflow flush and processes completions.
 * (NOTE: the "vox" in the name is a historical typo, kept because the
 * identifier is referenced when the context is set up.)
 */
DECLCALLBACK(void) voxCmdVbvaFlushCb(struct VBVAEXBUFFERCONTEXT *pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, void *pvFlush)
{
    PVBOXMP_DEVEXT pDevExt = (PVBOXMP_DEVEXT)pvFlush;

    vboxCmdVbvaCheckCompleted(pDevExt, NULL,  true /*fPingHost*/, pHGSMICtx, true /*fBufferOverflow*/);
}
889
890int VBoxCmdVbvaCreate(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, ULONG offBuffer, ULONG cbBuffer)
891{
892 memset(pVbva, 0, sizeof (*pVbva));
893
894 int rc = VBoxMPCmnMapAdapterMemory(VBoxCommonFromDeviceExt(pDevExt),
895 (void**)&pVbva->Vbva.pVBVA,
896 offBuffer,
897 cbBuffer);
898 if (RT_SUCCESS(rc))
899 {
900 Assert(pVbva->Vbva.pVBVA);
901 VBoxVBVAExSetupBufferContext(&pVbva->Vbva, offBuffer, cbBuffer, voxCmdVbvaFlushCb, pDevExt);
902 }
903 else
904 {
905 WARN(("VBoxMPCmnMapAdapterMemory failed rc %d", rc));
906 }
907
908 return rc;
909}
910
/**
 * Submits a command to the host through the command VBVA ring.
 *
 * The command is copied into a contiguous ring allocation; when the free tail
 * at the end of the ring is too small, the tail is consumed by a NOP record
 * and the allocation retried at the ring start.  Splitting a command across
 * the wrap boundary is not supported.  If the host is not already processing
 * the ring, a VBVA_CMDVBVA_SUBMIT command is sent to kick it.
 *
 * @param pCmd  Command header + payload; u32FenceID must be set.
 * @param cbCmd Total command size in bytes.
 * @returns VBox status code.
 */
int VBoxCmdVbvaSubmit(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, struct VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
{
    int rc = VINF_SUCCESS;

    Assert(pCmd->u32FenceID);

    pCmd->u8State = VBOXCMDVBVA_STATE_SUBMITTED;
    pVbva->u32FenceSubmitted = pCmd->u32FenceID;

    if (VBoxVBVAExGetSize(&pVbva->Vbva) < cbCmd)
    {
        WARN(("buffer does not fit the vbva buffer, we do not support splitting buffers"));
        return VERR_NOT_SUPPORTED;
    }

    if (!VBoxVBVAExBufferBeginUpdate(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx))
    {
        WARN(("VBoxVBVAExBufferBeginUpdate failed!"));
        return VERR_GENERAL_FAILURE;
    }

    void* pvBuffer = VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbCmd);
    if (!pvBuffer)
    {
        /* No contiguous run: burn the remaining tail with a NOP record so the
         * next allocation starts at the beginning of the ring. */
        WARN(("failed to allocate contiguous buffer, trying nopping the tail"));
        uint32_t cbTail = VBoxVBVAExGetFreeTail(&pVbva->Vbva);
        if (!cbTail)
        {
            WARN(("this is not a free tail case, cbTail is NULL"));
            return VERR_BUFFER_OVERFLOW;
        }

        Assert(cbTail < cbCmd);

        pvBuffer = VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbTail);

        Assert(pvBuffer);

        /* the whole tail record is a single NOP */
        *((uint8_t*)pvBuffer) = VBOXCMDVBVA_OPTYPE_NOP;

        VBoxVBVAExBufferEndUpdate(&pVbva->Vbva);

        /* open a fresh record for the actual command */
        if (!VBoxVBVAExBufferBeginUpdate(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx))
        {
            WARN(("VBoxVBVAExBufferBeginUpdate 2 failed!"));
            return VERR_GENERAL_FAILURE;
        }

        pvBuffer = VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbCmd);
        if (!pvBuffer)
        {
            WARN(("failed to allocate contiguous buffer, failing"));
            return VERR_GENERAL_FAILURE;
        }
    }

    Assert(pvBuffer);

    memcpy(pvBuffer, pCmd, cbCmd);

    VBoxVBVAExBufferEndUpdate(&pVbva->Vbva);

    if (!VBoxVBVAExIsProcessing(&pVbva->Vbva))
    {
        /* The host is idle: issue the submit command to wake it up. */
        HGSMIGUESTCOMMANDCONTEXT *pCtx = &VBoxCommonFromDeviceExt(pDevExt)->guestCtx;
        VBVACMDVBVASUBMIT *pSubmit = (VBVACMDVBVASUBMIT*)VBoxHGSMIBufferAlloc(pCtx,
                                                                              sizeof (VBVACMDVBVASUBMIT),
                                                                              HGSMI_CH_VBVA,
                                                                              VBVA_CMDVBVA_SUBMIT);
        if (!pSubmit)
        {
            WARN(("VBoxHGSMIBufferAlloc failed\n"));
            return VERR_OUT_OF_RESOURCES;
        }

        pSubmit->u32Reserved = 0;

        VBoxHGSMIBufferSubmit(pCtx, pSubmit);

        VBoxHGSMIBufferFree(pCtx, pSubmit);
    }

    return VINF_SUCCESS;
}
996
/**
 * Tries to preempt (cancel) the not-yet-started command with the given fence.
 * Walks the ring backwards from the newest record; a command still in the
 * SUBMITTED state is atomically flipped to CANCELLED and its preemption is
 * reported to dxgkrnl.
 *
 * @returns true if the command was cancelled, false if it was not found or
 *          the host had already started processing it.
 */
bool VBoxCmdVbvaPreempt(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, uint32_t u32FenceID)
{
    VBVAEXBUFFERBACKWARDITER Iter;
    VBoxVBVAExBIterInit(&pVbva->Vbva, &Iter);

    uint32_t cbBuffer;
    bool fProcessed;
    uint8_t* pu8Cmd;

    while ((pu8Cmd = (uint8_t*)VBoxVBVAExBIterNext(&Iter, &cbBuffer, &fProcessed)) != NULL)
    {
        /* NOP filler records carry no fence */
        if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
            continue;

        VBOXCMDVBVA_HDR *pCmd = (VBOXCMDVBVA_HDR*)pu8Cmd;

        if (pCmd->u32FenceID != u32FenceID)
            continue;

        /* only a still-submitted command can be cancelled */
        if (!ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_CANCELLED, VBOXCMDVBVA_STATE_SUBMITTED))
        {
            Assert(pCmd->u8State == VBOXCMDVBVA_STATE_IN_PROGRESS);
            break;
        }

        /* we have canceled the command successfully */
        vboxCmdVbvaDdiNotifyComplete(pDevExt, pVbva, &pCmd->u32FenceID, DXGK_INTERRUPT_DMA_PREEMPTED);
        return true;
    }

    return false;
}
1029
/**
 * Scans the ring buffer from the consumption end for commands the host has
 * finished processing and completes them towards the DXGK DDI.
 *
 * NOTE(review): the name suggests this runs in the interrupt/DPC path —
 * confirm against the caller.
 *
 * @returns true if at least one command was completed (or preempt-completed),
 *          i.e. the caller has notifications to flush.
 */
bool VBoxCmdVbvaCheckCompletedIrq(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
{
    VBVAEXBUFFERFORWARDITER Iter;
    VBoxVBVAExCFIterInit(&pVbva->Vbva, &Iter);

    bool fHasCommandsCompletedPreempted = false;
    bool fProcessed;
    uint8_t* pu8Cmd;


    while ((pu8Cmd = (uint8_t*)VBoxVBVAExCFIterNext(&Iter, NULL, &fProcessed)) != NULL)
    {
        /* Commands complete in order; stop at the first entry the host has
         * not processed yet. */
        if (!fProcessed)
            break;

        if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
            continue;

        VBOXCMDVBVA_HDR *pCmd = (VBOXCMDVBVA_HDR*)pu8Cmd;
        /* Snapshot state and fence BEFORE releasing the buffer slot below —
         * after VBoxVBVAExCBufferCompleted() the memory may be reused. */
        uint8_t u8State = pCmd->u8State;
        uint32_t u32FenceID = pCmd->u32FenceID;

        Assert(u8State == VBOXCMDVBVA_STATE_IN_PROGRESS
                || u8State == VBOXCMDVBVA_STATE_CANCELLED);
        Assert(u32FenceID);
        VBoxVBVAExCBufferCompleted(&pVbva->Vbva);
        DXGK_INTERRUPT_TYPE enmDdiNotify;

        if (u8State == VBOXCMDVBVA_STATE_IN_PROGRESS)
        {
            if (u32FenceID)
                pVbva->u32FenceCompleted = u32FenceID;
            enmDdiNotify = DXGK_INTERRUPT_DMA_COMPLETED;
        }
        else
        {
            Assert(u8State == VBOXCMDVBVA_STATE_CANCELLED);
            enmDdiNotify = DXGK_INTERRUPT_DMA_PREEMPTED;
            /* to prevent concurrent notifications from DdiPreemptCommand */
            pCmd->u32FenceID = 0;
        }

        /* Fence id 0 means "already notified elsewhere" — see the zeroing above. */
        if (u32FenceID)
            vboxCmdVbvaDdiNotifyCompleteIrq(pDevExt, pVbva, u32FenceID, enmDdiNotify);

        fHasCommandsCompletedPreempted = true;
    }

    return fHasCommandsCompletedPreempted;
}
1080
1081uint32_t VBoxCmdVbvaCheckCompleted(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, bool fPingHost)
1082{
1083 return vboxCmdVbvaCheckCompleted(pDevExt, pVbva, fPingHost, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, false /* fBufferOverflow */);
1084}
1085
/* Disabled legacy implementation: run-length-packed VBOXCMDVBVA_SYSMEMEL
 * page descriptors, superseded by the flat VBOXCMDVBVAPAGEIDX array builder
 * below.  Kept for reference only. */
#if 0
static uint32_t vboxCVDdiSysMemElBuild(VBOXCMDVBVA_SYSMEMEL *pEl, PMDL pMdl, uint32_t iPfn, uint32_t cPages)
{
    PFN_NUMBER cur = MmGetMdlPfnArray(pMdl)[iPfn];
    uint32_t cbEl = sizeof (*pEl);
    uint32_t cStoredPages = 1;
    PFN_NUMBER next;
    pEl->iPage1 = (uint32_t)(cur & 0xfffff);
    pEl->iPage2 = (uint32_t)(cur >> 20);
    --cPages;
    /* Extend the element while PFNs stay physically contiguous. */
    for ( ; cPages && cStoredPages < VBOXCMDVBVA_SYSMEMEL_CPAGES_MAX; --cPages, ++cStoredPages, cur = next)
    {
        next = MmGetMdlPfnArray(pMdl)[iPfn+cStoredPages];
        if (next != cur+1)
            break;
    }

    Assert(cStoredPages);
    pEl->cPagesAfterFirst = cStoredPages - 1;

    /* Returns the number of pages NOT yet consumed. */
    return cPages;
}

uint32_t VBoxCVDdiPTransferVRamSysBuildEls(VBOXCMDVBVA_PAGING_TRANSFER *pCmd, PMDL pMdl, uint32_t iPfn, uint32_t cPages, uint32_t cbBuffer, uint32_t *pcPagesWritten)
{
    uint32_t cInitPages = cPages;
    uint32_t cbInitBuffer = cbBuffer;
    uint32_t cEls = 0;
    VBOXCMDVBVA_SYSMEMEL *pEl = pCmd->aSysMem;

    Assert(cbBuffer >= sizeof (VBOXCMDVBVA_PAGING_TRANSFER));

    cbBuffer -= RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, aSysMem);

    for (; cPages && cbBuffer >= sizeof (VBOXCMDVBVA_SYSMEMEL); ++cEls, cbBuffer-=sizeof (VBOXCMDVBVA_SYSMEMEL), ++pEl)
    {
        cPages = vboxCVDdiSysMemElBuild(pEl, pMdl, iPfn + cInitPages - cPages, cPages);
    }

    *pcPagesWritten = cInitPages - cPages;
    return cbInitBuffer - cbBuffer;
}
#endif
1129
1130uint32_t VBoxCVDdiPTransferVRamSysBuildEls(VBOXCMDVBVA_PAGING_TRANSFER *pCmd, PMDL pMdl, uint32_t iPfn, uint32_t cPages, uint32_t cbBuffer, uint32_t *pcPagesWritten)
1131{
1132 uint32_t cbInitBuffer = cbBuffer;
1133 uint32_t i = 0;
1134 VBOXCMDVBVAPAGEIDX *pPageNumbers = pCmd->aPageNumbers;
1135
1136 cbBuffer -= RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, aPageNumbers);
1137
1138 for (; i < cPages && cbBuffer >= sizeof (*pPageNumbers); ++i, cbBuffer -= sizeof (*pPageNumbers))
1139 {
1140 pPageNumbers[i] = (VBOXCMDVBVAPAGEIDX)(MmGetMdlPfnArray(pMdl)[iPfn + i]);
1141 }
1142
1143 *pcPagesWritten = i;
1144 Assert(cbInitBuffer - cbBuffer == RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, aPageNumbers[i]));
1145 return cbInitBuffer - cbBuffer;
1146}
1147
1148
1149int vboxCmdVbvaConConnect(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
1150 uint32_t crVersionMajor, uint32_t crVersionMinor,
1151 uint32_t *pu32ClientID)
1152{
1153 VBOXCMDVBVA_CTL_3DCTL_CONNECT *pConnect = (VBOXCMDVBVA_CTL_3DCTL_CONNECT*)vboxCmdVbvaCtlCreate(pHGSMICtx, sizeof (VBOXCMDVBVA_CTL_3DCTL_CONNECT));
1154 if (!pConnect)
1155 {
1156 WARN(("vboxCmdVbvaCtlCreate failed"));
1157 return VERR_OUT_OF_RESOURCES;
1158 }
1159 pConnect->Hdr.u32Type = VBOXCMDVBVACTL_TYPE_3DCTL;
1160 pConnect->Hdr.i32Result = VERR_NOT_SUPPORTED;
1161 pConnect->Connect.Hdr.u32Type = VBOXCMDVBVA3DCTL_TYPE_CONNECT;
1162 pConnect->Connect.Hdr.u32CmdClientId = 0;
1163 pConnect->Connect.u32MajorVersion = crVersionMajor;
1164 pConnect->Connect.u32MinorVersion = crVersionMinor;
1165 pConnect->Connect.u64Pid = (uint64_t)PsGetCurrentProcessId();
1166
1167 int rc = vboxCmdVbvaCtlSubmitSync(pHGSMICtx, &pConnect->Hdr);
1168 if (RT_SUCCESS(rc))
1169 {
1170 rc = pConnect->Hdr.i32Result;
1171 if (RT_SUCCESS(rc))
1172 {
1173 Assert(pConnect->Connect.Hdr.u32CmdClientId);
1174 *pu32ClientID = pConnect->Connect.Hdr.u32CmdClientId;
1175 }
1176 else
1177 WARN(("VBOXCMDVBVA3DCTL_TYPE_CONNECT Disable failed %d", rc));
1178 }
1179 else
1180 WARN(("vboxCmdVbvaCtlSubmitSync returnd %d", rc));
1181
1182 vboxCmdVbvaCtlFree(pHGSMICtx, &pConnect->Hdr);
1183
1184 return rc;
1185}
1186
1187int vboxCmdVbvaConDisconnect(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, uint32_t u32ClientID)
1188{
1189 VBOXCMDVBVA_CTL_3DCTL *pDisconnect = (VBOXCMDVBVA_CTL_3DCTL*)vboxCmdVbvaCtlCreate(pHGSMICtx, sizeof (VBOXCMDVBVA_CTL_3DCTL));
1190 if (!pDisconnect)
1191 {
1192 WARN(("vboxCmdVbvaCtlCreate failed"));
1193 return VERR_OUT_OF_RESOURCES;
1194 }
1195 pDisconnect->Hdr.u32Type = VBOXCMDVBVACTL_TYPE_3DCTL;
1196 pDisconnect->Hdr.i32Result = VERR_NOT_SUPPORTED;
1197 pDisconnect->Ctl.u32Type = VBOXCMDVBVA3DCTL_TYPE_DISCONNECT;
1198 pDisconnect->Ctl.u32CmdClientId = u32ClientID;
1199
1200 int rc = vboxCmdVbvaCtlSubmitSync(pHGSMICtx, &pDisconnect->Hdr);
1201 if (RT_SUCCESS(rc))
1202 {
1203 rc = pDisconnect->Hdr.i32Result;
1204 if (!RT_SUCCESS(rc))
1205 WARN(("VBOXCMDVBVA3DCTL_TYPE_DISCONNECT Disable failed %d", rc));
1206 }
1207 else
1208 WARN(("vboxCmdVbvaCtlSubmitSync returnd %d", rc));
1209
1210 vboxCmdVbvaCtlFree(pHGSMICtx, &pDisconnect->Hdr);
1211
1212 return rc;
1213}
1214
1215int VBoxCmdVbvaConConnect(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva,
1216 uint32_t crVersionMajor, uint32_t crVersionMinor,
1217 uint32_t *pu32ClientID)
1218{
1219 return vboxCmdVbvaConConnect(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, crVersionMajor, crVersionMinor, pu32ClientID);
1220}
1221
1222int VBoxCmdVbvaConDisconnect(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, uint32_t u32ClientID)
1223{
1224 return vboxCmdVbvaConDisconnect(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, u32ClientID);
1225}
1226
1227VBOXCMDVBVA_CRCMD_CMD* vboxCmdVbvaConCmdAlloc(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, uint32_t cbCmd)
1228{
1229 VBOXCMDVBVA_CTL_3DCTL_CMD *pCmd = (VBOXCMDVBVA_CTL_3DCTL_CMD*)vboxCmdVbvaCtlCreate(pHGSMICtx, sizeof (VBOXCMDVBVA_CTL_3DCTL_CMD) + cbCmd);
1230 if (!pCmd)
1231 {
1232 WARN(("vboxCmdVbvaCtlCreate failed"));
1233 return NULL;
1234 }
1235 pCmd->Hdr.u32Type = VBOXCMDVBVACTL_TYPE_3DCTL;
1236 pCmd->Hdr.i32Result = VERR_NOT_SUPPORTED;
1237 pCmd->Cmd.Hdr.u32Type = VBOXCMDVBVA3DCTL_TYPE_CMD;
1238 pCmd->Cmd.Hdr.u32CmdClientId = 0;
1239 pCmd->Cmd.Cmd.u8OpCode = VBOXCMDVBVA_OPTYPE_CRCMD;
1240 pCmd->Cmd.Cmd.u8Flags = 0;
1241 pCmd->Cmd.Cmd.u8State = VBOXCMDVBVA_STATE_SUBMITTED;
1242 pCmd->Cmd.Cmd.u.i8Result = -1;
1243 pCmd->Cmd.Cmd.u32FenceID = 0;
1244
1245 return (VBOXCMDVBVA_CRCMD_CMD*)(pCmd+1);
1246}
1247
1248void vboxCmdVbvaConCmdFree(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CRCMD_CMD *pCmd)
1249{
1250 VBOXCMDVBVA_CTL_3DCTL_CMD *pHdr = ((VBOXCMDVBVA_CTL_3DCTL_CMD*)pCmd)-1;
1251 vboxCmdVbvaCtlFree(pHGSMICtx, &pHdr->Hdr);
1252}
1253
1254int vboxCmdVbvaConSubmitAsync(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CRCMD_CMD* pCmd, FNVBOXSHGSMICMDCOMPLETION pfnCompletion, void *pvCompletion)
1255{
1256 VBOXCMDVBVA_CTL_3DCTL_CMD *pHdr = ((VBOXCMDVBVA_CTL_3DCTL_CMD*)pCmd)-1;
1257 return vboxCmdVbvaCtlSubmitAsync(pHGSMICtx, &pHdr->Hdr, pfnCompletion, pvCompletion);
1258}
1259
1260VBOXCMDVBVA_CRCMD_CMD* VBoxCmdVbvaConCmdAlloc(PVBOXMP_DEVEXT pDevExt, uint32_t cbCmd)
1261{
1262 return vboxCmdVbvaConCmdAlloc(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbCmd);
1263}
1264
1265void VBoxCmdVbvaConCmdFree(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA_CRCMD_CMD *pCmd)
1266{
1267 vboxCmdVbvaConCmdFree(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, pCmd);
1268}
1269
1270int VBoxCmdVbvaConCmdSubmitAsync(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA_CRCMD_CMD* pCmd, FNVBOXSHGSMICMDCOMPLETION pfnCompletion, void *pvCompletion)
1271{
1272 return vboxCmdVbvaConSubmitAsync(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, pCmd, pfnCompletion, pvCompletion);
1273}
1274
1275int VBoxCmdVbvaConCmdCompletionData(void *pvCmd, VBOXCMDVBVA_CRCMD_CMD **ppCmd)
1276{
1277 VBOXCMDVBVA_CTL_3DCTL_CMD *pCmd = (VBOXCMDVBVA_CTL_3DCTL_CMD*)pvCmd;
1278 if (ppCmd)
1279 *ppCmd = (VBOXCMDVBVA_CRCMD_CMD*)(pCmd+1);
1280 return pCmd->Hdr.i32Result;
1281}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette