VirtualBox

source: vbox/trunk/src/VBox/Additions/WINNT/Graphics/Video/mp/wddm/VBoxMPVbva.cpp@ 50831

Last change on this file since 50831 was 50831, checked in by vboxsync, 11 years ago

crOpenGL: new command submission continued

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 35.5 KB
/* $Id: VBoxMPVbva.cpp 50831 2014-03-20 17:40:50Z vboxsync $ */

/** @file
 * VBox WDDM Miniport driver
 */

/*
 * Copyright (C) 2011-2012 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#include "VBoxMPWddm.h"
#include "common/VBoxMPCommon.h"

/*
 * Public hardware buffer methods.
 */
int vboxVbvaEnable(PVBOXMP_DEVEXT pDevExt, VBOXVBVAINFO *pVbva)
{
    if (VBoxVBVAEnable(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx,
                       pVbva->Vbva.pVBVA, pVbva->srcId))
        return VINF_SUCCESS;

    WARN(("VBoxVBVAEnable failed!"));
    return VERR_GENERAL_FAILURE;
}

int vboxVbvaDisable(PVBOXMP_DEVEXT pDevExt, VBOXVBVAINFO *pVbva)
{
    VBoxVBVADisable(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, pVbva->srcId);
    return VINF_SUCCESS;
}

int vboxVbvaCreate(PVBOXMP_DEVEXT pDevExt, VBOXVBVAINFO *pVbva, ULONG offBuffer, ULONG cbBuffer, D3DDDI_VIDEO_PRESENT_SOURCE_ID srcId)
{
    memset(pVbva, 0, sizeof(VBOXVBVAINFO));

    KeInitializeSpinLock(&pVbva->Lock);

    int rc = VBoxMPCmnMapAdapterMemory(VBoxCommonFromDeviceExt(pDevExt),
                                       (void**)&pVbva->Vbva.pVBVA,
                                       offBuffer,
                                       cbBuffer);
    if (RT_SUCCESS(rc))
    {
        Assert(pVbva->Vbva.pVBVA);
        VBoxVBVASetupBufferContext(&pVbva->Vbva, offBuffer, cbBuffer);
        pVbva->srcId = srcId;
    }
    else
    {
        WARN(("VBoxMPCmnMapAdapterMemory failed rc %d", rc));
    }

    return rc;
}

int vboxVbvaDestroy(PVBOXMP_DEVEXT pDevExt, VBOXVBVAINFO *pVbva)
{
    int rc = VINF_SUCCESS;
    VBoxMPCmnUnmapAdapterMemory(VBoxCommonFromDeviceExt(pDevExt), (void**)&pVbva->Vbva.pVBVA);
    memset(pVbva, 0, sizeof (VBOXVBVAINFO));
    return rc;
}

int vboxVbvaReportDirtyRect(PVBOXMP_DEVEXT pDevExt, PVBOXWDDM_SOURCE pSrc, RECT *pRectOrig)
{
    VBVACMDHDR hdr;

    RECT rect = *pRectOrig;

//    if (rect.left < 0) rect.left = 0;
//    if (rect.top < 0) rect.top = 0;
//    if (rect.right > (int)ppdev->cxScreen) rect.right = ppdev->cxScreen;
//    if (rect.bottom > (int)ppdev->cyScreen) rect.bottom = ppdev->cyScreen;

    hdr.x = (int16_t)rect.left;
    hdr.y = (int16_t)rect.top;
    hdr.w = (uint16_t)(rect.right - rect.left);
    hdr.h = (uint16_t)(rect.bottom - rect.top);

    hdr.x += (int16_t)pSrc->VScreenPos.x;
    hdr.y += (int16_t)pSrc->VScreenPos.y;

    if (VBoxVBVAWrite(&pSrc->Vbva.Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, &hdr, sizeof(hdr)))
        return VINF_SUCCESS;

    WARN(("VBoxVBVAWrite failed"));
    return VERR_GENERAL_FAILURE;
}
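
/*
 * Illustrative usage sketch added in editing (not part of the original
 * driver); pSrc stands for a VBOXWDDM_SOURCE whose VBVA channel has been set
 * up via vboxVbvaCreate()/vboxVbvaEnable():
 * @code
 *     RECT updated = { 0, 0, 640, 480 };
 *     int rc = vboxVbvaReportDirtyRect(pDevExt, pSrc, &updated);
 *     if (RT_FAILURE(rc))
 *         WARN(("failed to report dirty rect"));
 * @endcode
 */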

/* command vbva ring buffer */

/* customized VBVA implementation */
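
/*
 * Editorial overview of the ring layout used below: VBVABUFFER holds a fixed
 * array of VBVARECORD entries (aRecords) plus a byte ring (au8Data) of cbData
 * bytes.  The guest appends data at off32Free and allocates records at
 * indexRecordFree; the host consumes from off32Data/indexRecordFirst.  On top
 * of that, VBVAEXBUFFERCONTEXT tracks indexRecordFirstUncompleted and
 * off32DataUncompleted, i.e. the oldest record/data the guest has not yet seen
 * completed, which is what the iterators and the "avail" computations below
 * are based on.
 */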

/* Forward declarations of internal functions. */
static void vboxHwBufferPlaceDataAt(PVBVAEXBUFFERCONTEXT pCtx, const void *p,
                                    uint32_t cb, uint32_t offset);
static bool vboxHwBufferWrite(PVBVAEXBUFFERCONTEXT pCtx,
                              PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
                              const void *p, uint32_t cb);

DECLINLINE(void) vboxVBVAExFlush(struct VBVAEXBUFFERCONTEXT *pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx)
{
    pCtx->pfnFlush(pCtx, pHGSMICtx, pCtx->pvFlush);
}

static int vboxCmdVbvaSubmitHgsmi(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, HGSMIOFFSET offDr)
{
    VBoxVideoCmnPortWriteUlong(pHGSMICtx->port, offDr);
    return VINF_SUCCESS;
}
#define vboxCmdVbvaSubmit vboxCmdVbvaSubmitHgsmi

static VBOXCMDVBVA_CTL * vboxCmdVbvaCtlCreate(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, uint32_t cbCtl)
{
    Assert(cbCtl >= sizeof (VBOXCMDVBVA_CTL));
    return (VBOXCMDVBVA_CTL*)VBoxSHGSMICommandAlloc(&pHGSMICtx->heapCtx, cbCtl, HGSMI_CH_VBVA, VBVA_CMDVBVA_CTL);
}

static void vboxCmdVbvaCtlFree(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CTL * pCtl)
{
    VBoxSHGSMICommandFree(&pHGSMICtx->heapCtx, pCtl);
}

static int vboxCmdVbvaCtlSubmitSync(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CTL * pCtl)
{
    const VBOXSHGSMIHEADER* pHdr = VBoxSHGSMICommandPrepSynch(&pHGSMICtx->heapCtx, pCtl);
    if (!pHdr)
    {
        WARN(("VBoxSHGSMICommandPrepSynch returned NULL"));
        return VERR_INVALID_PARAMETER;
    }

    HGSMIOFFSET offCmd = VBoxSHGSMICommandOffset(&pHGSMICtx->heapCtx, pHdr);
    if (offCmd == HGSMIOFFSET_VOID)
    {
        WARN(("VBoxSHGSMICommandOffset returned HGSMIOFFSET_VOID"));
        VBoxSHGSMICommandCancelSynch(&pHGSMICtx->heapCtx, pHdr);
        return VERR_INVALID_PARAMETER;
    }

    int rc = vboxCmdVbvaSubmit(pHGSMICtx, offCmd);
    if (RT_SUCCESS(rc))
    {
        rc = VBoxSHGSMICommandDoneSynch(&pHGSMICtx->heapCtx, pHdr);
        if (RT_SUCCESS(rc))
        {
            rc = pCtl->i32Result;
            if (!RT_SUCCESS(rc))
                WARN(("pCtl->i32Result %d", pCtl->i32Result));

            return rc;
        }
        else
            WARN(("VBoxSHGSMICommandDoneSynch returned %d", rc));
    }
    else
        WARN(("vboxCmdVbvaSubmit returned %d", rc));

    VBoxSHGSMICommandCancelSynch(&pHGSMICtx->heapCtx, pHdr);

    return rc;
}

static int vboxCmdVbvaCtlSubmitAsync(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CTL * pCtl, PFNVBOXSHGSMICMDCOMPLETION_IRQ pfnCompletionIrq, void *pvCompletionIrq)
{
    const VBOXSHGSMIHEADER* pHdr = VBoxSHGSMICommandPrepAsynchIrq(&pHGSMICtx->heapCtx, pCtl, pfnCompletionIrq, pvCompletionIrq, VBOXSHGSMI_FLAG_GH_ASYNCH_FORCE);
    if (!pHdr)
    {
        WARN(("VBoxSHGSMICommandPrepAsynchIrq returned NULL"));
        return VERR_INVALID_PARAMETER;
    }

    HGSMIOFFSET offCmd = VBoxSHGSMICommandOffset(&pHGSMICtx->heapCtx, pHdr);
    if (offCmd == HGSMIOFFSET_VOID)
    {
        WARN(("VBoxSHGSMICommandOffset returned HGSMIOFFSET_VOID"));
        VBoxSHGSMICommandCancelAsynch(&pHGSMICtx->heapCtx, pHdr);
        return VERR_INVALID_PARAMETER;
    }

    int rc = vboxCmdVbvaSubmit(pHGSMICtx, offCmd);
    if (RT_SUCCESS(rc))
    {
        VBoxSHGSMICommandDoneAsynch(&pHGSMICtx->heapCtx, pHdr);
        return rc;
    }
    else
        WARN(("vboxCmdVbvaSubmit returned %d", rc));

    VBoxSHGSMICommandCancelAsynch(&pHGSMICtx->heapCtx, pHdr);

    return rc;
}

static int vboxVBVAExCtlSubmitEnableDisable(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, bool fEnable)
{
    VBOXCMDVBVA_CTL_ENABLE *pCtl = (VBOXCMDVBVA_CTL_ENABLE*)vboxCmdVbvaCtlCreate(pHGSMICtx, sizeof (*pCtl));
    if (!pCtl)
    {
        WARN(("vboxCmdVbvaCtlCreate failed"));
        return VERR_NO_MEMORY;
    }

    pCtl->Hdr.u32Type = VBOXCMDVBVACTL_TYPE_ENABLE;
    pCtl->Hdr.i32Result = VERR_NOT_IMPLEMENTED;
    memset(&pCtl->Enable, 0, sizeof (pCtl->Enable));
    pCtl->Enable.u32Flags  = fEnable ? VBVA_F_ENABLE : VBVA_F_DISABLE;
    pCtl->Enable.u32Offset = pCtx->offVRAMBuffer;
    pCtl->Enable.i32Result = VERR_NOT_SUPPORTED;
    pCtl->Enable.u32Flags |= VBVA_F_ABSOFFSET;

    int rc = vboxCmdVbvaCtlSubmitSync(pHGSMICtx, &pCtl->Hdr);
    if (RT_SUCCESS(rc))
    {
        rc = pCtl->Hdr.i32Result;
        if (!RT_SUCCESS(rc))
            WARN(("vboxCmdVbvaCtlSubmitSync enable/disable failed %d", rc));
    }
    else
        WARN(("vboxCmdVbvaCtlSubmitSync returned %d", rc));

    vboxCmdVbvaCtlFree(pHGSMICtx, &pCtl->Hdr);

    return rc;
}

/*
 * Public hardware buffer methods.
 */
RTDECL(int) VBoxVBVAExEnable(PVBVAEXBUFFERCONTEXT pCtx,
                             PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
                             VBVABUFFER *pVBVA)
{
    int rc = VERR_GENERAL_FAILURE;

    LogFlowFunc(("pVBVA %p\n", pVBVA));

#if 0 /* All callers check this */
    if (ppdev->bHGSMISupported)
#endif
    {
        LogFunc(("pVBVA %p vbva off 0x%x\n", pVBVA, pCtx->offVRAMBuffer));

        pVBVA->hostFlags.u32HostEvents = 0;
        pVBVA->hostFlags.u32SupportedOrders = 0;
        pVBVA->off32Data = 0;
        pVBVA->off32Free = 0;
        memset(pVBVA->aRecords, 0, sizeof (pVBVA->aRecords));
        pVBVA->indexRecordFirst = 0;
        pVBVA->indexRecordFree = 0;
        pVBVA->cbPartialWriteThreshold = 256;
        pVBVA->cbData = pCtx->cbBuffer - sizeof (VBVABUFFER) + sizeof (pVBVA->au8Data);

        pCtx->fHwBufferOverflow = false;
        pCtx->pRecord = NULL;
        pCtx->pVBVA = pVBVA;

        rc = vboxVBVAExCtlSubmitEnableDisable(pCtx, pHGSMICtx, true);
    }

    if (!RT_SUCCESS(rc))
    {
        WARN(("enable failed %d", rc));
        VBoxVBVAExDisable(pCtx, pHGSMICtx);
    }

    return rc;
}

RTDECL(void) VBoxVBVAExDisable(PVBVAEXBUFFERCONTEXT pCtx,
                               PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx)
{
    LogFlowFunc(("\n"));

    pCtx->fHwBufferOverflow = false;
    pCtx->pRecord = NULL;
    pCtx->pVBVA = NULL;

    vboxVBVAExCtlSubmitEnableDisable(pCtx, pHGSMICtx, false);

    return;
}

RTDECL(bool) VBoxVBVAExBufferBeginUpdate(PVBVAEXBUFFERCONTEXT pCtx,
                                         PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx)
{
    bool bRc = false;

    // LogFunc(("flags = 0x%08X\n", pCtx->pVBVA ? pCtx->pVBVA->u32HostEvents : -1));

    if (   pCtx->pVBVA
        && (pCtx->pVBVA->hostFlags.u32HostEvents & VBVA_F_MODE_ENABLED))
    {
        uint32_t indexRecordNext;

        Assert(!pCtx->fHwBufferOverflow);
        Assert(pCtx->pRecord == NULL);

        indexRecordNext = (pCtx->pVBVA->indexRecordFree + 1) % VBVA_MAX_RECORDS;

        if (indexRecordNext == pCtx->indexRecordFirstUncompleted)
        {
            /* All slots in the records queue are used. */
            vboxVBVAExFlush(pCtx, pHGSMICtx);
        }

        if (indexRecordNext == pCtx->indexRecordFirstUncompleted)
        {
            /* Even after the flush there is no space. Fail the request. */
            LogFunc(("no space in the queue of records!!! first %d, last %d\n",
                     indexRecordNext, pCtx->pVBVA->indexRecordFree));
        }
        else
        {
            /* Initialize the record. */
            VBVARECORD *pRecord = &pCtx->pVBVA->aRecords[pCtx->pVBVA->indexRecordFree];

            pRecord->cbRecord = VBVA_F_RECORD_PARTIAL;

            pCtx->pVBVA->indexRecordFree = indexRecordNext;

            // LogFunc(("indexRecordNext = %d\n", indexRecordNext));

            /* Remember which record we are using. */
            pCtx->pRecord = pRecord;

            bRc = true;
        }
    }

    return bRc;
}

RTDECL(void) VBoxVBVAExBufferEndUpdate(PVBVAEXBUFFERCONTEXT pCtx)
{
    VBVARECORD *pRecord;

    // LogFunc(("\n"));

    Assert(pCtx->pVBVA);

    pRecord = pCtx->pRecord;
    Assert(pRecord && (pRecord->cbRecord & VBVA_F_RECORD_PARTIAL));

    /* Mark the record completed. */
    pRecord->cbRecord &= ~VBVA_F_RECORD_PARTIAL;

    pCtx->fHwBufferOverflow = false;
    pCtx->pRecord = NULL;

    return;
}

DECLINLINE(bool) vboxVBVAExIsEntryInRange(uint32_t u32First, uint32_t u32Entry, uint32_t u32Free)
{
    return (   u32First != u32Free
            && (
                   (u32First < u32Free && u32Entry >= u32First && u32Entry < u32Free)
                || (u32First > u32Free && (u32Entry >= u32First || u32Entry < u32Free))
               )
           );
}
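
/*
 * Worked example (editorial note): the [u32First, u32Free) range may wrap.
 * With a 64-entry ring, u32First == 60 and u32Free == 4, the live entries are
 * 60..63 and 0..3, so:
 *     vboxVBVAExIsEntryInRange(60, 62, 4) -> true
 *     vboxVBVAExIsEntryInRange(60,  2, 4) -> true
 *     vboxVBVAExIsEntryInRange(60,  4, 4) -> false (first free slot)
 * An empty range (u32First == u32Free) contains nothing.
 */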

#ifdef DEBUG

DECLINLINE(void) vboxHwBufferVerifyCompleted(PVBVAEXBUFFERCONTEXT pCtx)
{
    VBVABUFFER *pVBVA = pCtx->pVBVA;
    if (!vboxVBVAExIsEntryInRange(pCtx->indexRecordFirstUncompleted, pVBVA->indexRecordFirst, pVBVA->indexRecordFree))
    {
        WARN(("invalid record set"));
    }

    if (!vboxVBVAExIsEntryInRange(pCtx->off32DataUncompleted, pVBVA->off32Data, pVBVA->off32Free))
    {
        WARN(("invalid data set"));
    }
}
#endif

/*
 * Private operations.
 */
static uint32_t vboxHwBufferAvail(PVBVAEXBUFFERCONTEXT pCtx, const VBVABUFFER *pVBVA)
{
    int32_t i32Diff = pCtx->off32DataUncompleted - pVBVA->off32Free;

    return i32Diff > 0 ? i32Diff : pVBVA->cbData + i32Diff;
}

static uint32_t vboxHwBufferContiguousAvail(PVBVAEXBUFFERCONTEXT pCtx, const VBVABUFFER *pVBVA)
{
    int32_t i32Diff = pCtx->off32DataUncompleted - pVBVA->off32Free;

    return i32Diff > 0 ? i32Diff : pVBVA->cbData - pVBVA->off32Free;
}
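
/*
 * Editorial example for the two helpers above: with cbData = 1000,
 * off32DataUncompleted = 100 and off32Free = 900, i32Diff = -800, so
 * vboxHwBufferAvail() returns 200 (the wrap-around free space at bytes
 * 900..999 plus 0..99), while vboxHwBufferContiguousAvail() returns only the
 * 100 bytes up to the buffer end (900..999).
 */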

static void vboxHwBufferPlaceDataAt(PVBVAEXBUFFERCONTEXT pCtx, const void *p,
                                    uint32_t cb, uint32_t offset)
{
    VBVABUFFER *pVBVA = pCtx->pVBVA;
    uint32_t u32BytesTillBoundary = pVBVA->cbData - offset;
    uint8_t *dst = &pVBVA->au8Data[offset];
    int32_t i32Diff = cb - u32BytesTillBoundary;

    if (i32Diff <= 0)
    {
        /* Chunk will not cross buffer boundary. */
        memcpy(dst, p, cb);
    }
    else
    {
        /* Chunk crosses buffer boundary. */
        memcpy(dst, p, u32BytesTillBoundary);
        memcpy(&pVBVA->au8Data[0], (uint8_t *)p + u32BytesTillBoundary, i32Diff);
    }

    return;
}

static bool vboxHwBufferWrite(PVBVAEXBUFFERCONTEXT pCtx,
                              PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
                              const void *p, uint32_t cb)
{
    VBVARECORD *pRecord;
    uint32_t cbHwBufferAvail;

    uint32_t cbWritten = 0;

    VBVABUFFER *pVBVA = pCtx->pVBVA;
    Assert(pVBVA);

    if (!pVBVA || pCtx->fHwBufferOverflow)
    {
        return false;
    }

    Assert(pVBVA->indexRecordFirst != pVBVA->indexRecordFree);
    Assert(pCtx->indexRecordFirstUncompleted != pVBVA->indexRecordFree);

    pRecord = pCtx->pRecord;
    Assert(pRecord && (pRecord->cbRecord & VBVA_F_RECORD_PARTIAL));

    // LogFunc(("%d\n", cb));

    cbHwBufferAvail = vboxHwBufferAvail(pCtx, pVBVA);

    while (cb > 0)
    {
        uint32_t cbChunk = cb;

        // LogFunc(("pVBVA->off32Free %d, pRecord->cbRecord 0x%08X, cbHwBufferAvail %d, cb %d, cbWritten %d\n",
        //          pVBVA->off32Free, pRecord->cbRecord, cbHwBufferAvail, cb, cbWritten));

        if (cbChunk >= cbHwBufferAvail)
        {
            LogFunc(("1) avail %d, chunk %d\n", cbHwBufferAvail, cbChunk));

            vboxVBVAExFlush(pCtx, pHGSMICtx);

            cbHwBufferAvail = vboxHwBufferAvail(pCtx, pVBVA);

            if (cbChunk >= cbHwBufferAvail)
            {
                WARN(("no space for %d bytes. Only %d bytes available after flush. Going to partial writes.\n",
                      cb, cbHwBufferAvail));

                if (cbHwBufferAvail <= pVBVA->cbPartialWriteThreshold)
                {
                    WARN(("Buffer overflow!!!\n"));
                    pCtx->fHwBufferOverflow = true;
                    Assert(false);
                    return false;
                }

                cbChunk = cbHwBufferAvail - pVBVA->cbPartialWriteThreshold;
            }
        }

        Assert(cbChunk <= cb);
        Assert(cbChunk <= vboxHwBufferAvail(pCtx, pVBVA));

        vboxHwBufferPlaceDataAt(pCtx, (uint8_t *)p + cbWritten, cbChunk, pVBVA->off32Free);

        pVBVA->off32Free = (pVBVA->off32Free + cbChunk) % pVBVA->cbData;
        pRecord->cbRecord += cbChunk;
        cbHwBufferAvail -= cbChunk;

        cb -= cbChunk;
        cbWritten += cbChunk;
    }

    return true;
}

/*
 * Public writer to the hardware buffer.
 */
RTDECL(uint32_t) VBoxVBVAExGetFreeTail(PVBVAEXBUFFERCONTEXT pCtx)
{
    VBVABUFFER *pVBVA = pCtx->pVBVA;
    if (pVBVA->off32Data <= pVBVA->off32Free)
        return pVBVA->cbData - pVBVA->off32Free;
    return 0;
}

RTDECL(void*) VBoxVBVAExAllocContiguous(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, uint32_t cb)
{
    VBVARECORD *pRecord;
    uint32_t cbHwBufferContiguousAvail;
    uint32_t offset;

    VBVABUFFER *pVBVA = pCtx->pVBVA;
    Assert(pVBVA);

    if (!pVBVA || pCtx->fHwBufferOverflow)
    {
        return NULL;
    }

    Assert(pVBVA->indexRecordFirst != pVBVA->indexRecordFree);
    Assert(pCtx->indexRecordFirstUncompleted != pVBVA->indexRecordFree);

    pRecord = pCtx->pRecord;
    Assert(pRecord && (pRecord->cbRecord & VBVA_F_RECORD_PARTIAL));

    // LogFunc(("%d\n", cb));

    if (pVBVA->cbData < cb)
    {
        WARN(("requested to allocate a buffer of size %d, bigger than the VBVA ring buffer size %d", cb, pVBVA->cbData));
        return NULL;
    }

    cbHwBufferContiguousAvail = vboxHwBufferContiguousAvail(pCtx, pVBVA);

    if (cbHwBufferContiguousAvail < cb)
    {
        if (cb < pVBVA->cbData - pVBVA->off32Free)
        {
            /* The entire contiguous part is smaller than the requested buffer. */
            return NULL;
        }

        vboxVBVAExFlush(pCtx, pHGSMICtx);

        cbHwBufferContiguousAvail = vboxHwBufferContiguousAvail(pCtx, pVBVA);
        if (cbHwBufferContiguousAvail < cb)
        {
            /* This is really bad: the host did not clean up the buffer even after we requested a flush. */
            WARN(("Host did not clean up the buffer!"));
            return NULL;
        }
    }

    offset = pVBVA->off32Free;

    pVBVA->off32Free = (pVBVA->off32Free + cb) % pVBVA->cbData;
    pRecord->cbRecord += cb;

    return &pVBVA->au8Data[offset];
}
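
/*
 * Typical submission sequence (editorial sketch, mirroring what
 * VBoxCmdVbvaSubmit() below actually does; pCtx, pHGSMICtx, pvCmd and cbCmd
 * are stand-ins for the caller's context and command):
 * @code
 *     if (VBoxVBVAExBufferBeginUpdate(pCtx, pHGSMICtx))
 *     {
 *         void *pv = VBoxVBVAExAllocContiguous(pCtx, pHGSMICtx, cbCmd);
 *         if (pv)
 *             memcpy(pv, pvCmd, cbCmd);
 *         VBoxVBVAExBufferEndUpdate(pCtx);
 *     }
 * @endcode
 */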

RTDECL(bool) VBoxVBVAExIsProcessing(PVBVAEXBUFFERCONTEXT pCtx)
{
    uint32_t u32HostEvents = pCtx->pVBVA->hostFlags.u32HostEvents;
    return !!(u32HostEvents & VBVA_F_STATE_PROCESSING);
}

RTDECL(void) VBoxVBVAExCBufferCompleted(PVBVAEXBUFFERCONTEXT pCtx)
{
    VBVABUFFER *pVBVA = pCtx->pVBVA;
    uint32_t cbBuffer = pVBVA->aRecords[pCtx->indexRecordFirstUncompleted].cbRecord;
    pCtx->indexRecordFirstUncompleted = (pCtx->indexRecordFirstUncompleted + 1) % VBVA_MAX_RECORDS;
    pCtx->off32DataUncompleted = (pCtx->off32DataUncompleted + cbBuffer) % pVBVA->cbData;
#ifdef DEBUG
    vboxHwBufferVerifyCompleted(pCtx);
#endif
}

RTDECL(bool) VBoxVBVAExWrite(PVBVAEXBUFFERCONTEXT pCtx,
                             PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
                             const void *pv, uint32_t cb)
{
    return vboxHwBufferWrite(pCtx, pHGSMICtx, pv, cb);
}

RTDECL(bool) VBoxVBVAExOrderSupported(PVBVAEXBUFFERCONTEXT pCtx, unsigned code)
{
    VBVABUFFER *pVBVA = pCtx->pVBVA;

    if (!pVBVA)
    {
        return false;
    }

    if (pVBVA->hostFlags.u32SupportedOrders & (1 << code))
    {
        return true;
    }

    return false;
}

RTDECL(void) VBoxVBVAExSetupBufferContext(PVBVAEXBUFFERCONTEXT pCtx,
                                          uint32_t offVRAMBuffer,
                                          uint32_t cbBuffer,
                                          PFNVBVAEXBUFFERFLUSH pfnFlush,
                                          void *pvFlush)
{
    memset(pCtx, 0, RT_OFFSETOF(VBVAEXBUFFERCONTEXT, pVBVA));
    pCtx->offVRAMBuffer = offVRAMBuffer;
    pCtx->cbBuffer = cbBuffer;
    pCtx->pfnFlush = pfnFlush;
    pCtx->pvFlush = pvFlush;
}

static void* vboxVBVAExIterCur(PVBVAEXBUFFERITERBASE pIter, struct VBVABUFFER *pVBVA, uint32_t *pcbBuffer, bool *pfProcessed)
{
    uint32_t cbRecord = pVBVA->aRecords[pIter->iCurRecord].cbRecord;
    if (cbRecord == VBVA_F_RECORD_PARTIAL)
        return NULL;
    if (pcbBuffer)
        *pcbBuffer = cbRecord;
    if (pfProcessed)
        *pfProcessed = !vboxVBVAExIsEntryInRange(pVBVA->indexRecordFirst, pIter->iCurRecord, pVBVA->indexRecordFree);
    return &pVBVA->au8Data[pIter->off32CurCmd];
}

DECLINLINE(uint32_t) vboxVBVAExSubst(uint32_t x, uint32_t val, uint32_t maxVal)
{
    int32_t result = (int32_t)(x - val);
    return result >= 0 ? (uint32_t)result : maxVal - (((uint32_t)(-result)) % maxVal);
}
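
/*
 * Editorial example: vboxVBVAExSubst() is a wrap-around (modular)
 * subtraction, e.g. vboxVBVAExSubst(2, 5, 64) == 61 -- stepping five slots
 * back from index 2 wraps to the tail of a 64-entry ring.
 */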

RTDECL(void) VBoxVBVAExBIterInit(PVBVAEXBUFFERCONTEXT pCtx, PVBVAEXBUFFERBACKWARDITER pIter)
{
    struct VBVABUFFER *pVBVA = pCtx->pVBVA;
    pIter->Base.pCtx = pCtx;
    uint32_t iCurRecord = vboxVBVAExSubst(pVBVA->indexRecordFree, 1, VBVA_MAX_RECORDS);
    if (vboxVBVAExIsEntryInRange(pCtx->indexRecordFirstUncompleted, iCurRecord, pVBVA->indexRecordFree))
    {
        /* Even if the command gets completed by the time we read the
         * pCtx->pVBVA->aRecords[iCurRecord].cbRecord below, that cbRecord will
         * still be valid, as it can only be modified by a submitter, and we
         * are in a submitter context now. */
        pIter->Base.iCurRecord = iCurRecord;
        pIter->Base.off32CurCmd = vboxVBVAExSubst(pVBVA->off32Free, pCtx->pVBVA->aRecords[iCurRecord].cbRecord, pVBVA->cbData);
    }
    else
    {
        /* No data. */
        pIter->Base.iCurRecord = pVBVA->indexRecordFree;
        pIter->Base.off32CurCmd = pVBVA->off32Free;
    }
}

RTDECL(void*) VBoxVBVAExBIterNext(PVBVAEXBUFFERBACKWARDITER pIter, uint32_t *pcbBuffer, bool *pfProcessed)
{
    PVBVAEXBUFFERCONTEXT pCtx = pIter->Base.pCtx;
    struct VBVABUFFER *pVBVA = pCtx->pVBVA;
    uint32_t indexRecordFirstUncompleted = pCtx->indexRecordFirstUncompleted;
    if (!vboxVBVAExIsEntryInRange(indexRecordFirstUncompleted, pIter->Base.iCurRecord, pVBVA->indexRecordFree))
        return NULL;

    void *pvBuffer = vboxVBVAExIterCur(&pIter->Base, pVBVA, pcbBuffer, pfProcessed);
    AssertRelease(pvBuffer);

    /* Even if the command gets completed by the time we read the
     * pCtx->pVBVA->aRecords[pIter->Base.iCurRecord].cbRecord below, that
     * cbRecord will still be valid, as it can only be modified by a
     * submitter, and we are in a submitter context now. */
    pIter->Base.iCurRecord = vboxVBVAExSubst(pIter->Base.iCurRecord, 1, VBVA_MAX_RECORDS);
    pIter->Base.off32CurCmd = vboxVBVAExSubst(pIter->Base.off32CurCmd, pCtx->pVBVA->aRecords[pIter->Base.iCurRecord].cbRecord, pVBVA->cbData);

    return pvBuffer;
}

RTDECL(void) VBoxVBVAExCFIterInit(PVBVAEXBUFFERCONTEXT pCtx, PVBVAEXBUFFERFORWARDITER pIter)
{
    pIter->Base.pCtx = pCtx;
    pIter->Base.iCurRecord = pCtx->indexRecordFirstUncompleted;
    pIter->Base.off32CurCmd = pCtx->off32DataUncompleted;
}

RTDECL(void*) VBoxVBVAExCFIterNext(PVBVAEXBUFFERFORWARDITER pIter, uint32_t *pcbBuffer, bool *pfProcessed)
{
    PVBVAEXBUFFERCONTEXT pCtx = pIter->Base.pCtx;
    struct VBVABUFFER *pVBVA = pCtx->pVBVA;
    uint32_t indexRecordFree = pVBVA->indexRecordFree;
    if (!vboxVBVAExIsEntryInRange(pCtx->indexRecordFirstUncompleted, pIter->Base.iCurRecord, indexRecordFree))
        return NULL;

    uint32_t cbBuffer;
    void *pvData = vboxVBVAExIterCur(&pIter->Base, pVBVA, &cbBuffer, pfProcessed);
    if (!pvData)
        return NULL;

    pIter->Base.iCurRecord = (pIter->Base.iCurRecord + 1) % VBVA_MAX_RECORDS;
    pIter->Base.off32CurCmd = (pIter->Base.off32CurCmd + cbBuffer) % pVBVA->cbData;

    if (pcbBuffer)
        *pcbBuffer = cbBuffer;

    return pvData;
}
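
/*
 * Iterator usage sketch (editorial; both the forward and the backward
 * iterator are driven the same way, cf. VBoxCmdVbvaCheckCompletedIrq() and
 * VBoxCmdVbvaPreempt() below; pCtx stands for the caller's buffer context):
 * @code
 *     VBVAEXBUFFERFORWARDITER Iter;
 *     VBoxVBVAExCFIterInit(pCtx, &Iter);
 *
 *     uint32_t cbCmd;
 *     bool fProcessed;
 *     void *pvCmd;
 *     while ((pvCmd = VBoxVBVAExCFIterNext(&Iter, &cbCmd, &fProcessed)) != NULL)
 *     {
 *         // inspect the VBOXCMDVBVA_HDR at pvCmd
 *     }
 * @endcode
 */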

/**/

int VBoxCmdVbvaEnable(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
{
    return VBoxVBVAExEnable(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, pVbva->Vbva.pVBVA);
}

int VBoxCmdVbvaDisable(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
{
    VBoxVBVAExDisable(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx);
    return VINF_SUCCESS;
}

int VBoxCmdVbvaDestroy(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
{
    int rc = VINF_SUCCESS;
    VBoxMPCmnUnmapAdapterMemory(VBoxCommonFromDeviceExt(pDevExt), (void**)&pVbva->Vbva.pVBVA);
    memset(pVbva, 0, sizeof (*pVbva));
    return rc;
}

static void vboxCmdVbvaDdiNotifyCompleteIrq(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, UINT u32FenceId, DXGK_INTERRUPT_TYPE enmComplType)
{
    DXGKARGCB_NOTIFY_INTERRUPT_DATA notify;
    memset(&notify, 0, sizeof(DXGKARGCB_NOTIFY_INTERRUPT_DATA));
    switch (enmComplType)
    {
        case DXGK_INTERRUPT_DMA_COMPLETED:
            notify.InterruptType = DXGK_INTERRUPT_DMA_COMPLETED;
            notify.DmaCompleted.SubmissionFenceId = u32FenceId;
            notify.DmaCompleted.NodeOrdinal = pVbva->idNode;
            break;

        case DXGK_INTERRUPT_DMA_PREEMPTED:
            notify.InterruptType = DXGK_INTERRUPT_DMA_PREEMPTED;
            notify.DmaPreempted.PreemptionFenceId = u32FenceId;
            notify.DmaPreempted.NodeOrdinal = pVbva->idNode;
            notify.DmaPreempted.LastCompletedFenceId = pVbva->u32FenceCompleted;
            break;

        case DXGK_INTERRUPT_DMA_FAULTED:
            Assert(0);
            notify.InterruptType = DXGK_INTERRUPT_DMA_FAULTED;
            notify.DmaFaulted.FaultedFenceId = u32FenceId;
            notify.DmaFaulted.Status = STATUS_UNSUCCESSFUL; /* @todo: better status ? */
            notify.DmaFaulted.NodeOrdinal = pVbva->idNode;
            break;

        default:
            WARN(("unrecognized completion type %d", enmComplType));
            break;
    }

    pDevExt->u.primary.DxgkInterface.DxgkCbNotifyInterrupt(pDevExt->u.primary.DxgkInterface.DeviceHandle, &notify);
}

typedef struct VBOXCMDVBVA_NOTIFYCOMPLETED_CB
{
    PVBOXMP_DEVEXT pDevExt;
    VBOXCMDVBVA *pVbva;
    volatile UINT *pu32FenceId;
    DXGK_INTERRUPT_TYPE enmComplType;
} VBOXCMDVBVA_NOTIFYCOMPLETED_CB, *PVBOXCMDVBVA_NOTIFYCOMPLETED_CB;

static BOOLEAN vboxCmdVbvaDdiNotifyCompleteCb(PVOID pvContext)
{
    PVBOXCMDVBVA_NOTIFYCOMPLETED_CB pData = (PVBOXCMDVBVA_NOTIFYCOMPLETED_CB)pvContext;
    if (*pData->pu32FenceId)
    {
        UINT u32FenceId = *pData->pu32FenceId;
        *pData->pu32FenceId = 0;

        vboxCmdVbvaDdiNotifyCompleteIrq(pData->pDevExt, pData->pVbva, u32FenceId, pData->enmComplType);

        pData->pDevExt->u.primary.DxgkInterface.DxgkCbQueueDpc(pData->pDevExt->u.primary.DxgkInterface.DeviceHandle);

        return TRUE;
    }

    return FALSE;
}

static int vboxCmdVbvaDdiNotifyComplete(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, volatile UINT *pu32FenceId, DXGK_INTERRUPT_TYPE enmComplType)
{
    VBOXCMDVBVA_NOTIFYCOMPLETED_CB Data;
    Data.pDevExt = pDevExt;
    Data.pVbva = pVbva;
    Data.pu32FenceId = pu32FenceId;
    Data.enmComplType = enmComplType;
    BOOLEAN bDummy;
    NTSTATUS Status = pDevExt->u.primary.DxgkInterface.DxgkCbSynchronizeExecution(
                          pDevExt->u.primary.DxgkInterface.DeviceHandle,
                          vboxCmdVbvaDdiNotifyCompleteCb,
                          &Data,
                          0, /* IN ULONG MessageNumber */
                          &bDummy);
    if (!NT_SUCCESS(Status))
    {
        WARN(("DxgkCbSynchronizeExecution failed Status %#x", Status));
        return VERR_GENERAL_FAILURE;
    }
    return Status;
}
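
/*
 * Editorial note on the pattern above: DxgkCbSynchronizeExecution runs
 * vboxCmdVbvaDdiNotifyCompleteCb at the device interrupt level, serialized
 * against the adapter's interrupt routine, which is what makes the
 * read-and-clear of *pu32FenceId safe against a concurrent completion
 * reported from the ISR path (cf. VBoxCmdVbvaCheckCompletedIrq below).
 */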

static int vboxCmdVbvaFlush(PVBOXMP_DEVEXT pDevExt, HGSMIGUESTCOMMANDCONTEXT *pCtx, bool fBufferOverflow)
{
    /* Issue the flush command. */
    VBVACMDVBVAFLUSH *pFlush = (VBVACMDVBVAFLUSH*)VBoxHGSMIBufferAlloc(pCtx,
                                                                       sizeof (VBVACMDVBVAFLUSH),
                                                                       HGSMI_CH_VBVA,
                                                                       VBVA_CMDVBVA_FLUSH);
    if (!pFlush)
    {
        WARN(("VBoxHGSMIBufferAlloc failed\n"));
        return VERR_OUT_OF_RESOURCES;
    }

    pFlush->u32Flags = fBufferOverflow ? VBVACMDVBVAFLUSH_F_GUEST_BUFFER_OVERFLOW : 0;

    VBoxHGSMIBufferSubmit(pCtx, pFlush);

    VBoxHGSMIBufferFree(pCtx, pFlush);

    return VINF_SUCCESS;
}

typedef struct VBOXCMDVBVA_CHECK_COMPLETED_CB
{
    PVBOXMP_DEVEXT pDevExt;
    VBOXCMDVBVA *pVbva;
    uint32_t u32FenceID;
} VBOXCMDVBVA_CHECK_COMPLETED_CB;

static BOOLEAN vboxCmdVbvaCheckCompletedIrqCb(PVOID pContext)
{
    VBOXCMDVBVA_CHECK_COMPLETED_CB *pCompleted = (VBOXCMDVBVA_CHECK_COMPLETED_CB*)pContext;
    BOOLEAN bRc = DxgkDdiInterruptRoutineNew(pCompleted->pDevExt, 0);
    if (pCompleted->pVbva)
        pCompleted->u32FenceID = pCompleted->pVbva->u32FenceCompleted;
    return bRc;
}

static uint32_t vboxCmdVbvaCheckCompleted(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, bool fPingHost, HGSMIGUESTCOMMANDCONTEXT *pCtx, bool fBufferOverflow)
{
    if (fPingHost)
        vboxCmdVbvaFlush(pDevExt, pCtx, fBufferOverflow);

    VBOXCMDVBVA_CHECK_COMPLETED_CB context;
    context.pDevExt = pDevExt;
    context.pVbva = pVbva;
    context.u32FenceID = 0;
    BOOLEAN bRet;
    NTSTATUS Status = pDevExt->u.primary.DxgkInterface.DxgkCbSynchronizeExecution(
                          pDevExt->u.primary.DxgkInterface.DeviceHandle,
                          vboxCmdVbvaCheckCompletedIrqCb,
                          &context,
                          0, /* IN ULONG MessageNumber */
                          &bRet);
    Assert(Status == STATUS_SUCCESS);

    return context.u32FenceID;
}

DECLCALLBACK(void) voxCmdVbvaFlushCb(struct VBVAEXBUFFERCONTEXT *pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, void *pvFlush)
{
    PVBOXMP_DEVEXT pDevExt = (PVBOXMP_DEVEXT)pvFlush;

    vboxCmdVbvaCheckCompleted(pDevExt, NULL, true /*fPingHost*/, pHGSMICtx, true /*fBufferOverflow*/);
}

int VBoxCmdVbvaCreate(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, ULONG offBuffer, ULONG cbBuffer)
{
    memset(pVbva, 0, sizeof (*pVbva));

    int rc = VBoxMPCmnMapAdapterMemory(VBoxCommonFromDeviceExt(pDevExt),
                                       (void**)&pVbva->Vbva.pVBVA,
                                       offBuffer,
                                       cbBuffer);
    if (RT_SUCCESS(rc))
    {
        Assert(pVbva->Vbva.pVBVA);
        VBoxVBVAExSetupBufferContext(&pVbva->Vbva, offBuffer, cbBuffer, voxCmdVbvaFlushCb, pDevExt);
    }
    else
    {
        WARN(("VBoxMPCmnMapAdapterMemory failed rc %d", rc));
    }

    return rc;
}

int VBoxCmdVbvaSubmit(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, struct VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
{
    int rc = VINF_SUCCESS;

    pCmd->u8State = VBOXCMDVBVA_STATE_SUBMITTED;
    pVbva->u32FenceSubmitted = pCmd->u32FenceID;

    if (VBoxVBVAExGetSize(&pVbva->Vbva) < cbCmd)
    {
        WARN(("buffer does not fit the vbva buffer, we do not support splitting buffers"));
        return VERR_NOT_SUPPORTED;
    }

    if (!VBoxVBVAExBufferBeginUpdate(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx))
    {
        WARN(("VBoxVBVAExBufferBeginUpdate failed!"));
        return VERR_GENERAL_FAILURE;
    }

    void* pvBuffer = VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbCmd);
    if (!pvBuffer)
    {
        WARN(("failed to allocate contiguous buffer, trying to NOP the tail"));
        uint32_t cbTail = VBoxVBVAExGetFreeTail(&pVbva->Vbva);
        if (!cbTail)
        {
            WARN(("this is not a free tail case, cbTail is 0"));
            return VERR_BUFFER_OVERFLOW;
        }

        Assert(cbTail < cbCmd);

        pvBuffer = VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbTail);

        Assert(pvBuffer);

        *((uint8_t*)pvBuffer) = VBOXCMDVBVA_OPTYPE_NOP;

        VBoxVBVAExBufferEndUpdate(&pVbva->Vbva);

        if (!VBoxVBVAExBufferBeginUpdate(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx))
        {
            WARN(("VBoxVBVAExBufferBeginUpdate 2 failed!"));
            return VERR_GENERAL_FAILURE;
        }

        pvBuffer = VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbCmd);
        if (!pvBuffer)
        {
            WARN(("failed to allocate contiguous buffer, failing"));
            return VERR_GENERAL_FAILURE;
        }
    }

    Assert(pvBuffer);

    memcpy(pvBuffer, pCmd, cbCmd);

    VBoxVBVAExBufferEndUpdate(&pVbva->Vbva);

    if (!VBoxVBVAExIsProcessing(&pVbva->Vbva))
    {
        /* Issue the submit command. */
        HGSMIGUESTCOMMANDCONTEXT *pCtx = &VBoxCommonFromDeviceExt(pDevExt)->guestCtx;
        VBVACMDVBVASUBMIT *pSubmit = (VBVACMDVBVASUBMIT*)VBoxHGSMIBufferAlloc(pCtx,
                                                                              sizeof (VBVACMDVBVASUBMIT),
                                                                              HGSMI_CH_VBVA,
                                                                              VBVA_CMDVBVA_SUBMIT);
        if (!pSubmit)
        {
            WARN(("VBoxHGSMIBufferAlloc failed\n"));
            return VERR_OUT_OF_RESOURCES;
        }

        pSubmit->u32Reserved = 0;

        VBoxHGSMIBufferSubmit(pCtx, pSubmit);

        VBoxHGSMIBufferFree(pCtx, pSubmit);
    }

    return VINF_SUCCESS;
}

bool VBoxCmdVbvaPreempt(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, uint32_t u32FenceID)
{
    VBVAEXBUFFERBACKWARDITER Iter;
    VBoxVBVAExBIterInit(&pVbva->Vbva, &Iter);

    uint32_t cbBuffer;
    bool fProcessed;
    uint8_t* pu8Cmd;

    while ((pu8Cmd = (uint8_t*)VBoxVBVAExBIterNext(&Iter, &cbBuffer, &fProcessed)) != NULL)
    {
        if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
            continue;

        VBOXCMDVBVA_HDR *pCmd = (VBOXCMDVBVA_HDR*)pu8Cmd;

        if (pCmd->u32FenceID != u32FenceID)
            continue;

        if (!ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_CANCELLED, VBOXCMDVBVA_STATE_SUBMITTED))
        {
            Assert(pCmd->u8State == VBOXCMDVBVA_STATE_IN_PROGRESS);
            break;
        }

        /* we have canceled the command successfully */
        vboxCmdVbvaDdiNotifyComplete(pDevExt, pVbva, &pCmd->u32FenceID, DXGK_INTERRUPT_DMA_PREEMPTED);
        return true;
    }

    return false;
}

bool VBoxCmdVbvaCheckCompletedIrq(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
{
    VBVAEXBUFFERFORWARDITER Iter;
    VBoxVBVAExCFIterInit(&pVbva->Vbva, &Iter);

    bool fHasCommandsCompletedPreempted = false;
    bool fProcessed;
    uint8_t* pu8Cmd;

    while ((pu8Cmd = (uint8_t*)VBoxVBVAExCFIterNext(&Iter, NULL, &fProcessed)) != NULL)
    {
        if (!fProcessed)
            break;

        if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
            continue;

        VBOXCMDVBVA_HDR *pCmd = (VBOXCMDVBVA_HDR*)pu8Cmd;
        uint8_t u8State = pCmd->u8State;
        uint32_t u32FenceID = pCmd->u32FenceID;

        Assert(u8State == VBOXCMDVBVA_STATE_IN_PROGRESS
               || u8State == VBOXCMDVBVA_STATE_CANCELLED);
        Assert(u32FenceID);
        VBoxVBVAExCBufferCompleted(&pVbva->Vbva);
        DXGK_INTERRUPT_TYPE enmDdiNotify;

        if (u8State == VBOXCMDVBVA_STATE_IN_PROGRESS)
        {
            if (u32FenceID)
                pVbva->u32FenceCompleted = u32FenceID;
            enmDdiNotify = DXGK_INTERRUPT_DMA_COMPLETED;
        }
        else
        {
            Assert(u8State == VBOXCMDVBVA_STATE_CANCELLED);
            enmDdiNotify = DXGK_INTERRUPT_DMA_PREEMPTED;
            /* to prevent concurrent notifications from DdiPreemptCommand */
            pCmd->u32FenceID = 0;
        }

        if (u32FenceID)
            vboxCmdVbvaDdiNotifyCompleteIrq(pDevExt, pVbva, u32FenceID, enmDdiNotify);

        fHasCommandsCompletedPreempted = true;
    }

    return fHasCommandsCompletedPreempted;
}

uint32_t VBoxCmdVbvaCheckCompleted(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, bool fPingHost)
{
    return vboxCmdVbvaCheckCompleted(pDevExt, pVbva, fPingHost, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, false /* fBufferOverflow */);
}

static uint32_t vboxCVDdiSysMemElBuild(VBOXCMDVBVA_SYSMEMEL *pEl, PMDL pMdl, uint32_t iPfn, uint32_t cPages)
{
    PFN_NUMBER cur = MmGetMdlPfnArray(pMdl)[iPfn];
    uint32_t cbEl = sizeof (*pEl);
    uint32_t cStoredPages = 1;
    PFN_NUMBER next;
    pEl->iPage1 = (uint32_t)(cur & 0xfffff);
    pEl->iPage2 = (uint32_t)(cur >> 20);
    --cPages;
    for ( ; cPages && cStoredPages < VBOXCMDVBVA_SYSMEMEL_CPAGES_MAX; --cPages, ++cStoredPages, cur = next)
    {
        next = MmGetMdlPfnArray(pMdl)[iPfn + cStoredPages];
        if (next != cur + 1)
            break;
    }

    Assert(cStoredPages);
    pEl->cPagesAfterFirst = cStoredPages - 1;

    return cPages;
}
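
/*
 * Editorial example: for an MDL whose PFN array starts { 100, 101, 102, 200 }
 * with iPfn == 0 and cPages == 4, vboxCVDdiSysMemElBuild() encodes one
 * element covering the contiguous run of PFNs 100..102 (iPage1/iPage2 hold
 * the first PFN split at bit 20, cPagesAfterFirst == 2) and returns 1, the
 * number of pages left over for the next element.
 */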

uint32_t VBoxCVDdiPTransferVRamSysBuildEls(VBOXCMDVBVA_PAGING_TRANSFER *pCmd, PMDL pMdl, uint32_t iPfn, uint32_t cPages, uint32_t cbBuffer, uint32_t *pcPagesWritten)
{
    uint32_t cInitPages = cPages;
    uint32_t cbInitBuffer = cbBuffer;
    uint32_t cEls = 0;
    VBOXCMDVBVA_SYSMEMEL *pEl = pCmd->aSysMem;

    if (cbBuffer < sizeof (VBOXCMDVBVA_PAGING_TRANSFER))
    {
        WARN(("cbBuffer < sizeof (VBOXCMDVBVA_PAGING_TRANSFER)"));
        goto done;
    }

    cbBuffer -= RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, aSysMem);

    for (; cPages && cbBuffer >= sizeof (VBOXCMDVBVA_SYSMEMEL); ++cEls, cbBuffer -= sizeof (VBOXCMDVBVA_SYSMEMEL), ++pEl)
    {
        cPages = vboxCVDdiSysMemElBuild(pEl, pMdl, iPfn + cInitPages - cPages, cPages);
    }

done:
    *pcPagesWritten = cInitPages - cPages;
    return cbInitBuffer - cbBuffer;
}