VirtualBox

source: vbox/trunk/src/VBox/Additions/WINNT/Graphics/Video/mp/wddm/VBoxMPVbva.cpp@ 53784

Last change on this file since 53784 was 52808, checked in by vboxsync, 10 years ago

wddm: typo fix

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 45.6 KB
Line 
1/* $Id: VBoxMPVbva.cpp 52808 2014-09-22 08:44:09Z vboxsync $ */
2
3/** @file
4 * VBox WDDM Miniport driver
5 */
6
7/*
8 * Copyright (C) 2011-2012 Oracle Corporation
9 *
10 * This file is part of VirtualBox Open Source Edition (OSE), as
11 * available from http://www.virtualbox.org. This file is free software;
12 * you can redistribute it and/or modify it under the terms of the GNU
13 * General Public License (GPL) as published by the Free Software
14 * Foundation, in version 2 as it comes in the "COPYING" file of the
15 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
16 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
17 */
18
19#include "VBoxMPWddm.h"
20#include "common/VBoxMPCommon.h"
21
22/*
23 * Public hardware buffer methods.
24 */
25int vboxVbvaEnable (PVBOXMP_DEVEXT pDevExt, VBOXVBVAINFO *pVbva)
26{
27 if (VBoxVBVAEnable(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx,
28 pVbva->Vbva.pVBVA, pVbva->srcId))
29 return VINF_SUCCESS;
30
31 WARN(("VBoxVBVAEnable failed!"));
32 return VERR_GENERAL_FAILURE;
33}
34
35int vboxVbvaDisable (PVBOXMP_DEVEXT pDevExt, VBOXVBVAINFO *pVbva)
36{
37 VBoxVBVADisable(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, pVbva->srcId);
38 return VINF_SUCCESS;
39}
40
41int vboxVbvaCreate(PVBOXMP_DEVEXT pDevExt, VBOXVBVAINFO *pVbva, ULONG offBuffer, ULONG cbBuffer, D3DDDI_VIDEO_PRESENT_SOURCE_ID srcId)
42{
43 memset(pVbva, 0, sizeof(VBOXVBVAINFO));
44
45 KeInitializeSpinLock(&pVbva->Lock);
46
47 int rc = VBoxMPCmnMapAdapterMemory(VBoxCommonFromDeviceExt(pDevExt),
48 (void**)&pVbva->Vbva.pVBVA,
49 offBuffer,
50 cbBuffer);
51 if (RT_SUCCESS(rc))
52 {
53 Assert(pVbva->Vbva.pVBVA);
54 VBoxVBVASetupBufferContext(&pVbva->Vbva, offBuffer, cbBuffer);
55 pVbva->srcId = srcId;
56 }
57 else
58 {
59 WARN(("VBoxMPCmnMapAdapterMemory failed rc %d", rc));
60 }
61
62
63 return rc;
64}
65
66int vboxVbvaDestroy(PVBOXMP_DEVEXT pDevExt, VBOXVBVAINFO *pVbva)
67{
68 int rc = VINF_SUCCESS;
69 VBoxMPCmnUnmapAdapterMemory(VBoxCommonFromDeviceExt(pDevExt), (void**)&pVbva->Vbva.pVBVA);
70 memset(pVbva, 0, sizeof (VBOXVBVAINFO));
71 return rc;
72}
73
74int vboxVbvaReportDirtyRect (PVBOXMP_DEVEXT pDevExt, PVBOXWDDM_SOURCE pSrc, RECT *pRectOrig)
75{
76 VBVACMDHDR hdr;
77
78 RECT rect = *pRectOrig;
79
80// if (rect.left < 0) rect.left = 0;
81// if (rect.top < 0) rect.top = 0;
82// if (rect.right > (int)ppdev->cxScreen) rect.right = ppdev->cxScreen;
83// if (rect.bottom > (int)ppdev->cyScreen) rect.bottom = ppdev->cyScreen;
84
85 hdr.x = (int16_t)rect.left;
86 hdr.y = (int16_t)rect.top;
87 hdr.w = (uint16_t)(rect.right - rect.left);
88 hdr.h = (uint16_t)(rect.bottom - rect.top);
89
90 hdr.x += (int16_t)pSrc->VScreenPos.x;
91 hdr.y += (int16_t)pSrc->VScreenPos.y;
92
93 if (VBoxVBVAWrite(&pSrc->Vbva.Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, &hdr, sizeof(hdr)))
94 return VINF_SUCCESS;
95
96 WARN(("VBoxVBVAWrite failed"));
97 return VERR_GENERAL_FAILURE;
98}
99
100#ifdef VBOX_WITH_CROGL
101/* command vbva ring buffer */
102
103/* customized VBVA implementation */
104
105/* Forward declarations of internal functions. */
106static void vboxHwBufferPlaceDataAt(PVBVAEXBUFFERCONTEXT pCtx, const void *p,
107 uint32_t cb, uint32_t offset);
108static bool vboxHwBufferWrite(PVBVAEXBUFFERCONTEXT pCtx,
109 PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
110 const void *p, uint32_t cb);
111
/** Invokes the flush callback installed by VBoxVBVAExSetupBufferContext,
 *  passing it the opaque context pointer registered alongside it. */
DECLINLINE(void) vboxVBVAExFlush(struct VBVAEXBUFFERCONTEXT *pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx)
{
    pCtx->pfnFlush(pCtx, pHGSMICtx, pCtx->pvFlush);
}
116
/** Submits an HGSMI command to the host by writing its heap offset to the
 *  device I/O port; the host picks the command up from there. */
static int vboxCmdVbvaSubmitHgsmi(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, HGSMIOFFSET offDr)
{
    VBoxVideoCmnPortWriteUlong(pHGSMICtx->port, offDr);
    return VINF_SUCCESS;
}
/* HGSMI port write is currently the only submission transport. */
#define vboxCmdVbvaSubmit vboxCmdVbvaSubmitHgsmi
123
124static VBOXCMDVBVA_CTL * vboxCmdVbvaCtlCreate(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, uint32_t cbCtl)
125{
126 Assert(cbCtl >= sizeof (VBOXCMDVBVA_CTL));
127 return (VBOXCMDVBVA_CTL*)VBoxSHGSMICommandAlloc(&pHGSMICtx->heapCtx, cbCtl, HGSMI_CH_VBVA, VBVA_CMDVBVA_CTL);
128}
129
/** Returns a control command allocated by vboxCmdVbvaCtlCreate to the SHGSMI heap. */
static void vboxCmdVbvaCtlFree(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CTL * pCtl)
{
    VBoxSHGSMICommandFree(&pHGSMICtx->heapCtx, pCtl);
}
134
135static int vboxCmdVbvaCtlSubmitSync(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CTL * pCtl)
136{
137 const VBOXSHGSMIHEADER* pHdr = VBoxSHGSMICommandPrepSynch(&pHGSMICtx->heapCtx, pCtl);
138 if (!pHdr)
139 {
140 WARN(("VBoxSHGSMICommandPrepSynch returnd NULL"));
141 return VERR_INVALID_PARAMETER;
142 }
143
144 HGSMIOFFSET offCmd = VBoxSHGSMICommandOffset(&pHGSMICtx->heapCtx, pHdr);
145 if (offCmd == HGSMIOFFSET_VOID)
146 {
147 WARN(("VBoxSHGSMICommandOffset returnd NULL"));
148 VBoxSHGSMICommandCancelSynch(&pHGSMICtx->heapCtx, pHdr);
149 return VERR_INVALID_PARAMETER;
150 }
151
152 int rc = vboxCmdVbvaSubmit(pHGSMICtx, offCmd);
153 if (RT_SUCCESS(rc))
154 {
155 rc = VBoxSHGSMICommandDoneSynch(&pHGSMICtx->heapCtx, pHdr);
156 if (RT_SUCCESS(rc))
157 {
158 rc = pCtl->i32Result;
159 if (!RT_SUCCESS(rc))
160 WARN(("pCtl->i32Result %d", pCtl->i32Result));
161
162 return rc;
163 }
164 else
165 WARN(("VBoxSHGSMICommandDoneSynch returnd %d", rc));
166 }
167 else
168 WARN(("vboxCmdVbvaSubmit returnd %d", rc));
169
170 VBoxSHGSMICommandCancelSynch(&pHGSMICtx->heapCtx, pHdr);
171
172 return rc;
173}
174
175static int vboxCmdVbvaCtlSubmitAsync(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CTL * pCtl, FNVBOXSHGSMICMDCOMPLETION pfnCompletion, void *pvCompletion)
176{
177 const VBOXSHGSMIHEADER* pHdr = VBoxSHGSMICommandPrepAsynch(&pHGSMICtx->heapCtx, pCtl, pfnCompletion, pvCompletion, VBOXSHGSMI_FLAG_GH_ASYNCH_IRQ);
178 HGSMIOFFSET offCmd = VBoxSHGSMICommandOffset(&pHGSMICtx->heapCtx, pHdr);
179 if (offCmd == HGSMIOFFSET_VOID)
180 {
181 WARN(("VBoxSHGSMICommandOffset returnd NULL"));
182 VBoxSHGSMICommandCancelAsynch(&pHGSMICtx->heapCtx, pHdr);
183 return VERR_INVALID_PARAMETER;
184 }
185
186 int rc = vboxCmdVbvaSubmit(pHGSMICtx, offCmd);
187 if (RT_SUCCESS(rc))
188 {
189 VBoxSHGSMICommandDoneAsynch(&pHGSMICtx->heapCtx, pHdr);
190 return rc;
191 }
192 else
193 WARN(("vboxCmdVbvaSubmit returnd %d", rc));
194
195 VBoxSHGSMICommandCancelAsynch(&pHGSMICtx->heapCtx, pHdr);
196
197 return rc;
198}
199
200static int vboxVBVAExCtlSubmitEnableDisable(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, bool fEnable)
201{
202 VBOXCMDVBVA_CTL_ENABLE *pCtl = (VBOXCMDVBVA_CTL_ENABLE*)vboxCmdVbvaCtlCreate(pHGSMICtx, sizeof (*pCtl));
203 if (!pCtl)
204 {
205 WARN(("vboxCmdVbvaCtlCreate failed"));
206 return VERR_NO_MEMORY;
207 }
208
209 pCtl->Hdr.u32Type = VBOXCMDVBVACTL_TYPE_ENABLE;
210 pCtl->Hdr.i32Result = VERR_NOT_IMPLEMENTED;
211 memset(&pCtl->Enable, 0, sizeof (pCtl->Enable));
212 pCtl->Enable.u32Flags = fEnable? VBVA_F_ENABLE: VBVA_F_DISABLE;
213 pCtl->Enable.u32Offset = pCtx->offVRAMBuffer;
214 pCtl->Enable.i32Result = VERR_NOT_SUPPORTED;
215 pCtl->Enable.u32Flags |= VBVA_F_ABSOFFSET;
216
217 int rc = vboxCmdVbvaCtlSubmitSync(pHGSMICtx, &pCtl->Hdr);
218 if (RT_SUCCESS(rc))
219 {
220 rc = pCtl->Hdr.i32Result;
221 if (!RT_SUCCESS(rc))
222 WARN(("vboxCmdVbvaCtlSubmitSync Disable failed %d", rc));
223 }
224 else
225 WARN(("vboxCmdVbvaCtlSubmitSync returnd %d", rc));
226
227 vboxCmdVbvaCtlFree(pHGSMICtx, &pCtl->Hdr);
228
229 return rc;
230}
231
232/*
233 * Public hardware buffer methods.
234 */
/**
 * Enables the extended VBVA ring: resets the shared VBVABUFFER header,
 * initializes the local buffer context and submits an enable control
 * command to the host.
 *
 * @param pCtx       local buffer context (offVRAMBuffer/cbBuffer must be set
 *                   up beforehand via VBoxVBVAExSetupBufferContext).
 * @param pHGSMICtx  guest HGSMI command context.
 * @param pVBVA      mapped shared VBVABUFFER; must not be NULL.
 * @returns VBox status; on failure the context is cleaned up via
 *          VBoxVBVAExDisable.
 */
RTDECL(int) VBoxVBVAExEnable(PVBVAEXBUFFERCONTEXT pCtx,
                             PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
                             VBVABUFFER *pVBVA)
{
    int rc = VERR_GENERAL_FAILURE;

    LogFlowFunc(("pVBVA %p\n", pVBVA));

#if 0 /* All callers check this */
    if (ppdev->bHGSMISupported)
#endif
    {
        LogFunc(("pVBVA %p vbva off 0x%x\n", pVBVA, pCtx->offVRAMBuffer));

        /* Reset the host-visible buffer header before announcing it. */
        pVBVA->hostFlags.u32HostEvents = 0;
        pVBVA->hostFlags.u32SupportedOrders = 0;
        pVBVA->off32Data = 0;
        pVBVA->off32Free = 0;
        memset(pVBVA->aRecords, 0, sizeof (pVBVA->aRecords));
        pVBVA->indexRecordFirst = 0;
        pVBVA->indexRecordFree = 0;
        pVBVA->cbPartialWriteThreshold = 256;
        /* Usable data size: the buffer minus the header, plus the au8Data
         * placeholder that is already counted inside sizeof (VBVABUFFER). */
        pVBVA->cbData = pCtx->cbBuffer - sizeof (VBVABUFFER) + sizeof (pVBVA->au8Data);

        /* Reset local tracking state. */
        pCtx->fHwBufferOverflow = false;
        pCtx->pRecord = NULL;
        pCtx->pVBVA = pVBVA;

        rc = vboxVBVAExCtlSubmitEnableDisable(pCtx, pHGSMICtx, true);
    }

    if (!RT_SUCCESS(rc))
    {
        WARN(("enable failed %d", rc));
        VBoxVBVAExDisable(pCtx, pHGSMICtx);
    }

    return rc;
}
274
275RTDECL(void) VBoxVBVAExDisable(PVBVAEXBUFFERCONTEXT pCtx,
276 PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx)
277{
278 LogFlowFunc(("\n"));
279
280 vboxVBVAExCtlSubmitEnableDisable(pCtx, pHGSMICtx, false);
281
282 pCtx->fHwBufferOverflow = false;
283 pCtx->pRecord = NULL;
284 pCtx->pVBVA = NULL;
285
286 return;
287}
288
/**
 * Opens a new record in the VBVA ring for writing.
 *
 * Allocates the next slot in the records queue, marks it PARTIAL and
 * remembers it in pCtx->pRecord. Must be paired with
 * VBoxVBVAExBufferEndUpdate. Only one record may be open at a time.
 *
 * @returns true if a record was opened, false if the records queue is full
 *          even after asking the host to flush.
 */
RTDECL(bool) VBoxVBVAExBufferBeginUpdate(PVBVAEXBUFFERCONTEXT pCtx,
                                         PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx)
{
    bool bRc = false;

    // LogFunc(("flags = 0x%08X\n", pCtx->pVBVA? pCtx->pVBVA->u32HostEvents: -1));

    Assert(pCtx->pVBVA);
    /* we do not use u32HostEvents & VBVA_F_MODE_ENABLED,
     * VBVA stays enabled once ENABLE call succeeds, until it is disabled with DISABLED call */
//    if (   pCtx->pVBVA
//        && (pCtx->pVBVA->hostFlags.u32HostEvents & VBVA_F_MODE_ENABLED))
    {
        uint32_t indexRecordNext;

        Assert(!pCtx->fHwBufferOverflow);
        Assert(pCtx->pRecord == NULL);

        indexRecordNext = (pCtx->pVBVA->indexRecordFree + 1) % VBVA_MAX_RECORDS;

        if (indexRecordNext == pCtx->indexRecordFirstUncompleted)
        {
            /* All slots in the records queue are used. */
            vboxVBVAExFlush (pCtx, pHGSMICtx);
        }

        if (indexRecordNext == pCtx->indexRecordFirstUncompleted)
        {
            /* Even after flush there is no place. Fail the request. */
            LogFunc(("no space in the queue of records!!! first %d, last %d\n",
                     indexRecordNext, pCtx->pVBVA->indexRecordFree));
        }
        else
        {
            /* Initialize the record. PARTIAL tells the host the record is
             * still being filled and must not be processed yet. */
            VBVARECORD *pRecord = &pCtx->pVBVA->aRecords[pCtx->pVBVA->indexRecordFree];

            pRecord->cbRecord = VBVA_F_RECORD_PARTIAL;

            /* Publish the new free index only after the record is initialized. */
            pCtx->pVBVA->indexRecordFree = indexRecordNext;

            // LogFunc(("indexRecordNext = %d\n", indexRecordNext));

            /* Remember which record we are using. */
            pCtx->pRecord = pRecord;

            bRc = true;
        }
    }

    return bRc;
}
341
342RTDECL(void) VBoxVBVAExBufferEndUpdate(PVBVAEXBUFFERCONTEXT pCtx)
343{
344 VBVARECORD *pRecord;
345
346 // LogFunc(("\n"));
347
348 Assert(pCtx->pVBVA);
349
350 pRecord = pCtx->pRecord;
351 Assert(pRecord && (pRecord->cbRecord & VBVA_F_RECORD_PARTIAL));
352
353 /* Mark the record completed. */
354 pRecord->cbRecord &= ~VBVA_F_RECORD_PARTIAL;
355
356 pCtx->fHwBufferOverflow = false;
357 pCtx->pRecord = NULL;
358
359 return;
360}
361
362DECLINLINE(bool) vboxVBVAExIsEntryInRange(uint32_t u32First, uint32_t u32Entry, uint32_t u32Free)
363{
364 return ( u32First != u32Free
365 && (
366 (u32First < u32Free && u32Entry >= u32First && u32Entry < u32Free)
367 || (u32First > u32Free && (u32Entry >= u32First || u32Entry < u32Free))
368 )
369 );
370}
371
372DECLINLINE(bool) vboxVBVAExIsEntryInRangeOrEmpty(uint32_t u32First, uint32_t u32Entry, uint32_t u32Free)
373{
374 return vboxVBVAExIsEntryInRange(u32First, u32Entry, u32Free)
375 || ( u32First == u32Entry
376 && u32Entry == u32Free);
377}
378#ifdef DEBUG
379
/** Debug-only sanity check: the host's record/data cursors must lie within
 *  the span of entries the guest still considers uncompleted. */
DECLINLINE(void) vboxHwBufferVerifyCompleted(PVBVAEXBUFFERCONTEXT pCtx)
{
    VBVABUFFER *pVBVA = pCtx->pVBVA;
    /* indexRecordFirst (host read cursor) must be between our first
     * uncompleted record and the free index. */
    if (!vboxVBVAExIsEntryInRangeOrEmpty(pCtx->indexRecordFirstUncompleted, pVBVA->indexRecordFirst, pVBVA->indexRecordFree))
    {
        WARN(("invalid record set"));
    }

    /* Same invariant for the data offsets. */
    if (!vboxVBVAExIsEntryInRangeOrEmpty(pCtx->off32DataUncompleted, pVBVA->off32Data, pVBVA->off32Free))
    {
        WARN(("invalid data set"));
    }
}
393#endif
394
395/*
396 * Private operations.
397 */
398static uint32_t vboxHwBufferAvail(PVBVAEXBUFFERCONTEXT pCtx, const VBVABUFFER *pVBVA)
399{
400 int32_t i32Diff = pCtx->off32DataUncompleted - pVBVA->off32Free;
401
402 return i32Diff > 0? i32Diff: pVBVA->cbData + i32Diff;
403}
404
405static uint32_t vboxHwBufferContiguousAvail(PVBVAEXBUFFERCONTEXT pCtx, const VBVABUFFER *pVBVA)
406{
407 int32_t i32Diff = pCtx->off32DataUncompleted - pVBVA->off32Free;
408
409 return i32Diff > 0 ? i32Diff: pVBVA->cbData - pVBVA->off32Free;
410}
411
412static void vboxHwBufferPlaceDataAt(PVBVAEXBUFFERCONTEXT pCtx, const void *p,
413 uint32_t cb, uint32_t offset)
414{
415 VBVABUFFER *pVBVA = pCtx->pVBVA;
416 uint32_t u32BytesTillBoundary = pVBVA->cbData - offset;
417 uint8_t *dst = &pVBVA->au8Data[offset];
418 int32_t i32Diff = cb - u32BytesTillBoundary;
419
420 if (i32Diff <= 0)
421 {
422 /* Chunk will not cross buffer boundary. */
423 memcpy (dst, p, cb);
424 }
425 else
426 {
427 /* Chunk crosses buffer boundary. */
428 memcpy (dst, p, u32BytesTillBoundary);
429 memcpy (&pVBVA->au8Data[0], (uint8_t *)p + u32BytesTillBoundary, i32Diff);
430 }
431
432 return;
433}
434
/**
 * Writes cb bytes into the currently open record, flushing to the host and
 * falling back to partial writes when the ring is short on space.
 *
 * Must be called between BeginUpdate/EndUpdate (pCtx->pRecord open).
 *
 * @returns true on success; false when there is no VBVA buffer, a previous
 *          overflow is pending, or the buffer overflows during this write.
 */
static bool vboxHwBufferWrite(PVBVAEXBUFFERCONTEXT pCtx,
                              PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
                              const void *p, uint32_t cb)
{
    VBVARECORD *pRecord;
    uint32_t cbHwBufferAvail;

    uint32_t cbWritten = 0;

    VBVABUFFER *pVBVA = pCtx->pVBVA;
    Assert(pVBVA);

    if (!pVBVA || pCtx->fHwBufferOverflow)
    {
        return false;
    }

    Assert(pVBVA->indexRecordFirst != pVBVA->indexRecordFree);
    Assert(pCtx->indexRecordFirstUncompleted != pVBVA->indexRecordFree);

    pRecord = pCtx->pRecord;
    Assert(pRecord && (pRecord->cbRecord & VBVA_F_RECORD_PARTIAL));

    // LogFunc(("%d\n", cb));

    cbHwBufferAvail = vboxHwBufferAvail(pCtx, pVBVA);

    /* Write in chunks, shrinking the chunk when free space runs out. */
    while (cb > 0)
    {
        uint32_t cbChunk = cb;

        // LogFunc(("pVBVA->off32Free %d, pRecord->cbRecord 0x%08X, cbHwBufferAvail %d, cb %d, cbWritten %d\n",
        //             pVBVA->off32Free, pRecord->cbRecord, cbHwBufferAvail, cb, cbWritten));

        if (cbChunk >= cbHwBufferAvail)
        {
            LogFunc(("1) avail %d, chunk %d\n", cbHwBufferAvail, cbChunk));

            /* Ask the host to consume data to make room. */
            vboxVBVAExFlush(pCtx, pHGSMICtx);

            cbHwBufferAvail = vboxHwBufferAvail(pCtx, pVBVA);

            if (cbChunk >= cbHwBufferAvail)
            {
                WARN(("no place for %d bytes. Only %d bytes available after flush. Going to partial writes.\n",
                            cb, cbHwBufferAvail));

                /* Below the threshold a partial write is pointless: give up
                 * and latch the overflow so subsequent writes fail fast. */
                if (cbHwBufferAvail <= pVBVA->cbPartialWriteThreshold)
                {
                    WARN(("Buffer overflow!!!\n"));
                    pCtx->fHwBufferOverflow = true;
                    Assert(false);
                    return false;
                }

                /* Keep cbPartialWriteThreshold bytes in reserve. */
                cbChunk = cbHwBufferAvail - pVBVA->cbPartialWriteThreshold;
            }
        }

        Assert(cbChunk <= cb);
        Assert(cbChunk <= vboxHwBufferAvail(pCtx, pVBVA));

        vboxHwBufferPlaceDataAt (pCtx, (uint8_t *)p + cbWritten, cbChunk, pVBVA->off32Free);

        /* Advance the write cursor and grow the open record. */
        pVBVA->off32Free   = (pVBVA->off32Free + cbChunk) % pVBVA->cbData;
        pRecord->cbRecord += cbChunk;
        cbHwBufferAvail -= cbChunk;

        cb        -= cbChunk;
        cbWritten += cbChunk;
    }

    return true;
}
509
510/*
511 * Public writer to the hardware buffer.
512 */
513RTDECL(uint32_t) VBoxVBVAExGetFreeTail(PVBVAEXBUFFERCONTEXT pCtx)
514{
515 VBVABUFFER *pVBVA = pCtx->pVBVA;
516 if (pVBVA->off32Data <= pVBVA->off32Free)
517 return pVBVA->cbData - pVBVA->off32Free;
518 return 0;
519}
520
/**
 * Reserves cb contiguous bytes in the ring at the current write position and
 * accounts them to the open record.
 *
 * Must be called between BeginUpdate/EndUpdate. The returned pointer is
 * valid until the host completes the record.
 *
 * @returns pointer into the ring data area, or NULL when the request cannot
 *          be satisfied contiguously.
 */
RTDECL(void*) VBoxVBVAExAllocContiguous(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, uint32_t cb)
{
    VBVARECORD *pRecord;
    uint32_t cbHwBufferContiguousAvail;
    uint32_t offset;

    VBVABUFFER *pVBVA = pCtx->pVBVA;
    Assert(pVBVA);

    if (!pVBVA || pCtx->fHwBufferOverflow)
    {
        return NULL;
    }

    Assert(pVBVA->indexRecordFirst != pVBVA->indexRecordFree);
    Assert(pCtx->indexRecordFirstUncompleted != pVBVA->indexRecordFree);

    pRecord = pCtx->pRecord;
    Assert(pRecord && (pRecord->cbRecord & VBVA_F_RECORD_PARTIAL));

    // LogFunc(("%d\n", cb));

    if (pVBVA->cbData < cb)
    {
        WARN(("requested to allocate buffer of size %d bigger than the VBVA ring buffer size %d", cb, pVBVA->cbData));
        return NULL;
    }

    cbHwBufferContiguousAvail = vboxHwBufferContiguousAvail(pCtx, pVBVA);

    if (cbHwBufferContiguousAvail < cb)
    {
        /* NOTE(review): this returns NULL when cb is SMALLER than the
         * physical tail (cbData - off32Free) and only flushes otherwise;
         * the comment below suggests the opposite was intended — verify
         * against later revisions before touching. */
        if (cb < pVBVA->cbData - pVBVA->off32Free)
        {
            /* the entire contiguous part is smaller than the requested buffer */
            return NULL;
        }

        vboxVBVAExFlush(pCtx, pHGSMICtx);

        cbHwBufferContiguousAvail = vboxHwBufferContiguousAvail(pCtx, pVBVA);
        if (cbHwBufferContiguousAvail < cb)
        {
            /* this is really bad - the host did not clean up buffer even after we requested it to flush */
            WARN(("Host did not clean up the buffer!"));
            return NULL;
        }
    }

    offset = pVBVA->off32Free;

    /* Advance the write cursor and grow the open record by the reservation. */
    pVBVA->off32Free = (pVBVA->off32Free + cb) % pVBVA->cbData;
    pRecord->cbRecord += cb;

    return &pVBVA->au8Data[offset];
}
577
578RTDECL(bool) VBoxVBVAExIsProcessing(PVBVAEXBUFFERCONTEXT pCtx)
579{
580 uint32_t u32HostEvents = pCtx->pVBVA->hostFlags.u32HostEvents;
581 return !!(u32HostEvents & VBVA_F_STATE_PROCESSING);
582}
583
584RTDECL(void) VBoxVBVAExCBufferCompleted(PVBVAEXBUFFERCONTEXT pCtx)
585{
586 VBVABUFFER *pVBVA = pCtx->pVBVA;
587 uint32_t cbBuffer = pVBVA->aRecords[pCtx->indexRecordFirstUncompleted].cbRecord;
588 pCtx->indexRecordFirstUncompleted = (pCtx->indexRecordFirstUncompleted + 1) % VBVA_MAX_RECORDS;
589 pCtx->off32DataUncompleted = (pCtx->off32DataUncompleted + cbBuffer) % pVBVA->cbData;
590}
591
/** Public wrapper around vboxHwBufferWrite: appends cb bytes from pv to the
 *  currently open record. @returns true on success. */
RTDECL(bool) VBoxVBVAExWrite(PVBVAEXBUFFERCONTEXT pCtx,
                             PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
                             const void *pv, uint32_t cb)
{
    return vboxHwBufferWrite(pCtx, pHGSMICtx, pv, cb);
}
598
599RTDECL(bool) VBoxVBVAExOrderSupported(PVBVAEXBUFFERCONTEXT pCtx, unsigned code)
600{
601 VBVABUFFER *pVBVA = pCtx->pVBVA;
602
603 if (!pVBVA)
604 {
605 return false;
606 }
607
608 if (pVBVA->hostFlags.u32SupportedOrders & (1 << code))
609 {
610 return true;
611 }
612
613 return false;
614}
615
/**
 * Initializes the local buffer context prior to VBoxVBVAExEnable.
 *
 * @param offVRAMBuffer  absolute VRAM offset of the ring buffer.
 * @param cbBuffer       total size of the ring buffer.
 * @param pfnFlush       callback invoked by vboxVBVAExFlush to make the host
 *                       consume pending data.
 * @param pvFlush        opaque context passed to pfnFlush.
 */
RTDECL(void) VBoxVBVAExSetupBufferContext(PVBVAEXBUFFERCONTEXT pCtx,
                                        uint32_t offVRAMBuffer,
                                        uint32_t cbBuffer,
                                        PFNVBVAEXBUFFERFLUSH pfnFlush,
                                        void *pvFlush)
{
    /* Only the fields located before pVBVA are zeroed; pVBVA itself and
     * anything after it are deliberately left untouched. */
    memset(pCtx, 0, RT_OFFSETOF(VBVAEXBUFFERCONTEXT, pVBVA));
    pCtx->offVRAMBuffer = offVRAMBuffer;
    pCtx->cbBuffer = cbBuffer;
    pCtx->pfnFlush = pfnFlush;
    pCtx->pvFlush = pvFlush;
}
628
/**
 * Returns the payload of the record at the iterator's current position.
 *
 * @param pcbBuffer    optional; receives the record size.
 * @param pfProcessed  optional; receives true when the host has already
 *                     processed this record (it is outside
 *                     [indexRecordFirst, indexRecordFree)).
 * @returns pointer into the ring data area, or NULL for a fresh PARTIAL record.
 */
static void* vboxVBVAExIterCur(PVBVAEXBUFFERITERBASE pIter, struct VBVABUFFER *pVBVA, uint32_t *pcbBuffer, bool *pfProcessed)
{
    uint32_t cbRecord = pVBVA->aRecords[pIter->iCurRecord].cbRecord;
    /* NOTE(review): equality test only catches a record that is PARTIAL and
     * still empty; a partially-filled record (flag | size) passes through —
     * presumably safe in the submitter-only contexts this runs in; verify. */
    if (cbRecord == VBVA_F_RECORD_PARTIAL)
        return NULL;
    if (pcbBuffer)
        *pcbBuffer = cbRecord;
    if (pfProcessed)
        *pfProcessed = !vboxVBVAExIsEntryInRange(pVBVA->indexRecordFirst, pIter->iCurRecord, pVBVA->indexRecordFree);
    return &pVBVA->au8Data[pIter->off32CurCmd];
}
640
641DECLINLINE(uint32_t) vboxVBVAExSubst(uint32_t x, uint32_t val, uint32_t maxVal)
642{
643 int32_t result = (int32_t)(x - val);
644 return result >= 0 ? (uint32_t)result : maxVal - (((uint32_t)(-result)) % maxVal);
645}
646
/**
 * Initializes a backward iterator positioned at the most recently submitted
 * record (the one just before indexRecordFree).
 */
RTDECL(void) VBoxVBVAExBIterInit(PVBVAEXBUFFERCONTEXT pCtx, PVBVAEXBUFFERBACKWARDITER pIter)
{
    struct VBVABUFFER *pVBVA = pCtx->pVBVA;
    pIter->Base.pCtx = pCtx;
    /* Candidate: the record immediately preceding the free slot. */
    uint32_t iCurRecord = vboxVBVAExSubst(pVBVA->indexRecordFree, 1, VBVA_MAX_RECORDS);
    if (vboxVBVAExIsEntryInRange(pCtx->indexRecordFirstUncompleted, iCurRecord, pVBVA->indexRecordFree))
    {
        /* even if the command gets completed by the time we're doing the pCtx->pVBVA->aRecords[iCurRecord].cbRecord below,
         * the pCtx->pVBVA->aRecords[iCurRecord].cbRecord will still be valid, as it can only be modified by a submitter,
         * and we are in a submitter context now */
        pIter->Base.iCurRecord = iCurRecord;
        pIter->Base.off32CurCmd = vboxVBVAExSubst(pVBVA->off32Free, pCtx->pVBVA->aRecords[iCurRecord].cbRecord, pVBVA->cbData);
    }
    else
    {
        /* no data: park the iterator at the free position so the first
         * VBoxVBVAExBIterNext returns NULL. */
        pIter->Base.iCurRecord = pVBVA->indexRecordFree;
        pIter->Base.off32CurCmd = pVBVA->off32Free;
    }
}
667
/**
 * Returns the record at the current backward-iterator position and steps the
 * iterator one record towards older entries.
 *
 * @returns the record payload, or NULL when iteration has left the
 *          uncompleted range.
 */
RTDECL(void*) VBoxVBVAExBIterNext(PVBVAEXBUFFERBACKWARDITER pIter, uint32_t *pcbBuffer, bool *pfProcessed)
{
    PVBVAEXBUFFERCONTEXT pCtx = pIter->Base.pCtx;
    struct VBVABUFFER *pVBVA = pCtx->pVBVA;
    uint32_t indexRecordFirstUncompleted = pCtx->indexRecordFirstUncompleted;
    if (!vboxVBVAExIsEntryInRange(indexRecordFirstUncompleted, pIter->Base.iCurRecord, pVBVA->indexRecordFree))
        return NULL;

    void *pvBuffer = vboxVBVAExIterCur(&pIter->Base, pVBVA, pcbBuffer, pfProcessed);
    AssertRelease(pvBuffer);

    /* even if the command gets completed by the time we're doing the pCtx->pVBVA->aRecords[pIter->Base.iCurRecord].cbRecord below,
     * the pCtx->pVBVA->aRecords[pIter->Base.iCurRecord].cbRecord will still be valid, as it can only be modified by a submitter,
     * and we are in a submitter context now */
    /* Order matters: iCurRecord must be stepped back FIRST so the data
     * offset is reduced by the size of the record we are stepping onto. */
    pIter->Base.iCurRecord = vboxVBVAExSubst(pIter->Base.iCurRecord, 1, VBVA_MAX_RECORDS);
    pIter->Base.off32CurCmd = vboxVBVAExSubst(pIter->Base.off32CurCmd, pCtx->pVBVA->aRecords[pIter->Base.iCurRecord].cbRecord, pVBVA->cbData);

    return pvBuffer;
}
687
/** Initializes a forward iterator positioned at the oldest record the guest
 *  still considers uncompleted. */
RTDECL(void) VBoxVBVAExCFIterInit(PVBVAEXBUFFERCONTEXT pCtx, PVBVAEXBUFFERFORWARDITER pIter)
{
    pIter->Base.pCtx = pCtx;
    pIter->Base.iCurRecord = pCtx->indexRecordFirstUncompleted;
    pIter->Base.off32CurCmd = pCtx->off32DataUncompleted;
}
694
/**
 * Returns the record at the current forward-iterator position and advances
 * the iterator to the next (newer) record.
 *
 * @returns the record payload, or NULL when the iterator reaches the free
 *          index or hits an empty PARTIAL record.
 */
RTDECL(void*) VBoxVBVAExCFIterNext(PVBVAEXBUFFERFORWARDITER pIter, uint32_t *pcbBuffer, bool *pfProcessed)
{
    PVBVAEXBUFFERCONTEXT pCtx = pIter->Base.pCtx;
    struct VBVABUFFER *pVBVA = pCtx->pVBVA;
    uint32_t indexRecordFree = pVBVA->indexRecordFree;
    if (!vboxVBVAExIsEntryInRange(pCtx->indexRecordFirstUncompleted, pIter->Base.iCurRecord, indexRecordFree))
        return NULL;

    uint32_t cbBuffer;
    void *pvData = vboxVBVAExIterCur(&pIter->Base, pVBVA, &cbBuffer, pfProcessed);
    if (!pvData)
        return NULL;

    /* Advance past the record we just returned. */
    pIter->Base.iCurRecord = (pIter->Base.iCurRecord + 1) % VBVA_MAX_RECORDS;
    pIter->Base.off32CurCmd = (pIter->Base.off32CurCmd + cbBuffer) % pVBVA->cbData;

    if (pcbBuffer)
        *pcbBuffer = cbBuffer;

    return pvData;
}
716
717/**/
718
/** Enables the command VBVA ring previously mapped by VBoxCmdVbvaCreate. */
int VBoxCmdVbvaEnable(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
{
    return VBoxVBVAExEnable(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, pVbva->Vbva.pVBVA);
}
723
/** Disables the command VBVA ring; always succeeds from the guest side. */
int VBoxCmdVbvaDisable(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
{
    VBoxVBVAExDisable(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx);
    return VINF_SUCCESS;
}
729
730int VBoxCmdVbvaDestroy(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
731{
732 int rc = VINF_SUCCESS;
733 VBoxMPCmnUnmapAdapterMemory(VBoxCommonFromDeviceExt(pDevExt), (void**)&pVbva->Vbva.pVBVA);
734 memset(pVbva, 0, sizeof (*pVbva));
735 return rc;
736}
737
/**
 * Reports a DMA completion/preemption/fault to the DirectX graphics kernel.
 * Must be called at device interrupt time (it invokes DxgkCbNotifyInterrupt).
 *
 * @param u32FenceId    fence of the command being reported.
 * @param enmComplType  which notification to raise; unknown values are
 *                      warned about and produce a zeroed notification.
 */
static void vboxCmdVbvaDdiNotifyCompleteIrq(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, UINT u32FenceId, DXGK_INTERRUPT_TYPE enmComplType)
{
    DXGKARGCB_NOTIFY_INTERRUPT_DATA notify;
    memset(&notify, 0, sizeof(DXGKARGCB_NOTIFY_INTERRUPT_DATA));
    switch (enmComplType)
    {
        case DXGK_INTERRUPT_DMA_COMPLETED:
            notify.InterruptType = DXGK_INTERRUPT_DMA_COMPLETED;
            notify.DmaCompleted.SubmissionFenceId = u32FenceId;
            notify.DmaCompleted.NodeOrdinal = pVbva->idNode;
            break;

        case DXGK_INTERRUPT_DMA_PREEMPTED:
            notify.InterruptType = DXGK_INTERRUPT_DMA_PREEMPTED;
            notify.DmaPreempted.PreemptionFenceId = u32FenceId;
            notify.DmaPreempted.NodeOrdinal = pVbva->idNode;
            notify.DmaPreempted.LastCompletedFenceId = pVbva->u32FenceCompleted;
            break;

        case DXGK_INTERRUPT_DMA_FAULTED:
            /* Not expected in normal operation. */
            Assert(0);
            notify.InterruptType = DXGK_INTERRUPT_DMA_FAULTED;
            notify.DmaFaulted.FaultedFenceId = u32FenceId;
            notify.DmaFaulted.Status = STATUS_UNSUCCESSFUL; /* @todo: better status ? */
            notify.DmaFaulted.NodeOrdinal = pVbva->idNode;
            break;

        default:
            WARN(("unrecognized completion type %d", enmComplType));
            break;
    }

    pDevExt->u.primary.DxgkInterface.DxgkCbNotifyInterrupt(pDevExt->u.primary.DxgkInterface.DeviceHandle, &notify);
}
772
/** Context passed through DxgkCbSynchronizeExecution to
 *  vboxCmdVbvaDdiNotifyPreemptCb. */
typedef struct VBOXCMDVBVA_NOTIFYPREEMPT_CB
{
    PVBOXMP_DEVEXT pDevExt;      /* device extension */
    VBOXCMDVBVA *pVbva;          /* command VBVA the preemption targets */
    int rc;                      /* out: VBox status set by the callback */
    UINT u32SubmitFenceId;       /* fence of the submission to preempt after (0 = immediate) */
    UINT u32PreemptFenceId;      /* fence to report for the preemption itself */
} VBOXCMDVBVA_NOTIFYPREEMPT_CB;
781
/**
 * Synchronized (interrupt-level) worker for vboxCmdVbvaDdiNotifyPreempt.
 *
 * If the submission to be preempted has already been processed, reports the
 * preemption immediately; otherwise queues the (submit, preempt) fence pair
 * in the aPreempt ring to be reported once the submission completes.
 *
 * @returns TRUE on success, FALSE when the preempt map is full
 *          (pData->rc carries the status either way).
 */
static BOOLEAN vboxCmdVbvaDdiNotifyPreemptCb(PVOID pvContext)
{
    VBOXCMDVBVA_NOTIFYPREEMPT_CB* pData = (VBOXCMDVBVA_NOTIFYPREEMPT_CB*)pvContext;
    PVBOXMP_DEVEXT pDevExt = pData->pDevExt;
    VBOXCMDVBVA *pVbva = pData->pVbva;
    Assert(pVbva->u32FenceProcessed >= pVbva->u32FenceCompleted);
    if (!pData->u32SubmitFenceId || pVbva->u32FenceProcessed == pData->u32SubmitFenceId)
    {
        /* The target submission is already done: notify right away. */
        vboxCmdVbvaDdiNotifyCompleteIrq(pDevExt, pVbva, pData->u32PreemptFenceId, DXGK_INTERRUPT_DMA_PREEMPTED);

        pDevExt->u.primary.DxgkInterface.DxgkCbQueueDpc(pDevExt->u.primary.DxgkInterface.DeviceHandle);
    }
    else
    {
        /* Still in flight: remember the pair for later reporting. */
        Assert(pVbva->u32FenceProcessed < pData->u32SubmitFenceId);
        Assert(pVbva->cPreempt <= VBOXCMDVBVA_PREEMPT_EL_SIZE);
        if (pVbva->cPreempt == VBOXCMDVBVA_PREEMPT_EL_SIZE)
        {
            WARN(("no more free elements in preempt map"));
            pData->rc = VERR_BUFFER_OVERFLOW;
            return FALSE;
        }
        uint32_t iNewEl = (pVbva->iCurPreempt + pVbva->cPreempt) % VBOXCMDVBVA_PREEMPT_EL_SIZE;
        Assert(iNewEl < VBOXCMDVBVA_PREEMPT_EL_SIZE);
        pVbva->aPreempt[iNewEl].u32SubmitFence = pData->u32SubmitFenceId;
        pVbva->aPreempt[iNewEl].u32PreemptFence = pData->u32PreemptFenceId;
        ++pVbva->cPreempt;
    }

    pData->rc = VINF_SUCCESS;
    return TRUE;
}
814
815static int vboxCmdVbvaDdiNotifyPreempt(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, UINT u32SubmitFenceId, UINT u32PreemptFenceId)
816{
817 VBOXCMDVBVA_NOTIFYPREEMPT_CB Data;
818 Data.pDevExt = pDevExt;
819 Data.pVbva = pVbva;
820 Data.rc = VERR_INTERNAL_ERROR;
821 Data.u32SubmitFenceId = u32SubmitFenceId;
822 Data.u32PreemptFenceId = u32PreemptFenceId;
823 BOOLEAN bDummy;
824 NTSTATUS Status = pDevExt->u.primary.DxgkInterface.DxgkCbSynchronizeExecution(
825 pDevExt->u.primary.DxgkInterface.DeviceHandle,
826 vboxCmdVbvaDdiNotifyPreemptCb,
827 &Data,
828 0, /* IN ULONG MessageNumber */
829 &bDummy);
830 if (!NT_SUCCESS(Status))
831 {
832 WARN(("DxgkCbSynchronizeExecution failed Status %#x", Status));
833 return VERR_GENERAL_FAILURE;
834 }
835
836 if (!RT_SUCCESS(Data.rc))
837 {
838 WARN(("vboxCmdVbvaDdiNotifyPreemptCb failed rc %d", Data.rc));
839 return Data.rc;
840 }
841
842 return VINF_SUCCESS;
843}
844
845static int vboxCmdVbvaFlush(PVBOXMP_DEVEXT pDevExt, HGSMIGUESTCOMMANDCONTEXT *pCtx, bool fBufferOverflow)
846{
847 /* Issue the flush command. */
848 VBVACMDVBVAFLUSH *pFlush = (VBVACMDVBVAFLUSH*)VBoxHGSMIBufferAlloc(pCtx,
849 sizeof (VBVACMDVBVAFLUSH),
850 HGSMI_CH_VBVA,
851 VBVA_CMDVBVA_FLUSH);
852 if (!pFlush)
853 {
854 WARN(("VBoxHGSMIBufferAlloc failed\n"));
855 return VERR_OUT_OF_RESOURCES;
856 }
857
858 pFlush->u32Flags = fBufferOverflow ? VBVACMDVBVAFLUSH_F_GUEST_BUFFER_OVERFLOW : 0;
859
860 VBoxHGSMIBufferSubmit(pCtx, pFlush);
861
862 VBoxHGSMIBufferFree(pCtx, pFlush);
863
864 return VINF_SUCCESS;
865}
866
/** Context passed through DxgkCbSynchronizeExecution to
 *  vboxCmdVbvaCheckCompletedIrqCb; receives a fence-state snapshot. */
typedef struct VBOXCMDVBVA_CHECK_COMPLETED_CB
{
    PVBOXMP_DEVEXT pDevExt;
    VBOXCMDVBVA *pVbva;
    /* last completed fence id */
    uint32_t u32FenceCompleted;
    /* last submitted fence id */
    uint32_t u32FenceSubmitted;
    /* last processed fence id (i.e. either completed or cancelled) */
    uint32_t u32FenceProcessed;
} VBOXCMDVBVA_CHECK_COMPLETED_CB;
878
879static BOOLEAN vboxCmdVbvaCheckCompletedIrqCb(PVOID pContext)
880{
881 VBOXCMDVBVA_CHECK_COMPLETED_CB *pCompleted = (VBOXCMDVBVA_CHECK_COMPLETED_CB*)pContext;
882 BOOLEAN bRc = DxgkDdiInterruptRoutineNew(pCompleted->pDevExt, 0);
883 if (pCompleted->pVbva)
884 {
885 pCompleted->u32FenceCompleted = pCompleted->pVbva->u32FenceCompleted;
886 pCompleted->u32FenceSubmitted = pCompleted->pVbva->u32FenceSubmitted;
887 pCompleted->u32FenceProcessed = pCompleted->pVbva->u32FenceProcessed;
888 }
889 else
890 {
891 WARN(("no vbva"));
892 pCompleted->u32FenceCompleted = 0;
893 pCompleted->u32FenceSubmitted = 0;
894 pCompleted->u32FenceProcessed = 0;
895 }
896 return bRc;
897}
898
899
/**
 * Polls command completion state, optionally pinging the host first with a
 * flush command.
 *
 * @param fPingHost          when true, submit a VBVA_CMDVBVA_FLUSH before polling.
 * @param fBufferOverflow    forwarded to the flush command.
 * @param pu32FenceSubmitted optional; receives the last submitted fence.
 * @param pu32FenceProcessed optional; receives the last processed fence.
 * @returns the last completed fence id (0 when pVbva is NULL).
 */
static uint32_t vboxCmdVbvaCheckCompleted(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, bool fPingHost, HGSMIGUESTCOMMANDCONTEXT *pCtx, bool fBufferOverflow, uint32_t *pu32FenceSubmitted, uint32_t *pu32FenceProcessed)
{
    if (fPingHost)
        vboxCmdVbvaFlush(pDevExt, pCtx, fBufferOverflow);

    VBOXCMDVBVA_CHECK_COMPLETED_CB context;
    context.pDevExt = pDevExt;
    context.pVbva = pVbva;
    context.u32FenceCompleted = 0;
    context.u32FenceSubmitted = 0;
    context.u32FenceProcessed = 0;
    BOOLEAN bRet;
    /* Snapshot the fence counters synchronized with the device interrupt. */
    NTSTATUS Status = pDevExt->u.primary.DxgkInterface.DxgkCbSynchronizeExecution(
            pDevExt->u.primary.DxgkInterface.DeviceHandle,
            vboxCmdVbvaCheckCompletedIrqCb,
            &context,
            0, /* IN ULONG MessageNumber */
            &bRet);
    Assert(Status == STATUS_SUCCESS);

    if (pu32FenceSubmitted)
        *pu32FenceSubmitted = context.u32FenceSubmitted;

    if (pu32FenceProcessed)
        *pu32FenceProcessed = context.u32FenceProcessed;

    return context.u32FenceCompleted;
}
928
/** Flush callback installed on the CMDVBVA buffer context (see
 *  VBoxCmdVbvaCreate): pings the host and processes completions.
 *  NOTE(review): name is missing the 'b' in "vbox" — kept as-is since it is
 *  referenced by name elsewhere. */
DECLCALLBACK(void) voxCmdVbvaFlushCb(struct VBVAEXBUFFERCONTEXT *pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, void *pvFlush)
{
    PVBOXMP_DEVEXT pDevExt = (PVBOXMP_DEVEXT)pvFlush;

    vboxCmdVbvaCheckCompleted(pDevExt, NULL,  true /*fPingHost*/, pHGSMICtx, true /*fBufferOverflow*/, NULL, NULL);
}
935
936int VBoxCmdVbvaCreate(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, ULONG offBuffer, ULONG cbBuffer)
937{
938 memset(pVbva, 0, sizeof (*pVbva));
939
940 int rc = VBoxMPCmnMapAdapterMemory(VBoxCommonFromDeviceExt(pDevExt),
941 (void**)&pVbva->Vbva.pVBVA,
942 offBuffer,
943 cbBuffer);
944 if (RT_SUCCESS(rc))
945 {
946 Assert(pVbva->Vbva.pVBVA);
947 VBoxVBVAExSetupBufferContext(&pVbva->Vbva, offBuffer, cbBuffer, voxCmdVbvaFlushCb, pDevExt);
948 }
949 else
950 {
951 WARN(("VBoxMPCmnMapAdapterMemory failed rc %d", rc));
952 }
953
954 return rc;
955}
956
/**
 * Finalizes a command reserved with VBoxCmdVbvaSubmitLock and kicks the host
 * if it is not already processing the ring.
 *
 * @param pCmd        the in-ring command returned by VBoxCmdVbvaSubmitLock.
 * @param u32FenceID  fence to stamp into the command; 0 is warned about but
 *                    still stored.
 */
void VBoxCmdVbvaSubmitUnlock(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, VBOXCMDVBVA_HDR* pCmd, uint32_t u32FenceID)
{
    if (u32FenceID)
        pVbva->u32FenceSubmitted = u32FenceID;
    else
        WARN(("no cmd fence specified"));

    pCmd->u8State = VBOXCMDVBVA_STATE_SUBMITTED;

    pCmd->u2.u32FenceID = u32FenceID;

    /* Closing the record publishes the command to the host. */
    VBoxVBVAExBufferEndUpdate(&pVbva->Vbva);

    if (!VBoxVBVAExIsProcessing(&pVbva->Vbva))
    {
        /* Host is idle: issue an explicit submit command to wake it.
         * (If it is already processing, it will pick the record up itself.) */
        HGSMIGUESTCOMMANDCONTEXT *pCtx = &VBoxCommonFromDeviceExt(pDevExt)->guestCtx;
        VBVACMDVBVASUBMIT *pSubmit = (VBVACMDVBVASUBMIT*)VBoxHGSMIBufferAlloc(pCtx,
                                                                              sizeof (VBVACMDVBVASUBMIT),
                                                                              HGSMI_CH_VBVA,
                                                                              VBVA_CMDVBVA_SUBMIT);
        if (!pSubmit)
        {
            WARN(("VBoxHGSMIBufferAlloc failed\n"));
            return;
        }

        pSubmit->u32Reserved = 0;

        VBoxHGSMIBufferSubmit(pCtx, pSubmit);

        VBoxHGSMIBufferFree(pCtx, pSubmit);
    }
}
991
/**
 * Reserves a contiguous cbCmd-byte area in the command VBVA ring and opens a
 * buffer update; on success the returned header must be filled in and handed
 * back via VBoxCmdVbvaSubmitUnlock().
 *
 * If the free space at the end of the ring is too small, the tail is consumed
 * by a NOP command and the allocation is retried from the ring start —
 * commands are never split across the wrap-around point.
 *
 * @returns Pointer into the ring buffer, or NULL on failure.
 * @param pDevExt Device extension.
 * @param pVbva   Command VBVA instance.
 * @param cbCmd   Total size of the command to reserve, in bytes.
 */
VBOXCMDVBVA_HDR* VBoxCmdVbvaSubmitLock(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, uint32_t cbCmd)
{
    if (VBoxVBVAExGetSize(&pVbva->Vbva) < cbCmd)
    {
        WARN(("buffer does not fit the vbva buffer, we do not support splitting buffers"));
        return NULL;
    }

    if (!VBoxVBVAExBufferBeginUpdate(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx))
    {
        WARN(("VBoxVBVAExBufferBeginUpdate failed!"));
        return NULL;
    }

    void* pvBuffer = VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbCmd);
    if (!pvBuffer)
    {
        WARN(("failed to allocate contiguous buffer, trying nopping the tail"));
        uint32_t cbTail = VBoxVBVAExGetFreeTail(&pVbva->Vbva);
        if (!cbTail)
        {
            WARN(("this is not a free tail case, cbTail is NULL"));
            return NULL;
        }

        Assert(cbTail < cbCmd);

        /* Claim the entire free tail and turn it into a NOP so the host
         * skips straight to the wrap-around. */
        pvBuffer = VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbTail);

        /* NOTE(review): only Assert-checked — a NULL here would be
         * dereferenced below.  The tail was just reported free, so this
         * allocation is expected to always succeed; confirm. */
        Assert(pvBuffer);

        *((uint8_t*)pvBuffer) = VBOXCMDVBVA_OPTYPE_NOP;

        /* Close the NOP update and open a fresh one; the real allocation can
         * now be satisfied from the beginning of the ring. */
        VBoxVBVAExBufferEndUpdate(&pVbva->Vbva);

        if (!VBoxVBVAExBufferBeginUpdate(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx))
        {
            WARN(("VBoxVBVAExBufferBeginUpdate 2 failed!"));
            return NULL;
        }

        pvBuffer = VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbCmd);
        if (!pvBuffer)
        {
            WARN(("failed to allocate contiguous buffer, failing"));
            return NULL;
        }
    }

    Assert(pvBuffer);

    return (VBOXCMDVBVA_HDR*)pvBuffer;
}
1045
1046int VBoxCmdVbvaSubmit(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, struct VBOXCMDVBVA_HDR *pCmd, uint32_t u32FenceID, uint32_t cbCmd)
1047{
1048 VBOXCMDVBVA_HDR* pHdr = VBoxCmdVbvaSubmitLock(pDevExt, pVbva, cbCmd);
1049
1050 if (!pHdr)
1051 {
1052 WARN(("VBoxCmdVbvaSubmitLock failed"));
1053 return VERR_GENERAL_FAILURE;
1054 }
1055
1056 memcpy(pHdr, pCmd, cbCmd);
1057
1058 VBoxCmdVbvaSubmitUnlock(pDevExt, pVbva, pCmd, u32FenceID);
1059
1060 return VINF_SUCCESS;
1061}
1062
/**
 * Attempts to preempt pending commands in the ring.
 *
 * Walks the ring backwards (newest first): NOP fillers are skipped, commands
 * still in the SUBMITTED state are atomically flipped to CANCELLED so the
 * host will skip them, and the walk stops at the first command the host
 * already has IN_PROGRESS — its fence is recorded so the preemption can be
 * reported once that command finishes.
 *
 * @returns false — preemption completion is always reported asynchronously
 *          through vboxCmdVbvaDdiNotifyPreempt().
 * @param pDevExt    Device extension.
 * @param pVbva      Command VBVA instance.
 * @param u32FenceID Fence of the preemption request itself.
 */
bool VBoxCmdVbvaPreempt(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, uint32_t u32FenceID)
{
    VBVAEXBUFFERBACKWARDITER Iter;
    VBoxVBVAExBIterInit(&pVbva->Vbva, &Iter);

    uint32_t cbBuffer;
    bool fProcessed;
    uint8_t* pu8Cmd;
    uint32_t u32SubmitFence = 0;

    /* we can do it right here */
    while ((pu8Cmd = (uint8_t*)VBoxVBVAExBIterNext(&Iter, &cbBuffer, &fProcessed)) != NULL)
    {
        if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
            continue;

        VBOXCMDVBVA_HDR *pCmd = (VBOXCMDVBVA_HDR*)pu8Cmd;

        /* SUBMITTED -> CANCELLED; commands already cancelled are skipped as
         * well.  Racing with the host is fine: if the CAS loses, the command
         * is IN_PROGRESS and terminates the walk below. */
        if (ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_CANCELLED, VBOXCMDVBVA_STATE_SUBMITTED)
                || pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED)
            continue;

        Assert(pCmd->u8State == VBOXCMDVBVA_STATE_IN_PROGRESS);

        u32SubmitFence = pCmd->u2.u32FenceID;
        break;
    }

    vboxCmdVbvaDdiNotifyPreempt(pDevExt, pVbva, u32SubmitFence, u32FenceID);

    return false;
}
1095
/**
 * Interrupt-time scan of the command VBVA ring for commands the host has
 * finished processing.
 *
 * Iterates forward from the oldest unreleased command and stops at the first
 * one not yet processed by the host.  For each processed command the ring
 * space is released, fence bookkeeping is updated and a DMA-completed
 * notification is queued (cancelled commands complete silently).  When a
 * completed fence matches the head of the preemption queue, the pending
 * preemption notification is queued as well.
 *
 * @returns true if any completion/preemption notification was queued, i.e.
 *          the caller must flush queued interrupt notifications.
 * @param pDevExt Device extension.
 * @param pVbva   Command VBVA instance.
 */
bool VBoxCmdVbvaCheckCompletedIrq(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
{
    VBVAEXBUFFERFORWARDITER Iter;
    VBoxVBVAExCFIterInit(&pVbva->Vbva, &Iter);

    bool fHasCommandsCompletedPreempted = false;
    bool fProcessed;
    uint8_t* pu8Cmd;


    while ((pu8Cmd = (uint8_t*)VBoxVBVAExCFIterNext(&Iter, NULL, &fProcessed)) != NULL)
    {
        /* Commands complete strictly in order; stop at the first one the
         * host has not finished with. */
        if (!fProcessed)
            break;

        if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
            continue;

        VBOXCMDVBVA_HDR *pCmd = (VBOXCMDVBVA_HDR*)pu8Cmd;
        /* Snapshot state/fence before releasing the ring space. */
        uint8_t u8State = pCmd->u8State;
        uint32_t u32FenceID = pCmd->u2.u32FenceID;

        Assert(u8State == VBOXCMDVBVA_STATE_IN_PROGRESS
                || u8State == VBOXCMDVBVA_STATE_CANCELLED);
        Assert(u32FenceID);
        VBoxVBVAExCBufferCompleted(&pVbva->Vbva);

        if (!u32FenceID)
        {
            WARN(("fence is NULL"));
            continue;
        }

        pVbva->u32FenceProcessed = u32FenceID;

        if (u8State == VBOXCMDVBVA_STATE_IN_PROGRESS)
            pVbva->u32FenceCompleted = u32FenceID;
        else
        {
            /* Cancelled commands were preempted before the host picked them
             * up; no DMA completion is reported for them. */
            Assert(u8State == VBOXCMDVBVA_STATE_CANCELLED);
            continue;
        }

        Assert(u32FenceID);
        vboxCmdVbvaDdiNotifyCompleteIrq(pDevExt, pVbva, u32FenceID, DXGK_INTERRUPT_DMA_COMPLETED);

        /* A pending preemption is reported once the fence it waited on
         * completes; the queue is a circular array of cPreempt entries. */
        if (pVbva->cPreempt && pVbva->aPreempt[pVbva->iCurPreempt].u32SubmitFence == u32FenceID)
        {
            Assert(pVbva->aPreempt[pVbva->iCurPreempt].u32PreemptFence);
            vboxCmdVbvaDdiNotifyCompleteIrq(pDevExt, pVbva, pVbva->aPreempt[pVbva->iCurPreempt].u32PreemptFence, DXGK_INTERRUPT_DMA_PREEMPTED);
            --pVbva->cPreempt;
            if (!pVbva->cPreempt)
                pVbva->iCurPreempt = 0;
            else
            {
                ++pVbva->iCurPreempt;
                pVbva->iCurPreempt %= VBOXCMDVBVA_PREEMPT_EL_SIZE;
            }
        }

        fHasCommandsCompletedPreempted = true;
    }

#ifdef DEBUG
    vboxHwBufferVerifyCompleted(&pVbva->Vbva);
#endif

    return fHasCommandsCompletedPreempted;
}
1165
1166uint32_t VBoxCmdVbvaCheckCompleted(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, bool fPingHost, uint32_t *pu32FenceSubmitted, uint32_t *pu32FenceProcessed)
1167{
1168 return vboxCmdVbvaCheckCompleted(pDevExt, pVbva, fPingHost, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, false /* fBufferOverflow */, pu32FenceSubmitted, pu32FenceProcessed);
1169}
1170
#if 0
/* Dead code: the run-length encoded VBOXCMDVBVA_SYSMEMEL element format was
 * superseded by the flat page-index array built by the active
 * VBoxCVDdiPTransferVRamSysBuildEls() below; kept for reference only. */
static uint32_t vboxCVDdiSysMemElBuild(VBOXCMDVBVA_SYSMEMEL *pEl, PMDL pMdl, uint32_t iPfn, uint32_t cPages)
{
    PFN_NUMBER cur = MmGetMdlPfnArray(pMdl)[iPfn];
    uint32_t cbEl = sizeof (*pEl);
    uint32_t cStoredPages = 1;
    PFN_NUMBER next;
    pEl->iPage1 = (uint32_t)(cur & 0xfffff);
    pEl->iPage2 = (uint32_t)(cur >> 20);
    --cPages;
    /* Extend the element while the MDL pages stay physically contiguous. */
    for ( ; cPages && cStoredPages < VBOXCMDVBVA_SYSMEMEL_CPAGES_MAX; --cPages, ++cStoredPages, cur = next)
    {
        next = MmGetMdlPfnArray(pMdl)[iPfn+cStoredPages];
        if (next != cur+1)
            break;
    }

    Assert(cStoredPages);
    pEl->cPagesAfterFirst = cStoredPages - 1;

    return cPages;
}

uint32_t VBoxCVDdiPTransferVRamSysBuildEls(VBOXCMDVBVA_PAGING_TRANSFER *pCmd, PMDL pMdl, uint32_t iPfn, uint32_t cPages, uint32_t cbBuffer, uint32_t *pcPagesWritten)
{
    uint32_t cInitPages = cPages;
    uint32_t cbInitBuffer = cbBuffer;
    uint32_t cEls = 0;
    VBOXCMDVBVA_SYSMEMEL *pEl = pCmd->aSysMem;

    Assert(cbBuffer >= sizeof (VBOXCMDVBVA_PAGING_TRANSFER));

    cbBuffer -= RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, aSysMem);

    for (; cPages && cbBuffer >= sizeof (VBOXCMDVBVA_SYSMEMEL); ++cEls, cbBuffer-=sizeof (VBOXCMDVBVA_SYSMEMEL), ++pEl)
    {
        cPages = vboxCVDdiSysMemElBuild(pEl, pMdl, iPfn + cInitPages - cPages, cPages);
    }

    *pcPagesWritten = cInitPages - cPages;
    return cbInitBuffer - cbBuffer;
}
#endif
1214
1215uint32_t VBoxCVDdiPTransferVRamSysBuildEls(VBOXCMDVBVA_PAGING_TRANSFER *pCmd, PMDL pMdl, uint32_t iPfn, uint32_t cPages, uint32_t cbBuffer, uint32_t *pcPagesWritten)
1216{
1217 uint32_t cbInitBuffer = cbBuffer;
1218 uint32_t i = 0;
1219 VBOXCMDVBVAPAGEIDX *pPageNumbers = pCmd->Data.aPageNumbers;
1220
1221 cbBuffer -= RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers);
1222
1223 for (; i < cPages && cbBuffer >= sizeof (*pPageNumbers); ++i, cbBuffer -= sizeof (*pPageNumbers))
1224 {
1225 pPageNumbers[i] = (VBOXCMDVBVAPAGEIDX)(MmGetMdlPfnArray(pMdl)[iPfn + i]);
1226 }
1227
1228 *pcPagesWritten = i;
1229 Assert(cbInitBuffer - cbBuffer == RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers[i]));
1230 Assert(cbInitBuffer - cbBuffer >= sizeof (VBOXCMDVBVA_PAGING_TRANSFER));
1231 return cbInitBuffer - cbBuffer;
1232}
1233
1234
1235int vboxCmdVbvaConConnect(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
1236 uint32_t crVersionMajor, uint32_t crVersionMinor,
1237 uint32_t *pu32ClientID)
1238{
1239 VBOXCMDVBVA_CTL_3DCTL_CONNECT *pConnect = (VBOXCMDVBVA_CTL_3DCTL_CONNECT*)vboxCmdVbvaCtlCreate(pHGSMICtx, sizeof (VBOXCMDVBVA_CTL_3DCTL_CONNECT));
1240 if (!pConnect)
1241 {
1242 WARN(("vboxCmdVbvaCtlCreate failed"));
1243 return VERR_OUT_OF_RESOURCES;
1244 }
1245 pConnect->Hdr.u32Type = VBOXCMDVBVACTL_TYPE_3DCTL;
1246 pConnect->Hdr.i32Result = VERR_NOT_SUPPORTED;
1247 pConnect->Connect.Hdr.u32Type = VBOXCMDVBVA3DCTL_TYPE_CONNECT;
1248 pConnect->Connect.Hdr.u32CmdClientId = 0;
1249 pConnect->Connect.u32MajorVersion = crVersionMajor;
1250 pConnect->Connect.u32MinorVersion = crVersionMinor;
1251 pConnect->Connect.u64Pid = (uint64_t)PsGetCurrentProcessId();
1252
1253 int rc = vboxCmdVbvaCtlSubmitSync(pHGSMICtx, &pConnect->Hdr);
1254 if (RT_SUCCESS(rc))
1255 {
1256 rc = pConnect->Hdr.i32Result;
1257 if (RT_SUCCESS(rc))
1258 {
1259 Assert(pConnect->Connect.Hdr.u32CmdClientId);
1260 *pu32ClientID = pConnect->Connect.Hdr.u32CmdClientId;
1261 }
1262 else
1263 WARN(("VBOXCMDVBVA3DCTL_TYPE_CONNECT Disable failed %d", rc));
1264 }
1265 else
1266 WARN(("vboxCmdVbvaCtlSubmitSync returnd %d", rc));
1267
1268 vboxCmdVbvaCtlFree(pHGSMICtx, &pConnect->Hdr);
1269
1270 return rc;
1271}
1272
1273int vboxCmdVbvaConDisconnect(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, uint32_t u32ClientID)
1274{
1275 VBOXCMDVBVA_CTL_3DCTL *pDisconnect = (VBOXCMDVBVA_CTL_3DCTL*)vboxCmdVbvaCtlCreate(pHGSMICtx, sizeof (VBOXCMDVBVA_CTL_3DCTL));
1276 if (!pDisconnect)
1277 {
1278 WARN(("vboxCmdVbvaCtlCreate failed"));
1279 return VERR_OUT_OF_RESOURCES;
1280 }
1281 pDisconnect->Hdr.u32Type = VBOXCMDVBVACTL_TYPE_3DCTL;
1282 pDisconnect->Hdr.i32Result = VERR_NOT_SUPPORTED;
1283 pDisconnect->Ctl.u32Type = VBOXCMDVBVA3DCTL_TYPE_DISCONNECT;
1284 pDisconnect->Ctl.u32CmdClientId = u32ClientID;
1285
1286 int rc = vboxCmdVbvaCtlSubmitSync(pHGSMICtx, &pDisconnect->Hdr);
1287 if (RT_SUCCESS(rc))
1288 {
1289 rc = pDisconnect->Hdr.i32Result;
1290 if (!RT_SUCCESS(rc))
1291 WARN(("VBOXCMDVBVA3DCTL_TYPE_DISCONNECT Disable failed %d", rc));
1292 }
1293 else
1294 WARN(("vboxCmdVbvaCtlSubmitSync returnd %d", rc));
1295
1296 vboxCmdVbvaCtlFree(pHGSMICtx, &pDisconnect->Hdr);
1297
1298 return rc;
1299}
1300
1301int VBoxCmdVbvaConConnect(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva,
1302 uint32_t crVersionMajor, uint32_t crVersionMinor,
1303 uint32_t *pu32ClientID)
1304{
1305 return vboxCmdVbvaConConnect(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, crVersionMajor, crVersionMinor, pu32ClientID);
1306}
1307
1308int VBoxCmdVbvaConDisconnect(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, uint32_t u32ClientID)
1309{
1310 return vboxCmdVbvaConDisconnect(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, u32ClientID);
1311}
1312
1313VBOXCMDVBVA_CRCMD_CMD* vboxCmdVbvaConCmdAlloc(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, uint32_t cbCmd)
1314{
1315 VBOXCMDVBVA_CTL_3DCTL_CMD *pCmd = (VBOXCMDVBVA_CTL_3DCTL_CMD*)vboxCmdVbvaCtlCreate(pHGSMICtx, sizeof (VBOXCMDVBVA_CTL_3DCTL_CMD) + cbCmd);
1316 if (!pCmd)
1317 {
1318 WARN(("vboxCmdVbvaCtlCreate failed"));
1319 return NULL;
1320 }
1321 pCmd->Hdr.u32Type = VBOXCMDVBVACTL_TYPE_3DCTL;
1322 pCmd->Hdr.i32Result = VERR_NOT_SUPPORTED;
1323 pCmd->Cmd.Hdr.u32Type = VBOXCMDVBVA3DCTL_TYPE_CMD;
1324 pCmd->Cmd.Hdr.u32CmdClientId = 0;
1325 pCmd->Cmd.Cmd.u8OpCode = VBOXCMDVBVA_OPTYPE_CRCMD;
1326 pCmd->Cmd.Cmd.u8Flags = 0;
1327 pCmd->Cmd.Cmd.u8State = VBOXCMDVBVA_STATE_SUBMITTED;
1328 pCmd->Cmd.Cmd.u.i8Result = -1;
1329 pCmd->Cmd.Cmd.u2.u32FenceID = 0;
1330
1331 return (VBOXCMDVBVA_CRCMD_CMD*)(pCmd+1);
1332}
1333
1334void vboxCmdVbvaConCmdFree(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CRCMD_CMD *pCmd)
1335{
1336 VBOXCMDVBVA_CTL_3DCTL_CMD *pHdr = ((VBOXCMDVBVA_CTL_3DCTL_CMD*)pCmd)-1;
1337 vboxCmdVbvaCtlFree(pHGSMICtx, &pHdr->Hdr);
1338}
1339
1340int vboxCmdVbvaConSubmitAsync(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CRCMD_CMD* pCmd, FNVBOXSHGSMICMDCOMPLETION pfnCompletion, void *pvCompletion)
1341{
1342 VBOXCMDVBVA_CTL_3DCTL_CMD *pHdr = ((VBOXCMDVBVA_CTL_3DCTL_CMD*)pCmd)-1;
1343 return vboxCmdVbvaCtlSubmitAsync(pHGSMICtx, &pHdr->Hdr, pfnCompletion, pvCompletion);
1344}
1345
1346VBOXCMDVBVA_CRCMD_CMD* VBoxCmdVbvaConCmdAlloc(PVBOXMP_DEVEXT pDevExt, uint32_t cbCmd)
1347{
1348 return vboxCmdVbvaConCmdAlloc(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbCmd);
1349}
1350
1351void VBoxCmdVbvaConCmdFree(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA_CRCMD_CMD *pCmd)
1352{
1353 vboxCmdVbvaConCmdFree(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, pCmd);
1354}
1355
1356int VBoxCmdVbvaConCmdSubmitAsync(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA_CRCMD_CMD* pCmd, FNVBOXSHGSMICMDCOMPLETION pfnCompletion, void *pvCompletion)
1357{
1358 return vboxCmdVbvaConSubmitAsync(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, pCmd, pfnCompletion, pvCompletion);
1359}
1360
1361int VBoxCmdVbvaConCmdCompletionData(void *pvCmd, VBOXCMDVBVA_CRCMD_CMD **ppCmd)
1362{
1363 VBOXCMDVBVA_CTL_3DCTL_CMD *pCmd = (VBOXCMDVBVA_CTL_3DCTL_CMD*)pvCmd;
1364 if (ppCmd)
1365 *ppCmd = (VBOXCMDVBVA_CRCMD_CMD*)(pCmd+1);
1366 return pCmd->Hdr.i32Result;
1367}
1368
/**
 * Sends a synchronous VBOXCMDVBVACTL_TYPE_RESIZE control describing a new
 * screen mode/position and the targets it applies to.
 *
 * Must be called below DISPATCH_LEVEL (submission blocks).
 *
 * @returns VBox status code (local setup failure, submission failure, or the
 *          host's result).
 * @param pDevExt    Device extension.
 * @param pAllocData Allocation data describing the new surface/mode.
 * @param pTargetMap Bitmap of video present targets the resize applies to.
 * @param pVScreenPos New virtual screen position.
 * @param fFlags     Screen flags passed through to vboxWddmScreenInfoInit().
 */
int VBoxCmdVbvaConCmdResize(PVBOXMP_DEVEXT pDevExt, const VBOXWDDM_ALLOC_DATA *pAllocData, const uint32_t *pTargetMap, const POINT * pVScreenPos, uint16_t fFlags)
{
    Assert(KeGetCurrentIrql() < DISPATCH_LEVEL);

    VBOXCMDVBVA_CTL_RESIZE *pResize = (VBOXCMDVBVA_CTL_RESIZE*)vboxCmdVbvaCtlCreate(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, sizeof (VBOXCMDVBVA_CTL_RESIZE));
    if (!pResize)
    {
        WARN(("vboxCmdVbvaCtlCreate failed"));
        return VERR_OUT_OF_RESOURCES;
    }

    pResize->Hdr.u32Type = VBOXCMDVBVACTL_TYPE_RESIZE;
    /* Preset the result so a host that ignores the request reads as failure. */
    pResize->Hdr.i32Result = VERR_NOT_IMPLEMENTED;

    /* Only a single resize entry is submitted per control. */
    int rc = vboxWddmScreenInfoInit(&pResize->Resize.aEntries[0].Screen, pAllocData, pVScreenPos, fFlags);
    if (RT_SUCCESS(rc))
    {
        memcpy(&pResize->Resize.aEntries[0].aTargetMap, pTargetMap, sizeof (pResize->Resize.aEntries[0].aTargetMap));
        rc = vboxCmdVbvaCtlSubmitSync(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, &pResize->Hdr);
        if (RT_SUCCESS(rc))
        {
            rc = pResize->Hdr.i32Result;
            if (RT_FAILURE(rc))
                WARN(("VBOXCMDVBVACTL_TYPE_RESIZE failed %d", rc));
        }
        else
            WARN(("vboxCmdVbvaCtlSubmitSync failed %d", rc));
    }
    else
        WARN(("vboxWddmScreenInfoInit failed %d", rc));

    vboxCmdVbvaCtlFree(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, &pResize->Hdr);

    return rc;
}
1404#endif
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette