VirtualBox

source: vbox/trunk/src/VBox/Additions/WINNT/Graphics/Video/mp/wddm/VBoxMPVbva.cpp@ 62522

Last change on this file since 62522 was 62522, checked in by vboxsync, 9 years ago

(C) 2016

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 45.7 KB
Line 
1/* $Id: VBoxMPVbva.cpp 62522 2016-07-22 19:17:25Z vboxsync $ */
2
3/** @file
4 * VBox WDDM Miniport driver
5 */
6
7/*
8 * Copyright (C) 2011-2016 Oracle Corporation
9 *
10 * This file is part of VirtualBox Open Source Edition (OSE), as
11 * available from http://www.virtualbox.org. This file is free software;
12 * you can redistribute it and/or modify it under the terms of the GNU
13 * General Public License (GPL) as published by the Free Software
14 * Foundation, in version 2 as it comes in the "COPYING" file of the
15 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
16 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
17 */
18
19#include "VBoxMPWddm.h"
20#include "common/VBoxMPCommon.h"
21
22/*
23 * Public hardware buffer methods.
24 */
25int vboxVbvaEnable (PVBOXMP_DEVEXT pDevExt, VBOXVBVAINFO *pVbva)
26{
27 if (VBoxVBVAEnable(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx,
28 pVbva->Vbva.pVBVA, pVbva->srcId))
29 return VINF_SUCCESS;
30
31 WARN(("VBoxVBVAEnable failed!"));
32 return VERR_GENERAL_FAILURE;
33}
34
35int vboxVbvaDisable (PVBOXMP_DEVEXT pDevExt, VBOXVBVAINFO *pVbva)
36{
37 VBoxVBVADisable(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, pVbva->srcId);
38 return VINF_SUCCESS;
39}
40
41int vboxVbvaCreate(PVBOXMP_DEVEXT pDevExt, VBOXVBVAINFO *pVbva, ULONG offBuffer, ULONG cbBuffer, D3DDDI_VIDEO_PRESENT_SOURCE_ID srcId)
42{
43 memset(pVbva, 0, sizeof(VBOXVBVAINFO));
44
45 KeInitializeSpinLock(&pVbva->Lock);
46
47 int rc = VBoxMPCmnMapAdapterMemory(VBoxCommonFromDeviceExt(pDevExt),
48 (void**)&pVbva->Vbva.pVBVA,
49 offBuffer,
50 cbBuffer);
51 if (RT_SUCCESS(rc))
52 {
53 Assert(pVbva->Vbva.pVBVA);
54 VBoxVBVASetupBufferContext(&pVbva->Vbva, offBuffer, cbBuffer);
55 pVbva->srcId = srcId;
56 }
57 else
58 {
59 WARN(("VBoxMPCmnMapAdapterMemory failed rc %d", rc));
60 }
61
62
63 return rc;
64}
65
66int vboxVbvaDestroy(PVBOXMP_DEVEXT pDevExt, VBOXVBVAINFO *pVbva)
67{
68 int rc = VINF_SUCCESS;
69 VBoxMPCmnUnmapAdapterMemory(VBoxCommonFromDeviceExt(pDevExt), (void**)&pVbva->Vbva.pVBVA);
70 memset(pVbva, 0, sizeof (VBOXVBVAINFO));
71 return rc;
72}
73
74int vboxVbvaReportDirtyRect (PVBOXMP_DEVEXT pDevExt, PVBOXWDDM_SOURCE pSrc, RECT *pRectOrig)
75{
76 VBVACMDHDR hdr;
77
78 RECT rect = *pRectOrig;
79
80// if (rect.left < 0) rect.left = 0;
81// if (rect.top < 0) rect.top = 0;
82// if (rect.right > (int)ppdev->cxScreen) rect.right = ppdev->cxScreen;
83// if (rect.bottom > (int)ppdev->cyScreen) rect.bottom = ppdev->cyScreen;
84
85 hdr.x = (int16_t)rect.left;
86 hdr.y = (int16_t)rect.top;
87 hdr.w = (uint16_t)(rect.right - rect.left);
88 hdr.h = (uint16_t)(rect.bottom - rect.top);
89
90 hdr.x += (int16_t)pSrc->VScreenPos.x;
91 hdr.y += (int16_t)pSrc->VScreenPos.y;
92
93 if (VBoxVBVAWrite(&pSrc->Vbva.Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, &hdr, sizeof(hdr)))
94 return VINF_SUCCESS;
95
96 WARN(("VBoxVBVAWrite failed"));
97 return VERR_GENERAL_FAILURE;
98}
99
100#ifdef VBOX_WITH_CROGL
101/* command vbva ring buffer */
102
103/* customized VBVA implementation */
104
105/* Forward declarations of internal functions. */
106static void vboxHwBufferPlaceDataAt(PVBVAEXBUFFERCONTEXT pCtx, const void *p,
107 uint32_t cb, uint32_t offset);
108static bool vboxHwBufferWrite(PVBVAEXBUFFERCONTEXT pCtx,
109 PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
110 const void *p, uint32_t cb);
111
/** Invokes the flush callback installed via VBoxVBVAExSetupBufferContext,
 *  passing along the user data (pvFlush) recorded there. */
DECLINLINE(void) vboxVBVAExFlush(struct VBVAEXBUFFERCONTEXT *pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx)
{
    pCtx->pfnFlush(pCtx, pHGSMICtx, pCtx->pvFlush);
}
116
/** Notifies the host of a new HGSMI command by writing its offset to the
 *  HGSMI I/O port.  Always returns VINF_SUCCESS. */
static int vboxCmdVbvaSubmitHgsmi(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, HGSMIOFFSET offDr)
{
    VBoxVideoCmnPortWriteUlong(pHGSMICtx->port, offDr);
    /* Make the compiler aware that the host has changed memory. */
    ASMCompilerBarrier();
    return VINF_SUCCESS;
}
124#define vboxCmdVbvaSubmit vboxCmdVbvaSubmitHgsmi
125
/** Allocates a VBVA control command of cbCtl bytes from the SHGSMI heap.
 *  Returns NULL on allocation failure; release with vboxCmdVbvaCtlFree. */
static VBOXCMDVBVA_CTL * vboxCmdVbvaCtlCreate(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, uint32_t cbCtl)
{
    Assert(cbCtl >= sizeof (VBOXCMDVBVA_CTL));
    return (VBOXCMDVBVA_CTL*)VBoxSHGSMICommandAlloc(&pHGSMICtx->heapCtx, cbCtl, HGSMI_CH_VBVA, VBVA_CMDVBVA_CTL);
}
131
/** Returns a control command obtained from vboxCmdVbvaCtlCreate to the SHGSMI heap. */
static void vboxCmdVbvaCtlFree(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CTL * pCtl)
{
    VBoxSHGSMICommandFree(&pHGSMICtx->heapCtx, pCtl);
}
136
137static int vboxCmdVbvaCtlSubmitSync(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CTL * pCtl)
138{
139 const VBOXSHGSMIHEADER* pHdr = VBoxSHGSMICommandPrepSynch(&pHGSMICtx->heapCtx, pCtl);
140 if (!pHdr)
141 {
142 WARN(("VBoxSHGSMICommandPrepSynch returnd NULL"));
143 return VERR_INVALID_PARAMETER;
144 }
145
146 HGSMIOFFSET offCmd = VBoxSHGSMICommandOffset(&pHGSMICtx->heapCtx, pHdr);
147 if (offCmd == HGSMIOFFSET_VOID)
148 {
149 WARN(("VBoxSHGSMICommandOffset returnd NULL"));
150 VBoxSHGSMICommandCancelSynch(&pHGSMICtx->heapCtx, pHdr);
151 return VERR_INVALID_PARAMETER;
152 }
153
154 int rc = vboxCmdVbvaSubmit(pHGSMICtx, offCmd);
155 if (RT_SUCCESS(rc))
156 {
157 rc = VBoxSHGSMICommandDoneSynch(&pHGSMICtx->heapCtx, pHdr);
158 if (RT_SUCCESS(rc))
159 {
160 rc = pCtl->i32Result;
161 if (!RT_SUCCESS(rc))
162 WARN(("pCtl->i32Result %d", pCtl->i32Result));
163
164 return rc;
165 }
166 else
167 WARN(("VBoxSHGSMICommandDoneSynch returnd %d", rc));
168 }
169 else
170 WARN(("vboxCmdVbvaSubmit returnd %d", rc));
171
172 VBoxSHGSMICommandCancelSynch(&pHGSMICtx->heapCtx, pHdr);
173
174 return rc;
175}
176
177static int vboxCmdVbvaCtlSubmitAsync(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CTL * pCtl, FNVBOXSHGSMICMDCOMPLETION pfnCompletion, void *pvCompletion)
178{
179 const VBOXSHGSMIHEADER* pHdr = VBoxSHGSMICommandPrepAsynch(&pHGSMICtx->heapCtx, pCtl, pfnCompletion, pvCompletion, VBOXSHGSMI_FLAG_GH_ASYNCH_IRQ);
180 HGSMIOFFSET offCmd = VBoxSHGSMICommandOffset(&pHGSMICtx->heapCtx, pHdr);
181 if (offCmd == HGSMIOFFSET_VOID)
182 {
183 WARN(("VBoxSHGSMICommandOffset returnd NULL"));
184 VBoxSHGSMICommandCancelAsynch(&pHGSMICtx->heapCtx, pHdr);
185 return VERR_INVALID_PARAMETER;
186 }
187
188 int rc = vboxCmdVbvaSubmit(pHGSMICtx, offCmd);
189 if (RT_SUCCESS(rc))
190 {
191 VBoxSHGSMICommandDoneAsynch(&pHGSMICtx->heapCtx, pHdr);
192 return rc;
193 }
194 else
195 WARN(("vboxCmdVbvaSubmit returnd %d", rc));
196
197 VBoxSHGSMICommandCancelAsynch(&pHGSMICtx->heapCtx, pHdr);
198
199 return rc;
200}
201
202static int vboxVBVAExCtlSubmitEnableDisable(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, bool fEnable)
203{
204 VBOXCMDVBVA_CTL_ENABLE *pCtl = (VBOXCMDVBVA_CTL_ENABLE*)vboxCmdVbvaCtlCreate(pHGSMICtx, sizeof (*pCtl));
205 if (!pCtl)
206 {
207 WARN(("vboxCmdVbvaCtlCreate failed"));
208 return VERR_NO_MEMORY;
209 }
210
211 pCtl->Hdr.u32Type = VBOXCMDVBVACTL_TYPE_ENABLE;
212 pCtl->Hdr.i32Result = VERR_NOT_IMPLEMENTED;
213 memset(&pCtl->Enable, 0, sizeof (pCtl->Enable));
214 pCtl->Enable.u32Flags = fEnable? VBVA_F_ENABLE: VBVA_F_DISABLE;
215 pCtl->Enable.u32Offset = pCtx->offVRAMBuffer;
216 pCtl->Enable.i32Result = VERR_NOT_SUPPORTED;
217 pCtl->Enable.u32Flags |= VBVA_F_ABSOFFSET;
218
219 int rc = vboxCmdVbvaCtlSubmitSync(pHGSMICtx, &pCtl->Hdr);
220 if (RT_SUCCESS(rc))
221 {
222 rc = pCtl->Hdr.i32Result;
223 if (!RT_SUCCESS(rc))
224 WARN(("vboxCmdVbvaCtlSubmitSync Disable failed %d", rc));
225 }
226 else
227 WARN(("vboxCmdVbvaCtlSubmitSync returnd %d", rc));
228
229 vboxCmdVbvaCtlFree(pHGSMICtx, &pCtl->Hdr);
230
231 return rc;
232}
233
234/*
235 * Public hardware buffer methods.
236 */
/**
 * Resets the shared ring buffer header to an empty state and enables command
 * VBVA on the host.
 *
 * @param pCtx      Buffer context; offVRAMBuffer/cbBuffer must be set up already.
 * @param pHGSMICtx HGSMI guest command context used for the enable control.
 * @param pVBVA     Mapped ring buffer to initialize and publish via pCtx->pVBVA.
 * @returns VBox status; on failure the context is disabled again before return.
 */
RTDECL(int) VBoxVBVAExEnable(PVBVAEXBUFFERCONTEXT pCtx,
                             PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
                             VBVABUFFER *pVBVA)
{
    int rc = VERR_GENERAL_FAILURE;

    LogFlowFunc(("pVBVA %p\n", pVBVA));

#if 0 /* All callers check this */
    if (ppdev->bHGSMISupported)
#endif
    {
        LogFunc(("pVBVA %p vbva off 0x%x\n", pVBVA, pCtx->offVRAMBuffer));

        /* Reset the shared header before telling the host about the buffer. */
        pVBVA->hostFlags.u32HostEvents = 0;
        pVBVA->hostFlags.u32SupportedOrders = 0;
        pVBVA->off32Data = 0;
        pVBVA->off32Free = 0;
        memset(pVBVA->aRecords, 0, sizeof (pVBVA->aRecords));
        pVBVA->indexRecordFirst = 0;
        pVBVA->indexRecordFree = 0;
        /* Writes smaller than this threshold avoid the partial-write path. */
        pVBVA->cbPartialWriteThreshold = 256;
        /* Usable payload: whole buffer minus header, plus au8Data already counted in the header size. */
        pVBVA->cbData = pCtx->cbBuffer - sizeof (VBVABUFFER) + sizeof (pVBVA->au8Data);

        pCtx->fHwBufferOverflow = false;
        pCtx->pRecord = NULL;
        pCtx->pVBVA = pVBVA;

        rc = vboxVBVAExCtlSubmitEnableDisable(pCtx, pHGSMICtx, true);
    }

    if (!RT_SUCCESS(rc))
    {
        WARN(("enable failed %d", rc));
        /* Roll back: notify the host and clear the context state. */
        VBoxVBVAExDisable(pCtx, pHGSMICtx);
    }

    return rc;
}
276
277RTDECL(void) VBoxVBVAExDisable(PVBVAEXBUFFERCONTEXT pCtx,
278 PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx)
279{
280 LogFlowFunc(("\n"));
281
282 vboxVBVAExCtlSubmitEnableDisable(pCtx, pHGSMICtx, false);
283
284 pCtx->fHwBufferOverflow = false;
285 pCtx->pRecord = NULL;
286 pCtx->pVBVA = NULL;
287
288 return;
289}
290
/**
 * Opens a new record in the ring for writing; pair with VBoxVBVAExBufferEndUpdate.
 *
 * @returns true when a record slot was claimed; false when the records queue is
 *          still full even after asking the host to flush.
 */
RTDECL(bool) VBoxVBVAExBufferBeginUpdate(PVBVAEXBUFFERCONTEXT pCtx,
                                         PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx)
{
    bool bRc = false;

    // LogFunc(("flags = 0x%08X\n", pCtx->pVBVA? pCtx->pVBVA->u32HostEvents: -1));

    Assert(pCtx->pVBVA);
    /* we do not use u32HostEvents & VBVA_F_MODE_ENABLED,
     * VBVA stays enabled once ENABLE call succeeds, until it is disabled with DISABLED call */
//    if (   pCtx->pVBVA
//        && (pCtx->pVBVA->hostFlags.u32HostEvents & VBVA_F_MODE_ENABLED))
    {
        uint32_t indexRecordNext;

        /* No nested updates: a previous update must have been closed. */
        Assert(!pCtx->fHwBufferOverflow);
        Assert(pCtx->pRecord == NULL);

        indexRecordNext = (pCtx->pVBVA->indexRecordFree + 1) % VBVA_MAX_RECORDS;

        if (indexRecordNext == pCtx->indexRecordFirstUncompleted)
        {
            /* All slots in the records queue are used. */
            vboxVBVAExFlush (pCtx, pHGSMICtx);
        }

        if (indexRecordNext == pCtx->indexRecordFirstUncompleted)
        {
            /* Even after flush there is no place. Fail the request. */
            LogFunc(("no space in the queue of records!!! first %d, last %d\n",
                     indexRecordNext, pCtx->pVBVA->indexRecordFree));
        }
        else
        {
            /* Initialize the record. */
            VBVARECORD *pRecord = &pCtx->pVBVA->aRecords[pCtx->pVBVA->indexRecordFree];

            /* PARTIAL tells the host the record is still being written. */
            pRecord->cbRecord = VBVA_F_RECORD_PARTIAL;

            pCtx->pVBVA->indexRecordFree = indexRecordNext;

            // LogFunc(("indexRecordNext = %d\n", indexRecordNext));

            /* Remember which record we are using. */
            pCtx->pRecord = pRecord;

            bRc = true;
        }
    }

    return bRc;
}
343
344RTDECL(void) VBoxVBVAExBufferEndUpdate(PVBVAEXBUFFERCONTEXT pCtx)
345{
346 VBVARECORD *pRecord;
347
348 // LogFunc(("\n"));
349
350 Assert(pCtx->pVBVA);
351
352 pRecord = pCtx->pRecord;
353 Assert(pRecord && (pRecord->cbRecord & VBVA_F_RECORD_PARTIAL));
354
355 /* Mark the record completed. */
356 pRecord->cbRecord &= ~VBVA_F_RECORD_PARTIAL;
357
358 pCtx->fHwBufferOverflow = false;
359 pCtx->pRecord = NULL;
360
361 return;
362}
363
364DECLINLINE(bool) vboxVBVAExIsEntryInRange(uint32_t u32First, uint32_t u32Entry, uint32_t u32Free)
365{
366 return ( u32First != u32Free
367 && (
368 (u32First < u32Free && u32Entry >= u32First && u32Entry < u32Free)
369 || (u32First > u32Free && (u32Entry >= u32First || u32Entry < u32Free))
370 )
371 );
372}
373
374DECLINLINE(bool) vboxVBVAExIsEntryInRangeOrEmpty(uint32_t u32First, uint32_t u32Entry, uint32_t u32Free)
375{
376 return vboxVBVAExIsEntryInRange(u32First, u32Entry, u32Free)
377 || ( u32First == u32Entry
378 && u32Entry == u32Free);
379}
380#ifdef DEBUG
381
/** Debug-only sanity check: the host's record index and data offset must lie
 *  within (or coincide with) the guest's not-yet-completed window. */
DECLINLINE(void) vboxHwBufferVerifyCompleted(PVBVAEXBUFFERCONTEXT pCtx)
{
    VBVABUFFER *pVBVA = pCtx->pVBVA;
    if (!vboxVBVAExIsEntryInRangeOrEmpty(pCtx->indexRecordFirstUncompleted, pVBVA->indexRecordFirst, pVBVA->indexRecordFree))
    {
        WARN(("invalid record set"));
    }

    if (!vboxVBVAExIsEntryInRangeOrEmpty(pCtx->off32DataUncompleted, pVBVA->off32Data, pVBVA->off32Free))
    {
        WARN(("invalid data set"));
    }
}
395#endif
396
397/*
398 * Private operations.
399 */
/** Bytes free in the data ring, measured from the guest write position
 *  (off32Free) around to the first byte not yet completed by the host
 *  (off32DataUncompleted). */
static uint32_t vboxHwBufferAvail(PVBVAEXBUFFERCONTEXT pCtx, const VBVABUFFER *pVBVA)
{
    int32_t i32Diff = pCtx->off32DataUncompleted - pVBVA->off32Free;

    /* Negative difference means the free region wraps around the ring end. */
    return i32Diff > 0? i32Diff: pVBVA->cbData + i32Diff;
}
406
/** Bytes free in the data ring that are contiguous starting at the guest write
 *  position: up to the uncompleted offset, or up to the physical ring end when
 *  the free region wraps. */
static uint32_t vboxHwBufferContiguousAvail(PVBVAEXBUFFERCONTEXT pCtx, const VBVABUFFER *pVBVA)
{
    int32_t i32Diff = pCtx->off32DataUncompleted - pVBVA->off32Free;

    return i32Diff > 0 ? i32Diff: pVBVA->cbData - pVBVA->off32Free;
}
413
414static void vboxHwBufferPlaceDataAt(PVBVAEXBUFFERCONTEXT pCtx, const void *p,
415 uint32_t cb, uint32_t offset)
416{
417 VBVABUFFER *pVBVA = pCtx->pVBVA;
418 uint32_t u32BytesTillBoundary = pVBVA->cbData - offset;
419 uint8_t *dst = &pVBVA->au8Data[offset];
420 int32_t i32Diff = cb - u32BytesTillBoundary;
421
422 if (i32Diff <= 0)
423 {
424 /* Chunk will not cross buffer boundary. */
425 memcpy (dst, p, cb);
426 }
427 else
428 {
429 /* Chunk crosses buffer boundary. */
430 memcpy (dst, p, u32BytesTillBoundary);
431 memcpy (&pVBVA->au8Data[0], (uint8_t *)p + u32BytesTillBoundary, i32Diff);
432 }
433
434 return;
435}
436
/**
 * Appends cb bytes to the currently open record, flushing to the host and
 * falling back to partial writes when the ring runs out of space.
 *
 * Requires an open record (pCtx->pRecord, set by VBoxVBVAExBufferBeginUpdate).
 * @returns false on missing buffer or unrecoverable overflow; true otherwise.
 */
static bool vboxHwBufferWrite(PVBVAEXBUFFERCONTEXT pCtx,
                              PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
                              const void *p, uint32_t cb)
{
    VBVARECORD *pRecord;
    uint32_t cbHwBufferAvail;

    uint32_t cbWritten = 0;

    VBVABUFFER *pVBVA = pCtx->pVBVA;
    Assert(pVBVA);

    if (!pVBVA || pCtx->fHwBufferOverflow)
    {
        return false;
    }

    /* There must be at least one open (not yet free) record slot. */
    Assert(pVBVA->indexRecordFirst != pVBVA->indexRecordFree);
    Assert(pCtx->indexRecordFirstUncompleted != pVBVA->indexRecordFree);

    pRecord = pCtx->pRecord;
    Assert(pRecord && (pRecord->cbRecord & VBVA_F_RECORD_PARTIAL));

    // LogFunc(("%d\n", cb));

    cbHwBufferAvail = vboxHwBufferAvail(pCtx, pVBVA);

    while (cb > 0)
    {
        uint32_t cbChunk = cb;

        // LogFunc(("pVBVA->off32Free %d, pRecord->cbRecord 0x%08X, cbHwBufferAvail %d, cb %d, cbWritten %d\n",
        //             pVBVA->off32Free, pRecord->cbRecord, cbHwBufferAvail, cb, cbWritten));

        if (cbChunk >= cbHwBufferAvail)
        {
            LogFunc(("1) avail %d, chunk %d\n", cbHwBufferAvail, cbChunk));

            /* Ask the host to drain the ring, then re-check the free space. */
            vboxVBVAExFlush(pCtx, pHGSMICtx);

            cbHwBufferAvail = vboxHwBufferAvail(pCtx, pVBVA);

            if (cbChunk >= cbHwBufferAvail)
            {
                WARN(("no place for %d bytes. Only %d bytes available after flush. Going to partial writes.\n",
                            cb, cbHwBufferAvail));

                if (cbHwBufferAvail <= pVBVA->cbPartialWriteThreshold)
                {
                    /* Not enough headroom even for a partial write: give up. */
                    WARN(("Buffer overflow!!!\n"));
                    pCtx->fHwBufferOverflow = true;
                    Assert(false);
                    return false;
                }

                /* Write what fits, keeping the threshold as reserve. */
                cbChunk = cbHwBufferAvail - pVBVA->cbPartialWriteThreshold;
            }
        }

        Assert(cbChunk <= cb);
        Assert(cbChunk <= vboxHwBufferAvail(pCtx, pVBVA));

        vboxHwBufferPlaceDataAt (pCtx, (uint8_t *)p + cbWritten, cbChunk, pVBVA->off32Free);

        /* Advance the write position and account the chunk to the open record. */
        pVBVA->off32Free   = (pVBVA->off32Free + cbChunk) % pVBVA->cbData;
        pRecord->cbRecord += cbChunk;
        cbHwBufferAvail -= cbChunk;

        cb        -= cbChunk;
        cbWritten += cbChunk;
    }

    return true;
}
511
512/*
513 * Public writer to the hardware buffer.
514 */
515RTDECL(uint32_t) VBoxVBVAExGetFreeTail(PVBVAEXBUFFERCONTEXT pCtx)
516{
517 VBVABUFFER *pVBVA = pCtx->pVBVA;
518 if (pVBVA->off32Data <= pVBVA->off32Free)
519 return pVBVA->cbData - pVBVA->off32Free;
520 return 0;
521}
522
/**
 * Reserves cb contiguous bytes in the data ring within the currently open
 * record and returns a pointer to them, or NULL when no contiguous region of
 * that size can be made available.
 *
 * Requires an open record (VBoxVBVAExBufferBeginUpdate).  The reserved bytes
 * are accounted to the record; the caller fills them in before EndUpdate.
 */
RTDECL(void*) VBoxVBVAExAllocContiguous(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, uint32_t cb)
{
    VBVARECORD *pRecord;
    uint32_t cbHwBufferContiguousAvail;
    uint32_t offset;

    VBVABUFFER *pVBVA = pCtx->pVBVA;
    Assert(pVBVA);

    if (!pVBVA || pCtx->fHwBufferOverflow)
    {
        return NULL;
    }

    /* There must be at least one open (not yet free) record slot. */
    Assert(pVBVA->indexRecordFirst != pVBVA->indexRecordFree);
    Assert(pCtx->indexRecordFirstUncompleted != pVBVA->indexRecordFree);

    pRecord = pCtx->pRecord;
    Assert(pRecord && (pRecord->cbRecord & VBVA_F_RECORD_PARTIAL));

    // LogFunc(("%d\n", cb));

    if (pVBVA->cbData < cb)
    {
        /* Can never fit, regardless of how much the host drains. */
        WARN(("requested to allocate buffer of size %d bigger than the VBVA ring buffer size %d", cb, pVBVA->cbData));
        return NULL;
    }

    cbHwBufferContiguousAvail = vboxHwBufferContiguousAvail(pCtx, pVBVA);

    if (cbHwBufferContiguousAvail < cb)
    {
        if (cb > pVBVA->cbData - pVBVA->off32Free)
        {
            /* the entire contiguous part is smaller than the requested buffer */
            return NULL;
        }

        /* Ask the host to complete pending data, then re-check. */
        vboxVBVAExFlush(pCtx, pHGSMICtx);

        cbHwBufferContiguousAvail = vboxHwBufferContiguousAvail(pCtx, pVBVA);
        if (cbHwBufferContiguousAvail < cb)
        {
            /* this is really bad - the host did not clean up buffer even after we requested it to flush */
            WARN(("Host did not clean up the buffer!"));
            return NULL;
        }
    }

    offset = pVBVA->off32Free;

    /* Reserve the region: advance the write position and grow the record. */
    pVBVA->off32Free = (pVBVA->off32Free + cb) % pVBVA->cbData;
    pRecord->cbRecord += cb;

    return &pVBVA->au8Data[offset];
}
579
580RTDECL(bool) VBoxVBVAExIsProcessing(PVBVAEXBUFFERCONTEXT pCtx)
581{
582 uint32_t u32HostEvents = pCtx->pVBVA->hostFlags.u32HostEvents;
583 return !!(u32HostEvents & VBVA_F_STATE_PROCESSING);
584}
585
/** Marks the oldest uncompleted record as completed by the host, advancing the
 *  context's uncompleted record index and data offset by the record's size. */
RTDECL(void) VBoxVBVAExCBufferCompleted(PVBVAEXBUFFERCONTEXT pCtx)
{
    VBVABUFFER *pVBVA = pCtx->pVBVA;
    uint32_t cbBuffer = pVBVA->aRecords[pCtx->indexRecordFirstUncompleted].cbRecord;
    pCtx->indexRecordFirstUncompleted = (pCtx->indexRecordFirstUncompleted + 1) % VBVA_MAX_RECORDS;
    pCtx->off32DataUncompleted = (pCtx->off32DataUncompleted + cbBuffer) % pVBVA->cbData;
}
593
/** Public wrapper around vboxHwBufferWrite: appends cb bytes of pv to the
 *  currently open record. */
RTDECL(bool) VBoxVBVAExWrite(PVBVAEXBUFFERCONTEXT pCtx,
                             PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
                             const void *pv, uint32_t cb)
{
    return vboxHwBufferWrite(pCtx, pHGSMICtx, pv, cb);
}
600
601RTDECL(bool) VBoxVBVAExOrderSupported(PVBVAEXBUFFERCONTEXT pCtx, unsigned code)
602{
603 VBVABUFFER *pVBVA = pCtx->pVBVA;
604
605 if (!pVBVA)
606 {
607 return false;
608 }
609
610 if (pVBVA->hostFlags.u32SupportedOrders & (1 << code))
611 {
612 return true;
613 }
614
615 return false;
616}
617
/**
 * One-time setup of a buffer context: records the ring's VRAM placement and
 * the flush callback.  The memset deliberately clears only the fields laid out
 * before pVBVA in VBVAEXBUFFERCONTEXT, leaving pVBVA and anything after it
 * untouched.
 */
RTDECL(void) VBoxVBVAExSetupBufferContext(PVBVAEXBUFFERCONTEXT pCtx,
                                          uint32_t offVRAMBuffer,
                                          uint32_t cbBuffer,
                                          PFNVBVAEXBUFFERFLUSH pfnFlush,
                                          void *pvFlush)
{
    memset(pCtx, 0, RT_OFFSETOF(VBVAEXBUFFERCONTEXT, pVBVA));
    pCtx->offVRAMBuffer = offVRAMBuffer;
    pCtx->cbBuffer = cbBuffer;
    pCtx->pfnFlush = pfnFlush;
    pCtx->pvFlush = pvFlush;
}
630
/**
 * Returns a pointer to the data of the iterator's current record, or NULL if
 * that record is still being written (PARTIAL).  Optionally reports the record
 * size and whether the host has already processed it.
 */
static void* vboxVBVAExIterCur(PVBVAEXBUFFERITERBASE pIter, struct VBVABUFFER *pVBVA, uint32_t *pcbBuffer, bool *pfProcessed)
{
    uint32_t cbRecord = pVBVA->aRecords[pIter->iCurRecord].cbRecord;
    if (cbRecord == VBVA_F_RECORD_PARTIAL)
        return NULL;
    if (pcbBuffer)
        *pcbBuffer = cbRecord;
    if (pfProcessed)
        /* Processed == no longer inside the host's pending [indexRecordFirst, indexRecordFree) window. */
        *pfProcessed = !vboxVBVAExIsEntryInRange(pVBVA->indexRecordFirst, pIter->iCurRecord, pVBVA->indexRecordFree);
    return &pVBVA->au8Data[pIter->off32CurCmd];
}
642
643DECLINLINE(uint32_t) vboxVBVAExSubst(uint32_t x, uint32_t val, uint32_t maxVal)
644{
645 int32_t result = (int32_t)(x - val);
646 return result >= 0 ? (uint32_t)result : maxVal - (((uint32_t)(-result)) % maxVal);
647}
648
/**
 * Initializes a backward iterator positioned at the most recently submitted
 * record (indexRecordFree - 1); when the uncompleted window is empty the
 * iterator is placed at the free position so the first Next() returns NULL.
 */
RTDECL(void) VBoxVBVAExBIterInit(PVBVAEXBUFFERCONTEXT pCtx, PVBVAEXBUFFERBACKWARDITER pIter)
{
    struct VBVABUFFER *pVBVA = pCtx->pVBVA;
    pIter->Base.pCtx = pCtx;
    uint32_t iCurRecord = vboxVBVAExSubst(pVBVA->indexRecordFree, 1, VBVA_MAX_RECORDS);
    if (vboxVBVAExIsEntryInRange(pCtx->indexRecordFirstUncompleted, iCurRecord, pVBVA->indexRecordFree))
    {
        /* even if the command gets completed by the time we're doing the pCtx->pVBVA->aRecords[iCurRecord].cbRecord below,
         * the pCtx->pVBVA->aRecords[iCurRecord].cbRecord will still be valid, as it can only be modified by a submitter,
         * and we are in a submitter context now */
        pIter->Base.iCurRecord = iCurRecord;
        pIter->Base.off32CurCmd = vboxVBVAExSubst(pVBVA->off32Free, pCtx->pVBVA->aRecords[iCurRecord].cbRecord, pVBVA->cbData);
    }
    else
    {
        /* no data */
        pIter->Base.iCurRecord = pVBVA->indexRecordFree;
        pIter->Base.off32CurCmd = pVBVA->off32Free;
    }
}
669
/**
 * Returns the current record's data and steps the backward iterator one record
 * towards older commands; NULL once the uncompleted window is exhausted.
 *
 * @param pcbBuffer   Optional: receives the record size.
 * @param pfProcessed Optional: receives whether the host already processed it.
 */
RTDECL(void*) VBoxVBVAExBIterNext(PVBVAEXBUFFERBACKWARDITER pIter, uint32_t *pcbBuffer, bool *pfProcessed)
{
    PVBVAEXBUFFERCONTEXT pCtx = pIter->Base.pCtx;
    struct VBVABUFFER *pVBVA = pCtx->pVBVA;
    uint32_t indexRecordFirstUncompleted = pCtx->indexRecordFirstUncompleted;
    if (!vboxVBVAExIsEntryInRange(indexRecordFirstUncompleted, pIter->Base.iCurRecord, pVBVA->indexRecordFree))
        return NULL;

    void *pvBuffer = vboxVBVAExIterCur(&pIter->Base, pVBVA, pcbBuffer, pfProcessed);
    AssertRelease(pvBuffer);

    /* even if the command gets completed by the time we're doing the pCtx->pVBVA->aRecords[pIter->Base.iCurRecord].cbRecord below,
     * the pCtx->pVBVA->aRecords[pIter->Base.iCurRecord].cbRecord will still be valid, as it can only be modified by a submitter,
     * and we are in a submitter context now */
    pIter->Base.iCurRecord = vboxVBVAExSubst(pIter->Base.iCurRecord, 1, VBVA_MAX_RECORDS);
    pIter->Base.off32CurCmd = vboxVBVAExSubst(pIter->Base.off32CurCmd, pCtx->pVBVA->aRecords[pIter->Base.iCurRecord].cbRecord, pVBVA->cbData);

    return pvBuffer;
}
689
690RTDECL(void) VBoxVBVAExCFIterInit(PVBVAEXBUFFERCONTEXT pCtx, PVBVAEXBUFFERFORWARDITER pIter)
691{
692 pIter->Base.pCtx = pCtx;
693 pIter->Base.iCurRecord = pCtx->indexRecordFirstUncompleted;
694 pIter->Base.off32CurCmd = pCtx->off32DataUncompleted;
695}
696
/**
 * Returns the current record's data and steps the forward iterator one record
 * towards newer commands; NULL when past the last submitted record or when the
 * current record is still partial.
 */
RTDECL(void*) VBoxVBVAExCFIterNext(PVBVAEXBUFFERFORWARDITER pIter, uint32_t *pcbBuffer, bool *pfProcessed)
{
    PVBVAEXBUFFERCONTEXT pCtx = pIter->Base.pCtx;
    struct VBVABUFFER *pVBVA = pCtx->pVBVA;
    uint32_t indexRecordFree = pVBVA->indexRecordFree;
    if (!vboxVBVAExIsEntryInRange(pCtx->indexRecordFirstUncompleted, pIter->Base.iCurRecord, indexRecordFree))
        return NULL;

    uint32_t cbBuffer;
    void *pvData = vboxVBVAExIterCur(&pIter->Base, pVBVA, &cbBuffer, pfProcessed);
    if (!pvData)
        return NULL;

    /* Advance past the record we are returning. */
    pIter->Base.iCurRecord = (pIter->Base.iCurRecord + 1) % VBVA_MAX_RECORDS;
    pIter->Base.off32CurCmd = (pIter->Base.off32CurCmd + cbBuffer) % pVBVA->cbData;

    if (pcbBuffer)
        *pcbBuffer = cbBuffer;

    return pvData;
}
718
719/**/
720
/** Enables the command VBVA ring on the host using the already-mapped buffer
 *  (see VBoxCmdVbvaCreate). */
int VBoxCmdVbvaEnable(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
{
    return VBoxVBVAExEnable(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, pVbva->Vbva.pVBVA);
}
725
/** Disables the command VBVA ring on the host; always returns VINF_SUCCESS. */
int VBoxCmdVbvaDisable(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
{
    VBoxVBVAExDisable(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx);
    return VINF_SUCCESS;
}
731
732int VBoxCmdVbvaDestroy(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
733{
734 int rc = VINF_SUCCESS;
735 VBoxMPCmnUnmapAdapterMemory(VBoxCommonFromDeviceExt(pDevExt), (void**)&pVbva->Vbva.pVBVA);
736 memset(pVbva, 0, sizeof (*pVbva));
737 return rc;
738}
739
/**
 * Raises a dxgkrnl completion notification for the given fence at IRQ level.
 *
 * @param u32FenceId   Fence being completed / preempted / faulted.
 * @param enmComplType Which DXGK interrupt to raise; unknown types are only
 *                     warned about (an empty notification is still sent).
 */
static void vboxCmdVbvaDdiNotifyCompleteIrq(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, UINT u32FenceId, DXGK_INTERRUPT_TYPE enmComplType)
{
    DXGKARGCB_NOTIFY_INTERRUPT_DATA notify;
    memset(&notify, 0, sizeof(DXGKARGCB_NOTIFY_INTERRUPT_DATA));
    switch (enmComplType)
    {
        case DXGK_INTERRUPT_DMA_COMPLETED:
            notify.InterruptType = DXGK_INTERRUPT_DMA_COMPLETED;
            notify.DmaCompleted.SubmissionFenceId = u32FenceId;
            notify.DmaCompleted.NodeOrdinal = pVbva->idNode;
            break;

        case DXGK_INTERRUPT_DMA_PREEMPTED:
            notify.InterruptType = DXGK_INTERRUPT_DMA_PREEMPTED;
            notify.DmaPreempted.PreemptionFenceId = u32FenceId;
            notify.DmaPreempted.NodeOrdinal = pVbva->idNode;
            notify.DmaPreempted.LastCompletedFenceId = pVbva->u32FenceCompleted;
            break;

        case DXGK_INTERRUPT_DMA_FAULTED:
            /* Not expected in normal operation. */
            Assert(0);
            notify.InterruptType = DXGK_INTERRUPT_DMA_FAULTED;
            notify.DmaFaulted.FaultedFenceId = u32FenceId;
            notify.DmaFaulted.Status = STATUS_UNSUCCESSFUL; /* @todo: better status ? */
            notify.DmaFaulted.NodeOrdinal = pVbva->idNode;
            break;

        default:
            WARN(("unrecognized completion type %d", enmComplType));
            break;
    }

    pDevExt->u.primary.DxgkInterface.DxgkCbNotifyInterrupt(pDevExt->u.primary.DxgkInterface.DeviceHandle, &notify);
}
774
/** Context passed to vboxCmdVbvaDdiNotifyPreemptCb via DxgkCbSynchronizeExecution. */
typedef struct VBOXCMDVBVA_NOTIFYPREEMPT_CB
{
    PVBOXMP_DEVEXT pDevExt;  /* device extension */
    VBOXCMDVBVA *pVbva;      /* command VBVA state */
    int rc;                  /* out: VBox status set by the callback */
    UINT u32SubmitFenceId;   /* fence of the submission being preempted; 0 appears to trigger immediate notification — see callback */
    UINT u32PreemptFenceId;  /* fence to report the preemption with */
} VBOXCMDVBVA_NOTIFYPREEMPT_CB;
783
/**
 * Synchronized (interrupt-level) preemption handler: if the preempted fence has
 * already been processed (or none was given), notifies dxgkrnl immediately;
 * otherwise queues the pair into the preempt map for later completion.
 * Sets pData->rc; returns FALSE only when the preempt map is full.
 */
static BOOLEAN vboxCmdVbvaDdiNotifyPreemptCb(PVOID pvContext)
{
    VBOXCMDVBVA_NOTIFYPREEMPT_CB* pData = (VBOXCMDVBVA_NOTIFYPREEMPT_CB*)pvContext;
    PVBOXMP_DEVEXT pDevExt = pData->pDevExt;
    VBOXCMDVBVA *pVbva = pData->pVbva;
    Assert(pVbva->u32FenceProcessed >= pVbva->u32FenceCompleted);
    if (!pData->u32SubmitFenceId || pVbva->u32FenceProcessed == pData->u32SubmitFenceId)
    {
        /* The preempted command is already past: report the preemption now. */
        vboxCmdVbvaDdiNotifyCompleteIrq(pDevExt, pVbva, pData->u32PreemptFenceId, DXGK_INTERRUPT_DMA_PREEMPTED);

        pDevExt->u.primary.DxgkInterface.DxgkCbQueueDpc(pDevExt->u.primary.DxgkInterface.DeviceHandle);
    }
    else
    {
        /* Still pending: remember the submit/preempt fence pair in the ring-like preempt map. */
        Assert(pVbva->u32FenceProcessed < pData->u32SubmitFenceId);
        Assert(pVbva->cPreempt <= VBOXCMDVBVA_PREEMPT_EL_SIZE);
        if (pVbva->cPreempt == VBOXCMDVBVA_PREEMPT_EL_SIZE)
        {
            WARN(("no more free elements in preempt map"));
            pData->rc = VERR_BUFFER_OVERFLOW;
            return FALSE;
        }
        uint32_t iNewEl = (pVbva->iCurPreempt + pVbva->cPreempt) % VBOXCMDVBVA_PREEMPT_EL_SIZE;
        Assert(iNewEl < VBOXCMDVBVA_PREEMPT_EL_SIZE);
        pVbva->aPreempt[iNewEl].u32SubmitFence = pData->u32SubmitFenceId;
        pVbva->aPreempt[iNewEl].u32PreemptFence = pData->u32PreemptFenceId;
        ++pVbva->cPreempt;
    }

    pData->rc = VINF_SUCCESS;
    return TRUE;
}
816
/**
 * Runs vboxCmdVbvaDdiNotifyPreemptCb under DxgkCbSynchronizeExecution (i.e.
 * synchronized with the interrupt routine) and maps its outcome to a VBox
 * status code.
 */
static int vboxCmdVbvaDdiNotifyPreempt(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, UINT u32SubmitFenceId, UINT u32PreemptFenceId)
{
    VBOXCMDVBVA_NOTIFYPREEMPT_CB Data;
    Data.pDevExt = pDevExt;
    Data.pVbva = pVbva;
    Data.rc = VERR_INTERNAL_ERROR;
    Data.u32SubmitFenceId = u32SubmitFenceId;
    Data.u32PreemptFenceId = u32PreemptFenceId;
    BOOLEAN bDummy;
    NTSTATUS Status = pDevExt->u.primary.DxgkInterface.DxgkCbSynchronizeExecution(
            pDevExt->u.primary.DxgkInterface.DeviceHandle,
            vboxCmdVbvaDdiNotifyPreemptCb,
            &Data,
            0, /* IN ULONG MessageNumber */
            &bDummy);
    if (!NT_SUCCESS(Status))
    {
        WARN(("DxgkCbSynchronizeExecution failed Status %#x", Status));
        return VERR_GENERAL_FAILURE;
    }

    if (!RT_SUCCESS(Data.rc))
    {
        WARN(("vboxCmdVbvaDdiNotifyPreemptCb failed rc %d", Data.rc));
        return Data.rc;
    }

    return VINF_SUCCESS;
}
846
847static int vboxCmdVbvaFlush(PVBOXMP_DEVEXT pDevExt, HGSMIGUESTCOMMANDCONTEXT *pCtx, bool fBufferOverflow)
848{
849 /* Issue the flush command. */
850 VBVACMDVBVAFLUSH *pFlush = (VBVACMDVBVAFLUSH*)VBoxHGSMIBufferAlloc(pCtx,
851 sizeof (VBVACMDVBVAFLUSH),
852 HGSMI_CH_VBVA,
853 VBVA_CMDVBVA_FLUSH);
854 if (!pFlush)
855 {
856 WARN(("VBoxHGSMIBufferAlloc failed\n"));
857 return VERR_OUT_OF_RESOURCES;
858 }
859
860 pFlush->u32Flags = fBufferOverflow ? VBVACMDVBVAFLUSH_F_GUEST_BUFFER_OVERFLOW : 0;
861
862 VBoxHGSMIBufferSubmit(pCtx, pFlush);
863
864 VBoxHGSMIBufferFree(pCtx, pFlush);
865
866 return VINF_SUCCESS;
867}
868
/** Context for vboxCmdVbvaCheckCompletedIrqCb (run under DxgkCbSynchronizeExecution). */
typedef struct VBOXCMDVBVA_CHECK_COMPLETED_CB
{
    PVBOXMP_DEVEXT pDevExt;
    VBOXCMDVBVA *pVbva;
    /* last completed fence id */
    uint32_t u32FenceCompleted;
    /* last submitted fence id */
    uint32_t u32FenceSubmitted;
    /* last processed fence id (i.e. either completed or cancelled) */
    uint32_t u32FenceProcessed;
} VBOXCMDVBVA_CHECK_COMPLETED_CB;
880
/**
 * Synchronized callback: runs the interrupt routine to pick up completions,
 * then snapshots the VBVA fence counters into the context (zeros when no VBVA).
 * Returns the interrupt routine's result.
 */
static BOOLEAN vboxCmdVbvaCheckCompletedIrqCb(PVOID pContext)
{
    VBOXCMDVBVA_CHECK_COMPLETED_CB *pCompleted = (VBOXCMDVBVA_CHECK_COMPLETED_CB*)pContext;
    BOOLEAN bRc = DxgkDdiInterruptRoutineNew(pCompleted->pDevExt, 0);
    if (pCompleted->pVbva)
    {
        pCompleted->u32FenceCompleted = pCompleted->pVbva->u32FenceCompleted;
        pCompleted->u32FenceSubmitted = pCompleted->pVbva->u32FenceSubmitted;
        pCompleted->u32FenceProcessed = pCompleted->pVbva->u32FenceProcessed;
    }
    else
    {
        WARN(("no vbva"));
        pCompleted->u32FenceCompleted = 0;
        pCompleted->u32FenceSubmitted = 0;
        pCompleted->u32FenceProcessed = 0;
    }
    return bRc;
}
900
901
/**
 * Optionally pings the host with a flush, then (synchronized with the
 * interrupt routine) collects the current fence counters.
 *
 * @param fPingHost  true to send a VBVA_CMDVBVA_FLUSH first.
 * @param pu32FenceSubmitted/pu32FenceProcessed  Optional out parameters.
 * @returns the last completed fence id.
 */
static uint32_t vboxCmdVbvaCheckCompleted(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, bool fPingHost, HGSMIGUESTCOMMANDCONTEXT *pCtx, bool fBufferOverflow, uint32_t *pu32FenceSubmitted, uint32_t *pu32FenceProcessed)
{
    if (fPingHost)
        vboxCmdVbvaFlush(pDevExt, pCtx, fBufferOverflow);

    VBOXCMDVBVA_CHECK_COMPLETED_CB context;
    context.pDevExt = pDevExt;
    context.pVbva = pVbva;
    context.u32FenceCompleted = 0;
    context.u32FenceSubmitted = 0;
    context.u32FenceProcessed = 0;
    BOOLEAN bRet;
    NTSTATUS Status = pDevExt->u.primary.DxgkInterface.DxgkCbSynchronizeExecution(
            pDevExt->u.primary.DxgkInterface.DeviceHandle,
            vboxCmdVbvaCheckCompletedIrqCb,
            &context,
            0, /* IN ULONG MessageNumber */
            &bRet);
    Assert(Status == STATUS_SUCCESS);

    if (pu32FenceSubmitted)
        *pu32FenceSubmitted = context.u32FenceSubmitted;

    if (pu32FenceProcessed)
        *pu32FenceProcessed = context.u32FenceProcessed;

    return context.u32FenceCompleted;
}
930
/** VBVAEx flush callback (PFNVBVAEXBUFFERFLUSH) installed by VBoxCmdVbvaCreate;
 *  pvFlush is the device extension.  Pings the host (reporting guest buffer
 *  overflow) and lets the interrupt routine pick up completions.
 *  NOTE(review): name is missing a 'b' ("vox" vs "vbox"); left unchanged since
 *  it is referenced by VBoxCmdVbvaCreate. */
static DECLCALLBACK(void) voxCmdVbvaFlushCb(struct VBVAEXBUFFERCONTEXT *pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, void *pvFlush)
{
    PVBOXMP_DEVEXT pDevExt = (PVBOXMP_DEVEXT)pvFlush;

    vboxCmdVbvaCheckCompleted(pDevExt, NULL,  true /*fPingHost*/, pHGSMICtx, true /*fBufferOverflow*/, NULL, NULL);
}
937
938int VBoxCmdVbvaCreate(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, ULONG offBuffer, ULONG cbBuffer)
939{
940 memset(pVbva, 0, sizeof (*pVbva));
941
942 int rc = VBoxMPCmnMapAdapterMemory(VBoxCommonFromDeviceExt(pDevExt),
943 (void**)&pVbva->Vbva.pVBVA,
944 offBuffer,
945 cbBuffer);
946 if (RT_SUCCESS(rc))
947 {
948 Assert(pVbva->Vbva.pVBVA);
949 VBoxVBVAExSetupBufferContext(&pVbva->Vbva, offBuffer, cbBuffer, voxCmdVbvaFlushCb, pDevExt);
950 }
951 else
952 {
953 WARN(("VBoxMPCmnMapAdapterMemory failed rc %d", rc));
954 }
955
956 return rc;
957}
958
/**
 * Finishes a command begun with VBoxCmdVbvaSubmitLock: stamps state and fence
 * on the command, closes the VBVA record and, when the host is not already
 * processing the ring, pings it with a VBVA_CMDVBVA_SUBMIT notification.
 *
 * NOTE(review): pCmd should point at the command inside the ring buffer (the
 * pointer returned by VBoxCmdVbvaSubmitLock) so the host sees the stamped
 * state — confirm at call sites.
 */
void VBoxCmdVbvaSubmitUnlock(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, VBOXCMDVBVA_HDR* pCmd, uint32_t u32FenceID)
{
    if (u32FenceID)
        pVbva->u32FenceSubmitted = u32FenceID;
    else
        WARN(("no cmd fence specified"));

    pCmd->u8State = VBOXCMDVBVA_STATE_SUBMITTED;

    pCmd->u2.u32FenceID = u32FenceID;

    VBoxVBVAExBufferEndUpdate(&pVbva->Vbva);

    if (!VBoxVBVAExIsProcessing(&pVbva->Vbva))
    {
        /* Issue the submit command. */
        HGSMIGUESTCOMMANDCONTEXT *pCtx = &VBoxCommonFromDeviceExt(pDevExt)->guestCtx;
        VBVACMDVBVASUBMIT *pSubmit = (VBVACMDVBVASUBMIT*)VBoxHGSMIBufferAlloc(pCtx,
                                                                              sizeof (VBVACMDVBVASUBMIT),
                                                                              HGSMI_CH_VBVA,
                                                                              VBVA_CMDVBVA_SUBMIT);
        if (!pSubmit)
        {
            WARN(("VBoxHGSMIBufferAlloc failed\n"));
            return;
        }

        pSubmit->u32Reserved = 0;

        VBoxHGSMIBufferSubmit(pCtx, pSubmit);

        VBoxHGSMIBufferFree(pCtx, pSubmit);
    }
}
993
994VBOXCMDVBVA_HDR* VBoxCmdVbvaSubmitLock(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, uint32_t cbCmd)
995{
996 if (VBoxVBVAExGetSize(&pVbva->Vbva) < cbCmd)
997 {
998 WARN(("buffer does not fit the vbva buffer, we do not support splitting buffers"));
999 return NULL;
1000 }
1001
1002 if (!VBoxVBVAExBufferBeginUpdate(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx))
1003 {
1004 WARN(("VBoxVBVAExBufferBeginUpdate failed!"));
1005 return NULL;
1006 }
1007
1008 void* pvBuffer = VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbCmd);
1009 if (!pvBuffer)
1010 {
1011 LOG(("failed to allocate contiguous buffer %d bytes, trying nopping the tail", cbCmd));
1012 uint32_t cbTail = VBoxVBVAExGetFreeTail(&pVbva->Vbva);
1013 if (!cbTail)
1014 {
1015 WARN(("this is not a free tail case, cbTail is NULL"));
1016 return NULL;
1017 }
1018
1019 Assert(cbTail < cbCmd);
1020
1021 pvBuffer = VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbTail);
1022
1023 Assert(pvBuffer);
1024
1025 *((uint8_t*)pvBuffer) = VBOXCMDVBVA_OPTYPE_NOP;
1026
1027 VBoxVBVAExBufferEndUpdate(&pVbva->Vbva);
1028
1029 if (!VBoxVBVAExBufferBeginUpdate(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx))
1030 {
1031 WARN(("VBoxVBVAExBufferBeginUpdate 2 failed!"));
1032 return NULL;
1033 }
1034
1035 pvBuffer = VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbCmd);
1036 if (!pvBuffer)
1037 {
1038 WARN(("failed to allocate contiguous buffer %d bytes", cbCmd));
1039 return NULL;
1040 }
1041 }
1042
1043 Assert(pvBuffer);
1044
1045 return (VBOXCMDVBVA_HDR*)pvBuffer;
1046}
1047
1048int VBoxCmdVbvaSubmit(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, struct VBOXCMDVBVA_HDR *pCmd, uint32_t u32FenceID, uint32_t cbCmd)
1049{
1050 VBOXCMDVBVA_HDR* pHdr = VBoxCmdVbvaSubmitLock(pDevExt, pVbva, cbCmd);
1051
1052 if (!pHdr)
1053 {
1054 WARN(("VBoxCmdVbvaSubmitLock failed"));
1055 return VERR_GENERAL_FAILURE;
1056 }
1057
1058 memcpy(pHdr, pCmd, cbCmd);
1059
1060 VBoxCmdVbvaSubmitUnlock(pDevExt, pVbva, pCmd, u32FenceID);
1061
1062 return VINF_SUCCESS;
1063}
1064
/**
 * Attempts to preempt pending ring commands in response to a DDI preemption
 * request with fence @a u32FenceID.
 *
 * Walks the ring backwards, atomically flipping every still-SUBMITTED command
 * to CANCELLED. The walk stops at the first command the host has already
 * started (IN_PROGRESS); that command's fence is reported so completion of the
 * preemption can be tied to it. Finally the DDI is notified via
 * vboxCmdVbvaDdiNotifyPreempt.
 *
 * @returns false always — the preemption completes asynchronously through the
 *          IRQ completion path, never synchronously here.
 */
bool VBoxCmdVbvaPreempt(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, uint32_t u32FenceID)
{
    VBVAEXBUFFERBACKWARDITER Iter;
    VBoxVBVAExBIterInit(&pVbva->Vbva, &Iter);

    uint32_t cbBuffer;
    bool fProcessed;
    uint8_t* pu8Cmd;
    uint32_t u32SubmitFence = 0;   /* fence of the first in-progress command found, if any */

    /* we can do it right here */
    while ((pu8Cmd = (uint8_t*)VBoxVBVAExBIterNext(&Iter, &cbBuffer, &fProcessed)) != NULL)
    {
        /* NOP fillers carry no state; skip them. */
        if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
            continue;

        VBOXCMDVBVA_HDR *pCmd = (VBOXCMDVBVA_HDR*)pu8Cmd;

        /* CAS SUBMITTED -> CANCELLED; also skip commands some earlier pass
         * already cancelled. Only the host moves a command to IN_PROGRESS,
         * so a failed CAS on a non-cancelled command means the host owns it. */
        if (ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_CANCELLED, VBOXCMDVBVA_STATE_SUBMITTED)
                || pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED)
            continue;

        Assert(pCmd->u8State == VBOXCMDVBVA_STATE_IN_PROGRESS);

        u32SubmitFence = pCmd->u2.u32FenceID;
        break;
    }

    vboxCmdVbvaDdiNotifyPreempt(pDevExt, pVbva, u32SubmitFence, u32FenceID);

    return false;
}
1097
/**
 * IRQ-level scan of the ring for commands the host has finished processing.
 *
 * Walks the ring forward from the oldest entry; stops at the first
 * not-yet-processed command. For each processed, non-NOP command the ring
 * space is reclaimed, fences are advanced, completion is reported to the DDI,
 * and any matching pending preemption entry is completed as well.
 *
 * NOTE(review): expected to run in the interrupt/DPC completion path — it only
 * uses the lock-free iterator and vboxCmdVbvaDdiNotifyCompleteIrq.
 *
 * @returns true if at least one command was completed or preempted (i.e. the
 *          caller should queue a DPC / notify the scheduler), false otherwise.
 */
bool VBoxCmdVbvaCheckCompletedIrq(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
{
    VBVAEXBUFFERFORWARDITER Iter;
    VBoxVBVAExCFIterInit(&pVbva->Vbva, &Iter);

    bool fHasCommandsCompletedPreempted = false;
    bool fProcessed;
    uint8_t* pu8Cmd;


    while ((pu8Cmd = (uint8_t*)VBoxVBVAExCFIterNext(&Iter, NULL, &fProcessed)) != NULL)
    {
        /* Commands complete in ring order; the first unprocessed one ends the scan. */
        if (!fProcessed)
            break;

        /* NOP fillers occupy space but carry no fence; just walk past them. */
        if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
            continue;

        VBOXCMDVBVA_HDR *pCmd = (VBOXCMDVBVA_HDR*)pu8Cmd;
        /* Snapshot state and fence before reclaiming the ring space below. */
        uint8_t u8State = pCmd->u8State;
        uint32_t u32FenceID = pCmd->u2.u32FenceID;

        Assert(u8State == VBOXCMDVBVA_STATE_IN_PROGRESS
                || u8State == VBOXCMDVBVA_STATE_CANCELLED);
        Assert(u32FenceID);
        VBoxVBVAExCBufferCompleted(&pVbva->Vbva);

        if (!u32FenceID)
        {
            WARN(("fence is NULL"));
            continue;
        }

        pVbva->u32FenceProcessed = u32FenceID;

        if (u8State == VBOXCMDVBVA_STATE_IN_PROGRESS)
            pVbva->u32FenceCompleted = u32FenceID;
        else
        {
            /* Cancelled commands free their ring space but are not reported
             * to the DDI as completed. */
            Assert(u8State == VBOXCMDVBVA_STATE_CANCELLED);
            continue;
        }

        Assert(u32FenceID);
        vboxCmdVbvaDdiNotifyCompleteIrq(pDevExt, pVbva, u32FenceID, DXGK_INTERRUPT_DMA_COMPLETED);

        /* If this fence is the one a pending preemption was waiting on, report
         * the preemption fence too and advance the circular preempt queue. */
        if (pVbva->cPreempt && pVbva->aPreempt[pVbva->iCurPreempt].u32SubmitFence == u32FenceID)
        {
            Assert(pVbva->aPreempt[pVbva->iCurPreempt].u32PreemptFence);
            vboxCmdVbvaDdiNotifyCompleteIrq(pDevExt, pVbva, pVbva->aPreempt[pVbva->iCurPreempt].u32PreemptFence, DXGK_INTERRUPT_DMA_PREEMPTED);
            --pVbva->cPreempt;
            if (!pVbva->cPreempt)
                pVbva->iCurPreempt = 0;
            else
            {
                ++pVbva->iCurPreempt;
                pVbva->iCurPreempt %= VBOXCMDVBVA_PREEMPT_EL_SIZE;
            }
        }

        fHasCommandsCompletedPreempted = true;
    }

#ifdef DEBUG
    vboxHwBufferVerifyCompleted(&pVbva->Vbva);
#endif

    return fHasCommandsCompletedPreempted;
}
1167
1168uint32_t VBoxCmdVbvaCheckCompleted(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, bool fPingHost, uint32_t *pu32FenceSubmitted, uint32_t *pu32FenceProcessed)
1169{
1170 return vboxCmdVbvaCheckCompleted(pDevExt, pVbva, fPingHost, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, false /* fBufferOverflow */, pu32FenceSubmitted, pu32FenceProcessed);
1171}
1172
#if 0
/* Dead code, compiled out: an older encoding of paging-transfer pages as
 * packed VBOXCMDVBVA_SYSMEMEL runs (first page + count of contiguous
 * followers). Superseded by the VBoxCVDdiPTransferVRamSysBuildEls variant
 * below, which stores raw page numbers. Kept for reference only. */
static uint32_t vboxCVDdiSysMemElBuild(VBOXCMDVBVA_SYSMEMEL *pEl, PMDL pMdl, uint32_t iPfn, uint32_t cPages)
{
    PFN_NUMBER cur = MmGetMdlPfnArray(pMdl)[iPfn];
    uint32_t cbEl = sizeof (*pEl);
    uint32_t cStoredPages = 1;
    PFN_NUMBER next;
    pEl->iPage1 = (uint32_t)(cur & 0xfffff);
    pEl->iPage2 = (uint32_t)(cur >> 20);
    --cPages;
    for ( ; cPages && cStoredPages < VBOXCMDVBVA_SYSMEMEL_CPAGES_MAX; --cPages, ++cStoredPages, cur = next)
    {
        next = MmGetMdlPfnArray(pMdl)[iPfn+cStoredPages];
        if (next != cur+1)
            break;
    }

    Assert(cStoredPages);
    pEl->cPagesAfterFirst = cStoredPages - 1;

    return cPages;
}

uint32_t VBoxCVDdiPTransferVRamSysBuildEls(VBOXCMDVBVA_PAGING_TRANSFER *pCmd, PMDL pMdl, uint32_t iPfn, uint32_t cPages, uint32_t cbBuffer, uint32_t *pcPagesWritten)
{
    uint32_t cInitPages = cPages;
    uint32_t cbInitBuffer = cbBuffer;
    uint32_t cEls = 0;
    VBOXCMDVBVA_SYSMEMEL *pEl = pCmd->aSysMem;

    Assert(cbBuffer >= sizeof (VBOXCMDVBVA_PAGING_TRANSFER));

    cbBuffer -= RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, aSysMem);

    for (; cPages && cbBuffer >= sizeof (VBOXCMDVBVA_SYSMEMEL); ++cEls, cbBuffer-=sizeof (VBOXCMDVBVA_SYSMEMEL), ++pEl)
    {
        cPages = vboxCVDdiSysMemElBuild(pEl, pMdl, iPfn + cInitPages - cPages, cPages);
    }

    *pcPagesWritten = cInitPages - cPages;
    return cbInitBuffer - cbBuffer;
}
#endif
1216
1217uint32_t VBoxCVDdiPTransferVRamSysBuildEls(VBOXCMDVBVA_PAGING_TRANSFER *pCmd, PMDL pMdl, uint32_t iPfn, uint32_t cPages, uint32_t cbBuffer, uint32_t *pcPagesWritten)
1218{
1219 uint32_t cbInitBuffer = cbBuffer;
1220 uint32_t i = 0;
1221 VBOXCMDVBVAPAGEIDX *pPageNumbers = pCmd->Data.aPageNumbers;
1222
1223 cbBuffer -= RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers);
1224
1225 for (; i < cPages && cbBuffer >= sizeof (*pPageNumbers); ++i, cbBuffer -= sizeof (*pPageNumbers))
1226 {
1227 pPageNumbers[i] = (VBOXCMDVBVAPAGEIDX)(MmGetMdlPfnArray(pMdl)[iPfn + i]);
1228 }
1229
1230 *pcPagesWritten = i;
1231 Assert(cbInitBuffer - cbBuffer == RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers[i]));
1232 Assert(cbInitBuffer - cbBuffer >= sizeof (VBOXCMDVBVA_PAGING_TRANSFER));
1233 return cbInitBuffer - cbBuffer;
1234}
1235
1236
1237int vboxCmdVbvaConConnect(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
1238 uint32_t crVersionMajor, uint32_t crVersionMinor,
1239 uint32_t *pu32ClientID)
1240{
1241 VBOXCMDVBVA_CTL_3DCTL_CONNECT *pConnect = (VBOXCMDVBVA_CTL_3DCTL_CONNECT*)vboxCmdVbvaCtlCreate(pHGSMICtx, sizeof (VBOXCMDVBVA_CTL_3DCTL_CONNECT));
1242 if (!pConnect)
1243 {
1244 WARN(("vboxCmdVbvaCtlCreate failed"));
1245 return VERR_OUT_OF_RESOURCES;
1246 }
1247 pConnect->Hdr.u32Type = VBOXCMDVBVACTL_TYPE_3DCTL;
1248 pConnect->Hdr.i32Result = VERR_NOT_SUPPORTED;
1249 pConnect->Connect.Hdr.u32Type = VBOXCMDVBVA3DCTL_TYPE_CONNECT;
1250 pConnect->Connect.Hdr.u32CmdClientId = 0;
1251 pConnect->Connect.u32MajorVersion = crVersionMajor;
1252 pConnect->Connect.u32MinorVersion = crVersionMinor;
1253 pConnect->Connect.u64Pid = (uint64_t)PsGetCurrentProcessId();
1254
1255 int rc = vboxCmdVbvaCtlSubmitSync(pHGSMICtx, &pConnect->Hdr);
1256 if (RT_SUCCESS(rc))
1257 {
1258 rc = pConnect->Hdr.i32Result;
1259 if (RT_SUCCESS(rc))
1260 {
1261 Assert(pConnect->Connect.Hdr.u32CmdClientId);
1262 *pu32ClientID = pConnect->Connect.Hdr.u32CmdClientId;
1263 }
1264 else
1265 WARN(("VBOXCMDVBVA3DCTL_TYPE_CONNECT Disable failed %d", rc));
1266 }
1267 else
1268 WARN(("vboxCmdVbvaCtlSubmitSync returnd %d", rc));
1269
1270 vboxCmdVbvaCtlFree(pHGSMICtx, &pConnect->Hdr);
1271
1272 return rc;
1273}
1274
1275int vboxCmdVbvaConDisconnect(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, uint32_t u32ClientID)
1276{
1277 VBOXCMDVBVA_CTL_3DCTL *pDisconnect = (VBOXCMDVBVA_CTL_3DCTL*)vboxCmdVbvaCtlCreate(pHGSMICtx, sizeof (VBOXCMDVBVA_CTL_3DCTL));
1278 if (!pDisconnect)
1279 {
1280 WARN(("vboxCmdVbvaCtlCreate failed"));
1281 return VERR_OUT_OF_RESOURCES;
1282 }
1283 pDisconnect->Hdr.u32Type = VBOXCMDVBVACTL_TYPE_3DCTL;
1284 pDisconnect->Hdr.i32Result = VERR_NOT_SUPPORTED;
1285 pDisconnect->Ctl.u32Type = VBOXCMDVBVA3DCTL_TYPE_DISCONNECT;
1286 pDisconnect->Ctl.u32CmdClientId = u32ClientID;
1287
1288 int rc = vboxCmdVbvaCtlSubmitSync(pHGSMICtx, &pDisconnect->Hdr);
1289 if (RT_SUCCESS(rc))
1290 {
1291 rc = pDisconnect->Hdr.i32Result;
1292 if (!RT_SUCCESS(rc))
1293 WARN(("VBOXCMDVBVA3DCTL_TYPE_DISCONNECT Disable failed %d", rc));
1294 }
1295 else
1296 WARN(("vboxCmdVbvaCtlSubmitSync returnd %d", rc));
1297
1298 vboxCmdVbvaCtlFree(pHGSMICtx, &pDisconnect->Hdr);
1299
1300 return rc;
1301}
1302
1303int VBoxCmdVbvaConConnect(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva,
1304 uint32_t crVersionMajor, uint32_t crVersionMinor,
1305 uint32_t *pu32ClientID)
1306{
1307 return vboxCmdVbvaConConnect(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, crVersionMajor, crVersionMinor, pu32ClientID);
1308}
1309
1310int VBoxCmdVbvaConDisconnect(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, uint32_t u32ClientID)
1311{
1312 return vboxCmdVbvaConDisconnect(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, u32ClientID);
1313}
1314
1315VBOXCMDVBVA_CRCMD_CMD* vboxCmdVbvaConCmdAlloc(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, uint32_t cbCmd)
1316{
1317 VBOXCMDVBVA_CTL_3DCTL_CMD *pCmd = (VBOXCMDVBVA_CTL_3DCTL_CMD*)vboxCmdVbvaCtlCreate(pHGSMICtx, sizeof (VBOXCMDVBVA_CTL_3DCTL_CMD) + cbCmd);
1318 if (!pCmd)
1319 {
1320 WARN(("vboxCmdVbvaCtlCreate failed"));
1321 return NULL;
1322 }
1323 pCmd->Hdr.u32Type = VBOXCMDVBVACTL_TYPE_3DCTL;
1324 pCmd->Hdr.i32Result = VERR_NOT_SUPPORTED;
1325 pCmd->Cmd.Hdr.u32Type = VBOXCMDVBVA3DCTL_TYPE_CMD;
1326 pCmd->Cmd.Hdr.u32CmdClientId = 0;
1327 pCmd->Cmd.Cmd.u8OpCode = VBOXCMDVBVA_OPTYPE_CRCMD;
1328 pCmd->Cmd.Cmd.u8Flags = 0;
1329 pCmd->Cmd.Cmd.u8State = VBOXCMDVBVA_STATE_SUBMITTED;
1330 pCmd->Cmd.Cmd.u.i8Result = -1;
1331 pCmd->Cmd.Cmd.u2.u32FenceID = 0;
1332
1333 return (VBOXCMDVBVA_CRCMD_CMD*)(pCmd+1);
1334}
1335
1336void vboxCmdVbvaConCmdFree(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CRCMD_CMD *pCmd)
1337{
1338 VBOXCMDVBVA_CTL_3DCTL_CMD *pHdr = ((VBOXCMDVBVA_CTL_3DCTL_CMD*)pCmd)-1;
1339 vboxCmdVbvaCtlFree(pHGSMICtx, &pHdr->Hdr);
1340}
1341
1342int vboxCmdVbvaConSubmitAsync(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CRCMD_CMD* pCmd, FNVBOXSHGSMICMDCOMPLETION pfnCompletion, void *pvCompletion)
1343{
1344 VBOXCMDVBVA_CTL_3DCTL_CMD *pHdr = ((VBOXCMDVBVA_CTL_3DCTL_CMD*)pCmd)-1;
1345 return vboxCmdVbvaCtlSubmitAsync(pHGSMICtx, &pHdr->Hdr, pfnCompletion, pvCompletion);
1346}
1347
1348VBOXCMDVBVA_CRCMD_CMD* VBoxCmdVbvaConCmdAlloc(PVBOXMP_DEVEXT pDevExt, uint32_t cbCmd)
1349{
1350 return vboxCmdVbvaConCmdAlloc(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbCmd);
1351}
1352
1353void VBoxCmdVbvaConCmdFree(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA_CRCMD_CMD *pCmd)
1354{
1355 vboxCmdVbvaConCmdFree(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, pCmd);
1356}
1357
1358int VBoxCmdVbvaConCmdSubmitAsync(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA_CRCMD_CMD* pCmd, FNVBOXSHGSMICMDCOMPLETION pfnCompletion, void *pvCompletion)
1359{
1360 return vboxCmdVbvaConSubmitAsync(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, pCmd, pfnCompletion, pvCompletion);
1361}
1362
1363int VBoxCmdVbvaConCmdCompletionData(void *pvCmd, VBOXCMDVBVA_CRCMD_CMD **ppCmd)
1364{
1365 VBOXCMDVBVA_CTL_3DCTL_CMD *pCmd = (VBOXCMDVBVA_CTL_3DCTL_CMD*)pvCmd;
1366 if (ppCmd)
1367 *ppCmd = (VBOXCMDVBVA_CRCMD_CMD*)(pCmd+1);
1368 return pCmd->Hdr.i32Result;
1369}
1370
/**
 * Sends a synchronous VBOXCMDVBVACTL_TYPE_RESIZE control to the host to
 * reconfigure one screen (mode, position, target map).
 *
 * Must be called below DISPATCH_LEVEL (the submission blocks).
 *
 * @returns VBox status code: screen-info build failure, submission failure,
 *          or the host-reported result.
 * @param pDevExt    Device extension.
 * @param pAllocData Allocation data describing the new screen surface/mode.
 * @param pTargetMap Bitmap of video present targets covered by this source.
 * @param pVScreenPos New virtual-screen position of the source.
 * @param fFlags     Resize flags passed through to vboxWddmScreenInfoInit.
 */
int VBoxCmdVbvaConCmdResize(PVBOXMP_DEVEXT pDevExt, const VBOXWDDM_ALLOC_DATA *pAllocData, const uint32_t *pTargetMap, const POINT * pVScreenPos, uint16_t fFlags)
{
    Assert(KeGetCurrentIrql() < DISPATCH_LEVEL);

    VBOXCMDVBVA_CTL_RESIZE *pResize = (VBOXCMDVBVA_CTL_RESIZE*)vboxCmdVbvaCtlCreate(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, sizeof (VBOXCMDVBVA_CTL_RESIZE));
    if (!pResize)
    {
        WARN(("vboxCmdVbvaCtlCreate failed"));
        return VERR_OUT_OF_RESOURCES;
    }

    /* i32Result is pre-set so a host that ignores the control reports failure. */
    pResize->Hdr.u32Type = VBOXCMDVBVACTL_TYPE_RESIZE;
    pResize->Hdr.i32Result = VERR_NOT_IMPLEMENTED;

    /* Only a single resize entry (aEntries[0]) is submitted per call. */
    int rc = vboxWddmScreenInfoInit(&pResize->Resize.aEntries[0].Screen, pAllocData, pVScreenPos, fFlags);
    if (RT_SUCCESS(rc))
    {
        memcpy(&pResize->Resize.aEntries[0].aTargetMap, pTargetMap, sizeof (pResize->Resize.aEntries[0].aTargetMap));
        rc = vboxCmdVbvaCtlSubmitSync(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, &pResize->Hdr);
        if (RT_SUCCESS(rc))
        {
            rc = pResize->Hdr.i32Result;
            if (RT_FAILURE(rc))
                WARN(("VBOXCMDVBVACTL_TYPE_RESIZE failed %d", rc));
        }
        else
            WARN(("vboxCmdVbvaCtlSubmitSync failed %d", rc));
    }
    else
        WARN(("vboxWddmScreenInfoInit failed %d", rc));

    vboxCmdVbvaCtlFree(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, &pResize->Hdr);

    return rc;
}
1406#endif
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette