VirtualBox

source: vbox/trunk/src/VBox/Additions/WINNT/Graphics/Video/mp/wddm/VBoxMPVbva.cpp@ 69351

Last change on this file since 69351 was 69351, checked in by vboxsync, 7 years ago

WINNT/Graphics: scm cleanups

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 46.0 KB
Line 
1/* $Id: VBoxMPVbva.cpp 69351 2017-10-26 14:22:39Z vboxsync $ */
2/** @file
3 * VBox WDDM Miniport driver
4 */
5
6/*
7 * Copyright (C) 2011-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#include "VBoxMPWddm.h"
19#include "common/VBoxMPCommon.h"
20
21/*
22 * Public hardware buffer methods.
23 */
24int vboxVbvaEnable (PVBOXMP_DEVEXT pDevExt, VBOXVBVAINFO *pVbva)
25{
26 if (VBoxVBVAEnable(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx,
27 pVbva->Vbva.pVBVA, pVbva->srcId))
28 return VINF_SUCCESS;
29
30 WARN(("VBoxVBVAEnable failed!"));
31 return VERR_GENERAL_FAILURE;
32}
33
34int vboxVbvaDisable (PVBOXMP_DEVEXT pDevExt, VBOXVBVAINFO *pVbva)
35{
36 VBoxVBVADisable(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, pVbva->srcId);
37 return VINF_SUCCESS;
38}
39
40int vboxVbvaCreate(PVBOXMP_DEVEXT pDevExt, VBOXVBVAINFO *pVbva, ULONG offBuffer, ULONG cbBuffer, D3DDDI_VIDEO_PRESENT_SOURCE_ID srcId)
41{
42 memset(pVbva, 0, sizeof(VBOXVBVAINFO));
43
44 KeInitializeSpinLock(&pVbva->Lock);
45
46 int rc = VBoxMPCmnMapAdapterMemory(VBoxCommonFromDeviceExt(pDevExt),
47 (void**)&pVbva->Vbva.pVBVA,
48 offBuffer,
49 cbBuffer);
50 if (RT_SUCCESS(rc))
51 {
52 Assert(pVbva->Vbva.pVBVA);
53 VBoxVBVASetupBufferContext(&pVbva->Vbva, offBuffer, cbBuffer);
54 pVbva->srcId = srcId;
55 }
56 else
57 {
58 WARN(("VBoxMPCmnMapAdapterMemory failed rc %d", rc));
59 }
60
61
62 return rc;
63}
64
65int vboxVbvaDestroy(PVBOXMP_DEVEXT pDevExt, VBOXVBVAINFO *pVbva)
66{
67 int rc = VINF_SUCCESS;
68 VBoxMPCmnUnmapAdapterMemory(VBoxCommonFromDeviceExt(pDevExt), (void**)&pVbva->Vbva.pVBVA);
69 memset(pVbva, 0, sizeof (VBOXVBVAINFO));
70 return rc;
71}
72
73int vboxVbvaReportDirtyRect (PVBOXMP_DEVEXT pDevExt, PVBOXWDDM_SOURCE pSrc, RECT *pRectOrig)
74{
75 VBVACMDHDR hdr;
76
77 RECT rect = *pRectOrig;
78
79// if (rect.left < 0) rect.left = 0;
80// if (rect.top < 0) rect.top = 0;
81// if (rect.right > (int)ppdev->cxScreen) rect.right = ppdev->cxScreen;
82// if (rect.bottom > (int)ppdev->cyScreen) rect.bottom = ppdev->cyScreen;
83
84 hdr.x = (int16_t)rect.left;
85 hdr.y = (int16_t)rect.top;
86 hdr.w = (uint16_t)(rect.right - rect.left);
87 hdr.h = (uint16_t)(rect.bottom - rect.top);
88
89 hdr.x += (int16_t)pSrc->VScreenPos.x;
90 hdr.y += (int16_t)pSrc->VScreenPos.y;
91
92 if (VBoxVBVAWrite(&pSrc->Vbva.Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, &hdr, sizeof(hdr)))
93 return VINF_SUCCESS;
94
95 WARN(("VBoxVBVAWrite failed"));
96 return VERR_GENERAL_FAILURE;
97}
98
99#ifdef VBOX_WITH_CROGL
100/* command vbva ring buffer */
101
102/* customized VBVA implementation */
103
104/* Forward declarations of internal functions. */
105static void vboxHwBufferPlaceDataAt(PVBVAEXBUFFERCONTEXT pCtx, const void *p,
106 uint32_t cb, uint32_t offset);
107static bool vboxHwBufferWrite(PVBVAEXBUFFERCONTEXT pCtx,
108 PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
109 const void *p, uint32_t cb);
110
/** Invokes the flush callback registered via VBoxVBVAExSetupBufferContext. */
DECLINLINE(void) vboxVBVAExFlush(struct VBVAEXBUFFERCONTEXT *pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx)
{
    pCtx->pfnFlush(pCtx, pHGSMICtx, pCtx->pvFlush);
}
115
/**
 * Hands a prepared command over to the host by writing its HGSMI offset to
 * the guest HGSMI port.
 *
 * @returns VINF_SUCCESS (the port write itself cannot fail).
 */
static int vboxCmdVbvaSubmitHgsmi(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, HGSMIOFFSET offDr)
{
    VBVO_PORT_WRITE_U32(pHGSMICtx->port, offDr);
    /* Make the compiler aware that the host has changed memory. */
    ASMCompilerBarrier();
    return VINF_SUCCESS;
}
123#define vboxCmdVbvaSubmit vboxCmdVbvaSubmitHgsmi
124
125static VBOXCMDVBVA_CTL * vboxCmdVbvaCtlCreate(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, uint32_t cbCtl)
126{
127 Assert(cbCtl >= sizeof (VBOXCMDVBVA_CTL));
128 return (VBOXCMDVBVA_CTL*)VBoxSHGSMICommandAlloc(&pHGSMICtx->heapCtx, cbCtl, HGSMI_CH_VBVA, VBVA_CMDVBVA_CTL);
129}
130
/** Returns a control structure allocated by vboxCmdVbvaCtlCreate to the SHGSMI heap. */
static void vboxCmdVbvaCtlFree(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CTL * pCtl)
{
    VBoxSHGSMICommandFree(&pHGSMICtx->heapCtx, pCtl);
}
135
136static int vboxCmdVbvaCtlSubmitSync(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CTL * pCtl)
137{
138 const VBOXSHGSMIHEADER* pHdr = VBoxSHGSMICommandPrepSynch(&pHGSMICtx->heapCtx, pCtl);
139 if (!pHdr)
140 {
141 WARN(("VBoxSHGSMICommandPrepSynch returnd NULL"));
142 return VERR_INVALID_PARAMETER;
143 }
144
145 HGSMIOFFSET offCmd = VBoxSHGSMICommandOffset(&pHGSMICtx->heapCtx, pHdr);
146 if (offCmd == HGSMIOFFSET_VOID)
147 {
148 WARN(("VBoxSHGSMICommandOffset returnd NULL"));
149 VBoxSHGSMICommandCancelSynch(&pHGSMICtx->heapCtx, pHdr);
150 return VERR_INVALID_PARAMETER;
151 }
152
153 int rc = vboxCmdVbvaSubmit(pHGSMICtx, offCmd);
154 if (RT_SUCCESS(rc))
155 {
156 rc = VBoxSHGSMICommandDoneSynch(&pHGSMICtx->heapCtx, pHdr);
157 if (RT_SUCCESS(rc))
158 {
159 rc = pCtl->i32Result;
160 if (!RT_SUCCESS(rc))
161 WARN(("pCtl->i32Result %d", pCtl->i32Result));
162
163 return rc;
164 }
165 else
166 WARN(("VBoxSHGSMICommandDoneSynch returnd %d", rc));
167 }
168 else
169 WARN(("vboxCmdVbvaSubmit returnd %d", rc));
170
171 VBoxSHGSMICommandCancelSynch(&pHGSMICtx->heapCtx, pHdr);
172
173 return rc;
174}
175
176static int vboxCmdVbvaCtlSubmitAsync(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CTL * pCtl, FNVBOXSHGSMICMDCOMPLETION pfnCompletion, void *pvCompletion)
177{
178 const VBOXSHGSMIHEADER* pHdr = VBoxSHGSMICommandPrepAsynch(&pHGSMICtx->heapCtx, pCtl, pfnCompletion, pvCompletion, VBOXSHGSMI_FLAG_GH_ASYNCH_IRQ);
179 HGSMIOFFSET offCmd = VBoxSHGSMICommandOffset(&pHGSMICtx->heapCtx, pHdr);
180 if (offCmd == HGSMIOFFSET_VOID)
181 {
182 WARN(("VBoxSHGSMICommandOffset returnd NULL"));
183 VBoxSHGSMICommandCancelAsynch(&pHGSMICtx->heapCtx, pHdr);
184 return VERR_INVALID_PARAMETER;
185 }
186
187 int rc = vboxCmdVbvaSubmit(pHGSMICtx, offCmd);
188 if (RT_SUCCESS(rc))
189 {
190 VBoxSHGSMICommandDoneAsynch(&pHGSMICtx->heapCtx, pHdr);
191 return rc;
192 }
193 else
194 WARN(("vboxCmdVbvaSubmit returnd %d", rc));
195
196 VBoxSHGSMICommandCancelAsynch(&pHGSMICtx->heapCtx, pHdr);
197
198 return rc;
199}
200
201static int vboxVBVAExCtlSubmitEnableDisable(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, bool fEnable)
202{
203 VBOXCMDVBVA_CTL_ENABLE *pCtl = (VBOXCMDVBVA_CTL_ENABLE*)vboxCmdVbvaCtlCreate(pHGSMICtx, sizeof (*pCtl));
204 if (!pCtl)
205 {
206 WARN(("vboxCmdVbvaCtlCreate failed"));
207 return VERR_NO_MEMORY;
208 }
209
210 pCtl->Hdr.u32Type = VBOXCMDVBVACTL_TYPE_ENABLE;
211 pCtl->Hdr.i32Result = VERR_NOT_IMPLEMENTED;
212 memset(&pCtl->Enable, 0, sizeof (pCtl->Enable));
213 pCtl->Enable.u32Flags = fEnable? VBVA_F_ENABLE: VBVA_F_DISABLE;
214 pCtl->Enable.u32Offset = pCtx->offVRAMBuffer;
215 pCtl->Enable.i32Result = VERR_NOT_SUPPORTED;
216 pCtl->Enable.u32Flags |= VBVA_F_ABSOFFSET;
217
218 int rc = vboxCmdVbvaCtlSubmitSync(pHGSMICtx, &pCtl->Hdr);
219 if (RT_SUCCESS(rc))
220 {
221 rc = pCtl->Hdr.i32Result;
222 if (!RT_SUCCESS(rc))
223 WARN(("vboxCmdVbvaCtlSubmitSync Disable failed %d", rc));
224 }
225 else
226 WARN(("vboxCmdVbvaCtlSubmitSync returnd %d", rc));
227
228 vboxCmdVbvaCtlFree(pHGSMICtx, &pCtl->Hdr);
229
230 return rc;
231}
232
233/*
234 * Public hardware buffer methods.
235 */
/**
 * Enables the extended (command) VBVA: resets the shared VBVABUFFER header in
 * VRAM and then tells the host to start processing it.
 *
 * @param pCtx      Guest buffer context; offVRAMBuffer/cbBuffer must be set up.
 * @param pHGSMICtx HGSMI guest command context used to submit the control.
 * @param pVBVA     The mapped shared buffer to (re)initialize.
 * @returns VBox status code; on failure the VBVA is disabled again.
 */
VBVAEX_DECL(int) VBoxVBVAExEnable(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBVABUFFER *pVBVA)
{
    int rc = VERR_GENERAL_FAILURE;

    LogFlowFunc(("pVBVA %p\n", pVBVA));

#if 0 /* All callers check this */
    if (ppdev->bHGSMISupported)
#endif
    {
        LogFunc(("pVBVA %p vbva off 0x%x\n", pVBVA, pCtx->offVRAMBuffer));

        /* Reset the shared header BEFORE the host is told to use the buffer. */
        pVBVA->hostFlags.u32HostEvents = 0;
        pVBVA->hostFlags.u32SupportedOrders = 0;
        pVBVA->off32Data = 0;
        pVBVA->off32Free = 0;
        memset(pVBVA->aRecords, 0, sizeof (pVBVA->aRecords));
        pVBVA->indexRecordFirst = 0;
        pVBVA->indexRecordFree = 0;
        pVBVA->cbPartialWriteThreshold = 256;
        /* Usable data size: whole buffer minus the header; au8Data is already
         * counted inside sizeof (VBVABUFFER), so it is added back. */
        pVBVA->cbData = pCtx->cbBuffer - sizeof (VBVABUFFER) + sizeof (pVBVA->au8Data);

        pCtx->fHwBufferOverflow = false;
        pCtx->pRecord = NULL;
        pCtx->pVBVA = pVBVA;

        rc = vboxVBVAExCtlSubmitEnableDisable(pCtx, pHGSMICtx, true);
    }

    if (!RT_SUCCESS(rc))
    {
        WARN(("enable failed %d", rc));
        /* Roll back: ensure the host side is off and the context is cleared. */
        VBoxVBVAExDisable(pCtx, pHGSMICtx);
    }

    return rc;
}
273
274VBVAEX_DECL(void) VBoxVBVAExDisable(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx)
275{
276 LogFlowFunc(("\n"));
277
278 vboxVBVAExCtlSubmitEnableDisable(pCtx, pHGSMICtx, false);
279
280 pCtx->fHwBufferOverflow = false;
281 pCtx->pRecord = NULL;
282 pCtx->pVBVA = NULL;
283
284 return;
285}
286
/**
 * Opens a new record in the VBVA ring for writing.
 *
 * Reserves the next slot in aRecords, marks it VBVA_F_RECORD_PARTIAL (so the
 * host will not consume it yet) and remembers it in pCtx->pRecord for the
 * subsequent write/alloc calls.
 *
 * @returns true if a record slot was acquired, false if the record queue is
 *          still full even after asking the host to flush.
 */
VBVAEX_DECL(bool) VBoxVBVAExBufferBeginUpdate(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx)
{
    bool bRc = false;

    // LogFunc(("flags = 0x%08X\n", pCtx->pVBVA? pCtx->pVBVA->u32HostEvents: -1));

    Assert(pCtx->pVBVA);
    /* we do not use u32HostEvents & VBVA_F_MODE_ENABLED,
     * VBVA stays enabled once ENABLE call succeeds, until it is disabled with DISABLED call */
//    if (   pCtx->pVBVA
//        && (pCtx->pVBVA->hostFlags.u32HostEvents & VBVA_F_MODE_ENABLED))
    {
        uint32_t indexRecordNext;

        Assert(!pCtx->fHwBufferOverflow);
        Assert(pCtx->pRecord == NULL);

        indexRecordNext = (pCtx->pVBVA->indexRecordFree + 1) % VBVA_MAX_RECORDS;

        if (indexRecordNext == pCtx->indexRecordFirstUncompleted)
        {
            /* All slots in the records queue are used. */
            vboxVBVAExFlush (pCtx, pHGSMICtx);
        }

        if (indexRecordNext == pCtx->indexRecordFirstUncompleted)
        {
            /* Even after flush there is no place. Fail the request. */
            LogFunc(("no space in the queue of records!!! first %d, last %d\n",
                     indexRecordNext, pCtx->pVBVA->indexRecordFree));
        }
        else
        {
            /* Initialize the record. */
            VBVARECORD *pRecord = &pCtx->pVBVA->aRecords[pCtx->pVBVA->indexRecordFree];

            /* Partial marker keeps the host off this record until EndUpdate. */
            pRecord->cbRecord = VBVA_F_RECORD_PARTIAL;

            pCtx->pVBVA->indexRecordFree = indexRecordNext;

            // LogFunc(("indexRecordNext = %d\n", indexRecordNext));

            /* Remember which record we are using. */
            pCtx->pRecord = pRecord;

            bRc = true;
        }
    }

    return bRc;
}
338
339VBVAEX_DECL(void) VBoxVBVAExBufferEndUpdate(PVBVAEXBUFFERCONTEXT pCtx)
340{
341 VBVARECORD *pRecord;
342
343 // LogFunc(("\n"));
344
345 Assert(pCtx->pVBVA);
346
347 pRecord = pCtx->pRecord;
348 Assert(pRecord && (pRecord->cbRecord & VBVA_F_RECORD_PARTIAL));
349
350 /* Mark the record completed. */
351 pRecord->cbRecord &= ~VBVA_F_RECORD_PARTIAL;
352
353 pCtx->fHwBufferOverflow = false;
354 pCtx->pRecord = NULL;
355
356 return;
357}
358
/**
 * Checks whether ring index @a u32Entry lies inside the live circular span
 * [u32First, u32Free).  An empty span (first == free) contains nothing.
 */
static inline bool vboxVBVAExIsEntryInRange(uint32_t u32First, uint32_t u32Entry, uint32_t u32Free)
{
    if (u32First == u32Free)
        return false;                                   /* empty span */
    if (u32First < u32Free)                             /* span does not wrap */
        return u32Entry >= u32First && u32Entry < u32Free;
    return u32Entry >= u32First || u32Entry < u32Free;  /* span wraps the ring end */
}
368
369DECLINLINE(bool) vboxVBVAExIsEntryInRangeOrEmpty(uint32_t u32First, uint32_t u32Entry, uint32_t u32Free)
370{
371 return vboxVBVAExIsEntryInRange(u32First, u32Entry, u32Free)
372 || ( u32First == u32Entry
373 && u32Entry == u32Free);
374}
375#ifdef DEBUG
376
/**
 * Debug-only sanity check: verifies that the host's first-record index and
 * data offset lie within the span the guest still considers uncompleted
 * (or that the span is empty).
 */
DECLINLINE(void) vboxHwBufferVerifyCompleted(PVBVAEXBUFFERCONTEXT pCtx)
{
    VBVABUFFER *pVBVA = pCtx->pVBVA;
    if (!vboxVBVAExIsEntryInRangeOrEmpty(pCtx->indexRecordFirstUncompleted, pVBVA->indexRecordFirst, pVBVA->indexRecordFree))
    {
        WARN(("invalid record set"));
    }

    if (!vboxVBVAExIsEntryInRangeOrEmpty(pCtx->off32DataUncompleted, pVBVA->off32Data, pVBVA->off32Free))
    {
        WARN(("invalid data set"));
    }
}
390#endif
391
392/*
393 * Private operations.
394 */
395static uint32_t vboxHwBufferAvail(PVBVAEXBUFFERCONTEXT pCtx, const VBVABUFFER *pVBVA)
396{
397 int32_t i32Diff = pCtx->off32DataUncompleted - pVBVA->off32Free;
398
399 return i32Diff > 0? i32Diff: pVBVA->cbData + i32Diff;
400}
401
402static uint32_t vboxHwBufferContiguousAvail(PVBVAEXBUFFERCONTEXT pCtx, const VBVABUFFER *pVBVA)
403{
404 int32_t i32Diff = pCtx->off32DataUncompleted - pVBVA->off32Free;
405
406 return i32Diff > 0 ? i32Diff: pVBVA->cbData - pVBVA->off32Free;
407}
408
409static void vboxHwBufferPlaceDataAt(PVBVAEXBUFFERCONTEXT pCtx, const void *p,
410 uint32_t cb, uint32_t offset)
411{
412 VBVABUFFER *pVBVA = pCtx->pVBVA;
413 uint32_t u32BytesTillBoundary = pVBVA->cbData - offset;
414 uint8_t *dst = &pVBVA->au8Data[offset];
415 int32_t i32Diff = cb - u32BytesTillBoundary;
416
417 if (i32Diff <= 0)
418 {
419 /* Chunk will not cross buffer boundary. */
420 memcpy (dst, p, cb);
421 }
422 else
423 {
424 /* Chunk crosses buffer boundary. */
425 memcpy (dst, p, u32BytesTillBoundary);
426 memcpy (&pVBVA->au8Data[0], (uint8_t *)p + u32BytesTillBoundary, i32Diff);
427 }
428
429 return;
430}
431
/**
 * Copies @a cb bytes into the currently open (partial) record of the ring.
 *
 * When the data does not fit, the host is asked to flush; if there is still
 * not enough room, the data is written in partial chunks while keeping
 * cbPartialWriteThreshold bytes of headroom.  If even the headroom is gone,
 * fHwBufferOverflow is latched and the write fails.
 *
 * @returns true on success, false on overflow or when VBVA is not set up.
 */
static bool vboxHwBufferWrite(PVBVAEXBUFFERCONTEXT pCtx,
                              PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
                              const void *p, uint32_t cb)
{
    VBVARECORD *pRecord;
    uint32_t cbHwBufferAvail;

    uint32_t cbWritten = 0;

    VBVABUFFER *pVBVA = pCtx->pVBVA;
    Assert(pVBVA);

    if (!pVBVA || pCtx->fHwBufferOverflow)
    {
        return false;
    }

    Assert(pVBVA->indexRecordFirst != pVBVA->indexRecordFree);
    Assert(pCtx->indexRecordFirstUncompleted != pVBVA->indexRecordFree);

    /* A record must be open (BeginUpdate) and still marked partial. */
    pRecord = pCtx->pRecord;
    Assert(pRecord && (pRecord->cbRecord & VBVA_F_RECORD_PARTIAL));

    // LogFunc(("%d\n", cb));

    cbHwBufferAvail = vboxHwBufferAvail(pCtx, pVBVA);

    while (cb > 0)
    {
        uint32_t cbChunk = cb;

        // LogFunc(("pVBVA->off32Free %d, pRecord->cbRecord 0x%08X, cbHwBufferAvail %d, cb %d, cbWritten %d\n",
        //             pVBVA->off32Free, pRecord->cbRecord, cbHwBufferAvail, cb, cbWritten));

        if (cbChunk >= cbHwBufferAvail)
        {
            /* Not enough room: ask the host to drain the ring and re-check. */
            LogFunc(("1) avail %d, chunk %d\n", cbHwBufferAvail, cbChunk));

            vboxVBVAExFlush(pCtx, pHGSMICtx);

            cbHwBufferAvail = vboxHwBufferAvail(pCtx, pVBVA);

            if (cbChunk >= cbHwBufferAvail)
            {
                WARN(("no place for %d bytes. Only %d bytes available after flush. Going to partial writes.\n",
                      cb, cbHwBufferAvail));

                if (cbHwBufferAvail <= pVBVA->cbPartialWriteThreshold)
                {
                    /* The headroom is exhausted; latch the overflow and fail. */
                    WARN(("Buffer overflow!!!\n"));
                    pCtx->fHwBufferOverflow = true;
                    Assert(false);
                    return false;
                }

                /* Write only what fits while preserving the threshold headroom. */
                cbChunk = cbHwBufferAvail - pVBVA->cbPartialWriteThreshold;
            }
        }

        Assert(cbChunk <= cb);
        Assert(cbChunk <= vboxHwBufferAvail(pCtx, pVBVA));

        vboxHwBufferPlaceDataAt (pCtx, (uint8_t *)p + cbWritten, cbChunk, pVBVA->off32Free);

        /* Publish the new free offset and grow the open record. */
        pVBVA->off32Free = (pVBVA->off32Free + cbChunk) % pVBVA->cbData;
        pRecord->cbRecord += cbChunk;
        cbHwBufferAvail -= cbChunk;

        cb -= cbChunk;
        cbWritten += cbChunk;
    }

    return true;
}
506
507/*
508 * Public writer to the hardware buffer.
509 */
510VBVAEX_DECL(uint32_t) VBoxVBVAExGetFreeTail(PVBVAEXBUFFERCONTEXT pCtx)
511{
512 VBVABUFFER *pVBVA = pCtx->pVBVA;
513 if (pVBVA->off32Data <= pVBVA->off32Free)
514 return pVBVA->cbData - pVBVA->off32Free;
515 return 0;
516}
517
/**
 * Reserves @a cb contiguous bytes in the ring for the currently open record
 * and returns a pointer to the reserved area.
 *
 * Must be called between VBoxVBVAExBufferBeginUpdate and
 * VBoxVBVAExBufferEndUpdate.  Flushes and retries once when contiguous space
 * is insufficient.
 *
 * @returns Pointer into the ring data area, or NULL when the request cannot
 *          be satisfied without wrapping.
 */
VBVAEX_DECL(void *) VBoxVBVAExAllocContiguous(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, uint32_t cb)
{
    VBVARECORD *pRecord;
    uint32_t cbHwBufferContiguousAvail;
    uint32_t offset;

    VBVABUFFER *pVBVA = pCtx->pVBVA;
    Assert(pVBVA);

    if (!pVBVA || pCtx->fHwBufferOverflow)
    {
        return NULL;
    }

    Assert(pVBVA->indexRecordFirst != pVBVA->indexRecordFree);
    Assert(pCtx->indexRecordFirstUncompleted != pVBVA->indexRecordFree);

    /* A record must be open (BeginUpdate) and still marked partial. */
    pRecord = pCtx->pRecord;
    Assert(pRecord && (pRecord->cbRecord & VBVA_F_RECORD_PARTIAL));

    // LogFunc(("%d\n", cb));

    if (pVBVA->cbData < cb)
    {
        WARN(("requested to allocate buffer of size %d bigger than the VBVA ring buffer size %d", cb, pVBVA->cbData));
        return NULL;
    }

    cbHwBufferContiguousAvail = vboxHwBufferContiguousAvail(pCtx, pVBVA);

    if (cbHwBufferContiguousAvail < cb)
    {
        if (cb > pVBVA->cbData - pVBVA->off32Free)
        {
            /* the entire contiguous part is smaller than the requested buffer */
            return NULL;
        }

        /* Ask the host to drain the ring, then re-check. */
        vboxVBVAExFlush(pCtx, pHGSMICtx);

        cbHwBufferContiguousAvail = vboxHwBufferContiguousAvail(pCtx, pVBVA);
        if (cbHwBufferContiguousAvail < cb)
        {
            /* this is really bad - the host did not clean up buffer even after we requested it to flush */
            WARN(("Host did not clean up the buffer!"));
            return NULL;
        }
    }

    offset = pVBVA->off32Free;

    /* Commit the reservation: advance the free offset and grow the record. */
    pVBVA->off32Free = (pVBVA->off32Free + cb) % pVBVA->cbData;
    pRecord->cbRecord += cb;

    return &pVBVA->au8Data[offset];
}
574
575VBVAEX_DECL(bool) VBoxVBVAExIsProcessing(PVBVAEXBUFFERCONTEXT pCtx)
576{
577 uint32_t u32HostEvents = pCtx->pVBVA->hostFlags.u32HostEvents;
578 return !!(u32HostEvents & VBVA_F_STATE_PROCESSING);
579}
580
581VBVAEX_DECL(void) VBoxVBVAExCBufferCompleted(PVBVAEXBUFFERCONTEXT pCtx)
582{
583 VBVABUFFER *pVBVA = pCtx->pVBVA;
584 uint32_t cbBuffer = pVBVA->aRecords[pCtx->indexRecordFirstUncompleted].cbRecord;
585 pCtx->indexRecordFirstUncompleted = (pCtx->indexRecordFirstUncompleted + 1) % VBVA_MAX_RECORDS;
586 pCtx->off32DataUncompleted = (pCtx->off32DataUncompleted + cbBuffer) % pVBVA->cbData;
587}
588
/** Public wrapper over vboxHwBufferWrite; see that function for semantics. */
VBVAEX_DECL(bool) VBoxVBVAExWrite(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, const void *pv, uint32_t cb)
{
    return vboxHwBufferWrite(pCtx, pHGSMICtx, pv, cb);
}
593
594VBVAEX_DECL(bool) VBoxVBVAExOrderSupported(PVBVAEXBUFFERCONTEXT pCtx, unsigned code)
595{
596 VBVABUFFER *pVBVA = pCtx->pVBVA;
597
598 if (!pVBVA)
599 {
600 return false;
601 }
602
603 if (pVBVA->hostFlags.u32SupportedOrders & (1 << code))
604 {
605 return true;
606 }
607
608 return false;
609}
610
/**
 * Initializes a VBVAEX buffer context.
 *
 * Zeroes only the members laid out before pVBVA (RT_OFFSETOF trick — pVBVA
 * and the members following it are deliberately left untouched), then records
 * the VRAM placement and the flush callback.
 */
VBVAEX_DECL(void) VBoxVBVAExSetupBufferContext(PVBVAEXBUFFERCONTEXT pCtx, uint32_t offVRAMBuffer, uint32_t cbBuffer,
                                               PFNVBVAEXBUFFERFLUSH pfnFlush, void *pvFlush)
{
    memset(pCtx, 0, RT_OFFSETOF(VBVAEXBUFFERCONTEXT, pVBVA));
    pCtx->offVRAMBuffer = offVRAMBuffer;
    pCtx->cbBuffer = cbBuffer;
    pCtx->pfnFlush = pfnFlush;
    pCtx->pvFlush = pvFlush;
}
620
/**
 * Returns a pointer to the command the iterator currently points at.
 *
 * @param pcbBuffer   Where to return the record's size (optional).
 * @param pfProcessed Where to return whether the host has already consumed
 *                    the record, i.e. it is no longer inside
 *                    [indexRecordFirst, indexRecordFree) (optional).
 * @returns The command data pointer, or NULL for a freshly opened record.
 */
static void* vboxVBVAExIterCur(PVBVAEXBUFFERITERBASE pIter, struct VBVABUFFER *pVBVA, uint32_t *pcbBuffer, bool *pfProcessed)
{
    uint32_t cbRecord = pVBVA->aRecords[pIter->iCurRecord].cbRecord;
    /* NOTE(review): this equality test only rejects a freshly initialized
     * partial record (cbRecord exactly VBVA_F_RECORD_PARTIAL, no data yet);
     * a partial record that already has data keeps the flag bit set and
     * passes here — confirm this is intentional. */
    if (cbRecord == VBVA_F_RECORD_PARTIAL)
        return NULL;
    if (pcbBuffer)
        *pcbBuffer = cbRecord;
    if (pfProcessed)
        *pfProcessed = !vboxVBVAExIsEntryInRange(pVBVA->indexRecordFirst, pIter->iCurRecord, pVBVA->indexRecordFree);
    return &pVBVA->au8Data[pIter->off32CurCmd];
}
632
/**
 * Ring-buffer subtraction: computes x - val, wrapping into [0, maxVal] when
 * the difference is negative.  (Note: a wrapped result of exactly maxVal is
 * possible, e.g. x == 0, val == maxVal — preserved from the original.)
 */
static inline uint32_t vboxVBVAExSubst(uint32_t x, uint32_t val, uint32_t maxVal)
{
    const int32_t i32Result = (int32_t)(x - val);
    if (i32Result >= 0)
        return (uint32_t)i32Result;
    return maxVal - (((uint32_t)-i32Result) % maxVal);
}
638
/**
 * Initializes a backward iterator positioned at the most recently submitted
 * record (the one just before indexRecordFree).  When the uncompleted span is
 * empty, the iterator is positioned so that BIterNext returns NULL at once.
 */
VBVAEX_DECL(void) VBoxVBVAExBIterInit(PVBVAEXBUFFERCONTEXT pCtx, PVBVAEXBUFFERBACKWARDITER pIter)
{
    struct VBVABUFFER *pVBVA = pCtx->pVBVA;
    pIter->Base.pCtx = pCtx;
    uint32_t iCurRecord = vboxVBVAExSubst(pVBVA->indexRecordFree, 1, VBVA_MAX_RECORDS);
    if (vboxVBVAExIsEntryInRange(pCtx->indexRecordFirstUncompleted, iCurRecord, pVBVA->indexRecordFree))
    {
        /* even if the command gets completed by the time we're doing the pCtx->pVBVA->aRecords[iCurRecord].cbRecord below,
         * the pCtx->pVBVA->aRecords[iCurRecord].cbRecord will still be valid, as it can only be modified by a submitter,
         * and we are in a submitter context now */
        pIter->Base.iCurRecord = iCurRecord;
        pIter->Base.off32CurCmd = vboxVBVAExSubst(pVBVA->off32Free, pCtx->pVBVA->aRecords[iCurRecord].cbRecord, pVBVA->cbData);
    }
    else
    {
        /* no data */
        pIter->Base.iCurRecord = pVBVA->indexRecordFree;
        pIter->Base.off32CurCmd = pVBVA->off32Free;
    }
}
659
/**
 * Returns the command at the backward iterator's position and steps the
 * iterator to the previous record.
 *
 * @param pcbBuffer   Where to return the returned command's size (optional).
 * @param pfProcessed Where to return whether the host already consumed it
 *                    (optional).
 * @returns The command, or NULL once the iterator leaves the uncompleted range.
 */
VBVAEX_DECL(void *) VBoxVBVAExBIterNext(PVBVAEXBUFFERBACKWARDITER pIter, uint32_t *pcbBuffer, bool *pfProcessed)
{
    PVBVAEXBUFFERCONTEXT pCtx = pIter->Base.pCtx;
    struct VBVABUFFER *pVBVA = pCtx->pVBVA;
    uint32_t indexRecordFirstUncompleted = pCtx->indexRecordFirstUncompleted;
    if (!vboxVBVAExIsEntryInRange(indexRecordFirstUncompleted, pIter->Base.iCurRecord, pVBVA->indexRecordFree))
        return NULL;

    void *pvBuffer = vboxVBVAExIterCur(&pIter->Base, pVBVA, pcbBuffer, pfProcessed);
    AssertRelease(pvBuffer);

    /* even if the command gets completed by the time we're doing the pCtx->pVBVA->aRecords[pIter->Base.iCurRecord].cbRecord below,
     * the pCtx->pVBVA->aRecords[pIter->Base.iCurRecord].cbRecord will still be valid, as it can only be modified by a submitter,
     * and we are in a submitter context now */
    /* Order matters: iCurRecord is decremented FIRST, because the data offset
     * must move back by the size of the record that thereby becomes current. */
    pIter->Base.iCurRecord = vboxVBVAExSubst(pIter->Base.iCurRecord, 1, VBVA_MAX_RECORDS);
    pIter->Base.off32CurCmd = vboxVBVAExSubst(pIter->Base.off32CurCmd, pCtx->pVBVA->aRecords[pIter->Base.iCurRecord].cbRecord, pVBVA->cbData);

    return pvBuffer;
}
679
680VBVAEX_DECL(void) VBoxVBVAExCFIterInit(PVBVAEXBUFFERCONTEXT pCtx, PVBVAEXBUFFERFORWARDITER pIter)
681{
682 pIter->Base.pCtx = pCtx;
683 pIter->Base.iCurRecord = pCtx->indexRecordFirstUncompleted;
684 pIter->Base.off32CurCmd = pCtx->off32DataUncompleted;
685}
686
/**
 * Returns the command at the forward iterator's position and advances the
 * iterator to the next record.
 *
 * @returns The command, or NULL when the iterator reached indexRecordFree or
 *          hit a freshly opened (partial, no data) record.
 */
VBVAEX_DECL(void *) VBoxVBVAExCFIterNext(PVBVAEXBUFFERFORWARDITER pIter, uint32_t *pcbBuffer, bool *pfProcessed)
{
    PVBVAEXBUFFERCONTEXT pCtx = pIter->Base.pCtx;
    struct VBVABUFFER *pVBVA = pCtx->pVBVA;
    uint32_t indexRecordFree = pVBVA->indexRecordFree;
    if (!vboxVBVAExIsEntryInRange(pCtx->indexRecordFirstUncompleted, pIter->Base.iCurRecord, indexRecordFree))
        return NULL;

    uint32_t cbBuffer;
    void *pvData = vboxVBVAExIterCur(&pIter->Base, pVBVA, &cbBuffer, pfProcessed);
    if (!pvData)
        return NULL;

    /* Step forward by one record and by the record's data size. */
    pIter->Base.iCurRecord = (pIter->Base.iCurRecord + 1) % VBVA_MAX_RECORDS;
    pIter->Base.off32CurCmd = (pIter->Base.off32CurCmd + cbBuffer) % pVBVA->cbData;

    if (pcbBuffer)
        *pcbBuffer = cbBuffer;

    return pvData;
}
708
709/**/
710
711int VBoxCmdVbvaEnable(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
712{
713 return VBoxVBVAExEnable(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, pVbva->Vbva.pVBVA);
714}
715
716int VBoxCmdVbvaDisable(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
717{
718 VBoxVBVAExDisable(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx);
719 return VINF_SUCCESS;
720}
721
722int VBoxCmdVbvaDestroy(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
723{
724 int rc = VINF_SUCCESS;
725 VBoxMPCmnUnmapAdapterMemory(VBoxCommonFromDeviceExt(pDevExt), (void**)&pVbva->Vbva.pVBVA);
726 memset(pVbva, 0, sizeof (*pVbva));
727 return rc;
728}
729
/**
 * Raises a DXGK DMA completed/preempted/faulted interrupt notification for
 * the given fence.  Must be called at device interrupt synchronization level
 * (DxgkCbNotifyInterrupt requirement).
 */
static void vboxCmdVbvaDdiNotifyCompleteIrq(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, UINT u32FenceId, DXGK_INTERRUPT_TYPE enmComplType)
{
    DXGKARGCB_NOTIFY_INTERRUPT_DATA notify;
    memset(&notify, 0, sizeof(DXGKARGCB_NOTIFY_INTERRUPT_DATA));
    switch (enmComplType)
    {
        case DXGK_INTERRUPT_DMA_COMPLETED:
            notify.InterruptType = DXGK_INTERRUPT_DMA_COMPLETED;
            notify.DmaCompleted.SubmissionFenceId = u32FenceId;
            notify.DmaCompleted.NodeOrdinal = pVbva->idNode;
            break;

        case DXGK_INTERRUPT_DMA_PREEMPTED:
            notify.InterruptType = DXGK_INTERRUPT_DMA_PREEMPTED;
            notify.DmaPreempted.PreemptionFenceId = u32FenceId;
            notify.DmaPreempted.NodeOrdinal = pVbva->idNode;
            notify.DmaPreempted.LastCompletedFenceId = pVbva->u32FenceCompleted;
            break;

        case DXGK_INTERRUPT_DMA_FAULTED:
            /* Not expected in normal operation. */
            Assert(0);
            notify.InterruptType = DXGK_INTERRUPT_DMA_FAULTED;
            notify.DmaFaulted.FaultedFenceId = u32FenceId;
            notify.DmaFaulted.Status = STATUS_UNSUCCESSFUL; /** @todo better status ? */
            notify.DmaFaulted.NodeOrdinal = pVbva->idNode;
            break;

        default:
            WARN(("unrecognized completion type %d", enmComplType));
            break;
    }

    pDevExt->u.primary.DxgkInterface.DxgkCbNotifyInterrupt(pDevExt->u.primary.DxgkInterface.DeviceHandle, &notify);
}
764
/** Context passed to vboxCmdVbvaDdiNotifyPreemptCb via DxgkCbSynchronizeExecution. */
typedef struct VBOXCMDVBVA_NOTIFYPREEMPT_CB
{
    PVBOXMP_DEVEXT pDevExt;   /* Device extension. */
    VBOXCMDVBVA *pVbva;       /* Command VBVA instance whose preempt map is used. */
    int rc;                   /* Out: VBox status set by the callback. */
    UINT u32SubmitFenceId;    /* Fence of the submission to preempt; 0 => report preemption immediately. */
    UINT u32PreemptFenceId;   /* Fence id to report with DXGK_INTERRUPT_DMA_PREEMPTED. */
} VBOXCMDVBVA_NOTIFYPREEMPT_CB;
773
/**
 * DxgkCbSynchronizeExecution callback for preemption requests.
 *
 * If the target submission is already processed (or no specific fence was
 * given) the preemption is reported immediately and a DPC is queued;
 * otherwise the submit/preempt fence pair is queued in aPreempt for later.
 *
 * @returns TRUE on success, FALSE when the preempt map is full.
 */
static BOOLEAN vboxCmdVbvaDdiNotifyPreemptCb(PVOID pvContext)
{
    VBOXCMDVBVA_NOTIFYPREEMPT_CB* pData = (VBOXCMDVBVA_NOTIFYPREEMPT_CB*)pvContext;
    PVBOXMP_DEVEXT pDevExt = pData->pDevExt;
    VBOXCMDVBVA *pVbva = pData->pVbva;
    Assert(pVbva->u32FenceProcessed >= pVbva->u32FenceCompleted);
    if (!pData->u32SubmitFenceId || pVbva->u32FenceProcessed == pData->u32SubmitFenceId)
    {
        /* The submission is already done (or unspecified): report right away. */
        vboxCmdVbvaDdiNotifyCompleteIrq(pDevExt, pVbva, pData->u32PreemptFenceId, DXGK_INTERRUPT_DMA_PREEMPTED);

        pDevExt->u.primary.DxgkInterface.DxgkCbQueueDpc(pDevExt->u.primary.DxgkInterface.DeviceHandle);
    }
    else
    {
        Assert(pVbva->u32FenceProcessed < pData->u32SubmitFenceId);
        Assert(pVbva->cPreempt <= VBOXCMDVBVA_PREEMPT_EL_SIZE);
        if (pVbva->cPreempt == VBOXCMDVBVA_PREEMPT_EL_SIZE)
        {
            WARN(("no more free elements in preempt map"));
            pData->rc = VERR_BUFFER_OVERFLOW;
            return FALSE;
        }
        /* Append the pair to the circular preempt queue. */
        uint32_t iNewEl = (pVbva->iCurPreempt + pVbva->cPreempt) % VBOXCMDVBVA_PREEMPT_EL_SIZE;
        Assert(iNewEl < VBOXCMDVBVA_PREEMPT_EL_SIZE);
        pVbva->aPreempt[iNewEl].u32SubmitFence = pData->u32SubmitFenceId;
        pVbva->aPreempt[iNewEl].u32PreemptFence = pData->u32PreemptFenceId;
        ++pVbva->cPreempt;
    }

    pData->rc = VINF_SUCCESS;
    return TRUE;
}
806
807static int vboxCmdVbvaDdiNotifyPreempt(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, UINT u32SubmitFenceId, UINT u32PreemptFenceId)
808{
809 VBOXCMDVBVA_NOTIFYPREEMPT_CB Data;
810 Data.pDevExt = pDevExt;
811 Data.pVbva = pVbva;
812 Data.rc = VERR_INTERNAL_ERROR;
813 Data.u32SubmitFenceId = u32SubmitFenceId;
814 Data.u32PreemptFenceId = u32PreemptFenceId;
815 BOOLEAN bDummy;
816 NTSTATUS Status = pDevExt->u.primary.DxgkInterface.DxgkCbSynchronizeExecution(
817 pDevExt->u.primary.DxgkInterface.DeviceHandle,
818 vboxCmdVbvaDdiNotifyPreemptCb,
819 &Data,
820 0, /* IN ULONG MessageNumber */
821 &bDummy);
822 if (!NT_SUCCESS(Status))
823 {
824 WARN(("DxgkCbSynchronizeExecution failed Status %#x", Status));
825 return VERR_GENERAL_FAILURE;
826 }
827
828 if (!RT_SUCCESS(Data.rc))
829 {
830 WARN(("vboxCmdVbvaDdiNotifyPreemptCb failed rc %d", Data.rc));
831 return Data.rc;
832 }
833
834 return VINF_SUCCESS;
835}
836
837static int vboxCmdVbvaFlush(PVBOXMP_DEVEXT pDevExt, HGSMIGUESTCOMMANDCONTEXT *pCtx, bool fBufferOverflow)
838{
839 RT_NOREF(pDevExt);
840
841 /* Issue the flush command. */
842 VBVACMDVBVAFLUSH *pFlush = (VBVACMDVBVAFLUSH*)VBoxHGSMIBufferAlloc(pCtx,
843 sizeof(VBVACMDVBVAFLUSH),
844 HGSMI_CH_VBVA,
845 VBVA_CMDVBVA_FLUSH);
846 if (!pFlush)
847 {
848 WARN(("VBoxHGSMIBufferAlloc failed\n"));
849 return VERR_OUT_OF_RESOURCES;
850 }
851
852 pFlush->u32Flags = fBufferOverflow ? VBVACMDVBVAFLUSH_F_GUEST_BUFFER_OVERFLOW : 0;
853
854 VBoxHGSMIBufferSubmit(pCtx, pFlush);
855
856 VBoxHGSMIBufferFree(pCtx, pFlush);
857
858 return VINF_SUCCESS;
859}
860
/** Context for vboxCmdVbvaCheckCompletedIrqCb; fences are sampled at interrupt sync level. */
typedef struct VBOXCMDVBVA_CHECK_COMPLETED_CB
{
    PVBOXMP_DEVEXT pDevExt;
    VBOXCMDVBVA *pVbva;
    /* last completed fence id */
    uint32_t u32FenceCompleted;
    /* last submitted fence id */
    uint32_t u32FenceSubmitted;
    /* last processed fence id (i.e. either completed or cancelled) */
    uint32_t u32FenceProcessed;
} VBOXCMDVBVA_CHECK_COMPLETED_CB;
872
873static BOOLEAN vboxCmdVbvaCheckCompletedIrqCb(PVOID pContext)
874{
875 VBOXCMDVBVA_CHECK_COMPLETED_CB *pCompleted = (VBOXCMDVBVA_CHECK_COMPLETED_CB*)pContext;
876 BOOLEAN bRc = DxgkDdiInterruptRoutineNew(pCompleted->pDevExt, 0);
877 if (pCompleted->pVbva)
878 {
879 pCompleted->u32FenceCompleted = pCompleted->pVbva->u32FenceCompleted;
880 pCompleted->u32FenceSubmitted = pCompleted->pVbva->u32FenceSubmitted;
881 pCompleted->u32FenceProcessed = pCompleted->pVbva->u32FenceProcessed;
882 }
883 else
884 {
885 WARN(("no vbva"));
886 pCompleted->u32FenceCompleted = 0;
887 pCompleted->u32FenceSubmitted = 0;
888 pCompleted->u32FenceProcessed = 0;
889 }
890 return bRc;
891}
892
893
894static uint32_t vboxCmdVbvaCheckCompleted(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, bool fPingHost, HGSMIGUESTCOMMANDCONTEXT *pCtx, bool fBufferOverflow, uint32_t *pu32FenceSubmitted, uint32_t *pu32FenceProcessed)
895{
896 if (fPingHost)
897 vboxCmdVbvaFlush(pDevExt, pCtx, fBufferOverflow);
898
899 VBOXCMDVBVA_CHECK_COMPLETED_CB context;
900 context.pDevExt = pDevExt;
901 context.pVbva = pVbva;
902 context.u32FenceCompleted = 0;
903 context.u32FenceSubmitted = 0;
904 context.u32FenceProcessed = 0;
905 BOOLEAN bRet;
906 NTSTATUS Status = pDevExt->u.primary.DxgkInterface.DxgkCbSynchronizeExecution(
907 pDevExt->u.primary.DxgkInterface.DeviceHandle,
908 vboxCmdVbvaCheckCompletedIrqCb,
909 &context,
910 0, /* IN ULONG MessageNumber */
911 &bRet);
912 AssertNtStatusSuccess(Status);
913
914 if (pu32FenceSubmitted)
915 *pu32FenceSubmitted = context.u32FenceSubmitted;
916
917 if (pu32FenceProcessed)
918 *pu32FenceProcessed = context.u32FenceProcessed;
919
920 return context.u32FenceCompleted;
921}
922
/**
 * PFNVBVAEXBUFFERFLUSH callback registered in VBoxCmdVbvaCreate: pings the
 * host with a flush (flagging a guest buffer overflow) and polls completions.
 * NOTE(review): "vox" in the name looks like a typo for "vbox"; renaming
 * would also require touching the registration site in VBoxCmdVbvaCreate.
 */
static DECLCALLBACK(void) voxCmdVbvaFlushCb(struct VBVAEXBUFFERCONTEXT *pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, void *pvFlush)
{
    NOREF(pCtx);
    PVBOXMP_DEVEXT pDevExt = (PVBOXMP_DEVEXT)pvFlush;

    vboxCmdVbvaCheckCompleted(pDevExt, NULL, true /*fPingHost*/, pHGSMICtx, true /*fBufferOverflow*/, NULL, NULL);
}
930
931int VBoxCmdVbvaCreate(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, ULONG offBuffer, ULONG cbBuffer)
932{
933 memset(pVbva, 0, sizeof (*pVbva));
934
935 int rc = VBoxMPCmnMapAdapterMemory(VBoxCommonFromDeviceExt(pDevExt),
936 (void**)&pVbva->Vbva.pVBVA,
937 offBuffer,
938 cbBuffer);
939 if (RT_SUCCESS(rc))
940 {
941 Assert(pVbva->Vbva.pVBVA);
942 VBoxVBVAExSetupBufferContext(&pVbva->Vbva, offBuffer, cbBuffer, voxCmdVbvaFlushCb, pDevExt);
943 }
944 else
945 {
946 WARN(("VBoxMPCmnMapAdapterMemory failed rc %d", rc));
947 }
948
949 return rc;
950}
951
/**
 * Finalizes a command reserved with VBoxCmdVbvaSubmitLock: stamps it
 * SUBMITTED with the given fence, closes the ring record and, unless the
 * host is already draining the ring, sends a VBVA_CMDVBVA_SUBMIT kick.
 */
void VBoxCmdVbvaSubmitUnlock(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, VBOXCMDVBVA_HDR* pCmd, uint32_t u32FenceID)
{
    if (u32FenceID)
        pVbva->u32FenceSubmitted = u32FenceID;
    else
        WARN(("no cmd fence specified"));

    pCmd->u8State = VBOXCMDVBVA_STATE_SUBMITTED;

    pCmd->u2.u32FenceID = u32FenceID;

    VBoxVBVAExBufferEndUpdate(&pVbva->Vbva);

    if (!VBoxVBVAExIsProcessing(&pVbva->Vbva))
    {
        /* The host is idle: kick it with an explicit submit notification. */
        HGSMIGUESTCOMMANDCONTEXT *pCtx = &VBoxCommonFromDeviceExt(pDevExt)->guestCtx;
        VBVACMDVBVASUBMIT *pSubmit = (VBVACMDVBVASUBMIT*)VBoxHGSMIBufferAlloc(pCtx,
                                                                              sizeof (VBVACMDVBVASUBMIT),
                                                                              HGSMI_CH_VBVA,
                                                                              VBVA_CMDVBVA_SUBMIT);
        if (!pSubmit)
        {
            WARN(("VBoxHGSMIBufferAlloc failed\n"));
            return;
        }

        pSubmit->u32Reserved = 0;

        VBoxHGSMIBufferSubmit(pCtx, pSubmit);

        VBoxHGSMIBufferFree(pCtx, pSubmit);
    }
}
986
/**
 * Opens a buffer update and reserves a contiguous chunk of cbCmd bytes in the
 * command VBVA ring.
 *
 * If the free space at the end of the ring is too small, the tail is filled
 * with a NOP command (flushed via a close/reopen of the update) so the real
 * allocation can restart at the beginning of the ring — command buffers are
 * never split.
 *
 * @returns Pointer into the ring where the caller must write the command, or
 *          NULL on failure.  On success the caller MUST finish with
 *          VBoxCmdVbvaSubmitUnlock().
 */
VBOXCMDVBVA_HDR* VBoxCmdVbvaSubmitLock(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, uint32_t cbCmd)
{
    if (VBoxVBVAExGetSize(&pVbva->Vbva) < cbCmd)
    {
        WARN(("buffer does not fit the vbva buffer, we do not support splitting buffers"));
        return NULL;
    }

    if (!VBoxVBVAExBufferBeginUpdate(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx))
    {
        WARN(("VBoxVBVAExBufferBeginUpdate failed!"));
        return NULL;
    }

    void* pvBuffer = VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbCmd);
    if (!pvBuffer)
    {
        LOG(("failed to allocate contiguous buffer %d bytes, trying nopping the tail", cbCmd));
        uint32_t cbTail = VBoxVBVAExGetFreeTail(&pVbva->Vbva);
        if (!cbTail)
        {
            /* NOTE(review): this early return leaves the update opened above
             * without a matching EndUpdate — confirm callers tolerate that. */
            WARN(("this is not a free tail case, cbTail is NULL"));
            return NULL;
        }

        Assert(cbTail < cbCmd);

        /* Claim the whole tail and turn it into a single NOP command. */
        pvBuffer = VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbTail);

        /* Tail allocation is expected to always succeed since cbTail was just
         * reported free; only an Assert guards the subsequent write. */
        Assert(pvBuffer);

        *((uint8_t*)pvBuffer) = VBOXCMDVBVA_OPTYPE_NOP;

        /* Close the update to make the NOP visible, then reopen for the real
         * allocation, which now starts at the beginning of the ring. */
        VBoxVBVAExBufferEndUpdate(&pVbva->Vbva);

        if (!VBoxVBVAExBufferBeginUpdate(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx))
        {
            WARN(("VBoxVBVAExBufferBeginUpdate 2 failed!"));
            return NULL;
        }

        pvBuffer = VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbCmd);
        if (!pvBuffer)
        {
            /* NOTE(review): update remains open here as well — see note above. */
            WARN(("failed to allocate contiguous buffer %d bytes", cbCmd));
            return NULL;
        }
    }

    Assert(pvBuffer);

    return (VBOXCMDVBVA_HDR*)pvBuffer;
}
1040
1041int VBoxCmdVbvaSubmit(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, struct VBOXCMDVBVA_HDR *pCmd, uint32_t u32FenceID, uint32_t cbCmd)
1042{
1043 VBOXCMDVBVA_HDR* pHdr = VBoxCmdVbvaSubmitLock(pDevExt, pVbva, cbCmd);
1044
1045 if (!pHdr)
1046 {
1047 WARN(("VBoxCmdVbvaSubmitLock failed"));
1048 return VERR_GENERAL_FAILURE;
1049 }
1050
1051 memcpy(pHdr, pCmd, cbCmd);
1052
1053 VBoxCmdVbvaSubmitUnlock(pDevExt, pVbva, pCmd, u32FenceID);
1054
1055 return VINF_SUCCESS;
1056}
1057
/**
 * Handles a preemption request for the given preempt fence.
 *
 * Walks the ring backwards (newest first), cancelling every command still in
 * the SUBMITTED state.  The first command found that is already IN_PROGRESS
 * (i.e. could not be cancelled) supplies the submit fence after which the
 * preemption will be reported; then the DDI is notified.
 *
 * @returns false always — completion is reported asynchronously via the
 *          notify path, never synchronously from here.
 */
bool VBoxCmdVbvaPreempt(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, uint32_t u32FenceID)
{
    VBVAEXBUFFERBACKWARDITER Iter;
    VBoxVBVAExBIterInit(&pVbva->Vbva, &Iter);

    uint32_t cbBuffer;
    bool fProcessed;
    uint8_t* pu8Cmd;
    uint32_t u32SubmitFence = 0;

    /* we can do it right here */
    while ((pu8Cmd = (uint8_t*)VBoxVBVAExBIterNext(&Iter, &cbBuffer, &fProcessed)) != NULL)
    {
        /* NOP fillers carry no fence and are skipped. */
        if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
            continue;

        VBOXCMDVBVA_HDR *pCmd = (VBOXCMDVBVA_HDR*)pu8Cmd;

        /* Atomically cancel a still-submitted command; also skip ones that
         * were already cancelled earlier. */
        if (ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_CANCELLED, VBOXCMDVBVA_STATE_SUBMITTED)
                || pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED)
            continue;

        Assert(pCmd->u8State == VBOXCMDVBVA_STATE_IN_PROGRESS);

        /* Host already picked this one up — preemption is reported once this
         * fence completes. */
        u32SubmitFence = pCmd->u2.u32FenceID;
        break;
    }

    vboxCmdVbvaDdiNotifyPreempt(pDevExt, pVbva, u32SubmitFence, u32FenceID);

    return false;
}
1090
/**
 * IRQ-level scan of the ring for commands the host has finished processing.
 *
 * Iterates forward from the oldest entry, retiring each processed command:
 * updates the processed/completed fence counters, raises DMA-completed
 * notifications, and drains any pending preemption entries whose submit fence
 * has now completed.
 *
 * @returns true if at least one completion or preemption was notified (the
 *          caller should then request a DPC), false otherwise.
 */
bool VBoxCmdVbvaCheckCompletedIrq(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
{
    VBVAEXBUFFERFORWARDITER Iter;
    VBoxVBVAExCFIterInit(&pVbva->Vbva, &Iter);

    bool fHasCommandsCompletedPreempted = false;
    bool fProcessed;
    uint8_t* pu8Cmd;


    while ((pu8Cmd = (uint8_t*)VBoxVBVAExCFIterNext(&Iter, NULL, &fProcessed)) != NULL)
    {
        /* Commands complete in ring order: stop at the first one the host has
         * not yet processed. */
        if (!fProcessed)
            break;

        if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
            continue;

        VBOXCMDVBVA_HDR *pCmd = (VBOXCMDVBVA_HDR*)pu8Cmd;
        /* Snapshot header fields before releasing the ring slot below. */
        uint8_t u8State = pCmd->u8State;
        uint32_t u32FenceID = pCmd->u2.u32FenceID;

        Assert(u8State == VBOXCMDVBVA_STATE_IN_PROGRESS
                || u8State == VBOXCMDVBVA_STATE_CANCELLED);
        Assert(u32FenceID);
        /* Return the slot to the ring regardless of how the command ended. */
        VBoxVBVAExCBufferCompleted(&pVbva->Vbva);

        if (!u32FenceID)
        {
            WARN(("fence is NULL"));
            continue;
        }

        pVbva->u32FenceProcessed = u32FenceID;

        if (u8State == VBOXCMDVBVA_STATE_IN_PROGRESS)
            pVbva->u32FenceCompleted = u32FenceID;
        else
        {
            /* Cancelled commands are retired silently — no DDI notification. */
            Assert(u8State == VBOXCMDVBVA_STATE_CANCELLED);
            continue;
        }

        Assert(u32FenceID);
        vboxCmdVbvaDdiNotifyCompleteIrq(pDevExt, pVbva, u32FenceID, DXGK_INTERRUPT_DMA_COMPLETED);

        /* If a preemption was queued against this submit fence, report it now
         * and advance the circular preempt queue. */
        if (pVbva->cPreempt && pVbva->aPreempt[pVbva->iCurPreempt].u32SubmitFence == u32FenceID)
        {
            Assert(pVbva->aPreempt[pVbva->iCurPreempt].u32PreemptFence);
            vboxCmdVbvaDdiNotifyCompleteIrq(pDevExt, pVbva, pVbva->aPreempt[pVbva->iCurPreempt].u32PreemptFence, DXGK_INTERRUPT_DMA_PREEMPTED);
            --pVbva->cPreempt;
            if (!pVbva->cPreempt)
                pVbva->iCurPreempt = 0;
            else
            {
                ++pVbva->iCurPreempt;
                pVbva->iCurPreempt %= VBOXCMDVBVA_PREEMPT_EL_SIZE;
            }
        }

        fHasCommandsCompletedPreempted = true;
    }

#ifdef DEBUG
    vboxHwBufferVerifyCompleted(&pVbva->Vbva);
#endif

    return fHasCommandsCompletedPreempted;
}
1160
1161uint32_t VBoxCmdVbvaCheckCompleted(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, bool fPingHost, uint32_t *pu32FenceSubmitted, uint32_t *pu32FenceProcessed)
1162{
1163 return vboxCmdVbvaCheckCompleted(pDevExt, pVbva, fPingHost, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, false /* fBufferOverflow */, pu32FenceSubmitted, pu32FenceProcessed);
1164}
1165
#if 0
/* Disabled: old SYSMEMEL-based page-list encoding (run-length compressed PFN
 * ranges).  Superseded by the plain page-index implementation of
 * VBoxCVDdiPTransferVRamSysBuildEls further down in this file. */
static uint32_t vboxCVDdiSysMemElBuild(VBOXCMDVBVA_SYSMEMEL *pEl, PMDL pMdl, uint32_t iPfn, uint32_t cPages)
{
    PFN_NUMBER cur = MmGetMdlPfnArray(pMdl)[iPfn];
    uint32_t cbEl = sizeof (*pEl);
    uint32_t cStoredPages = 1;
    PFN_NUMBER next;
    pEl->iPage1 = (uint32_t)(cur & 0xfffff);
    pEl->iPage2 = (uint32_t)(cur >> 20);
    --cPages;
    for ( ; cPages && cStoredPages < VBOXCMDVBVA_SYSMEMEL_CPAGES_MAX; --cPages, ++cStoredPages, cur = next)
    {
        next = MmGetMdlPfnArray(pMdl)[iPfn+cStoredPages];
        if (next != cur+1)
            break;
    }

    Assert(cStoredPages);
    pEl->cPagesAfterFirst = cStoredPages - 1;

    return cPages;
}

uint32_t VBoxCVDdiPTransferVRamSysBuildEls(VBOXCMDVBVA_PAGING_TRANSFER *pCmd, PMDL pMdl, uint32_t iPfn, uint32_t cPages, uint32_t cbBuffer, uint32_t *pcPagesWritten)
{
    uint32_t cInitPages = cPages;
    uint32_t cbInitBuffer = cbBuffer;
    uint32_t cEls = 0;
    VBOXCMDVBVA_SYSMEMEL *pEl = pCmd->aSysMem;

    Assert(cbBuffer >= sizeof (VBOXCMDVBVA_PAGING_TRANSFER));

    cbBuffer -= RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, aSysMem);

    for (; cPages && cbBuffer >= sizeof (VBOXCMDVBVA_SYSMEMEL); ++cEls, cbBuffer-=sizeof (VBOXCMDVBVA_SYSMEMEL), ++pEl)
    {
        cPages = vboxCVDdiSysMemElBuild(pEl, pMdl, iPfn + cInitPages - cPages, cPages);
    }

    *pcPagesWritten = cInitPages - cPages;
    return cbInitBuffer - cbBuffer;
}
#endif
1209
1210uint32_t VBoxCVDdiPTransferVRamSysBuildEls(VBOXCMDVBVA_PAGING_TRANSFER *pCmd, PMDL pMdl, uint32_t iPfn, uint32_t cPages, uint32_t cbBuffer, uint32_t *pcPagesWritten)
1211{
1212 uint32_t cbInitBuffer = cbBuffer;
1213 uint32_t i = 0;
1214 VBOXCMDVBVAPAGEIDX *pPageNumbers = pCmd->Data.aPageNumbers;
1215
1216 cbBuffer -= RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers);
1217
1218 for (; i < cPages && cbBuffer >= sizeof (*pPageNumbers); ++i, cbBuffer -= sizeof (*pPageNumbers))
1219 {
1220 pPageNumbers[i] = (VBOXCMDVBVAPAGEIDX)(MmGetMdlPfnArray(pMdl)[iPfn + i]);
1221 }
1222
1223 *pcPagesWritten = i;
1224 Assert(cbInitBuffer - cbBuffer == RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers[i]));
1225 Assert(cbInitBuffer - cbBuffer >= sizeof (VBOXCMDVBVA_PAGING_TRANSFER));
1226 return cbInitBuffer - cbBuffer;
1227}
1228
1229
1230int vboxCmdVbvaConConnect(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
1231 uint32_t crVersionMajor, uint32_t crVersionMinor,
1232 uint32_t *pu32ClientID)
1233{
1234 VBOXCMDVBVA_CTL_3DCTL_CONNECT *pConnect = (VBOXCMDVBVA_CTL_3DCTL_CONNECT*)vboxCmdVbvaCtlCreate(pHGSMICtx, sizeof (VBOXCMDVBVA_CTL_3DCTL_CONNECT));
1235 if (!pConnect)
1236 {
1237 WARN(("vboxCmdVbvaCtlCreate failed"));
1238 return VERR_OUT_OF_RESOURCES;
1239 }
1240 pConnect->Hdr.u32Type = VBOXCMDVBVACTL_TYPE_3DCTL;
1241 pConnect->Hdr.i32Result = VERR_NOT_SUPPORTED;
1242 pConnect->Connect.Hdr.u32Type = VBOXCMDVBVA3DCTL_TYPE_CONNECT;
1243 pConnect->Connect.Hdr.u32CmdClientId = 0;
1244 pConnect->Connect.u32MajorVersion = crVersionMajor;
1245 pConnect->Connect.u32MinorVersion = crVersionMinor;
1246 pConnect->Connect.u64Pid = (uintptr_t)PsGetCurrentProcessId();
1247
1248 int rc = vboxCmdVbvaCtlSubmitSync(pHGSMICtx, &pConnect->Hdr);
1249 if (RT_SUCCESS(rc))
1250 {
1251 rc = pConnect->Hdr.i32Result;
1252 if (RT_SUCCESS(rc))
1253 {
1254 Assert(pConnect->Connect.Hdr.u32CmdClientId);
1255 *pu32ClientID = pConnect->Connect.Hdr.u32CmdClientId;
1256 }
1257 else
1258 WARN(("VBOXCMDVBVA3DCTL_TYPE_CONNECT Disable failed %d", rc));
1259 }
1260 else
1261 WARN(("vboxCmdVbvaCtlSubmitSync returnd %d", rc));
1262
1263 vboxCmdVbvaCtlFree(pHGSMICtx, &pConnect->Hdr);
1264
1265 return rc;
1266}
1267
1268int vboxCmdVbvaConDisconnect(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, uint32_t u32ClientID)
1269{
1270 VBOXCMDVBVA_CTL_3DCTL *pDisconnect = (VBOXCMDVBVA_CTL_3DCTL*)vboxCmdVbvaCtlCreate(pHGSMICtx, sizeof (VBOXCMDVBVA_CTL_3DCTL));
1271 if (!pDisconnect)
1272 {
1273 WARN(("vboxCmdVbvaCtlCreate failed"));
1274 return VERR_OUT_OF_RESOURCES;
1275 }
1276 pDisconnect->Hdr.u32Type = VBOXCMDVBVACTL_TYPE_3DCTL;
1277 pDisconnect->Hdr.i32Result = VERR_NOT_SUPPORTED;
1278 pDisconnect->Ctl.u32Type = VBOXCMDVBVA3DCTL_TYPE_DISCONNECT;
1279 pDisconnect->Ctl.u32CmdClientId = u32ClientID;
1280
1281 int rc = vboxCmdVbvaCtlSubmitSync(pHGSMICtx, &pDisconnect->Hdr);
1282 if (RT_SUCCESS(rc))
1283 {
1284 rc = pDisconnect->Hdr.i32Result;
1285 if (!RT_SUCCESS(rc))
1286 WARN(("VBOXCMDVBVA3DCTL_TYPE_DISCONNECT Disable failed %d", rc));
1287 }
1288 else
1289 WARN(("vboxCmdVbvaCtlSubmitSync returnd %d", rc));
1290
1291 vboxCmdVbvaCtlFree(pHGSMICtx, &pDisconnect->Hdr);
1292
1293 return rc;
1294}
1295
1296int VBoxCmdVbvaConConnect(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva,
1297 uint32_t crVersionMajor, uint32_t crVersionMinor,
1298 uint32_t *pu32ClientID)
1299{
1300 RT_NOREF(pVbva);
1301 return vboxCmdVbvaConConnect(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, crVersionMajor, crVersionMinor, pu32ClientID);
1302}
1303
1304int VBoxCmdVbvaConDisconnect(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, uint32_t u32ClientID)
1305{
1306 RT_NOREF(pVbva);
1307 return vboxCmdVbvaConDisconnect(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, u32ClientID);
1308}
1309
1310VBOXCMDVBVA_CRCMD_CMD* vboxCmdVbvaConCmdAlloc(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, uint32_t cbCmd)
1311{
1312 VBOXCMDVBVA_CTL_3DCTL_CMD *pCmd = (VBOXCMDVBVA_CTL_3DCTL_CMD*)vboxCmdVbvaCtlCreate(pHGSMICtx, sizeof (VBOXCMDVBVA_CTL_3DCTL_CMD) + cbCmd);
1313 if (!pCmd)
1314 {
1315 WARN(("vboxCmdVbvaCtlCreate failed"));
1316 return NULL;
1317 }
1318 pCmd->Hdr.u32Type = VBOXCMDVBVACTL_TYPE_3DCTL;
1319 pCmd->Hdr.i32Result = VERR_NOT_SUPPORTED;
1320 pCmd->Cmd.Hdr.u32Type = VBOXCMDVBVA3DCTL_TYPE_CMD;
1321 pCmd->Cmd.Hdr.u32CmdClientId = 0;
1322 pCmd->Cmd.Cmd.u8OpCode = VBOXCMDVBVA_OPTYPE_CRCMD;
1323 pCmd->Cmd.Cmd.u8Flags = 0;
1324 pCmd->Cmd.Cmd.u8State = VBOXCMDVBVA_STATE_SUBMITTED;
1325 pCmd->Cmd.Cmd.u.i8Result = -1;
1326 pCmd->Cmd.Cmd.u2.u32FenceID = 0;
1327
1328 return (VBOXCMDVBVA_CRCMD_CMD*)(pCmd+1);
1329}
1330
1331void vboxCmdVbvaConCmdFree(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CRCMD_CMD *pCmd)
1332{
1333 VBOXCMDVBVA_CTL_3DCTL_CMD *pHdr = ((VBOXCMDVBVA_CTL_3DCTL_CMD*)pCmd)-1;
1334 vboxCmdVbvaCtlFree(pHGSMICtx, &pHdr->Hdr);
1335}
1336
1337int vboxCmdVbvaConSubmitAsync(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CRCMD_CMD* pCmd, FNVBOXSHGSMICMDCOMPLETION pfnCompletion, void *pvCompletion)
1338{
1339 VBOXCMDVBVA_CTL_3DCTL_CMD *pHdr = ((VBOXCMDVBVA_CTL_3DCTL_CMD*)pCmd)-1;
1340 return vboxCmdVbvaCtlSubmitAsync(pHGSMICtx, &pHdr->Hdr, pfnCompletion, pvCompletion);
1341}
1342
1343VBOXCMDVBVA_CRCMD_CMD* VBoxCmdVbvaConCmdAlloc(PVBOXMP_DEVEXT pDevExt, uint32_t cbCmd)
1344{
1345 return vboxCmdVbvaConCmdAlloc(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbCmd);
1346}
1347
1348void VBoxCmdVbvaConCmdFree(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA_CRCMD_CMD *pCmd)
1349{
1350 vboxCmdVbvaConCmdFree(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, pCmd);
1351}
1352
1353int VBoxCmdVbvaConCmdSubmitAsync(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA_CRCMD_CMD* pCmd, FNVBOXSHGSMICMDCOMPLETION pfnCompletion, void *pvCompletion)
1354{
1355 return vboxCmdVbvaConSubmitAsync(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, pCmd, pfnCompletion, pvCompletion);
1356}
1357
1358int VBoxCmdVbvaConCmdCompletionData(void *pvCmd, VBOXCMDVBVA_CRCMD_CMD **ppCmd)
1359{
1360 VBOXCMDVBVA_CTL_3DCTL_CMD *pCmd = (VBOXCMDVBVA_CTL_3DCTL_CMD*)pvCmd;
1361 if (ppCmd)
1362 *ppCmd = (VBOXCMDVBVA_CRCMD_CMD*)(pCmd+1);
1363 return pCmd->Hdr.i32Result;
1364}
1365
1366int VBoxCmdVbvaConCmdResize(PVBOXMP_DEVEXT pDevExt, const VBOXWDDM_ALLOC_DATA *pAllocData, const uint32_t *pTargetMap, const POINT * pVScreenPos, uint16_t fFlags)
1367{
1368 Assert(KeGetCurrentIrql() < DISPATCH_LEVEL);
1369
1370 VBOXCMDVBVA_CTL_RESIZE *pResize = (VBOXCMDVBVA_CTL_RESIZE*)vboxCmdVbvaCtlCreate(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, sizeof (VBOXCMDVBVA_CTL_RESIZE));
1371 if (!pResize)
1372 {
1373 WARN(("vboxCmdVbvaCtlCreate failed"));
1374 return VERR_OUT_OF_RESOURCES;
1375 }
1376
1377 pResize->Hdr.u32Type = VBOXCMDVBVACTL_TYPE_RESIZE;
1378 pResize->Hdr.i32Result = VERR_NOT_IMPLEMENTED;
1379
1380 int rc = vboxWddmScreenInfoInit(&pResize->Resize.aEntries[0].Screen, pAllocData, pVScreenPos, fFlags);
1381 if (RT_SUCCESS(rc))
1382 {
1383 VBOXCMDVBVA_RESIZE_ENTRY* pEntry = &pResize->Resize.aEntries[0];
1384 memcpy(pEntry->aTargetMap, pTargetMap, sizeof (pEntry->aTargetMap));
1385 LOG(("[%d] %dx%d, TargetMap0 0x%x, flags 0x%x",
1386 pEntry->Screen.u32ViewIndex, pEntry->Screen.u32Width, pEntry->Screen.u32Height, pEntry->aTargetMap[0], pEntry->Screen.u16Flags));
1387
1388 rc = vboxCmdVbvaCtlSubmitSync(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, &pResize->Hdr);
1389 if (RT_SUCCESS(rc))
1390 {
1391 rc = pResize->Hdr.i32Result;
1392 if (RT_FAILURE(rc))
1393 WARN(("VBOXCMDVBVACTL_TYPE_RESIZE failed %d", rc));
1394 }
1395 else
1396 WARN(("vboxCmdVbvaCtlSubmitSync failed %d", rc));
1397 }
1398 else
1399 WARN(("vboxWddmScreenInfoInit failed %d", rc));
1400
1401 vboxCmdVbvaCtlFree(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx, &pResize->Hdr);
1402
1403 return rc;
1404}
1405#endif
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette