VirtualBox

source: vbox/trunk/src/VBox/Devices/VirtIO/Virtio_1_0.cpp@ 80201

Last change on this file since 80201 was 80201, checked in by vboxsync, 6 years ago

Fix doxygen burn and fixed MMIO related macros, and notification area configuration

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 47.6 KB
Line 
1/* $Id: Virtio_1_0.cpp 80201 2019-08-08 17:36:15Z vboxsync $ */
2/** @file
3 * Virtio_1_0 - Virtio Common Functions (VirtQ, VQueue, Virtio PCI)
4 */
5
6/*
7 * Copyright (C) 2009-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEV_VIRTIO
23
24#include <VBox/log.h>
25#include <iprt/param.h>
26#include <iprt/assert.h>
27#include <iprt/uuid.h>
28#include <iprt/mem.h>
29#include <VBox/vmm/pdmdev.h>
30#include "Virtio_1_0_impl.h"
31#include "Virtio_1_0.h"
32
33#define INSTANCE(pState) pState->szInstance
34#define IFACE_TO_STATE(pIface, ifaceName) ((VIRTIOSTATE *)((char*)(pIface) - RT_UOFFSETOF(VIRTIOSTATE, ifaceName)))
35
36#define H2P(hVirtio) ((PVIRTIOSTATE)(hVirtio))
37
38#ifdef LOG_ENABLED
39# define QUEUENAME(s, q) (q->pcszName)
40#endif
41
42/**
43 * Formats the logging of a memory-mapped I/O input or output value
44 *
45 * @param pszFunc - To avoid displaying this function's name via __FUNCTION__ or LogFunc()
46 * @param pszMember - Name of struct member
47 * @param pv - pointer to value
48 * @param cb - size of value
49 * @param uOffset - offset into member where value starts
50 * @param fWrite - True if write I/O
51 * @param fHasIndex - True if the member is indexed
52 * @param idx - The index if fHasIndex
53 */
54void virtioLogMappedIoValue(const char *pszFunc, const char *pszMember, const void *pv, uint32_t cb,
55 uint32_t uOffset, bool fWrite, bool fHasIndex, uint32_t idx)
56{
57
58#define FMTHEX(fmtout, val, cNybs) \
59 fmtout[cNybs] = '\0'; \
60 for (uint8_t i = 0; i < cNybs; i++) \
61 fmtout[(cNybs - i) -1] = "0123456789abcdef"[(val >> (i * 4)) & 0xf];
62
63#define MAX_STRING 64
64 char pszIdx[MAX_STRING] = { 0 };
65 char pszDepiction[MAX_STRING] = { 0 };
66 char pszFormattedVal[MAX_STRING] = { 0 };
67 if (fHasIndex)
68 RTStrPrintf(pszIdx, sizeof(pszIdx), "[%d]", idx);
69 if (cb == 1 || cb == 2 || cb == 4 || cb == 8)
70 {
71 /* manually padding with 0's instead of \b due to different impl of %x precision than printf() */
72 uint64_t val = 0;
73 memcpy((char *)&val, pv, cb);
74 FMTHEX(pszFormattedVal, val, cb * 2);
75 if (uOffset != 0) /* display bounds if partial member access */
76 RTStrPrintf(pszDepiction, sizeof(pszDepiction), "%s%s[%d:%d]",
77 pszMember, pszIdx, uOffset, uOffset + cb - 1);
78 else
79 RTStrPrintf(pszDepiction, sizeof(pszDepiction), "%s%s", pszMember, pszIdx);
80 RTStrPrintf(pszDepiction, sizeof(pszDepiction), "%-30s", pszDepiction);
81 uint32_t first = 0;
82 for (uint8_t i = 0; i < sizeof(pszDepiction); i++)
83 if (pszDepiction[i] == ' ' && first++ != 0)
84 pszDepiction[i] = '.';
85 Log(("%s: Guest %s %s 0x%s\n", \
86 pszFunc, fWrite ? "wrote" : "read ", pszDepiction, pszFormattedVal));
87 }
88 else /* odd number or oversized access, ... log inline hex-dump style */
89 {
90 Log(("%s: Guest %s %s%s[%d:%d]: %.*Rhxs\n", \
91 pszFunc, fWrite ? "wrote" : "read ", pszMember,
92 pszIdx, uOffset, uOffset + cb, cb, pv));
93 }
94}
95
96
97void virtQueueReadDesc(PVIRTIOSTATE pState, PVIRTQ pVirtQ, uint32_t idx, PVIRTQ_DESC_T pDesc)
98{
99 //Log(("%s virtQueueReadDesc: ring=%p idx=%u\n", INSTANCE(pState), pVirtQ, idx));
100 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
101 pVirtQ->pGcPhysVirtqDescriptors + sizeof(VIRTQ_DESC_T) * (idx % pVirtQ->cbQueue),
102 pDesc, sizeof(VIRTQ_DESC_T));
103}
104
105uint16_t virtQueueReadAvail(PVIRTIOSTATE pState, PVIRTQ pVirtQ, uint32_t idx)
106{
107 uint16_t tmp;
108 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
109 pVirtQ->pGcPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[idx % pVirtQ->cbQueue]),
110 &tmp, sizeof(tmp));
111 return tmp;
112}
113
114uint16_t virtQueueReadAvailFlags(PVIRTIOSTATE pState, PVIRTQ pVirtQ)
115{
116 uint16_t tmp;
117 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
118 pVirtQ->pGcPhysVirtqAvail + RT_UOFFSETOF(VIRTQ_AVAIL_T, fFlags),
119 &tmp, sizeof(tmp));
120 return tmp;
121}
122
123uint16_t virtQueueReadUsedIndex(PVIRTIOSTATE pState, PVIRTQ pVirtQ)
124{
125 uint16_t tmp;
126 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
127 pVirtQ->pGcPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, uIdx),
128 &tmp, sizeof(tmp));
129 return tmp;
130}
131
132void virtQueueWriteUsedIndex(PVIRTIOSTATE pState, PVIRTQ pVirtQ, uint16_t u16Value)
133{
134 PDMDevHlpPCIPhysWrite(pState->CTX_SUFF(pDevIns),
135 pVirtQ->pGcPhysVirtqAvail + RT_UOFFSETOF(VIRTQ_USED_T, uIdx),
136 &u16Value, sizeof(u16Value));
137}
138
/**
 * Write one element (descriptor-chain id and total length) into the used ring.
 *
 * Currently a stub: all parameters are ignored. The legacy (pre-1.0)
 * implementation is preserved in the comment below pending adaptation
 * to the VirtIO 1.0 split-ring layout.
 */
void virtQueueWriteUsedElem(PVIRTIOSTATE pState, PVIRTQ pVirtQ, uint32_t idx, uint32_t id, uint32_t uLen)
{

    RT_NOREF5(pState, pVirtQ, idx, id, uLen);
    /* PK TODO: Adapt to VirtIO 1.0
    VIRTQ_USED_ELEM_T elem;

    elem.id = id;
    elem.uLen = uLen;
    PDMDevHlpPCIPhysWrite(pState->CTX_SUFF(pDevIns),
                          pVirtQ->pGcPhysVirtqUsed + RT_UOFFSETOF_DYN(VIRTQ_USED_T, ring[idx % pVirtQ->cbQueue]),
                          &elem, sizeof(elem));
    */
}
153
/**
 * Enable or disable guest->device notifications for the given virtq.
 *
 * Currently a stub: all parameters are ignored. The legacy implementation
 * that toggled VIRTQ_USED_T_F_NO_NOTIFY is preserved below pending
 * adaptation to VirtIO 1.0. NOTE(review): the commented code applies a
 * VIRTQ_USED_T offset to the avail ring address — re-check when reviving it.
 */
void virtQueueSetNotification(PVIRTIOSTATE pState, PVIRTQ pVirtQ, bool fEnabled)
{
    RT_NOREF3(pState, pVirtQ, fEnabled);

/* PK TODO: Adapt to VirtIO 1.0
    uint16_t tmp;

    PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
                      pVirtQ->pGcPhysVirtqAvail + RT_UOFFSETOF(VIRTQ_USED_T, uFlags),
                      &tmp, sizeof(tmp));

    if (fEnabled)
        tmp &= ~ VIRTQ_USED_T_F_NO_NOTIFY;
    else
        tmp |= VIRTQ_USED_T_F_NO_NOTIFY;

    PDMDevHlpPCIPhysWrite(pState->CTX_SUFF(pDevIns),
                          pVirtQ->pGcPhysVirtqAvail + RT_UOFFSETOF(VIRTQ_USED_T, uFlags),
                          &tmp, sizeof(tmp));
*/
}
175
/**
 * Skip the next available descriptor chain without processing it.
 *
 * Currently a stub: parameters are ignored and true is always returned
 * (the legacy implementation returned false when the queue was empty).
 */
bool virtQueueSkip(PVIRTIOSTATE pState, PVQUEUE pQueue)
{

    RT_NOREF2(pState, pQueue);
/* PK TODO Adapt to VirtIO 1.0
    if (virtQueueIsEmpty(pState, pQueue))
        return false;

    Log2(("%s virtQueueSkip: %s avail_idx=%u\n", INSTANCE(pState),
          QUEUENAME(pState, pQueue), pQueue->uNextAvailIndex));
    pQueue->uNextAvailIndex++;
*/
    return true;
}
190
/**
 * Fetch the next available descriptor chain from the given queue into pElem.
 *
 * Currently a stub: parameters are ignored and true is always returned.
 * The legacy implementation below (pending VirtIO 1.0 adaptation) walked the
 * descriptor chain, splitting device-writable segments into aSegsIn and
 * device-readable segments into aSegsOut, with a loop-guard against
 * maliciously circular descriptor chains (@bugref{8620}).
 */
bool virtQueueGet(PVIRTIOSTATE pState, PVQUEUE pQueue, PVQUEUEELEM pElem, bool fRemove)
{

    RT_NOREF4(pState, pQueue, pElem, fRemove);

/* PK TODO: Adapt to VirtIO 1.0
    if (virtQueueIsEmpty(pState, pQueue))
        return false;

    pElem->nIn = pElem->nOut = 0;

    Log2(("%s virtQueueGet: %s avail_idx=%u\n", INSTANCE(pState),
          QUEUENAME(pState, pQueue), pQueue->uNextAvailIndex));

    VIRTQ_DESC_T desc;
    uint16_t idx = virtQueueReadAvail(pState, &pQueue->VirtQ, pQueue->uNextAvailIndex);
    if (fRemove)
        pQueue->uNextAvailIndex++;
    pElem->idx = idx;
    do
    {
        VQUEUESEG *pSeg;

        //
        // Malicious guests may try to trick us into writing beyond aSegsIn or
        // aSegsOut boundaries by linking several descriptors into a loop. We
        // cannot possibly get a sequence of linked descriptors exceeding the
        // total number of descriptors in the ring (see @bugref{8620}).
        ///
        if (pElem->nIn + pElem->nOut >= VIRTQ_MAX_SIZE)
        {
            static volatile uint32_t s_cMessages = 0;
            static volatile uint32_t s_cThreshold = 1;
            if (ASMAtomicIncU32(&s_cMessages) == ASMAtomicReadU32(&s_cThreshold))
            {
                LogRel(("%s: too many linked descriptors; check if the guest arranges descriptors in a loop.\n",
                        INSTANCE(pState)));
                if (ASMAtomicReadU32(&s_cMessages) != 1)
                    LogRel(("%s: (the above error has occured %u times so far)\n",
                            INSTANCE(pState), ASMAtomicReadU32(&s_cMessages)));
                ASMAtomicWriteU32(&s_cThreshold, ASMAtomicReadU32(&s_cThreshold) * 10);
            }
            break;
        }
        RT_UNTRUSTED_VALIDATED_FENCE();

        virtQueueReadDesc(pState, &pQueue->VirtQ, idx, &desc);
        if (desc.u16Flags & VIRTQ_DESC_T_F_WRITE)
        {
            Log2(("%s virtQueueGet: %s IN seg=%u desc_idx=%u addr=%p cb=%u\n", INSTANCE(pState),
                  QUEUENAME(pState, pQueue), pElem->nIn, idx, desc.addr, desc.uLen));
            pSeg = &pElem->aSegsIn[pElem->nIn++];
        }
        else
        {
            Log2(("%s virtQueueGet: %s OUT seg=%u desc_idx=%u addr=%p cb=%u\n", INSTANCE(pState),
                  QUEUENAME(pState, pQueue), pElem->nOut, idx, desc.addr, desc.uLen));
            pSeg = &pElem->aSegsOut[pElem->nOut++];
        }

        pSeg->addr = desc.addr;
        pSeg->cb = desc.uLen;
        pSeg->pv = NULL;

        idx = desc.next;
    } while (desc.u16Flags & VIRTQ_DESC_T_F_NEXT);

    Log2(("%s virtQueueGet: %s head_desc_idx=%u nIn=%u nOut=%u\n", INSTANCE(pState),
          QUEUENAME(pState, pQueue), pElem->idx, pElem->nIn, pElem->nOut));
*/
    return true;
}
263
264
265
/**
 * Return a processed descriptor chain to the guest via the used ring.
 *
 * Currently a stub: all parameters are ignored. The legacy implementation
 * below (pending VirtIO 1.0 rework) copied up to uTotalLen bytes (skipping
 * the first uReserved bytes) from the element's IN segments into guest
 * memory, then appended an id/len pair to the used ring.
 */
void virtQueuePut(PVIRTIOSTATE pState, PVQUEUE pQueue,
                  PVQUEUEELEM pElem, uint32_t uTotalLen, uint32_t uReserved)
{

    RT_NOREF5(pState, pQueue, pElem, uTotalLen, uReserved);

/* PK TODO Re-work this for VirtIO 1.0
    Log2(("%s virtQueuePut: %s"
          " desc_idx=%u acb=%u (%u)\n",
          INSTANCE(pState), QUEUENAME(pState, pQueue),
          pElem->idx, uTotalLen, uReserved));

    Assert(uReserved < uTotalLen);

    uint32_t cbLen = uTotalLen - uReserved;
    uint32_t cbSkip = uReserved;

    for (unsigned i = 0; i < pElem->nIn && cbLen > 0; ++i)
    {
        if (cbSkip >= pElem->aSegsIn[i].cb) // segment completely skipped?
        {
            cbSkip -= pElem->aSegsIn[i].cb;
            continue;
        }

        uint32_t cbSegLen = pElem->aSegsIn[i].cb - cbSkip;
        if (cbSegLen > cbLen) // last segment only partially used?
            cbSegLen = cbLen;

        //
        // XXX: We should assert pv != NULL, but we need to check and
        // fix all callers first.
        //
        if (pElem->aSegsIn[i].pv != NULL)
        {
            Log2(("%s virtQueuePut: %s"
                  " used_idx=%u seg=%u addr=%p pv=%p cb=%u acb=%u\n",
                  INSTANCE(pState), QUEUENAME(pState, pQueue),
                  pQueue->uNextUsedIndex, i,
                  (void *)pElem->aSegsIn[i].addr, pElem->aSegsIn[i].pv,
                  pElem->aSegsIn[i].cb, cbSegLen));

            PDMDevHlpPCIPhysWrite(pState->CTX_SUFF(pDevIns),
                                  pElem->aSegsIn[i].addr + cbSkip,
                                  pElem->aSegsIn[i].pv,
                                  cbSegLen);
        }

        cbSkip = 0;
        cbLen -= cbSegLen;
    }

    Log2(("%s virtQueuePut: %s"
          " used_idx=%u guest_used_idx=%u id=%u len=%u\n",
          INSTANCE(pState), QUEUENAME(pState, pQueue),
          pQueue->uNextUsedIndex, virtQueueReadUsedIndex(pState, &pQueue->VirtQ),
          pElem->idx, uTotalLen));

    virtQueueWriteUsedElem(pState, &pQueue->VirtQ,
                           pQueue->uNextUsedIndex++,
                           pElem->idx, uTotalLen);
*/

}
330
331
/**
 * Raise an interrupt to notify the guest about used-ring activity on a queue.
 *
 * Currently a stub: parameters are ignored. The legacy implementation below
 * (pending VirtIO 1.0 adaptation) honored VIRTQ_AVAIL_T_F_NO_INTERRUPT and
 * the VIRTIO_F_NOTIFY_ON_EMPTY feature before raising the queue interrupt.
 */
void virtQueueNotify(PVIRTIOSTATE pState, PVQUEUE pQueue)
{

    RT_NOREF2(pState, pQueue);
/* PK TODO Adapt to VirtIO 1.0
    LogFlow(("%s virtQueueNotify: %s availFlags=%x guestFeatures=%x virtQueue is %sempty\n",
             INSTANCE(pState), QUEUENAME(pState, pQueue),
             virtQueueReadAvailFlags(pState, &pQueue->VirtQ),
             pState->uGuestFeatures, virtQueueIsEmpty(pState, pQueue)?"":"not "));
    if (!(virtQueueReadAvailFlags(pState, &pQueue->VirtQ) & VIRTQ_AVAIL_T_F_NO_INTERRUPT)
        || ((pState->uGuestFeatures & VIRTIO_F_NOTIFY_ON_EMPTY) && virtQueueIsEmpty(pState, pQueue)))
    {
        int rc = virtioRaiseInterrupt(pState, VERR_INTERNAL_ERROR, VIRTIO_ISR_QUEUE);
        if (RT_FAILURE(rc))
            Log(("%s virtQueueNotify: Failed to raise an interrupt (%Rrc).\n", INSTANCE(pState), rc));
    }
*/
}
350
/**
 * Publish the device's used-ring progress to the guest and notify it.
 *
 * Currently a stub: parameters are ignored. The legacy implementation wrote
 * uNextUsedIndex to the guest-visible used.idx and then called virtQueueNotify.
 */
void virtQueueSync(PVIRTIOSTATE pState, PVQUEUE pQueue)
{
    RT_NOREF(pState, pQueue);
/* PK TODO Adapt to VirtIO 1.0
    Log2(("%s virtQueueSync: %s old_used_idx=%u new_used_idx=%u\n", INSTANCE(pState),
          QUEUENAME(pState, pQueue), virtQueueReadUsedIndex(pState, &pQueue->VirtQ), pQueue->uNextUsedIndex));
    virtQueueWriteUsedIndex(pState, &pQueue->VirtQ, pQueue->uNextUsedIndex);
    virtQueueNotify(pState, pQueue);
*/
}
361
362
363
364/**
365 * Raise interrupt.
366 *
367 * @param pState The device state structure.
368 * @param rcBusy Status code to return when the critical section is busy.
369 * @param u8IntCause Interrupt cause bit mask to set in PCI ISR port.
370 */
371__attribute__((unused))
372int virtioRaiseInterrupt(VIRTIOSTATE *pState, int rcBusy, uint8_t u8IntCause)
373{
374 RT_NOREF2(pState, u8IntCause);
375 RT_NOREF_PV(rcBusy);
376 LogFlow(("%s virtioRaiseInterrupt: u8IntCause=%x\n",
377 INSTANCE(pState), u8IntCause));
378
379 pState->uISR |= u8IntCause;
380 PDMDevHlpPCISetIrq(pState->CTX_SUFF(pDevIns), 0, 1);
381 return VINF_SUCCESS;
382}
383
/**
 * Lower (deassert) the device's PCI interrupt line.
 *
 * @param pState  The device state structure.
 */
__attribute__((unused))
static void virtioLowerInterrupt(VIRTIOSTATE *pState)
{
    LogFlow(("%s virtioLowerInterrupt\n", INSTANCE(pState)));
    /* Level 0 deasserts IRQ 0 of this device instance. */
    PDMDevHlpPCISetIrq(pState->CTX_SUFF(pDevIns), 0, 0);
}
395
396
397#ifdef IN_RING3
/**
 * Saves the state of the device.
 *
 * Currently only dumps state to the log; actual serialization is TODO.
 *
 * @returns VBox status code (currently always VINF_SUCCESS).
 * @param pVirtio  The virtio instance state.
 * @param pSSM     The handle to the saved state (unused until implemented).
 */
int virtioSaveExec(PVIRTIOSTATE pVirtio, PSSMHANDLE pSSM)
{
    int rc = VINF_SUCCESS;
    virtioDumpState(pVirtio, "virtioSaveExec");
    RT_NOREF(pSSM);
    /*
     * PK TODO save guest features, queue selector, status, ISR,
     * and per queue info (size, address, indices)...
     * using calls like SSMR3PutU8(), SSMR3PutU16(), SSMR3PutU16()...
     * and AssertRCReturn(rc, rc)
     */

    return rc;
}
419
/**
 * Loads a saved device state.
 *
 * Currently only dumps state to the log; actual deserialization is TODO.
 *
 * @returns VBox status code (currently always VINF_SUCCESS).
 * @param pVirtio     The virtio instance state.
 * @param pSSM        The handle to the saved state (unused until implemented).
 * @param uVersion    The data unit version number (unused until implemented).
 * @param uPass       The data pass.
 * @param uNumQueues  Number of queues to restore (unused until implemented).
 */
int virtioLoadExec(PVIRTIOSTATE pVirtio, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass, uint32_t uNumQueues)
{
    RT_NOREF5(pVirtio, pSSM, uVersion, uPass, uNumQueues);
    int rc = VINF_SUCCESS;
    virtioDumpState(pVirtio, "virtioLoadExec");

    /*
     * PK TODO, restore everything saved in virtioSaveExec, using
     * calls like SSMR3GetU8(), SSMR3GetU16(), SSMR3GetU16()...
     * and AssertRCReturn(rc, rc)
     */
    if (uPass == SSM_PASS_FINAL)
    {
    }
    return rc;
}
445
/**
 * Device relocation callback.
 *
 * When this callback is called the device instance data, and if the
 * device have a GC component, is being relocated, or/and the selectors
 * have been changed. The device must use the chance to perform the
 * necessary pointer relocations and data updates.
 *
 * Before the GC code is executed the first time, this function will be
 * called with a 0 delta so GC pointer calculations can be one in one place.
 *
 * @param pDevIns   Pointer to the device instance.
 * @param offDelta  The relocation delta relative to the old location
 *                  (currently unused; only pDevInsRC is refreshed).
 *
 * @remark A relocation CANNOT fail.
 */
void virtioRelocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
{
    RT_NOREF(offDelta);
    PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);

    /* Refresh the raw-mode context pointer to this instance. */
    pVirtio->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
    // TBD
}
470
/**
 * Add (register) a queue with the given size and name.
 *
 * Currently a stub: parameters are ignored and NULL is always returned.
 * NOTE(review): the commented legacy code assigns pQueue->pfnCallback from a
 * 'pfnCallback' that is not a parameter of this function — resolve when
 * reviving it for VirtIO 1.0.
 */
PVQUEUE virtioAddQueue(VIRTIOSTATE* pState, unsigned cbQueue, const char *pcszName)
{

    RT_NOREF3(pState, cbQueue, pcszName);
/* PK TODO Adapt to VirtIO 1.0

    PVQUEUE pQueue = NULL;
    // Find an empty queue slot
    for (unsigned i = 0; i < pState->uNumQueues; i++)
    {
        if (pState->Queues[i].VirtQ.cbQueue == 0)
        {
            pQueue = &pState->Queues[i];
            break;
        }
    }

    if (!pQueue)
    {
        Log(("%s Too many queues being added, no empty slots available!\n", INSTANCE(pState)));
    }
    else
    {
        pQueue->VirtQ.cbQueue = cbQueue;
        pQueue->VirtQ.addrDescriptors = 0;
        pQueue->uPageNumber = 0;
        pQueue->pfnCallback = pfnCallback;
        pQueue->pcszName = pcszName;
    }
    return pQueue;
*/
    return NULL;// Temporary
}
504
505
506
/**
 * Initialize a queue's ring addresses from a guest-supplied page number.
 *
 * Currently a stub: parameters are ignored. The legacy implementation below
 * (pending VirtIO 1.0 rework) derived the descriptor, avail and used ring
 * addresses from uPageNumber using the legacy contiguous layout.
 */
__attribute__((unused))
static void virtQueueInit(PVQUEUE pQueue, uint32_t uPageNumber)
{
    RT_NOREF2(pQueue, uPageNumber);

/* PK TODO, re-work this for VirtIO 1.0
    pQueue->VirtQ.addrDescriptors = (uint64_t)uPageNumber << PAGE_SHIFT;

    pQueue->VirtQ.addrAvail = pQueue->VirtQ.addrDescriptors
                            + sizeof(VIRTQ_DESC_T) * pQueue->VirtQ.cbQueue;

    pQueue->VirtQ.addrUsed = RT_ALIGN(pQueue->VirtQ.addrAvail
                                      + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, ring[pQueue->VirtQ.cbQueue])
                                      + sizeof(uint16_t), // virtio 1.0 adds a 16-bit field following ring data
                                      PAGE_SIZE); // The used ring must start from the next page.

    pQueue->uNextAvailIndex = 0;
    pQueue->uNextUsedIndex = 0;
*/

}
528
529
/**
 * Reset a queue's ring addresses and indices to their power-on state.
 *
 * Currently a stub: the parameter is ignored; legacy implementation below.
 */
__attribute__((unused))
static void virtQueueReset(PVQUEUE pQueue)
{
    RT_NOREF(pQueue);
/* PK TODO Adapt to VirtIO 1.0
    pQueue->VirtQ.addrDescriptors = 0;
    pQueue->VirtQ.addrAvail = 0;
    pQueue->VirtQ.addrUsed = 0;
    pQueue->uNextAvailIndex = 0;
    pQueue->uNextUsedIndex = 0;
    pQueue->uPageNumber = 0;
*/
}
543
/**
 * Notify driver of a configuration or queue event.
 *
 * Currently only logs; the actual notification is not yet implemented.
 *
 * @param hVirtio        Opaque handle to the virtio instance state.
 * @param fConfigChange  True if config-change notification, else queue notification.
 */
static void virtioNotifyDriver(VIRTIOHANDLE hVirtio, bool fConfigChange)
{
    RT_NOREF(hVirtio);
    LogFunc(("fConfigChange = %d\n", fConfigChange));
}
555
556
/**
 * Reset the virtio device state (part of our "custom API").
 *
 * Currently a stub: only converts the handle; the legacy reset logic
 * is preserved below pending adaptation to VirtIO 1.0.
 *
 * @returns VINF_SUCCESS (always).
 * @param hVirtio  Opaque handle to the virtio instance state.
 */
int virtioReset(VIRTIOHANDLE hVirtio) /* Part of our "custom API" */
{
    PVIRTIOSTATE pVirtio = H2P(hVirtio);

    RT_NOREF(pVirtio);
/* PK TODO Adapt to VirtIO 1.0
    pState->uGuestFeatures = 0;
    pState->uQueueSelector = 0;
    pState->uStatus = 0;
    pState->uISR = 0;

    for (unsigned i = 0; i < pState->uNumQueues; i++)
        virtQueueReset(&pState->Queues[i]);
    virtioNotify(pVirtio);
*/
    return VINF_SUCCESS;
}
574
575__attribute__((unused))
576static void virtioSetNeedsReset(PVIRTIOSTATE pVirtio)
577{
578 pVirtio->uDeviceStatus |= VIRTIO_STATUS_DEVICE_NEEDS_RESET;
579 if (pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
580 {
581 pVirtio->fGenUpdatePending = true;
582 virtioNotifyDriver(pVirtio, true);
583 }
584}
585
586static void virtioResetDevice(PVIRTIOSTATE pVirtio)
587{
588
589 LogFunc(("\n"));
590 pVirtio->uDeviceStatus = 0;
591 pVirtio->uDeviceFeaturesSelect = 0;
592 pVirtio->uDriverFeaturesSelect = 0;
593 pVirtio->uConfigGeneration = 0;
594 pVirtio->uNumQueues = VIRTIO_MAX_QUEUES;
595
596 for (uint32_t i = 0; i < pVirtio->uNumQueues; i++)
597 {
598 pVirtio->uQueueSize[i] = VIRTQ_MAX_SIZE;
599 pVirtio->uQueueNotifyOff[i] = i;
600// virtqNotify();
601 }
602}
603
/**
 * Handle accesses to Common Configuration capability
 *
 * @returns VBox status code (VERR_ACCESS_DENIED for bad selectors, read-only
 *          writes, or unrecognized offsets; VINF_SUCCESS otherwise).
 *
 * @param pVirtio   Virtio instance state
 * @param fWrite    If write access (otherwise read access)
 * @param uOffset   Byte offset of the access within virtio_pci_common_cfg
 * @param cb        Number of bytes to read or write
 * @param pv        Pointer to location to write to or read from.
 *                  NOTE(review): declared const but written through a cast on
 *                  the read paths — consider dropping the const qualifier.
 */
int virtioCommonCfgAccessed(PVIRTIOSTATE pVirtio, int fWrite, off_t uOffset, unsigned cb, void const *pv)
{
    int rc = VINF_SUCCESS;
    uint64_t val;
    /* COMMON_CFG(member) presumably tests whether uOffset falls within that
     * member of VIRTIO_PCI_COMMON_CFG_T — defined in Virtio_1_0_impl.h. */
    if (COMMON_CFG(uDeviceFeatures))
    {
        if (fWrite) /* Guest WRITE pCommonCfg>uDeviceFeatures */
            Log(("Guest attempted to write readonly virtio_pci_common_cfg.device_feature\n"));
        else /* Guest READ pCommonCfg->uDeviceFeatures */
        {
            /* The 64-bit feature word is exposed as two 32-bit halves chosen
             * by uDeviceFeaturesSelect (VirtIO 1.0 feature-bits access model). */
            uint32_t uIntraOff = uOffset - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uDeviceFeatures);
            switch(pVirtio->uDeviceFeaturesSelect)
            {
                case 0:
                    val = pVirtio->uDeviceFeatures & 0xffffffff;
                    memcpy((void *)pv, (const void *)&val, cb);
                    LOG_ACCESSOR(uDeviceFeatures);
                    break;
                case 1:
                    val = (pVirtio->uDeviceFeatures >> 32) & 0xffffffff;
                    uIntraOff += 4; /* shift logged offset to the high half */
                    memcpy((void *)pv, (const void *)&val, cb);
                    LOG_ACCESSOR(uDeviceFeatures);
                    break;
                default:
                    LogFunc(("Guest read uDeviceFeatures with out of range selector (%d), returning 0\n",
                             pVirtio->uDeviceFeaturesSelect));
                    return VERR_ACCESS_DENIED;
            }
        }
    }
    else if (COMMON_CFG(uDriverFeatures))
    {
        if (fWrite) /* Guest WRITE pCommonCfg->udriverFeatures */
        {
            uint32_t uIntraOff = uOffset - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uDriverFeatures);
            switch(pVirtio->uDriverFeaturesSelect)
            {
                case 0:
                    memcpy(&pVirtio->uDriverFeatures, pv, cb);
                    LOG_ACCESSOR(uDriverFeatures);
                    break;
                case 1:
                    /* Selector 1 targets the high 32 bits of the 64-bit word. */
                    memcpy(((char *)&pVirtio->uDriverFeatures) + sizeof(uint32_t), pv, cb);
                    uIntraOff += 4;
                    LOG_ACCESSOR(uDriverFeatures);
                    break;
                default:
                    LogFunc(("Guest wrote uDriverFeatures with out of range selector (%d), returning 0\n",
                             pVirtio->uDriverFeaturesSelect));
                    return VERR_ACCESS_DENIED;
            }
        }
        else /* Guest READ pCommonCfg->udriverFeatures */
        {
            uint32_t uIntraOff = uOffset - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uDriverFeatures);
            switch(pVirtio->uDriverFeaturesSelect)
            {
                case 0:
                    val = pVirtio->uDriverFeatures & 0xffffffff;
                    memcpy((void *)pv, (const void *)&val, cb);
                    LOG_ACCESSOR(uDriverFeatures);
                    break;
                case 1:
                    val = (pVirtio->uDriverFeatures >> 32) & 0xffffffff;
                    uIntraOff += 4;
                    memcpy((void *)pv, (const void *)&val, cb);
                    LOG_ACCESSOR(uDriverFeatures);
                    break;
                default:
                    LogFunc(("Guest read uDriverFeatures with out of range selector (%d), returning 0\n",
                             pVirtio->uDriverFeaturesSelect));
                    return VERR_ACCESS_DENIED;
            }
        }
    }
    else if (COMMON_CFG(uNumQueues))
    {
        if (fWrite)
        {
            /* num_queues is read-only per the VirtIO 1.0 common cfg layout. */
            LogFunc(("Guest attempted to write readonly virtio_pci_common_cfg.num_queues\n"));
            return VERR_ACCESS_DENIED;
        }
        else
        {
            uint32_t uIntraOff = 0; /* consumed by LOG_ACCESSOR */
            *(uint16_t *)pv = VIRTIO_MAX_QUEUES;
            LOG_ACCESSOR(uNumQueues);
        }
    }
    else if (COMMON_CFG(uDeviceStatus))
    {
        if (fWrite) /* Guest WRITE pCommonCfg->uDeviceStatus */
        {
            pVirtio->uDeviceStatus = *(uint8_t *)pv;
            LogFunc(("Guest wrote uDeviceStatus ................ ("));
            virtioLogDeviceStatus(pVirtio->uDeviceStatus);
            Log((")\n"));
            /* Writing 0 to device_status requests a device reset (VirtIO 1.0). */
            if (pVirtio->uDeviceStatus == 0)
                virtioResetDevice(pVirtio);
        }
        else /* Guest READ pCommonCfg->uDeviceStatus */
        {
            LogFunc(("Guest read  uDeviceStatus ................ ("));
            *(uint32_t *)pv = pVirtio->uDeviceStatus;
            virtioLogDeviceStatus(pVirtio->uDeviceStatus);
            Log((")\n"));
        }
    }
    else if (COMMON_CFG(uMsixConfig))
    {
        ACCESSOR(uMsixConfig);
    }
    else if (COMMON_CFG(uDeviceFeaturesSelect))
    {
        ACCESSOR(uDeviceFeaturesSelect);
    }
    else if (COMMON_CFG(uDriverFeaturesSelect))
    {
        ACCESSOR(uDriverFeaturesSelect);
    }
    else if (COMMON_CFG(uConfigGeneration))
    {
        ACCESSOR_READONLY(uConfigGeneration);
    }
    else if (COMMON_CFG(uQueueSelect))
    {
        ACCESSOR(uQueueSelect);
    }
    else if (COMMON_CFG(uQueueSize))
    {
        /* Per-queue fields are indexed by the currently selected queue. */
        ACCESSOR_WITH_IDX(uQueueSize, pVirtio->uQueueSelect);
    }
    else if (COMMON_CFG(uQueueMsixVector))
    {
        ACCESSOR_WITH_IDX(uQueueMsixVector, pVirtio->uQueueSelect);
    }
    else if (COMMON_CFG(uQueueEnable))
    {
        ACCESSOR_WITH_IDX(uQueueEnable, pVirtio->uQueueSelect);
    }
    else if (COMMON_CFG(uQueueNotifyOff))
    {
        ACCESSOR_READONLY_WITH_IDX(uQueueNotifyOff, pVirtio->uQueueSelect);
    }
    else if (COMMON_CFG(uQueueDesc))
    {
        ACCESSOR_WITH_IDX(uQueueDesc, pVirtio->uQueueSelect);
    }
    else if (COMMON_CFG(uQueueAvail))
    {
        ACCESSOR_WITH_IDX(uQueueAvail, pVirtio->uQueueSelect);
    }
    else if (COMMON_CFG(uQueueUsed))
    {
        ACCESSOR_WITH_IDX(uQueueUsed, pVirtio->uQueueSelect);
    }
    else
    {
        LogFunc(("Bad guest %s access to virtio_pci_common_cfg: uOffset=%d, cb=%d\n",
                 fWrite ? "write" : "read ", uOffset, cb));
        rc = VERR_ACCESS_DENIED;
    }
    return rc;
}
779
780/**
781 * Memory mapped I/O Handler for PCI Capabilities read operations.
782 *
783 * @returns VBox status code.
784 *
785 * @param pDevIns The device instance.
786 * @param pvUser User argument.
787 * @param GCPhysAddr Physical address (in GC) where the read starts.
788 * @param pv Where to store the result.
789 * @param cb Number of bytes read.
790 */
791PDMBOTHCBDECL(int) virtioR3MmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
792{
793 RT_NOREF(pvUser);
794 PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);
795 int rc = VINF_SUCCESS;
796
797//#ifdef LOG_ENABLED
798// LogFunc(("pVirtio=%#p GCPhysAddr=%RGp pv=%#p{%.*Rhxs} cb=%u\n", pVirtio, GCPhysAddr, pv, cb, pv, cb));
799//#endif
800
801 MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysDeviceCap, pVirtio->pDeviceCap, fDevSpecific);
802 MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysCommonCfg, pVirtio->pCommonCfgCap, fCommonCfg);
803 MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysIsrCap, pVirtio->pIsrCap, fIsrCap);
804
805 if (fDevSpecific)
806 {
807 uint32_t uDevSpecificDataOffset = GCPhysAddr - pVirtio->pGcPhysDeviceCap;
808 /**
809 * Callback to client to manage device-specific configuration and changes to it.
810 */
811 rc = pVirtio->virtioCallbacks.pfnVirtioDevCapRead(pDevIns, uDevSpecificDataOffset, pv, cb);
812 /**
813 * Anytime any part of the device-specific configuration (which our client maintains) is read
814 * it needs to be checked to see if it changed since the last time any part was read, in
815 * order to maintain the config generation (see VirtIO 1.0 spec, section 4.1.4.3.1)
816 */
817 uint32_t fDevSpecificFieldChanged = false;
818
819 if (memcmp((char *)pv + uDevSpecificDataOffset, (char *)pVirtio->pPrevDevSpecificCap + uDevSpecificDataOffset, cb))
820 fDevSpecificFieldChanged = true;
821
822 memcpy(pVirtio->pPrevDevSpecificCap, pv, pVirtio->cbDevSpecificCap);
823 if (pVirtio->fGenUpdatePending || fDevSpecificFieldChanged)
824 {
825 if (fDevSpecificFieldChanged)
826 LogFunc(("Dev specific config field changed since last read, gen++ = %d\n",
827 pVirtio->uConfigGeneration));
828 else
829 LogFunc(("Config generation pending flag set, gen++ = %d\n",
830 pVirtio->uConfigGeneration));
831 ++pVirtio->uConfigGeneration;
832 pVirtio->fGenUpdatePending = false;
833 }
834 }
835 else
836 if (fCommonCfg)
837 {
838 uint32_t uCommonCfgDataOffset = GCPhysAddr - pVirtio->pGcPhysCommonCfg;
839 virtioCommonCfgAccessed(pVirtio, 0 /* fWrite */, uCommonCfgDataOffset, cb, pv);
840 }
841 else
842 if (fIsrCap)
843 {
844 *(uint8_t *)pv = pVirtio->fQueueInterrupt | pVirtio->fDeviceConfigInterrupt << 1;
845 LogFunc(("Read 0x%s from pIsrCap\n", *(uint8_t *)pv));
846 }
847 else {
848
849 AssertMsgFailed(("virtio: Read outside of capabilities region: GCPhysAddr=%RGp cb=%RGp\n", GCPhysAddr, cb));
850 }
851 return rc;
852}
853
854/**
855 * Memory mapped I/O Handler for PCI Capabilities write operations.
856 *
857 * @returns VBox status code.
858 *
859 * @param pDevIns The device instance.
860 * @param pvUser User argument.
861 * @param GCPhysAddr Physical address (in GC) where the write starts.
862 * @param pv Where to fetch the result.
863 * @param cb Number of bytes to write.
864 */
865PDMBOTHCBDECL(int) virtioR3MmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
866{
867 RT_NOREF(pvUser);
868 PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);
869 int rc = VINF_SUCCESS;
870
871//#ifdef LOG_ENABLED
872// LogFunc(("pVirtio=%#p GCPhysAddr=%RGp pv=%#p{%.*Rhxs} cb=%u\n", pVirtio, GCPhysAddr, pv, cb, pv, cb));
873//#endif
874
875 MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysDeviceCap, pVirtio->pDeviceCap, fDevSpecific);
876 MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysCommonCfg, pVirtio->pCommonCfgCap, fCommonCfg);
877 MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysIsrCap, pVirtio->pIsrCap, fIsrCap);
878
879 if (fDevSpecific)
880 {
881 uint32_t uDevSpecificDataOffset = GCPhysAddr - pVirtio->pGcPhysDeviceCap;
882 rc = pVirtio->virtioCallbacks.pfnVirtioDevCapWrite(pDevIns, uDevSpecificDataOffset, pv, cb);
883 }
884 else
885 if (fCommonCfg)
886 {
887 uint32_t uCommonCfgDataOffset = GCPhysAddr - pVirtio->pGcPhysCommonCfg;
888 virtioCommonCfgAccessed(pVirtio, 1 /* fWrite */, uCommonCfgDataOffset, cb, pv);
889 }
890 else
891 if (fIsrCap)
892 {
893 pVirtio->fQueueInterrupt = (*(uint8_t *)pv) & 1;
894 pVirtio->fDeviceConfigInterrupt = !!(*((uint8_t *)pv) & 2);
895 Log(("pIsrCap... setting fQueueInterrupt=%d fDeviceConfigInterrupt=%d\n",
896 pVirtio->fQueueInterrupt, pVirtio->fDeviceConfigInterrupt));
897 }
898 else
899 {
900 LogFunc(("virtio: Write outside of capabilities region:\nGCPhysAddr=%RGp cb=%RGp,", GCPhysAddr, cb));
901 }
902 return rc;
903}
904
905
906/**
907 * @callback_method_impl{FNPCIIOREGIONMAP}
908 */
909static DECLCALLBACK(int) virtioR3Map(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t iRegion,
910 RTGCPHYS GCPhysAddress, RTGCPHYS cb, PCIADDRESSSPACE enmType)
911{
912 RT_NOREF3(pPciDev, iRegion, enmType);
913 PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);
914 int rc = VINF_SUCCESS;
915
916 Assert(cb >= 32);
917
918 if (iRegion == pVirtio->uVirtioCapBar)
919 {
920 /* We use the assigned size here, because we currently only support page aligned MMIO ranges. */
921 rc = PDMDevHlpMMIORegister(pDevIns, GCPhysAddress, cb, NULL /*pvUser*/,
922 IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
923 virtioR3MmioWrite, virtioR3MmioRead,
924 "virtio-scsi MMIO");
925
926 if (RT_FAILURE(rc))
927 {
928 LogFunc(("virtio: PCI Capabilities failed to map GCPhysAddr=%RGp cb=%RGp, region=%d\n",
929 GCPhysAddress, cb, iRegion));
930 return rc;
931 }
932 LogFunc(("virtio: PCI Capabilities mapped at GCPhysAddr=%RGp cb=%RGp, region=%d\n",
933 GCPhysAddress, cb, iRegion));
934 pVirtio->GCPhysPciCapBase = GCPhysAddress;
935 pVirtio->pGcPhysCommonCfg = GCPhysAddress + pVirtio->pCommonCfgCap->uOffset;
936 pVirtio->pGcPhysNotifyCap = GCPhysAddress + pVirtio->pNotifyCap->pciCap.uOffset;
937 pVirtio->pGcPhysIsrCap = GCPhysAddress + pVirtio->pIsrCap->uOffset;
938 if (pVirtio->pDevSpecificCap)
939 pVirtio->pGcPhysDeviceCap = GCPhysAddress + pVirtio->pDeviceCap->uOffset;
940 }
941 return rc;
942}
943
944/**
945 * Callback function for reading from the PCI configuration space.
946 *
947 * @returns The register value.
948 * @param pDevIns Pointer to the device instance the PCI device
949 * belongs to.
950 * @param pPciDev Pointer to PCI device. Use pPciDev->pDevIns to get the device instance.
951 * @param uAddress The configuration space register address. [0..4096]
952 * @param cb The register size. [1,2,4]
953 *
954 * @remarks Called with the PDM lock held. The device lock is NOT take because
955 * that is very likely be a lock order violation.
956 */
957static DECLCALLBACK(uint32_t) virtioPciConfigRead(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
958 uint32_t uAddress, unsigned cb)
959{
960// PVIRTIOSTATE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOSTATE);
961 PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);
962
963 if (uAddress == (uint64_t)&pVirtio->pPciCfgCap->uPciCfgData)
964 {
965 /* VirtIO 1.0 spec section 4.1.4.7 describes a required alternative access capability
966 * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space
967 * (the virtio_pci_cfg_cap capability), and access data items. */
968 uint32_t uLength = pVirtio->pPciCfgCap->pciCap.uLength;
969 uint32_t uOffset = pVirtio->pPciCfgCap->pciCap.uOffset;
970 uint8_t uBar = pVirtio->pPciCfgCap->pciCap.uBar;
971 uint32_t pv = 0;
972 if (uBar == pVirtio->uVirtioCapBar)
973 (void)virtioR3MmioRead(pDevIns, NULL, (RTGCPHYS)((uint32_t)pVirtio->GCPhysPciCapBase + uOffset),
974 &pv, uLength);
975 else
976 {
977 LogFunc(("Guest read virtio_pci_cfg_cap.pci_cfg_data using unconfigured BAR. Ignoring"));
978 return 0;
979 }
980 LogFunc(("virtio: Guest read virtio_pci_cfg_cap.pci_cfg_data, bar=%d, offset=%d, length=%d, result=%d\n",
981 uBar, uOffset, uLength, pv));
982 return pv;
983 }
984 return pVirtio->pfnPciConfigReadOld(pDevIns, pPciDev, uAddress, cb);
985}
986
987/**
988 * Callback function for writing to the PCI configuration space.
989 *
990 * @returns VINF_SUCCESS or PDMDevHlpDBGFStop status.
991 *
992 * @param pDevIns Pointer to the device instance the PCI device
993 * belongs to.
994 * @param pPciDev Pointer to PCI device. Use pPciDev->pDevIns to get the device instance.
995 * @param uAddress The configuration space register address. [0..4096]
996 * @param u32Value The value that's being written. The number of bits actually used from
997 * this value is determined by the cb parameter.
998 * @param cb The register size. [1,2,4]
999 *
1000 * @remarks Called with the PDM lock held. The device lock is NOT take because
1001 * that is very likely be a lock order violation.
1002 */
1003static DECLCALLBACK(VBOXSTRICTRC) virtioPciConfigWrite(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
1004 uint32_t uAddress, uint32_t u32Value, unsigned cb)
1005{
1006 PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);
1007
1008 if (uAddress == pVirtio->uPciCfgDataOff)
1009 {
1010 /* VirtIO 1.0 spec section 4.1.4.7 describes a required alternative access capability
1011 * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space
1012 * (the virtio_pci_cfg_cap capability), and access data items. */
1013 uint32_t uLength = pVirtio->pPciCfgCap->pciCap.uLength;
1014 uint32_t uOffset = pVirtio->pPciCfgCap->pciCap.uOffset;
1015 uint8_t uBar = pVirtio->pPciCfgCap->pciCap.uBar;
1016 if (uBar == pVirtio->uVirtioCapBar)
1017 (void)virtioR3MmioWrite(pDevIns, NULL, (RTGCPHYS)((uint32_t)pVirtio->GCPhysPciCapBase + uOffset),
1018 (void *)&u32Value, uLength);
1019 else
1020 {
1021 LogFunc(("Guest wrote virtio_pci_cfg_cap.pci_cfg_data using unconfigured BAR. Ignoring"));
1022 return VINF_SUCCESS;
1023 }
1024 LogFunc(("Guest wrote virtio_pci_cfg_cap.pci_cfg_data, bar=%d, offset=%x, length=%x, value=%d\n",
1025 uBar, uOffset, uLength, u32Value));
1026 return VINF_SUCCESS;
1027 }
1028 return pVirtio->pfnPciConfigWriteOld(pDevIns, pPciDev, uAddress, u32Value, cb);
1029}
1030
1031/**
1032 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
1033 */
1034void *virtioQueryInterface(PPDMIBASE pInterface, const char *pszIID)
1035{
1036 VIRTIOSTATE *pThis = IFACE_TO_STATE(pInterface, IBase);
1037 Assert(&pThis->IBase == pInterface);
1038
1039 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThis->IBase);
1040 return NULL;
1041}
1042
1043/**
1044 * Get VirtIO accepted host-side features
1045 *
1046 * @returns feature bits selected or 0 if selector out of range.
1047 *
1048 * @param pState Virtio state
1049 * @param uSelect Selects which 32-bit set of feature information to return
1050 */
1051
1052__attribute__((unused))
1053static uint64_t virtioGetHostFeatures(PVIRTIOSTATE pVirtio)
1054{
1055 return pVirtio->uDriverFeatures;
1056}
1057
1058/**
1059 *
1060 * Set VirtIO available host-side features
1061 *
1062 * @returns VBox status code
1063 *
1064 * @param pState Virtio state
1065 * @param uDeviceFeatures Feature bits (0-63) to set
1066 */
1067
1068
1069void virtioSetHostFeatures(VIRTIOHANDLE hVirtio, uint64_t uDeviceFeatures)
1070{
1071 H2P(hVirtio)->uDeviceFeatures = VIRTIO_F_VERSION_1 | uDeviceFeatures;
1072}
1073
1074/**
1075 * Destruct PCI-related part of device.
1076 *
1077 * We need to free non-VM resources only.
1078 *
1079 * @returns VBox status code.
1080 * @param pState The device state structure.
1081 */
1082int virtioDestruct(VIRTIOSTATE* pState)
1083{
1084 Log(("%s Destroying PCI instance\n", INSTANCE(pState)));
1085 return VINF_SUCCESS;
1086}
1087
/** PK (temp note to self):
 *
 * Device needs to negotiate capabilities,
 * then get queue size address information from driver.
 *
 * Still need consumer to pass in:
 *
 *     num_queues
 *     config_generation
 * Needs to manage feature negotiation
 * That means consumer needs to pass in device-specific feature bits/values
 * Device has to provide at least one notifier capability
 *
 * ISR config values are set by the device (config interrupt vs. queue interrupt)
 *
 */
1104
1105/**
1106 * Setup PCI device controller and Virtio state
1107 *
1108 * @param pDevIns Device instance data
1109 * @param pVirtio Device State
1110 * @param iInstance Instance number
1111 * @param pPciParams Values to populate industry standard PCI Configuration Space data structure
1112 * @param pcszNameFmt Device instance name (format-specifier)
1113 * @param uNumQueues Number of Virtio Queues created by consumer (driver)
1114 * @param uVirtioRegion Region number to map for PCi Capabilities structs
1115 * @param devCapReadCallback Client function to call back to handle device specific capabilities
1116 * @param devCapWriteCallback Client function to call back to handle device specific capabilities
1117 * @param cbDevSpecificCap Size of device specific struct
1118 * @param uNotifyOffMultiplier See VirtIO 1.0 spec 4.1.4.4 re: virtio_pci_notify_cap
1119 */
1120
1121int virtioConstruct(PPDMDEVINS pDevIns, PVIRTIOHANDLE phVirtio, int iInstance, PVIRTIOPCIPARAMS pPciParams,
1122 const char *pcszNameFmt, uint32_t uNumQueues, uint32_t uVirtioCapBar, uint64_t uDeviceFeatures,
1123 PFNVIRTIODEVCAPREAD devCapReadCallback, PFNVIRTIODEVCAPWRITE devCapWriteCallback,
1124 uint16_t cbDevSpecificCap, void *pDevSpecificCap, uint32_t uNotifyOffMultiplier)
1125{
1126
1127 int rc = VINF_SUCCESS;
1128
1129 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)RTMemAlloc(sizeof(VIRTIOSTATE));
1130 if (!pVirtio)
1131 {
1132 PDMDEV_SET_ERROR(pDevIns, VERR_NO_MEMORY, N_("virtio: out of memory"));
1133 return VERR_NO_MEMORY;
1134 }
1135
1136
1137 pVirtio->uNumQueues = uNumQueues;
1138 pVirtio->uNotifyOffMultiplier = uNotifyOffMultiplier;
1139 pVirtio->uDeviceFeatures = VIRTIO_F_VERSION_1 | uDeviceFeatures;
1140
1141 /* Init handles and log related stuff. */
1142 RTStrPrintf(pVirtio->szInstance, sizeof(pVirtio->szInstance), pcszNameFmt, iInstance);
1143
1144 pVirtio->pDevInsR3 = pDevIns;
1145 pVirtio->pDevInsR0 = PDMDEVINS_2_R0PTR(pDevIns);
1146 pVirtio->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
1147 pVirtio->uDeviceStatus = 0;
1148 pVirtio->cbDevSpecificCap = cbDevSpecificCap;
1149 pVirtio->pDevSpecificCap = pDevSpecificCap;
1150 /**
1151 * Need to keep a history of this relatively small virtio device-specific
1152 * configuration buffer, which is opaque to this encapsulation of the generic
1153 * part virtio operations, to track config changes to fields, in order to
1154 * update the configuration generation each change. (See VirtIO 1.0 section 4.1.4.3.1)
1155 */
1156 pVirtio->pPrevDevSpecificCap = RTMemAlloc(cbDevSpecificCap);
1157 if (!pVirtio->pPrevDevSpecificCap)
1158 {
1159 RTMemFree(pVirtio);
1160 PDMDEV_SET_ERROR(pDevIns, VERR_NO_MEMORY, N_("virtio: out of memory"));
1161 return VERR_NO_MEMORY;
1162 }
1163 memcpy(pVirtio->pPrevDevSpecificCap, pVirtio->pDevSpecificCap, cbDevSpecificCap);
1164 pVirtio->uVirtioCapBar = uVirtioCapBar;
1165 pVirtio->virtioCallbacks.pfnVirtioDevCapRead = devCapReadCallback;
1166 pVirtio->virtioCallbacks.pfnVirtioDevCapWrite = devCapWriteCallback;
1167
1168 /* Set PCI config registers (assume 32-bit mode) */
1169 PCIDevSetRevisionId (&pVirtio->dev, DEVICE_PCI_REVISION_ID_VIRTIO);
1170 PCIDevSetVendorId (&pVirtio->dev, DEVICE_PCI_VENDOR_ID_VIRTIO);
1171 PCIDevSetSubSystemVendorId (&pVirtio->dev, DEVICE_PCI_VENDOR_ID_VIRTIO);
1172 PCIDevSetDeviceId (&pVirtio->dev, pPciParams->uDeviceId);
1173 PCIDevSetClassBase (&pVirtio->dev, pPciParams->uClassBase);
1174 PCIDevSetClassSub (&pVirtio->dev, pPciParams->uClassSub);
1175 PCIDevSetClassProg (&pVirtio->dev, pPciParams->uClassProg);
1176 PCIDevSetSubSystemId (&pVirtio->dev, pPciParams->uSubsystemId);
1177 PCIDevSetInterruptLine (&pVirtio->dev, pPciParams->uInterruptLine);
1178 PCIDevSetInterruptPin (&pVirtio->dev, pPciParams->uInterruptPin);
1179
1180 /* Register PCI device */
1181 rc = PDMDevHlpPCIRegister(pDevIns, &pVirtio->dev);
1182 if (RT_FAILURE(rc))
1183 {
1184 RTMemFree(pVirtio);
1185 return PDMDEV_SET_ERROR(pDevIns, rc,
1186 N_("virtio: cannot register PCI Device")); /* can we put params in this error? */
1187 }
1188
1189 pVirtio->IBase = pDevIns->IBase;
1190
1191 PDMDevHlpPCISetConfigCallbacks(pDevIns, &pVirtio->dev,
1192 virtioPciConfigRead, &pVirtio->pfnPciConfigReadOld,
1193 virtioPciConfigWrite, &pVirtio->pfnPciConfigWriteOld);
1194
1195 /** Construct & map PCI vendor-specific capabilities for virtio host negotiation with guest driver */
1196
1197#if 0 && defined(VBOX_WITH_MSI_DEVICES) /* T.B.D. */
1198 uint8_t fMsiSupport = true;
1199#else
1200 uint8_t fMsiSupport = false;
1201#endif
1202
1203 /* The following capability mapped via VirtIO 1.0: struct virtio_pci_cfg_cap (VIRTIO_PCI_CFG_CAP_T)
1204 * as a mandatory but suboptimal alternative interface to host device capabilities, facilitating
1205 * access the memory of any BAR. If the guest uses it (the VirtIO driver on Linux doesn't),
1206 * Unlike Common, Notify, ISR and Device capabilities, it is accessed directly via PCI Config region.
1207 * therefore does not contribute to the capabilities region (BAR) the other capabilities use.
1208 */
1209#define CFGADDR2IDX(addr) ((uint64_t)addr - (uint64_t)&pVirtio->dev.abConfig)
1210
1211 PVIRTIO_PCI_CAP_T pCfg;
1212 uint32_t cbRegion = 0;
1213
1214 /* Common capability (VirtIO 1.0 spec, section 4.1.4.3) */
1215 pCfg = (PVIRTIO_PCI_CAP_T)&pVirtio->dev.abConfig[0x40];
1216 pCfg->uCfgType = VIRTIO_PCI_CAP_COMMON_CFG;
1217 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1218 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
1219 pCfg->uCapNext = CFGADDR2IDX(pCfg) + pCfg->uCapLen;
1220 pCfg->uBar = uVirtioCapBar;
1221 pCfg->uOffset = 0;
1222 pCfg->uLength = sizeof(VIRTIO_PCI_COMMON_CFG_T);
1223 cbRegion += pCfg->uLength;
1224 pVirtio->pCommonCfgCap = pCfg;
1225
1226 /* Notify capability (VirtIO 1.0 spec, section 4.1.4.4). Note: uLength is based on assumption
1227 * that each queue's uQueueNotifyOff is set equal to uQueueSelect's ordinal
1228 * value of the queue */
1229 pCfg = (PVIRTIO_PCI_CAP_T)&pVirtio->dev.abConfig[pCfg->uCapNext];
1230 pCfg->uCfgType = VIRTIO_PCI_CAP_NOTIFY_CFG;
1231 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1232 pCfg->uCapLen = sizeof(VIRTIO_PCI_NOTIFY_CAP_T);
1233 pCfg->uCapNext = CFGADDR2IDX(pCfg) + pCfg->uCapLen;
1234 pCfg->uBar = uVirtioCapBar;
1235 pCfg->uOffset = pVirtio->pCommonCfgCap->uOffset + pVirtio->pCommonCfgCap->uLength;
1236 pCfg->uLength = VIRTIO_MAX_QUEUES * uNotifyOffMultiplier + 2; /* will change in VirtIO 1.1 */
1237 cbRegion += pCfg->uLength;
1238 pVirtio->pNotifyCap = (PVIRTIO_PCI_NOTIFY_CAP_T)pCfg;
1239 pVirtio->pNotifyCap->uNotifyOffMultiplier = uNotifyOffMultiplier;
1240
1241 /* ISR capability (VirtIO 1.0 spec, section 4.1.4.5) */
1242 pCfg = (PVIRTIO_PCI_CAP_T)&pVirtio->dev.abConfig[pCfg->uCapNext];
1243 pCfg->uCfgType = VIRTIO_PCI_CAP_ISR_CFG;
1244 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1245 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
1246 pCfg->uCapNext = CFGADDR2IDX(pCfg) + pCfg->uCapLen;
1247 pCfg->uBar = uVirtioCapBar;
1248 pCfg->uOffset = pVirtio->pNotifyCap->pciCap.uOffset + pVirtio->pNotifyCap->pciCap.uLength;
1249 pCfg->uLength = sizeof(uint32_t);
1250 cbRegion += pCfg->uLength;
1251 pVirtio->pIsrCap = pCfg;
1252
1253 /* PCI Cfg capability (VirtIO 1.0 spec, section 4.1.4.7) */
1254 pCfg = (PVIRTIO_PCI_CAP_T)&pVirtio->dev.abConfig[pCfg->uCapNext];
1255 pCfg->uCfgType = VIRTIO_PCI_CAP_PCI_CFG;
1256 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1257 pCfg->uCapLen = sizeof(VIRTIO_PCI_CFG_CAP_T);
1258 pCfg->uCapNext = (fMsiSupport || pVirtio->pDevSpecificCap) ? CFGADDR2IDX(pCfg) + pCfg->uCapLen : 0;
1259 pCfg->uBar = uVirtioCapBar;
1260 pCfg->uOffset = pVirtio->pIsrCap->uOffset + pVirtio->pIsrCap->uLength;
1261 pCfg->uLength = 4; /* Initialize a non-zero 4-byte aligned so Linux virtio_pci module recognizes this cap */
1262 cbRegion += pCfg->uLength;
1263 pVirtio->pPciCfgCap = (PVIRTIO_PCI_CFG_CAP_T)pCfg;
1264
1265 if (pVirtio->pDevSpecificCap)
1266 {
1267 /* Following capability (via VirtIO 1.0, section 4.1.4.6). Client defines the
1268 * device specific configuration struct and passes its params to this constructor */
1269 pCfg = (PVIRTIO_PCI_CAP_T)&pVirtio->dev.abConfig[pCfg->uCapNext];
1270 pCfg->uCfgType = VIRTIO_PCI_CAP_DEVICE_CFG;
1271 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1272 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
1273 pCfg->uCapNext = fMsiSupport ? CFGADDR2IDX(pCfg) + pCfg->uCapLen : 0;
1274 pCfg->uBar = uVirtioCapBar;
1275 pCfg->uOffset = pVirtio->pIsrCap->uOffset + pVirtio->pIsrCap->uLength;
1276 pCfg->uLength = cbDevSpecificCap;
1277 cbRegion += pCfg->uLength;
1278 pVirtio->pDeviceCap = pCfg;
1279 }
1280
1281 /* Set offset to first capability and enable PCI dev capabilities */
1282 PCIDevSetCapabilityList (&pVirtio->dev, 0x40);
1283 PCIDevSetStatus (&pVirtio->dev, VBOX_PCI_STATUS_CAP_LIST);
1284
1285 if (fMsiSupport)
1286 {
1287 PDMMSIREG aMsiReg;
1288 RT_ZERO(aMsiReg);
1289 aMsiReg.iMsixCapOffset = pCfg->uCapNext;
1290 aMsiReg.iMsixNextOffset = 0;
1291 aMsiReg.iMsixBar = 0;
1292 aMsiReg.cMsixVectors = 1;
1293 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &aMsiReg); /* see MsixR3init() */
1294 if (RT_FAILURE (rc))
1295 /* PK TODO: The following is moot, we need to flag no MSI-X support */
1296 PCIDevSetCapabilityList(&pVirtio->dev, 0x40);
1297 }
1298
1299 Log(("cbRegion = %d (0x%x)\n", cbRegion, cbRegion));
1300 rc = PDMDevHlpPCIIORegionRegister(pDevIns, uVirtioCapBar, cbRegion,
1301 PCI_ADDRESS_SPACE_MEM, virtioR3Map);
1302 if (RT_FAILURE(rc))
1303 {
1304 RTMemFree(pVirtio->pPrevDevSpecificCap);
1305 RTMemFree(pVirtio);
1306 return PDMDEV_SET_ERROR(pDevIns, rc,
1307 N_("virtio: cannot register PCI Capabilities address space"));
1308 }
1309 *phVirtio = (PVIRTIOHANDLE)pVirtio;
1310 return rc;
1311}
1312#endif /* IN_RING3 */
1313
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette