VirtualBox

source: vbox/trunk/src/VBox/Devices/VirtIO/Virtio_1_0.cpp@ 80058

Last change on this file since 80058 was 80058, checked in by vboxsync, 6 years ago

Virtio 1.0 capabilities are now showing up and some negotiation hurdles are being accomplished with the Linux guest driver counterpart. (See #9440, Comment #38)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 42.8 KB
Line 
1/* $Id: Virtio_1_0.cpp 80058 2019-07-30 08:17:49Z vboxsync $ */
2/** @file
3 * Virtio_1_0 - Virtio Common Functions (VirtQueue, VQueue, Virtio PCI)
4 */
5
6/*
7 * Copyright (C) 2009-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEV_VIRTIO
23
24#include <VBox/log.h>
25#include <iprt/param.h>
26#include <iprt/assert.h>
27#include <iprt/uuid.h>
28#include <VBox/vmm/pdmdev.h>
29#include "Virtio_1_0.h"
30
31#define INSTANCE(pState) pState->szInstance
32#define IFACE_TO_STATE(pIface, ifaceName) ((VIRTIOSTATE *)((char*)(pIface) - RT_UOFFSETOF(VIRTIOSTATE, ifaceName)))
33
34#ifdef LOG_ENABLED
35# define QUEUENAME(s, q) (q->pcszName)
36#endif
37
/* These ACCESSOR macros handle the most basic kinds of MMIO accesses to fields of the
 * virtio 1.0 spec's virtio_pci_common_cfg structure, avoiding a lot of visual bloat.
 */
41
42 /**
43 * If the physical address and access length is within the mapped capability struct
44 * the ptrByType will be set to the mapped capability start. Otherwise ptrByType will be NULL.
45 *
46 * Implied parameters:
47 * GPhysAddr - Physical address accessed (via MMIO callback)
48 * cb - Number of bytes to access
49 *
50 * Actual Parameters:
51 * [IN] pCapStruct - Pointer to MMIO mapped capability struct
52 * [IN] type - Capability struct type
53 * [OUT] result - A pointer of type capType that will be set to a mapped capability
54 * if phys. addr / access len is within it's span.
55 * [OUT] offset - The offset of the physical address into the capability if applicable.
56 */
57
58#define MATCH_VIRTIO_CAP_STRUCT(pCapStruct, type, result, offset) \
59 type *result = NULL; \
60 if ( GCPhysAddr >= (RTGCPHYS)pCapStruct \
61 && GCPhysAddr < ((RTGCPHYS)pCapStruct + sizeof(type)) \
62 && cb <= sizeof(type)) \
63 { \
64 offset = GCPhysAddr - (RTGCPHYS)pCapStruct; \
65 result = (type *)pCapStruct; \
66 }
67
68#define LOG_ACCESSOR(member, type) \
69 LogFunc(("Guest %s 0x%x %s %s\n", fWrite ? "wrote" : "read ", \
70 *(type *)pv, fWrite ? " to" : "from", #member));
71
72#define LOG_INDEXED_ACCESSOR(member, type, idx) \
73 LogFunc(("Guest %s 0x%x %s %s[%d]\n", fWrite ? "wrote" : "read ", \
74 *(type *)pv, fWrite ? " to" : "from", #member, idx));
75
76#define ACCESSOR(member, type) \
77 { \
78 if (fWrite) \
79 { \
80 pVirtio->member = *(type *)pv; \
81 } \
82 else \
83 { \
84 *(type *)pv = pVirtio->member; \
85 } \
86 LOG_ACCESSOR(member, type); \
87 }
88#define ACCESSOR_WITH_IDX(member, type, idx) \
89 { \
90 if (fWrite) \
91 { \
92 pVirtio->member[idx] = *(type *)pv; \
93 } \
94 else \
95 { \
96 *(type *)pv = pVirtio->member[idx]; \
97 } \
98 LOG_INDEXED_ACCESSOR(member, type, idx); \
99 }
100
101#define ACCESSOR_READONLY(member, type) \
102 { \
103 if (fWrite) \
104 { \
105 LogFunc(("Guest attempted to write readonly virtio_pci_common_cfg.%s\n", #member)); \
106 AssertMsgFailed(("bad access\n")); \
107 } \
108 else \
109 { \
110 *(type *)pv = pVirtio->member; \
111 LOG_ACCESSOR(member, type); \
112 } \
113 }
114
115#define ACCESSOR_READONLY_WITH_IDX(member, type, idx) \
116 { \
117 if (fWrite) \
118 { \
119 LogFunc(("Guest attempted to write readonly virtio_pci_common_cfg.%s[%d]\n", #member, idx)); \
120 AssertMsgFailed(("bad access\n")); \
121 } \
122 else \
123 { \
124 *(type *)pv = pVirtio->member[idx]; \
125 LOG_INDEXED_ACCESSOR(member, type, idx); \
126 } \
127 }
128
129
130#ifdef VBOX_DEVICE_STRUCT_TESTCASE
131# define virtioDumpState(x, s) do {} while (0)
132#else
133# ifdef DEBUG /* This still needs to be migrated to VirtIO 1.0 */
134__attribute__((unused))
135static void virtioDumpState(PVIRTIOSTATE pState, const char *pcszCaller)
136{
137 RT_NOREF2(pState, pcszCaller);
138 /* PK TODO, dump state features, selector, status, ISR, queue info (iterate),
139 descriptors, avail, used, size, indices, address
140 each by variable name on new line, indented slightly */
141}
142#endif
143
144
145void virtQueueReadDesc(PVIRTIOSTATE pState, PVIRTQUEUE pVirtQueue, uint32_t idx, PVIRTQUEUEDESC pDesc)
146{
147 //Log(("%s virtQueueReadDesc: ring=%p idx=%u\n", INSTANCE(pState), pVirtQueue, idx));
148 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
149 pVirtQueue->pGcPhysVirtqDescriptors + sizeof(VIRTQUEUEDESC) * (idx % pVirtQueue->cbQueue),
150 pDesc, sizeof(VIRTQUEUEDESC));
151}
152
153uint16_t virtQueueReadAvail(PVIRTIOSTATE pState, PVIRTQUEUE pVirtQueue, uint32_t idx)
154{
155 uint16_t tmp;
156 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
157 pVirtQueue->pGcPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQUEUEAVAIL, auRing[idx % pVirtQueue->cbQueue]),
158 &tmp, sizeof(tmp));
159 return tmp;
160}
161
162uint16_t virtQueueReadAvailFlags(PVIRTIOSTATE pState, PVIRTQUEUE pVirtQueue)
163{
164 uint16_t tmp;
165 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
166 pVirtQueue->pGcPhysVirtqAvail + RT_UOFFSETOF(VIRTQUEUEAVAIL, fFlags),
167 &tmp, sizeof(tmp));
168 return tmp;
169}
170
171uint16_t virtQueueReadUsedIndex(PVIRTIOSTATE pState, PVIRTQUEUE pVirtQueue)
172{
173 uint16_t tmp;
174 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
175 pVirtQueue->pGcPhysVirtqUsed + RT_UOFFSETOF(VIRTQUEUEUSED, uIdx),
176 &tmp, sizeof(tmp));
177 return tmp;
178}
179
180void virtQueueWriteUsedIndex(PVIRTIOSTATE pState, PVIRTQUEUE pVirtQueue, uint16_t u16Value)
181{
182 PDMDevHlpPCIPhysWrite(pState->CTX_SUFF(pDevIns),
183 pVirtQueue->pGcPhysVirtqAvail + RT_UOFFSETOF(VIRTQUEUEUSED, uIdx),
184 &u16Value, sizeof(u16Value));
185}
186
187void virtQueueWriteUsedElem(PVIRTIOSTATE pState, PVIRTQUEUE pVirtQueue, uint32_t idx, uint32_t id, uint32_t uLen)
188{
189
190 RT_NOREF5(pState, pVirtQueue, idx, id, uLen);
191 /* PK TODO: Adapt to VirtIO 1.0
192 VIRTQUEUEUSEDELEM elem;
193
194 elem.id = id;
195 elem.uLen = uLen;
196 PDMDevHlpPCIPhysWrite(pState->CTX_SUFF(pDevIns),
197 pVirtQueue->pGcPhysVirtqUsed + RT_UOFFSETOF_DYN(VIRTQUEUEUSED, ring[idx % pVirtQueue->cbQueue]),
198 &elem, sizeof(elem));
199 */
200}
201
202void virtQueueSetNotification(PVIRTIOSTATE pState, PVIRTQUEUE pVirtQueue, bool fEnabled)
203{
204 RT_NOREF3(pState, pVirtQueue, fEnabled);
205
206/* PK TODO: Adapt to VirtIO 1.0
207 uint16_t tmp;
208
209 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
210 pVirtQueue->pGcPhysVirtqAvail + RT_UOFFSETOF(VIRTQUEUEUSED, uFlags),
211 &tmp, sizeof(tmp));
212
213 if (fEnabled)
214 tmp &= ~ VIRTQUEUEUSED_F_NO_NOTIFY;
215 else
216 tmp |= VIRTQUEUEUSED_F_NO_NOTIFY;
217
218 PDMDevHlpPCIPhysWrite(pState->CTX_SUFF(pDevIns),
219 pVirtQueue->pGcPhysVirtqAvail + RT_UOFFSETOF(VIRTQUEUEUSED, uFlags),
220 &tmp, sizeof(tmp));
221*/
222}
223
224bool virtQueueSkip(PVIRTIOSTATE pState, PVQUEUE pQueue)
225{
226
227 RT_NOREF2(pState, pQueue);
228/* PK TODO Adapt to VirtIO 1.0
229 if (virtQueueIsEmpty(pState, pQueue))
230 return false;
231
232 Log2(("%s virtQueueSkip: %s avail_idx=%u\n", INSTANCE(pState),
233 QUEUENAME(pState, pQueue), pQueue->uNextAvailIndex));
234 pQueue->uNextAvailIndex++;
235*/
236 return true;
237}
238
239bool virtQueueGet(PVIRTIOSTATE pState, PVQUEUE pQueue, PVQUEUEELEM pElem, bool fRemove)
240{
241
242 RT_NOREF4(pState, pQueue, pElem, fRemove);
243
244/* PK TODO: Adapt to VirtIO 1.0
245 if (virtQueueIsEmpty(pState, pQueue))
246 return false;
247
248 pElem->nIn = pElem->nOut = 0;
249
250 Log2(("%s virtQueueGet: %s avail_idx=%u\n", INSTANCE(pState),
251 QUEUENAME(pState, pQueue), pQueue->uNextAvailIndex));
252
253 VIRTQUEUEDESC desc;
254 uint16_t idx = virtQueueReadAvail(pState, &pQueue->VirtQueue, pQueue->uNextAvailIndex);
255 if (fRemove)
256 pQueue->uNextAvailIndex++;
257 pElem->idx = idx;
258 do
259 {
260 VQUEUESEG *pSeg;
261
262 //
263 // Malicious guests may try to trick us into writing beyond aSegsIn or
264 // aSegsOut boundaries by linking several descriptors into a loop. We
265 // cannot possibly get a sequence of linked descriptors exceeding the
266 // total number of descriptors in the ring (see @bugref{8620}).
267 ///
268 if (pElem->nIn + pElem->nOut >= VIRTQUEUE_MAX_SIZE)
269 {
270 static volatile uint32_t s_cMessages = 0;
271 static volatile uint32_t s_cThreshold = 1;
272 if (ASMAtomicIncU32(&s_cMessages) == ASMAtomicReadU32(&s_cThreshold))
273 {
274 LogRel(("%s: too many linked descriptors; check if the guest arranges descriptors in a loop.\n",
275 INSTANCE(pState)));
276 if (ASMAtomicReadU32(&s_cMessages) != 1)
277 LogRel(("%s: (the above error has occured %u times so far)\n",
278 INSTANCE(pState), ASMAtomicReadU32(&s_cMessages)));
279 ASMAtomicWriteU32(&s_cThreshold, ASMAtomicReadU32(&s_cThreshold) * 10);
280 }
281 break;
282 }
283 RT_UNTRUSTED_VALIDATED_FENCE();
284
285 virtQueueReadDesc(pState, &pQueue->VirtQueue, idx, &desc);
286 if (desc.u16Flags & VIRTQUEUEDESC_F_WRITE)
287 {
288 Log2(("%s virtQueueGet: %s IN seg=%u desc_idx=%u addr=%p cb=%u\n", INSTANCE(pState),
289 QUEUENAME(pState, pQueue), pElem->nIn, idx, desc.addr, desc.uLen));
290 pSeg = &pElem->aSegsIn[pElem->nIn++];
291 }
292 else
293 {
294 Log2(("%s virtQueueGet: %s OUT seg=%u desc_idx=%u addr=%p cb=%u\n", INSTANCE(pState),
295 QUEUENAME(pState, pQueue), pElem->nOut, idx, desc.addr, desc.uLen));
296 pSeg = &pElem->aSegsOut[pElem->nOut++];
297 }
298
299 pSeg->addr = desc.addr;
300 pSeg->cb = desc.uLen;
301 pSeg->pv = NULL;
302
303 idx = desc.next;
304 } while (desc.u16Flags & VIRTQUEUEDESC_F_NEXT);
305
306 Log2(("%s virtQueueGet: %s head_desc_idx=%u nIn=%u nOut=%u\n", INSTANCE(pState),
307 QUEUENAME(pState, pQueue), pElem->idx, pElem->nIn, pElem->nOut));
308*/
309 return true;
310}
311
312
313
314void virtQueuePut(PVIRTIOSTATE pState, PVQUEUE pQueue,
315 PVQUEUEELEM pElem, uint32_t uTotalLen, uint32_t uReserved)
316{
317
318 RT_NOREF5(pState, pQueue, pElem, uTotalLen, uReserved);
319
320/* PK TODO Re-work this for VirtIO 1.0
321 Log2(("%s virtQueuePut: %s"
322 " desc_idx=%u acb=%u (%u)\n",
323 INSTANCE(pState), QUEUENAME(pState, pQueue),
324 pElem->idx, uTotalLen, uReserved));
325
326 Assert(uReserved < uTotalLen);
327
328 uint32_t cbLen = uTotalLen - uReserved;
329 uint32_t cbSkip = uReserved;
330
331 for (unsigned i = 0; i < pElem->nIn && cbLen > 0; ++i)
332 {
333 if (cbSkip >= pElem->aSegsIn[i].cb) // segment completely skipped?
334 {
335 cbSkip -= pElem->aSegsIn[i].cb;
336 continue;
337 }
338
339 uint32_t cbSegLen = pElem->aSegsIn[i].cb - cbSkip;
340 if (cbSegLen > cbLen) // last segment only partially used?
341 cbSegLen = cbLen;
342
343 //
344 // XXX: We should assert pv != NULL, but we need to check and
345 // fix all callers first.
346 //
347 if (pElem->aSegsIn[i].pv != NULL)
348 {
349 Log2(("%s virtQueuePut: %s"
350 " used_idx=%u seg=%u addr=%p pv=%p cb=%u acb=%u\n",
351 INSTANCE(pState), QUEUENAME(pState, pQueue),
352 pQueue->uNextUsedIndex, i,
353 (void *)pElem->aSegsIn[i].addr, pElem->aSegsIn[i].pv,
354 pElem->aSegsIn[i].cb, cbSegLen));
355
356 PDMDevHlpPCIPhysWrite(pState->CTX_SUFF(pDevIns),
357 pElem->aSegsIn[i].addr + cbSkip,
358 pElem->aSegsIn[i].pv,
359 cbSegLen);
360 }
361
362 cbSkip = 0;
363 cbLen -= cbSegLen;
364 }
365
366 Log2(("%s virtQueuePut: %s"
367 " used_idx=%u guest_used_idx=%u id=%u len=%u\n",
368 INSTANCE(pState), QUEUENAME(pState, pQueue),
369 pQueue->uNextUsedIndex, virtQueueReadUsedIndex(pState, &pQueue->VirtQueue),
370 pElem->idx, uTotalLen));
371
372 virtQueueWriteUsedElem(pState, &pQueue->VirtQueue,
373 pQueue->uNextUsedIndex++,
374 pElem->idx, uTotalLen);
375*/
376
377}
378
379
380void virtQueueNotify(PVIRTIOSTATE pState, PVQUEUE pQueue)
381{
382
383 RT_NOREF2(pState, pQueue);
384/* PK TODO Adapt to VirtIO 1.0
385 LogFlow(("%s virtQueueNotify: %s availFlags=%x guestFeatures=%x virtQueue is %sempty\n",
386 INSTANCE(pState), QUEUENAME(pState, pQueue),
387 virtQueueReadAvailFlags(pState, &pQueue->VirtQueue),
388 pState->uGuestFeatures, virtQueueIsEmpty(pState, pQueue)?"":"not "));
389 if (!(virtQueueReadAvailFlags(pState, &pQueue->VirtQueue) & VIRTQUEUEAVAIL_F_NO_INTERRUPT)
390 || ((pState->uGuestFeatures & VIRTIO_F_NOTIFY_ON_EMPTY) && virtQueueIsEmpty(pState, pQueue)))
391 {
392 int rc = virtioRaiseInterrupt(pState, VERR_INTERNAL_ERROR, VIRTIO_ISR_QUEUE);
393 if (RT_FAILURE(rc))
394 Log(("%s virtQueueNotify: Failed to raise an interrupt (%Rrc).\n", INSTANCE(pState), rc));
395 }
396*/
397}
398
399void virtQueueSync(PVIRTIOSTATE pState, PVQUEUE pQueue)
400{
401 RT_NOREF(pState, pQueue);
402/* PK TODO Adapt to VirtIO 1.0
403 Log2(("%s virtQueueSync: %s old_used_idx=%u new_used_idx=%u\n", INSTANCE(pState),
404 QUEUENAME(pState, pQueue), virtQueueReadUsedIndex(pState, &pQueue->VirtQueue), pQueue->uNextUsedIndex));
405 virtQueueWriteUsedIndex(pState, &pQueue->VirtQueue, pQueue->uNextUsedIndex);
406 virtQueueNotify(pState, pQueue);
407*/
408}
409
410int virtioReset(PVIRTIOSTATE pVirtio)
411{
412 RT_NOREF(pVirtio);
413/* PK TODO Adapt to VirtIO 1.09
414 pState->uGuestFeatures = 0;
415 pState->uQueueSelector = 0;
416 pState->uStatus = 0;
417 pState->uISR = 0;
418
419 for (unsigned i = 0; i < pState->nQueues; i++)
420 virtQueueReset(&pState->Queues[i]);
421*/
422 virtioNotify(pVirtio);
423 return VINF_SUCCESS;
424}
425
426
427/**
428 * Raise interrupt.
429 *
430 * @param pState The device state structure.
431 * @param rcBusy Status code to return when the critical section is busy.
432 * @param u8IntCause Interrupt cause bit mask to set in PCI ISR port.
433 */
434int virtioRaiseInterrupt(VIRTIOSTATE *pState, int rcBusy, uint8_t u8IntCause)
435{
436 RT_NOREF2(pState, u8IntCause);
437 RT_NOREF_PV(rcBusy);
438/* PK TODO: Adapt to VirtIO 1.0
439 STAM_COUNTER_INC(&pState->StatIntsRaised);
440 LogFlow(("%s virtioRaiseInterrupt: u8IntCause=%x\n",
441 INSTANCE(pState), u8IntCause));
442
443 pState->uISR |= u8IntCause;
444 PDMDevHlpPCISetIrq(pState->CTX_SUFF(pDevIns), 0, 1);
445*/
446 return VINF_SUCCESS;
447}
448
449/**
450 * Lower interrupt.
451 *
452 * @param pState The device state structure.
453 */
454__attribute__((unused))
455static void virtioLowerInterrupt(VIRTIOSTATE *pState)
456{
457 LogFlow(("%s virtioLowerInterrupt\n", INSTANCE(pState)));
458 PDMDevHlpPCISetIrq(pState->CTX_SUFF(pDevIns), 0, 0);
459}
460
461DECLINLINE(uint32_t) virtioGetHostFeatures(PVIRTIOSTATE pState,
462 PFNGETHOSTFEATURES pfnGetHostFeatures)
463{
464 return pfnGetHostFeatures(pState) /*| VIRTIO_F_NOTIFY_ON_EMPTY */;
465}
466
467
468#ifdef IN_RING3
469
470
471/**
472 * Saves the state of device.
473 *
474 * @returns VBox status code.
475 * @param pDevIns The device instance.
476 * @param pSSM The handle to the saved state.
477 */
478int virtioSaveExec(PVIRTIOSTATE pState, PSSMHANDLE pSSM)
479{
480 RT_NOREF2(pState, pSSM);
481 int rc = VINF_SUCCESS;
482// virtioDumpState(pState, "virtioSaveExec");
483 /*
484 * PK TODO save guest features, queue selector, sttus ISR,
485 * and per queue info (size, address, indicies)...
486 * using calls like SSMR3PutU8(), SSMR3PutU16(), SSMR3PutU16()...
487 * and AssertRCReturn(rc, rc)
488 */
489
490 return rc;
491}
492
493/**
494 * Loads a saved device state.
495 *
496 * @returns VBox status code.
497 * @param pDevIns The device instance.
498 * @param pSSM The handle to the saved state.
499 * @param uVersion The data unit version number.
500 * @param uPass The data pass.
501 */
502int virtioLoadExec(PVIRTIOSTATE pState, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass, uint32_t nQueues)
503{
504 RT_NOREF5(pState, pSSM, uVersion, uPass, nQueues);
505 int rc = VINF_SUCCESS;
506
507 /*
508 * PK TODO, restore everything saved in virtioSaveExect, using
509 * using calls like SSMR3PutU8(), SSMR3PutU16(), SSMR3PutU16()...
510 * and AssertRCReturn(rc, rc)
511 */
512 if (uPass == SSM_PASS_FINAL)
513 {
514 }
515
516// virtioDumpState(pState, "virtioLoadExec");
517
518 return rc;
519}
520
521/**
522 * Device relocation callback.
523 *
524 * When this callback is called the device instance data, and if the
525 * device have a GC component, is being relocated, or/and the selectors
526 * have been changed. The device must use the chance to perform the
527 * necessary pointer relocations and data updates.
528 *
529 * Before the GC code is executed the first time, this function will be
530 * called with a 0 delta so GC pointer calculations can be one in one place.
531 *
532 * @param pDevIns Pointer to the device instance.
533 * @param offDelta The relocation delta relative to the old location.
534 *
535 * @remark A relocation CANNOT fail.
536 */
537void virtioRelocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
538{
539 RT_NOREF(offDelta);
540 VIRTIOSTATE *pState = PDMINS_2_DATA(pDevIns, VIRTIOSTATE*);
541 pState->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
542 // TBD
543}
544
545PVQUEUE virtioAddQueue(VIRTIOSTATE* pState, unsigned cbQueue, PFNVIRTIOQUEUECALLBACK pfnCallback, const char *pcszName)
546{
547
548 RT_NOREF4(pState, cbQueue, pfnCallback, pcszName);
549/* PK TODO Adapt to VirtIO 1.0
550
551 PVQUEUE pQueue = NULL;
552 // Find an empty queue slot
553 for (unsigned i = 0; i < pState->nQueues; i++)
554 {
555 if (pState->Queues[i].VirtQueue.cbQueue == 0)
556 {
557 pQueue = &pState->Queues[i];
558 break;
559 }
560 }
561
562 if (!pQueue)
563 {
564 Log(("%s Too many queues being added, no empty slots available!\n", INSTANCE(pState)));
565 }
566 else
567 {
568 pQueue->VirtQueue.cbQueue = cbQueue;
569 pQueue->VirtQueue.addrDescriptors = 0;
570 pQueue->uPageNumber = 0;
571 pQueue->pfnCallback = pfnCallback;
572 pQueue->pcszName = pcszName;
573 }
574 return pQueue;
575*/
576 return NULL;// Temporary
577}
578
579
580__attribute__((unused))
581static void virtQueueReset(PVQUEUE pQueue)
582{
583 RT_NOREF(pQueue);
584/* PK TODO Adapt to VirtIO 1.0
585 pQueue->VirtQueue.addrDescriptors = 0;
586 pQueue->VirtQueue.addrAvail = 0;
587 pQueue->VirtQueue.addrUsed = 0;
588 pQueue->uNextAvailIndex = 0;
589 pQueue->uNextUsedIndex = 0;
590 pQueue->uPageNumber = 0;
591*/
592}
593
594__attribute__((unused))
595static void virtQueueInit(PVQUEUE pQueue, uint32_t uPageNumber)
596{
597 RT_NOREF2(pQueue, uPageNumber);
598
599/* PK TODO, re-work this for VirtIO 1.0
600 pQueue->VirtQueue.addrDescriptors = (uint64_t)uPageNumber << PAGE_SHIFT;
601
602 pQueue->VirtQueue.addrAvail = pQueue->VirtQueue.addrDescriptors
603 + sizeof(VIRTQUEUEDESC) * pQueue->VirtQueue.cbQueue;
604
605 pQueue->VirtQueue.addrUsed = RT_ALIGN(pQueue->VirtQueue.addrAvail
606 + RT_UOFFSETOF_DYN(VIRTQUEUEAVAIL, ring[pQueue->VirtQueue.cbQueue])
607 + sizeof(uint16_t), // virtio 1.0 adds a 16-bit field following ring data
608 PAGE_SIZE); // The used ring must start from the next page.
609
610 pQueue->uNextAvailIndex = 0;
611 pQueue->uNextUsedIndex = 0;
612*/
613
614}
615
616void virtioNotify(PVIRTIOSTATE pVirtio)
617{
618 RT_NOREF(pVirtio);
619}
620
621static void virtioResetDevice(PVIRTIOSTATE pVirtio)
622{
623 RT_NOREF(pVirtio);
624 LogFunc((""));
625 pVirtio->uDeviceStatus = 0;
626}
627
628/**
629 * Handle accesses to Common Configuration capability
630 *
631 * @returns VBox status code
632 *
633 * @param pVirtio Virtio instance state
634 * @param fWrite If write access (otherwise read access)
635 * @param pv Pointer to location to write to or read from
636 * @param cb Number of bytes to read or write
637 */
638int virtioCommonCfgAccessed(PVIRTIOSTATE pVirtio, int fWrite, off_t uoff, void const *pv)
639{
640 int rv = VINF_SUCCESS;
641 if (uoff == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uDeviceFeature))
642 {
643 if (fWrite) /* Guest WRITE pCommonCfg->uDeviceFeature */
644 {
645 AssertMsgFailed(("Guest attempted to write readonly virtio_pci_common_cfg.device_feature\n"));
646 }
647 else /* Guest WRITE pCommonCfg->uDeviceFeature */
648 {
649 switch(pVirtio->uFeaturesOfferedSelect)
650 {
651 case 0:
652 pVirtio->uFeaturesOffered = *(uint32_t *)pv;
653 LOG_ACCESSOR(uFeaturesOffered, uint32_t);
654 break;
655 case 1:
656 pVirtio->uFeaturesOffered = (uint64_t)(*(uint32_t *)pv) << 32;
657 LOG_ACCESSOR(uFeaturesOffered, uint32_t);
658 break;
659 default:
660 Log(("Guest selected out of range pVirtio->uDeviceFeature (%d), returning 0\n",
661 pVirtio->uFeaturesOfferedSelect));
662 }
663 }
664 }
665 else if (uoff == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uDriverFeature))
666 {
667 if (fWrite) /* Guest WRITE pCommonCfg->uDriverFeature */
668 {
669 switch(pVirtio->uFeaturesOfferedSelect)
670 {
671 case 0:
672 pVirtio->uFeaturesAccepted = *(uint32_t *)pv;
673 LOG_ACCESSOR(uFeaturesAccepted, uint32_t);
674 break;
675 case 1:
676 pVirtio->uFeaturesAccepted = (uint64_t)(*(uint32_t *)pv) << 32;
677 LOG_ACCESSOR(uFeaturesAccepted, uint32_t);
678 break;
679 }
680 }
681 else /* Guest READ pCommonCfg->uDriverFeature */
682 {
683 switch(pVirtio->uFeaturesOfferedSelect)
684 {
685 case 0:
686 *(uint32_t *)pv = pVirtio->uFeaturesAccepted & 0xffffffff;
687 LOG_ACCESSOR(uFeaturesAccepted, uint32_t);
688 break;
689 case 1:
690 *(uint32_t *)pv = (pVirtio->uFeaturesAccepted >> 32) & 0xffffffff;
691 LOG_ACCESSOR(uFeaturesAccepted, uint32_t);
692 break;
693 }
694 }
695 }
696 else if (uoff == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uNumQueues))
697 {
698 if (fWrite)
699 {
700 AssertMsgFailed(("Guest attempted to write readonly virtio_pci_common_cfg.num_queues\n"));
701 }
702 else
703 {
704 *(uint16_t *)pv = MAX_QUEUES;
705 Log(("Guest read 0x%x from pVirtio->uNumQueues\n", MAX_QUEUES));
706 }
707 }
708 else if (uoff == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uDeviceStatus))
709 {
710 if (fWrite) /* Guest WRITE pCommonCfg->uDeviceStatus */
711 {
712 pVirtio->uDeviceStatus = *(uint8_t *)pv;
713 LogFunc(("Driver wrote uDeviceStatus:\n"));
714 showDeviceStatus(pVirtio->uDeviceStatus);
715 if (pVirtio->uDeviceStatus == 0)
716 virtioResetDevice(pVirtio);
717 }
718 else /* Guest READ pCommonCfg->uDeviceStatus */
719 {
720 LogFunc(("Driver read uDeviceStatus:\n"));
721 *(uint32_t *)pv = pVirtio->uDeviceStatus;
722 showDeviceStatus(pVirtio->uDeviceStatus);
723 }
724 }
725 else if (uoff == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uMsixConfig))
726 {
727 ACCESSOR(uMsixConfig, uint32_t);
728 }
729 else if (uoff == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uDeviceFeatureSelect))
730 {
731 ACCESSOR(uFeaturesOfferedSelect, uint32_t);
732 }
733 else if (uoff == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uDriverFeatureSelect))
734 {
735 ACCESSOR(uFeaturesAcceptedSelect, uint32_t);
736 }
737 else if (uoff == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uConfigGeneration))
738 {
739 ACCESSOR_READONLY(uConfigGeneration, uint8_t);
740 }
741 else if (uoff == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uQueueSelect))
742 {
743 ACCESSOR(uQueueSelect, uint16_t);
744 }
745 else if (uoff == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uQueueSize))
746 {
747 ACCESSOR_WITH_IDX(uQueueSize, uint16_t, pVirtio->uQueueSelect);
748 }
749 else if (uoff == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uQueueMsixVector))
750 {
751 ACCESSOR_WITH_IDX(uQueueMsixVector, uint16_t, pVirtio->uQueueSelect);
752 }
753 else if (uoff == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uQueueEnable))
754 {
755 ACCESSOR_WITH_IDX(uQueueEnable, uint16_t, pVirtio->uQueueSelect);
756 }
757 else if (uoff == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uQueueNotifyOff))
758 {
759 ACCESSOR_READONLY_WITH_IDX(uQueueNotifyOff, uint16_t, pVirtio->uQueueSelect);
760 }
761 else if (uoff == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uQueueDesc))
762 {
763 ACCESSOR_WITH_IDX(uQueueDesc, uint64_t, pVirtio->uQueueSelect);
764 }
765 else if (uoff == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uQueueAvail))
766 {
767 ACCESSOR_WITH_IDX(uQueueAvail, uint64_t, pVirtio->uQueueSelect);
768 }
769 else if (uoff == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uQueueUsed))
770 {
771 ACCESSOR_WITH_IDX(uQueueUsed, uint64_t, pVirtio->uQueueSelect);
772 }
773 else
774 {
775
776 AssertMsgFailed(("virtio: Bad common cfg offset \n"));
777 }
778 return rv;
779}
780
781/**
782 * Memory mapped I/O Handler for PCI Capabilities read operations.
783 *
784 * @returns VBox status code.
785 *
786 * @param pDevIns The device instance.
787 * @param pvUser User argument.
788 * @param GCPhysAddr Physical address (in GC) where the read starts.
789 * @param pv Where to store the result.
790 * @param cb Number of bytes read.
791 */
792PDMBOTHCBDECL(int) virtioR3MmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
793{
794 RT_NOREF(pvUser);
795 PVIRTIOSTATE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOSTATE);
796 int rc = VINF_SUCCESS;
797
798//#ifdef LOG_ENABLED
799// LogFlowFunc(("pVirtio=%#p GCPhysAddr=%RGp pv=%#p{%.*Rhxs} cb=%u\n", pVirtio, GCPhysAddr, pv, cb, pv, cb));
800//#endif
801 off_t uoff = 0;
802 MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysCommonCfg, VIRTIO_PCI_COMMON_CFG_T, pCommonCfg, uoff);
803 MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysIsrCap, VIRTIO_PCI_ISR_CAP_T, pIsrCap, uoff);
804#if HAVE_VIRTIO_DEVICE_SPECIFIC_CAP
805 MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysDeviceCap, VIRTIO_PCI_DEVICE_CAP_T, pDeviceCap, uoff);
806#endif
807 /* Note: The notify capability is handled differently as per VirtIO 1.0 spec 4.1.4.4 */
808
809 if (pCommonCfg)
810 virtioCommonCfgAccessed(pVirtio, 0 /* fWrite */, uoff, pv);
811 else if (pIsrCap)
812 {
813 *(uint8_t *)pv = pVirtio->fQueueInterrupt | pVirtio->fDeviceConfigInterrupt << 1;
814 LogFunc(("Read 0x%s from pIsrCap\n", *(uint8_t *)pv));
815 }
816#if HAVE_VIRTIO_DEVICE_SPECIFIC_CAP
817 else if (pDeviceCap)
818 rc = pThis->pfnVirtioDevCapRead(pDevIns, GCPhysAddr, pv, cb);
819#endif
820 else
821 AssertMsgFailed(("virtio: Write outside of capabilities region\n"));
822
823 return rc;
824}
825#if TBD
826/**
827 * Callback function for reading from the PCI configuration space.
828 *
829 * @returns The register value.
830 * @param pDevIns Pointer to the device instance the PCI device
831 * belongs to.
832 * @param pPciDev Pointer to PCI device. Use pPciDev->pDevIns to get the device instance.
833 * @param uAddress The configuration space register address. [0..4096]
834 * @param cb The register size. [1,2,4]
835 *
836 * @remarks Called with the PDM lock held. The device lock is NOT take because
837 * that is very likely be a lock order violation.
838 */
839static DECLCALLBACK(uint32_t) virtioPciConfigRead(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
840 uint32_t uAddress, unsigned cb)
841{
842 PVIRTIOSTATE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOSTATE);
843 int rc = VINF_SUCCESS;
844
845 LogFunc(("uAddress: %d, uPciConfigDataOffset: %d\n", uAddress, pVirtio->uPciConfigDataOffset));
846
847 if (uAddress == pVirtio->uPciConfigDataOffset)
848 Log(("Read uPciConfigDataOffset\n"));
849 rc = pVirtio->pfnPciConfigReadOld(pDevIns, pPciDev, uAddress, cb);
850 return rc;
851
852}
853
854/**
855 * Callback function for writing to the PCI configuration space.
856 *
857 * @returns VINF_SUCCESS or PDMDevHlpDBGFStop status.
858 *
859 * @param pDevIns Pointer to the device instance the PCI device
860 * belongs to.
861 * @param pPciDev Pointer to PCI device. Use pPciDev->pDevIns to get the device instance.
862 * @param uAddress The configuration space register address. [0..4096]
863 * @param u32Value The value that's being written. The number of bits actually used from
864 * this value is determined by the cb parameter.
865 * @param cb The register size. [1,2,4]
866 *
867 * @remarks Called with the PDM lock held. The device lock is NOT take because
868 * that is very likely be a lock order violation.
869 */
870static DECLCALLBACK(VBOXSTRICTRC) virtioPciConfigWrite(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
871 uint32_t uAddress, uint32_t u32Value, unsigned cb)
872{
873 PVIRTIOSTATE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOSTATE);
874 VBOXSTRICTRC strictRc;
875
876 LogFunc(("uAddress: %d, uPciConfigDataOffset: %d\n", uAddress, pVirtio->uPciConfigDataOffset));
877 if (uAddress == pVirtio->uPciConfigDataOffset)
878 Log(("Wrote uPciConfigDataOffset\n"));
879 strictRc = pVirtio->pfnPciConfigWriteOld(pDevIns, pPciDev, uAddress, u32Value, cb);
880 return strictRc;
881}
882#endif
883/**
884 * Memory mapped I/O Handler for PCI Capabilities write operations.
885 *
886 * @returns VBox status code.
887 *
888 * @param pDevIns The device instance.
889 * @param pvUser User argument.
890 * @param GCPhysAddr Physical address (in GC) where the write starts.
891 * @param pv Where to fetch the result.
892 * @param cb Number of bytes to write.
893 */
894PDMBOTHCBDECL(int) virtioR3MmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
895{
896
897 RT_NOREF(pvUser);
898 PVIRTIOSTATE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOSTATE);
899 int rc = VINF_SUCCESS;
900
901#ifdef LOG_ENABLED
902// LogFunc(("pVirtio=%#p GCPhysAddr=%RGp pv=%#p{%.*Rhxs} cb=%u\n", pVirtio, GCPhysAddr, pv, cb, pv, cb));
903#endif
904 off_t uoff = 0;
905
906 MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysCommonCfg, VIRTIO_PCI_COMMON_CFG_T, pCommonCfg, uoff);
907 MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysIsrCap, VIRTIO_PCI_ISR_CAP_T, pIsrCap, uoff);
908#if HAVE_VIRTIO_DEVICE_SPECIFIC_CAP
909 MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysDeviceCap, VIRTIO_PCI_DEVICE_CAP_T, pDeviceCap, uoff);
910#endif
911
912 if (pCommonCfg)
913 virtioCommonCfgAccessed(pVirtio, 1 /* fWrite */, uoff, pv);
914 else if (pIsrCap)
915 {
916 pVirtio->fQueueInterrupt = (*(uint8_t *)pv) & 1;
917 pVirtio->fDeviceConfigInterrupt = !!(*((uint8_t *)pv) & 2);
918 Log(("pIsrCap... setting fQueueInterrupt=%d fDeviceConfigInterrupt=%d\n",
919 pVirtio->fQueueInterrupt, pVirtio->fDeviceConfigInterrupt));
920 }
921#if HAVE_VIRTIO_DEVICE_SPECIFIC_CAP
922 else if (pDeviceCap)
923 rc = pThis->pfnVirtioDevCapWrite(pDevIns, GCPhysAddr, pv, cb);
924#endif
925 else
926 AssertMsgFailed(("virtio: Write outside of capabilities region\n"));
927
928 return rc;
929}
930
931
932/**
933 * @callback_method_impl{FNPCIIOREGIONMAP}
934 */
935static DECLCALLBACK(int) virtioR3Map(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t iRegion,
936 RTGCPHYS GCPhysAddress, RTGCPHYS cb, PCIADDRESSSPACE enmType)
937{
938 RT_NOREF3(pPciDev, iRegion, enmType);
939 PVIRTIOSTATE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOSTATE);
940 int rc = VINF_SUCCESS;
941
942 Assert(cb >= 32);
943
944 /* We use the assigned size here, because we currently only support page aligned MMIO ranges. */
945 rc = PDMDevHlpMMIORegister(pDevIns, GCPhysAddress, cb, NULL /*pvUser*/,
946 IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
947 virtioR3MmioWrite, virtioR3MmioRead,
948 "virtio-scsi MMIO");
949
950 LogFunc(("virtio: PCI Capabilities mapped at GCPhysAddr=%RGp cb=%RGp, region=%d\n", GCPhysAddress, cb, iRegion));
951
952 if (RT_SUCCESS(rc))
953 {
954 pVirtio->GCPhysPciCapBase = GCPhysAddress;
955 pVirtio->pGcPhysCommonCfg = (PVIRTIO_PCI_COMMON_CFG_T)(GCPhysAddress + pVirtio->uCommonCfgOffset);
956 pVirtio->pGcPhysNotifyCap = (PVIRTIO_PCI_NOTIFY_CAP_T)(GCPhysAddress + pVirtio->uNotifyCapOffset);
957 pVirtio->pGcPhysIsrCap = (PVIRTIO_PCI_ISR_CAP_T)(GCPhysAddress + pVirtio->uIsrCapOffset);
958 pVirtio->pGcPhysPciCfgCap = (PVIRTIO_PCI_CFG_CAP_T)(GCPhysAddress + pVirtio->uPciCfgCapOffset);
959#ifdef HAVE_VIRTIO_DEVICE_SPECIFIC_CAP
960 pVirtio->pGcPhysDeviceCap = (PVIRTIO_PCI_DEVICE_CAP_T)(GCPhysAddress + pVirtio->uuDeviceCapOffset);
961#endif
962 }
963 return rc;
964}
965
966
967/**
968 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
969 */
970void *virtioQueryInterface(PPDMIBASE pInterface, const char *pszIID)
971{
972 VIRTIOSTATE *pThis = IFACE_TO_STATE(pInterface, IBase);
973 Assert(&pThis->IBase == pInterface);
974
975 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThis->IBase);
976 return NULL;
977}
978
979/**
980 * Get VirtIO available host-side features
981 *
982 * @returns feature bits selected or 0 if selector out of range.
983 *
984 * @param pState Virtio state
985 * @param uSelect Selects which 32-bit set of feature information to return
986 */
987
988static uint64_t virtioGetHostFeatures(PVIRTIOSTATE pVirtio)
989{
990 return pVirtio->uFeaturesOffered;
991}
992
993/**
994 *
995 * Set VirtIO available host-side features
996 *
997 * @returns VBox status code
998 *
999 * @param pState Virtio state
1000 * @param uFeaturesOffered Feature bits (0-63) to set
1001 */
1002
1003static int virtioSetHostFeatures(PVIRTIOSTATE pVirtio, uint64_t uFeaturesOffered)
1004{
1005 pVirtio->uFeaturesOffered = VIRTIO_F_VERSION_1 | uFeaturesOffered;
1006 return VINF_SUCCESS;
1007}
1008
1009/**
1010 * Destruct PCI-related part of device.
1011 *
1012 * We need to free non-VM resources only.
1013 *
1014 * @returns VBox status code.
1015 * @param pState The device state structure.
1016 */
1017int virtioDestruct(VIRTIOSTATE* pState)
1018{
1019 Log(("%s Destroying PCI instance\n", INSTANCE(pState)));
1020 return VINF_SUCCESS;
1021}
1022
1023/** PK (temp note to self):
1024 *
1025 * Device needs to negotiate capabilities,
1026 * then get queue size address information from driver.
1027 *
1028 * Still need consumer to pass in:
1029 *
1030 * num_queues
1031 * config_generation
1032 * Needs to manage feature negotiation
1033 * That means consumer needs to pass in device-specific feature bits/values
 * Device has to provide at least one notifier capability
 *
 * ISR config values are set by the device (config interrupt vs. queue interrupt)
1037 *
1038 */
1039
1040/**
1041 * Setup PCI device controller and Virtio state
1042 *
1043 * @param pDevIns Device instance data
1044 * @param pVirtio Device State
1045 * @param iInstance Instance number
1046 * @param pPciParams Values to populate industry standard PCI Configuration Space data structure
1047 * @param pcszNameFmt Device instance name (format-specifier)
1048 * @param nQueues Number of Virtio Queues created by consumer (driver)
1049 * @param uVirtioRegion Region number to map for PCi Capabilities structs
1050 * @param devCapReadCallback Client function to call back to handle device specific capabilities
1051 * @param devCapWriteCallback Client function to call back to handle device specific capabilities
1052 * @param cbDevSpecificCap Size of device specific struct
1053 * @param uNotifyOffMultiplier See VirtIO 1.0 spec 4.1.4.4 re: virtio_pci_notify_cap
1054 */
1055
1056int virtioConstruct(PPDMDEVINS pDevIns, PVIRTIOSTATE pVirtio, int iInstance, PVIRTIOPCIPARAMS pPciParams,
1057 PPVIRTIOCALLBACKS ppVirtioCallbacks, const char *pcszNameFmt, uint32_t nQueues, uint32_t uVirtioRegion,
1058 PFNVIRTIODEVCAPREAD devCapReadCallback, PFNVIRTIODEVCAPWRITE devCapWriteCallback,
1059 uint16_t cbDevSpecificCap, uint32_t uNotifyOffMultiplier)
1060{
1061 pVirtio->nQueues = nQueues;
1062 pVirtio->uNotifyOffMultiplier = uNotifyOffMultiplier;
1063
1064 /* Init handles and log related stuff. */
1065 RTStrPrintf(pVirtio->szInstance, sizeof(pVirtio->szInstance), pcszNameFmt, iInstance);
1066
1067 VIRTIOCALLBACKS virtioCallbacks;
1068 virtioCallbacks.pfnSetHostFeatures = virtioSetHostFeatures;
1069 virtioCallbacks.pfnGetHostFeatures = virtioGetHostFeatures;
1070 virtioCallbacks.pfnReset = virtioReset;
1071 *ppVirtioCallbacks = &virtioCallbacks;
1072
1073 pVirtio->pDevInsR3 = pDevIns;
1074 pVirtio->pDevInsR0 = PDMDEVINS_2_R0PTR(pDevIns);
1075 pVirtio->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
1076 pVirtio->uDeviceStatus = 0;
1077 pVirtio->pfnVirtioDevCapRead = devCapReadCallback;
1078 pVirtio->pfnVirtioDevCapWrite = devCapWriteCallback;
1079
1080 /* Set PCI config registers (assume 32-bit mode) */
1081 PCIDevSetRevisionId (&pVirtio->dev, DEVICE_PCI_REVISION_ID_VIRTIO);
1082 PCIDevSetVendorId (&pVirtio->dev, DEVICE_PCI_VENDOR_ID_VIRTIO);
1083 PCIDevSetSubSystemVendorId (&pVirtio->dev, DEVICE_PCI_VENDOR_ID_VIRTIO);
1084 PCIDevSetDeviceId (&pVirtio->dev, pPciParams->uDeviceId);
1085 PCIDevSetClassBase (&pVirtio->dev, pPciParams->uClassBase);
1086 PCIDevSetClassSub (&pVirtio->dev, pPciParams->uClassSub);
1087 PCIDevSetClassProg (&pVirtio->dev, pPciParams->uClassProg);
1088 PCIDevSetSubSystemId (&pVirtio->dev, pPciParams->uSubsystemId);
1089 PCIDevSetInterruptLine (&pVirtio->dev, pPciParams->uInterruptLine);
1090 PCIDevSetInterruptPin (&pVirtio->dev, pPciParams->uInterruptPin);
1091
1092 int rc = VINF_SUCCESS;
1093 /* Register PCI device */
1094 rc = PDMDevHlpPCIRegister(pDevIns, &pVirtio->dev);
1095 if (RT_FAILURE(rc))
1096 return PDMDEV_SET_ERROR(pDevIns, rc,
1097 N_("virtio: cannot register PCI Device")); /* can we put params in this error? */
1098
1099 pVirtio->IBase = pDevIns->IBase;
1100
1101 /** Construct & map PCI vendor-specific capabilities for virtio host negotiation with guest driver */
1102
1103#if 0 && defined(VBOX_WITH_MSI_DEVICES) /* T.B.D. */
1104 uint8_t fMsiSupport = true;
1105#else
1106 uint8_t fMsiSupport = false;
1107#endif
1108
1109 uint8_t uCfgCapOffset = 0x40;
1110 PVIRTIO_PCI_NOTIFY_CAP_T pNotifyCap;
1111 PVIRTIO_PCI_CAP_T pCommonCfg, pIsrCap;
1112#ifdef HAVE_VIRTIO_DEVICE_SPECIFIC_CAP
1113 PVIRTIO_PCI_CAP_T pDeviceCap;
1114#endif
1115 uint32_t cbVirtioCaps = 0;
1116
1117 /* Capability will be mapped via VirtIO 1.0: struct virtio_pci_cfg_cap (VIRTIO_PCI_CAP_T) */
1118 pVirtio->pPciCfgCap = (PVIRTIO_PCI_CFG_CAP_T)&pVirtio->dev.abConfig[uCfgCapOffset];
1119 pVirtio->uPciConfigDataOffset = 0x40 + sizeof(VIRTIO_PCI_CAP_T);
1120 PVIRTIO_PCI_CAP_T pCfg = (PVIRTIO_PCI_CAP_T)pVirtio->pPciCfgCap;
1121 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1122 pCfg->uCapNext = uCfgCapOffset += sizeof(VIRTIO_PCI_CFG_CAP_T);
1123 pCfg->uCapLen = sizeof(VIRTIO_PCI_CFG_CAP_T);
1124 pCfg->uCfgType = VIRTIO_PCI_CAP_PCI_CFG;
1125 pCfg->uBar = uVirtioRegion;
1126 pCfg->uOffset = pVirtio->uPciCfgCapOffset = 0;
1127 pCfg->uLength = sizeof(VIRTIO_PCI_CFG_CAP_T);
1128 cbVirtioCaps += pCfg->uLength;
1129
1130 /* Capability will be mapped via VirtIO 1.0: struct virtio_pci_common_cfg (VIRTIO_PCI_COMMON_CFG_T)*/
1131 pCfg = pCommonCfg = (PVIRTIO_PCI_CAP_T)&pVirtio->dev.abConfig[uCfgCapOffset];
1132 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1133 pCfg->uCapNext = uCfgCapOffset += sizeof(VIRTIO_PCI_CAP_T);
1134 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
1135 pCfg->uCfgType = VIRTIO_PCI_CAP_COMMON_CFG;
1136 pCfg->uBar = uVirtioRegion;
1137 pCfg->uOffset = pVirtio->uCommonCfgOffset = pVirtio->pPciCfgCap->pciCap.uOffset + sizeof(VIRTIO_PCI_CFG_CAP_T);
1138 pCfg->uLength = sizeof(VIRTIO_PCI_COMMON_CFG_T);
1139 cbVirtioCaps += pCfg->uLength;
1140
1141 /* Capability will be mapped via VirtIO 1.0: struct virtio_pci_notify_cap (VIRTIO_PCI_NOTIFY_CAP_T)*/
1142 pNotifyCap = (PVIRTIO_PCI_NOTIFY_CAP_T)&pVirtio->dev.abConfig[uCfgCapOffset];
1143 pCfg = (PVIRTIO_PCI_CAP_T)pNotifyCap;
1144 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1145 pCfg->uCapNext = uCfgCapOffset += sizeof(VIRTIO_PCI_NOTIFY_CAP_T);
1146 pCfg->uCapLen = sizeof(VIRTIO_PCI_NOTIFY_CAP_T);
1147 pCfg->uCfgType = VIRTIO_PCI_CAP_NOTIFY_CFG;
1148 pCfg->uBar = uVirtioRegion;
1149 pCfg->uOffset = pVirtio->uNotifyCapOffset = pCommonCfg->uOffset + sizeof(VIRTIO_PCI_COMMON_CFG_T);
1150 pCfg->uLength = sizeof(VIRTIO_PCI_NOTIFY_CAP_T);
1151 pNotifyCap->uNotifyOffMultiplier = uNotifyOffMultiplier;
1152 cbVirtioCaps += pCfg->uLength;
1153
1154 /* Capability will be mapped via VirtIO 1.0: uint8_t (VIRTIO_PCI_ISR_CAP_T) */
1155 pCfg = pIsrCap = (PVIRTIO_PCI_CAP_T)&pVirtio->dev.abConfig[uCfgCapOffset];
1156 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1157 pCfg->uCapNext = (uint8_t)(fMsiSupport || cbDevSpecificCap ? (uCfgCapOffset += sizeof(VIRTIO_PCI_ISR_CAP_T)) : 0);
1158 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
1159 pCfg->uCfgType = VIRTIO_PCI_CAP_ISR_CFG;
1160 pCfg->uBar = uVirtioRegion;
1161 pCfg->uOffset = pVirtio->uIsrCapOffset = pNotifyCap->pciCap.uOffset + sizeof(VIRTIO_PCI_NOTIFY_CAP_T);
1162 pCfg->uLength = sizeof(VIRTIO_PCI_ISR_CAP_T);
1163 cbVirtioCaps += pCfg->uLength;
1164
1165#ifdef HAVE_VIRTIO_DEVICE_SPECIFIC_CAP
1166 /* Capability will be mapped via VirtIO 1.0: struct virtio_pci_dev_cap (VIRTIODEVCAP)*/
1167 pCfg = pDeviceCap = (PVIRTIO_PCI_CAP_T)&pVirtio->dev.abConfig[uCfgCapOffset];
1168 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1169 pCfg->uCapNext = (uint8_t)(fMsiSupport ? (uCfgCapOffset += sizeof(VIRTIO_PCI_CAP_T)) : 0);
1170 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
1171 pCfg->uCfgType = VIRTIO_PCI_CAP_DEVICE_CFG;
1172 pCfg->uBar = uVirtioRegion;
1173 pCfg->uOffset = uDeviceCapOffset->uOffset + sizeof(VIRTIO_PCI_ISR_CAP_T);
1174 pCfg->uLength = cbDevSpecificCap;
1175 cbVirtioCaps += pCfg->uLength;
1176#endif
1177
1178 /* Set offset to first capability and enable PCI dev capabilities */
1179 PCIDevSetCapabilityList (&pVirtio->dev, 0x40);
1180 PCIDevSetStatus (&pVirtio->dev, VBOX_PCI_STATUS_CAP_LIST);
1181
1182 if (fMsiSupport)
1183 {
1184 PDMMSIREG aMsiReg;
1185 RT_ZERO(aMsiReg);
1186 aMsiReg.iMsixCapOffset = uCfgCapOffset;
1187 aMsiReg.iMsixNextOffset = 0;
1188 aMsiReg.iMsixBar = 0;
1189 aMsiReg.cMsixVectors = 1;
1190 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &aMsiReg); /* see MsixR3init() */
1191 if (RT_FAILURE (rc))
1192 /* PK TODO: The following is moot, we need to flag no MSI-X support */
1193 PCIDevSetCapabilityList(&pVirtio->dev, 0x40);
1194 }
1195/*
1196 PDMDevHlpPCISetConfigCallbacks(pDevIns, &pVirtio->dev,
1197 virtioPciConfigRead, &pVirtio->pfnPciConfigReadOld,
1198 virtioPciConfigWrite, &pVirtio->pfnPciConfigWriteOld);
1199*/
1200 rc = PDMDevHlpPCIIORegionRegister(pDevIns, uVirtioRegion, cbVirtioCaps,
1201 PCI_ADDRESS_SPACE_MEM, virtioR3Map);
1202 if (RT_FAILURE(rc))
1203 return PDMDEV_SET_ERROR(pDevIns, rc,
1204 N_("virtio: cannot register PCI Capabilities address space"));
1205
1206 return rc;
1207}
1208
1209#endif /* IN_RING3 */
1210
1211#endif /* VBOX_DEVICE_STRUCT_TESTCASE */
1212
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette