1 | /* $Id: Virtio_1_0.cpp 79973 2019-07-25 08:10:25Z vboxsync $ */
|
---|
2 | /** @file
|
---|
3 | * Virtio_1_0 - Virtio Common Functions (VirtQueue, VQueue, Virtio PCI)
|
---|
4 | */
|
---|
5 |
|
---|
6 | /*
|
---|
7 | * Copyright (C) 2009-2019 Oracle Corporation
|
---|
8 | *
|
---|
9 | * This file is part of VirtualBox Open Source Edition (OSE), as
|
---|
10 | * available from http://www.virtualbox.org. This file is free software;
|
---|
11 | * you can redistribute it and/or modify it under the terms of the GNU
|
---|
12 | * General Public License (GPL) as published by the Free Software
|
---|
13 | * Foundation, in version 2 as it comes in the "COPYING" file of the
|
---|
14 | * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
|
---|
15 | * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
|
---|
16 | */
|
---|
17 |
|
---|
18 |
|
---|
19 | /*********************************************************************************************************************************
|
---|
20 | * Header Files *
|
---|
21 | *********************************************************************************************************************************/
|
---|
22 | #define LOG_GROUP LOG_GROUP_DEV_VIRTIO
|
---|
23 |
|
---|
24 | #include <VBox/log.h>
|
---|
25 | #include <iprt/param.h>
|
---|
26 | #include <iprt/assert.h>
|
---|
27 | #include <iprt/uuid.h>
|
---|
28 | #include <VBox/vmm/pdmdev.h>
|
---|
29 | #include "Virtio_1_0.h"
|
---|
30 |
|
---|
31 | #define INSTANCE(pState) pState->szInstance
|
---|
32 | #define IFACE_TO_STATE(pIface, ifaceName) ((VIRTIOSTATE *)((char*)(pIface) - RT_UOFFSETOF(VIRTIOSTATE, ifaceName)))
|
---|
33 |
|
---|
34 | #ifdef LOG_ENABLED
|
---|
35 | # define QUEUENAME(s, q) (q->pcszName)
|
---|
36 | #endif
|
---|
37 |
|
---|
38 | #ifdef VBOX_DEVICE_STRUCT_TESTCASE
|
---|
39 | # define virtioDumpState(x, s) do {} while (0)
|
---|
40 | #else
|
---|
# ifdef DEBUG /* This still needs to be migrated to VirtIO 1.0 */
/**
 * Debug aid: dump the interesting parts of the virtio device state to the log.
 *
 * Currently a stub that discards its arguments; see the TODO below for the
 * intended content.  Marked unused so DEBUG builds don't warn while it is
 * not yet wired up.
 *
 * @param pState      The virtio device state to dump.
 * @param pcszCaller  Name of the calling function (for log context).
 */
__attribute__((unused))
static void virtioDumpState(PVIRTIOSTATE pState, const char *pcszCaller)
{
    RT_NOREF2(pState, pcszCaller);
    /* PK TODO, dump state features, selector, status, ISR, queue info (iterate),
         descriptors, avail, used, size, indices, address
         each by variable name on new line, indented slightly */
}
#endif
|
---|
51 |
|
---|
52 |
|
---|
/**
 * Read one descriptor table entry from guest physical memory.
 *
 * @param pState      The virtio device state (provides the device instance).
 * @param pVirtQueue  The virtqueue whose descriptor table is read.
 * @param idx         Descriptor index; wrapped modulo the queue size.
 * @param pDesc       Where to store the descriptor read from the guest.
 */
void virtQueueReadDesc(PVIRTIOSTATE pState, PVIRTQUEUE pVirtQueue, uint32_t idx, PVIRTQUEUEDESC pDesc)
{
    //Log(("%s virtQueueReadDesc: ring=%p idx=%u\n", INSTANCE(pState), pVirtQueue, idx));
    /* NOTE(review): cbQueue is used as an entry count here (and in the other
       ring accessors), not a byte count despite the 'cb' prefix -- confirm
       the field's intended unit against its declaration. */
    PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
                      pVirtQueue->pGcPhysVirtqDescriptors + sizeof(VIRTQUEUEDESC) * (idx % pVirtQueue->cbQueue),
                      pDesc, sizeof(VIRTQUEUEDESC));
}
|
---|
60 |
|
---|
61 | uint16_t virtQueueReadAvail(PVIRTIOSTATE pState, PVIRTQUEUE pVirtQueue, uint32_t idx)
|
---|
62 | {
|
---|
63 | uint16_t tmp;
|
---|
64 | PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
|
---|
65 | pVirtQueue->pGcPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQUEUEAVAIL, auRing[idx % pVirtQueue->cbQueue]),
|
---|
66 | &tmp, sizeof(tmp));
|
---|
67 | return tmp;
|
---|
68 | }
|
---|
69 |
|
---|
70 | uint16_t virtQueueReadAvailFlags(PVIRTIOSTATE pState, PVIRTQUEUE pVirtQueue)
|
---|
71 | {
|
---|
72 | uint16_t tmp;
|
---|
73 | PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
|
---|
74 | pVirtQueue->pGcPhysVirtqAvail + RT_UOFFSETOF(VIRTQUEUEAVAIL, fFlags),
|
---|
75 | &tmp, sizeof(tmp));
|
---|
76 | return tmp;
|
---|
77 | }
|
---|
78 |
|
---|
79 | uint16_t virtQueueReadUsedIndex(PVIRTIOSTATE pState, PVIRTQUEUE pVirtQueue)
|
---|
80 | {
|
---|
81 | uint16_t tmp;
|
---|
82 | PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
|
---|
83 | pVirtQueue->pGcPhysVirtqUsed + RT_UOFFSETOF(VIRTQUEUEUSED, uIdx),
|
---|
84 | &tmp, sizeof(tmp));
|
---|
85 | return tmp;
|
---|
86 | }
|
---|
87 |
|
---|
88 | void virtQueueWriteUsedIndex(PVIRTIOSTATE pState, PVIRTQUEUE pVirtQueue, uint16_t u16Value)
|
---|
89 | {
|
---|
90 | PDMDevHlpPCIPhysWrite(pState->CTX_SUFF(pDevIns),
|
---|
91 | pVirtQueue->pGcPhysVirtqAvail + RT_UOFFSETOF(VIRTQUEUEUSED, uIdx),
|
---|
92 | &u16Value, sizeof(u16Value));
|
---|
93 | }
|
---|
94 |
|
---|
/**
 * Write one element (descriptor id + written length) into the used ring.
 *
 * Stub: currently a no-op; the legacy implementation is preserved in the
 * comment below and still needs to be adapted to the VirtIO 1.0 layout.
 *
 * @param pState      The virtio device state.
 * @param pVirtQueue  The virtqueue whose used ring would be updated.
 * @param idx         Used ring position (would wrap modulo queue size).
 * @param id          Head descriptor index of the completed chain.
 * @param uLen        Number of bytes written into the chain's IN buffers.
 */
void virtQueueWriteUsedElem(PVIRTIOSTATE pState, PVIRTQUEUE pVirtQueue, uint32_t idx, uint32_t id, uint32_t uLen)
{

    RT_NOREF5(pState, pVirtQueue, idx, id, uLen);
    /* PK TODO: Adapt to VirtIO 1.0
    VIRTQUEUEUSEDELEM elem;

    elem.id = id;
    elem.uLen = uLen;
    PDMDevHlpPCIPhysWrite(pState->CTX_SUFF(pDevIns),
                          pVirtQueue->pGcPhysVirtqUsed + RT_UOFFSETOF_DYN(VIRTQUEUEUSED, ring[idx % pVirtQueue->cbQueue]),
                          &elem, sizeof(elem));
    */
}
|
---|
109 |
|
---|
/**
 * Enable or disable guest->host notifications for a virtqueue.
 *
 * Stub: currently a no-op; the legacy implementation is preserved in the
 * comment below and still needs to be adapted to the VirtIO 1.0 layout.
 *
 * NOTE(review): the commented legacy code applies VIRTQUEUEUSED offsets to
 * pGcPhysVirtqAvail (the available ring's address); double-check which ring
 * holds the NO_NOTIFY flag when porting this.
 *
 * @param pState      The virtio device state.
 * @param pVirtQueue  The virtqueue to configure.
 * @param fEnabled    True to allow notifications, false to suppress them.
 */
void virtQueueSetNotification(PVIRTIOSTATE pState, PVIRTQUEUE pVirtQueue, bool fEnabled)
{
    RT_NOREF3(pState, pVirtQueue, fEnabled);

    /* PK TODO: Adapt to VirtIO 1.0
    uint16_t tmp;

    PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
                      pVirtQueue->pGcPhysVirtqAvail + RT_UOFFSETOF(VIRTQUEUEUSED, uFlags),
                      &tmp, sizeof(tmp));

    if (fEnabled)
        tmp &= ~ VIRTQUEUEUSED_F_NO_NOTIFY;
    else
        tmp |= VIRTQUEUEUSED_F_NO_NOTIFY;

    PDMDevHlpPCIPhysWrite(pState->CTX_SUFF(pDevIns),
                          pVirtQueue->pGcPhysVirtqAvail + RT_UOFFSETOF(VIRTQUEUEUSED, uFlags),
                          &tmp, sizeof(tmp));
    */
}
|
---|
131 |
|
---|
/**
 * Skip the next available descriptor chain without processing it.
 *
 * Stub: currently performs no work and unconditionally reports success;
 * the legacy implementation (including the empty-queue check) is preserved
 * in the comment below and still needs to be adapted to VirtIO 1.0.
 *
 * @returns true (always, while stubbed); the legacy code returned false
 *          when the queue was empty.
 * @param pState  The virtio device state.
 * @param pQueue  The queue whose next available entry would be skipped.
 */
bool virtQueueSkip(PVIRTIOSTATE pState, PVQUEUE pQueue)
{

    RT_NOREF2(pState, pQueue);
    /* PK TODO Adapt to VirtIO 1.0
    if (virtQueueIsEmpty(pState, pQueue))
        return false;

    Log2(("%s virtQueueSkip: %s avail_idx=%u\n", INSTANCE(pState),
          QUEUENAME(pState, pQueue), pQueue->uNextAvailIndex));
    pQueue->uNextAvailIndex++;
    */
    return true;
}
|
---|
146 |
|
---|
/**
 * Fetch the next available descriptor chain into a host-side element.
 *
 * Stub: currently performs no work and unconditionally reports success; the
 * legacy implementation is preserved in the comment below and still needs to
 * be adapted to VirtIO 1.0.  Note that the legacy code contains the loop
 * guard against malicious descriptor loops (@bugref{8620}), which must be
 * retained when porting.
 *
 * @returns true (always, while stubbed); the legacy code returned false
 *          when the queue was empty.
 * @param pState   The virtio device state.
 * @param pQueue   The queue to fetch from.
 * @param pElem    Where the gathered IN/OUT segments would be stored.
 * @param fRemove  Whether to consume the entry (advance the avail index).
 */
bool virtQueueGet(PVIRTIOSTATE pState, PVQUEUE pQueue, PVQUEUEELEM pElem, bool fRemove)
{

    RT_NOREF4(pState, pQueue, pElem, fRemove);

    /* PK TODO: Adapt to VirtIO 1.0
    if (virtQueueIsEmpty(pState, pQueue))
        return false;

    pElem->nIn = pElem->nOut = 0;

    Log2(("%s virtQueueGet: %s avail_idx=%u\n", INSTANCE(pState),
          QUEUENAME(pState, pQueue), pQueue->uNextAvailIndex));

    VIRTQUEUEDESC desc;
    uint16_t  idx = virtQueueReadAvail(pState, &pQueue->VirtQueue, pQueue->uNextAvailIndex);
    if (fRemove)
        pQueue->uNextAvailIndex++;
    pElem->idx = idx;
    do
    {
        VQUEUESEG *pSeg;

        //
        // Malicious guests may try to trick us into writing beyond aSegsIn or
        // aSegsOut boundaries by linking several descriptors into a loop. We
        // cannot possibly get a sequence of linked descriptors exceeding the
        // total number of descriptors in the ring (see @bugref{8620}).
        ///
        if (pElem->nIn + pElem->nOut >= VIRTQUEUE_MAX_SIZE)
        {
            static volatile uint32_t s_cMessages  = 0;
            static volatile uint32_t s_cThreshold = 1;
            if (ASMAtomicIncU32(&s_cMessages) == ASMAtomicReadU32(&s_cThreshold))
            {
                LogRel(("%s: too many linked descriptors; check if the guest arranges descriptors in a loop.\n",
                        INSTANCE(pState)));
                if (ASMAtomicReadU32(&s_cMessages) != 1)
                    LogRel(("%s: (the above error has occured %u times so far)\n",
                            INSTANCE(pState), ASMAtomicReadU32(&s_cMessages)));
                ASMAtomicWriteU32(&s_cThreshold, ASMAtomicReadU32(&s_cThreshold) * 10);
            }
            break;
        }
        RT_UNTRUSTED_VALIDATED_FENCE();

        virtQueueReadDesc(pState, &pQueue->VirtQueue, idx, &desc);
        if (desc.u16Flags & VIRTQUEUEDESC_F_WRITE)
        {
            Log2(("%s virtQueueGet: %s IN  seg=%u desc_idx=%u addr=%p cb=%u\n", INSTANCE(pState),
                  QUEUENAME(pState, pQueue), pElem->nIn, idx, desc.addr, desc.uLen));
            pSeg = &pElem->aSegsIn[pElem->nIn++];
        }
        else
        {
            Log2(("%s virtQueueGet: %s OUT seg=%u desc_idx=%u addr=%p cb=%u\n", INSTANCE(pState),
                  QUEUENAME(pState, pQueue), pElem->nOut, idx, desc.addr, desc.uLen));
            pSeg = &pElem->aSegsOut[pElem->nOut++];
        }

        pSeg->addr = desc.addr;
        pSeg->cb   = desc.uLen;
        pSeg->pv   = NULL;

        idx = desc.next;
    } while (desc.u16Flags & VIRTQUEUEDESC_F_NEXT);

    Log2(("%s virtQueueGet: %s head_desc_idx=%u nIn=%u nOut=%u\n", INSTANCE(pState),
          QUEUENAME(pState, pQueue), pElem->idx, pElem->nIn, pElem->nOut));
    */
    return true;
}
|
---|
219 |
|
---|
220 |
|
---|
221 |
|
---|
/**
 * Return a completed element to the guest via the used ring.
 *
 * Stub: currently a no-op; the legacy implementation (which copies the
 * host-side IN segment buffers back to guest memory and records the element
 * in the used ring) is preserved in the comment below and still needs to be
 * reworked for VirtIO 1.0.
 *
 * @param pState     The virtio device state.
 * @param pQueue     The queue the element came from.
 * @param pElem      The element being completed.
 * @param uTotalLen  Total number of bytes to report as written.
 * @param uReserved  Leading bytes to skip (not copied back); the legacy
 *                   code asserted uReserved < uTotalLen.
 */
void virtQueuePut(PVIRTIOSTATE pState, PVQUEUE pQueue,
                  PVQUEUEELEM pElem, uint32_t uTotalLen, uint32_t uReserved)
{

    RT_NOREF5(pState, pQueue, pElem, uTotalLen, uReserved);

    /* PK TODO Re-work this for VirtIO 1.0
    Log2(("%s virtQueuePut: %s"
          " desc_idx=%u acb=%u (%u)\n",
          INSTANCE(pState), QUEUENAME(pState, pQueue),
          pElem->idx, uTotalLen, uReserved));

    Assert(uReserved < uTotalLen);

    uint32_t cbLen = uTotalLen - uReserved;
    uint32_t cbSkip = uReserved;

    for (unsigned i = 0; i < pElem->nIn && cbLen > 0; ++i)
    {
        if (cbSkip >= pElem->aSegsIn[i].cb) // segment completely skipped?
        {
            cbSkip -= pElem->aSegsIn[i].cb;
            continue;
        }

        uint32_t cbSegLen = pElem->aSegsIn[i].cb - cbSkip;
        if (cbSegLen > cbLen) // last segment only partially used?
            cbSegLen = cbLen;

        //
        // XXX: We should assert pv != NULL, but we need to check and
        // fix all callers first.
        //
        if (pElem->aSegsIn[i].pv != NULL)
        {
            Log2(("%s virtQueuePut: %s"
                  " used_idx=%u seg=%u addr=%p pv=%p cb=%u acb=%u\n",
                  INSTANCE(pState), QUEUENAME(pState, pQueue),
                  pQueue->uNextUsedIndex, i,
                  (void *)pElem->aSegsIn[i].addr, pElem->aSegsIn[i].pv,
                  pElem->aSegsIn[i].cb, cbSegLen));

            PDMDevHlpPCIPhysWrite(pState->CTX_SUFF(pDevIns),
                                  pElem->aSegsIn[i].addr + cbSkip,
                                  pElem->aSegsIn[i].pv,
                                  cbSegLen);
        }

        cbSkip = 0;
        cbLen -= cbSegLen;
    }

    Log2(("%s virtQueuePut: %s"
          " used_idx=%u guest_used_idx=%u id=%u len=%u\n",
          INSTANCE(pState), QUEUENAME(pState, pQueue),
          pQueue->uNextUsedIndex, virtQueueReadUsedIndex(pState, &pQueue->VirtQueue),
          pElem->idx, uTotalLen));

    virtQueueWriteUsedElem(pState, &pQueue->VirtQueue,
                           pQueue->uNextUsedIndex++,
                           pElem->idx, uTotalLen);
    */

}
|
---|
286 |
|
---|
287 |
|
---|
/**
 * Raise an interrupt to notify the guest about queue activity, honoring the
 * guest's interrupt-suppression flags.
 *
 * Stub: currently a no-op; the legacy implementation is preserved in the
 * comment below and still needs to be adapted to VirtIO 1.0.
 *
 * @param pState  The virtio device state.
 * @param pQueue  The queue that has new used entries.
 */
void virtQueueNotify(PVIRTIOSTATE pState, PVQUEUE pQueue)
{

    RT_NOREF2(pState, pQueue);
    /* PK TODO Adapt to VirtIO 1.0
    LogFlow(("%s virtQueueNotify: %s availFlags=%x guestFeatures=%x virtQueue is %sempty\n",
             INSTANCE(pState), QUEUENAME(pState, pQueue),
             virtQueueReadAvailFlags(pState, &pQueue->VirtQueue),
             pState->uGuestFeatures, virtQueueIsEmpty(pState, pQueue)?"":"not "));
    if (!(virtQueueReadAvailFlags(pState, &pQueue->VirtQueue) & VIRTQUEUEAVAIL_F_NO_INTERRUPT)
        || ((pState->uGuestFeatures & VIRTIO_F_NOTIFY_ON_EMPTY) && virtQueueIsEmpty(pState, pQueue)))
    {
        int rc = virtioRaiseInterrupt(pState, VERR_INTERNAL_ERROR, VIRTIO_ISR_QUEUE);
        if (RT_FAILURE(rc))
            Log(("%s virtQueueNotify: Failed to raise an interrupt (%Rrc).\n", INSTANCE(pState), rc));
    }
    */
}
|
---|
306 |
|
---|
/**
 * Publish the host's used index to the guest and notify it.
 *
 * Stub: currently a no-op; the legacy implementation (write used index,
 * then virtQueueNotify) is preserved in the comment below and still needs
 * to be adapted to VirtIO 1.0.
 *
 * @param pState  The virtio device state.
 * @param pQueue  The queue to synchronize with the guest.
 */
void virtQueueSync(PVIRTIOSTATE pState, PVQUEUE pQueue)
{
    RT_NOREF(pState, pQueue);
    /* PK TODO Adapt to VirtIO 1.0
    Log2(("%s virtQueueSync: %s old_used_idx=%u new_used_idx=%u\n", INSTANCE(pState),
          QUEUENAME(pState, pQueue), virtQueueReadUsedIndex(pState, &pQueue->VirtQueue), pQueue->uNextUsedIndex));
    virtQueueWriteUsedIndex(pState, &pQueue->VirtQueue, pQueue->uNextUsedIndex);
    virtQueueNotify(pState, pQueue);
    */
}
|
---|
317 |
|
---|
/**
 * Reset the virtio device to its post-power-on state.
 *
 * Stub: currently a no-op; the legacy implementation (clear negotiated
 * features, selector, status, ISR and reset every queue) is preserved in
 * the comment below and still needs to be adapted to VirtIO 1.0.
 *
 * @param pState  The virtio device state.
 */
void virtioReset(PVIRTIOSTATE pState)
{

    RT_NOREF(pState);
    /* PK TODO Adapt to VirtIO 1.0
    pState->uGuestFeatures = 0;
    pState->uQueueSelector = 0;
    pState->uStatus        = 0;
    pState->uISR           = 0;

    for (unsigned i = 0; i < pState->nQueues; i++)
        virtQueueReset(&pState->Queues[i]);
    */
}
|
---|
332 |
|
---|
333 |
|
---|
/**
 * Raise interrupt.
 *
 * Stub: currently ignores its arguments and reports success; the legacy
 * implementation (set ISR cause bits and assert the PCI IRQ line) is
 * preserved in the comment below and still needs to be adapted to
 * VirtIO 1.0.
 *
 * @returns VINF_SUCCESS (always, while stubbed).
 * @param   pState      The device state structure.
 * @param   rcBusy      Status code to return when the critical section is busy
 *                      (unused while stubbed).
 * @param   u8IntCause  Interrupt cause bit mask to set in PCI ISR port.
 */
int virtioRaiseInterrupt(VIRTIOSTATE *pState, int rcBusy, uint8_t u8IntCause)
{
    RT_NOREF2(pState, u8IntCause);
    RT_NOREF_PV(rcBusy);
    /* PK TODO: Adapt to VirtIO 1.0
    STAM_COUNTER_INC(&pState->StatIntsRaised);
    LogFlow(("%s virtioRaiseInterrupt: u8IntCause=%x\n",
             INSTANCE(pState), u8IntCause));

    pState->uISR |= u8IntCause;
    PDMDevHlpPCISetIrq(pState->CTX_SUFF(pDevIns), 0, 1);
    */
    return VINF_SUCCESS;
}
|
---|
355 |
|
---|
/**
 * Lower interrupt.
 *
 * Deasserts the device's PCI IRQ line (level 0 on INTx# pin 0).  Marked
 * unused until the interrupt plumbing is completed for VirtIO 1.0.
 *
 * @param   pState      The device state structure.
 */
__attribute__((unused))
static void virtioLowerInterrupt(VIRTIOSTATE *pState)
{
    LogFlow(("%s virtioLowerInterrupt\n", INSTANCE(pState)));
    PDMDevHlpPCISetIrq(pState->CTX_SUFF(pDevIns), 0, 0);
}
|
---|
367 |
|
---|
/**
 * Query the host feature bits via the consumer-supplied callback.
 *
 * The VIRTIO_F_NOTIFY_ON_EMPTY addition is currently disabled (commented
 * out), so the callback's result is returned unmodified.
 *
 * @returns Host feature bit mask.
 * @param pState              The virtio device state, passed to the callback.
 * @param pfnGetHostFeatures  Consumer callback returning its feature bits.
 */
DECLINLINE(uint32_t) virtioGetHostFeatures(PVIRTIOSTATE pState,
                                           PFNGETHOSTFEATURES pfnGetHostFeatures)
{
    return pfnGetHostFeatures(pState) /*| VIRTIO_F_NOTIFY_ON_EMPTY */;
}
|
---|
373 |
|
---|
374 |
|
---|
375 | #ifdef IN_RING3
|
---|
376 |
|
---|
377 | /**
|
---|
378 | * Gets the pointer to the status LED of a unit.
|
---|
379 | *
|
---|
380 | * @returns VBox status code.
|
---|
381 | * @param pInterface Pointer to the interface structure.
|
---|
382 | * @param iLUN The unit which status LED we desire.
|
---|
383 | * @param ppLed Where to store the LED pointer.
|
---|
384 | * @thread EMT
|
---|
385 | */
|
---|
386 | static DECLCALLBACK(int) virtioQueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
|
---|
387 | {
|
---|
388 | VIRTIOSTATE *pState = IFACE_TO_STATE(pInterface, ILeds);
|
---|
389 | int rc = VERR_PDM_LUN_NOT_FOUND;
|
---|
390 |
|
---|
391 | if (iLUN == 0)
|
---|
392 | {
|
---|
393 | *ppLed = &pState->led;
|
---|
394 | rc = VINF_SUCCESS;
|
---|
395 | }
|
---|
396 | return rc;
|
---|
397 | }
|
---|
398 |
|
---|
399 | /**
|
---|
400 | * Turns on/off the write status LED.
|
---|
401 | *
|
---|
402 | * @returns VBox status code.
|
---|
403 | * @param pState Pointer to the device state structure.
|
---|
404 | * @param fOn New LED state.
|
---|
405 | */
|
---|
406 | void virtioSetWriteLed(PVIRTIOSTATE pState, bool fOn)
|
---|
407 | {
|
---|
408 | LogFlow(("%s virtioSetWriteLed: %s\n", INSTANCE(pState), fOn ? "on" : "off"));
|
---|
409 | if (fOn)
|
---|
410 | pState->led.Asserted.s.fWriting = pState->led.Actual.s.fWriting = 1;
|
---|
411 | else
|
---|
412 | pState->led.Actual.s.fWriting = fOn;
|
---|
413 | }
|
---|
414 |
|
---|
415 | /**
|
---|
416 | * Turns on/off the read status LED.
|
---|
417 | *
|
---|
418 | * @returns VBox status code.
|
---|
419 | * @param pState Pointer to the device state structure.
|
---|
420 | * @param fOn New LED state.
|
---|
421 | */
|
---|
422 | void virtioSetReadLed(PVIRTIOSTATE pState, bool fOn)
|
---|
423 | {
|
---|
424 | LogFlow(("%s virtioSetReadLed: %s\n", INSTANCE(pState), fOn ? "on" : "off"));
|
---|
425 | if (fOn)
|
---|
426 | pState->led.Asserted.s.fReading = pState->led.Actual.s.fReading = 1;
|
---|
427 | else
|
---|
428 | pState->led.Actual.s.fReading = fOn;
|
---|
429 | }
|
---|
430 |
|
---|
431 |
|
---|
/**
 * Saves the state of device.
 *
 * Stub: currently writes nothing and returns success; see the TODO below
 * for what must be serialized.
 *
 * @returns VBox status code.
 * @param   pState          The virtio device state.
 * @param   pSSM            The handle to the saved state.
 */
int virtioSaveExec(PVIRTIOSTATE pState, PSSMHANDLE pSSM)
{
    RT_NOREF2(pState, pSSM);
    int rc = VINF_SUCCESS;
//    virtioDumpState(pState, "virtioSaveExec");
    /*
     * PK TODO save guest features, queue selector, status, ISR,
     *         and per queue info (size, address, indices)...
     *         using calls like SSMR3PutU8(), SSMR3PutU16(), SSMR3PutU16()...
     *         and AssertRCReturn(rc, rc)
     */

    return rc;
}
|
---|
453 |
|
---|
/**
 * Loads a saved device state.
 *
 * Stub: currently reads nothing and returns success; see the TODO below.
 *
 * @returns VBox status code.
 * @param   pState          The virtio device state.
 * @param   pSSM            The handle to the saved state.
 * @param   uVersion        The data unit version number.
 * @param   uPass           The data pass.
 * @param   nQueues         Number of queues expected in the saved state.
 */
int virtioLoadExec(PVIRTIOSTATE pState, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass, uint32_t nQueues)
{
    RT_NOREF5(pState, pSSM, uVersion, uPass, nQueues);
    int rc = VINF_SUCCESS;

    /*
     * PK TODO, restore everything saved in virtioSaveExec, using
     *          calls like SSMR3GetU8(), SSMR3GetU16(), SSMR3GetU16()...
     *          and AssertRCReturn(rc, rc)
     */
    if (uPass == SSM_PASS_FINAL)
    {
    }

//    virtioDumpState(pState, "virtioLoadExec");

    return rc;
}
|
---|
481 |
|
---|
/**
 * Device relocation callback.
 *
 * When this callback is called the device instance data, and if the
 * device have a GC component, is being relocated, or/and the selectors
 * have been changed. The device must use the chance to perform the
 * necessary pointer relocations and data updates.
 *
 * Before the GC code is executed the first time, this function will be
 * called with a 0 delta so GC pointer calculations can be one in one place.
 *
 * @param   pDevIns     Pointer to the device instance.
 * @param   offDelta    The relocation delta relative to the old location.
 *
 * @remark  A relocation CANNOT fail.
 */
void virtioRelocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
{
    RT_NOREF(offDelta);
    VIRTIOSTATE *pState = PDMINS_2_DATA(pDevIns, VIRTIOSTATE*);
    /* Only the raw-mode context pointer needs refreshing so far. */
    pState->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
    // TBD
}
|
---|
505 |
|
---|
/**
 * Register a virtqueue with the device.
 *
 * Stub: currently ignores its arguments and returns NULL; the legacy
 * implementation (find a free slot and initialize it) is preserved in the
 * comment below and still needs to be adapted to VirtIO 1.0.  Callers must
 * therefore currently expect a NULL return.
 *
 * @returns NULL (always, while stubbed); the legacy code returned the
 *          allocated queue slot, or NULL if all slots were in use.
 * @param pState       The virtio device state.
 * @param cbQueue      Queue size (entries, per the legacy usage).
 * @param pfnCallback  Consumer callback invoked on queue notification.
 * @param pcszName     Queue name for logging (not copied).
 */
PVQUEUE virtioAddQueue(VIRTIOSTATE* pState, unsigned cbQueue, PFNVIRTIOQUEUECALLBACK pfnCallback, const char *pcszName)
{

    RT_NOREF4(pState, cbQueue, pfnCallback, pcszName);
    /* PK TODO Adapt to VirtIO 1.0

    PVQUEUE pQueue = NULL;
    // Find an empty queue slot
    for (unsigned i = 0; i < pState->nQueues; i++)
    {
        if (pState->Queues[i].VirtQueue.cbQueue == 0)
        {
            pQueue = &pState->Queues[i];
            break;
        }
    }

    if (!pQueue)
    {
        Log(("%s Too many queues being added, no empty slots available!\n", INSTANCE(pState)));
    }
    else
    {
        pQueue->VirtQueue.cbQueue = cbQueue;
        pQueue->VirtQueue.addrDescriptors = 0;
        pQueue->uPageNumber = 0;
        pQueue->pfnCallback = pfnCallback;
        pQueue->pcszName = pcszName;
    }
    return pQueue;
    */
    return NULL;// Temporary
}
|
---|
539 |
|
---|
540 |
|
---|
/**
 * Reset a single queue slot to its unconfigured state.
 *
 * Stub: currently a no-op; the legacy implementation is preserved in the
 * comment below.  Marked unused until virtioReset() calls it again.
 *
 * @param pQueue  The queue to reset.
 */
__attribute__((unused))
static void virtQueueReset(PVQUEUE pQueue)
{
    RT_NOREF(pQueue);
    /* PK TODO Adapt to VirtIO 1.0
    pQueue->VirtQueue.addrDescriptors = 0;
    pQueue->VirtQueue.addrAvail       = 0;
    pQueue->VirtQueue.addrUsed        = 0;
    pQueue->uNextAvailIndex           = 0;
    pQueue->uNextUsedIndex            = 0;
    pQueue->uPageNumber               = 0;
    */
}
|
---|
554 |
|
---|
/**
 * Compute a queue's ring addresses from a guest-supplied page number
 * (legacy virtio PFN layout).
 *
 * Stub: currently a no-op; the legacy implementation is preserved in the
 * comment below and must be re-worked for VirtIO 1.0, where the driver
 * supplies the three ring addresses individually instead of one PFN.
 *
 * @param pQueue       The queue to initialize.
 * @param uPageNumber  Guest page number of the descriptor table (legacy).
 */
__attribute__((unused))
static void virtQueueInit(PVQUEUE pQueue, uint32_t uPageNumber)
{
    RT_NOREF2(pQueue, uPageNumber);

    /* PK TODO, re-work this for VirtIO 1.0
    pQueue->VirtQueue.addrDescriptors = (uint64_t)uPageNumber << PAGE_SHIFT;

    pQueue->VirtQueue.addrAvail = pQueue->VirtQueue.addrDescriptors
                                + sizeof(VIRTQUEUEDESC) * pQueue->VirtQueue.cbQueue;

    pQueue->VirtQueue.addrUsed  = RT_ALIGN(pQueue->VirtQueue.addrAvail
            + RT_UOFFSETOF_DYN(VIRTQUEUEAVAIL, ring[pQueue->VirtQueue.cbQueue])
            + sizeof(uint16_t), // virtio 1.0 adds a 16-bit field following ring data
              PAGE_SIZE); // The used ring must start from the next page.

    pQueue->uNextAvailIndex = 0;
    pQueue->uNextUsedIndex  = 0;
    */

}
|
---|
576 |
|
---|
577 |
|
---|
578 | /**
|
---|
579 | * Memory mapped I/O Handler for PCI Capabilities read operations.
|
---|
580 | *
|
---|
581 | * @returns VBox status code.
|
---|
582 | *
|
---|
583 | * @param pDevIns The device instance.
|
---|
584 | * @param pvUser User argument.
|
---|
585 | * @param GCPhysAddr Physical address (in GC) where the read starts.
|
---|
586 | * @param pv Where to store the result.
|
---|
587 | * @param cb Number of bytes read.
|
---|
588 | */
|
---|
589 | PDMBOTHCBDECL(int) virtioPciCapMemRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
|
---|
590 | {
|
---|
591 | /* PK New feature! */
|
---|
592 |
|
---|
593 | RT_NOREF(pvUser);
|
---|
594 | int rc = VINF_SUCCESS;
|
---|
595 |
|
---|
596 | LogFunc(("Read (MMIO) VirtIO capabilities\n"));
|
---|
597 | PVIRTIOSTATE pThis = PDMINS_2_DATA(pDevIns, PVIRTIOSTATE);
|
---|
598 |
|
---|
599 | /* TBD: This should be called only if VIRTIO_PCI_CAP_DEVICE_CFG capability is being accessed */
|
---|
600 | rc = pThis->pfnVirtioDevCapRead(pDevIns, GCPhysAddr, pv, cb);
|
---|
601 | return rc;
|
---|
602 | }
|
---|
603 |
|
---|
604 |
|
---|
605 | /**
|
---|
606 | * Memory mapped I/O Handler for PCI Capabilities write operations.
|
---|
607 | *
|
---|
608 | * @returns VBox status code.
|
---|
609 | *
|
---|
610 | * @param pDevIns The device instance.
|
---|
611 | * @param pvUser User argument.
|
---|
612 | * @param GCPhysAddr Physical address (in GC) where the write starts.
|
---|
613 | * @param pv Where to fetch the result.
|
---|
614 | * @param cb Number of bytes to write.
|
---|
615 | */
|
---|
616 | PDMBOTHCBDECL(int) virtioPciCapMemWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
|
---|
617 | {
|
---|
618 |
|
---|
619 | /* PK New feature! */
|
---|
620 | RT_NOREF(pvUser);
|
---|
621 | int rc = VINF_SUCCESS;
|
---|
622 | LogFunc(("Write (MMIO) Virtio capabilities\n"));
|
---|
623 | PVIRTIOSTATE pThis = PDMINS_2_DATA(pDevIns, PVIRTIOSTATE);
|
---|
624 |
|
---|
625 | /* TBD: This should be called only if VIRTIO_PCI_CAP_DEVICE_CFG capability is being accessed */
|
---|
626 | rc = pThis->pfnVirtioDevCapWrite(pDevIns, GCPhysAddr, pv,cb);
|
---|
627 |
|
---|
628 | return rc;
|
---|
629 | }
|
---|
630 |
|
---|
631 | /**
|
---|
632 | * @callback_method_impl{FNPCIIOREGIONMAP}
|
---|
633 | */
|
---|
634 | static DECLCALLBACK(int) virtioR3Map(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t iRegion,
|
---|
635 | RTGCPHYS GCPhysAddress, RTGCPHYS cb, PCIADDRESSSPACE enmType)
|
---|
636 | {
|
---|
637 | RT_NOREF3(pPciDev, iRegion, enmType);
|
---|
638 | PVIRTIOSTATE pThis = PDMINS_2_DATA(pDevIns, PVIRTIOSTATE);
|
---|
639 | int rc = VINF_SUCCESS;
|
---|
640 |
|
---|
641 | Assert(cb >= 32);
|
---|
642 |
|
---|
643 | LogFunc(("virtIO controller PCI Capabilities mapped at GCPhysAddr=%RGp cb=%RGp\n", GCPhysAddress, cb));
|
---|
644 |
|
---|
645 | /* We use the assigned size here, because we currently only support page aligned MMIO ranges. */
|
---|
646 | rc = PDMDevHlpMMIORegister(pDevIns, GCPhysAddress, cb, NULL /*pvUser*/,
|
---|
647 | IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
|
---|
648 | virtioPciCapMemWrite, virtioPciCapMemRead,
|
---|
649 | "virtio-scsi MMIO");
|
---|
650 | pThis->GCPhysPciCapBase = RT_SUCCESS(rc) ? GCPhysAddress : 0;
|
---|
651 | return rc;
|
---|
652 | }
|
---|
653 |
|
---|
654 |
|
---|
/**
 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
 *
 * Exposes the IBase and ILeds interfaces; returns NULL for anything else.
 */
void *virtioQueryInterface(struct PDMIBASE *pInterface, const char *pszIID)
{
    VIRTIOSTATE *pThis = IFACE_TO_STATE(pInterface, IBase);
    Assert(&pThis->IBase == pInterface);

    PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThis->IBase);
    PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThis->ILeds);
    return NULL;
}
|
---|
667 |
|
---|
668 |
|
---|
/**
 * Destruct PCI-related part of device.
 *
 * We need to free non-VM resources only.  Nothing is currently allocated,
 * so this only logs and reports success.
 *
 * @returns VBox status code.
 * @param   pState      The device state structure.
 */
int virtioDestruct(VIRTIOSTATE* pState)
{
    Log(("%s Destroying PCI instance\n", INSTANCE(pState)));

    return VINF_SUCCESS;
}
|
---|
683 |
|
---|
684 | /** PK (temp note to self):
|
---|
685 | *
|
---|
686 | * Device needs to negotiate capabilities,
|
---|
687 | * then get queue size address information from driver.
|
---|
688 | *
|
---|
689 | * Still need consumer to pass in:
|
---|
690 | *
|
---|
691 | * num_queues
|
---|
692 | * config_generation
|
---|
693 | * Needs to manage feature negotiation
|
---|
694 | * That means consumer needs to pass in device-specific feature bits/values
|
---|
695 | * Device has to provide at least one notifier capability
|
---|
696 | *
|
---|
697 | * ISR config value are set by the device (config interrupt vs. queue interrupt)
|
---|
698 | *
|
---|
699 | */
|
---|
700 |
|
---|
701 | /**
|
---|
702 | * Setup PCI device controller and Virtio state
|
---|
703 | *
|
---|
704 | * @param pDevIns Device instance data
|
---|
705 | * @param pVirtio Device State
|
---|
706 | * @param iInstance Instance number
|
---|
707 | * @param pPciParams Values to populate industry standard PCI Configuration Space data structure
|
---|
708 | * @param pcszNameFmt Device instance name (format-specifier)
|
---|
709 | * @param nQueues Number of Virtio Queues created by consumer (driver)
|
---|
710 | * @param uVirtioRegion Region number to map for PCi Capabilities structs
|
---|
711 | * @param devCapReadCallback Client function to call back to handle device specific capabilities
|
---|
712 | * @param devCapWriteCallback Client function to call back to handle device specific capabilities
|
---|
713 | * @param cbDevSpecificCap Size of device specific struct
|
---|
714 | */
|
---|
715 | int virtioConstruct(PPDMDEVINS pDevIns, PVIRTIOSTATE pVirtio, int iInstance,
|
---|
716 | PVIRTIOPCIPARAMS pPciParams, const char *pcszNameFmt,
|
---|
717 | uint32_t nQueues, uint32_t uVirtioRegion,
|
---|
718 | PFNVIRTIODEVCAPREAD devCapReadCallback, PFNVIRTIODEVCAPWRITE devCapWriteCallback,
|
---|
719 | uint16_t cbDevSpecificCap)
|
---|
720 | {
|
---|
721 | RT_NOREF(nQueues);
|
---|
722 |
|
---|
723 | /* Init handles and log related stuff. */
|
---|
724 | RTStrPrintf(pVirtio->szInstance, sizeof(pVirtio->szInstance), pcszNameFmt, iInstance);
|
---|
725 |
|
---|
726 | pVirtio->pDevInsR3 = pDevIns;
|
---|
727 | pVirtio->pDevInsR0 = PDMDEVINS_2_R0PTR(pDevIns);
|
---|
728 | pVirtio->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
|
---|
729 | pVirtio->led.u32Magic = PDMLED_MAGIC;
|
---|
730 | pVirtio->ILeds.pfnQueryStatusLed = virtioQueryStatusLed;
|
---|
731 | pVirtio->pfnVirtioDevCapRead = devCapReadCallback;
|
---|
732 | pVirtio->pfnVirtioDevCapWrite = devCapWriteCallback;
|
---|
733 |
|
---|
734 | /* Set PCI config registers (assume 32-bit mode) */
|
---|
735 | PCIDevSetRevisionId (&pVirtio->dev, DEVICE_PCI_REVISION_ID_VIRTIO);
|
---|
736 | PCIDevSetVendorId (&pVirtio->dev, DEVICE_PCI_VENDOR_ID_VIRTIO);
|
---|
737 | PCIDevSetSubSystemVendorId (&pVirtio->dev, DEVICE_PCI_VENDOR_ID_VIRTIO);
|
---|
738 | PCIDevSetDeviceId (&pVirtio->dev, pPciParams->uDeviceId);
|
---|
739 | PCIDevSetClassBase (&pVirtio->dev, pPciParams->uClassBase);
|
---|
740 | PCIDevSetClassSub (&pVirtio->dev, pPciParams->uClassSub);
|
---|
741 | PCIDevSetClassProg (&pVirtio->dev, pPciParams->uClassProg);
|
---|
742 | PCIDevSetSubSystemId (&pVirtio->dev, pPciParams->uSubsystemId);
|
---|
743 | PCIDevSetInterruptLine (&pVirtio->dev, pPciParams->uInterruptLine);
|
---|
744 | PCIDevSetInterruptPin (&pVirtio->dev, pPciParams->uInterruptPin);
|
---|
745 |
|
---|
746 | int rc = VINF_SUCCESS;
|
---|
747 | /* Register PCI device */
|
---|
748 | rc = PDMDevHlpPCIRegister(pDevIns, &pVirtio->dev);
|
---|
749 | if (RT_FAILURE(rc))
|
---|
750 | return PDMDEV_SET_ERROR(pDevIns, rc,
|
---|
751 | N_("virtio: cannot register PCI Device")); /* can we put params in this error? */
|
---|
752 |
|
---|
753 | pVirtio->IBase = pDevIns->IBase;
|
---|
754 |
|
---|
755 | rc = PDMDevHlpPCIIORegionRegister(pDevIns, uVirtioRegion, 32, PCI_ADDRESS_SPACE_MEM, virtioR3Map);
|
---|
756 | if (RT_FAILURE(rc))
|
---|
757 | return PDMDEV_SET_ERROR(pDevIns, rc,
|
---|
758 | N_("virtio: cannot register PCI Capabilities address space")); /* can we put params in this error? */
|
---|
759 |
|
---|
760 | /** Build PCI vendor-specific capabilities list for exchanging
|
---|
761 | * VirtIO device capabilities with driver */
|
---|
762 |
|
---|
763 | uint8_t fMsiSupport = false;
|
---|
764 | #if 0 && defined(VBOX_WITH_MSI_DEVICES) /* T.B.D. */
|
---|
765 | fMsiSupport = true;
|
---|
766 | #endif
|
---|
767 | uint8_t uCfgCapOffset = 0x40;
|
---|
768 | /* Capability will be mapped via VirtIO 1.0: struct virtio_pci_common_cfg (VIRTIOCOMMONCFG)*/
|
---|
769 | PVIRTIOPCICAP pCfg = pVirtio->pCommonCfg = (PVIRTIOPCICAP)&pVirtio->dev.abConfig[uCfgCapOffset];
|
---|
770 | pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
|
---|
771 | pCfg->uCapNext = uCfgCapOffset += sizeof(VIRTIOPCICAP);
|
---|
772 | pCfg->uCfgType = VIRTIO_PCI_CAP_COMMON_CFG;
|
---|
773 | pCfg->uBar = uVirtioRegion;
|
---|
774 | pCfg->uOffset = 0;
|
---|
775 | pCfg->uLength = sizeof(VIRTIOPCICAP);
|
---|
776 |
|
---|
777 | /* Capability will be mapped via VirtIO 1.0: struct virtio_pci_notify_cap (VIRTIONOTIFYCAP)*/
|
---|
778 | pCfg = pVirtio->pNotifyCap = (PVIRTIOPCICAP)&pVirtio->dev.abConfig[uCfgCapOffset];
|
---|
779 | pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
|
---|
780 | pCfg->uCapNext = uCfgCapOffset += sizeof(VIRTIOPCICAP);
|
---|
781 | pCfg->uCfgType = VIRTIO_PCI_CAP_NOTIFY_CFG;
|
---|
782 | pCfg->uBar = uVirtioRegion;
|
---|
783 | pCfg->uOffset = pVirtio->pCommonCfg->uOffset + sizeof(VIRTIOCOMMONCFG);
|
---|
784 | pCfg->uLength = sizeof(VIRTIOPCICAP);
|
---|
785 |
|
---|
786 | /* Capability will be mapped via VirtIO 1.0: uint8_t (VIRTIOISRCAP) */
|
---|
787 | pCfg = pVirtio->pISRCap = (PVIRTIOPCICAP)&pVirtio->dev.abConfig[uCfgCapOffset];
|
---|
788 | pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
|
---|
789 | pCfg->uCapNext = uCfgCapOffset += sizeof(VIRTIOPCICAP);
|
---|
790 | pCfg->uCfgType = VIRTIO_PCI_CAP_ISR_CFG;
|
---|
791 | pCfg->uBar = uVirtioRegion;
|
---|
792 | pCfg->uOffset = pVirtio->pNotifyCap->uOffset + sizeof(VIRTIONOTIFYCAP);
|
---|
793 | pCfg->uLength = sizeof(VIRTIOPCICAP);
|
---|
794 |
|
---|
795 | /* Capability will be mapped via VirtIO 1.0: struct virtio_pci_cfg_cap (VIRTIOPCICAP) */
|
---|
796 | pCfg = pVirtio->pPCICfgCap = (PVIRTIOPCICAP)&pVirtio->dev.abConfig[uCfgCapOffset];
|
---|
797 | pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
|
---|
798 | pCfg->uCapNext = (uint8_t)(fMsiSupport || cbDevSpecificCap ? (uCfgCapOffset += sizeof(VIRTIOPCICAP)): 0);
|
---|
799 | pCfg->uCfgType = VIRTIO_PCI_CAP_PCI_CFG;
|
---|
800 | pCfg->uBar = uVirtioRegion;
|
---|
801 | pCfg->uOffset = pVirtio->pISRCap->uOffset + sizeof(VIRTIOISRCAP);
|
---|
802 | pCfg->uLength = sizeof(VIRTIOPCICAP);
|
---|
803 |
|
---|
804 | if (cbDevSpecificCap)
|
---|
805 | {
|
---|
806 | /* Capability will be mapped via VirtIO 1.0: struct virtio_pci_dev_cap (VIRTIODEVCAP)*/
|
---|
807 | pCfg = pVirtio->pDeviceCap = (PVIRTIOPCICAP)&pVirtio->dev.abConfig[uCfgCapOffset];
|
---|
808 | pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
|
---|
809 | pCfg->uCapNext = (uint8_t)(fMsiSupport ? (uCfgCapOffset += sizeof(VIRTIOPCICAP)) : 0);
|
---|
810 | pCfg->uCfgType = VIRTIO_PCI_CAP_DEVICE_CFG;
|
---|
811 | pCfg->uBar = uVirtioRegion;
|
---|
812 | pCfg->uOffset = pVirtio->pPCICfgCap->uOffset + sizeof(VIRTIOPCICAP);
|
---|
813 | pCfg->uLength = sizeof(VIRTIOPCICAP);
|
---|
814 | }
|
---|
815 |
|
---|
816 | /* Set offset to first capability and enable PCI dev capabilities */
|
---|
817 | PCIDevSetCapabilityList (&pVirtio->dev, 0x40);
|
---|
818 | PCIDevSetStatus (&pVirtio->dev, VBOX_PCI_STATUS_CAP_LIST);
|
---|
819 |
|
---|
820 | if (fMsiSupport)
|
---|
821 | {
|
---|
822 | PDMMSIREG aMsiReg;
|
---|
823 |
|
---|
824 | RT_ZERO(aMsiReg);
|
---|
825 | aMsiReg.iMsixCapOffset = uCfgCapOffset;
|
---|
826 | aMsiReg.iMsixNextOffset = 0;
|
---|
827 | aMsiReg.iMsixBar = 0;
|
---|
828 | aMsiReg.cMsixVectors = 1;
|
---|
829 | rc = PDMDevHlpPCIRegisterMsi(pDevIns, &aMsiReg); /* see MsixR3init() */
|
---|
830 | if (RT_FAILURE (rc))
|
---|
831 | {
|
---|
832 | /* PK TODO: The following is moot, we need to flag no MSI-X support */
|
---|
833 | PCIDevSetCapabilityList(&pVirtio->dev, 0x40);
|
---|
834 | }
|
---|
835 | }
|
---|
836 |
|
---|
837 | /* Status driver */
|
---|
838 | PPDMIBASE pUpstreamBase;
|
---|
839 |
|
---|
840 | rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pVirtio->IBase, &pUpstreamBase, "Status Port");
|
---|
841 | if (RT_FAILURE(rc))
|
---|
842 | return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));
|
---|
843 | pVirtio->pLedsConnector = PDMIBASE_QUERY_INTERFACE(&pVirtio->IBase, PDMILEDCONNECTORS);
|
---|
844 |
|
---|
845 | return rc;
|
---|
846 | }
|
---|
847 |
|
---|
848 |
|
---|
849 |
|
---|
850 | #endif /* IN_RING3 */
|
---|
851 |
|
---|
852 | #endif /* VBOX_DEVICE_STRUCT_TESTCASE */
|
---|
853 |
|
---|