VirtualBox

source: vbox/trunk/src/VBox/Devices/VirtIO/Virtio.cpp@ 86605

Last change on this file since 86605 was 82968, checked in by vboxsync, 5 years ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 34.5 KB
Line 
1/* $Id: Virtio.cpp 82968 2020-02-04 10:35:17Z vboxsync $ */
2/** @file
3 * Virtio - Virtio Common Functions (VRing, VQueue, Virtio PCI)
4 */
5
6/*
7 * Copyright (C) 2009-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEV_VIRTIO
23
24#include <iprt/param.h>
25#include <iprt/uuid.h>
26#include <VBox/vmm/pdmdev.h>
27#include <VBox/AssertGuest.h>
28#include "Virtio.h"
29
30
31/*********************************************************************************************************************************
32* Defined Constants And Macros *
33*********************************************************************************************************************************/
34#define INSTANCE(pThis) (pThis->szInstance)
35
36
37static void vqueueReset(PVQUEUE pQueue)
38{
39 pQueue->VRing.addrDescriptors = 0;
40 pQueue->VRing.addrAvail = 0;
41 pQueue->VRing.addrUsed = 0;
42 pQueue->uNextAvailIndex = 0;
43 pQueue->uNextUsedIndex = 0;
44 pQueue->uPageNumber = 0;
45}
46
/**
 * Initializes a virtqueue from the page number the guest programmed.
 *
 * Lays out the legacy virtio ring within guest memory: the descriptor table
 * starts at the given page, the available ring follows it immediately, and
 * the used ring starts on the next page boundary after the available ring.
 * Both of our ring cursors are reset to zero.
 *
 * @param   pQueue       The queue to initialize; VRing.uSize must be set.
 * @param   uPageNumber  Guest page number of the descriptor table.
 */
static void vqueueInit(PVQUEUE pQueue, uint32_t uPageNumber)
{
    pQueue->VRing.addrDescriptors = (uint64_t)uPageNumber << PAGE_SHIFT;
    pQueue->VRing.addrAvail       = pQueue->VRing.addrDescriptors + sizeof(VRINGDESC) * pQueue->VRing.uSize;
    pQueue->VRing.addrUsed        = RT_ALIGN(pQueue->VRing.addrAvail + RT_UOFFSETOF_DYN(VRINGAVAIL, auRing[pQueue->VRing.uSize]),
                                             PAGE_SIZE); /* The used ring must start from the next page. */
    pQueue->uNextAvailIndex       = 0;
    pQueue->uNextUsedIndex        = 0;
}
56
57// void vqueueElemFree(PVQUEUEELEM pElem)
58// {
59// }
60
/**
 * Reads one descriptor from the ring's descriptor table in guest memory.
 *
 * @param   pDevIns  The device instance.
 * @param   pVRing   The ring to read from.
 * @param   uIndex   Descriptor index; reduced modulo the ring size.
 * @param   pDesc    Where to store the descriptor.
 */
static void vringReadDesc(PPDMDEVINS pDevIns, PVRING pVRing, uint32_t uIndex, PVRINGDESC pDesc)
{
    //Log(("%s vringReadDesc: ring=%p idx=%u\n", INSTANCE(pThis), pVRing, uIndex));
    PDMDevHlpPhysRead(pDevIns,
                      pVRing->addrDescriptors + sizeof(VRINGDESC) * (uIndex % pVRing->uSize),
                      pDesc, sizeof(VRINGDESC));
    /** @todo r=bird: Why exactly are we sometimes using PDMDevHlpPhysRead rather
     *        than PDMDevHlpPCIPhysRead? */
}
70
/**
 * Reads one entry of the available ring from guest memory.
 *
 * @returns The descriptor index stored at the given ring position.
 * @param   pDevIns  The device instance.
 * @param   pVRing   The ring to read from.
 * @param   uIndex   Ring position; reduced modulo the ring size.
 */
static uint16_t vringReadAvail(PPDMDEVINS pDevIns, PVRING pVRing, uint32_t uIndex)
{
    uint16_t tmp = 0;
    PDMDevHlpPhysRead(pDevIns, pVRing->addrAvail + RT_UOFFSETOF_DYN(VRINGAVAIL, auRing[uIndex % pVRing->uSize]),
                      &tmp, sizeof(tmp));
    return tmp;
}
78
79static uint16_t vringReadAvailFlags(PPDMDEVINS pDevIns, PVRING pVRing)
80{
81 uint16_t tmp = 0;
82 PDMDevHlpPhysRead(pDevIns, pVRing->addrAvail + RT_UOFFSETOF(VRINGAVAIL, uFlags), &tmp, sizeof(tmp));
83 return tmp;
84}
85
/**
 * Enables or disables guest notifications for this ring.
 *
 * Performs a read-modify-write of the used ring's flags field in guest
 * memory, setting or clearing VRINGUSED_F_NO_NOTIFY (the guest checks this
 * flag to decide whether to kick the host).
 *
 * @param   pDevIns   The device instance.
 * @param   pVRing    The ring to update.
 * @param   fEnabled  true to request notifications, false to suppress them.
 */
void vringSetNotification(PPDMDEVINS pDevIns, PVRING pVRing, bool fEnabled)
{
    uint16_t fState = 0;
    PDMDevHlpPhysRead(pDevIns, pVRing->addrUsed + RT_UOFFSETOF(VRINGUSED, uFlags), &fState, sizeof(fState));

    if (fEnabled)
        fState &= ~ VRINGUSED_F_NO_NOTIFY;
    else
        fState |= VRINGUSED_F_NO_NOTIFY;

    PDMDevHlpPCIPhysWrite(pDevIns, pVRing->addrUsed + RT_UOFFSETOF(VRINGUSED, uFlags), &fState, sizeof(fState));
}
98
99bool vqueueSkip(PPDMDEVINS pDevIns, PVPCISTATE pThis, PVQUEUE pQueue)
100{
101 if (vqueueIsEmpty(pDevIns, pQueue))
102 return false;
103
104 Log2(("%s vqueueSkip: %s avail_idx=%u\n", INSTANCE(pThis), pQueue->szName, pQueue->uNextAvailIndex));
105 RT_NOREF(pThis);
106 pQueue->uNextAvailIndex++;
107 return true;
108}
109
110bool vqueueGet(PPDMDEVINS pDevIns, PVPCISTATE pThis, PVQUEUE pQueue, PVQUEUEELEM pElem, bool fRemove)
111{
112 if (vqueueIsEmpty(pDevIns, pQueue))
113 return false;
114
115 pElem->cIn = pElem->cOut = 0;
116
117 Log2(("%s vqueueGet: %s avail_idx=%u\n", INSTANCE(pThis), pQueue->szName, pQueue->uNextAvailIndex));
118
119 VRINGDESC desc;
120 uint16_t idx = vringReadAvail(pDevIns, &pQueue->VRing, pQueue->uNextAvailIndex);
121 if (fRemove)
122 pQueue->uNextAvailIndex++;
123 pElem->uIndex = idx;
124 do
125 {
126 VQUEUESEG *pSeg;
127
128 /*
129 * Malicious guests may try to trick us into writing beyond aSegsIn or
130 * aSegsOut boundaries by linking several descriptors into a loop. We
131 * cannot possibly get a sequence of linked descriptors exceeding the
132 * total number of descriptors in the ring (see @bugref{8620}).
133 */
134 if (pElem->cIn + pElem->cOut >= VRING_MAX_SIZE)
135 {
136 static volatile uint32_t s_cMessages = 0;
137 static volatile uint32_t s_cThreshold = 1;
138 if (ASMAtomicIncU32(&s_cMessages) == ASMAtomicReadU32(&s_cThreshold))
139 {
140 LogRel(("%s: too many linked descriptors; check if the guest arranges descriptors in a loop.\n",
141 INSTANCE(pThis)));
142 if (ASMAtomicReadU32(&s_cMessages) != 1)
143 LogRel(("%s: (the above error has occured %u times so far)\n",
144 INSTANCE(pThis), ASMAtomicReadU32(&s_cMessages)));
145 ASMAtomicWriteU32(&s_cThreshold, ASMAtomicReadU32(&s_cThreshold) * 10);
146 }
147 break;
148 }
149 RT_UNTRUSTED_VALIDATED_FENCE();
150
151 vringReadDesc(pDevIns, &pQueue->VRing, idx, &desc);
152 if (desc.u16Flags & VRINGDESC_F_WRITE)
153 {
154 Log2(("%s vqueueGet: %s IN seg=%u desc_idx=%u addr=%p cb=%u\n", INSTANCE(pThis),
155 pQueue->szName, pElem->cIn, idx, desc.u64Addr, desc.uLen));
156 pSeg = &pElem->aSegsIn[pElem->cIn++];
157 }
158 else
159 {
160 Log2(("%s vqueueGet: %s OUT seg=%u desc_idx=%u addr=%p cb=%u\n", INSTANCE(pThis),
161 pQueue->szName, pElem->cOut, idx, desc.u64Addr, desc.uLen));
162 pSeg = &pElem->aSegsOut[pElem->cOut++];
163 }
164
165 pSeg->addr = desc.u64Addr;
166 pSeg->cb = desc.uLen;
167 pSeg->pv = NULL;
168
169 idx = desc.u16Next;
170 } while (desc.u16Flags & VRINGDESC_F_NEXT);
171
172 Log2(("%s vqueueGet: %s head_desc_idx=%u nIn=%u nOut=%u\n", INSTANCE(pThis),
173 pQueue->szName, pElem->uIndex, pElem->cIn, pElem->cOut));
174 return true;
175}
176
#ifdef LOG_ENABLED
/**
 * Reads the guest-visible used-ring index back from guest memory.
 * Only compiled for logging builds; used by Log2 statements elsewhere.
 *
 * @returns The used index as currently stored in guest memory.
 * @param   pDevIns  The device instance.
 * @param   pVRing   The ring to read from.
 */
static uint16_t vringReadUsedIndex(PPDMDEVINS pDevIns, PVRING pVRing)
{
    uint16_t tmp = 0;
    PDMDevHlpPhysRead(pDevIns, pVRing->addrUsed + RT_UOFFSETOF(VRINGUSED, uIndex), &tmp, sizeof(tmp));
    return tmp;
}
#endif
185
186static void vringWriteUsedIndex(PPDMDEVINS pDevIns, PVRING pVRing, uint16_t u16Value)
187{
188 PDMDevHlpPCIPhysWrite(pDevIns,
189 pVRing->addrUsed + RT_UOFFSETOF(VRINGUSED, uIndex),
190 &u16Value, sizeof(u16Value));
191}
192
/**
 * Writes one used-ring element (id + length) to guest memory.
 *
 * @param   pDevIns  The device instance.
 * @param   pVRing   The ring to write to.
 * @param   uIndex   Ring position; reduced modulo the ring size.
 * @param   uId      Head descriptor index of the completed chain.
 * @param   uLen     Number of bytes written into the chain.
 */
static void vringWriteUsedElem(PPDMDEVINS pDevIns, PVRING pVRing, uint32_t uIndex, uint32_t uId, uint32_t uLen)
{
    VRINGUSEDELEM elem;

    elem.uId = uId;
    elem.uLen = uLen;
    PDMDevHlpPCIPhysWrite(pDevIns,
                          pVRing->addrUsed + RT_UOFFSETOF_DYN(VRINGUSED, aRing[uIndex % pVRing->uSize]),
                          &elem, sizeof(elem));
}
203
/**
 * Returns a processed element to the guest via the used ring.
 *
 * Copies up to (uTotalLen - uReserved) bytes from the element's IN segment
 * buffers into guest memory, skipping the first uReserved bytes of the
 * segment space (area the caller has already written directly), then records
 * the element in the used ring.  Note that the used index itself is only
 * published later by vqueueSync().
 *
 * @param   pDevIns    The device instance.
 * @param   pThis      The shared virtio core instance data (logging only).
 * @param   pQueue     The queue the element came from.
 * @param   pElem      The element being completed.
 * @param   uTotalLen  Total number of bytes the guest should see as written.
 * @param   uReserved  Leading byte count to skip (must be < uTotalLen).
 */
void vqueuePut(PPDMDEVINS pDevIns, PVPCISTATE pThis, PVQUEUE pQueue, PVQUEUEELEM pElem, uint32_t uTotalLen, uint32_t uReserved)
{
    Log2(("%s vqueuePut: %s desc_idx=%u acb=%u (%u)\n", INSTANCE(pThis), pQueue->szName, pElem->uIndex, uTotalLen, uReserved));
    RT_NOREF(pThis);

    Assert(uReserved < uTotalLen);

    uint32_t cbLen = uTotalLen - uReserved;   /* bytes left to copy */
    uint32_t cbSkip = uReserved;              /* bytes left to skip */

    for (unsigned i = 0; i < pElem->cIn && cbLen > 0; ++i)
    {
        if (cbSkip >= pElem->aSegsIn[i].cb) /* segment completely skipped? */
        {
            cbSkip -= pElem->aSegsIn[i].cb;
            continue;
        }

        uint32_t cbSegLen = pElem->aSegsIn[i].cb - cbSkip;
        if (cbSegLen > cbLen) /* last segment only partially used? */
            cbSegLen = cbLen;

        /*
         * XXX: We should assert pv != NULL, but we need to check and
         * fix all callers first.
         */
        if (pElem->aSegsIn[i].pv != NULL)
        {
            Log2(("%s vqueuePut: %s used_idx=%u seg=%u addr=%RGp pv=%p cb=%u acb=%u\n", INSTANCE(pThis), pQueue->szName,
                  pQueue->uNextUsedIndex, i, pElem->aSegsIn[i].addr, pElem->aSegsIn[i].pv, pElem->aSegsIn[i].cb, cbSegLen));

            PDMDevHlpPCIPhysWrite(pDevIns,
                                  pElem->aSegsIn[i].addr + cbSkip,
                                  pElem->aSegsIn[i].pv,
                                  cbSegLen);
        }

        cbSkip = 0; /* skipping only applies to the first (partially skipped) segment */
        cbLen -= cbSegLen;
    }

    Log2(("%s vqueuePut: %s used_idx=%u guest_used_idx=%u id=%u len=%u\n", INSTANCE(pThis), pQueue->szName,
          pQueue->uNextUsedIndex, vringReadUsedIndex(pDevIns, &pQueue->VRing), pElem->uIndex, uTotalLen));

    vringWriteUsedElem(pDevIns, &pQueue->VRing,
                       pQueue->uNextUsedIndex++,
                       pElem->uIndex, uTotalLen);
}
252
/**
 * Raises the queue interrupt if the guest wants to be notified.
 *
 * The interrupt is delivered unless the guest set VRINGAVAIL_F_NO_INTERRUPT;
 * with VPCI_F_NOTIFY_ON_EMPTY negotiated, an empty queue forces notification
 * regardless of that flag.  Suppressed interrupts are counted in
 * StatIntsSkipped.
 *
 * @param   pDevIns  The device instance.
 * @param   pThis    The shared virtio core instance data.
 * @param   pQueue   The queue that completed work.
 */
static void vqueueNotify(PPDMDEVINS pDevIns, PVPCISTATE pThis, PVQUEUE pQueue)
{
    uint16_t const fAvail = vringReadAvailFlags(pDevIns, &pQueue->VRing);
    LogFlow(("%s vqueueNotify: %s availFlags=%x guestFeatures=%x vqueue is %sempty\n", INSTANCE(pThis), pQueue->szName,
             fAvail, pThis->uGuestFeatures, vqueueIsEmpty(pDevIns, pQueue)?"":"not "));
    if (   !(fAvail & VRINGAVAIL_F_NO_INTERRUPT)
        || ((pThis->uGuestFeatures & VPCI_F_NOTIFY_ON_EMPTY) && vqueueIsEmpty(pDevIns, pQueue)))
    {
        int rc = vpciRaiseInterrupt(pDevIns, pThis, VERR_INTERNAL_ERROR, VPCI_ISR_QUEUE);
        if (RT_FAILURE(rc))
            Log(("%s vqueueNotify: Failed to raise an interrupt (%Rrc).\n", INSTANCE(pThis), rc));
    }
    else
        STAM_REL_COUNTER_INC(&pThis->StatIntsSkipped);

}
269
/**
 * Publishes our used index to the guest and notifies it.
 *
 * Writes uNextUsedIndex into the used ring in guest memory (making all
 * elements recorded via vqueuePut visible) and then raises the queue
 * interrupt if appropriate.
 *
 * @param   pDevIns  The device instance.
 * @param   pThis    The shared virtio core instance data.
 * @param   pQueue   The queue to synchronize.
 */
void vqueueSync(PPDMDEVINS pDevIns, PVPCISTATE pThis, PVQUEUE pQueue)
{
    Log2(("%s vqueueSync: %s old_used_idx=%u new_used_idx=%u\n", INSTANCE(pThis),
          pQueue->szName, vringReadUsedIndex(pDevIns, &pQueue->VRing), pQueue->uNextUsedIndex));
    vringWriteUsedIndex(pDevIns, &pQueue->VRing, pQueue->uNextUsedIndex);
    vqueueNotify(pDevIns, pThis, pQueue);
}
277
278
279/**
280 * Raise interrupt.
281 *
282 * @param pDevIns The device instance.
283 * @param pThis The shared virtio core instance data.
284 * @param rcBusy Status code to return when the critical section is busy.
285 * @param u8IntCause Interrupt cause bit mask to set in PCI ISR port.
286 */
287int vpciRaiseInterrupt(PPDMDEVINS pDevIns, PVPCISTATE pThis, int rcBusy, uint8_t u8IntCause)
288{
289 RT_NOREF_PV(rcBusy);
290 // int rc = vpciCsEnter(pThis, rcBusy);
291 // if (RT_UNLIKELY(rc != VINF_SUCCESS))
292 // return rc;
293
294 STAM_REL_COUNTER_INC(&pThis->StatIntsRaised);
295 LogFlow(("%s vpciRaiseInterrupt: u8IntCause=%x\n", INSTANCE(pThis), u8IntCause));
296
297 pThis->uISR |= u8IntCause;
298 PDMDevHlpPCISetIrq(pDevIns, 0, 1);
299 // vpciCsLeave(pThis);
300 return VINF_SUCCESS;
301}
302
303/**
304 * Lower interrupt.
305 *
306 * @param pDevIns The device instance.
307 * @param pThis The shared virtio core instance data.
308 */
309static void vpciLowerInterrupt(PPDMDEVINS pDevIns, PVPCISTATE pThis)
310{
311 LogFlow(("%s vpciLowerInterrupt\n", INSTANCE(pThis)));
312 RT_NOREF(pThis);
313 PDMDevHlpPCISetIrq(pDevIns, 0, 0);
314}
315
316
/**
 * Resets the common virtio-PCI state.
 *
 * Lowers a pending interrupt (if any), clears the negotiated features,
 * queue selector, status and ISR registers, and resets every queue.
 *
 * @param   pDevIns  The device instance.
 * @param   pThis    The shared virtio core instance data.
 */
void vpciReset(PPDMDEVINS pDevIns, PVPCISTATE pThis)
{
    /* No interrupts should survive device reset, see @bugref(9556). */
    if (pThis->uISR)
        vpciLowerInterrupt(pDevIns, pThis);

    pThis->uGuestFeatures = 0;
    pThis->uQueueSelector = 0;
    pThis->uStatus        = 0;
    pThis->uISR           = 0;

    for (unsigned i = 0; i < pThis->cQueues; i++)
        vqueueReset(&pThis->Queues[i]);
}
331
332
/**
 * Returns the device's feature set: the device-specific features reported by
 * the callback, with VPCI_F_NOTIFY_ON_EMPTY always added by this core.
 *
 * @returns Host feature mask.
 * @param   pThis       The shared virtio core instance data.
 * @param   pCallbacks  Pointer to the device-specific callbacks.
 */
DECLINLINE(uint32_t) vpciGetHostFeatures(PVPCISTATE pThis, PCVPCIIOCALLBACKS pCallbacks)
{
    return pCallbacks->pfnGetHostFeatures(pThis) | VPCI_F_NOTIFY_ON_EMPTY;
}
337
338/**
339 * Port I/O Handler for IN operations.
340 *
341 * @returns VBox status code.
342 *
343 * @param pDevIns The device instance.
344 * @param pThis The shared virtio core instance data.
345 * @param offPort The offset into the I/O range of the port being read.
346 * @param pu32 Where to store the result.
347 * @param cb Number of bytes read.
348 * @param pCallbacks Pointer to the callbacks.
349 * @thread EMT
350 */
351int vpciIOPortIn(PPDMDEVINS pDevIns,
352 PVPCISTATE pThis,
353 RTIOPORT offPort,
354 uint32_t *pu32,
355 unsigned cb,
356 PCVPCIIOCALLBACKS pCallbacks)
357{
358 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF(StatIORead), a);
359
360 /*
361 * We probably do not need to enter critical section when reading registers
362 * as the most of them are either constant or being changed during
363 * initialization only, the exception being ISR which can be raced by all
364 * threads but I see no big harm in it. It also happens to be the most read
365 * register as it gets read in interrupt handler. By dropping cs protection
366 * here we gain the ability to deliver RX packets to the guest while TX is
367 * holding cs transmitting queued packets.
368 *
369 int rc = vpciCsEnter(pThis, VINF_IOM_R3_IOPORT_READ);
370 if (RT_UNLIKELY(rc != VINF_SUCCESS))
371 {
372 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF(StatIORead), a);
373 return rc;
374 }*/
375 int rc = VINF_SUCCESS;
376
377 switch (offPort)
378 {
379 case VPCI_HOST_FEATURES:
380 /* Tell the guest what features we support. */
381 ASSERT_GUEST_MSG(cb == 4, ("%d\n", cb));
382 *pu32 = vpciGetHostFeatures(pThis, pCallbacks) | VPCI_F_BAD_FEATURE;
383 break;
384
385 case VPCI_GUEST_FEATURES:
386 ASSERT_GUEST_MSG(cb == 4, ("%d\n", cb));
387 *pu32 = pThis->uGuestFeatures;
388 break;
389
390 case VPCI_QUEUE_PFN:
391 ASSERT_GUEST_MSG(cb == 4, ("%d\n", cb));
392 *pu32 = pThis->Queues[pThis->uQueueSelector].uPageNumber;
393 break;
394
395 case VPCI_QUEUE_NUM:
396 ASSERT_GUEST_MSG(cb == 2, ("%d\n", cb));
397 *pu32 = pThis->Queues[pThis->uQueueSelector].VRing.uSize;
398 break;
399
400 case VPCI_QUEUE_SEL:
401 ASSERT_GUEST_MSG(cb == 2, ("%d\n", cb));
402 *pu32 = pThis->uQueueSelector;
403 break;
404
405 case VPCI_STATUS:
406 ASSERT_GUEST_MSG(cb == 1, ("%d\n", cb));
407 *pu32 = pThis->uStatus;
408 break;
409
410 case VPCI_ISR:
411 ASSERT_GUEST_MSG(cb == 1, ("%d\n", cb));
412 *pu32 = pThis->uISR;
413 pThis->uISR = 0; /* read clears all interrupts */
414 vpciLowerInterrupt(pDevIns, pThis);
415 break;
416
417 default:
418 if (offPort >= VPCI_CONFIG)
419 rc = pCallbacks->pfnGetConfig(pThis, offPort - VPCI_CONFIG, cb, pu32);
420 else
421 {
422 *pu32 = UINT32_MAX;
423 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s vpciIOPortIn: no valid port at offset port=%RTiop cb=%08x\n",
424 INSTANCE(pThis), offPort, cb);
425 }
426 break;
427 }
428 Log3(("%s vpciIOPortIn: At %RTiop in %0*x\n", INSTANCE(pThis), offPort, cb*2, *pu32));
429
430 //vpciCsLeave(pThis);
431
432 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF(StatIORead), a);
433 return rc;
434}
435
436
437/**
438 * Port I/O Handler for OUT operations.
439 *
440 * @returns VBox status code.
441 *
442 * @param pDevIns The device instance.
443 * @param pThis The shared virtio core instance data.
444 * @param offPort The offset into the I/O range of the port being written.
445 * @param u32 The value to output.
446 * @param cb The value size in bytes.
447 * @param pCallbacks Pointer to the callbacks.
448 * @thread EMT
449 */
450int vpciIOPortOut(PPDMDEVINS pDevIns,
451 PVPCISTATE pThis,
452 PVPCISTATECC pThisCC,
453 RTIOPORT offPort,
454 uint32_t u32,
455 unsigned cb,
456 PCVPCIIOCALLBACKS pCallbacks)
457{
458 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF(StatIOWrite), a);
459 int rc = VINF_SUCCESS;
460 bool fHasBecomeReady;
461#ifndef IN_RING3
462 RT_NOREF_PV(pThisCC);
463#endif
464
465 Log3(("%s virtioIOPortOut: At offPort=%RTiop out %0*x\n", INSTANCE(pThis), offPort, cb*2, u32));
466
467 switch (offPort)
468 {
469 case VPCI_GUEST_FEATURES:
470 {
471 const uint32_t fHostFeatures = vpciGetHostFeatures(pThis, pCallbacks);
472
473 if (RT_LIKELY((u32 & ~fHostFeatures) == 0))
474 pThis->uGuestFeatures = u32;
475 else
476 {
477 /*
478 * Guest requests features we don't advertise. Stick
479 * to the minimum if negotiation looks completely
480 * botched, otherwise restrict to advertised features.
481 */
482 if (u32 & VPCI_F_BAD_FEATURE)
483 {
484 Log(("%s WARNING! Guest failed to negotiate properly (guest=%x)\n",
485 INSTANCE(pThis), u32));
486 pThis->uGuestFeatures = pCallbacks->pfnGetHostMinimalFeatures(pThis);
487 }
488 else
489 {
490 Log(("%s Guest asked for features host does not support! (host=%x guest=%x)\n",
491 INSTANCE(pThis), fHostFeatures, u32));
492 pThis->uGuestFeatures = u32 & fHostFeatures;
493 }
494 }
495 pCallbacks->pfnSetHostFeatures(pThis, pThis->uGuestFeatures);
496 break;
497 }
498
499 case VPCI_QUEUE_PFN:
500 /*
501 * The guest is responsible for allocating the pages for queues,
502 * here it provides us with the page number of descriptor table.
503 * Note that we provide the size of the queue to the guest via
504 * VIRTIO_PCI_QUEUE_NUM.
505 */
506 pThis->Queues[pThis->uQueueSelector].uPageNumber = u32;
507 if (u32)
508 vqueueInit(&pThis->Queues[pThis->uQueueSelector], u32);
509 else
510 rc = pCallbacks->pfnReset(pDevIns);
511 break;
512
513 case VPCI_QUEUE_SEL:
514 ASSERT_GUEST_MSG(cb == 2, ("cb=%u\n", cb));
515 u32 &= 0xFFFF;
516 if (u32 < pThis->cQueues)
517 pThis->uQueueSelector = u32;
518 else
519 Log3(("%s vpciIOPortOut: Invalid queue selector %08x\n", INSTANCE(pThis), u32));
520 break;
521
522 case VPCI_QUEUE_NOTIFY:
523#ifdef IN_RING3
524 ASSERT_GUEST_MSG(cb == 2, ("cb=%u\n", cb));
525 u32 &= 0xFFFF;
526 if (u32 < pThis->cQueues)
527 {
528 RT_UNTRUSTED_VALIDATED_FENCE();
529 if (pThis->Queues[u32].VRing.addrDescriptors)
530 {
531
532 // rc = vpciCsEnter(pThis, VERR_SEM_BUSY);
533 // if (RT_LIKELY(rc == VINF_SUCCESS))
534 // {
535 pThisCC->Queues[u32].pfnCallback(pDevIns, &pThis->Queues[u32]);
536 // vpciCsLeave(pThis);
537 // }
538 }
539 else
540 Log(("%s The queue (#%d) being notified has not been initialized.\n",
541 INSTANCE(pThis), u32));
542 }
543 else
544 Log(("%s Invalid queue number (%d)\n", INSTANCE(pThis), u32));
545#else
546 rc = VINF_IOM_R3_IOPORT_WRITE;
547#endif
548 break;
549
550 case VPCI_STATUS:
551 ASSERT_GUEST_MSG(cb == 1, ("cb=%u\n", cb));
552 u32 &= 0xFF;
553 fHasBecomeReady = !(pThis->uStatus & VPCI_STATUS_DRV_OK) && (u32 & VPCI_STATUS_DRV_OK);
554 pThis->uStatus = u32;
555 /* Writing 0 to the status port triggers device reset. */
556 if (u32 == 0)
557 rc = pCallbacks->pfnReset(pDevIns);
558 else if (fHasBecomeReady)
559 {
560 /* Older hypervisors were lax and did not enforce bus mastering. Older guests
561 * (Linux prior to 2.6.34, NetBSD 6.x) were lazy and did not enable bus mastering.
562 * We automagically enable bus mastering on driver initialization to make existing
563 * drivers work.
564 */
565 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
566 PDMPciDevSetCommand(pPciDev, PDMPciDevGetCommand(pPciDev) | PCI_COMMAND_BUSMASTER);
567
568 pCallbacks->pfnReady(pDevIns);
569 }
570 break;
571
572 default:
573 if (offPort >= VPCI_CONFIG)
574 rc = pCallbacks->pfnSetConfig(pThis, offPort - VPCI_CONFIG, cb, &u32);
575 else
576 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s vpciIOPortOut: no valid port at offset offPort=%RTiop cb=%08x\n",
577 INSTANCE(pThis), offPort, cb);
578 break;
579 }
580
581 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF(StatIOWrite), a);
582 return rc;
583}
584
585#ifdef IN_RING3
586
587/**
588 * Handles common IBase.pfnQueryInterface requests.
589 */
590void *vpciR3QueryInterface(PVPCISTATECC pThisCC, const char *pszIID)
591{
592 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThisCC->IBase);
593 PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThisCC->ILeds);
594 return NULL;
595}
596
597/**
598 * @interface_method_impl{PDMILEDPORTS,pfnQueryStatusLed}
599 */
600static DECLCALLBACK(int) vpciR3QueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
601{
602 PVPCISTATECC pThisCC = RT_FROM_MEMBER(pInterface, VPCISTATECC, ILeds);
603 if (iLUN == 0)
604 {
605 *ppLed = &pThisCC->pShared->led;
606 return VINF_SUCCESS;
607 }
608 return VERR_PDM_LUN_NOT_FOUND;
609}
610
611/**
612 * Turns on/off the write status LED.
613 *
614 * @returns VBox status code.
615 * @param pThis Pointer to the device state structure.
616 * @param fOn New LED state.
617 */
618void vpciR3SetWriteLed(PVPCISTATE pThis, bool fOn)
619{
620 LogFlow(("%s vpciR3SetWriteLed: %s\n", INSTANCE(pThis), fOn?"on":"off"));
621 if (fOn)
622 pThis->led.Asserted.s.fWriting = pThis->led.Actual.s.fWriting = 1;
623 else
624 pThis->led.Actual.s.fWriting = fOn;
625}
626
627/**
628 * Turns on/off the read status LED.
629 *
630 * @returns VBox status code.
631 * @param pThis Pointer to the device state structure.
632 * @param fOn New LED state.
633 */
634void vpciR3SetReadLed(PVPCISTATE pThis, bool fOn)
635{
636 LogFlow(("%s vpciR3SetReadLed: %s\n", INSTANCE(pThis), fOn?"on":"off"));
637 if (fOn)
638 pThis->led.Asserted.s.fReading = pThis->led.Actual.s.fReading = 1;
639 else
640 pThis->led.Actual.s.fReading = fOn;
641}
642
# if 0 /* unused */
/**
 * Sets 32-bit register in PCI configuration space.
 *
 * NOTE(review): compiled out; kept for reference only.
 *
 * @param   refPciDev  The PCI device.
 * @param   uOffset    The register offset.
 * @param   u32Value   The value to store in the register.
 * @thread  EMT
 */
DECLINLINE(void) vpciCfgSetU32(PDMPCIDEV& refPciDev, uint32_t uOffset, uint32_t u32Value)
{
    Assert(uOffset+sizeof(u32Value) <= sizeof(refPciDev.config));
    *(uint32_t*)&refPciDev.config[uOffset] = u32Value;
}
# endif /* unused */
657
658/**
659 * Dumps the state (useful for both logging and info items).
660 */
661void vpciR3DumpStateWorker(PVPCISTATE pThis, PCDBGFINFOHLP pHlp)
662{
663
664 pHlp->pfnPrintf(pHlp,
665 " uGuestFeatures = 0x%08x\n"
666 " uQueueSelector = 0x%04x\n"
667 " uStatus = 0x%02x\n"
668 " uISR = 0x%02x\n",
669 pThis->uGuestFeatures,
670 pThis->uQueueSelector,
671 pThis->uStatus,
672 pThis->uISR);
673
674 for (unsigned i = 0; i < pThis->cQueues; i++)
675 pHlp->pfnPrintf(pHlp,
676 " %s queue:\n"
677 " VRing.uSize = %u\n"
678 " VRing.addrDescriptors = %p\n"
679 " VRing.addrAvail = %p\n"
680 " VRing.addrUsed = %p\n"
681 " uNextAvailIndex = %u\n"
682 " uNextUsedIndex = %u\n"
683 " uPageNumber = %x\n",
684 pThis->Queues[i].szName,
685 pThis->Queues[i].VRing.uSize,
686 pThis->Queues[i].VRing.addrDescriptors,
687 pThis->Queues[i].VRing.addrAvail,
688 pThis->Queues[i].VRing.addrUsed,
689 pThis->Queues[i].uNextAvailIndex,
690 pThis->Queues[i].uNextUsedIndex,
691 pThis->Queues[i].uPageNumber);
692}
693
# ifdef LOG_ENABLED
/**
 * Dumps the state to the log if Log2 is enabled; no-op macro otherwise.
 *
 * @param   pThis       The shared virtio core instance data.
 * @param   pcszCaller  Name of the calling function (for the log line).
 */
void vpciR3DumpState(PVPCISTATE pThis, const char *pcszCaller)
{
    if (LogIs2Enabled())
    {
        Log2(("vpciR3DumpState: (called from %s)\n", pcszCaller));
        vpciR3DumpStateWorker(pThis, DBGFR3InfoLogHlp());
    }
}
# else
#  define vpciR3DumpState(x, s)  do {} while (0)
# endif
706
707/**
708 * Saved the core virtio state.
709 *
710 * @returns VBox status code.
711 * @param pHlp The device helpers.
712 * @param pThis The shared virtio core instance data.
713 * @param pSSM The handle to the saved state.
714 */
715int vpciR3SaveExec(PCPDMDEVHLPR3 pHlp, PVPCISTATE pThis, PSSMHANDLE pSSM)
716{
717 vpciR3DumpState(pThis, "vpciR3SaveExec");
718
719 pHlp->pfnSSMPutU32(pSSM, pThis->uGuestFeatures);
720 pHlp->pfnSSMPutU16(pSSM, pThis->uQueueSelector);
721 pHlp->pfnSSMPutU8( pSSM, pThis->uStatus);
722 pHlp->pfnSSMPutU8( pSSM, pThis->uISR);
723
724 /* Save queue states */
725 int rc = pHlp->pfnSSMPutU32(pSSM, pThis->cQueues);
726 AssertRCReturn(rc, rc);
727 for (unsigned i = 0; i < pThis->cQueues; i++)
728 {
729 pHlp->pfnSSMPutU16(pSSM, pThis->Queues[i].VRing.uSize);
730 pHlp->pfnSSMPutU32(pSSM, pThis->Queues[i].uPageNumber);
731 pHlp->pfnSSMPutU16(pSSM, pThis->Queues[i].uNextAvailIndex);
732 rc = pHlp->pfnSSMPutU16(pSSM, pThis->Queues[i].uNextUsedIndex);
733 AssertRCReturn(rc, rc);
734 }
735
736 return VINF_SUCCESS;
737}
738
739/**
740 * Loads a saved device state.
741 *
742 * @returns VBox status code.
743 * @param pHlp The device helpers.
744 * @param pThis The shared virtio core instance data.
745 * @param pSSM The handle to the saved state.
746 * @param uVersion The data unit version number.
747 * @param uPass The data pass.
748 * @param cQueues The default queue count (for old states).
749 */
750int vpciR3LoadExec(PCPDMDEVHLPR3 pHlp, PVPCISTATE pThis, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass, uint32_t cQueues)
751{
752 int rc;
753
754 if (uPass == SSM_PASS_FINAL)
755 {
756 /* Restore state data */
757 pHlp->pfnSSMGetU32(pSSM, &pThis->uGuestFeatures);
758 pHlp->pfnSSMGetU16(pSSM, &pThis->uQueueSelector);
759 pHlp->pfnSSMGetU8( pSSM, &pThis->uStatus);
760 pHlp->pfnSSMGetU8( pSSM, &pThis->uISR);
761
762 /* Restore queues */
763 if (uVersion > VIRTIO_SAVEDSTATE_VERSION_3_1_BETA1)
764 {
765 rc = pHlp->pfnSSMGetU32(pSSM, &pThis->cQueues);
766 AssertRCReturn(rc, rc);
767 }
768 else
769 pThis->cQueues = cQueues;
770 AssertLogRelMsgReturn(pThis->cQueues <= VIRTIO_MAX_NQUEUES, ("%#x\n", pThis->cQueues), VERR_SSM_LOAD_CONFIG_MISMATCH);
771 AssertLogRelMsgReturn(pThis->uQueueSelector < pThis->cQueues || (pThis->cQueues == 0 && pThis->uQueueSelector),
772 ("uQueueSelector=%u cQueues=%u\n", pThis->uQueueSelector, pThis->cQueues),
773 VERR_SSM_LOAD_CONFIG_MISMATCH);
774
775 for (unsigned i = 0; i < pThis->cQueues; i++)
776 {
777 rc = pHlp->pfnSSMGetU16(pSSM, &pThis->Queues[i].VRing.uSize);
778 AssertRCReturn(rc, rc);
779 rc = pHlp->pfnSSMGetU32(pSSM, &pThis->Queues[i].uPageNumber);
780 AssertRCReturn(rc, rc);
781
782 if (pThis->Queues[i].uPageNumber)
783 vqueueInit(&pThis->Queues[i], pThis->Queues[i].uPageNumber);
784
785 rc = pHlp->pfnSSMGetU16(pSSM, &pThis->Queues[i].uNextAvailIndex);
786 AssertRCReturn(rc, rc);
787 rc = pHlp->pfnSSMGetU16(pSSM, &pThis->Queues[i].uNextUsedIndex);
788 AssertRCReturn(rc, rc);
789 }
790 }
791
792 vpciR3DumpState(pThis, "vpciLoadExec");
793
794 return VINF_SUCCESS;
795}
796
/**
 * Registers a new virtqueue in the first free slot (slot with uSize == 0).
 *
 * @returns Pointer to the new queue, or NULL if all slots are taken
 *          (asserted).
 * @param   pThis        The shared virtio core instance data.
 * @param   pThisCC      The ring-3 virtio core instance data (callback table).
 * @param   uSize        Ring size for the new queue.
 * @param   pfnCallback  Function invoked when the guest notifies the queue.
 * @param   pcszName     Queue name (copied, used for logging).
 */
PVQUEUE vpciR3AddQueue(PVPCISTATE pThis, PVPCISTATECC pThisCC, unsigned uSize,
                       PFNVPCIQUEUECALLBACK pfnCallback, const char *pcszName)
{
    /* Find an empty queue slot */
    for (unsigned i = 0; i < pThis->cQueues; i++)
    {
        if (pThis->Queues[i].VRing.uSize == 0)
        {
            PVQUEUE pQueue = &pThis->Queues[i];
            pQueue->VRing.uSize           = uSize;
            pQueue->VRing.addrDescriptors = 0;
            pQueue->uPageNumber           = 0;
            int rc = RTStrCopy(pQueue->szName, sizeof(pQueue->szName), pcszName);
            AssertRC(rc);
            pThisCC->Queues[i].pfnCallback = pfnCallback;
            return pQueue;
        }
    }
    AssertMsgFailedReturn(("%s Too many queues being added, no empty slots available!\n", INSTANCE(pThis)), NULL);
}
817
818/**
819 * Destruct PCI-related part of device.
820 *
821 * We need to free non-VM resources only.
822 *
823 * @returns VBox status code.
824 * @param pThis The shared virtio core instance data.
825 */
826int vpciR3Term(PPDMDEVINS pDevIns, PVPCISTATE pThis)
827{
828 Log(("%s Destroying PCI instance\n", INSTANCE(pThis)));
829
830 if (PDMDevHlpCritSectIsInitialized(pDevIns, &pThis->cs))
831 PDMDevHlpCritSectDelete(pDevIns, &pThis->cs);
832
833 return VINF_SUCCESS;
834}
835
836/**
837 * Set PCI configuration space registers.
838 *
839 * @param pPciDev Pointer to the PCI device structure.
840 * @param uDeviceId VirtiO Device Id
841 * @param uClass Class of PCI device (network, etc)
842 * @thread EMT
843 */
844static void vpciConfigure(PPDMPCIDEV pPciDev, uint16_t uDeviceId, uint16_t uClass)
845{
846 /* Configure PCI Device, assume 32-bit mode ******************************/
847 PDMPciDevSetVendorId(pPciDev, DEVICE_PCI_VENDOR_ID);
848 PDMPciDevSetDeviceId(pPciDev, DEVICE_PCI_BASE_ID + uDeviceId);
849 PDMPciDevSetWord(pPciDev, VBOX_PCI_SUBSYSTEM_VENDOR_ID, DEVICE_PCI_SUBSYSTEM_VENDOR_ID);
850 PDMPciDevSetWord(pPciDev, VBOX_PCI_SUBSYSTEM_ID, DEVICE_PCI_SUBSYSTEM_BASE_ID + uDeviceId);
851
852 /* ABI version, must be equal 0 as of 2.6.30 kernel. */
853 PDMPciDevSetByte(pPciDev, VBOX_PCI_REVISION_ID, 0x00);
854 /* Ethernet adapter */
855 PDMPciDevSetByte(pPciDev, VBOX_PCI_CLASS_PROG, 0x00);
856 PDMPciDevSetWord(pPciDev, VBOX_PCI_CLASS_DEVICE, uClass);
857 /* Interrupt Pin: INTA# */
858 PDMPciDevSetByte(pPciDev, VBOX_PCI_INTERRUPT_PIN, 0x01);
859
860# ifdef VBOX_WITH_MSI_DEVICES
861 PDMPciDevSetCapabilityList(pPciDev, 0x80);
862 PDMPciDevSetStatus(pPciDev, VBOX_PCI_STATUS_CAP_LIST);
863# endif
864}
865
/**
 * Ring-3 initialization of the common virtio-PCI state.
 *
 * Initializes the shared/ring-3 state members, creates the critical section,
 * configures and registers the PCI device, optionally attaches the status
 * driver and registers statistics.  The caller must have set szInstance and
 * IBase.pfnQueryInterface beforehand (asserted).
 *
 * @returns VBox status code.
 * @param   pDevIns    The device instance.
 * @param   pThis      The shared virtio core instance data.
 * @param   pThisCC    The ring-3 virtio core instance data.
 * @param   uDeviceId  Virtio device id for PCI config space.
 * @param   uClass     PCI device class.
 * @param   cQueues    Number of queue slots to expose.
 */
int vpciR3Init(PPDMDEVINS pDevIns, PVPCISTATE pThis, PVPCISTATECC pThisCC, uint16_t uDeviceId, uint16_t uClass, uint32_t cQueues)
{
    /* Init data members. */
    pThis->cQueues                   = cQueues;
    pThis->led.u32Magic              = PDMLED_MAGIC;
    pThisCC->pShared                 = pThis;
    pThisCC->ILeds.pfnQueryStatusLed = vpciR3QueryStatusLed;
    AssertReturn(pThisCC->IBase.pfnQueryInterface, VERR_INVALID_POINTER);
    AssertReturn(pThis->szInstance[0], VERR_INVALID_PARAMETER);
    AssertReturn(strlen(pThis->szInstance) < sizeof(pThis->szInstance), VERR_INVALID_PARAMETER);

    /* Initialize critical section. */
    int rc = PDMDevHlpCritSectInit(pDevIns, &pThis->cs, RT_SRC_POS, "%s", pThis->szInstance);
    AssertRCReturn(rc, rc);

    /*
     * Set up the PCI device.
     */
    PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
    PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);

    /* Set PCI config registers */
    vpciConfigure(pPciDev, uDeviceId, uClass);

    /* Register PCI device */
    rc = PDMDevHlpPCIRegister(pDevIns, pPciDev);
    AssertRCReturn(rc, rc);

# ifdef VBOX_WITH_MSI_DEVICES
#  if 0
    {
        PDMMSIREG aMsiReg;

        RT_ZERO(aMsiReg);
        aMsiReg.cMsixVectors = 1;
        aMsiReg.iMsixCapOffset = 0x80;
        aMsiReg.iMsixNextOffset = 0x0;
        aMsiReg.iMsixBar = 0;
        rc = PDMDevHlpPCIRegisterMsi(pDevIns, &aMsiReg);
        if (RT_FAILURE (rc))
            PCIDevSetCapabilityList(&pThis->pciDevice, 0x0);
    }
#  endif
# endif

    /*
     * Attach the status driver (optional).
     */
    PPDMIBASE pBase;
    rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pThisCC->IBase, &pBase, "Status Port");
    if (RT_SUCCESS(rc))
        pThisCC->pLedsConnector = PDMIBASE_QUERY_INTERFACE(pBase, PDMILEDCONNECTORS);
    else if (rc != VERR_PDM_NO_ATTACHED_DRIVER)
        return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));

    /*
     * Statistics.
     */
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIntsRaised,  STAMTYPE_COUNTER, "Interrupts/Raised",  STAMUNIT_OCCURENCES,     "Number of raised interrupts");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIntsSkipped, STAMTYPE_COUNTER, "Interrupts/Skipped", STAMUNIT_OCCURENCES,     "Number of skipped interrupts");
# ifdef VBOX_WITH_STATISTICS
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOReadR3,    STAMTYPE_PROFILE, "IO/ReadR3",          STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOReadR0,    STAMTYPE_PROFILE, "IO/ReadR0",          STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R0");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOReadRC,    STAMTYPE_PROFILE, "IO/ReadRC",          STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RC");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOWriteR3,   STAMTYPE_PROFILE, "IO/WriteR3",         STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOWriteR0,   STAMTYPE_PROFILE, "IO/WriteR0",         STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R0");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOWriteRC,   STAMTYPE_PROFILE, "IO/WriteRC",         STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RC");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatCsR3,        STAMTYPE_PROFILE, "Cs/CsR3",            STAMUNIT_TICKS_PER_CALL, "Profiling CS wait in R3");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatCsR0,        STAMTYPE_PROFILE, "Cs/CsR0",            STAMUNIT_TICKS_PER_CALL, "Profiling CS wait in R0");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatCsRC,        STAMTYPE_PROFILE, "Cs/CsRC",            STAMUNIT_TICKS_PER_CALL, "Profiling CS wait in RC");
# endif /* VBOX_WITH_STATISTICS */

    return VINF_SUCCESS;
}
940
941#else /* !IN_RING3 */
942
943/**
944 * Does ring-0/raw-mode initialization.
945 */
946int vpciRZInit(PPDMDEVINS pDevIns, PVPCISTATE pThis, PVPCISTATECC pThisCC)
947{
948 RT_NOREF(pDevIns, pThis, pThisCC);
949 return VINF_SUCCESS;
950}
951
952#endif /* !IN_RING3 */
953
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette