VirtualBox

source: vbox/trunk/src/VBox/Devices/VirtIO/Virtio.cpp @ 64301

Last change on this file since 64301 was 64301, checked in by vboxsync, 8 years ago

Devices/VirtIO: additional check

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 34.0 KB
/* $Id: Virtio.cpp 64301 2016-10-17 14:28:31Z vboxsync $ */
/** @file
 * Virtio - Virtio Common Functions (VRing, VQueue, Virtio PCI)
 */

/*
 * Copyright (C) 2009-2016 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_DEV_VIRTIO

#include <iprt/param.h>
#include <iprt/uuid.h>
#include <VBox/vmm/pdmdev.h>
#include "Virtio.h"

#define INSTANCE(pState) pState->szInstance
#define IFACE_TO_STATE(pIface, ifaceName) ((VPCISTATE *)((char*)pIface - RT_OFFSETOF(VPCISTATE, ifaceName)))

#ifdef LOG_ENABLED
# define QUEUENAME(s, q) (q->pcszName)
#endif



#ifndef VBOX_DEVICE_STRUCT_TESTCASE

//RT_C_DECLS_BEGIN
//RT_C_DECLS_END

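/**
 * Resets a virtqueue: clears the ring addresses, the shadow available/used
 * indices and the page number supplied by the guest.
 *
 * @param pQueue The virtqueue to reset.
 */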
static void vqueueReset(PVQUEUE pQueue)
{
    pQueue->VRing.addrDescriptors = 0;
    pQueue->VRing.addrAvail = 0;
    pQueue->VRing.addrUsed = 0;
    pQueue->uNextAvailIndex = 0;
    pQueue->uNextUsedIndex = 0;
    pQueue->uPageNumber = 0;
}

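/**
 * Computes the guest-physical layout of a virtqueue from the page number the
 * guest wrote to VPCI_QUEUE_PFN: the descriptor table starts at that page,
 * the available ring follows it, and the used ring starts on the next
 * page-aligned boundary (legacy virtio PCI layout).
 *
 * @param pQueue      The virtqueue to initialize.
 * @param uPageNumber Guest page number of the descriptor table.
 */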
static void vqueueInit(PVQUEUE pQueue, uint32_t uPageNumber)
{
    pQueue->VRing.addrDescriptors = (uint64_t)uPageNumber << PAGE_SHIFT;
    pQueue->VRing.addrAvail = pQueue->VRing.addrDescriptors
                            + sizeof(VRINGDESC) * pQueue->VRing.uSize;
    pQueue->VRing.addrUsed = RT_ALIGN(
        pQueue->VRing.addrAvail + RT_OFFSETOF(VRINGAVAIL, auRing[pQueue->VRing.uSize]),
        PAGE_SIZE); /* The used ring must start from the next page. */
    pQueue->uNextAvailIndex = 0;
    pQueue->uNextUsedIndex = 0;
}

// void vqueueElemFree(PVQUEUEELEM pElem)
// {
// }

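/**
 * Reads one descriptor from the descriptor table in guest memory.
 *
 * @param pState The device state structure.
 * @param pVRing The ring to read from.
 * @param uIndex Descriptor index; wrapped modulo the ring size.
 * @param pDesc  Where to store the descriptor.
 */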
void vringReadDesc(PVPCISTATE pState, PVRING pVRing, uint32_t uIndex, PVRINGDESC pDesc)
{
    //Log(("%s vringReadDesc: ring=%p idx=%u\n", INSTANCE(pState), pVRing, uIndex));
    PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
                      pVRing->addrDescriptors + sizeof(VRINGDESC) * (uIndex % pVRing->uSize),
                      pDesc, sizeof(VRINGDESC));
}

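/**
 * Reads an entry of the available ring from guest memory.
 *
 * @returns The head descriptor index stored in that entry.
 * @param pState The device state structure.
 * @param pVRing The ring to read from.
 * @param uIndex Entry index; wrapped modulo the ring size.
 */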
uint16_t vringReadAvail(PVPCISTATE pState, PVRING pVRing, uint32_t uIndex)
{
    uint16_t tmp;

    PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
                      pVRing->addrAvail + RT_OFFSETOF(VRINGAVAIL, auRing[uIndex % pVRing->uSize]),
                      &tmp, sizeof(tmp));
    return tmp;
}

uint16_t vringReadAvailFlags(PVPCISTATE pState, PVRING pVRing)
{
    uint16_t tmp;

    PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
                      pVRing->addrAvail + RT_OFFSETOF(VRINGAVAIL, uFlags),
                      &tmp, sizeof(tmp));
    return tmp;
}

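/**
 * Sets or clears VRINGUSED_F_NO_NOTIFY in the used ring's flags, telling the
 * guest whether it should notify us when it adds buffers to the ring.
 *
 * @param pState   The device state structure.
 * @param pVRing   The ring whose flags to update.
 * @param fEnabled True to request guest notifications, false to suppress them.
 */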
void vringSetNotification(PVPCISTATE pState, PVRING pVRing, bool fEnabled)
{
    uint16_t tmp;

    PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
                      pVRing->addrUsed + RT_OFFSETOF(VRINGUSED, uFlags),
                      &tmp, sizeof(tmp));

    if (fEnabled)
        tmp &= ~ VRINGUSED_F_NO_NOTIFY;
    else
        tmp |= VRINGUSED_F_NO_NOTIFY;

    PDMDevHlpPCIPhysWrite(pState->CTX_SUFF(pDevIns),
                          pVRing->addrUsed + RT_OFFSETOF(VRINGUSED, uFlags),
                          &tmp, sizeof(tmp));
}

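/**
 * Skips the next available element without processing it.
 *
 * @returns true if an element was skipped, false if the queue was empty.
 * @param pState The device state structure.
 * @param pQueue The virtqueue.
 */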
bool vqueueSkip(PVPCISTATE pState, PVQUEUE pQueue)
{
    if (vqueueIsEmpty(pState, pQueue))
        return false;

    Log2(("%s vqueueSkip: %s avail_idx=%u\n", INSTANCE(pState),
          QUEUENAME(pState, pQueue), pQueue->uNextAvailIndex));
    pQueue->uNextAvailIndex++;
    return true;
}

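/**
 * Fetches the next available descriptor chain from the queue.
 *
 * Walks the chain starting at the head index taken from the available ring
 * and sorts its segments into pElem->aSegsOut (readable by the device) and
 * pElem->aSegsIn (writable by the device, VRINGDESC_F_WRITE). The walk is
 * capped at VRING_MAX_SIZE segments to defeat descriptor loops set up by a
 * malicious guest (see @bugref{8620}).
 *
 * @returns true if an element was fetched, false if the queue was empty.
 * @param pState  The device state structure.
 * @param pQueue  The virtqueue.
 * @param pElem   Where to store the gathered segments.
 * @param fRemove Whether to advance the shadow available index, i.e. consume
 *                the element rather than just peek at it.
 */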
bool vqueueGet(PVPCISTATE pState, PVQUEUE pQueue, PVQUEUEELEM pElem, bool fRemove)
{
    if (vqueueIsEmpty(pState, pQueue))
        return false;

    pElem->nIn = pElem->nOut = 0;

    Log2(("%s vqueueGet: %s avail_idx=%u\n", INSTANCE(pState),
          QUEUENAME(pState, pQueue), pQueue->uNextAvailIndex));

    VRINGDESC desc;
    uint16_t idx = vringReadAvail(pState, &pQueue->VRing, pQueue->uNextAvailIndex);
    if (fRemove)
        pQueue->uNextAvailIndex++;
    pElem->uIndex = idx;
    do
    {
        VQUEUESEG *pSeg;

        /*
         * Malicious guests may try to trick us into writing beyond aSegsIn or
         * aSegsOut boundaries by linking several descriptors into a loop. We
         * cannot possibly get a sequence of linked descriptors exceeding the
         * total number of descriptors in the ring (see @bugref{8620}).
         */
        if (pElem->nIn + pElem->nOut >= VRING_MAX_SIZE)
        {
            static volatile uint32_t s_cMessages  = 0;
            static volatile uint32_t s_cThreshold = 1;
            if (ASMAtomicIncU32(&s_cMessages) == ASMAtomicReadU32(&s_cThreshold))
            {
                LogRel(("%s: too many linked descriptors; check if the guest arranges descriptors in a loop.\n",
                        INSTANCE(pState)));
                if (ASMAtomicReadU32(&s_cMessages) != 1)
                    LogRel(("%s: (the above error has occurred %u times so far)\n",
                            INSTANCE(pState), ASMAtomicReadU32(&s_cMessages)));
                ASMAtomicWriteU32(&s_cThreshold, ASMAtomicReadU32(&s_cThreshold) * 10);
            }
            break;
        }

        vringReadDesc(pState, &pQueue->VRing, idx, &desc);
        if (desc.u16Flags & VRINGDESC_F_WRITE)
        {
            Log2(("%s vqueueGet: %s IN seg=%u desc_idx=%u addr=%p cb=%u\n", INSTANCE(pState),
                  QUEUENAME(pState, pQueue), pElem->nIn, idx, desc.u64Addr, desc.uLen));
            pSeg = &pElem->aSegsIn[pElem->nIn++];
        }
        else
        {
            Log2(("%s vqueueGet: %s OUT seg=%u desc_idx=%u addr=%p cb=%u\n", INSTANCE(pState),
                  QUEUENAME(pState, pQueue), pElem->nOut, idx, desc.u64Addr, desc.uLen));
            pSeg = &pElem->aSegsOut[pElem->nOut++];
        }

        pSeg->addr = desc.u64Addr;
        pSeg->cb = desc.uLen;
        pSeg->pv = NULL;

        idx = desc.u16Next;
    } while (desc.u16Flags & VRINGDESC_F_NEXT);

    Log2(("%s vqueueGet: %s head_desc_idx=%u nIn=%u nOut=%u\n", INSTANCE(pState),
          QUEUENAME(pState, pQueue), pElem->uIndex, pElem->nIn, pElem->nOut));
    return true;
}

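/**
 * Reads the used ring index as currently stored in guest memory, i.e. the
 * value last published to the guest.
 *
 * @returns The used index visible to the guest.
 * @param pState The device state structure.
 * @param pVRing The ring to read from.
 */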
uint16_t vringReadUsedIndex(PVPCISTATE pState, PVRING pVRing)
{
    uint16_t tmp;
    PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
                      pVRing->addrUsed + RT_OFFSETOF(VRINGUSED, uIndex),
                      &tmp, sizeof(tmp));
    return tmp;
}

void vringWriteUsedIndex(PVPCISTATE pState, PVRING pVRing, uint16_t u16Value)
{
    PDMDevHlpPCIPhysWrite(pState->CTX_SUFF(pDevIns),
                          pVRing->addrUsed + RT_OFFSETOF(VRINGUSED, uIndex),
                          &u16Value, sizeof(u16Value));
}

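/**
 * Writes one element of the used ring in guest memory.
 *
 * @param pState The device state structure.
 * @param pVRing The ring to update.
 * @param uIndex Used ring position; wrapped modulo the ring size.
 * @param uId    Head descriptor index of the completed chain.
 * @param uLen   Number of bytes reported as written to the IN segments.
 */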
void vringWriteUsedElem(PVPCISTATE pState, PVRING pVRing, uint32_t uIndex, uint32_t uId, uint32_t uLen)
{
    VRINGUSEDELEM elem;

    elem.uId = uId;
    elem.uLen = uLen;
    PDMDevHlpPCIPhysWrite(pState->CTX_SUFF(pDevIns),
                          pVRing->addrUsed + RT_OFFSETOF(VRINGUSED, aRing[uIndex % pVRing->uSize]),
                          &elem, sizeof(elem));
}

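/**
 * Returns a processed element to the used ring.
 *
 * Copies the data attached to the element's IN segments (those with a
 * non-NULL pv) into guest memory -- the first uReserved bytes are accounted
 * for in uLen but are not copied here -- and appends a used ring entry
 * reporting uLen bytes for the chain. The updated used index becomes visible
 * to the guest only after a subsequent vqueueSync().
 *
 * @param pState    The device state structure.
 * @param pQueue    The virtqueue.
 * @param pElem     The element obtained from vqueueGet().
 * @param uLen      Total number of bytes to report as written.
 * @param uReserved Number of leading bytes already handled by the caller.
 */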
void vqueuePut(PVPCISTATE pState, PVQUEUE pQueue, PVQUEUEELEM pElem, uint32_t uLen, uint32_t uReserved)
{
    unsigned int i, uOffset, cbReserved = uReserved;

    Log2(("%s vqueuePut: %s desc_idx=%u acb=%u\n", INSTANCE(pState),
          QUEUENAME(pState, pQueue), pElem->uIndex, uLen));
    for (i = uOffset = 0; i < pElem->nIn && uOffset < uLen - uReserved; i++)
    {
        uint32_t cbSegLen = RT_MIN(uLen - cbReserved - uOffset, pElem->aSegsIn[i].cb - cbReserved);
        if (pElem->aSegsIn[i].pv)
        {
            Log2(("%s vqueuePut: %s used_idx=%u seg=%u addr=%p pv=%p cb=%u acb=%u\n", INSTANCE(pState),
                  QUEUENAME(pState, pQueue), pQueue->uNextUsedIndex, i, pElem->aSegsIn[i].addr, pElem->aSegsIn[i].pv, pElem->aSegsIn[i].cb, cbSegLen));
            PDMDevHlpPCIPhysWrite(pState->CTX_SUFF(pDevIns), pElem->aSegsIn[i].addr + cbReserved,
                                  pElem->aSegsIn[i].pv, cbSegLen);
            cbReserved = 0;
        }
        uOffset += cbSegLen;
    }

    Assert((uReserved + uOffset) == uLen || pElem->nIn == 0);
    Log2(("%s vqueuePut: %s used_idx=%u guest_used_idx=%u id=%u len=%u\n", INSTANCE(pState),
          QUEUENAME(pState, pQueue), pQueue->uNextUsedIndex, vringReadUsedIndex(pState, &pQueue->VRing), pElem->uIndex, uLen));
    vringWriteUsedElem(pState, &pQueue->VRing, pQueue->uNextUsedIndex++, pElem->uIndex, uLen);
}

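/**
 * Raises a queue interrupt for the guest unless the guest suppressed
 * interrupts via VRINGAVAIL_F_NO_INTERRUPT. If VPCI_F_NOTIFY_ON_EMPTY was
 * negotiated, an empty queue is always signalled regardless of that flag.
 *
 * @param pState The device state structure.
 * @param pQueue The virtqueue.
 */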
void vqueueNotify(PVPCISTATE pState, PVQUEUE pQueue)
{
    LogFlow(("%s vqueueNotify: %s availFlags=%x guestFeatures=%x vqueue is %sempty\n",
             INSTANCE(pState), QUEUENAME(pState, pQueue),
             vringReadAvailFlags(pState, &pQueue->VRing),
             pState->uGuestFeatures, vqueueIsEmpty(pState, pQueue)?"":"not "));
    if (!(vringReadAvailFlags(pState, &pQueue->VRing) & VRINGAVAIL_F_NO_INTERRUPT)
        || ((pState->uGuestFeatures & VPCI_F_NOTIFY_ON_EMPTY) && vqueueIsEmpty(pState, pQueue)))
    {
        int rc = vpciRaiseInterrupt(pState, VERR_INTERNAL_ERROR, VPCI_ISR_QUEUE);
        if (RT_FAILURE(rc))
            Log(("%s vqueueNotify: Failed to raise an interrupt (%Rrc).\n", INSTANCE(pState), rc));
    }
    else
    {
        STAM_COUNTER_INC(&pState->StatIntsSkipped);
    }

}

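/**
 * Publishes the used ring index to the guest and notifies it.
 *
 * Writes the shadow uNextUsedIndex to the used ring in guest memory, making
 * the entries added by preceding vqueuePut() calls visible, then calls
 * vqueueNotify() to raise an interrupt unless the guest suppressed it.
 *
 * A rough usage sketch of the get/put/sync cycle as seen from a device's
 * queue callback (the reply handling and cbReply are placeholders, not part
 * of this file):
 *
 *     VQUEUEELEM elem;
 *     while (vqueueGet(pState, pQueue, &elem, true))
 *     {
 *         // ... read request data from elem.aSegsOut[] ...
 *         // ... point elem.aSegsIn[].pv at the reply buffers ...
 *         vqueuePut(pState, pQueue, &elem, cbReply, 0);
 *     }
 *     vqueueSync(pState, pQueue);
 *
 * @param pState The device state structure.
 * @param pQueue The virtqueue.
 */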
void vqueueSync(PVPCISTATE pState, PVQUEUE pQueue)
{
    Log2(("%s vqueueSync: %s old_used_idx=%u new_used_idx=%u\n", INSTANCE(pState),
          QUEUENAME(pState, pQueue), vringReadUsedIndex(pState, &pQueue->VRing), pQueue->uNextUsedIndex));
    vringWriteUsedIndex(pState, &pQueue->VRing, pQueue->uNextUsedIndex);
    vqueueNotify(pState, pQueue);
}

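/**
 * Resets the common virtio PCI state: negotiated features, queue selector,
 * status, ISR and all queues.
 *
 * @param pState The device state structure.
 */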
void vpciReset(PVPCISTATE pState)
{
    pState->uGuestFeatures = 0;
    pState->uQueueSelector = 0;
    pState->uStatus = 0;
    pState->uISR = 0;

    for (unsigned i = 0; i < pState->nQueues; i++)
        vqueueReset(&pState->Queues[i]);
}


/**
 * Raise interrupt.
 *
 * @param pState The device state structure.
 * @param rcBusy Status code to return when the critical section is busy.
 * @param u8IntCause Interrupt cause bit mask to set in PCI ISR port.
 */
int vpciRaiseInterrupt(VPCISTATE *pState, int rcBusy, uint8_t u8IntCause)
{
    RT_NOREF_PV(rcBusy);
    // int rc = vpciCsEnter(pState, rcBusy);
    // if (RT_UNLIKELY(rc != VINF_SUCCESS))
    //     return rc;

    STAM_COUNTER_INC(&pState->StatIntsRaised);
    LogFlow(("%s vpciRaiseInterrupt: u8IntCause=%x\n",
             INSTANCE(pState), u8IntCause));

    pState->uISR |= u8IntCause;
    PDMDevHlpPCISetIrq(pState->CTX_SUFF(pDevIns), 0, 1);
    // vpciCsLeave(pState);
    return VINF_SUCCESS;
}

/**
 * Lower interrupt.
 *
 * @param pState The device state structure.
 */
static void vpciLowerInterrupt(VPCISTATE *pState)
{
    LogFlow(("%s vpciLowerInterrupt\n", INSTANCE(pState)));
    PDMDevHlpPCISetIrq(pState->CTX_SUFF(pDevIns), 0, 0);
}

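/**
 * Returns the feature mask advertised to the guest: the device-specific
 * features reported by the callback plus VPCI_F_NOTIFY_ON_EMPTY, which the
 * common code always supports.
 */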
DECLINLINE(uint32_t) vpciGetHostFeatures(PVPCISTATE pState,
                                         PFNGETHOSTFEATURES pfnGetHostFeatures)
{
    return pfnGetHostFeatures(pState)
         | VPCI_F_NOTIFY_ON_EMPTY;
}

/**
 * Port I/O Handler for IN operations.
 *
 * @returns VBox status code.
 *
 * @param pDevIns The device instance.
 * @param pvUser Pointer to the device state structure.
 * @param Port Port number used for the IN operation.
 * @param pu32 Where to store the result.
 * @param cb Number of bytes read.
 * @param pCallbacks Pointer to the callbacks.
 * @thread EMT
 */
int vpciIOPortIn(PPDMDEVINS pDevIns,
                 void *pvUser,
                 RTIOPORT Port,
                 uint32_t *pu32,
                 unsigned cb,
                 PCVPCIIOCALLBACKS pCallbacks)
{
    VPCISTATE *pState = PDMINS_2_DATA(pDevIns, VPCISTATE *);
    int rc = VINF_SUCCESS;
    STAM_PROFILE_ADV_START(&pState->CTXSUFF(StatIORead), a);
    RT_NOREF_PV(pvUser);

    /*
     * We probably do not need to enter the critical section when reading
     * registers, as most of them are either constant or only changed during
     * initialization. The exception is ISR, which can be raced by all
     * threads, but I see no big harm in that. It also happens to be the most
     * frequently read register, since it is read in the interrupt handler.
     * By dropping cs protection here we gain the ability to deliver RX
     * packets to the guest while TX is holding cs transmitting queued
     * packets.
     *
    rc = vpciCsEnter(pState, VINF_IOM_R3_IOPORT_READ);
    if (RT_UNLIKELY(rc != VINF_SUCCESS))
    {
        STAM_PROFILE_ADV_STOP(&pState->CTXSUFF(StatIORead), a);
        return rc;
    }*/

    Port -= pState->IOPortBase;
    switch (Port)
    {
        case VPCI_HOST_FEATURES:
            /* Tell the guest what features we support. */
            *pu32 = vpciGetHostFeatures(pState, pCallbacks->pfnGetHostFeatures)
                  | VPCI_F_BAD_FEATURE;
            break;

        case VPCI_GUEST_FEATURES:
            *pu32 = pState->uGuestFeatures;
            break;

        case VPCI_QUEUE_PFN:
            *pu32 = pState->Queues[pState->uQueueSelector].uPageNumber;
            break;

        case VPCI_QUEUE_NUM:
            Assert(cb == 2);
            *(uint16_t*)pu32 = pState->Queues[pState->uQueueSelector].VRing.uSize;
            break;

        case VPCI_QUEUE_SEL:
            Assert(cb == 2);
            *(uint16_t*)pu32 = pState->uQueueSelector;
            break;

        case VPCI_STATUS:
            Assert(cb == 1);
            *(uint8_t*)pu32 = pState->uStatus;
            break;

        case VPCI_ISR:
            Assert(cb == 1);
            *(uint8_t*)pu32 = pState->uISR;
            pState->uISR = 0; /* read clears all interrupts */
            vpciLowerInterrupt(pState);
            break;

        default:
            if (Port >= VPCI_CONFIG)
                rc = pCallbacks->pfnGetConfig(pState, Port - VPCI_CONFIG, cb, pu32);
            else
            {
                *pu32 = 0xFFFFFFFF;
                rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s vpciIOPortIn: no valid port at offset port=%RTiop cb=%08x\n",
                                       INSTANCE(pState), Port, cb);
            }
            break;
    }
    Log3(("%s vpciIOPortIn: At %RTiop in %0*x\n", INSTANCE(pState), Port, cb*2, *pu32));
    STAM_PROFILE_ADV_STOP(&pState->CTXSUFF(StatIORead), a);
    //vpciCsLeave(pState);
    return rc;
}


/**
 * Port I/O Handler for OUT operations.
 *
 * @returns VBox status code.
 *
 * @param pDevIns The device instance.
 * @param pvUser User argument.
 * @param Port Port number used for the OUT operation.
 * @param u32 The value to output.
 * @param cb The value size in bytes.
 * @param pCallbacks Pointer to the callbacks.
 * @thread EMT
 */
int vpciIOPortOut(PPDMDEVINS pDevIns,
                  void *pvUser,
                  RTIOPORT Port,
                  uint32_t u32,
                  unsigned cb,
                  PCVPCIIOCALLBACKS pCallbacks)
{
    VPCISTATE *pState = PDMINS_2_DATA(pDevIns, VPCISTATE *);
    int rc = VINF_SUCCESS;
    bool fHasBecomeReady;
    STAM_PROFILE_ADV_START(&pState->CTXSUFF(StatIOWrite), a);
    RT_NOREF_PV(pvUser);

    Port -= pState->IOPortBase;
    Log3(("%s vpciIOPortOut: At %RTiop out %0*x\n", INSTANCE(pState), Port, cb*2, u32));

    switch (Port)
    {
        case VPCI_GUEST_FEATURES:
            /* Check if the guest negotiates properly, fall back to basics if it does not. */
            if (VPCI_F_BAD_FEATURE & u32)
            {
                Log(("%s WARNING! Guest failed to negotiate properly (guest=%x)\n",
                     INSTANCE(pState), u32));
                pState->uGuestFeatures = pCallbacks->pfnGetHostMinimalFeatures(pState);
            }
            /* The guest may potentially desire features we don't support! */
            else if (~vpciGetHostFeatures(pState, pCallbacks->pfnGetHostFeatures) & u32)
            {
                Log(("%s Guest asked for features host does not support! (host=%x guest=%x)\n",
                     INSTANCE(pState),
                     vpciGetHostFeatures(pState, pCallbacks->pfnGetHostFeatures), u32));
                pState->uGuestFeatures =
                    vpciGetHostFeatures(pState, pCallbacks->pfnGetHostFeatures);
            }
            else
                pState->uGuestFeatures = u32;
            pCallbacks->pfnSetHostFeatures(pState, pState->uGuestFeatures);
            break;

        case VPCI_QUEUE_PFN:
            /*
             * The guest is responsible for allocating the pages for queues;
             * here it provides us with the page number of the descriptor
             * table. Note that we provide the size of the queue to the guest
             * via VIRTIO_PCI_QUEUE_NUM.
             */
            pState->Queues[pState->uQueueSelector].uPageNumber = u32;
            if (u32)
                vqueueInit(&pState->Queues[pState->uQueueSelector], u32);
            else
                rc = pCallbacks->pfnReset(pState);
            break;

        case VPCI_QUEUE_SEL:
            Assert(cb == 2);
            u32 &= 0xFFFF;
            if (u32 < pState->nQueues)
                pState->uQueueSelector = u32;
            else
                Log3(("%s vpciIOPortOut: Invalid queue selector %08x\n", INSTANCE(pState), u32));
            break;

        case VPCI_QUEUE_NOTIFY:
#ifdef IN_RING3
            Assert(cb == 2);
            u32 &= 0xFFFF;
            if (u32 < pState->nQueues)
                if (pState->Queues[u32].VRing.addrDescriptors)
                {
                    // rc = vpciCsEnter(pState, VERR_SEM_BUSY);
                    // if (RT_LIKELY(rc == VINF_SUCCESS))
                    // {
                        pState->Queues[u32].pfnCallback(pState, &pState->Queues[u32]);
                    //     vpciCsLeave(pState);
                    // }
                }
                else
                    Log(("%s The queue (#%d) being notified has not been initialized.\n",
                         INSTANCE(pState), u32));
            else
                Log(("%s Invalid queue number (%d)\n", INSTANCE(pState), u32));
#else
            rc = VINF_IOM_R3_IOPORT_WRITE;
#endif
            break;

        case VPCI_STATUS:
            Assert(cb == 1);
            u32 &= 0xFF;
            fHasBecomeReady = !(pState->uStatus & VPCI_STATUS_DRV_OK) && (u32 & VPCI_STATUS_DRV_OK);
            pState->uStatus = u32;
            /* Writing 0 to the status port triggers device reset. */
            if (u32 == 0)
                rc = pCallbacks->pfnReset(pState);
            else if (fHasBecomeReady)
                pCallbacks->pfnReady(pState);
            break;

        default:
            if (Port >= VPCI_CONFIG)
                rc = pCallbacks->pfnSetConfig(pState, Port - VPCI_CONFIG, cb, &u32);
            else
                rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s vpciIOPortOut: no valid port at offset Port=%RTiop cb=%08x\n",
                                       INSTANCE(pState), Port, cb);
            break;
    }

    STAM_PROFILE_ADV_STOP(&pState->CTXSUFF(StatIOWrite), a);
    return rc;
}

#ifdef IN_RING3

/**
 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
 */
void *vpciQueryInterface(struct PDMIBASE *pInterface, const char *pszIID)
{
    VPCISTATE *pThis = IFACE_TO_STATE(pInterface, IBase);
    Assert(&pThis->IBase == pInterface);

    PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThis->IBase);
    PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThis->ILeds);
    return NULL;
}

/**
 * Gets the pointer to the status LED of a unit.
 *
 * @returns VBox status code.
 * @param pInterface Pointer to the interface structure.
 * @param iLUN The unit which status LED we desire.
 * @param ppLed Where to store the LED pointer.
 * @thread EMT
 */
static DECLCALLBACK(int) vpciQueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
{
    VPCISTATE *pState = IFACE_TO_STATE(pInterface, ILeds);
    int rc = VERR_PDM_LUN_NOT_FOUND;

    if (iLUN == 0)
    {
        *ppLed = &pState->led;
        rc = VINF_SUCCESS;
    }
    return rc;
}

/**
 * Turns on/off the write status LED.
 *
 * @param pState Pointer to the device state structure.
 * @param fOn New LED state.
 */
void vpciSetWriteLed(PVPCISTATE pState, bool fOn)
{
    LogFlow(("%s vpciSetWriteLed: %s\n", INSTANCE(pState), fOn?"on":"off"));
    if (fOn)
        pState->led.Asserted.s.fWriting = pState->led.Actual.s.fWriting = 1;
    else
        pState->led.Actual.s.fWriting = fOn;
}

/**
 * Turns on/off the read status LED.
 *
 * @param pState Pointer to the device state structure.
 * @param fOn New LED state.
 */
void vpciSetReadLed(PVPCISTATE pState, bool fOn)
{
    LogFlow(("%s vpciSetReadLed: %s\n", INSTANCE(pState), fOn?"on":"off"));
    if (fOn)
        pState->led.Asserted.s.fReading = pState->led.Actual.s.fReading = 1;
    else
        pState->led.Actual.s.fReading = fOn;
}

/**
 * Sets 8-bit register in PCI configuration space.
 * @param refPciDev The PCI device.
 * @param uOffset The register offset.
 * @param u8Value The value to store in the register.
 * @thread EMT
 */
DECLINLINE(void) vpciCfgSetU8(PCIDEVICE& refPciDev, uint32_t uOffset, uint8_t u8Value)
{
    Assert(uOffset < sizeof(refPciDev.config));
    refPciDev.config[uOffset] = u8Value;
}

/**
 * Sets 16-bit register in PCI configuration space.
 * @param refPciDev The PCI device.
 * @param uOffset The register offset.
 * @param u16Value The value to store in the register.
 * @thread EMT
 */
DECLINLINE(void) vpciCfgSetU16(PCIDEVICE& refPciDev, uint32_t uOffset, uint16_t u16Value)
{
    Assert(uOffset+sizeof(u16Value) <= sizeof(refPciDev.config));
    *(uint16_t*)&refPciDev.config[uOffset] = u16Value;
}

#if 0 /* unused */
/**
 * Sets 32-bit register in PCI configuration space.
 * @param refPciDev The PCI device.
 * @param uOffset The register offset.
 * @param u32Value The value to store in the register.
 * @thread EMT
 */
DECLINLINE(void) vpciCfgSetU32(PCIDEVICE& refPciDev, uint32_t uOffset, uint32_t u32Value)
{
    Assert(uOffset+sizeof(u32Value) <= sizeof(refPciDev.config));
    *(uint32_t*)&refPciDev.config[uOffset] = u32Value;
}
#endif /* unused */


#ifdef DEBUG
static void vpciDumpState(PVPCISTATE pState, const char *pcszCaller)
{
    Log2(("vpciDumpState: (called from %s)\n"
          " uGuestFeatures = 0x%08x\n"
          " uQueueSelector = 0x%04x\n"
          " uStatus = 0x%02x\n"
          " uISR = 0x%02x\n",
          pcszCaller,
          pState->uGuestFeatures,
          pState->uQueueSelector,
          pState->uStatus,
          pState->uISR));

    for (unsigned i = 0; i < pState->nQueues; i++)
        Log2((" %s queue:\n"
              " VRing.uSize = %u\n"
              " VRing.addrDescriptors = %p\n"
              " VRing.addrAvail = %p\n"
              " VRing.addrUsed = %p\n"
              " uNextAvailIndex = %u\n"
              " uNextUsedIndex = %u\n"
              " uPageNumber = %x\n",
              pState->Queues[i].pcszName,
              pState->Queues[i].VRing.uSize,
              pState->Queues[i].VRing.addrDescriptors,
              pState->Queues[i].VRing.addrAvail,
              pState->Queues[i].VRing.addrUsed,
              pState->Queues[i].uNextAvailIndex,
              pState->Queues[i].uNextUsedIndex,
              pState->Queues[i].uPageNumber));
}
#else
# define vpciDumpState(x, s) do {} while (0)
#endif

/**
 * Saves the state of the device.
 *
 * @returns VBox status code.
 * @param pState The device state structure.
 * @param pSSM The handle to the saved state.
 */
int vpciSaveExec(PVPCISTATE pState, PSSMHANDLE pSSM)
{
    int rc;

    vpciDumpState(pState, "vpciSaveExec");

    rc = SSMR3PutU32(pSSM, pState->uGuestFeatures);
    AssertRCReturn(rc, rc);
    rc = SSMR3PutU16(pSSM, pState->uQueueSelector);
    AssertRCReturn(rc, rc);
    rc = SSMR3PutU8( pSSM, pState->uStatus);
    AssertRCReturn(rc, rc);
    rc = SSMR3PutU8( pSSM, pState->uISR);
    AssertRCReturn(rc, rc);

    /* Save queue states */
    rc = SSMR3PutU32(pSSM, pState->nQueues);
    AssertRCReturn(rc, rc);
    for (unsigned i = 0; i < pState->nQueues; i++)
    {
        rc = SSMR3PutU16(pSSM, pState->Queues[i].VRing.uSize);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutU32(pSSM, pState->Queues[i].uPageNumber);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutU16(pSSM, pState->Queues[i].uNextAvailIndex);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutU16(pSSM, pState->Queues[i].uNextUsedIndex);
        AssertRCReturn(rc, rc);
    }

    return VINF_SUCCESS;
}

/**
 * Loads a saved device state.
 *
 * @returns VBox status code.
 * @param pState The device state structure.
 * @param pSSM The handle to the saved state.
 * @param uVersion The data unit version number.
 * @param uPass The data pass.
 * @param nQueues The default number of queues for saved states that predate
 *                the per-state queue count.
 */
int vpciLoadExec(PVPCISTATE pState, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass, uint32_t nQueues)
{
    int rc;

    if (uPass == SSM_PASS_FINAL)
    {
        /* Restore state data */
        rc = SSMR3GetU32(pSSM, &pState->uGuestFeatures);
        AssertRCReturn(rc, rc);
        rc = SSMR3GetU16(pSSM, &pState->uQueueSelector);
        AssertRCReturn(rc, rc);
        rc = SSMR3GetU8( pSSM, &pState->uStatus);
        AssertRCReturn(rc, rc);
        rc = SSMR3GetU8( pSSM, &pState->uISR);
        AssertRCReturn(rc, rc);

        /* Restore queues */
        if (uVersion > VIRTIO_SAVEDSTATE_VERSION_3_1_BETA1)
        {
            rc = SSMR3GetU32(pSSM, &pState->nQueues);
            AssertRCReturn(rc, rc);
        }
        else
            pState->nQueues = nQueues;
        for (unsigned i = 0; i < pState->nQueues; i++)
        {
            rc = SSMR3GetU16(pSSM, &pState->Queues[i].VRing.uSize);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetU32(pSSM, &pState->Queues[i].uPageNumber);
            AssertRCReturn(rc, rc);

            if (pState->Queues[i].uPageNumber)
                vqueueInit(&pState->Queues[i], pState->Queues[i].uPageNumber);

            rc = SSMR3GetU16(pSSM, &pState->Queues[i].uNextAvailIndex);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetU16(pSSM, &pState->Queues[i].uNextUsedIndex);
            AssertRCReturn(rc, rc);
        }
    }

    vpciDumpState(pState, "vpciLoadExec");

    return VINF_SUCCESS;
}

/**
 * Set PCI configuration space registers.
 *
 * @param pci Reference to PCI device structure.
 * @param uDeviceId VirtIO Device Id
 * @param uClass Class of PCI device (network, etc)
 * @thread EMT
 */
static DECLCALLBACK(void) vpciConfigure(PCIDEVICE& pci,
                                        uint16_t uDeviceId,
                                        uint16_t uClass)
{
    /* Configure PCI Device, assume 32-bit mode ******************************/
    PCIDevSetVendorId(&pci, DEVICE_PCI_VENDOR_ID);
    PCIDevSetDeviceId(&pci, DEVICE_PCI_BASE_ID + uDeviceId);
    vpciCfgSetU16(pci, VBOX_PCI_SUBSYSTEM_VENDOR_ID, DEVICE_PCI_SUBSYSTEM_VENDOR_ID);
    vpciCfgSetU16(pci, VBOX_PCI_SUBSYSTEM_ID, DEVICE_PCI_SUBSYSTEM_BASE_ID + uDeviceId);

    /* ABI version, must be equal to 0 as of the 2.6.30 kernel. */
    vpciCfgSetU8( pci, VBOX_PCI_REVISION_ID, 0x00);
    /* Ethernet adapter */
    vpciCfgSetU8( pci, VBOX_PCI_CLASS_PROG, 0x00);
    vpciCfgSetU16(pci, VBOX_PCI_CLASS_DEVICE, uClass);
    /* Interrupt Pin: INTA# */
    vpciCfgSetU8( pci, VBOX_PCI_INTERRUPT_PIN, 0x01);

#ifdef VBOX_WITH_MSI_DEVICES
    PCIDevSetCapabilityList(&pci, 0x80);
    PCIDevSetStatus(&pci, VBOX_PCI_STATUS_CAP_LIST);
#endif
}

#ifdef VBOX_WITH_STATISTICS
/* WARNING! This function must never be used in multithreaded context! */
static const char *vpciCounter(const char *pszDevFmt,
                               const char *pszCounter)
{
    static char s_szCounterName[80];

    RTStrPrintf(s_szCounterName, sizeof(s_szCounterName),
                "/Devices/%s/%s", pszDevFmt, pszCounter);

    return s_szCounterName;
}
#endif

/// @todo header
int vpciConstruct(PPDMDEVINS pDevIns, VPCISTATE *pState,
                  int iInstance, const char *pcszNameFmt,
                  uint16_t uDeviceId, uint16_t uClass,
                  uint32_t nQueues)
{
    /* Init handles and log related stuff. */
    RTStrPrintf(pState->szInstance, sizeof(pState->szInstance),
                pcszNameFmt, iInstance);

    pState->pDevInsR3 = pDevIns;
    pState->pDevInsR0 = PDMDEVINS_2_R0PTR(pDevIns);
    pState->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
    pState->led.u32Magic = PDMLED_MAGIC;

    pState->ILeds.pfnQueryStatusLed = vpciQueryStatusLed;

    /* Initialize critical section. */
    int rc = PDMDevHlpCritSectInit(pDevIns, &pState->cs, RT_SRC_POS, "%s", pState->szInstance);
    if (RT_FAILURE(rc))
        return rc;

    /* Set PCI config registers */
    vpciConfigure(pState->pciDevice, uDeviceId, uClass);
    /* Register PCI device */
    rc = PDMDevHlpPCIRegister(pDevIns, &pState->pciDevice);
    if (RT_FAILURE(rc))
        return rc;

#ifdef VBOX_WITH_MSI_DEVICES
#if 0
    {
        PDMMSIREG aMsiReg;

        RT_ZERO(aMsiReg);
        aMsiReg.cMsixVectors = 1;
        aMsiReg.iMsixCapOffset = 0x80;
        aMsiReg.iMsixNextOffset = 0x0;
        aMsiReg.iMsixBar = 0;
        rc = PDMDevHlpPCIRegisterMsi(pDevIns, &aMsiReg);
        if (RT_FAILURE(rc))
            PCIDevSetCapabilityList(&pState->pciDevice, 0x0);
    }
#endif
#endif

    /* Status driver */
    PPDMIBASE pBase;
    rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pState->IBase, &pBase, "Status Port");
    if (RT_FAILURE(rc))
        return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));
    pState->pLedsConnector = PDMIBASE_QUERY_INTERFACE(pBase, PDMILEDCONNECTORS);

    pState->nQueues = nQueues;

#if defined(VBOX_WITH_STATISTICS)
    PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIOReadGC, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in GC", vpciCounter(pcszNameFmt, "IO/ReadGC"), iInstance);
    PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIOReadHC, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in HC", vpciCounter(pcszNameFmt, "IO/ReadHC"), iInstance);
    PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIOWriteGC, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in GC", vpciCounter(pcszNameFmt, "IO/WriteGC"), iInstance);
    PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIOWriteHC, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in HC", vpciCounter(pcszNameFmt, "IO/WriteHC"), iInstance);
    PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIntsRaised, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of raised interrupts", vpciCounter(pcszNameFmt, "Interrupts/Raised"), iInstance);
    PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIntsSkipped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of skipped interrupts", vpciCounter(pcszNameFmt, "Interrupts/Skipped"), iInstance);
    PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatCsGC, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling CS wait in GC", vpciCounter(pcszNameFmt, "Cs/CsGC"), iInstance);
    PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatCsHC, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling CS wait in HC", vpciCounter(pcszNameFmt, "Cs/CsHC"), iInstance);
#endif /* VBOX_WITH_STATISTICS */

    return rc;
}

/**
 * Destruct PCI-related part of device.
 *
 * We need to free non-VM resources only.
 *
 * @returns VBox status code.
 * @param pState The device state structure.
 */
int vpciDestruct(VPCISTATE* pState)
{
    Log(("%s Destroying PCI instance\n", INSTANCE(pState)));

    if (PDMCritSectIsInitialized(&pState->cs))
        PDMR3CritSectDelete(&pState->cs);

    return VINF_SUCCESS;
}

/**
 * Device relocation callback.
 *
 * When this callback is called, the device instance data and, if the device
 * has a GC component, that component are being relocated, and/or the
 * selectors have been changed. The device must use this chance to perform
 * the necessary pointer relocations and data updates.
 *
 * Before the GC code is executed the first time, this function will be
 * called with a 0 delta so GC pointer calculations can be done in one place.
 *
 * @param pDevIns Pointer to the device instance.
 * @param offDelta The relocation delta relative to the old location.
 *
 * @remark A relocation CANNOT fail.
 */
void vpciRelocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
{
    RT_NOREF(offDelta);
    VPCISTATE *pState = PDMINS_2_DATA(pDevIns, VPCISTATE*);
    pState->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
    // TBD
}

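/**
 * Registers a virtqueue with the common virtio PCI state.
 *
 * Finds the first unused slot in pState->Queues (a slot with VRing.uSize of
 * zero), fills in the size, callback and name, and returns it. The number of
 * slots searched is pState->nQueues, which vpciConstruct() sets, so this is
 * typically called from the device-specific constructor after that call.
 * The ring addresses themselves are filled in later, when the guest writes
 * VPCI_QUEUE_PFN.
 *
 * @returns Pointer to the new queue, or NULL if all slots are in use.
 * @param pState      The device state structure.
 * @param uSize       Size (number of descriptors) of the virtqueue.
 * @param pfnCallback Function to call when the guest notifies the queue.
 * @param pcszName    Queue name, used for logging.
 */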
PVQUEUE vpciAddQueue(VPCISTATE* pState, unsigned uSize, PFNVPCIQUEUECALLBACK pfnCallback, const char *pcszName)
{
    PVQUEUE pQueue = NULL;
    /* Find an empty queue slot */
    for (unsigned i = 0; i < pState->nQueues; i++)
    {
        if (pState->Queues[i].VRing.uSize == 0)
        {
            pQueue = &pState->Queues[i];
            break;
        }
    }

    if (!pQueue)
    {
        Log(("%s Too many queues being added, no empty slots available!\n", INSTANCE(pState)));
    }
    else
    {
        pQueue->VRing.uSize = uSize;
        pQueue->VRing.addrDescriptors = 0;
        pQueue->uPageNumber = 0;
        pQueue->pfnCallback = pfnCallback;
        pQueue->pcszName = pcszName;
    }

    return pQueue;
}

#endif /* IN_RING3 */

#endif /* VBOX_DEVICE_STRUCT_TESTCASE */