VirtualBox

source: vbox/trunk/src/VBox/Devices/VirtIO/VirtioCore.cpp@ 85030

Last change on this file since 85030 was 85025, checked in by vboxsync, 5 years ago

Minor fixes to last integration

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 87.6 KB
Line 
1/* $Id: VirtioCore.cpp 85025 2020-07-01 13:38:37Z vboxsync $ */
2
3/** @file
4 * VirtioCore - Virtio Core (PCI, feature & config mgt, queue mgt & proxy, notification mgt)
5 */
6
7/*
8 * Copyright (C) 2009-2020 Oracle Corporation
9 *
10 * This file is part of VirtualBox Open Source Edition (OSE), as
11 * available from http://www.virtualbox.org. This file is free software;
12 * you can redistribute it and/or modify it under the terms of the GNU
13 * General Public License (GPL) as published by the Free Software
14 * Foundation, in version 2 as it comes in the "COPYING" file of the
15 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
16 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
17 */
18
19
20/*********************************************************************************************************************************
21* Header Files *
22*********************************************************************************************************************************/
23#define LOG_GROUP LOG_GROUP_DEV_VIRTIO
24
25#include <iprt/assert.h>
26#include <iprt/uuid.h>
27#include <iprt/mem.h>
28#include <iprt/sg.h>
29#include <iprt/assert.h>
30#include <iprt/string.h>
31#include <iprt/param.h>
32#include <iprt/types.h>
33#include <VBox/log.h>
34#include <VBox/msi.h>
35#include <iprt/types.h>
36#include <VBox/AssertGuest.h>
37#include <VBox/vmm/pdmdev.h>
38#include "VirtioCore.h"
39
40
41/*********************************************************************************************************************************
42* Defined Constants And Macros *
43*********************************************************************************************************************************/
/** Returns the device's human-readable instance name (used in log output). */
#define INSTANCE(a_pVirtio) ((a_pVirtio)->szInstance)
/** Returns the human-readable name assigned to virtq @a a_uVirtqNbr (used in log output). */
#define VIRTQNAME(a_pVirtio, a_uVirtqNbr) ((a_pVirtio)->aVirtqState[(a_uVirtqNbr)].szVirtqName)
/** True once the guest driver has set VIRTIO_STATUS_DRIVER_OK in fDeviceStatus. */
#define IS_DRIVER_OK(a_pVirtio) ((a_pVirtio)->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
/** True when the avail ring currently holds no entries the device has not yet consumed. */
#define IS_VIRTQ_EMPTY(pDevIns, pVirtio, pVirtqState) \
    (virtioCoreVirtqAvailBufCount(pDevIns, pVirtio, pVirtqState) == 0)

/**
 * This macro returns true if the @a a_offAccess and access length (@a
 * a_cbAccess) are within the range of the mapped capability struct described by
 * @a a_LocCapData.
 *
 * Note: the first operand of the && always executes, so @a a_offsetIntoCap is
 * assigned even when the macro evaluates to false (out-of-range accesses leave
 * a stale offset in it; callers must only use it when the macro is true).
 *
 * @param[in]  a_offAccess      Input:  The offset into the MMIO bar of the access.
 * @param[in]  a_cbAccess       Input:  The access size.
 * @param[out] a_offsetIntoCap  Output: uint32_t variable to return the intra-capability offset into.
 * @param[in]  a_LocCapData     Input:  The capability location info.
 */
#define MATCHES_VIRTIO_CAP_STRUCT(a_offAccess, a_cbAccess, a_offsetIntoCap, a_LocCapData) \
    (    ((a_offsetIntoCap) = (uint32_t)((a_offAccess) - (a_LocCapData).offMmio)) < (uint32_t)(a_LocCapData).cbMmio \
      && (a_offsetIntoCap) + (uint32_t)(a_cbAccess) <= (uint32_t)(a_LocCapData).cbMmio )


/** Marks the start of the virtio saved state (just for sanity). */
#define VIRTIO_SAVEDSTATE_MARKER                        UINT64_C(0x1133557799bbddff)
/** The current saved state version for the virtio core. */
#define VIRTIO_SAVEDSTATE_VERSION                       UINT32_C(1)
71
72
73/*********************************************************************************************************************************
74* Structures and Typedefs *
75*********************************************************************************************************************************/
76
77
/** @name virtq related flags
 * @{ */
#define VIRTQ_DESC_F_NEXT                               1        /**< Indicates this descriptor chains to next  */
#define VIRTQ_DESC_F_WRITE                              2        /**< Marks buffer as write-only (default ro)   */
#define VIRTQ_DESC_F_INDIRECT                           4        /**< Buffer is list of buffer descriptors      */

#define VIRTQ_USED_F_NO_NOTIFY                          1        /**< Dev to Drv: Don't notify when buf added   */
#define VIRTQ_AVAIL_F_NO_INTERRUPT                      1        /**< Drv to Dev: Don't notify when buf eaten   */
/** @} */

/**
 * virtq related structs
 * (struct names follow VirtIO 1.0 spec, typedef use VBox style)
 *
 * These mirror the guest-physical ring layouts defined by VirtIO 1.0, 2.4;
 * they are only ever copied to/from guest memory, never shared in place.
 */
typedef struct virtq_desc                                        /* descriptor table entry (VirtIO 1.0, 2.4.5)  */
{
    uint64_t  GCPhysBuf;                                         /**< addr       GC Phys. address of buffer     */
    uint32_t  cb;                                                /**< len        Buffer length                  */
    uint16_t  fFlags;                                            /**< flags      Buffer specific flags          */
    uint16_t  uDescIdxNext;                                      /**< next       Idx set if VIRTIO_DESC_F_NEXT  */
} VIRTQ_DESC_T, *PVIRTQ_DESC_T;

typedef struct virtq_avail                                       /* "available" (driver->device) ring           */
{
    uint16_t  fFlags;                                            /**< flags      avail ring guest-to-host flags */
    uint16_t  uIdx;                                              /**< idx        Index of next free ring slot   */
    RT_FLEXIBLE_ARRAY_EXTENSION
    uint16_t  auRing[RT_FLEXIBLE_ARRAY];                         /**< ring       Ring: avail drv to dev bufs    */
    //uint16_t  uUsedEventIdx;                                   /**< used_event (if VIRTQ_USED_F_EVENT_IDX)    */
} VIRTQ_AVAIL_T, *PVIRTQ_AVAIL_T;

typedef struct virtq_used_elem                                   /* one completed buffer in the used ring       */
{
    uint32_t  uDescIdx;                                          /**< idx        Start of used desc chain       */
    uint32_t  cbElem;                                            /**< len        Total len of used desc chain   */
} VIRTQ_USED_ELEM_T;

typedef struct virt_used                                         /* "used" (device->driver) ring                */
{
    uint16_t  fFlags;                                            /**< flags      used ring host-to-guest flags  */
    uint16_t  uIdx;                                              /**< idx        Index of next ring slot        */
    RT_FLEXIBLE_ARRAY_EXTENSION
    VIRTQ_USED_ELEM_T aRing[RT_FLEXIBLE_ARRAY];                  /**< ring       Ring: used dev to drv bufs     */
    //uint16_t  uAvailEventIdx;                                  /**< avail_event if (VIRTQ_USED_F_EVENT_IDX)   */
} VIRTQ_USED_T, *PVIRTQ_USED_T;
123
124
125const char *virtioCoreGetStateChangeText(VIRTIOVMSTATECHANGED enmState)
126{
127 switch (enmState)
128 {
129 case kvirtIoVmStateChangedReset: return "VM RESET";
130 case kvirtIoVmStateChangedSuspend: return "VM SUSPEND";
131 case kvirtIoVmStateChangedPowerOff: return "VM POWER OFF";
132 case kvirtIoVmStateChangedResume: return "VM RESUME";
133 default: return "<BAD ENUM>";
134 }
135}
136
137/* Internal Functions */
138
139static void virtioCoreNotifyGuestDriver(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr);
140static int virtioKick(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint8_t uCause, uint16_t uVec);
141
142/** @name Internal queue operations
143 * @{ */
144
145/**
146 * Accessor for virtq descriptor
147 */
148#ifdef IN_RING3
/**
 * Reads the descriptor table entry at @a idxDesc of the given virtq from guest
 * physical memory into @a pDesc.  The index is wrapped modulo the queue size.
 */
DECLINLINE(void) virtioReadDesc(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr,
                                uint32_t idxDesc, PVIRTQ_DESC_T pDesc)
{
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    uint16_t const cVirtqItems = RT_MAX(pVirtio->uVirtqSize[uVirtqNbr], 1); /* Make sure to avoid div-by-zero. */
    PDMDevHlpPCIPhysRead(pDevIns,
                         pVirtio->aGCPhysVirtqDesc[uVirtqNbr] + sizeof(VIRTQ_DESC_T) * (idxDesc % cVirtqItems),
                         pDesc, sizeof(VIRTQ_DESC_T));
}
158#endif
159
160/**
161 * Accessors for virtq avail ring
162 */
163#ifdef IN_RING3
/**
 * Reads the descriptor-chain head index stored in avail ring slot @a availIdx
 * (wrapped modulo the queue size) from guest physical memory.
 */
DECLINLINE(uint16_t) virtioReadAvailDescIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, uint32_t availIdx)
{
    uint16_t uDescIdx;
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    uint16_t const cVirtqItems = RT_MAX(pVirtio->uVirtqSize[uVirtqNbr], 1); /* Make sure to avoid div-by-zero. */
    PDMDevHlpPCIPhysRead(pDevIns,
                         pVirtio->aGCPhysVirtqAvail[uVirtqNbr]
                       + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[availIdx % cVirtqItems]),
                         &uDescIdx, sizeof(uDescIdx));
    return uDescIdx;
}
175
/**
 * Reads the avail ring's used_event field from guest physical memory.  Per
 * VirtIO 1.0 the 16-bit used_event immediately follows the last ring slot,
 * hence the auRing[queue size] offset.
 */
DECLINLINE(uint16_t) virtioReadAvailUsedEvent(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
{
    uint16_t uUsedEventIdx;
    /* VirtIO 1.0 uUsedEventIdx (used_event) immediately follows ring */
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPCIPhysRead(pDevIns,
                         pVirtio->aGCPhysVirtqAvail[uVirtqNbr] + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[pVirtio->uVirtqSize[uVirtqNbr]]),
                         &uUsedEventIdx, sizeof(uUsedEventIdx));
    return uUsedEventIdx;
}
186#endif
187
/**
 * Reads the avail ring's free-running idx field (next slot the guest driver
 * will fill) from guest physical memory.
 */
DECLINLINE(uint16_t) virtioReadAvailRingIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
{
    uint16_t uIdx = 0;
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPCIPhysRead(pDevIns,
                         pVirtio->aGCPhysVirtqAvail[uVirtqNbr] + RT_UOFFSETOF(VIRTQ_AVAIL_T, uIdx),
                         &uIdx, sizeof(uIdx));
    return uIdx;
}
197
/**
 * Reads the avail ring's flags field (e.g. VIRTQ_AVAIL_F_NO_INTERRUPT) from
 * guest physical memory.
 */
DECLINLINE(uint16_t) virtioReadAvailRingFlags(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
{
    uint16_t fFlags = 0;
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPCIPhysRead(pDevIns,
                         pVirtio->aGCPhysVirtqAvail[uVirtqNbr] + RT_UOFFSETOF(VIRTQ_AVAIL_T, fFlags),
                         &fFlags, sizeof(fFlags));
    return fFlags;
}
207
208/** @} */
209
210/** @name Accessors for virtq used ring
211 * @{
212 */
213
214#ifdef IN_RING3
/**
 * Writes one used ring element {desc chain head @a uDescIdx, byte count
 * @a uLen} into used ring slot @a usedIdx (wrapped modulo the queue size)
 * in guest physical memory.
 */
DECLINLINE(void) virtioWriteUsedElem(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr,
                                     uint32_t usedIdx, uint32_t uDescIdx, uint32_t uLen)
{
    VIRTQ_USED_ELEM_T elem = { uDescIdx, uLen };
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    uint16_t const cVirtqItems = RT_MAX(pVirtio->uVirtqSize[uVirtqNbr], 1); /* Make sure to avoid div-by-zero. */
    PDMDevHlpPCIPhysWrite(pDevIns,
                          pVirtio->aGCPhysVirtqUsed[uVirtqNbr] + RT_UOFFSETOF_DYN(VIRTQ_USED_T, aRing[usedIdx % cVirtqItems]),
                          &elem, sizeof(elem));
}
225
/**
 * Writes the used ring's flags field (e.g. VIRTQ_USED_F_NO_NOTIFY) to guest
 * physical memory.
 */
DECLINLINE(void) virtioWriteUsedRingFlags(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, uint16_t fFlags)
{
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    RT_UNTRUSTED_VALIDATED_FENCE(); /* VirtIO 1.0, Section 3.2.1.4.1 */
    PDMDevHlpPCIPhysWrite(pDevIns,
                          pVirtio->aGCPhysVirtqUsed[uVirtqNbr] + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
                          &fFlags, sizeof(fFlags));
}
234#endif
235
/**
 * Writes the used ring's idx field (next slot the device will fill) to guest
 * physical memory.
 */
DECLINLINE(void) virtioWriteUsedRingIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, uint16_t uIdx)
{
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPCIPhysWrite(pDevIns,
                          pVirtio->aGCPhysVirtqUsed[uVirtqNbr] + RT_UOFFSETOF(VIRTQ_USED_T, uIdx),
                          &uIdx, sizeof(uIdx));
}
243
244
245#ifdef IN_RING3
/**
 * Reads back the used ring's idx field from guest physical memory.
 */
DECLINLINE(uint16_t) virtioReadUsedRingIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
{
    uint16_t uIdx = 0;
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPCIPhysRead(pDevIns,
                         pVirtio->aGCPhysVirtqUsed[uVirtqNbr] + RT_UOFFSETOF(VIRTQ_USED_T, uIdx),
                         &uIdx, sizeof(uIdx));
    return uIdx;
}
255
/**
 * Reads back the used ring's flags field from guest physical memory.
 */
DECLINLINE(uint16_t) virtioReadUsedRingFlags(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
{
    uint16_t fFlags = 0;
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPCIPhysRead(pDevIns,
                         pVirtio->aGCPhysVirtqUsed[uVirtqNbr] + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
                         &fFlags, sizeof(fFlags));
    return fFlags;
}
265
266DECLINLINE(void) virtioWriteUsedAvailEvent(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, uint32_t uAvailEventIdx)
267{
268 /** VirtIO 1.0 uAvailEventIdx (avail_event) immediately follows ring */
269 AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
270 PDMDevHlpPCIPhysWrite(pDevIns,
271 pVirtio->aGCPhysVirtqUsed[uVirtqNbr] + RT_UOFFSETOF_DYN(VIRTQ_USED_T, aRing[pVirtio->uVirtqSize[uVirtqNbr]]),
272 &uAvailEventIdx, sizeof(uAvailEventIdx));
273}
274#endif
275
276DECLINLINE(uint16_t) virtioCoreVirtqAvailBufCount(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQSTATE pVirtqState)
277{
278 uint16_t uIdx = virtioReadAvailRingIdx(pDevIns, pVirtio, pVirtqState->uVirtqNbr);
279 uint16_t uShadow = pVirtqState->uAvailIdxShadow;
280
281 uint16_t uDelta;
282 if (uIdx < uShadow)
283 uDelta = (uIdx + VIRTQ_MAX_ENTRIES) - uShadow;
284 else
285 uDelta = uIdx - uShadow;
286
287 LogFunc(("%s has %u %s (idx=%u shadow=%u)\n",
288 VIRTQNAME(pVirtio, pVirtqState->uVirtqNbr), uDelta, uDelta == 1 ? "entry" : "entries",
289 uIdx, uShadow));
290
291 return uDelta;
292}
293/**
294 * Get count of new (e.g. pending) elements in available ring.
295 *
296 * @param pDevIns The device instance.
297 * @param pVirtio Pointer to the shared virtio state.
298 * @param uVirtqNbr Virtq number
299 *
300 * @returns how many entries have been added to ring as a delta of the consumer's
301 * avail index and the queue's guest-side current avail index.
302 */
303uint16_t virtioCoreVirtqAvailBufCount(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
304{
305 if (!IS_DRIVER_OK(pVirtio) || !pVirtio->uVirtqEnable[uVirtqNbr])
306 {
307 LogRelFunc(("Driver not ready or queue not enabled\n"));
308 return 0;
309 }
310 return virtioCoreVirtqAvailBufCount(pDevIns, pVirtio, &pVirtio->aVirtqState[uVirtqNbr]);
311}
312
313
314/** @} */
315
316void virtioCoreGCPhysChainInit(PVIRTIOSGBUF pGcSgBuf, PVIRTIOSGSEG paSegs, size_t cSegs)
317{
318 AssertPtr(pGcSgBuf);
319 Assert( (cSegs > 0 && VALID_PTR(paSegs)) || (!cSegs && !paSegs));
320 Assert(cSegs < (~(unsigned)0 >> 1));
321
322 pGcSgBuf->paSegs = paSegs;
323 pGcSgBuf->cSegs = (unsigned)cSegs;
324 pGcSgBuf->idxSeg = 0;
325 if (cSegs && paSegs)
326 {
327 pGcSgBuf->GCPhysCur = paSegs[0].GCPhys;
328 pGcSgBuf->cbSegLeft = paSegs[0].cbSeg;
329 }
330 else
331 {
332 pGcSgBuf->GCPhysCur = 0;
333 pGcSgBuf->cbSegLeft = 0;
334 }
335}
336
/**
 * Consumes up to @a *pcbData bytes from the current segment of the chain and
 * advances the cursor.
 *
 * @returns The guest physical address of the consumed run, or 0 when the
 *          chain is exhausted (in which case *pcbData is set to 0).
 *
 * On return @a *pcbData holds the number of bytes actually consumed.  Note it
 * is only written in the segment-exhausted branch below: when the segment is
 * not exhausted, RT_MIN() guarantees cbData already equals *pcbData, so no
 * store is needed.
 */
static RTGCPHYS virtioCoreGCPhysChainGet(PVIRTIOSGBUF pGcSgBuf, size_t *pcbData)
{
    size_t cbData;
    RTGCPHYS pGcBuf;

    /* Check that the S/G buffer has memory left. */
    if (RT_LIKELY(pGcSgBuf->idxSeg < pGcSgBuf->cSegs && pGcSgBuf->cbSegLeft))
    { /* likely */ }
    else
    {
        *pcbData = 0;
        return 0;
    }

    /* Sanity: the cursor must lie within the bounds of the current segment. */
    AssertMsg(    pGcSgBuf->cbSegLeft <= 128 * _1M
              && (RTGCPHYS)pGcSgBuf->GCPhysCur >= (RTGCPHYS)pGcSgBuf->paSegs[pGcSgBuf->idxSeg].GCPhys
              && (RTGCPHYS)pGcSgBuf->GCPhysCur + pGcSgBuf->cbSegLeft <=
                     (RTGCPHYS)pGcSgBuf->paSegs[pGcSgBuf->idxSeg].GCPhys + pGcSgBuf->paSegs[pGcSgBuf->idxSeg].cbSeg,
              ("pGcSgBuf->idxSeg=%d pGcSgBuf->cSegs=%d pGcSgBuf->GCPhysCur=%p pGcSgBuf->cbSegLeft=%zd "
               "pGcSgBuf->paSegs[%d].GCPhys=%p pGcSgBuf->paSegs[%d].cbSeg=%zd\n",
               pGcSgBuf->idxSeg, pGcSgBuf->cSegs, pGcSgBuf->GCPhysCur, pGcSgBuf->cbSegLeft,
               pGcSgBuf->idxSeg, pGcSgBuf->paSegs[pGcSgBuf->idxSeg].GCPhys, pGcSgBuf->idxSeg,
               pGcSgBuf->paSegs[pGcSgBuf->idxSeg].cbSeg));

    cbData = RT_MIN(*pcbData, pGcSgBuf->cbSegLeft);
    pGcBuf = pGcSgBuf->GCPhysCur;
    pGcSgBuf->cbSegLeft -= cbData;
    if (!pGcSgBuf->cbSegLeft)
    {
        /* Current segment exhausted: move the cursor to the next segment (if any). */
        pGcSgBuf->idxSeg++;

        if (pGcSgBuf->idxSeg < pGcSgBuf->cSegs)
        {
            pGcSgBuf->GCPhysCur = pGcSgBuf->paSegs[pGcSgBuf->idxSeg].GCPhys;
            pGcSgBuf->cbSegLeft = pGcSgBuf->paSegs[pGcSgBuf->idxSeg].cbSeg;
        }
        *pcbData = cbData;
    }
    else
        pGcSgBuf->GCPhysCur = pGcSgBuf->GCPhysCur + cbData;

    return pGcBuf;
}
380
381void virtioCoreGCPhysChainReset(PVIRTIOSGBUF pGcSgBuf)
382{
383 AssertPtrReturnVoid(pGcSgBuf);
384
385 pGcSgBuf->idxSeg = 0;
386 if (pGcSgBuf->cSegs)
387 {
388 pGcSgBuf->GCPhysCur = pGcSgBuf->paSegs[0].GCPhys;
389 pGcSgBuf->cbSegLeft = pGcSgBuf->paSegs[0].cbSeg;
390 }
391 else
392 {
393 pGcSgBuf->GCPhysCur = 0;
394 pGcSgBuf->cbSegLeft = 0;
395 }
396}
397
398RTGCPHYS virtioCoreGCPhysChainAdvance(PVIRTIOSGBUF pGcSgBuf, size_t cbAdvance)
399{
400 AssertReturn(pGcSgBuf, 0);
401
402 size_t cbLeft = cbAdvance;
403 while (cbLeft)
404 {
405 size_t cbThisAdvance = cbLeft;
406 virtioCoreGCPhysChainGet(pGcSgBuf, &cbThisAdvance);
407 if (!cbThisAdvance)
408 break;
409
410 cbLeft -= cbThisAdvance;
411 }
412 return cbAdvance - cbLeft;
413}
414
415RTGCPHYS virtioCoreGCPhysChainGetNextSeg(PVIRTIOSGBUF pGcSgBuf, size_t *pcbSeg)
416{
417 AssertReturn(pGcSgBuf, 0);
418 AssertPtrReturn(pcbSeg, 0);
419
420 if (!*pcbSeg)
421 *pcbSeg = pGcSgBuf->cbSegLeft;
422
423 return virtioCoreGCPhysChainGet(pGcSgBuf, pcbSeg);
424}
425
426size_t virtioCoreGCPhysChainCalcBufSize(PVIRTIOSGBUF pGcSgBuf)
427{
428 size_t cb = 0;
429 unsigned i = pGcSgBuf->cSegs;
430 while (i-- > 0)
431 cb += pGcSgBuf->paSegs[i].cbSeg;
432 return cb;
433 }
434
435#ifdef IN_RING3
436
437/** API Function: See header file*/
438void virtioCorePrintFeatures(VIRTIOCORE *pVirtio, PCDBGFINFOHLP pHlp)
439{
440 static struct
441 {
442 uint64_t fFeatureBit;
443 const char *pcszDesc;
444 } const s_aFeatures[] =
445 {
446 { VIRTIO_F_RING_INDIRECT_DESC, " RING_INDIRECT_DESC Driver can use descriptors with VIRTQ_DESC_F_INDIRECT flag set\n" },
447 { VIRTIO_F_RING_EVENT_IDX, " RING_EVENT_IDX Enables use_event and avail_event fields described in 2.4.7, 2.4.8\n" },
448 { VIRTIO_F_VERSION_1, " VERSION Used to detect legacy drivers.\n" },
449 };
450
451#define MAXLINE 80
452 /* Display as a single buf to prevent interceding log messages */
453 uint16_t cbBuf = RT_ELEMENTS(s_aFeatures) * 132;
454 char *pszBuf = (char *)RTMemAllocZ(cbBuf);
455 Assert(pszBuf);
456 char *cp = pszBuf;
457 for (unsigned i = 0; i < RT_ELEMENTS(s_aFeatures); ++i)
458 {
459 bool isOffered = RT_BOOL(pVirtio->uDeviceFeatures & s_aFeatures[i].fFeatureBit);
460 bool isNegotiated = RT_BOOL(pVirtio->uDriverFeatures & s_aFeatures[i].fFeatureBit);
461 cp += RTStrPrintf(cp, cbBuf - (cp - pszBuf), " %s %s %s",
462 isOffered ? "+" : "-", isNegotiated ? "x" : " ", s_aFeatures[i].pcszDesc);
463 }
464 if (pHlp)
465 pHlp->pfnPrintf(pHlp, "VirtIO Core Features Configuration\n\n"
466 " Offered Accepted Feature Description\n"
467 " ------- -------- ------- -----------\n"
468 "%s\n", pszBuf);
469#ifdef LOG_ENABLED
470 else
471 Log3(("VirtIO Core Features Configuration\n\n"
472 " Offered Accepted Feature Description\n"
473 " ------- -------- ------- -----------\n"
474 "%s\n", pszBuf));
475#endif
476 RTMemFree(pszBuf);
477}
478#endif
479
480#ifdef LOG_ENABLED
481
/** API Function: See header file
 *
 * Renders @a cb bytes at @a pv as a classic hex dump (16 bytes per row:
 * address, hex columns, then an ASCII gutter) into one heap buffer and emits
 * it as a single Log() statement so concurrent logging cannot interleave rows.
 * @a uBase only offsets the printed line addresses.
 */
void virtioCoreHexDump(uint8_t *pv, uint32_t cb, uint32_t uBase, const char *pszTitle)
{
    /* Advance the output cursor and shrink the remaining-space counter in lockstep. */
#define ADJCURSOR(cb) pszOut += cb; cbRemain -= cb;
    /* 80 bytes per row (plus one spare row) comfortably covers "addr + 16 hex cols + ASCII + NL". */
    size_t cbPrint = 0, cbRemain = ((cb / 16) + 1) * 80;
    char *pszBuf = (char *)RTMemAllocZ(cbRemain), *pszOut = pszBuf;
    AssertMsgReturnVoid(pszBuf, ("Out of Memory"));
    if (pszTitle)
    {
        cbPrint = RTStrPrintf(pszOut, cbRemain, "%s [%d bytes]:\n", pszTitle, cb);
        ADJCURSOR(cbPrint);
    }
    for (uint32_t row = 0; row < RT_MAX(1, (cb / 16) + 1) && row * 16 < cb; row++)
    {
        cbPrint = RTStrPrintf(pszOut, cbRemain, "%04x: ", row * 16 + uBase); /* line address */
        ADJCURSOR(cbPrint);
        for (uint8_t col = 0; col < 16; col++)
        {
            uint32_t idx = row * 16 + col;
            if (idx >= cb)
                /* Pad the last partial row with "--" placeholders; extra gap every 8 cols. */
                cbPrint = RTStrPrintf(pszOut, cbRemain, "-- %s", (col + 1) % 8 ? "" : " ");
            else
                cbPrint = RTStrPrintf(pszOut, cbRemain, "%02x %s", pv[idx], (col + 1) % 8 ? "" : " ");
            ADJCURSOR(cbPrint);
        }
        /* ASCII gutter: printable chars verbatim, everything else as '.'. */
        for (uint32_t idx = row * 16; idx < row * 16 + 16; idx++)
        {
            cbPrint = RTStrPrintf(pszOut, cbRemain, "%c", (idx >= cb) ? ' ' : (pv[idx] >= 0x20 && pv[idx] <= 0x7e ? pv[idx] : '.'));
            ADJCURSOR(cbPrint);
        }
        *pszOut++ = '\n';
        --cbRemain;
    }
    Log(("%s\n", pszBuf));
    RTMemFree(pszBuf);
    RT_NOREF2(uBase, pv);
#undef ADJCURSOR
}
520
521/* API FUnction: See header file */
522void virtioCoreGCPhysHexDump(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, uint16_t cb, uint32_t uBase, const char *pszTitle)
523{
524#define ADJCURSOR(cb) pszOut += cb; cbRemain -= cb;
525 size_t cbPrint = 0, cbRemain = ((cb / 16) + 1) * 80;
526 char *pszBuf = (char *)RTMemAllocZ(cbRemain), *pszOut = pszBuf;
527 AssertMsgReturnVoid(pszBuf, ("Out of Memory"));
528 if (pszTitle)
529 {
530 cbPrint = RTStrPrintf(pszOut, cbRemain, "%s [%d bytes]:\n", pszTitle, cb);
531 ADJCURSOR(cbPrint);
532 }
533 for (uint16_t row = 0; row < (uint16_t)RT_MAX(1, (cb / 16) + 1) && row * 16 < cb; row++)
534 {
535 uint8_t c;
536 cbPrint = RTStrPrintf(pszOut, cbRemain, "%04x: ", row * 16 + uBase); /* line address */
537 ADJCURSOR(cbPrint);
538 for (uint8_t col = 0; col < 16; col++)
539 {
540 uint32_t idx = row * 16 + col;
541 PDMDevHlpPCIPhysRead(pDevIns, GCPhys + idx, &c, 1);
542 if (idx >= cb)
543 cbPrint = RTStrPrintf(pszOut, cbRemain, "-- %s", (col + 1) % 8 ? "" : " ");
544 else
545 cbPrint = RTStrPrintf(pszOut, cbRemain, "%02x %s", c, (col + 1) % 8 ? "" : " ");
546 ADJCURSOR(cbPrint);
547 }
548 for (uint16_t idx = row * 16; idx < row * 16 + 16; idx++)
549 {
550 PDMDevHlpPCIPhysRead(pDevIns, GCPhys + idx, &c, 1);
551 cbPrint = RTStrPrintf(pszOut, cbRemain, "%c", (idx >= cb) ? ' ' : (c >= 0x20 && c <= 0x7e ? c : '.'));
552 ADJCURSOR(cbPrint);
553 }
554 *pszOut++ = '\n';
555 --cbRemain;
556 }
557 Log(("%s\n", pszBuf));
558 RTMemFree(pszBuf);
559 RT_NOREF(uBase);
560#undef ADJCURSOR
561}
562#endif /* LOG_ENABLED */
563
/** API function: See header file
 *
 * Emits a Log6 line describing a guest MMIO access to a named config member:
 * direction (read/write), member name (optionally with array index and the
 * accessed byte sub-range), and the value in hex.  Power-of-two sized
 * accesses (1/2/4/8) are shown as a single right-sized hex number; anything
 * else falls back to an inline hex dump.
 */
void virtioCoreLogMappedIoValue(const char *pszFunc, const char *pszMember, uint32_t uMemberSize,
                                const void *pv, uint32_t cb, uint32_t uOffset, int fWrite,
                                int fHasIndex, uint32_t idx)
{
    if (!LogIs6Enabled())
        return;

    /* Optional "[idx]" suffix for array-valued members. */
    char szIdx[16];
    if (fHasIndex)
        RTStrPrintf(szIdx, sizeof(szIdx), "[%d]", idx);
    else
        szIdx[0] = '\0';

    if (cb == 1 || cb == 2 || cb == 4 || cb == 8)
    {
        char szDepiction[64];
        size_t cchDepiction;
        if (uOffset != 0 || cb != uMemberSize) /* display bounds if partial member access */
            cchDepiction = RTStrPrintf(szDepiction, sizeof(szDepiction), "%s%s[%d:%d]",
                                       pszMember, szIdx, uOffset, uOffset + cb - 1);
        else
            cchDepiction = RTStrPrintf(szDepiction, sizeof(szDepiction), "%s%s", pszMember, szIdx);

        /* padding (dot leaders to column 30 so values line up across log lines) */
        if (cchDepiction < 30)
            szDepiction[cchDepiction++] = ' ';
        while (cchDepiction < 30)
            szDepiction[cchDepiction++] = '.';
        szDepiction[cchDepiction] = '\0';

        /* Widen the accessed bytes into a zero-extended 64-bit value for printing. */
        RTUINT64U uValue;
        uValue.u = 0;
        memcpy(uValue.au8, pv, cb);
        Log6(("%-23s: Guest %s %s %#0*RX64\n",
              pszFunc, fWrite ? "wrote" : "read ", szDepiction, 2 + cb * 2, uValue.u));
    }
    else /* odd number or oversized access, ... log inline hex-dump style */
    {
        Log6(("%-23s: Guest %s %s%s[%d:%d]: %.*Rhxs\n",
              pszFunc, fWrite ? "wrote" : "read ", pszMember,
              szIdx, uOffset, uOffset + cb, cb, pv));
    }
    RT_NOREF2(fWrite, pszFunc);
}
609
610/**
611 * Makes the MMIO-mapped Virtio fDeviceStatus registers non-cryptic (buffers to
612 * keep the output clean during multi-threaded activity)
613 */
614DECLINLINE(void) virtioCoreFormatDeviceStatus(uint8_t bStatus, char *pszBuf, size_t uSize)
615{
616
617#define ADJCURSOR(len) cp += len; uSize -= len; sep = (char *)" | ";
618
619 memset(pszBuf, 0, uSize);
620 size_t len;
621 char *cp = pszBuf;
622 char *sep = (char *)"";
623
624 if (bStatus == 0) {
625 RTStrPrintf(cp, uSize, "RESET");
626 return;
627 }
628 if (bStatus & VIRTIO_STATUS_ACKNOWLEDGE)
629 {
630 len = RTStrPrintf(cp, uSize, "ACKNOWLEDGE");
631 ADJCURSOR(len);
632 }
633 if (bStatus & VIRTIO_STATUS_DRIVER)
634 {
635 len = RTStrPrintf(cp, uSize, "%sDRIVER", sep);
636 ADJCURSOR(len);
637 }
638 if (bStatus & VIRTIO_STATUS_FEATURES_OK)
639 {
640 len = RTStrPrintf(cp, uSize, "%sFEATURES_OK", sep);
641 ADJCURSOR(len);
642 }
643 if (bStatus & VIRTIO_STATUS_DRIVER_OK)
644 {
645 len = RTStrPrintf(cp, uSize, "%sDRIVER_OK", sep);
646 ADJCURSOR(len);
647 }
648 if (bStatus & VIRTIO_STATUS_FAILED)
649 {
650 len = RTStrPrintf(cp, uSize, "%sFAILED", sep);
651 ADJCURSOR(len);
652 }
653 if (bStatus & VIRTIO_STATUS_DEVICE_NEEDS_RESET)
654 RTStrPrintf(cp, uSize, "%sNEEDS_RESET", sep);
655
656#undef ADJCURSOR
657}
658
659int virtioCoreVirtqAttach(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, const char *pcszName)
660{
661 LogFunc(("%s\n", pcszName));
662 PVIRTQSTATE pVirtqState = &pVirtio->aVirtqState[uVirtqNbr];
663 pVirtqState->uVirtqNbr = uVirtqNbr;
664 pVirtqState->uAvailIdxShadow = 0;
665 pVirtqState->uUsedIdxShadow = 0;
666 pVirtqState->fVirtqRingEventThreshold = false;
667 RTStrCopy(pVirtqState->szVirtqName, sizeof(pVirtqState->szVirtqName), pcszName);
668 return VINF_SUCCESS;
669}
670
671#ifdef IN_RING3
672
673/** API Fuunction: See header file */
674void virtioCoreR3VirtqInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs, int uVirtqNbr)
675{
676 RT_NOREF(pszArgs);
677 PVIRTIOCORE pVirtio = PDMDEVINS_2_DATA(pDevIns, PVIRTIOCORE);
678 PVIRTQSTATE pVirtqState = &pVirtio->aVirtqState[uVirtqNbr];
679
680 /** @todo add ability to dump physical contents described by any descriptor (using existing VirtIO core API function) */
681// bool fDump = pszArgs && (*pszArgs == 'd' || *pszArgs == 'D'); /* "dump" (avail phys descriptor)"
682
683 uint16_t uAvailIdx = virtioReadAvailRingIdx(pDevIns, pVirtio, uVirtqNbr);
684 uint16_t uAvailIdxShadow = pVirtqState->uAvailIdxShadow;
685
686 uint16_t uUsedIdx = virtioReadUsedRingIdx(pDevIns, pVirtio, uVirtqNbr);
687 uint16_t uUsedIdxShadow = pVirtqState->uUsedIdxShadow;
688
689 PVIRTQBUF pVirtqBuf = NULL;
690
691 bool fEmpty = IS_VIRTQ_EMPTY(pDevIns, pVirtio, pVirtqState);
692
693 LogFunc(("%s, empty = %s\n", VIRTQNAME(pVirtio, uVirtqNbr), fEmpty ? "true" : "false"));
694
695 int cSendSegs = 0, cReturnSegs = 0;
696 if (!fEmpty)
697 {
698 virtioCoreR3VirtqAvailBufPeek(pDevIns, pVirtio, uVirtqNbr, &pVirtqBuf);
699 cSendSegs = pVirtqBuf->pSgPhysSend ? pVirtqBuf->pSgPhysSend->cSegs : 0;
700 cReturnSegs = pVirtqBuf->pSgPhysReturn ? pVirtqBuf->pSgPhysReturn->cSegs : 0;
701 }
702
703 bool fAvailNoInterrupt = virtioReadAvailRingFlags(pDevIns, pVirtio, uVirtqNbr) & VIRTQ_AVAIL_F_NO_INTERRUPT;
704 bool fUsedNoNotify = virtioReadUsedRingFlags(pDevIns, pVirtio, uVirtqNbr) & VIRTQ_USED_F_NO_NOTIFY;
705
706
707 pHlp->pfnPrintf(pHlp, " queue enabled: ........... %s\n", pVirtio->uVirtqEnable[uVirtqNbr] ? "true" : "false");
708 pHlp->pfnPrintf(pHlp, " size: .................... %d\n", pVirtio->uVirtqSize[uVirtqNbr]);
709 pHlp->pfnPrintf(pHlp, " notify offset: ........... %d\n", pVirtio->uVirtqNotifyOff[uVirtqNbr]);
710 if (pVirtio->fMsiSupport)
711 pHlp->pfnPrintf(pHlp, " MSIX vector: ....... %4.4x\n", pVirtio->uVirtqMsixVector[uVirtqNbr]);
712 pHlp->pfnPrintf(pHlp, "\n");
713 pHlp->pfnPrintf(pHlp, " avail ring (%d entries):\n", uAvailIdx - uAvailIdxShadow);
714 pHlp->pfnPrintf(pHlp, " index: ................ %d\n", uAvailIdx);
715 pHlp->pfnPrintf(pHlp, " shadow: ............... %d\n", uAvailIdxShadow);
716 pHlp->pfnPrintf(pHlp, " flags: ................ %s\n", fAvailNoInterrupt ? "NO_INTERRUPT" : "");
717 pHlp->pfnPrintf(pHlp, "\n");
718 pHlp->pfnPrintf(pHlp, " used ring (%d entries):\n", uUsedIdx - uUsedIdxShadow);
719 pHlp->pfnPrintf(pHlp, " index: ................ %d\n", uUsedIdx);
720 pHlp->pfnPrintf(pHlp, " shadow: ............... %d\n", uUsedIdxShadow);
721 pHlp->pfnPrintf(pHlp, " flags: ................ %s\n", fUsedNoNotify ? "NO_NOTIFY" : "");
722 pHlp->pfnPrintf(pHlp, "\n");
723 if (!fEmpty)
724 {
725 pHlp->pfnPrintf(pHlp, " desc chain:\n");
726 pHlp->pfnPrintf(pHlp, " head idx: ............. %d\n", uUsedIdx);
727 pHlp->pfnPrintf(pHlp, " segs: ................. %d\n", cSendSegs + cReturnSegs);
728 pHlp->pfnPrintf(pHlp, " refCnt ................ %d\n", pVirtqBuf->cRefs);
729 pHlp->pfnPrintf(pHlp, "\n");
730 pHlp->pfnPrintf(pHlp, " host-to-guest (%d bytes):\n", pVirtqBuf->cbPhysSend);
731 pHlp->pfnPrintf(pHlp, " segs: .............. %d\n", cSendSegs);
732 if (cSendSegs)
733 {
734 pHlp->pfnPrintf(pHlp, " index: ............. %d\n", pVirtqBuf->pSgPhysSend->idxSeg);
735 pHlp->pfnPrintf(pHlp, " unsent ............. %d\n", pVirtqBuf->pSgPhysSend->cbSegLeft);
736 }
737 pHlp->pfnPrintf(pHlp, "\n");
738 pHlp->pfnPrintf(pHlp, " guest-to-host (%d bytes)\n", pVirtqBuf->cbPhysReturn);
739 pHlp->pfnPrintf(pHlp, " segs: .............. %d\n", cReturnSegs);
740 if (cReturnSegs)
741 {
742 pHlp->pfnPrintf(pHlp, " index: ............. %d\n", pVirtqBuf->pSgPhysReturn->idxSeg);
743 pHlp->pfnPrintf(pHlp, " unsent ............. %d\n", pVirtqBuf->pSgPhysReturn->cbSegLeft);
744 }
745 } else
746 pHlp->pfnPrintf(pHlp, " No desc chains available\n");
747 pHlp->pfnPrintf(pHlp, "\n");
748
749}
750
751
/** API Function: See header file
 *
 * Walks the descriptor chain starting at @a uHeadIdx, partitioning it into
 * device-writable (IN) and device-readable (OUT) segment arrays inside a
 * freshly allocated, reference-counted VIRTQBUF_T which is returned via
 * @a ppVirtqBuf (caller owns the single reference; release with
 * virtioCoreR3VirtqBufRelease).
 *
 * @returns VINF_SUCCESS, VERR_INVALID_POINTER, VERR_INVALID_STATE or
 *          VERR_NO_MEMORY.
 */
int virtioCoreR3VirtqAvailBufGet(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr,
                                 uint16_t uHeadIdx, PPVIRTQBUF ppVirtqBuf)
{
    AssertReturn(ppVirtqBuf, VERR_INVALID_POINTER);
    *ppVirtqBuf = NULL;

    Assert(uVirtqNbr < RT_ELEMENTS(pVirtio->aVirtqState));

    PVIRTQSTATE pVirtqState = &pVirtio->aVirtqState[uVirtqNbr];

    AssertMsgReturn(IS_DRIVER_OK(pVirtio) && pVirtio->uVirtqEnable[uVirtqNbr],
                    ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);

    uint16_t uDescIdx = uHeadIdx;

    Log6Func(("%s DESC CHAIN: (head) desc_idx=%u\n", pVirtqState->szVirtqName, uHeadIdx));
    RT_NOREF(pVirtqState);

    /*
     * Allocate and initialize the descriptor chain structure.
     */
    PVIRTQBUF pVirtqBuf = (PVIRTQBUF)RTMemAllocZ(sizeof(VIRTQBUF_T));
    AssertReturn(pVirtqBuf, VERR_NO_MEMORY);
    pVirtqBuf->u32Magic  = VIRTQBUF_MAGIC;
    pVirtqBuf->cRefs     = 1;                /* sole reference goes to the caller */
    pVirtqBuf->uHeadIdx  = uHeadIdx;
    pVirtqBuf->uVirtqNbr = uVirtqNbr;
    *ppVirtqBuf = pVirtqBuf;

    /*
     * Gather segments.
     */
    VIRTQ_DESC_T desc;

    uint32_t cbIn     = 0;   /* total bytes in device-writable segments */
    uint32_t cbOut    = 0;   /* total bytes in device-readable segments */
    uint32_t cSegsIn  = 0;
    uint32_t cSegsOut = 0;
    PVIRTIOSGSEG paSegsIn  = pVirtqBuf->aSegsIn;
    PVIRTIOSGSEG paSegsOut = pVirtqBuf->aSegsOut;

    do
    {
        PVIRTIOSGSEG pSeg;

        /*
         * Malicious guests may go beyond paSegsIn or paSegsOut boundaries by linking
         * several descriptors into a loop. Since there is no legitimate way to get a sequences of
         * linked descriptors exceeding the total number of descriptors in the ring (see @bugref{8620}),
         * the following aborts I/O if breach and employs a simple log throttling algorithm to notify.
         */
        if (cSegsIn + cSegsOut >= VIRTQ_MAX_ENTRIES)
        {
            /* Log on the 1st, 10th, 100th, ... occurrence only, to avoid release-log spam. */
            static volatile uint32_t s_cMessages  = 0;
            static volatile uint32_t s_cThreshold = 1;
            if (ASMAtomicIncU32(&s_cMessages) == ASMAtomicReadU32(&s_cThreshold))
            {
                LogRelMax(64, ("Too many linked descriptors; check if the guest arranges descriptors in a loop.\n"));
                if (ASMAtomicReadU32(&s_cMessages) != 1)
                    LogRelMax(64, ("(the above error has occured %u times so far)\n", ASMAtomicReadU32(&s_cMessages)));
                ASMAtomicWriteU32(&s_cThreshold, ASMAtomicReadU32(&s_cThreshold) * 10);
            }
            break;
        }
        RT_UNTRUSTED_VALIDATED_FENCE();

        virtioReadDesc(pDevIns, pVirtio, uVirtqNbr, uDescIdx, &desc);

        if (desc.fFlags & VIRTQ_DESC_F_WRITE)
        {
            /* Device-writable buffer: goes into the IN (return) segment array. */
            Log6Func(("%s IN desc_idx=%u seg=%u addr=%RGp cb=%u\n", VIRTQNAME(pVirtio, uVirtqNbr), uDescIdx, cSegsIn, desc.GCPhysBuf, desc.cb));
            cbIn += desc.cb;
            pSeg = &paSegsIn[cSegsIn++];
        }
        else
        {
            /* Device-readable buffer: goes into the OUT (send) segment array. */
            Log6Func(("%s OUT desc_idx=%u seg=%u addr=%RGp cb=%u\n", VIRTQNAME(pVirtio, uVirtqNbr), uDescIdx, cSegsOut, desc.GCPhysBuf, desc.cb));
            cbOut += desc.cb;
            pSeg = &paSegsOut[cSegsOut++];
            if (LogIs11Enabled())
            {
                virtioCoreGCPhysHexDump(pDevIns, desc.GCPhysBuf, desc.cb, 0, NULL);
                Log(("\n"));
            }
        }

        pSeg->GCPhys = desc.GCPhysBuf;
        pSeg->cbSeg = desc.cb;

        uDescIdx = desc.uDescIdxNext;
    } while (desc.fFlags & VIRTQ_DESC_F_NEXT);

    /*
     * Add segments to the descriptor chain structure.
     */
    if (cSegsIn)
    {
        virtioCoreGCPhysChainInit(&pVirtqBuf->SgBufIn, paSegsIn, cSegsIn);
        pVirtqBuf->pSgPhysReturn = &pVirtqBuf->SgBufIn;
        pVirtqBuf->cbPhysReturn  = cbIn;
        STAM_REL_COUNTER_ADD(&pVirtio->StatDescChainsSegsIn, cSegsIn);
    }

    if (cSegsOut)
    {
        virtioCoreGCPhysChainInit(&pVirtqBuf->SgBufOut, paSegsOut, cSegsOut);
        pVirtqBuf->pSgPhysSend = &pVirtqBuf->SgBufOut;
        pVirtqBuf->cbPhysSend  = cbOut;
        STAM_REL_COUNTER_ADD(&pVirtio->StatDescChainsSegsOut, cSegsOut);
    }

    STAM_REL_COUNTER_INC(&pVirtio->StatDescChainsAllocated);
    Log6Func(("%s -- segs OUT: %u (%u bytes) IN: %u (%u bytes) --\n", pVirtqState->szVirtqName, cSegsOut, cbOut, cSegsIn, cbIn));

    return VINF_SUCCESS;
}
869
870/** API Function: See header file */
871uint32_t virtioCoreR3VirtqBufRetain(PVIRTQBUF pVirtqBuf)
872{
873 AssertReturn(pVirtqBuf, UINT32_MAX);
874 AssertReturn(pVirtqBuf->u32Magic == VIRTQBUF_MAGIC, UINT32_MAX);
875 uint32_t cRefs = ASMAtomicIncU32(&pVirtqBuf->cRefs);
876 Assert(cRefs > 1);
877 Assert(cRefs < 16);
878 return cRefs;
879}
880
881
882/** API Function: See header file */
883uint32_t virtioCoreR3VirtqBufRelease(PVIRTIOCORE pVirtio, PVIRTQBUF pVirtqBuf)
884{
885 if (!pVirtqBuf)
886 return 0;
887 AssertReturn(pVirtqBuf, 0);
888 AssertReturn(pVirtqBuf->u32Magic == VIRTQBUF_MAGIC, 0);
889 uint32_t cRefs = ASMAtomicDecU32(&pVirtqBuf->cRefs);
890 Assert(cRefs < 16);
891 if (cRefs == 0)
892 {
893 pVirtqBuf->u32Magic = ~VIRTQBUF_MAGIC;
894 RTMemFree(pVirtqBuf);
895 STAM_REL_COUNTER_INC(&pVirtio->StatDescChainsFreed);
896 }
897 return cRefs;
898}
899
/** API Function: See header file */
void virtioCoreNotifyConfigChanged(PVIRTIOCORE pVirtio)
{
    /* Raise a device-configuration-change interrupt (INT# or the config MSI-X vector). */
    virtioKick(pVirtio->pDevInsR3, pVirtio, VIRTIO_ISR_DEVICE_CONFIG, pVirtio->uMsixConfig);
}
905
906void virtioCoreVirtqEnableNotify(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, bool fEnable)
907{
908 if (pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
909 {
910 uint16_t fFlags = virtioReadUsedRingFlags(pVirtio->pDevInsR3, pVirtio, uVirtqNbr);
911
912 if (fEnable)
913 fFlags &= ~ VIRTQ_USED_F_NO_NOTIFY;
914 else
915 fFlags |= VIRTQ_USED_F_NO_NOTIFY;
916
917 virtioWriteUsedRingFlags(pVirtio->pDevInsR3, pVirtio, uVirtqNbr, fFlags);
918 }
919}
920
921/** API function: See Header file */
922void virtioCoreResetAll(PVIRTIOCORE pVirtio)
923{
924 LogFunc(("\n"));
925 pVirtio->fDeviceStatus |= VIRTIO_STATUS_DEVICE_NEEDS_RESET;
926 if (pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
927 {
928 pVirtio->fGenUpdatePending = true;
929 virtioKick(pVirtio->pDevInsR3, pVirtio, VIRTIO_ISR_DEVICE_CONFIG, pVirtio->uMsixConfig);
930 }
931}
932
/** API function: See Header file */
int virtioCoreR3VirtqAvailBufPeek(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr,
                                 PPVIRTQBUF ppVirtqBuf)
{
    /* Peek = fetch the next available buffer without consuming it (fRemove = false). */
    return virtioCoreR3VirtqAvailBufGet(pDevIns, pVirtio, uVirtqNbr, ppVirtqBuf, false);
}
939
940/** API function: See Header file */
941int virtioCoreR3VirtqAvailBufNext(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
942{
943 Assert(uVirtqNbr < RT_ELEMENTS(pVirtio->aVirtqState));
944 PVIRTQSTATE pVirtqState = &pVirtio->aVirtqState[uVirtqNbr];
945
946 AssertMsgReturn(IS_DRIVER_OK(pVirtio) && pVirtio->uVirtqEnable[uVirtqNbr],
947 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
948
949 if (IS_VIRTQ_EMPTY(pVirtio->pDevInsR3, pVirtio, pVirtqState))
950 return VERR_NOT_AVAILABLE;
951
952 Log6Func(("%s avail shadow idx: %u\n", pVirtqState->szVirtqName, pVirtqState->uAvailIdxShadow));
953 pVirtqState->uAvailIdxShadow++;
954
955 return VINF_SUCCESS;
956}
957
958/** API function: See Header file */
959int virtioCoreR3VirtqAvailBufGet(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr,
960 PPVIRTQBUF ppVirtqBuf, bool fRemove)
961{
962 PVIRTQSTATE pVirtqState = &pVirtio->aVirtqState[uVirtqNbr];
963
964 if (IS_VIRTQ_EMPTY(pDevIns, pVirtio, pVirtqState))
965 return VERR_NOT_AVAILABLE;
966
967 uint16_t uHeadIdx = virtioReadAvailDescIdx(pDevIns, pVirtio, uVirtqNbr, pVirtqState->uAvailIdxShadow);
968
969 if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
970 virtioWriteUsedAvailEvent(pDevIns,pVirtio, uVirtqNbr, pVirtqState->uAvailIdxShadow + 1);
971
972 if (fRemove)
973 pVirtqState->uAvailIdxShadow++;
974
975 int rc = virtioCoreR3VirtqAvailBufGet(pDevIns, pVirtio, uVirtqNbr, uHeadIdx, ppVirtqBuf);
976 return rc;
977}
978
/**
 * API function: See Header file
 *
 * Writes client return data (if any) into the buffer's guest-writable (IN)
 * segment chain and stages the used-ring element for this descriptor chain.
 * The used ring's public index is NOT bumped here; that is done by a later
 * call to virtioCoreVirtqSyncUsedRing().
 */
int virtioCoreR3VirtqUsedBufPut(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, PRTSGBUF pSgVirtReturn,
                                PVIRTQBUF pVirtqBuf, bool fFence)
{
    Assert(uVirtqNbr < RT_ELEMENTS(pVirtio->aVirtqState));
    PVIRTQSTATE pVirtqState = &pVirtio->aVirtqState[uVirtqNbr];
    PVIRTIOSGBUF pSgPhysReturn = pVirtqBuf->pSgPhysReturn;

    Assert(pVirtqBuf->u32Magic == VIRTQBUF_MAGIC);
    Assert(pVirtqBuf->cRefs > 0);

    AssertMsgReturn(IS_DRIVER_OK(pVirtio), ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);

    Log6Func(("Copying client data to %s, desc chain (head desc_idx %d)\n",
              VIRTQNAME(pVirtio, uVirtqNbr), virtioReadUsedRingIdx(pDevIns, pVirtio, uVirtqNbr)));

    /* Copy s/g buf (virtual memory) to guest phys mem (IN direction). */

    size_t cbCopy = 0, cbTotal = 0, cbRemain = 0;

    if (pSgVirtReturn)
    {
        size_t cbTarget = virtioCoreGCPhysChainCalcBufSize(pSgPhysReturn);
        cbRemain = cbTotal = RTSgBufCalcTotalLength(pSgVirtReturn);
        /* Refuse to overflow the guest's IN buffer capacity. */
        AssertMsgReturn(cbTarget >= cbRemain, ("No space to write data to phys memory"), VERR_BUFFER_OVERFLOW);
        virtioCoreGCPhysChainReset(pSgPhysReturn); /* Reset ptr because req data may have already been written */
        while (cbRemain)
        {
            /* Copy the largest chunk both the source and destination segments can handle. */
            cbCopy = RT_MIN(pSgVirtReturn->cbSegLeft, pSgPhysReturn->cbSegLeft);
            Assert(cbCopy > 0);
            PDMDevHlpPhysWrite(pDevIns, (RTGCPHYS)pSgPhysReturn->GCPhysCur, pSgVirtReturn->pvSegCur, cbCopy);
            RTSgBufAdvance(pSgVirtReturn, cbCopy);
            virtioCoreGCPhysChainAdvance(pSgPhysReturn, cbCopy);
            cbRemain -= cbCopy;
        }

        if (fFence)
            RT_UNTRUSTED_NONVOLATILE_COPY_FENCE(); /* needed? */

        /* NOTE(review): this checks only the LAST chunk size, while cbTotal is what gets
         * truncated to 32 bits below -- confirm whether cbTotal was the intended operand. */
        Assert(!(cbCopy >> 32));
    }

    /* If this write-ahead crosses threshold where the driver wants to get an event flag it */
    if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
        if (pVirtqState->uUsedIdxShadow == virtioReadAvailUsedEvent(pDevIns, pVirtio, uVirtqNbr))
            pVirtqState->fVirtqRingEventThreshold = true;

    /*
     * Place used buffer's descriptor in used ring but don't update used ring's slot index.
     * That will be done with a subsequent client call to virtioCoreVirtqSyncUsedRing() */
    virtioWriteUsedElem(pDevIns, pVirtio, uVirtqNbr, pVirtqState->uUsedIdxShadow++, pVirtqBuf->uHeadIdx, (uint32_t)cbTotal);

    if (pSgVirtReturn)
        Log6Func((".... Copied %zu bytes in %d segs to %u byte buffer, residual=%zu\n",
                  cbTotal - cbRemain, pSgVirtReturn->cSegs, pVirtqBuf->cbPhysReturn, pVirtqBuf->cbPhysReturn - cbTotal));

    Log6Func(("Write ahead used_idx=%u, %s used_idx=%u\n",
              pVirtqState->uUsedIdxShadow, VIRTQNAME(pVirtio, uVirtqNbr), virtioReadUsedRingIdx(pDevIns, pVirtio, uVirtqNbr)));

    return VINF_SUCCESS;
}
1040
1041/** API function: See Header file */
1042void virtioCoreR3VirqBufFill(PVIRTIOCORE pVirtio, PVIRTQBUF pVirtqBuf, void *pv, size_t cb)
1043{
1044 uint8_t *pb = (uint8_t *)pv;
1045 size_t cbLim = RT_MIN(pVirtqBuf->cbPhysReturn, cb);
1046 while (cbLim)
1047 {
1048 size_t cbSeg = cbLim;
1049 RTGCPHYS GCPhys = virtioCoreGCPhysChainGetNextSeg(pVirtqBuf->pSgPhysReturn, &cbSeg);
1050 PDMDevHlpPCIPhysWrite(pVirtio->pDevInsR3, GCPhys, pb, cbSeg);
1051 pb += cbSeg;
1052 cbLim -= cbSeg;
1053 pVirtqBuf->cbPhysSend -= cbSeg;
1054 }
1055 LogFunc(("Added %d/%d bytes to %s buffer, head idx: %u (%d bytes remain)\n",
1056 cb - cbLim, cb, VIRTQNAME(pVirtio, pVirtqBuf->uVirtqNbr),
1057 pVirtqBuf->uHeadIdx, pVirtqBuf->cbPhysReturn));
1058}
1059
1060
1061/** API function: See Header file */
1062void virtioCoreR3VirtqBufDrain(PVIRTIOCORE pVirtio, PVIRTQBUF pVirtqBuf, void *pv, size_t cb)
1063{
1064 uint8_t *pb = (uint8_t *)pv;
1065 size_t cbLim = RT_MIN(pVirtqBuf->cbPhysSend, cb);
1066 while (cbLim)
1067 {
1068 size_t cbSeg = cbLim;
1069 RTGCPHYS GCPhys = virtioCoreGCPhysChainGetNextSeg(pVirtqBuf->pSgPhysSend, &cbSeg);
1070 PDMDevHlpPCIPhysRead(pVirtio->pDevInsR3, GCPhys, pb, cbSeg);
1071 pb += cbSeg;
1072 cbLim -= cbSeg;
1073 pVirtqBuf->cbPhysSend -= cbSeg;
1074 }
1075 LogFunc(("Drained %d/%d bytes from %s buffer, head idx: %u (%d bytes left)\n",
1076 cb - cbLim, cb, VIRTQNAME(pVirtio, pVirtqBuf->uVirtqNbr),
1077 pVirtqBuf->uHeadIdx, pVirtqBuf->cbPhysSend));
1078}
1079
1080#endif /* IN_RING3 */
1081
1082/** API function: See Header file */
1083int virtioCoreVirtqSyncUsedRing(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
1084{
1085 Assert(uVirtqNbr < RT_ELEMENTS(pVirtio->aVirtqState));
1086 PVIRTQSTATE pVirtqState = &pVirtio->aVirtqState[uVirtqNbr];
1087
1088 AssertMsgReturn(IS_DRIVER_OK(pVirtio) && pVirtio->uVirtqEnable[uVirtqNbr],
1089 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
1090
1091 Log6Func(("Updating %s used_idx to %u\n",
1092 VIRTQNAME(pVirtio, uVirtqNbr), pVirtqState->uUsedIdxShadow));
1093
1094 virtioWriteUsedRingIdx(pDevIns, pVirtio, uVirtqNbr, pVirtqState->uUsedIdxShadow);
1095 virtioCoreNotifyGuestDriver(pDevIns, pVirtio, uVirtqNbr);
1096
1097 return VINF_SUCCESS;
1098}
1099
1100
1101/**
1102 */
static void virtioCoreVirtqNotified(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, uint16_t uNotifyIdx)
{

    PVIRTIOCORECC pVirtioCC = PDMDEVINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);

    /* See VirtIO 1.0, section 4.1.5.2. It implies that uVirtqNbr and uNotifyIdx should match.
     * Disregarding this notification may cause throughput to stop, however there's no way to know
     * which queue was intended for wake-up if the two parameters disagree. */

    AssertMsg(uNotifyIdx == uVirtqNbr,
                    ("Guest kicked virtq %d's notify addr w/non-corresponding virtq idx %d\n",
                     uVirtqNbr, uNotifyIdx));
    RT_NOREF(uNotifyIdx);

    AssertReturnVoid(uVirtqNbr < RT_ELEMENTS(pVirtio->aVirtqState));
    Log6Func(("%s (desc chains: %u)\n",
              pVirtio->aVirtqState[uVirtqNbr].szVirtqName,
              virtioCoreVirtqAvailBufCount(pDevIns, pVirtio, uVirtqNbr)));

    /* Inform the device-specific client code that this queue was kicked. */
    pVirtioCC->pfnVirtqNotified(pDevIns, pVirtio, uVirtqNbr);
}
1125
1126/**
1127 * Trigger MSI-X or INT# interrupt to notify guest of data added to used ring of
1128 * the specified virtq, depending on the interrupt configuration of the device
1129 * and depending on negotiated and realtime constraints flagged by the guest driver.
1130 *
1131 * See VirtIO 1.0 specification (section 2.4.7).
1132 *
1133 * @param pDevIns The device instance.
1134 * @param pVirtio Pointer to the shared virtio state.
1135 * @param uVirtqNbr Virtq to check for guest interrupt handling preference
1136 */
1137static void virtioCoreNotifyGuestDriver(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
1138{
1139
1140 Assert(uVirtqNbr < RT_ELEMENTS(pVirtio->aVirtqState));
1141 PVIRTQSTATE pVirtqState = &pVirtio->aVirtqState[uVirtqNbr];
1142
1143 if (!IS_DRIVER_OK(pVirtio))
1144 {
1145 LogFunc(("Guest driver not in ready state.\n"));
1146 return;
1147 }
1148
1149 if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
1150 {
1151 if (pVirtqState->fVirtqRingEventThreshold)
1152 {
1153#ifdef IN_RING3
1154 Log6Func(("...kicking guest %s, VIRTIO_F_EVENT_IDX set and threshold (%d) reached\n",
1155 VIRTQNAME(pVirtio, uVirtqNbr), (uint16_t)virtioReadAvailUsedEvent(pDevIns, pVirtio, uVirtqNbr)));
1156#endif
1157 virtioKick(pDevIns, pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtio->uVirtqMsixVector[uVirtqNbr]);
1158 pVirtqState->fVirtqRingEventThreshold = false;
1159 return;
1160 }
1161#ifdef IN_RING3
1162 Log6Func(("...skip interrupt %s, VIRTIO_F_EVENT_IDX set but threshold (%d) not reached (%d)\n",
1163 VIRTQNAME(pVirtio, uVirtqNbr),(uint16_t)virtioReadAvailUsedEvent(pDevIns, pVirtio, uVirtqNbr), pVirtqState->uUsedIdxShadow));
1164#endif
1165 }
1166 else
1167 {
1168 /** If guest driver hasn't suppressed interrupts, interrupt */
1169 if (!(virtioReadAvailRingFlags(pDevIns, pVirtio, uVirtqNbr) & VIRTQ_AVAIL_F_NO_INTERRUPT))
1170 {
1171 virtioKick(pDevIns, pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtio->uVirtqMsixVector[uVirtqNbr]);
1172 return;
1173 }
1174 Log6Func(("...skipping interrupt for %s (guest set VIRTQ_AVAIL_F_NO_INTERRUPT)\n",
1175 VIRTQNAME(pVirtio, uVirtqNbr)));
1176 }
1177}
1178
1179/**
1180 * Raise interrupt or MSI-X
1181 *
1182 * @param pDevIns The device instance.
1183 * @param pVirtio Pointer to the shared virtio state.
1184 * @param uCause Interrupt cause bit mask to set in PCI ISR port.
1185 * @param uVec MSI-X vector, if enabled
1186 */
1187static int virtioKick(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint8_t uCause, uint16_t uMsixVector)
1188{
1189 if (uCause == VIRTIO_ISR_VIRTQ_INTERRUPT)
1190 Log6Func(("reason: buffer added to 'used' ring.\n"));
1191 else
1192 if (uCause == VIRTIO_ISR_DEVICE_CONFIG)
1193 Log6Func(("reason: device config change\n"));
1194
1195 if (!pVirtio->fMsiSupport)
1196 {
1197 pVirtio->uISR |= uCause;
1198 PDMDevHlpPCISetIrq(pDevIns, 0, PDM_IRQ_LEVEL_HIGH);
1199 }
1200 else if (uMsixVector != VIRTIO_MSI_NO_VECTOR)
1201 PDMDevHlpPCISetIrq(pDevIns, uMsixVector, 1);
1202 return VINF_SUCCESS;
1203}
1204
1205/**
1206 * Lower interrupt (Called when guest reads ISR and when resetting)
1207 *
1208 * @param pDevIns The device instance.
1209 */
1210static void virtioLowerInterrupt(PPDMDEVINS pDevIns, uint16_t uMsixVector)
1211{
1212 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1213 if (!pVirtio->fMsiSupport)
1214 PDMDevHlpPCISetIrq(pDevIns, 0, PDM_IRQ_LEVEL_LOW);
1215 else if (uMsixVector != VIRTIO_MSI_NO_VECTOR)
1216 PDMDevHlpPCISetIrq(pDevIns, pVirtio->uMsixConfig, PDM_IRQ_LEVEL_LOW);
1217}
1218
1219#ifdef IN_RING3
1220static void virtioResetVirtq(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
1221{
1222 Assert(uVirtqNbr < RT_ELEMENTS(pVirtio->aVirtqState));
1223 PVIRTQSTATE pVirtqState = &pVirtio->aVirtqState[uVirtqNbr];
1224 pVirtqState->uAvailIdxShadow = 0;
1225 pVirtqState->uUsedIdxShadow = 0;
1226 pVirtqState->fVirtqRingEventThreshold = false;
1227 pVirtio->uVirtqEnable[uVirtqNbr] = false;
1228 pVirtio->uVirtqSize[uVirtqNbr] = VIRTQ_MAX_ENTRIES;
1229 pVirtio->uVirtqNotifyOff[uVirtqNbr] = uVirtqNbr;
1230 pVirtio->uVirtqMsixVector[uVirtqNbr] = uVirtqNbr + 2;
1231 if (!pVirtio->fMsiSupport) /* VirtIO 1.0, 4.1.4.3 and 4.1.5.1.2 */
1232 pVirtio->uVirtqMsixVector[uVirtqNbr] = VIRTIO_MSI_NO_VECTOR;
1233
1234 virtioLowerInterrupt(pVirtio->pDevInsR3, pVirtio->uVirtqMsixVector[uVirtqNbr]);
1235}
1236
1237static void virtioResetDevice(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio)
1238{
1239 Log2Func(("\n"));
1240 pVirtio->uDeviceFeaturesSelect = 0;
1241 pVirtio->uDriverFeaturesSelect = 0;
1242 pVirtio->uConfigGeneration = 0;
1243 pVirtio->fDeviceStatus = 0;
1244 pVirtio->uISR = 0;
1245
1246 if (!pVirtio->fMsiSupport)
1247 virtioLowerInterrupt(pDevIns, 0);
1248 else
1249 {
1250 virtioLowerInterrupt(pDevIns, pVirtio->uMsixConfig);
1251 for (int i = 0; i < VIRTQ_MAX_CNT; i++)
1252 {
1253 virtioLowerInterrupt(pDevIns, pVirtio->uVirtqMsixVector[i]);
1254 pVirtio->uVirtqMsixVector[i];
1255 }
1256 }
1257
1258 if (!pVirtio->fMsiSupport) /* VirtIO 1.0, 4.1.4.3 and 4.1.5.1.2 */
1259 pVirtio->uMsixConfig = VIRTIO_MSI_NO_VECTOR;
1260
1261 for (uint16_t uVirtqNbr = 0; uVirtqNbr < VIRTQ_MAX_CNT; uVirtqNbr++)
1262 virtioResetVirtq(pVirtio, uVirtqNbr);
1263}
1264
1265/**
1266 * Invoked by this implementation when guest driver resets the device.
1267 * The driver itself will not until the device has read the status change.
1268 */
1269static void virtioGuestR3WasReset(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC)
1270{
1271 LogFunc(("Guest reset the device\n"));
1272
1273 /* Let the client know */
1274 pVirtioCC->pfnStatusChanged(pVirtio, pVirtioCC, 0);
1275 virtioResetDevice(pDevIns, pVirtio);
1276}
1277#endif /* IN_RING3 */
1278
1279/**
1280 * Handle accesses to Common Configuration capability
1281 *
1282 * @returns VBox status code
1283 *
1284 * @param pDevIns The device instance.
1285 * @param pVirtio Pointer to the shared virtio state.
1286 * @param pVirtioCC Pointer to the current context virtio state.
1287 * @param fWrite Set if write access, clear if read access.
1288 * @param uOffsetOfAccess The common configuration capability offset.
1289 * @param cb Number of bytes to read or write
1290 * @param pv Pointer to location to write to or read from
1291 */
1292static int virtioCommonCfgAccessed(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC,
1293 int fWrite, uint32_t uOffsetOfAccess, unsigned cb, void *pv)
1294{
1295 uint16_t uVirtqNbr = pVirtio->uVirtqSelect;
1296 int rc = VINF_SUCCESS;
1297 uint64_t val;
1298 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1299 {
1300 if (fWrite) /* Guest WRITE pCommonCfg>uDeviceFeatures */
1301 {
1302 /* VirtIO 1.0, 4.1.4.3 states device_feature is a (guest) driver readonly field,
1303 * yet the linux driver attempts to write/read it back twice */
1304 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1305 LogFunc(("... WARNING: Guest attempted to write readonly virtio_pci_common_cfg.device_feature (ignoring)\n"));
1306// return VINF_SUCCESS;
1307 return VINF_IOM_MMIO_UNUSED_00; /** @todo which is right this or VINF_SUCCESS? */
1308 }
1309 else /* Guest READ pCommonCfg->uDeviceFeatures */
1310 {
1311 switch (pVirtio->uDeviceFeaturesSelect)
1312 {
1313 case 0:
1314 val = pVirtio->uDeviceFeatures & UINT32_C(0xffffffff);
1315 memcpy(pv, &val, cb);
1316 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1317 break;
1318 case 1:
1319 val = pVirtio->uDeviceFeatures >> 32;
1320 memcpy(pv, &val, cb);
1321 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess + sizeof(uint32_t));
1322 break;
1323 default:
1324 LogFunc(("Guest read uDeviceFeatures with out of range selector (%#x), returning 0\n",
1325 pVirtio->uDeviceFeaturesSelect));
1326 return VINF_IOM_MMIO_UNUSED_00;
1327 }
1328 }
1329 }
1330 else
1331 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1332 {
1333 if (fWrite) /* Guest WRITE pCommonCfg->udriverFeatures */
1334 {
1335 switch (pVirtio->uDriverFeaturesSelect)
1336 {
1337 case 0:
1338 memcpy(&pVirtio->uDriverFeatures, pv, cb);
1339 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1340 break;
1341 case 1:
1342 memcpy((char *)&pVirtio->uDriverFeatures + sizeof(uint32_t), pv, cb);
1343 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess + sizeof(uint32_t));
1344 break;
1345 default:
1346 LogFunc(("Guest wrote uDriverFeatures with out of range selector (%#x), returning 0\n",
1347 pVirtio->uDriverFeaturesSelect));
1348 return VINF_SUCCESS;
1349 }
1350 }
1351 /* Guest READ pCommonCfg->udriverFeatures */
1352 {
1353 switch (pVirtio->uDriverFeaturesSelect)
1354 {
1355 case 0:
1356 val = pVirtio->uDriverFeatures & 0xffffffff;
1357 memcpy(pv, &val, cb);
1358 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1359 break;
1360 case 1:
1361 val = (pVirtio->uDriverFeatures >> 32) & 0xffffffff;
1362 memcpy(pv, &val, cb);
1363 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess + 4);
1364 break;
1365 default:
1366 LogFunc(("Guest read uDriverFeatures with out of range selector (%#x), returning 0\n",
1367 pVirtio->uDriverFeaturesSelect));
1368 return VINF_IOM_MMIO_UNUSED_00;
1369 }
1370 }
1371 }
1372 else
1373 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uNumVirtqs, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1374 {
1375 if (fWrite)
1376 {
1377 Log2Func(("Guest attempted to write readonly virtio_pci_common_cfg.num_queues\n"));
1378 return VINF_SUCCESS;
1379 }
1380 *(uint16_t *)pv = VIRTQ_MAX_CNT;
1381 VIRTIO_DEV_CONFIG_LOG_ACCESS(uNumVirtqs, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1382 }
1383 else
1384 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(fDeviceStatus, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1385 {
1386 if (fWrite) /* Guest WRITE pCommonCfg->fDeviceStatus */
1387 {
1388 pVirtio->fDeviceStatus = *(uint8_t *)pv;
1389 bool fDeviceReset = pVirtio->fDeviceStatus == 0;
1390
1391 if (LogIs7Enabled())
1392 {
1393 char szOut[80] = { 0 };
1394 virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut));
1395 LogFunc(("Guest wrote fDeviceStatus ................ (%s)\n", szOut));
1396 }
1397 bool const fStatusChanged =
1398 (pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK) != (pVirtio->uPrevDeviceStatus & VIRTIO_STATUS_DRIVER_OK);
1399
1400 if (fDeviceReset || fStatusChanged)
1401 {
1402#ifdef IN_RING0
1403 /* Since VirtIO status changes are cumbersome by nature, e.g. not a benchmark priority,
1404 * handle the rest in R3 to facilitate logging or whatever dev-specific client needs to do */
1405 Log6Func(("RING0 => RING3 (demote)\n"));
1406 return VINF_IOM_R3_MMIO_WRITE;
1407#endif
1408 }
1409
1410#ifdef IN_RING3
1411 /*
1412 * Notify client only if status actually changed from last time and when we're reset.
1413 */
1414 if (fDeviceReset)
1415 virtioGuestR3WasReset(pDevIns, pVirtio, pVirtioCC);
1416
1417 if (fStatusChanged)
1418 pVirtioCC->pfnStatusChanged(pVirtio, pVirtioCC, pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK);
1419#endif
1420 /*
1421 * Save the current status for the next write so we can see what changed.
1422 */
1423 pVirtio->uPrevDeviceStatus = pVirtio->fDeviceStatus;
1424 }
1425 else /* Guest READ pCommonCfg->fDeviceStatus */
1426 {
1427 *(uint8_t *)pv = pVirtio->fDeviceStatus;
1428
1429 if (LogIs7Enabled())
1430 {
1431 char szOut[80] = { 0 };
1432 virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut));
1433 LogFunc(("Guest read fDeviceStatus ................ (%s)\n", szOut));
1434 }
1435 }
1436 }
1437 else
1438 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixConfig, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1439 VIRTIO_DEV_CONFIG_ACCESS( uMsixConfig, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1440 else
1441 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uDeviceFeaturesSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1442 VIRTIO_DEV_CONFIG_ACCESS( uDeviceFeaturesSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1443 else
1444 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uDriverFeaturesSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1445 VIRTIO_DEV_CONFIG_ACCESS( uDriverFeaturesSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1446 else
1447 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uConfigGeneration, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1448 VIRTIO_DEV_CONFIG_ACCESS( uConfigGeneration, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1449 else
1450 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uVirtqSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1451 VIRTIO_DEV_CONFIG_ACCESS( uVirtqSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1452 else
1453 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uVirtqSize, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1454 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uVirtqSize, uVirtqNbr, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1455 else
1456 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uVirtqMsixVector, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1457 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uVirtqMsixVector, uVirtqNbr, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1458 else
1459 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uVirtqEnable, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1460 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uVirtqEnable, uVirtqNbr, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1461 else
1462 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uVirtqNotifyOff, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1463 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uVirtqNotifyOff, uVirtqNbr, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1464 else
1465 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( aGCPhysVirtqDesc, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1466 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( aGCPhysVirtqDesc, uVirtqNbr, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1467 else
1468 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( aGCPhysVirtqAvail, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1469 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( aGCPhysVirtqAvail, uVirtqNbr, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1470 else
1471 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( aGCPhysVirtqUsed, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1472 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( aGCPhysVirtqUsed, uVirtqNbr, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1473 else
1474 {
1475 Log2Func(("Bad guest %s access to virtio_pci_common_cfg: uOffsetOfAccess=%#x (%d), cb=%d\n",
1476 fWrite ? "write" : "read ", uOffsetOfAccess, uOffsetOfAccess, cb));
1477 return fWrite ? VINF_SUCCESS : VINF_IOM_MMIO_UNUSED_00;
1478 }
1479
1480#ifndef IN_RING3
1481 RT_NOREF(pDevIns, pVirtioCC);
1482#endif
1483 return rc;
1484}
1485
1486/**
1487 * @callback_method_impl{FNIOMMMIONEWREAD,
1488 * Memory mapped I/O Handler for PCI Capabilities read operations.}
1489 *
1490 * This MMIO handler specifically supports the VIRTIO_PCI_CAP_PCI_CFG capability defined
1491 * in the VirtIO 1.0 specification, section 4.1.4.7, and as such is restricted to reads
1492 * of 1, 2 or 4 bytes, only.
1493 *
1494 */
1495static DECLCALLBACK(VBOXSTRICTRC) virtioMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
1496{
1497 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1498 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1499 AssertReturn(cb == 1 || cb == 2 || cb == 4, VERR_INVALID_PARAMETER);
1500 Assert(pVirtio == (PVIRTIOCORE)pvUser); RT_NOREF(pvUser);
1501
1502
1503 uint32_t uOffset;
1504 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocDeviceCap))
1505 {
1506#ifdef IN_RING3
1507 /*
1508 * Callback to client to manage device-specific configuration.
1509 */
1510 VBOXSTRICTRC rcStrict = pVirtioCC->pfnDevCapRead(pDevIns, uOffset, pv, cb);
1511
1512 /*
1513 * Additionally, anytime any part of the device-specific configuration (which our client maintains)
1514 * is READ it needs to be checked to see if it changed since the last time any part was read, in
1515 * order to maintain the config generation (see VirtIO 1.0 spec, section 4.1.4.3.1)
1516 */
1517 bool fDevSpecificFieldChanged = RT_BOOL(memcmp(pVirtioCC->pbDevSpecificCfg + uOffset,
1518 pVirtioCC->pbPrevDevSpecificCfg + uOffset,
1519 RT_MIN(cb, pVirtioCC->cbDevSpecificCfg - uOffset)));
1520
1521 memcpy(pVirtioCC->pbPrevDevSpecificCfg, pVirtioCC->pbDevSpecificCfg, pVirtioCC->cbDevSpecificCfg);
1522
1523 if (pVirtio->fGenUpdatePending || fDevSpecificFieldChanged)
1524 {
1525 ++pVirtio->uConfigGeneration;
1526 Log6Func(("Bumped cfg. generation to %d because %s%s\n",
1527 pVirtio->uConfigGeneration,
1528 fDevSpecificFieldChanged ? "<dev cfg changed> " : "",
1529 pVirtio->fGenUpdatePending ? "<update was pending>" : ""));
1530 pVirtio->fGenUpdatePending = false;
1531 }
1532
1533 virtioLowerInterrupt(pDevIns, 0);
1534 return rcStrict;
1535#else
1536 return VINF_IOM_R3_MMIO_READ;
1537#endif
1538 }
1539
1540 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocCommonCfgCap))
1541 return virtioCommonCfgAccessed(pDevIns, pVirtio, pVirtioCC, false /* fWrite */, uOffset, cb, pv);
1542
1543 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocIsrCap) && cb == sizeof(uint8_t))
1544 {
1545 *(uint8_t *)pv = pVirtio->uISR;
1546 Log6Func(("Read and clear ISR\n"));
1547 pVirtio->uISR = 0; /* VirtIO specification requires reads of ISR to clear it */
1548 virtioLowerInterrupt(pDevIns, 0);
1549 return VINF_SUCCESS;
1550 }
1551
1552 ASSERT_GUEST_MSG_FAILED(("Bad read access to mapped capabilities region: off=%RGp cb=%u\n", off, cb));
1553 return VINF_IOM_MMIO_UNUSED_00;
1554}
1555
1556/**
1557 * @callback_method_impl{FNIOMMMIONEWREAD,
1558 * Memory mapped I/O Handler for PCI Capabilities write operations.}
1559 *
1560 * This MMIO handler specifically supports the VIRTIO_PCI_CAP_PCI_CFG capability defined
1561 * in the VirtIO 1.0 specification, section 4.1.4.7, and as such is restricted to writes
1562 * of 1, 2 or 4 bytes, only.
1563 */
1564static DECLCALLBACK(VBOXSTRICTRC) virtioMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
1565{
1566 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1567 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1568
1569 AssertReturn(cb == 1 || cb == 2 || cb == 4, VERR_INVALID_PARAMETER);
1570
1571 Assert(pVirtio == (PVIRTIOCORE)pvUser); RT_NOREF(pvUser);
1572 uint32_t uOffset;
1573 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocDeviceCap))
1574 {
1575#ifdef IN_RING3
1576 /*
1577 * Foreward this MMIO write access for client to deal with.
1578 */
1579 return pVirtioCC->pfnDevCapWrite(pDevIns, uOffset, pv, cb);
1580#else
1581 return VINF_IOM_R3_MMIO_WRITE;
1582#endif
1583 }
1584
1585 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocCommonCfgCap))
1586 return virtioCommonCfgAccessed(pDevIns, pVirtio, pVirtioCC, true /* fWrite */, uOffset, cb, (void *)pv);
1587
1588 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocIsrCap) && cb == sizeof(uint8_t))
1589 {
1590 pVirtio->uISR = *(uint8_t *)pv;
1591 Log6Func(("Setting uISR = 0x%02x (virtq interrupt: %d, dev confg interrupt: %d)\n",
1592 pVirtio->uISR & 0xff,
1593 pVirtio->uISR & VIRTIO_ISR_VIRTQ_INTERRUPT,
1594 RT_BOOL(pVirtio->uISR & VIRTIO_ISR_DEVICE_CONFIG)));
1595 return VINF_SUCCESS;
1596 }
1597
1598 /* This *should* be guest driver dropping index of a new descriptor in avail ring */
1599 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocNotifyCap) && cb == sizeof(uint16_t))
1600 {
1601 virtioCoreVirtqNotified(pDevIns, pVirtio, uOffset / VIRTIO_NOTIFY_OFFSET_MULTIPLIER, *(uint16_t *)pv);
1602 return VINF_SUCCESS;
1603 }
1604
1605 ASSERT_GUEST_MSG_FAILED(("Bad write access to mapped capabilities region: off=%RGp pv=%#p{%.*Rhxs} cb=%u\n", off, pv, cb, pv, cb));
1606 return VINF_SUCCESS;
1607}
1608
1609#ifdef IN_RING3
1610
1611/**
1612 * @callback_method_impl{FNPCICONFIGREAD}
1613 */
1614static DECLCALLBACK(VBOXSTRICTRC) virtioR3PciConfigRead(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
1615 uint32_t uAddress, unsigned cb, uint32_t *pu32Value)
1616{
1617 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1618 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1619 RT_NOREF(pPciDev);
1620
1621 Log7Func((" pDevIns=%p pPciDev=%p uAddress=%#x%s cb=%u pu32Value=%p\n",
1622 pDevIns, pPciDev, uAddress, uAddress < 0x10 ? " " : "", cb, pu32Value));
1623 if (uAddress == pVirtio->uPciCfgDataOff)
1624 {
1625 /*
1626 * VirtIO 1.0 spec section 4.1.4.7 describes a required alternative access capability
1627 * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space
1628 * (the virtio_pci_cfg_cap capability), and access data items.
1629 */
1630 struct virtio_pci_cap *pPciCap = &pVirtioCC->pPciCfgCap->pciCap;
1631 uint32_t uLength = pPciCap->uLength;
1632
1633 if ( (uLength != 1 && uLength != 2 && uLength != 4)
1634 || cb != uLength
1635 || pPciCap->uBar != VIRTIO_REGION_PCI_CAP)
1636 {
1637 ASSERT_GUEST_MSG_FAILED(("Guest read virtio_pci_cfg_cap.pci_cfg_data using mismatching config. Ignoring\n"));
1638 *pu32Value = UINT32_MAX;
1639 return VINF_SUCCESS;
1640 }
1641
1642 VBOXSTRICTRC rcStrict = virtioMmioRead(pDevIns, pVirtio, pPciCap->uOffset, pu32Value, cb);
1643 Log7Func(("virtio: Guest read virtio_pci_cfg_cap.pci_cfg_data, bar=%d, offset=%d, length=%d, result=%d -> %Rrc\n",
1644 pPciCap->uBar, pPciCap->uOffset, uLength, *pu32Value, VBOXSTRICTRC_VAL(rcStrict)));
1645 return rcStrict;
1646 }
1647 return VINF_PDM_PCI_DO_DEFAULT;
1648}
1649
1650/**
1651 * @callback_method_impl{FNPCICONFIGWRITE}
1652 */
1653static DECLCALLBACK(VBOXSTRICTRC) virtioR3PciConfigWrite(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
1654 uint32_t uAddress, unsigned cb, uint32_t u32Value)
1655{
1656 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1657 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1658 RT_NOREF(pPciDev);
1659
1660 Log7Func(("pDevIns=%p pPciDev=%p uAddress=%#x %scb=%u u32Value=%#x\n", pDevIns, pPciDev, uAddress, uAddress < 0xf ? " " : "", cb, u32Value));
1661 if (uAddress == pVirtio->uPciCfgDataOff)
1662 {
1663 /* VirtIO 1.0 spec section 4.1.4.7 describes a required alternative access capability
1664 * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space
1665 * (the virtio_pci_cfg_cap capability), and access data items. */
1666
1667 struct virtio_pci_cap *pPciCap = &pVirtioCC->pPciCfgCap->pciCap;
1668 uint32_t uLength = pPciCap->uLength;
1669
1670 if ( (uLength != 1 && uLength != 2 && uLength != 4)
1671 || cb != uLength
1672 || pPciCap->uBar != VIRTIO_REGION_PCI_CAP)
1673 {
1674 ASSERT_GUEST_MSG_FAILED(("Guest write virtio_pci_cfg_cap.pci_cfg_data using mismatching config. Ignoring\n"));
1675 return VINF_SUCCESS;
1676 }
1677
1678 VBOXSTRICTRC rcStrict = virtioMmioWrite(pDevIns, pVirtio, pPciCap->uOffset, &u32Value, cb);
1679 Log2Func(("Guest wrote virtio_pci_cfg_cap.pci_cfg_data, bar=%d, offset=%x, length=%x, value=%d -> %Rrc\n",
1680 pPciCap->uBar, pPciCap->uOffset, uLength, u32Value, VBOXSTRICTRC_VAL(rcStrict)));
1681 return rcStrict;
1682 }
1683 return VINF_PDM_PCI_DO_DEFAULT;
1684}
1685
1686
1687/*********************************************************************************************************************************
1688* Saved state. *
1689*********************************************************************************************************************************/
1690
1691/**
1692 * Called from the FNSSMDEVSAVEEXEC function of the device.
1693 *
1694 * @param pVirtio Pointer to the shared virtio state.
1695 * @param pHlp The ring-3 device helpers.
1696 * @param pSSM The saved state handle.
1697 * @returns VBox status code.
1698 */
1699int virtioCoreR3SaveExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
1700{
1701 LogFunc(("\n"));
1702 pHlp->pfnSSMPutU64(pSSM, VIRTIO_SAVEDSTATE_MARKER);
1703 pHlp->pfnSSMPutU32(pSSM, VIRTIO_SAVEDSTATE_VERSION);
1704
1705 pHlp->pfnSSMPutBool(pSSM, pVirtio->fGenUpdatePending);
1706 pHlp->pfnSSMPutU8(pSSM, pVirtio->fDeviceStatus);
1707 pHlp->pfnSSMPutU8(pSSM, pVirtio->uConfigGeneration);
1708 pHlp->pfnSSMPutU8(pSSM, pVirtio->uPciCfgDataOff);
1709 pHlp->pfnSSMPutU8(pSSM, pVirtio->uISR);
1710 pHlp->pfnSSMPutU16(pSSM, pVirtio->uVirtqSelect);
1711 pHlp->pfnSSMPutU32(pSSM, pVirtio->uDeviceFeaturesSelect);
1712 pHlp->pfnSSMPutU32(pSSM, pVirtio->uDriverFeaturesSelect);
1713 pHlp->pfnSSMPutU64(pSSM, pVirtio->uDriverFeatures);
1714
1715 for (uint32_t i = 0; i < VIRTQ_MAX_CNT; i++)
1716 {
1717 pHlp->pfnSSMPutGCPhys64(pSSM, pVirtio->aGCPhysVirtqDesc[i]);
1718 pHlp->pfnSSMPutGCPhys64(pSSM, pVirtio->aGCPhysVirtqAvail[i]);
1719 pHlp->pfnSSMPutGCPhys64(pSSM, pVirtio->aGCPhysVirtqUsed[i]);
1720 pHlp->pfnSSMPutU16(pSSM, pVirtio->uVirtqNotifyOff[i]);
1721 pHlp->pfnSSMPutU16(pSSM, pVirtio->uVirtqMsixVector[i]);
1722 pHlp->pfnSSMPutU16(pSSM, pVirtio->uVirtqEnable[i]);
1723 pHlp->pfnSSMPutU16(pSSM, pVirtio->uVirtqSize[i]);
1724 pHlp->pfnSSMPutU16(pSSM, pVirtio->aVirtqState[i].uAvailIdxShadow);
1725 pHlp->pfnSSMPutU16(pSSM, pVirtio->aVirtqState[i].uUsedIdxShadow);
1726 int rc = pHlp->pfnSSMPutMem(pSSM, pVirtio->aVirtqState[i].szVirtqName, 32);
1727 AssertRCReturn(rc, rc);
1728 }
1729
1730 return VINF_SUCCESS;
1731}
1732
/**
 * Called from the FNSSMDEVLOADEXEC function of the device.
 *
 * Restores the state written by virtioCoreR3SaveExec().  The field order here
 * defines (together with the save side) the saved-state layout and must not be
 * changed without bumping VIRTIO_SAVEDSTATE_VERSION.
 *
 * @param   pVirtio     Pointer to the shared virtio state.
 * @param   pHlp        The ring-3 device helpers.
 * @param   pSSM        The saved state handle.
 * @returns VBox status code.
 */
int virtioCoreR3LoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
{
    LogFunc(("\n"));
    /*
     * Check the marker and (embedded) version number.
     */
    uint64_t uMarker = 0;
    int rc = pHlp->pfnSSMGetU64(pSSM, &uMarker);
    AssertRCReturn(rc, rc);
    if (uMarker != VIRTIO_SAVEDSTATE_MARKER)
        return pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
                                        N_("Expected marker value %#RX64 found %#RX64 instead"),
                                        VIRTIO_SAVEDSTATE_MARKER, uMarker);
    uint32_t uVersion = 0;
    rc = pHlp->pfnSSMGetU32(pSSM, &uVersion);
    AssertRCReturn(rc, rc);
    if (uVersion != VIRTIO_SAVEDSTATE_VERSION)
        return pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
                                        N_("Unsupported virtio version: %u"), uVersion);
    /*
     * Load the state.
     *
     * Note: the individual getter return codes below are intentionally not
     * checked one by one; presumably SSM latches the first failure internally
     * and it surfaces in the final pfnSSMGetMem status (NOTE(review): verify
     * against the SSM helper contract).
     */
    pHlp->pfnSSMGetBool(pSSM, &pVirtio->fGenUpdatePending);
    pHlp->pfnSSMGetU8(pSSM, &pVirtio->fDeviceStatus);
    pHlp->pfnSSMGetU8(pSSM, &pVirtio->uConfigGeneration);
    pHlp->pfnSSMGetU8(pSSM, &pVirtio->uPciCfgDataOff);
    pHlp->pfnSSMGetU8(pSSM, &pVirtio->uISR);
    pHlp->pfnSSMGetU16(pSSM, &pVirtio->uVirtqSelect);
    pHlp->pfnSSMGetU32(pSSM, &pVirtio->uDeviceFeaturesSelect);
    pHlp->pfnSSMGetU32(pSSM, &pVirtio->uDriverFeaturesSelect);
    pHlp->pfnSSMGetU64(pSSM, &pVirtio->uDriverFeatures);

    /* One fixed-size record per virtqueue: ring physical addresses, notify
       offset, MSI-X vector, enable flag, size, shadow indices and name. */
    for (uint32_t i = 0; i < VIRTQ_MAX_CNT; i++)
    {
        pHlp->pfnSSMGetGCPhys64(pSSM, &pVirtio->aGCPhysVirtqDesc[i]);
        pHlp->pfnSSMGetGCPhys64(pSSM, &pVirtio->aGCPhysVirtqAvail[i]);
        pHlp->pfnSSMGetGCPhys64(pSSM, &pVirtio->aGCPhysVirtqUsed[i]);
        pHlp->pfnSSMGetU16(pSSM, &pVirtio->uVirtqNotifyOff[i]);
        pHlp->pfnSSMGetU16(pSSM, &pVirtio->uVirtqMsixVector[i]);
        pHlp->pfnSSMGetU16(pSSM, &pVirtio->uVirtqEnable[i]);
        pHlp->pfnSSMGetU16(pSSM, &pVirtio->uVirtqSize[i]);
        pHlp->pfnSSMGetU16(pSSM, &pVirtio->aVirtqState[i].uAvailIdxShadow);
        pHlp->pfnSSMGetU16(pSSM, &pVirtio->aVirtqState[i].uUsedIdxShadow);
        rc = pHlp->pfnSSMGetMem(pSSM, pVirtio->aVirtqState[i].szVirtqName,
                                sizeof(pVirtio->aVirtqState[i].szVirtqName));
        AssertRCReturn(rc, rc);
    }

    return VINF_SUCCESS;
}
1791
1792
1793/*********************************************************************************************************************************
1794* Device Level *
1795*********************************************************************************************************************************/
1796
1797/**
1798 * This must be called by the client to handle VM state changes
1799 * after the client takes care of its device-specific tasks for the state change.
1800 * (i.e. Reset, suspend, power-off, resume)
1801 *
1802 * @param pDevIns The device instance.
1803 * @param pVirtio Pointer to the shared virtio state.
1804 */
1805void virtioCoreR3VmStateChanged(PVIRTIOCORE pVirtio, VIRTIOVMSTATECHANGED enmState)
1806{
1807 LogFunc(("State changing to %s\n",
1808 virtioCoreGetStateChangeText(enmState)));
1809
1810 switch(enmState)
1811 {
1812 case kvirtIoVmStateChangedReset:
1813 virtioCoreResetAll(pVirtio);
1814 break;
1815 case kvirtIoVmStateChangedSuspend:
1816 break;
1817 case kvirtIoVmStateChangedPowerOff:
1818 break;
1819 case kvirtIoVmStateChangedResume:
1820 virtioCoreNotifyGuestDriver(pVirtio->pDevInsR3, pVirtio, 0 /* uVirtqNbr */);
1821 break;
1822 default:
1823 LogRelFunc(("Bad enum value"));
1824 return;
1825 }
1826}
1827
1828/**
1829 * This should be called from PDMDEVREGR3::pfnDestruct.
1830 *
1831 * @param pDevIns The device instance.
1832 * @param pVirtio Pointer to the shared virtio state.
1833 * @param pVirtioCC Pointer to the ring-3 virtio state.
1834 */
1835void virtioCoreR3Term(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC)
1836{
1837 if (pVirtioCC->pbPrevDevSpecificCfg)
1838 {
1839 RTMemFree(pVirtioCC->pbPrevDevSpecificCfg);
1840 pVirtioCC->pbPrevDevSpecificCfg = NULL;
1841 }
1842 RT_NOREF(pDevIns, pVirtio);
1843}
1844
1845
1846/**rr
1847 * Setup PCI device controller and Virtio state
1848 *
1849 * This should be called from PDMDEVREGR3::pfnConstruct.
1850 *
1851 * @param pDevIns The device instance.
1852 * @param pVirtio Pointer to the shared virtio state. This
1853 * must be the first member in the shared
1854 * device instance data!
1855 * @param pVirtioCC Pointer to the ring-3 virtio state. This
1856 * must be the first member in the ring-3
1857 * device instance data!
1858 * @param pPciParams Values to populate industry standard PCI Configuration Space data structure
1859 * @param pcszInstance Device instance name (format-specifier)
1860 * @param fDevSpecificFeatures VirtIO device-specific features offered by
1861 * client
1862 * @param cbDevSpecificCfg Size of virtio_pci_device_cap device-specific struct
1863 * @param pvDevSpecificCfg Address of client's dev-specific
1864 * configuration struct.
1865 */
1866int virtioCoreR3Init(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC, PVIRTIOPCIPARAMS pPciParams,
1867 const char *pcszInstance, uint64_t fDevSpecificFeatures, void *pvDevSpecificCfg, uint16_t cbDevSpecificCfg)
1868{
1869 /*
1870 * The pVirtio state must be the first member of the shared device instance
1871 * data, otherwise we cannot get our bearings in the PCI configuration callbacks.
1872 */
1873 AssertLogRelReturn(pVirtio == PDMINS_2_DATA(pDevIns, PVIRTIOCORE), VERR_STATE_CHANGED);
1874 AssertLogRelReturn(pVirtioCC == PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC), VERR_STATE_CHANGED);
1875
1876 pVirtio->pDevInsR3 = pDevIns;
1877
1878 /*
1879 * Caller must initialize these.
1880 */
1881 AssertReturn(pVirtioCC->pfnStatusChanged, VERR_INVALID_POINTER);
1882 AssertReturn(pVirtioCC->pfnVirtqNotified, VERR_INVALID_POINTER);
1883
1884#if 0 /* Until pdmR3DvHlp_PCISetIrq() impl is fixed and Assert that limits vec to 0 is removed */
1885# ifdef VBOX_WITH_MSI_DEVICES
1886 pVirtio->fMsiSupport = true;
1887# endif
1888#endif
1889
1890 /*
1891 * The host features offered include both device-specific features
1892 * and reserved feature bits (device independent)
1893 */
1894 pVirtio->uDeviceFeatures = VIRTIO_F_VERSION_1
1895 | VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED
1896 | fDevSpecificFeatures;
1897
1898 RTStrCopy(pVirtio->szInstance, sizeof(pVirtio->szInstance), pcszInstance);
1899
1900 pVirtio->fDeviceStatus = 0;
1901 pVirtioCC->cbDevSpecificCfg = cbDevSpecificCfg;
1902 pVirtioCC->pbDevSpecificCfg = (uint8_t *)pvDevSpecificCfg;
1903 pVirtioCC->pbPrevDevSpecificCfg = (uint8_t *)RTMemDup(pvDevSpecificCfg, cbDevSpecificCfg);
1904 AssertLogRelReturn(pVirtioCC->pbPrevDevSpecificCfg, VERR_NO_MEMORY);
1905
1906 /* Set PCI config registers (assume 32-bit mode) */
1907 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
1908 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
1909
1910 PDMPciDevSetRevisionId(pPciDev, DEVICE_PCI_REVISION_ID_VIRTIO);
1911 PDMPciDevSetVendorId(pPciDev, DEVICE_PCI_VENDOR_ID_VIRTIO);
1912 PDMPciDevSetSubSystemVendorId(pPciDev, DEVICE_PCI_VENDOR_ID_VIRTIO);
1913 PDMPciDevSetDeviceId(pPciDev, pPciParams->uDeviceId);
1914 PDMPciDevSetClassBase(pPciDev, pPciParams->uClassBase);
1915 PDMPciDevSetClassSub(pPciDev, pPciParams->uClassSub);
1916 PDMPciDevSetClassProg(pPciDev, pPciParams->uClassProg);
1917 PDMPciDevSetSubSystemId(pPciDev, pPciParams->uSubsystemId);
1918 PDMPciDevSetInterruptLine(pPciDev, pPciParams->uInterruptLine);
1919 PDMPciDevSetInterruptPin(pPciDev, pPciParams->uInterruptPin);
1920
1921 /* Register PCI device */
1922 int rc = PDMDevHlpPCIRegister(pDevIns, pPciDev);
1923 if (RT_FAILURE(rc))
1924 return PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: cannot register PCI Device")); /* can we put params in this error? */
1925
1926 rc = PDMDevHlpPCIInterceptConfigAccesses(pDevIns, pPciDev, virtioR3PciConfigRead, virtioR3PciConfigWrite);
1927 AssertRCReturn(rc, rc);
1928
1929
1930 /* Construct & map PCI vendor-specific capabilities for virtio host negotiation with guest driver */
1931
1932#define CFG_ADDR_2_IDX(addr) ((uint8_t)(((uintptr_t)(addr) - (uintptr_t)&pPciDev->abConfig[0])))
1933#define SET_PCI_CAP_LOC(a_pPciDev, a_pCfg, a_LocCap, a_uMmioLengthAlign) \
1934 do { \
1935 (a_LocCap).offMmio = (a_pCfg)->uOffset; \
1936 (a_LocCap).cbMmio = RT_ALIGN_T((a_pCfg)->uLength, a_uMmioLengthAlign, uint16_t); \
1937 (a_LocCap).offPci = (uint16_t)(uintptr_t)((uint8_t *)(a_pCfg) - &(a_pPciDev)->abConfig[0]); \
1938 (a_LocCap).cbPci = (a_pCfg)->uCapLen; \
1939 } while (0)
1940
1941 PVIRTIO_PCI_CAP_T pCfg;
1942 uint32_t cbRegion = 0;
1943
1944 /* Common capability (VirtIO 1.0 spec, section 4.1.4.3) */
1945 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[0x40];
1946 pCfg->uCfgType = VIRTIO_PCI_CAP_COMMON_CFG;
1947 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1948 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
1949 pCfg->uCapNext = CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen;
1950 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
1951 pCfg->uOffset = RT_ALIGN_32(0, 4); /* Currently 0, but reminder to 32-bit align if changing this */
1952 pCfg->uLength = sizeof(VIRTIO_PCI_COMMON_CFG_T);
1953 cbRegion += pCfg->uLength;
1954 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocCommonCfgCap, 2);
1955 pVirtioCC->pCommonCfgCap = pCfg;
1956
1957 /*
1958 * Notify capability (VirtIO 1.0 spec, section 4.1.4.4). Note: uLength is based on the choice
1959 * of this implementation to make each queue's uVirtqNotifyOff equal to (VirtqSelect) ordinal
1960 * value of the queue (different strategies are possible according to spec).
1961 */
1962 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
1963 pCfg->uCfgType = VIRTIO_PCI_CAP_NOTIFY_CFG;
1964 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1965 pCfg->uCapLen = sizeof(VIRTIO_PCI_NOTIFY_CAP_T);
1966 pCfg->uCapNext = CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen;
1967 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
1968 pCfg->uOffset = pVirtioCC->pCommonCfgCap->uOffset + pVirtioCC->pCommonCfgCap->uLength;
1969 pCfg->uOffset = RT_ALIGN_32(pCfg->uOffset, 4);
1970 pCfg->uLength = VIRTQ_MAX_CNT * VIRTIO_NOTIFY_OFFSET_MULTIPLIER + 2; /* will change in VirtIO 1.1 */
1971 cbRegion += pCfg->uLength;
1972 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocNotifyCap, 1);
1973 pVirtioCC->pNotifyCap = (PVIRTIO_PCI_NOTIFY_CAP_T)pCfg;
1974 pVirtioCC->pNotifyCap->uNotifyOffMultiplier = VIRTIO_NOTIFY_OFFSET_MULTIPLIER;
1975
1976 /* ISR capability (VirtIO 1.0 spec, section 4.1.4.5)
1977 *
1978 * VirtIO 1.0 spec says 8-bit, unaligned in MMIO space. Example/diagram
1979 * of spec shows it as a 32-bit field with upper bits 'reserved'
1980 * Will take spec's words more literally than the diagram for now.
1981 */
1982 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
1983 pCfg->uCfgType = VIRTIO_PCI_CAP_ISR_CFG;
1984 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1985 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
1986 pCfg->uCapNext = CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen;
1987 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
1988 pCfg->uOffset = pVirtioCC->pNotifyCap->pciCap.uOffset + pVirtioCC->pNotifyCap->pciCap.uLength;
1989 pCfg->uOffset = RT_ALIGN_32(pCfg->uOffset, 4);
1990 pCfg->uLength = sizeof(uint8_t);
1991 cbRegion += pCfg->uLength;
1992 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocIsrCap, 4);
1993 pVirtioCC->pIsrCap = pCfg;
1994
1995 /* PCI Cfg capability (VirtIO 1.0 spec, section 4.1.4.7)
1996 * This capability doesn't get page-MMIO mapped. Instead uBar, uOffset and uLength are intercepted
1997 * by trapping PCI configuration I/O and get modulated by consumers to locate fetch and read/write
1998 * values from any region. NOTE: The linux driver not only doesn't use this feature, it will not
1999 * even list it as present if uLength isn't non-zero and also 4-byte-aligned as the linux driver is
2000 * initializing.
2001 */
2002 pVirtio->uPciCfgDataOff = pCfg->uCapNext + RT_OFFSETOF(VIRTIO_PCI_CFG_CAP_T, uPciCfgData);
2003 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
2004 pCfg->uCfgType = VIRTIO_PCI_CAP_PCI_CFG;
2005 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
2006 pCfg->uCapLen = sizeof(VIRTIO_PCI_CFG_CAP_T);
2007 pCfg->uCapNext = (pVirtio->fMsiSupport || pVirtioCC->pbDevSpecificCfg) ? CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen : 0;
2008 pCfg->uBar = 0;
2009 pCfg->uOffset = 0;
2010 pCfg->uLength = 0;
2011 cbRegion += pCfg->uLength;
2012 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocPciCfgCap, 1);
2013 pVirtioCC->pPciCfgCap = (PVIRTIO_PCI_CFG_CAP_T)pCfg;
2014
2015 if (pVirtioCC->pbDevSpecificCfg)
2016 {
2017 /* Following capability (via VirtIO 1.0, section 4.1.4.6). Client defines the
2018 * device-specific config fields struct and passes size to this constructor */
2019 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
2020 pCfg->uCfgType = VIRTIO_PCI_CAP_DEVICE_CFG;
2021 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
2022 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
2023 pCfg->uCapNext = pVirtio->fMsiSupport ? CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen : 0;
2024 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
2025 pCfg->uOffset = pVirtioCC->pIsrCap->uOffset + pVirtioCC->pIsrCap->uLength;
2026 pCfg->uOffset = RT_ALIGN_32(pCfg->uOffset, 4);
2027 pCfg->uLength = cbDevSpecificCfg;
2028 cbRegion += pCfg->uLength;
2029 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocDeviceCap, 4);
2030 pVirtioCC->pDeviceCap = pCfg;
2031 }
2032 else
2033 Assert(pVirtio->LocDeviceCap.cbMmio == 0 && pVirtio->LocDeviceCap.cbPci == 0);
2034
2035 if (pVirtio->fMsiSupport)
2036 {
2037 PDMMSIREG aMsiReg;
2038 RT_ZERO(aMsiReg);
2039 aMsiReg.iMsixCapOffset = pCfg->uCapNext;
2040 aMsiReg.iMsixNextOffset = 0;
2041 aMsiReg.iMsixBar = VIRTIO_REGION_MSIX_CAP;
2042 aMsiReg.cMsixVectors = VBOX_MSIX_MAX_ENTRIES;
2043 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &aMsiReg); /* see MsixR3init() */
2044 if (RT_FAILURE(rc))
2045 {
2046 /* See PDMDevHlp.cpp:pdmR3DevHlp_PCIRegisterMsi */
2047 LogFunc(("Failed to configure MSI-X (%Rrc). Reverting to INTx\n", rc));
2048 pVirtio->fMsiSupport = false;
2049 }
2050 else
2051 Log2Func(("Using MSI-X for guest driver notification\n"));
2052 }
2053 else
2054 LogFunc(("MSI-X not available for VBox, using INTx notification\n"));
2055
2056 /* Set offset to first capability and enable PCI dev capabilities */
2057 PDMPciDevSetCapabilityList(pPciDev, 0x40);
2058 PDMPciDevSetStatus(pPciDev, VBOX_PCI_STATUS_CAP_LIST);
2059
2060 size_t cbSize = RTStrPrintf(pVirtioCC->pcszMmioName, sizeof(pVirtioCC->pcszMmioName), "%s MMIO", pcszInstance);
2061 if (cbSize <= 0)
2062 return PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: out of memory allocating string")); /* can we put params in this error? */
2063
2064 /* Note: The Linux driver at drivers/virtio/virtio_pci_modern.c tries to map at least a page for the
2065 * 'unknown' device-specific capability without querying the capability to figure
2066 * out size, so pad with an extra page
2067 */
2068 rc = PDMDevHlpPCIIORegionCreateMmio(pDevIns, VIRTIO_REGION_PCI_CAP, RT_ALIGN_32(cbRegion + PAGE_SIZE, PAGE_SIZE),
2069 PCI_ADDRESS_SPACE_MEM, virtioMmioWrite, virtioMmioRead, pVirtio,
2070 IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
2071 pVirtioCC->pcszMmioName,
2072 &pVirtio->hMmioPciCap);
2073 AssertLogRelRCReturn(rc, PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: cannot register PCI Capabilities address space")));
2074 /*
2075 * Statistics.
2076 */
2077 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsAllocated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
2078 "Total number of allocated descriptor chains", "DescChainsAllocated");
2079 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsFreed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
2080 "Total number of freed descriptor chains", "DescChainsFreed");
2081 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsSegsIn, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
2082 "Total number of inbound segments", "DescChainsSegsIn");
2083 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsSegsOut, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
2084 "Total number of outbound segments", "DescChainsSegsOut");
2085
2086 return VINF_SUCCESS;
2087}
2088
2089#else /* !IN_RING3 */
2090
2091/**
2092 * Sets up the core ring-0/raw-mode virtio bits.
2093 *
2094 * @returns VBox status code.
2095 * @param pDevIns The device instance.
2096 * @param pVirtio Pointer to the shared virtio state. This must be the first
2097 * member in the shared device instance data!
2098 */
2099int virtioCoreRZInit(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio)
2100{
2101 AssertLogRelReturn(pVirtio == PDMINS_2_DATA(pDevIns, PVIRTIOCORE), VERR_STATE_CHANGED);
2102
2103#ifdef FUTURE_OPTIMIZATION
2104 int rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
2105 AssertRCReturn(rc, rc);
2106#endif
2107 int rc = PDMDevHlpMmioSetUpContext(pDevIns, pVirtio->hMmioPciCap, virtioMmioWrite, virtioMmioRead, pVirtio);
2108 AssertRCReturn(rc, rc);
2109 return rc;
2110}
2111
2112#endif /* !IN_RING3 */
2113
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette