VirtualBox

source: vbox/trunk/src/VBox/Devices/VirtIO/VirtioCore.cpp@ 86997

Last change on this file since 86997 was 85819, checked in by vboxsync, 4 years ago

Fix assert when guest uses raw PCI bus access to get 1, 2, 4 byte quantities in early boot

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 82.1 KB
Line 
1/* $Id: VirtioCore.cpp 85819 2020-08-18 12:58:30Z vboxsync $ */
2
3/** @file
4 * VirtioCore - Virtio Core (PCI, feature & config mgt, queue mgt & proxy, notification mgt)
5 */
6
7/*
8 * Copyright (C) 2009-2020 Oracle Corporation
9 *
10 * This file is part of VirtualBox Open Source Edition (OSE), as
11 * available from http://www.virtualbox.org. This file is free software;
12 * you can redistribute it and/or modify it under the terms of the GNU
13 * General Public License (GPL) as published by the Free Software
14 * Foundation, in version 2 as it comes in the "COPYING" file of the
15 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
16 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
17 */
18
19
20/*********************************************************************************************************************************
21* Header Files *
22*********************************************************************************************************************************/
23#define LOG_GROUP LOG_GROUP_DEV_VIRTIO
24
25#include <iprt/assert.h>
26#include <iprt/uuid.h>
27#include <iprt/mem.h>
28#include <iprt/sg.h>
29#include <iprt/assert.h>
30#include <iprt/string.h>
31#include <iprt/param.h>
32#include <iprt/types.h>
33#include <VBox/log.h>
34#include <VBox/msi.h>
35#include <iprt/types.h>
36#include <VBox/AssertGuest.h>
37#include <VBox/vmm/pdmdev.h>
38#include "VirtioCore.h"
39
40
41/*********************************************************************************************************************************
42* Defined Constants And Macros *
43*********************************************************************************************************************************/
/** Shortcut to the virtio core instance name (used in log statements). */
#define INSTANCE(a_pVirtio)                 ((a_pVirtio)->szInstance)
/** Shortcut to the name of virtqueue number @a a_uVirtq. */
#define VIRTQNAME(a_pVirtio, a_uVirtq)      ((a_pVirtio)->aVirtqueues[(a_uVirtq)].szName)

/** True once the guest driver has set DRIVER_OK in the device status register. */
#define IS_DRIVER_OK(a_pVirtio)             ((a_pVirtio)->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
/** True when the avail ring of @a pVirtq currently offers no unconsumed buffers. */
#define IS_VIRTQ_EMPTY(pDevIns, pVirtio, pVirtq) \
            (virtioCoreVirtqAvailBufCount_inline(pDevIns, pVirtio, pVirtq) == 0)

/**
 * This macro returns true if the @a a_offAccess and access length (@a
 * a_cbAccess) are within the range of the mapped capability struct described by
 * @a a_LocCapData.
 *
 * @param[in]  a_offAccess      Input:  The offset into the MMIO bar of the access.
 * @param[in]  a_cbAccess       Input:  The access size.
 * @param[out] a_offsetIntoCap  Output: uint32_t variable to return the intra-capability offset into.
 * @param[in]  a_LocCapData     Input:  The capability location info.
 */
#define MATCHES_VIRTIO_CAP_STRUCT(a_offAccess, a_cbAccess, a_offsetIntoCap, a_LocCapData) \
    (   ((a_offsetIntoCap) = (uint32_t)((a_offAccess) - (a_LocCapData).offMmio)) < (uint32_t)(a_LocCapData).cbMmio \
     && (a_offsetIntoCap) + (uint32_t)(a_cbAccess) <= (uint32_t)(a_LocCapData).cbMmio )


/** Marks the start of the virtio saved state (just for sanity). */
#define VIRTIO_SAVEDSTATE_MARKER            UINT64_C(0x1133557799bbddff)
/** The current saved state version for the virtio core. */
#define VIRTIO_SAVEDSTATE_VERSION           UINT32_C(1)
70
71
72/*********************************************************************************************************************************
73* Structures and Typedefs *
74*********************************************************************************************************************************/
75
76
/** @name virtq related flags
 * @{ */
#define VIRTQ_DESC_F_NEXT                   1    /**< Indicates this descriptor chains to next  */
#define VIRTQ_DESC_F_WRITE                  2    /**< Marks buffer as write-only (default ro)   */
#define VIRTQ_DESC_F_INDIRECT               4    /**< Buffer is list of buffer descriptors      */

#define VIRTQ_USED_F_NO_NOTIFY              1    /**< Dev to Drv: Don't notify when buf added   */
#define VIRTQ_AVAIL_F_NO_INTERRUPT          1    /**< Drv to Dev: Don't notify when buf eaten   */
/** @} */

/**
 * virtq related structs
 * (struct names follow VirtIO 1.0 spec, typedef use VBox style)
 *
 * These mirror the guest-physical ring layouts, so their field order and sizes
 * must not be changed.  The flexible-array members model the variable-length
 * rings; the commented-out trailing fields only exist in guest memory when the
 * event-index feature is negotiated and are accessed via RT_UOFFSETOF_DYN.
 */
typedef struct virtq_desc
{
    uint64_t  GCPhysBuf;                         /**< addr        GC Phys. address of buffer    */
    uint32_t  cb;                                /**< len         Buffer length                 */
    uint16_t  fFlags;                            /**< flags       Buffer specific flags         */
    uint16_t  uDescIdxNext;                      /**< next        Idx set if VIRTIO_DESC_F_NEXT */
} VIRTQ_DESC_T, *PVIRTQ_DESC_T;

typedef struct virtq_avail
{
    uint16_t  fFlags;                            /**< flags       avail ring guest-to-host flags */
    uint16_t  uIdx;                              /**< idx         Index of next free ring slot   */
    RT_FLEXIBLE_ARRAY_EXTENSION
    uint16_t  auRing[RT_FLEXIBLE_ARRAY];         /**< ring        Ring: avail drv to dev bufs    */
    //uint16_t  uUsedEventIdx;                   /**< used_event (if VIRTQ_USED_F_EVENT_IDX)     */
} VIRTQ_AVAIL_T, *PVIRTQ_AVAIL_T;

typedef struct virtq_used_elem
{
    uint32_t  uDescIdx;                          /**< idx         Start of used desc chain       */
    uint32_t  cbElem;                            /**< len         Total len of used desc chain   */
} VIRTQ_USED_ELEM_T;

typedef struct virt_used
{
    uint16_t  fFlags;                            /**< flags       used ring host-to-guest flags  */
    uint16_t  uIdx;                              /**< idx         Index of next ring slot        */
    RT_FLEXIBLE_ARRAY_EXTENSION
    VIRTQ_USED_ELEM_T aRing[RT_FLEXIBLE_ARRAY];  /**< ring        Ring: used dev to drv bufs     */
    //uint16_t  uAvailEventIdx;                  /**< avail_event if (VIRTQ_USED_F_EVENT_IDX)    */
} VIRTQ_USED_T, *PVIRTQ_USED_T;
122
123
124const char *virtioCoreGetStateChangeText(VIRTIOVMSTATECHANGED enmState)
125{
126 switch (enmState)
127 {
128 case kvirtIoVmStateChangedReset: return "VM RESET";
129 case kvirtIoVmStateChangedSuspend: return "VM SUSPEND";
130 case kvirtIoVmStateChangedPowerOff: return "VM POWER OFF";
131 case kvirtIoVmStateChangedResume: return "VM RESUME";
132 default: return "<BAD ENUM>";
133 }
134}
135
136/* Internal Functions */
137
138static void virtioCoreNotifyGuestDriver(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq);
139static int virtioKick(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint8_t uCause, uint16_t uVec);
140
141/** @name Internal queue operations
142 * @{ */
143
144/**
145 * Accessor for virtq descriptor
146 */
147#ifdef IN_RING3
148DECLINLINE(void) virtioReadDesc(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq,
149 uint32_t idxDesc, PVIRTQ_DESC_T pDesc)
150{
151 AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
152 RT_NOREF(pVirtio);
153 uint16_t const cVirtqItems = RT_MAX(pVirtq->uSize, 1); /* Make sure to avoid div-by-zero. */
154 PDMDevHlpPCIPhysRead(pDevIns,
155 pVirtq->GCPhysVirtqDesc + sizeof(VIRTQ_DESC_T) * (idxDesc % cVirtqItems),
156 pDesc, sizeof(VIRTQ_DESC_T));
157}
158#endif
159
160/**
161 * Accessors for virtq avail ring
162 */
163#ifdef IN_RING3
164DECLINLINE(uint16_t) virtioReadAvailDescIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint32_t availIdx)
165{
166 uint16_t uDescIdx;
167 AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
168 RT_NOREF(pVirtio);
169 uint16_t const cVirtqItems = RT_MAX(pVirtq->uSize, 1); /* Make sure to avoid div-by-zero. */
170 PDMDevHlpPCIPhysRead(pDevIns,
171 pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[availIdx % cVirtqItems]),
172 &uDescIdx, sizeof(uDescIdx));
173 return uDescIdx;
174}
175
176DECLINLINE(uint16_t) virtioReadAvailUsedEvent(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
177{
178 uint16_t uUsedEventIdx;
179 /* VirtIO 1.0 uUsedEventIdx (used_event) immediately follows ring */
180 AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
181 RT_NOREF(pVirtio);
182 PDMDevHlpPCIPhysRead(pDevIns,
183 pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[pVirtq->uSize]),
184 &uUsedEventIdx, sizeof(uUsedEventIdx));
185 return uUsedEventIdx;
186}
187#endif
188
189DECLINLINE(uint16_t) virtioReadAvailRingIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
190{
191 uint16_t uIdx = 0;
192 AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
193 RT_NOREF(pVirtio);
194 PDMDevHlpPCIPhysRead(pDevIns,
195 pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF(VIRTQ_AVAIL_T, uIdx),
196 &uIdx, sizeof(uIdx));
197 return uIdx;
198}
199
200DECLINLINE(uint16_t) virtioReadAvailRingFlags(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
201{
202 uint16_t fFlags = 0;
203 AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
204 RT_NOREF(pVirtio);
205 PDMDevHlpPCIPhysRead(pDevIns,
206 pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF(VIRTQ_AVAIL_T, fFlags),
207 &fFlags, sizeof(fFlags));
208 return fFlags;
209}
210
211/** @} */
212
213/** @name Accessors for virtq used ring
214 * @{
215 */
216
217#ifdef IN_RING3
218DECLINLINE(void) virtioWriteUsedElem(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq,
219 uint32_t usedIdx, uint32_t uDescIdx, uint32_t uLen)
220{
221 VIRTQ_USED_ELEM_T elem = { uDescIdx, uLen };
222 AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
223 RT_NOREF(pVirtio);
224 uint16_t const cVirtqItems = RT_MAX(pVirtq->uSize, 1); /* Make sure to avoid div-by-zero. */
225 PDMDevHlpPCIPhysWrite(pDevIns,
226 pVirtq->GCPhysVirtqUsed
227 + RT_UOFFSETOF_DYN(VIRTQ_USED_T, aRing[usedIdx % cVirtqItems]),
228 &elem, sizeof(elem));
229}
230
231DECLINLINE(void) virtioWriteUsedRingFlags(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint16_t fFlags)
232{
233 AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
234 RT_NOREF(pVirtio);
235 RT_UNTRUSTED_VALIDATED_FENCE(); /* VirtIO 1.0, Section 3.2.1.4.1 */
236 PDMDevHlpPCIPhysWrite(pDevIns,
237 pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
238 &fFlags, sizeof(fFlags));
239}
240#endif
241
242DECLINLINE(void) virtioWriteUsedRingIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint16_t uIdx)
243{
244 AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
245 RT_NOREF(pVirtio);
246 PDMDevHlpPCIPhysWrite(pDevIns,
247 pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, uIdx),
248 &uIdx, sizeof(uIdx));
249}
250
251
252#ifdef IN_RING3
253DECLINLINE(uint16_t) virtioReadUsedRingIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
254{
255 uint16_t uIdx = 0;
256 AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
257 RT_NOREF(pVirtio);
258 PDMDevHlpPCIPhysRead(pDevIns,
259 pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, uIdx),
260 &uIdx, sizeof(uIdx));
261 return uIdx;
262}
263
264DECLINLINE(uint16_t) virtioReadUsedRingFlags(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
265{
266 uint16_t fFlags = 0;
267 AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
268 RT_NOREF(pVirtio);
269 PDMDevHlpPCIPhysRead(pDevIns,
270 pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
271 &fFlags, sizeof(fFlags));
272 return fFlags;
273}
274
275DECLINLINE(void) virtioWriteUsedAvailEvent(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint32_t uAvailEventIdx)
276{
277 /** VirtIO 1.0 uAvailEventIdx (avail_event) immediately follows ring */
278 AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
279 RT_NOREF(pVirtio);
280 PDMDevHlpPCIPhysWrite(pDevIns,
281 pVirtq->GCPhysVirtqUsed
282 + RT_UOFFSETOF_DYN(VIRTQ_USED_T, aRing[pVirtq->uSize]),
283 &uAvailEventIdx, sizeof(uAvailEventIdx));
284}
285#endif
286
/**
 * Returns the number of avail-ring entries the guest has added that the device
 * has not yet consumed: the distance between the guest's avail index (read
 * from guest memory) and the device-side shadow of it.
 */
DECLINLINE(uint16_t) virtioCoreVirtqAvailBufCount_inline(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
{
    uint16_t uIdxActual = virtioReadAvailRingIdx(pDevIns, pVirtio, pVirtq);
    uint16_t uIdxShadow = pVirtq->uAvailIdxShadow;
    uint16_t uIdxDelta;

    /* NOTE(review): this compensates for wrap-around at VIRTQ_MAX_ENTRIES rather
     * than relying on natural 16-bit modular arithmetic -- confirm the ring
     * indices are kept below VIRTQ_MAX_ENTRIES by the writers. */
    if (uIdxActual < uIdxShadow)
        uIdxDelta = (uIdxActual + VIRTQ_MAX_ENTRIES) - uIdxShadow;
    else
        uIdxDelta = uIdxActual - uIdxShadow;

    LogFunc(("%s has %u %s (idx=%u shadow=%u)\n",
             pVirtq->szName, uIdxDelta, uIdxDelta == 1 ? "entry" : "entries",
             uIdxActual, uIdxShadow));

    return uIdxDelta;
}
304/**
305 * Get count of new (e.g. pending) elements in available ring.
306 *
307 * @param pDevIns The device instance.
308 * @param pVirtio Pointer to the shared virtio state.
309 * @param uVirtq Virtq number
310 *
311 * @returns how many entries have been added to ring as a delta of the consumer's
312 * avail index and the queue's guest-side current avail index.
313 */
314uint16_t virtioCoreVirtqAvailBufCount(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq)
315{
316 AssertMsgReturn(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues), ("uVirtq out of range"), 0);
317 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
318 if (!IS_DRIVER_OK(pVirtio) || !pVirtq->uEnable)
319 {
320 LogRelFunc(("Driver not ready or queue not enabled\n"));
321 return 0;
322 }
323
324 return virtioCoreVirtqAvailBufCount_inline(pDevIns, pVirtio, pVirtq);
325}
326
327#ifdef IN_RING3
328
329/** API Function: See header file*/
330void virtioCorePrintFeatures(VIRTIOCORE *pVirtio, PCDBGFINFOHLP pHlp)
331{
332 static struct
333 {
334 uint64_t fFeatureBit;
335 const char *pcszDesc;
336 } const s_aFeatures[] =
337 {
338 { VIRTIO_F_RING_INDIRECT_DESC, " RING_INDIRECT_DESC Driver can use descriptors with VIRTQ_DESC_F_INDIRECT flag set\n" },
339 { VIRTIO_F_RING_EVENT_IDX, " RING_EVENT_IDX Enables use_event and avail_event fields described in 2.4.7, 2.4.8\n" },
340 { VIRTIO_F_VERSION_1, " VERSION Used to detect legacy drivers.\n" },
341 };
342
343#define MAXLINE 80
344 /* Display as a single buf to prevent interceding log messages */
345 uint16_t cbBuf = RT_ELEMENTS(s_aFeatures) * 132;
346 char *pszBuf = (char *)RTMemAllocZ(cbBuf);
347 Assert(pszBuf);
348 char *cp = pszBuf;
349 for (unsigned i = 0; i < RT_ELEMENTS(s_aFeatures); ++i)
350 {
351 bool isOffered = RT_BOOL(pVirtio->uDeviceFeatures & s_aFeatures[i].fFeatureBit);
352 bool isNegotiated = RT_BOOL(pVirtio->uDriverFeatures & s_aFeatures[i].fFeatureBit);
353 cp += RTStrPrintf(cp, cbBuf - (cp - pszBuf), " %s %s %s",
354 isOffered ? "+" : "-", isNegotiated ? "x" : " ", s_aFeatures[i].pcszDesc);
355 }
356 if (pHlp)
357 pHlp->pfnPrintf(pHlp, "VirtIO Core Features Configuration\n\n"
358 " Offered Accepted Feature Description\n"
359 " ------- -------- ------- -----------\n"
360 "%s\n", pszBuf);
361#ifdef LOG_ENABLED
362 else
363 Log3(("VirtIO Core Features Configuration\n\n"
364 " Offered Accepted Feature Description\n"
365 " ------- -------- ------- -----------\n"
366 "%s\n", pszBuf));
367#endif
368 RTMemFree(pszBuf);
369}
370#endif
371
372#ifdef LOG_ENABLED
373
/** API Function: See header file
 *
 * Renders @a cb bytes at @a pv as a classic hex + ASCII dump (16 bytes per
 * row, row addresses offset by @a uBase) into a single heap buffer, then emits
 * it as one Log() statement so concurrent log traffic cannot interleave rows.
 */
void virtioCoreHexDump(uint8_t *pv, uint32_t cb, uint32_t uBase, const char *pszTitle)
{
/* Advance the output cursor and shrink the remaining-space counter in lockstep. */
#define ADJCURSOR(cb) pszOut += cb; cbRemain -= cb;
    size_t cbPrint = 0, cbRemain = ((cb / 16) + 1) * 80; /* 80 chars/row covers addr + hex + ASCII columns */
    char *pszBuf = (char *)RTMemAllocZ(cbRemain), *pszOut = pszBuf;
    AssertMsgReturnVoid(pszBuf, ("Out of Memory"));
    if (pszTitle)
    {
        cbPrint = RTStrPrintf(pszOut, cbRemain, "%s [%d bytes]:\n", pszTitle, cb);
        ADJCURSOR(cbPrint);
    }
    for (uint32_t row = 0; row < RT_MAX(1, (cb / 16) + 1) && row * 16 < cb; row++)
    {
        cbPrint = RTStrPrintf(pszOut, cbRemain, "%04x: ", row * 16 + uBase); /* line address */
        ADJCURSOR(cbPrint);
        for (uint8_t col = 0; col < 16; col++)
        {
            uint32_t idx = row * 16 + col;
            if (idx >= cb)
                cbPrint = RTStrPrintf(pszOut, cbRemain, "-- %s", (col + 1) % 8 ? "" : " "); /* pad partial last row */
            else
                cbPrint = RTStrPrintf(pszOut, cbRemain, "%02x %s", pv[idx], (col + 1) % 8 ? "" : " ");
            ADJCURSOR(cbPrint);
        }
        /* ASCII column: printable characters verbatim, everything else as '.'. */
        for (uint32_t idx = row * 16; idx < row * 16 + 16; idx++)
        {
            cbPrint = RTStrPrintf(pszOut, cbRemain, "%c", (idx >= cb) ? ' ' : (pv[idx] >= 0x20 && pv[idx] <= 0x7e ? pv[idx] : '.'));
            ADJCURSOR(cbPrint);
        }
        *pszOut++ = '\n';
        --cbRemain;
    }
    Log(("%s\n", pszBuf));
    RTMemFree(pszBuf);
    RT_NOREF2(uBase, pv);
#undef ADJCURSOR
}
412
413/* API FUnction: See header file */
414void virtioCoreGCPhysHexDump(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, uint16_t cb, uint32_t uBase, const char *pszTitle)
415{
416#define ADJCURSOR(cb) pszOut += cb; cbRemain -= cb;
417 size_t cbPrint = 0, cbRemain = ((cb / 16) + 1) * 80;
418 char *pszBuf = (char *)RTMemAllocZ(cbRemain), *pszOut = pszBuf;
419 AssertMsgReturnVoid(pszBuf, ("Out of Memory"));
420 if (pszTitle)
421 {
422 cbPrint = RTStrPrintf(pszOut, cbRemain, "%s [%d bytes]:\n", pszTitle, cb);
423 ADJCURSOR(cbPrint);
424 }
425 for (uint16_t row = 0; row < (uint16_t)RT_MAX(1, (cb / 16) + 1) && row * 16 < cb; row++)
426 {
427 uint8_t c;
428 cbPrint = RTStrPrintf(pszOut, cbRemain, "%04x: ", row * 16 + uBase); /* line address */
429 ADJCURSOR(cbPrint);
430 for (uint8_t col = 0; col < 16; col++)
431 {
432 uint32_t idx = row * 16 + col;
433 PDMDevHlpPCIPhysRead(pDevIns, GCPhys + idx, &c, 1);
434 if (idx >= cb)
435 cbPrint = RTStrPrintf(pszOut, cbRemain, "-- %s", (col + 1) % 8 ? "" : " ");
436 else
437 cbPrint = RTStrPrintf(pszOut, cbRemain, "%02x %s", c, (col + 1) % 8 ? "" : " ");
438 ADJCURSOR(cbPrint);
439 }
440 for (uint16_t idx = row * 16; idx < row * 16 + 16; idx++)
441 {
442 PDMDevHlpPCIPhysRead(pDevIns, GCPhys + idx, &c, 1);
443 cbPrint = RTStrPrintf(pszOut, cbRemain, "%c", (idx >= cb) ? ' ' : (c >= 0x20 && c <= 0x7e ? c : '.'));
444 ADJCURSOR(cbPrint);
445 }
446 *pszOut++ = '\n';
447 --cbRemain;
448 }
449 Log(("%s\n", pszBuf));
450 RTMemFree(pszBuf);
451 RT_NOREF(uBase);
452#undef ADJCURSOR
453}
454#endif /* LOG_ENABLED */
455
/** API function: See header file
 *
 * Logs (at log level 6) a guest access to a mapped device/config member in a
 * human-friendly form, e.g. "Guest wrote uDeviceStatus 0x0f".
 *
 * @param pszFunc       Name of the calling function (log prefix).
 * @param pszMember     Name of the member being accessed.
 * @param uMemberSize   Size of the member in bytes (to detect partial access).
 * @param pv            The value read/written.
 * @param cb            Size of the access in bytes.
 * @param uOffset       Byte offset of the access within the member.
 * @param fWrite        Non-zero for a write access, zero for a read.
 * @param fHasIndex     Non-zero if the member is an array element (see idx).
 * @param idx           Array index, only meaningful when fHasIndex is set.
 */
void virtioCoreLogMappedIoValue(const char *pszFunc, const char *pszMember, uint32_t uMemberSize,
                                const void *pv, uint32_t cb, uint32_t uOffset, int fWrite,
                                int fHasIndex, uint32_t idx)
{
    if (!LogIs6Enabled())
        return;

    char szIdx[16];
    if (fHasIndex)
        RTStrPrintf(szIdx, sizeof(szIdx), "[%d]", idx);
    else
        szIdx[0] = '\0';

    if (cb == 1 || cb == 2 || cb == 4 || cb == 8)
    {
        char szDepiction[64];
        size_t cchDepiction;
        if (uOffset != 0 || cb != uMemberSize) /* display bounds if partial member access */
            cchDepiction = RTStrPrintf(szDepiction, sizeof(szDepiction), "%s%s[%d:%d]",
                                       pszMember, szIdx, uOffset, uOffset + cb - 1);
        else
            cchDepiction = RTStrPrintf(szDepiction, sizeof(szDepiction), "%s%s", pszMember, szIdx);

        /* padding: dot-fill the depiction to column 30 so the values line up in the log */
        if (cchDepiction < 30)
            szDepiction[cchDepiction++] = ' ';
        while (cchDepiction < 30)
            szDepiction[cchDepiction++] = '.';
        szDepiction[cchDepiction] = '\0';

        RTUINT64U uValue;
        uValue.u = 0;
        memcpy(uValue.au8, pv, cb);
        Log6(("%-23s: Guest %s %s %#0*RX64\n",
              pszFunc, fWrite ? "wrote" : "read ", szDepiction, 2 + cb * 2, uValue.u));
    }
    else /* odd number or oversized access, ... log inline hex-dump style */
    {
        /* NOTE(review): this branch logs the end bound as uOffset + cb (exclusive)
         * while the branch above uses uOffset + cb - 1 (inclusive) -- confirm intent. */
        Log6(("%-23s: Guest %s %s%s[%d:%d]: %.*Rhxs\n",
              pszFunc, fWrite ? "wrote" : "read ", pszMember,
              szIdx, uOffset, uOffset + cb, cb, pv));
    }
    RT_NOREF2(fWrite, pszFunc);
}
501
502/**
503 * Makes the MMIO-mapped Virtio fDeviceStatus registers non-cryptic (buffers to
504 * keep the output clean during multi-threaded activity)
505 */
506DECLINLINE(void) virtioCoreFormatDeviceStatus(uint8_t bStatus, char *pszBuf, size_t uSize)
507{
508
509#define ADJCURSOR(len) cp += len; uSize -= len; sep = (char *)" | ";
510
511 memset(pszBuf, 0, uSize);
512 size_t len;
513 char *cp = pszBuf;
514 char *sep = (char *)"";
515
516 if (bStatus == 0) {
517 RTStrPrintf(cp, uSize, "RESET");
518 return;
519 }
520 if (bStatus & VIRTIO_STATUS_ACKNOWLEDGE)
521 {
522 len = RTStrPrintf(cp, uSize, "ACKNOWLEDGE");
523 ADJCURSOR(len);
524 }
525 if (bStatus & VIRTIO_STATUS_DRIVER)
526 {
527 len = RTStrPrintf(cp, uSize, "%sDRIVER", sep);
528 ADJCURSOR(len);
529 }
530 if (bStatus & VIRTIO_STATUS_FEATURES_OK)
531 {
532 len = RTStrPrintf(cp, uSize, "%sFEATURES_OK", sep);
533 ADJCURSOR(len);
534 }
535 if (bStatus & VIRTIO_STATUS_DRIVER_OK)
536 {
537 len = RTStrPrintf(cp, uSize, "%sDRIVER_OK", sep);
538 ADJCURSOR(len);
539 }
540 if (bStatus & VIRTIO_STATUS_FAILED)
541 {
542 len = RTStrPrintf(cp, uSize, "%sFAILED", sep);
543 ADJCURSOR(len);
544 }
545 if (bStatus & VIRTIO_STATUS_DEVICE_NEEDS_RESET)
546 RTStrPrintf(cp, uSize, "%sNEEDS_RESET", sep);
547
548#undef ADJCURSOR
549}
550
551#ifdef IN_RING3
552
553int virtioCoreR3VirtqAttach(PVIRTIOCORE pVirtio, uint16_t uVirtq, const char *pcszName)
554{
555 LogFunc(("%s\n", pcszName));
556 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
557 pVirtq->uVirtq = uVirtq;
558 pVirtq->uAvailIdxShadow = 0;
559 pVirtq->uUsedIdxShadow = 0;
560 pVirtq->fUsedRingEvent = false;
561 RTStrCopy(pVirtq->szName, sizeof(pVirtq->szName), pcszName);
562 return VINF_SUCCESS;
563}
564
565/** API Fuunction: See header file */
566void virtioCoreR3VirtqInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs, int uVirtq)
567{
568 RT_NOREF(pszArgs);
569 PVIRTIOCORE pVirtio = PDMDEVINS_2_DATA(pDevIns, PVIRTIOCORE);
570 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
571
572 /** @todo add ability to dump physical contents described by any descriptor (using existing VirtIO core API function) */
573// bool fDump = pszArgs && (*pszArgs == 'd' || *pszArgs == 'D'); /* "dump" (avail phys descriptor)"
574
575 uint16_t uAvailIdx = virtioReadAvailRingIdx(pDevIns, pVirtio, pVirtq);
576 uint16_t uAvailIdxShadow = pVirtq->uAvailIdxShadow;
577
578 uint16_t uUsedIdx = virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq);
579 uint16_t uUsedIdxShadow = pVirtq->uUsedIdxShadow;
580
581 PVIRTQBUF pVirtqBuf = NULL;
582
583 bool fEmpty = IS_VIRTQ_EMPTY(pDevIns, pVirtio, pVirtq);
584
585 LogFunc(("%s, empty = %s\n", pVirtq->szName, fEmpty ? "true" : "false"));
586
587 int cSendSegs = 0, cReturnSegs = 0;
588 if (!fEmpty)
589 {
590 virtioCoreR3VirtqAvailBufPeek(pDevIns, pVirtio, uVirtq, &pVirtqBuf);
591 cSendSegs = pVirtqBuf->pSgPhysSend ? pVirtqBuf->pSgPhysSend->cSegs : 0;
592 cReturnSegs = pVirtqBuf->pSgPhysReturn ? pVirtqBuf->pSgPhysReturn->cSegs : 0;
593 }
594
595 bool fAvailNoInterrupt = virtioReadAvailRingFlags(pDevIns, pVirtio, pVirtq) & VIRTQ_AVAIL_F_NO_INTERRUPT;
596 bool fUsedNoNotify = virtioReadUsedRingFlags(pDevIns, pVirtio, pVirtq) & VIRTQ_USED_F_NO_NOTIFY;
597
598
599 pHlp->pfnPrintf(pHlp, " queue enabled: ........... %s\n", pVirtq->uEnable ? "true" : "false");
600 pHlp->pfnPrintf(pHlp, " size: .................... %d\n", pVirtq->uSize);
601 pHlp->pfnPrintf(pHlp, " notify offset: ........... %d\n", pVirtq->uNotifyOffset);
602 if (pVirtio->fMsiSupport)
603 pHlp->pfnPrintf(pHlp, " MSIX vector: ....... %4.4x\n", pVirtq->uMsix);
604 pHlp->pfnPrintf(pHlp, "\n");
605 pHlp->pfnPrintf(pHlp, " avail ring (%d entries):\n", uAvailIdx - uAvailIdxShadow);
606 pHlp->pfnPrintf(pHlp, " index: ................ %d\n", uAvailIdx);
607 pHlp->pfnPrintf(pHlp, " shadow: ............... %d\n", uAvailIdxShadow);
608 pHlp->pfnPrintf(pHlp, " flags: ................ %s\n", fAvailNoInterrupt ? "NO_INTERRUPT" : "");
609 pHlp->pfnPrintf(pHlp, "\n");
610 pHlp->pfnPrintf(pHlp, " used ring (%d entries):\n", uUsedIdx - uUsedIdxShadow);
611 pHlp->pfnPrintf(pHlp, " index: ................ %d\n", uUsedIdx);
612 pHlp->pfnPrintf(pHlp, " shadow: ............... %d\n", uUsedIdxShadow);
613 pHlp->pfnPrintf(pHlp, " flags: ................ %s\n", fUsedNoNotify ? "NO_NOTIFY" : "");
614 pHlp->pfnPrintf(pHlp, "\n");
615 if (!fEmpty)
616 {
617 pHlp->pfnPrintf(pHlp, " desc chain:\n");
618 pHlp->pfnPrintf(pHlp, " head idx: ............. %d\n", uUsedIdx);
619 pHlp->pfnPrintf(pHlp, " segs: ................. %d\n", cSendSegs + cReturnSegs);
620 pHlp->pfnPrintf(pHlp, " refCnt ................ %d\n", pVirtqBuf->cRefs);
621 pHlp->pfnPrintf(pHlp, "\n");
622 pHlp->pfnPrintf(pHlp, " host-to-guest (%d bytes):\n", pVirtqBuf->cbPhysSend);
623 pHlp->pfnPrintf(pHlp, " segs: .............. %d\n", cSendSegs);
624 if (cSendSegs)
625 {
626 pHlp->pfnPrintf(pHlp, " index: ............. %d\n", pVirtqBuf->pSgPhysSend->idxSeg);
627 pHlp->pfnPrintf(pHlp, " unsent ............. %d\n", pVirtqBuf->pSgPhysSend->cbSegLeft);
628 }
629 pHlp->pfnPrintf(pHlp, "\n");
630 pHlp->pfnPrintf(pHlp, " guest-to-host (%d bytes)\n", pVirtqBuf->cbPhysReturn);
631 pHlp->pfnPrintf(pHlp, " segs: .............. %d\n", cReturnSegs);
632 if (cReturnSegs)
633 {
634 pHlp->pfnPrintf(pHlp, " index: ............. %d\n", pVirtqBuf->pSgPhysReturn->idxSeg);
635 pHlp->pfnPrintf(pHlp, " unsent ............. %d\n", pVirtqBuf->pSgPhysReturn->cbSegLeft);
636 }
637 } else
638 pHlp->pfnPrintf(pHlp, " No desc chains available\n");
639 pHlp->pfnPrintf(pHlp, "\n");
640
641}
642
643/** API Function: See header file */
644uint32_t virtioCoreR3VirtqBufRetain(PVIRTQBUF pVirtqBuf)
645{
646 AssertReturn(pVirtqBuf, UINT32_MAX);
647 AssertReturn(pVirtqBuf->u32Magic == VIRTQBUF_MAGIC, UINT32_MAX);
648 uint32_t cRefs = ASMAtomicIncU32(&pVirtqBuf->cRefs);
649 Assert(cRefs > 1);
650 Assert(cRefs < 16);
651 return cRefs;
652}
653
654
655/** API Function: See header file */
656uint32_t virtioCoreR3VirtqBufRelease(PVIRTIOCORE pVirtio, PVIRTQBUF pVirtqBuf)
657{
658 if (!pVirtqBuf)
659 return 0;
660 AssertReturn(pVirtqBuf, 0);
661 AssertReturn(pVirtqBuf->u32Magic == VIRTQBUF_MAGIC, 0);
662 uint32_t cRefs = ASMAtomicDecU32(&pVirtqBuf->cRefs);
663 Assert(cRefs < 16);
664 if (cRefs == 0)
665 {
666 pVirtqBuf->u32Magic = ~VIRTQBUF_MAGIC;
667 RTMemFree(pVirtqBuf);
668 STAM_REL_COUNTER_INC(&pVirtio->StatDescChainsFreed);
669 }
670 return cRefs;
671}
672
/** API Function: See header file
 *
 * Signals the guest that device-specific configuration has changed, raising
 * the config-change interrupt cause with the MSI-X vector reserved for
 * configuration events.
 */
void virtioCoreNotifyConfigChanged(PVIRTIOCORE pVirtio)
{
    virtioKick(pVirtio->pDevInsR3, pVirtio, VIRTIO_ISR_DEVICE_CONFIG, pVirtio->uMsixConfig);
}
678
679/** API Function: See header file */
680void virtioCoreVirtqEnableNotify(PVIRTIOCORE pVirtio, uint16_t uVirtq, bool fEnable)
681{
682
683 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
684 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
685
686 if (pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
687 {
688 uint16_t fFlags = virtioReadUsedRingFlags(pVirtio->pDevInsR3, pVirtio, pVirtq);
689
690 if (fEnable)
691 fFlags &= ~VIRTQ_USED_F_NO_NOTIFY;
692 else
693 fFlags |= VIRTQ_USED_F_NO_NOTIFY;
694
695 virtioWriteUsedRingFlags(pVirtio->pDevInsR3, pVirtio, pVirtq, fFlags);
696 }
697}
698
699/** API function: See Header file */
700void virtioCoreResetAll(PVIRTIOCORE pVirtio)
701{
702 LogFunc(("\n"));
703 pVirtio->fDeviceStatus |= VIRTIO_STATUS_DEVICE_NEEDS_RESET;
704 if (pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
705 {
706 pVirtio->fGenUpdatePending = true;
707 virtioKick(pVirtio->pDevInsR3, pVirtio, VIRTIO_ISR_DEVICE_CONFIG, pVirtio->uMsixConfig);
708 }
709}
710
/** API function: See Header file
 *
 * Fetches the next available descriptor chain WITHOUT advancing the device's
 * avail-ring shadow index, i.e. the fRemove=false variant of
 * virtioCoreR3VirtqAvailBufGet(); pair with virtioCoreR3VirtqAvailBufNext()
 * to consume the entry afterwards.
 */
int virtioCoreR3VirtqAvailBufPeek(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq,
                                  PPVIRTQBUF ppVirtqBuf)
{
    return virtioCoreR3VirtqAvailBufGet(pDevIns, pVirtio, uVirtq, ppVirtqBuf, false);
}
717
718/** API function: See Header file */
719int virtioCoreR3VirtqAvailBufNext(PVIRTIOCORE pVirtio, uint16_t uVirtq)
720{
721 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
722 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
723
724 AssertMsgReturn(IS_DRIVER_OK(pVirtio) && pVirtq->uEnable,
725 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
726
727 if (IS_VIRTQ_EMPTY(pVirtio->pDevInsR3, pVirtio, pVirtq))
728 return VERR_NOT_AVAILABLE;
729
730 Log6Func(("%s avail shadow idx: %u\n", pVirtq->szName, pVirtq->uAvailIdxShadow));
731 pVirtq->uAvailIdxShadow++;
732
733 return VINF_SUCCESS;
734}
735
736
/** API Function: See header file
 *
 * Walks the descriptor table starting at @a uHeadIdx, following the
 * VIRTQ_DESC_F_NEXT links, and builds a VIRTQBUF descriptor-chain object in
 * which device-readable (OUT) and device-writable (IN) segments are collected
 * into separate scatter/gather vectors.  The chain is returned with one
 * reference held by the caller; release with virtioCoreR3VirtqBufRelease().
 */
int virtioCoreR3VirtqAvailBufGet(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq,
                                 uint16_t uHeadIdx, PPVIRTQBUF ppVirtqBuf)
{
    AssertReturn(ppVirtqBuf, VERR_INVALID_POINTER);
    *ppVirtqBuf = NULL;

    AssertMsgReturn(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues),
                    ("uVirtq out of range"), VERR_INVALID_PARAMETER);

    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];

    AssertMsgReturn(IS_DRIVER_OK(pVirtio) && pVirtq->uEnable,
                    ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);

    uint16_t uDescIdx = uHeadIdx;

    Log6Func(("%s DESC CHAIN: (head) desc_idx=%u\n", pVirtio->aVirtqueues[uVirtq].szName, uHeadIdx));

    /*
     * Allocate and initialize the descriptor chain structure.
     */
    PVIRTQBUF pVirtqBuf = (PVIRTQBUF)RTMemAllocZ(sizeof(VIRTQBUF_T));
    AssertReturn(pVirtqBuf, VERR_NO_MEMORY);
    pVirtqBuf->u32Magic = VIRTQBUF_MAGIC;
    pVirtqBuf->cRefs = 1;                   /* the caller owns this initial reference */
    pVirtqBuf->uHeadIdx = uHeadIdx;
    pVirtqBuf->uVirtq = uVirtq;
    *ppVirtqBuf = pVirtqBuf;

    /*
     * Gather segments.
     */
    VIRTQ_DESC_T desc;

    uint32_t cbIn = 0;                      /* total bytes in device-writable segments */
    uint32_t cbOut = 0;                     /* total bytes in device-readable segments */
    uint32_t cSegsIn = 0;
    uint32_t cSegsOut = 0;

    PVIRTIOSGSEG paSegsIn = pVirtqBuf->aSegsIn;
    PVIRTIOSGSEG paSegsOut = pVirtqBuf->aSegsOut;

    do
    {
        PVIRTIOSGSEG pSeg;

        /*
         * Malicious guests may go beyond paSegsIn or paSegsOut boundaries by linking
         * several descriptors into a loop. Since there is no legitimate way to get a sequences of
         * linked descriptors exceeding the total number of descriptors in the ring (see @bugref{8620}),
         * the following aborts I/O if breach and employs a simple log throttling algorithm to notify.
         */
        if (cSegsIn + cSegsOut >= VIRTQ_MAX_ENTRIES)
        {
            static volatile uint32_t s_cMessages = 0;
            static volatile uint32_t s_cThreshold = 1;
            if (ASMAtomicIncU32(&s_cMessages) == ASMAtomicReadU32(&s_cThreshold))
            {
                LogRelMax(64, ("Too many linked descriptors; check if the guest arranges descriptors in a loop.\n"));
                if (ASMAtomicReadU32(&s_cMessages) != 1)
                    LogRelMax(64, ("(the above error has occured %u times so far)\n", ASMAtomicReadU32(&s_cMessages)));
                /* Exponentially back off the throttled message. */
                ASMAtomicWriteU32(&s_cThreshold, ASMAtomicReadU32(&s_cThreshold) * 10);
            }
            break;
        }
        RT_UNTRUSTED_VALIDATED_FENCE();     /* don't let the CPU speculate past the bounds check */

        virtioReadDesc(pDevIns, pVirtio, pVirtq, uDescIdx, &desc);

        if (desc.fFlags & VIRTQ_DESC_F_WRITE)
        {
            /* Device-writable (IN) segment: the guest reads the result from here. */
            Log6Func(("%s IN desc_idx=%u seg=%u addr=%RGp cb=%u\n", pVirtq->szName, uDescIdx, cSegsIn, desc.GCPhysBuf, desc.cb));
            cbIn += desc.cb;
            pSeg = &paSegsIn[cSegsIn++];
        }
        else
        {
            /* Device-readable (OUT) segment: data the guest is sending to the device. */
            Log6Func(("%s OUT desc_idx=%u seg=%u addr=%RGp cb=%u\n", pVirtq->szName, uDescIdx, cSegsOut, desc.GCPhysBuf, desc.cb));
            cbOut += desc.cb;
            pSeg = &paSegsOut[cSegsOut++];
#ifdef DEEP_DEBUG
            if (LogIs11Enabled())
            {
                virtioCoreGCPhysHexDump(pDevIns, desc.GCPhysBuf, desc.cb, 0, NULL);
                Log(("\n"));
            }
#endif
        }

        pSeg->GCPhys = desc.GCPhysBuf;
        pSeg->cbSeg = desc.cb;

        uDescIdx = desc.uDescIdxNext;
    } while (desc.fFlags & VIRTQ_DESC_F_NEXT);

    /*
     * Add segments to the descriptor chain structure.
     */
    if (cSegsIn)
    {
        virtioCoreGCPhysChainInit(&pVirtqBuf->SgBufIn, paSegsIn, cSegsIn);
        pVirtqBuf->pSgPhysReturn = &pVirtqBuf->SgBufIn;
        pVirtqBuf->cbPhysReturn = cbIn;
        STAM_REL_COUNTER_ADD(&pVirtio->StatDescChainsSegsIn, cSegsIn);
    }

    if (cSegsOut)
    {
        virtioCoreGCPhysChainInit(&pVirtqBuf->SgBufOut, paSegsOut, cSegsOut);
        pVirtqBuf->pSgPhysSend = &pVirtqBuf->SgBufOut;
        pVirtqBuf->cbPhysSend = cbOut;
        STAM_REL_COUNTER_ADD(&pVirtio->StatDescChainsSegsOut, cSegsOut);
    }

    STAM_REL_COUNTER_INC(&pVirtio->StatDescChainsAllocated);
    Log6Func(("%s -- segs OUT: %u (%u bytes) IN: %u (%u bytes) --\n",
              pVirtq->szName, cSegsOut, cbOut, cSegsIn, cbIn));

    return VINF_SUCCESS;
}
858
859/** API function: See Header file */
860int virtioCoreR3VirtqAvailBufGet(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq,
861 PPVIRTQBUF ppVirtqBuf, bool fRemove)
862{
863 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
864 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
865
866 if (IS_VIRTQ_EMPTY(pDevIns, pVirtio, pVirtq))
867 return VERR_NOT_AVAILABLE;
868
869 uint16_t uHeadIdx = virtioReadAvailDescIdx(pDevIns, pVirtio, pVirtq, pVirtq->uAvailIdxShadow);
870
871 if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
872 virtioWriteUsedAvailEvent(pDevIns,pVirtio, pVirtq, pVirtq->uAvailIdxShadow + 1);
873
874 if (fRemove)
875 pVirtq->uAvailIdxShadow++;
876
877 int rc = virtioCoreR3VirtqAvailBufGet(pDevIns, pVirtio, uVirtq, uHeadIdx, ppVirtqBuf);
878 return rc;
879}
880
/** API function: See Header file */
int virtioCoreR3VirtqUsedBufPut(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq, PRTSGBUF pSgVirtReturn,
                                PVIRTQBUF pVirtqBuf, bool fFence)
{
    Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];

    /* Guest-physical destination: the device-writable (IN) segments of the buffer being returned. */
    PVIRTIOSGBUF pSgPhysReturn = pVirtqBuf->pSgPhysReturn;

    Assert(pVirtqBuf->u32Magic == VIRTQBUF_MAGIC);
    Assert(pVirtqBuf->cRefs > 0);

    AssertMsgReturn(IS_DRIVER_OK(pVirtio), ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);

    Log6Func(("Copying client data to %s, desc chain (head desc_idx %d)\n",
              VIRTQNAME(pVirtio, uVirtq), virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq)));

    /* Copy s/g buf (virtual memory) to guest phys mem (IN direction). */

    size_t cbCopy = 0, cbTotal = 0, cbRemain = 0;

    /* pSgVirtReturn may be NULL: the caller then returns the buffer with no payload (zero-length used entry). */
    if (pSgVirtReturn)
    {
        size_t cbTarget = virtioCoreGCPhysChainCalcBufSize(pSgPhysReturn);
        cbRemain = cbTotal = RTSgBufCalcTotalLength(pSgVirtReturn);
        /* Refuse to write past the end of the guest-supplied IN buffer. */
        AssertMsgReturn(cbTarget >= cbRemain, ("No space to write data to phys memory"), VERR_BUFFER_OVERFLOW);
        virtioCoreGCPhysChainReset(pSgPhysReturn);
        while (cbRemain)
        {
            /* Advance both chains in lock-step by the smaller of the two current segment remainders. */
            cbCopy = RT_MIN(pSgVirtReturn->cbSegLeft, pSgPhysReturn->cbSegLeft);
            Assert(cbCopy > 0);
            PDMDevHlpPhysWrite(pDevIns, (RTGCPHYS)pSgPhysReturn->GCPhysCur, pSgVirtReturn->pvSegCur, cbCopy);
            RTSgBufAdvance(pSgVirtReturn, cbCopy);
            virtioCoreGCPhysChainAdvance(pSgPhysReturn, cbCopy);
            cbRemain -= cbCopy;
        }

        /* Optional store fence so the payload is globally visible before the used-ring entry below. */
        if (fFence)
            RT_UNTRUSTED_NONVOLATILE_COPY_FENCE(); /* needed? */

        /* Used-ring length field is 32-bit; sanity-check the last segment copy fits. */
        Assert(!(cbCopy >> 32));
    }

    /* If this write-ahead crosses threshold where the driver wants to get an event flag it */
    if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
        if (pVirtq->uUsedIdxShadow == virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq))
            pVirtq->fUsedRingEvent = true;

    /*
     * Place used buffer's descriptor in used ring but don't update used ring's slot index.
     * That will be done with a subsequent client call to virtioCoreVirtqUsedRingSync() */
    virtioWriteUsedElem(pDevIns, pVirtio, pVirtq, pVirtq->uUsedIdxShadow++, pVirtqBuf->uHeadIdx, (uint32_t)cbTotal);

    if (pSgVirtReturn)
        Log6Func((".... Copied %zu bytes in %d segs to %u byte buffer, residual=%zu\n",
                  cbTotal - cbRemain, pSgVirtReturn->cSegs, pVirtqBuf->cbPhysReturn, pVirtqBuf->cbPhysReturn - cbTotal));

    Log6Func(("Write ahead used_idx=%u, %s used_idx=%u\n",
              pVirtq->uUsedIdxShadow, VIRTQNAME(pVirtio, uVirtq), virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq)));

    return VINF_SUCCESS;
}
943
944
945#endif /* IN_RING3 */
946
947/** API function: See Header file */
948int virtioCoreVirtqUsedRingSync(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq)
949{
950 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
951 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
952
953 AssertMsgReturn(IS_DRIVER_OK(pVirtio) && pVirtq->uEnable,
954 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
955
956 Log6Func(("Updating %s used_idx to %u\n", pVirtq->szName, pVirtq->uUsedIdxShadow));
957
958 virtioWriteUsedRingIdx(pDevIns, pVirtio, pVirtq, pVirtq->uUsedIdxShadow);
959 virtioCoreNotifyGuestDriver(pDevIns, pVirtio, uVirtq);
960
961 return VINF_SUCCESS;
962}
963
964/**
965 * This is called from the MMIO callback code when the guest does an MMIO access to the
966 * mapped queue notification capability area corresponding to a particular queue, to notify
967 * the queue handler of available data in the avail ring of the queue (VirtIO 1.0, 4.1.4.4.1)
968 *
969 * @param pDevIns The device instance.
970 * @param pVirtio Pointer to the shared virtio state.
971 * @param uVirtq Virtq to check for guest interrupt handling preference
972 * @param uNotifyIdx Notification index
973 */
974static void virtioCoreVirtqNotified(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq, uint16_t uNotifyIdx)
975{
976 PVIRTIOCORECC pVirtioCC = PDMDEVINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
977
978 /* See VirtIO 1.0, section 4.1.5.2 It implies that uVirtq and uNotifyIdx should match.
979 * Disregarding this notification may cause throughput to stop, however there's no way to know
980 * which was queue was intended for wake-up if the two parameters disagree. */
981
982 AssertMsg(uNotifyIdx == uVirtq,
983 ("Guest kicked virtq %d's notify addr w/non-corresponding virtq idx %d\n",
984 uVirtq, uNotifyIdx));
985 RT_NOREF(uNotifyIdx);
986
987 AssertReturnVoid(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
988 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
989
990 Log6Func(("%s (desc chains: %u)\n", pVirtq->szName,
991 virtioCoreVirtqAvailBufCount_inline(pDevIns, pVirtio, pVirtq)));
992
993 /* Inform client */
994 pVirtioCC->pfnVirtqNotified(pDevIns, pVirtio, uVirtq);
995 RT_NOREF2(pVirtio, pVirtq);
996}
997
998/**
999 * Trigger MSI-X or INT# interrupt to notify guest of data added to used ring of
1000 * the specified virtq, depending on the interrupt configuration of the device
1001 * and depending on negotiated and realtime constraints flagged by the guest driver.
1002 *
1003 * See VirtIO 1.0 specification (section 2.4.7).
1004 *
1005 * @param pDevIns The device instance.
1006 * @param pVirtio Pointer to the shared virtio state.
1007 * @param uVirtq Virtq to check for guest interrupt handling preference
1008 */
1009static void virtioCoreNotifyGuestDriver(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq)
1010{
1011 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
1012 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
1013
1014 if (!IS_DRIVER_OK(pVirtio))
1015 {
1016 LogFunc(("Guest driver not in ready state.\n"));
1017 return;
1018 }
1019
1020 if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
1021 {
1022 if (pVirtq->fUsedRingEvent)
1023 {
1024#ifdef IN_RING3
1025 Log6Func(("...kicking guest %s, VIRTIO_F_EVENT_IDX set and threshold (%d) reached\n",
1026 pVirtq->szName, (uint16_t)virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq)));
1027#endif
1028 virtioKick(pDevIns, pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtq->uMsix);
1029 pVirtq->fUsedRingEvent = false;
1030 return;
1031 }
1032#ifdef IN_RING3
1033 Log6Func(("...skip interrupt %s, VIRTIO_F_EVENT_IDX set but threshold (%d) not reached (%d)\n",
1034 pVirtq->szName,(uint16_t)virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq), pVirtq->uUsedIdxShadow));
1035#endif
1036 }
1037 else
1038 {
1039 /** If guest driver hasn't suppressed interrupts, interrupt */
1040 if (!(virtioReadAvailRingFlags(pDevIns, pVirtio, pVirtq) & VIRTQ_AVAIL_F_NO_INTERRUPT))
1041 {
1042 virtioKick(pDevIns, pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtq->uMsix);
1043 return;
1044 }
1045 Log6Func(("...skipping interrupt for %s (guest set VIRTQ_AVAIL_F_NO_INTERRUPT)\n", pVirtq->szName));
1046 }
1047}
1048
1049/**
1050 * Raise interrupt or MSI-X
1051 *
1052 * @param pDevIns The device instance.
1053 * @param pVirtio Pointer to the shared virtio state.
1054 * @param uCause Interrupt cause bit mask to set in PCI ISR port.
1055 * @param uVec MSI-X vector, if enabled
1056 */
1057static int virtioKick(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint8_t uCause, uint16_t uMsixtor)
1058{
1059 if (uCause == VIRTIO_ISR_VIRTQ_INTERRUPT)
1060 Log6Func(("reason: buffer added to 'used' ring.\n"));
1061 else
1062 if (uCause == VIRTIO_ISR_DEVICE_CONFIG)
1063 Log6Func(("reason: device config change\n"));
1064
1065 if (!pVirtio->fMsiSupport)
1066 {
1067 pVirtio->uISR |= uCause;
1068 PDMDevHlpPCISetIrq(pDevIns, 0, PDM_IRQ_LEVEL_HIGH);
1069 }
1070 else if (uMsixtor != VIRTIO_MSI_NO_VECTOR)
1071 PDMDevHlpPCISetIrq(pDevIns, uMsixtor, 1);
1072 return VINF_SUCCESS;
1073}
1074
1075/**
1076 * Lower interrupt (Called when guest reads ISR and when resetting)
1077 *
1078 * @param pDevIns The device instance.
1079 */
1080static void virtioLowerInterrupt(PPDMDEVINS pDevIns, uint16_t uMsixtor)
1081{
1082 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1083 if (!pVirtio->fMsiSupport)
1084 PDMDevHlpPCISetIrq(pDevIns, 0, PDM_IRQ_LEVEL_LOW);
1085 else if (uMsixtor != VIRTIO_MSI_NO_VECTOR)
1086 PDMDevHlpPCISetIrq(pDevIns, pVirtio->uMsixConfig, PDM_IRQ_LEVEL_LOW);
1087}
1088
1089#ifdef IN_RING3
1090static void virtioResetVirtq(PVIRTIOCORE pVirtio, uint16_t uVirtq)
1091{
1092 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
1093 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
1094
1095 pVirtq->uAvailIdxShadow = 0;
1096 pVirtq->uUsedIdxShadow = 0;
1097 pVirtq->uEnable = false;
1098 pVirtq->uSize = VIRTQ_MAX_ENTRIES;
1099 pVirtq->uNotifyOffset = uVirtq;
1100 pVirtq->uMsix = uVirtq + 2;
1101 pVirtq->fUsedRingEvent = false;
1102
1103 if (!pVirtio->fMsiSupport) /* VirtIO 1.0, 4.1.4.3 and 4.1.5.1.2 */
1104 pVirtq->uMsix = VIRTIO_MSI_NO_VECTOR;
1105
1106 virtioLowerInterrupt(pVirtio->pDevInsR3, pVirtq->uMsix);
1107}
1108
1109static void virtioResetDevice(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio)
1110{
1111 Log2Func(("\n"));
1112 pVirtio->uDeviceFeaturesSelect = 0;
1113 pVirtio->uDriverFeaturesSelect = 0;
1114 pVirtio->uConfigGeneration = 0;
1115 pVirtio->fDeviceStatus = 0;
1116 pVirtio->uISR = 0;
1117
1118 if (!pVirtio->fMsiSupport)
1119 virtioLowerInterrupt(pDevIns, 0);
1120 else
1121 {
1122 virtioLowerInterrupt(pDevIns, pVirtio->uMsixConfig);
1123 for (int i = 0; i < VIRTQ_MAX_COUNT; i++)
1124 virtioLowerInterrupt(pDevIns, pVirtio->aVirtqueues[i].uMsix);
1125 }
1126
1127 if (!pVirtio->fMsiSupport) /* VirtIO 1.0, 4.1.4.3 and 4.1.5.1.2 */
1128 pVirtio->uMsixConfig = VIRTIO_MSI_NO_VECTOR;
1129
1130 for (uint16_t uVirtq = 0; uVirtq < VIRTQ_MAX_COUNT; uVirtq++)
1131 virtioResetVirtq(pVirtio, uVirtq);
1132}
1133
1134/**
1135 * Invoked by this implementation when guest driver resets the device.
1136 * The driver itself will not until the device has read the status change.
1137 */
1138static void virtioGuestR3WasReset(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC)
1139{
1140 LogFunc(("Guest reset the device\n"));
1141
1142 /* Let the client know */
1143 pVirtioCC->pfnStatusChanged(pVirtio, pVirtioCC, 0);
1144 virtioResetDevice(pDevIns, pVirtio);
1145}
1146#endif /* IN_RING3 */
1147
1148/**
1149 * Handle accesses to Common Configuration capability
1150 *
1151 * @returns VBox status code
1152 *
1153 * @param pDevIns The device instance.
1154 * @param pVirtio Pointer to the shared virtio state.
1155 * @param pVirtioCC Pointer to the current context virtio state.
1156 * @param fWrite Set if write access, clear if read access.
1157 * @param uOffsetOfAccess The common configuration capability offset.
1158 * @param cb Number of bytes to read or write
1159 * @param pv Pointer to location to write to or read from
1160 */
1161static int virtioCommonCfgAccessed(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC,
1162 int fWrite, uint32_t uOffsetOfAccess, unsigned cb, void *pv)
1163{
1164 uint16_t uVirtq = pVirtio->uVirtqSelect;
1165 int rc = VINF_SUCCESS;
1166 uint64_t val;
1167 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1168 {
1169 if (fWrite) /* Guest WRITE pCommonCfg>uDeviceFeatures */
1170 {
1171 /* VirtIO 1.0, 4.1.4.3 states device_feature is a (guest) driver readonly field,
1172 * yet the linux driver attempts to write/read it back twice */
1173 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1174 LogFunc(("... WARNING: Guest attempted to write readonly virtio_pci_common_cfg.device_feature (ignoring)\n"));
1175 return VINF_IOM_MMIO_UNUSED_00;
1176 }
1177 else /* Guest READ pCommonCfg->uDeviceFeatures */
1178 {
1179 switch (pVirtio->uDeviceFeaturesSelect)
1180 {
1181 case 0:
1182 val = pVirtio->uDeviceFeatures & UINT32_C(0xffffffff);
1183 memcpy(pv, &val, cb);
1184 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1185 break;
1186 case 1:
1187 val = pVirtio->uDeviceFeatures >> 32;
1188 memcpy(pv, &val, cb);
1189 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess + sizeof(uint32_t));
1190 break;
1191 default:
1192 LogFunc(("Guest read uDeviceFeatures with out of range selector (%#x), returning 0\n",
1193 pVirtio->uDeviceFeaturesSelect));
1194 return VINF_IOM_MMIO_UNUSED_00;
1195 }
1196 }
1197 }
1198 else
1199 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1200 {
1201 if (fWrite) /* Guest WRITE pCommonCfg->udriverFeatures */
1202 {
1203 switch (pVirtio->uDriverFeaturesSelect)
1204 {
1205 case 0:
1206 memcpy(&pVirtio->uDriverFeatures, pv, cb);
1207 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1208 break;
1209 case 1:
1210 memcpy((char *)&pVirtio->uDriverFeatures + sizeof(uint32_t), pv, cb);
1211 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess + sizeof(uint32_t));
1212 break;
1213 default:
1214 LogFunc(("Guest wrote uDriverFeatures with out of range selector (%#x), returning 0\n",
1215 pVirtio->uDriverFeaturesSelect));
1216 return VINF_SUCCESS;
1217 }
1218 }
1219 /* Guest READ pCommonCfg->udriverFeatures */
1220 {
1221 switch (pVirtio->uDriverFeaturesSelect)
1222 {
1223 case 0:
1224 val = pVirtio->uDriverFeatures & 0xffffffff;
1225 memcpy(pv, &val, cb);
1226 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1227 break;
1228 case 1:
1229 val = (pVirtio->uDriverFeatures >> 32) & 0xffffffff;
1230 memcpy(pv, &val, cb);
1231 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess + 4);
1232 break;
1233 default:
1234 LogFunc(("Guest read uDriverFeatures with out of range selector (%#x), returning 0\n",
1235 pVirtio->uDriverFeaturesSelect));
1236 return VINF_IOM_MMIO_UNUSED_00;
1237 }
1238 }
1239 }
1240 else
1241 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uNumVirtqs, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1242 {
1243 if (fWrite)
1244 {
1245 Log2Func(("Guest attempted to write readonly virtio_pci_common_cfg.num_queues\n"));
1246 return VINF_SUCCESS;
1247 }
1248 *(uint16_t *)pv = VIRTQ_MAX_COUNT;
1249 VIRTIO_DEV_CONFIG_LOG_ACCESS(uNumVirtqs, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1250 }
1251 else
1252 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(fDeviceStatus, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1253 {
1254 if (fWrite) /* Guest WRITE pCommonCfg->fDeviceStatus */
1255 {
1256 pVirtio->fDeviceStatus = *(uint8_t *)pv;
1257 bool fDeviceReset = pVirtio->fDeviceStatus == 0;
1258
1259 if (LogIs7Enabled())
1260 {
1261 char szOut[80] = { 0 };
1262 virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut));
1263 LogFunc(("Guest wrote fDeviceStatus ................ (%s)\n", szOut));
1264 }
1265 bool const fStatusChanged =
1266 (pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK) != (pVirtio->uPrevDeviceStatus & VIRTIO_STATUS_DRIVER_OK);
1267
1268 if (fDeviceReset || fStatusChanged)
1269 {
1270#ifdef IN_RING0
1271 /* Since VirtIO status changes are cumbersome by nature, e.g. not a benchmark priority,
1272 * handle the rest in R3 to facilitate logging or whatever dev-specific client needs to do */
1273 Log6Func(("RING0 => RING3 (demote)\n"));
1274 return VINF_IOM_R3_MMIO_WRITE;
1275#endif
1276 }
1277
1278#ifdef IN_RING3
1279 /*
1280 * Notify client only if status actually changed from last time and when we're reset.
1281 */
1282 if (fDeviceReset)
1283 virtioGuestR3WasReset(pDevIns, pVirtio, pVirtioCC);
1284
1285 if (fStatusChanged)
1286 pVirtioCC->pfnStatusChanged(pVirtio, pVirtioCC, pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK);
1287#endif
1288 /*
1289 * Save the current status for the next write so we can see what changed.
1290 */
1291 pVirtio->uPrevDeviceStatus = pVirtio->fDeviceStatus;
1292 }
1293 else /* Guest READ pCommonCfg->fDeviceStatus */
1294 {
1295 *(uint8_t *)pv = pVirtio->fDeviceStatus;
1296
1297 if (LogIs7Enabled())
1298 {
1299 char szOut[80] = { 0 };
1300 virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut));
1301 LogFunc(("Guest read fDeviceStatus ................ (%s)\n", szOut));
1302 }
1303 }
1304 }
1305 else
1306 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixConfig, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1307 VIRTIO_DEV_CONFIG_ACCESS( uMsixConfig, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1308 else
1309 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uDeviceFeaturesSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1310 VIRTIO_DEV_CONFIG_ACCESS( uDeviceFeaturesSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1311 else
1312 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uDriverFeaturesSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1313 VIRTIO_DEV_CONFIG_ACCESS( uDriverFeaturesSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1314 else
1315 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uConfigGeneration, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1316 VIRTIO_DEV_CONFIG_ACCESS( uConfigGeneration, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1317 else
1318 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uVirtqSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1319 VIRTIO_DEV_CONFIG_ACCESS( uVirtqSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1320 else
1321 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( GCPhysVirtqDesc, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1322 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( GCPhysVirtqDesc, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1323 else
1324 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( GCPhysVirtqAvail, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1325 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( GCPhysVirtqAvail, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1326 else
1327 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( GCPhysVirtqUsed, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1328 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( GCPhysVirtqUsed, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1329 else
1330 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uSize, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1331 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uSize, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1332 else
1333 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uEnable, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1334 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uEnable, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1335 else
1336 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uNotifyOffset, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1337 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uNotifyOffset, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1338 else
1339 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsix, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1340 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uMsix, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1341 else
1342 {
1343 Log2Func(("Bad guest %s access to virtio_pci_common_cfg: uOffsetOfAccess=%#x (%d), cb=%d\n",
1344 fWrite ? "write" : "read ", uOffsetOfAccess, uOffsetOfAccess, cb));
1345 return fWrite ? VINF_SUCCESS : VINF_IOM_MMIO_UNUSED_00;
1346 }
1347
1348#ifndef IN_RING3
1349 RT_NOREF(pDevIns, pVirtioCC);
1350#endif
1351 return rc;
1352}
1353
1354/**
1355 * @callback_method_impl{FNIOMMMIONEWREAD,
1356 * Memory mapped I/O Handler for PCI Capabilities read operations.}
1357 *
1358 * This MMIO handler specifically supports the VIRTIO_PCI_CAP_PCI_CFG capability defined
1359 * in the VirtIO 1.0 specification, section 4.1.4.7, and as such is restricted to reads
1360 * of 1, 2 or 4 bytes, only.
1361 *
1362 */
1363static DECLCALLBACK(VBOXSTRICTRC) virtioMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
1364{
1365 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1366 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1367 AssertReturn(cb == 1 || cb == 2 || cb == 4, VERR_INVALID_PARAMETER);
1368 Assert(pVirtio == (PVIRTIOCORE)pvUser); RT_NOREF(pvUser);
1369
1370 uint32_t uOffset;
1371 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocDeviceCap))
1372 {
1373#ifdef IN_RING3
1374 /*
1375 * Callback to client to manage device-specific configuration.
1376 */
1377 VBOXSTRICTRC rcStrict = pVirtioCC->pfnDevCapRead(pDevIns, uOffset, pv, cb);
1378
1379 /*
1380 * Additionally, anytime any part of the device-specific configuration (which our client maintains)
1381 * is READ it needs to be checked to see if it changed since the last time any part was read, in
1382 * order to maintain the config generation (see VirtIO 1.0 spec, section 4.1.4.3.1)
1383 */
1384 bool fDevSpecificFieldChanged = RT_BOOL(memcmp(pVirtioCC->pbDevSpecificCfg + uOffset,
1385 pVirtioCC->pbPrevDevSpecificCfg + uOffset,
1386 RT_MIN(cb, pVirtioCC->cbDevSpecificCfg - uOffset)));
1387
1388 memcpy(pVirtioCC->pbPrevDevSpecificCfg, pVirtioCC->pbDevSpecificCfg, pVirtioCC->cbDevSpecificCfg);
1389
1390 if (pVirtio->fGenUpdatePending || fDevSpecificFieldChanged)
1391 {
1392 ++pVirtio->uConfigGeneration;
1393 Log6Func(("Bumped cfg. generation to %d because %s%s\n",
1394 pVirtio->uConfigGeneration,
1395 fDevSpecificFieldChanged ? "<dev cfg changed> " : "",
1396 pVirtio->fGenUpdatePending ? "<update was pending>" : ""));
1397 pVirtio->fGenUpdatePending = false;
1398 }
1399
1400 virtioLowerInterrupt(pDevIns, 0);
1401 return rcStrict;
1402#else
1403 return VINF_IOM_R3_MMIO_READ;
1404#endif
1405 }
1406
1407 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocCommonCfgCap))
1408 return virtioCommonCfgAccessed(pDevIns, pVirtio, pVirtioCC, false /* fWrite */, uOffset, cb, pv);
1409
1410 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocIsrCap) && cb == sizeof(uint8_t))
1411 {
1412 *(uint8_t *)pv = pVirtio->uISR;
1413 Log6Func(("Read and clear ISR\n"));
1414 pVirtio->uISR = 0; /* VirtIO specification requires reads of ISR to clear it */
1415 virtioLowerInterrupt(pDevIns, 0);
1416 return VINF_SUCCESS;
1417 }
1418
1419 ASSERT_GUEST_MSG_FAILED(("Bad read access to mapped capabilities region: off=%RGp cb=%u\n", off, cb));
1420 return VINF_IOM_MMIO_UNUSED_00;
1421}
1422
1423/**
1424 * @callback_method_impl{FNIOMMMIONEWREAD,
1425 * Memory mapped I/O Handler for PCI Capabilities write operations.}
1426 *
1427 * This MMIO handler specifically supports the VIRTIO_PCI_CAP_PCI_CFG capability defined
1428 * in the VirtIO 1.0 specification, section 4.1.4.7, and as such is restricted to writes
1429 * of 1, 2 or 4 bytes, only.
1430 */
1431static DECLCALLBACK(VBOXSTRICTRC) virtioMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
1432{
1433 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1434 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1435
1436 AssertReturn(cb == 1 || cb == 2 || cb == 4, VERR_INVALID_PARAMETER);
1437
1438 Assert(pVirtio == (PVIRTIOCORE)pvUser); RT_NOREF(pvUser);
1439 uint32_t uOffset;
1440 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocDeviceCap))
1441 {
1442#ifdef IN_RING3
1443 /*
1444 * Foreward this MMIO write access for client to deal with.
1445 */
1446 return pVirtioCC->pfnDevCapWrite(pDevIns, uOffset, pv, cb);
1447#else
1448 return VINF_IOM_R3_MMIO_WRITE;
1449#endif
1450 }
1451
1452 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocCommonCfgCap))
1453 return virtioCommonCfgAccessed(pDevIns, pVirtio, pVirtioCC, true /* fWrite */, uOffset, cb, (void *)pv);
1454
1455 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocIsrCap) && cb == sizeof(uint8_t))
1456 {
1457 pVirtio->uISR = *(uint8_t *)pv;
1458 Log6Func(("Setting uISR = 0x%02x (virtq interrupt: %d, dev confg interrupt: %d)\n",
1459 pVirtio->uISR & 0xff,
1460 pVirtio->uISR & VIRTIO_ISR_VIRTQ_INTERRUPT,
1461 RT_BOOL(pVirtio->uISR & VIRTIO_ISR_DEVICE_CONFIG)));
1462 return VINF_SUCCESS;
1463 }
1464
1465 /* This *should* be guest driver dropping index of a new descriptor in avail ring */
1466 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocNotifyCap) && cb == sizeof(uint16_t))
1467 {
1468 virtioCoreVirtqNotified(pDevIns, pVirtio, uOffset / VIRTIO_NOTIFY_OFFSET_MULTIPLIER, *(uint16_t *)pv);
1469 return VINF_SUCCESS;
1470 }
1471
1472 ASSERT_GUEST_MSG_FAILED(("Bad write access to mapped capabilities region: off=%RGp pv=%#p{%.*Rhxs} cb=%u\n", off, pv, cb, pv, cb));
1473 return VINF_SUCCESS;
1474}
1475
1476#ifdef IN_RING3
1477
1478/**
1479 * @callback_method_impl{FNPCICONFIGREAD}
1480 */
1481static DECLCALLBACK(VBOXSTRICTRC) virtioR3PciConfigRead(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
1482 uint32_t uAddress, unsigned cb, uint32_t *pu32Value)
1483{
1484 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1485 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1486 RT_NOREF(pPciDev);
1487
1488 if (uAddress == pVirtio->uPciCfgDataOff)
1489 {
1490 /*
1491 * VirtIO 1.0 spec section 4.1.4.7 describes a required alternative access capability
1492 * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space
1493 * (the virtio_pci_cfg_cap capability), and access data items.
1494 */
1495 struct virtio_pci_cap *pPciCap = &pVirtioCC->pPciCfgCap->pciCap;
1496 uint32_t uLength = pPciCap->uLength;
1497
1498 Log7Func((" pDevIns=%p pPciDev=%p uAddress=%#x%s cb=%u uLength=%d, bar=%d\n",
1499 pDevIns, pPciDev, uAddress, uAddress < 0x10 ? " " : "", cb, uLength, pPciCap->uBar));
1500
1501 if ( (uLength != 1 && uLength != 2 && uLength != 4)
1502 || cb != uLength
1503 || pPciCap->uBar != VIRTIO_REGION_PCI_CAP)
1504 {
1505 ASSERT_GUEST_MSG_FAILED(("Guest read virtio_pci_cfg_cap.pci_cfg_data using mismatching config. "
1506 "Ignoring\n"));
1507 *pu32Value = UINT32_MAX;
1508 return VINF_SUCCESS;
1509 }
1510
1511 VBOXSTRICTRC rcStrict = virtioMmioRead(pDevIns, pVirtio, pPciCap->uOffset, pu32Value, cb);
1512 Log7Func((" Guest read virtio_pci_cfg_cap.pci_cfg_data, bar=%d, offset=%d, length=%d, result=0x%x -> %Rrc\n",
1513 pPciCap->uBar, pPciCap->uOffset, uLength, *pu32Value, VBOXSTRICTRC_VAL(rcStrict)));
1514 return rcStrict;
1515 }
1516 Log7Func((" pDevIns=%p pPciDev=%p uAddress=%#x%s cb=%u pu32Value=%p\n",
1517 pDevIns, pPciDev, uAddress, uAddress < 0x10 ? " " : "", cb, pu32Value));
1518 return VINF_PDM_PCI_DO_DEFAULT;
1519}
1520
1521/**
1522 * @callback_method_impl{FNPCICONFIGWRITE}
1523 */
1524static DECLCALLBACK(VBOXSTRICTRC) virtioR3PciConfigWrite(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
1525 uint32_t uAddress, unsigned cb, uint32_t u32Value)
1526{
1527 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1528 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1529 RT_NOREF(pPciDev);
1530
1531 Log7Func(("pDevIns=%p pPciDev=%p uAddress=%#x %scb=%u u32Value=%#x\n", pDevIns, pPciDev, uAddress, uAddress < 0xf ? " " : "", cb, u32Value));
1532 if (uAddress == pVirtio->uPciCfgDataOff)
1533 {
1534 /* VirtIO 1.0 spec section 4.1.4.7 describes a required alternative access capability
1535 * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space
1536 * (the virtio_pci_cfg_cap capability), and access data items. */
1537
1538 struct virtio_pci_cap *pPciCap = &pVirtioCC->pPciCfgCap->pciCap;
1539 uint32_t uLength = pPciCap->uLength;
1540
1541 if ( (uLength != 1 && uLength != 2 && uLength != 4)
1542 || cb != uLength
1543 || pPciCap->uBar != VIRTIO_REGION_PCI_CAP)
1544 {
1545 ASSERT_GUEST_MSG_FAILED(("Guest write virtio_pci_cfg_cap.pci_cfg_data using mismatching config. Ignoring\n"));
1546 return VINF_SUCCESS;
1547 }
1548
1549 VBOXSTRICTRC rcStrict = virtioMmioWrite(pDevIns, pVirtio, pPciCap->uOffset, &u32Value, cb);
1550 Log2Func(("Guest wrote virtio_pci_cfg_cap.pci_cfg_data, bar=%d, offset=%x, length=%x, value=%d -> %Rrc\n",
1551 pPciCap->uBar, pPciCap->uOffset, uLength, u32Value, VBOXSTRICTRC_VAL(rcStrict)));
1552 return rcStrict;
1553 }
1554 return VINF_PDM_PCI_DO_DEFAULT;
1555}
1556
1557
1558/*********************************************************************************************************************************
1559* Saved state. *
1560*********************************************************************************************************************************/
1561
1562/**
1563 * Called from the FNSSMDEVSAVEEXEC function of the device.
1564 *
1565 * @param pVirtio Pointer to the shared virtio state.
1566 * @param pHlp The ring-3 device helpers.
1567 * @param pSSM The saved state handle.
1568 * @returns VBox status code.
1569 */
1570int virtioCoreR3SaveExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
1571{
1572 LogFunc(("\n"));
1573 pHlp->pfnSSMPutU64(pSSM, VIRTIO_SAVEDSTATE_MARKER);
1574 pHlp->pfnSSMPutU32(pSSM, VIRTIO_SAVEDSTATE_VERSION);
1575
1576 pHlp->pfnSSMPutBool(pSSM, pVirtio->fGenUpdatePending);
1577 pHlp->pfnSSMPutU8( pSSM, pVirtio->fDeviceStatus);
1578 pHlp->pfnSSMPutU8( pSSM, pVirtio->uConfigGeneration);
1579 pHlp->pfnSSMPutU8( pSSM, pVirtio->uPciCfgDataOff);
1580 pHlp->pfnSSMPutU8( pSSM, pVirtio->uISR);
1581 pHlp->pfnSSMPutU16( pSSM, pVirtio->uVirtqSelect);
1582 pHlp->pfnSSMPutU32( pSSM, pVirtio->uDeviceFeaturesSelect);
1583 pHlp->pfnSSMPutU32( pSSM, pVirtio->uDriverFeaturesSelect);
1584 pHlp->pfnSSMPutU64( pSSM, pVirtio->uDriverFeatures);
1585
1586 for (uint32_t i = 0; i < VIRTQ_MAX_COUNT; i++)
1587 {
1588 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[i];
1589
1590 pHlp->pfnSSMPutGCPhys64( pSSM, pVirtq->GCPhysVirtqDesc);
1591 pHlp->pfnSSMPutGCPhys64( pSSM, pVirtq->GCPhysVirtqAvail);
1592 pHlp->pfnSSMPutGCPhys64( pSSM, pVirtq->GCPhysVirtqUsed);
1593 pHlp->pfnSSMPutU16( pSSM, pVirtq->uNotifyOffset);
1594 pHlp->pfnSSMPutU16( pSSM, pVirtq->uMsix);
1595 pHlp->pfnSSMPutU16( pSSM, pVirtq->uEnable);
1596 pHlp->pfnSSMPutU16( pSSM, pVirtq->uSize);
1597 pHlp->pfnSSMPutU16( pSSM, pVirtq->uAvailIdxShadow);
1598 pHlp->pfnSSMPutU16( pSSM, pVirtq->uUsedIdxShadow);
1599 int rc = pHlp->pfnSSMPutMem(pSSM, pVirtq->szName, 32);
1600 AssertRCReturn(rc, rc);
1601 }
1602
1603 return VINF_SUCCESS;
1604}
1605
1606/**
1607 * Called from the FNSSMDEVLOADEXEC function of the device.
1608 *
1609 * @param pVirtio Pointer to the shared virtio state.
1610 * @param pHlp The ring-3 device helpers.
1611 * @param pSSM The saved state handle.
1612 * @returns VBox status code.
1613 */
1614int virtioCoreR3LoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
1615{
1616 LogFunc(("\n"));
1617 /*
1618 * Check the marker and (embedded) version number.
1619 */
1620 uint64_t uMarker = 0;
1621 int rc = pHlp->pfnSSMGetU64(pSSM, &uMarker);
1622 AssertRCReturn(rc, rc);
1623 if (uMarker != VIRTIO_SAVEDSTATE_MARKER)
1624 return pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
1625 N_("Expected marker value %#RX64 found %#RX64 instead"),
1626 VIRTIO_SAVEDSTATE_MARKER, uMarker);
1627 uint32_t uVersion = 0;
1628 rc = pHlp->pfnSSMGetU32(pSSM, &uVersion);
1629 AssertRCReturn(rc, rc);
1630 if (uVersion != VIRTIO_SAVEDSTATE_VERSION)
1631 return pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
1632 N_("Unsupported virtio version: %u"), uVersion);
1633 /*
1634 * Load the state.
1635 */
1636 pHlp->pfnSSMGetBool( pSSM, &pVirtio->fGenUpdatePending);
1637 pHlp->pfnSSMGetU8( pSSM, &pVirtio->fDeviceStatus);
1638 pHlp->pfnSSMGetU8( pSSM, &pVirtio->uConfigGeneration);
1639 pHlp->pfnSSMGetU8( pSSM, &pVirtio->uPciCfgDataOff);
1640 pHlp->pfnSSMGetU8( pSSM, &pVirtio->uISR);
1641 pHlp->pfnSSMGetU16( pSSM, &pVirtio->uVirtqSelect);
1642 pHlp->pfnSSMGetU32( pSSM, &pVirtio->uDeviceFeaturesSelect);
1643 pHlp->pfnSSMGetU32( pSSM, &pVirtio->uDriverFeaturesSelect);
1644 pHlp->pfnSSMGetU64( pSSM, &pVirtio->uDriverFeatures);
1645
1646 for (uint32_t i = 0; i < VIRTQ_MAX_COUNT; i++)
1647 {
1648 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[i];
1649
1650 pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqDesc);
1651 pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqAvail);
1652 pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqUsed);
1653 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uNotifyOffset);
1654 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uMsix);
1655 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uEnable);
1656 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uSize);
1657 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uAvailIdxShadow);
1658 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uUsedIdxShadow);
1659 rc = pHlp->pfnSSMGetMem( pSSM, pVirtq->szName, sizeof(pVirtq->szName));
1660 AssertRCReturn(rc, rc);
1661 }
1662
1663 return VINF_SUCCESS;
1664}
1665
1666
1667/*********************************************************************************************************************************
1668* Device Level *
1669*********************************************************************************************************************************/
1670
1671/**
1672 * This must be called by the client to handle VM state changes
1673 * after the client takes care of its device-specific tasks for the state change.
1674 * (i.e. Reset, suspend, power-off, resume)
1675 *
1676 * @param pDevIns The device instance.
1677 * @param pVirtio Pointer to the shared virtio state.
1678 */
1679void virtioCoreR3VmStateChanged(PVIRTIOCORE pVirtio, VIRTIOVMSTATECHANGED enmState)
1680{
1681 LogFunc(("State changing to %s\n",
1682 virtioCoreGetStateChangeText(enmState)));
1683
1684 switch(enmState)
1685 {
1686 case kvirtIoVmStateChangedReset:
1687 virtioCoreResetAll(pVirtio);
1688 break;
1689 case kvirtIoVmStateChangedSuspend:
1690 break;
1691 case kvirtIoVmStateChangedPowerOff:
1692 break;
1693 case kvirtIoVmStateChangedResume:
1694 for (int uVirtq = 0; uVirtq < VIRTQ_MAX_COUNT; uVirtq++)
1695 {
1696 if (pVirtio->aVirtqueues[uVirtq].uEnable)
1697 virtioCoreNotifyGuestDriver(pVirtio->pDevInsR3, pVirtio, uVirtq);
1698 }
1699 break;
1700 default:
1701 LogRelFunc(("Bad enum value"));
1702 return;
1703 }
1704}
1705
1706/**
1707 * This should be called from PDMDEVREGR3::pfnDestruct.
1708 *
1709 * @param pDevIns The device instance.
1710 * @param pVirtio Pointer to the shared virtio state.
1711 * @param pVirtioCC Pointer to the ring-3 virtio state.
1712 */
1713void virtioCoreR3Term(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC)
1714{
1715 if (pVirtioCC->pbPrevDevSpecificCfg)
1716 {
1717 RTMemFree(pVirtioCC->pbPrevDevSpecificCfg);
1718 pVirtioCC->pbPrevDevSpecificCfg = NULL;
1719 }
1720 RT_NOREF(pDevIns, pVirtio);
1721}
1722
1723/** API Function: See header file */
1724int virtioCoreR3Init(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC, PVIRTIOPCIPARAMS pPciParams,
1725 const char *pcszInstance, uint64_t fDevSpecificFeatures, void *pvDevSpecificCfg, uint16_t cbDevSpecificCfg)
1726{
1727 /*
1728 * The pVirtio state must be the first member of the shared device instance
1729 * data, otherwise we cannot get our bearings in the PCI configuration callbacks.
1730 */
1731 AssertLogRelReturn(pVirtio == PDMINS_2_DATA(pDevIns, PVIRTIOCORE), VERR_STATE_CHANGED);
1732 AssertLogRelReturn(pVirtioCC == PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC), VERR_STATE_CHANGED);
1733
1734 pVirtio->pDevInsR3 = pDevIns;
1735
1736 /*
1737 * Caller must initialize these.
1738 */
1739 AssertReturn(pVirtioCC->pfnStatusChanged, VERR_INVALID_POINTER);
1740 AssertReturn(pVirtioCC->pfnVirtqNotified, VERR_INVALID_POINTER);
1741
1742#if 0 /* Until pdmR3DvHlp_PCISetIrq() impl is fixed and Assert that limits vec to 0 is removed */
1743# ifdef VBOX_WITH_MSI_DEVICES
1744 pVirtio->fMsiSupport = true;
1745# endif
1746#endif
1747
1748 /*
1749 * The host features offered include both device-specific features
1750 * and reserved feature bits (device independent)
1751 */
1752 pVirtio->uDeviceFeatures = VIRTIO_F_VERSION_1
1753 | VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED
1754 | fDevSpecificFeatures;
1755
1756 RTStrCopy(pVirtio->szInstance, sizeof(pVirtio->szInstance), pcszInstance);
1757
1758 pVirtio->fDeviceStatus = 0;
1759 pVirtioCC->cbDevSpecificCfg = cbDevSpecificCfg;
1760 pVirtioCC->pbDevSpecificCfg = (uint8_t *)pvDevSpecificCfg;
1761 pVirtioCC->pbPrevDevSpecificCfg = (uint8_t *)RTMemDup(pvDevSpecificCfg, cbDevSpecificCfg);
1762 AssertLogRelReturn(pVirtioCC->pbPrevDevSpecificCfg, VERR_NO_MEMORY);
1763
1764 /* Set PCI config registers (assume 32-bit mode) */
1765 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
1766 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
1767
1768 PDMPciDevSetRevisionId(pPciDev, DEVICE_PCI_REVISION_ID_VIRTIO);
1769 PDMPciDevSetVendorId(pPciDev, DEVICE_PCI_VENDOR_ID_VIRTIO);
1770 PDMPciDevSetSubSystemVendorId(pPciDev, DEVICE_PCI_VENDOR_ID_VIRTIO);
1771 PDMPciDevSetDeviceId(pPciDev, pPciParams->uDeviceId);
1772 PDMPciDevSetClassBase(pPciDev, pPciParams->uClassBase);
1773 PDMPciDevSetClassSub(pPciDev, pPciParams->uClassSub);
1774 PDMPciDevSetClassProg(pPciDev, pPciParams->uClassProg);
1775 PDMPciDevSetSubSystemId(pPciDev, pPciParams->uSubsystemId);
1776 PDMPciDevSetInterruptLine(pPciDev, pPciParams->uInterruptLine);
1777 PDMPciDevSetInterruptPin(pPciDev, pPciParams->uInterruptPin);
1778
1779 /* Register PCI device */
1780 int rc = PDMDevHlpPCIRegister(pDevIns, pPciDev);
1781 if (RT_FAILURE(rc))
1782 return PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: cannot register PCI Device")); /* can we put params in this error? */
1783
1784 rc = PDMDevHlpPCIInterceptConfigAccesses(pDevIns, pPciDev, virtioR3PciConfigRead, virtioR3PciConfigWrite);
1785 AssertRCReturn(rc, rc);
1786
1787
1788 /* Construct & map PCI vendor-specific capabilities for virtio host negotiation with guest driver */
1789
1790#define CFG_ADDR_2_IDX(addr) ((uint8_t)(((uintptr_t)(addr) - (uintptr_t)&pPciDev->abConfig[0])))
1791#define SET_PCI_CAP_LOC(a_pPciDev, a_pCfg, a_LocCap, a_uMmioLengthAlign) \
1792 do { \
1793 (a_LocCap).offMmio = (a_pCfg)->uOffset; \
1794 (a_LocCap).cbMmio = RT_ALIGN_T((a_pCfg)->uLength, a_uMmioLengthAlign, uint16_t); \
1795 (a_LocCap).offPci = (uint16_t)(uintptr_t)((uint8_t *)(a_pCfg) - &(a_pPciDev)->abConfig[0]); \
1796 (a_LocCap).cbPci = (a_pCfg)->uCapLen; \
1797 } while (0)
1798
1799 PVIRTIO_PCI_CAP_T pCfg;
1800 uint32_t cbRegion = 0;
1801
1802 /* Common capability (VirtIO 1.0 spec, section 4.1.4.3) */
1803 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[0x40];
1804 pCfg->uCfgType = VIRTIO_PCI_CAP_COMMON_CFG;
1805 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1806 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
1807 pCfg->uCapNext = CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen;
1808 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
1809 pCfg->uOffset = RT_ALIGN_32(0, 4); /* Currently 0, but reminder to 32-bit align if changing this */
1810 pCfg->uLength = sizeof(VIRTIO_PCI_COMMON_CFG_T);
1811 cbRegion += pCfg->uLength;
1812 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocCommonCfgCap, 2);
1813 pVirtioCC->pCommonCfgCap = pCfg;
1814
1815 /*
1816 * Notify capability (VirtIO 1.0 spec, section 4.1.4.4). Note: uLength is based on the choice
1817 * of this implementation to make each queue's uNotifyOffset equal to (VirtqSelect) ordinal
1818 * value of the queue (different strategies are possible according to spec).
1819 */
1820 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
1821 pCfg->uCfgType = VIRTIO_PCI_CAP_NOTIFY_CFG;
1822 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1823 pCfg->uCapLen = sizeof(VIRTIO_PCI_NOTIFY_CAP_T);
1824 pCfg->uCapNext = CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen;
1825 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
1826 pCfg->uOffset = pVirtioCC->pCommonCfgCap->uOffset + pVirtioCC->pCommonCfgCap->uLength;
1827 pCfg->uOffset = RT_ALIGN_32(pCfg->uOffset, 4);
1828 pCfg->uLength = VIRTQ_MAX_COUNT * VIRTIO_NOTIFY_OFFSET_MULTIPLIER + 2; /* will change in VirtIO 1.1 */
1829 cbRegion += pCfg->uLength;
1830 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocNotifyCap, 1);
1831 pVirtioCC->pNotifyCap = (PVIRTIO_PCI_NOTIFY_CAP_T)pCfg;
1832 pVirtioCC->pNotifyCap->uNotifyOffMultiplier = VIRTIO_NOTIFY_OFFSET_MULTIPLIER;
1833
1834 /* ISR capability (VirtIO 1.0 spec, section 4.1.4.5)
1835 *
1836 * VirtIO 1.0 spec says 8-bit, unaligned in MMIO space. Example/diagram
1837 * of spec shows it as a 32-bit field with upper bits 'reserved'
1838 * Will take spec's words more literally than the diagram for now.
1839 */
1840 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
1841 pCfg->uCfgType = VIRTIO_PCI_CAP_ISR_CFG;
1842 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1843 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
1844 pCfg->uCapNext = CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen;
1845 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
1846 pCfg->uOffset = pVirtioCC->pNotifyCap->pciCap.uOffset + pVirtioCC->pNotifyCap->pciCap.uLength;
1847 pCfg->uOffset = RT_ALIGN_32(pCfg->uOffset, 4);
1848 pCfg->uLength = sizeof(uint8_t);
1849 cbRegion += pCfg->uLength;
1850 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocIsrCap, 4);
1851 pVirtioCC->pIsrCap = pCfg;
1852
1853 /* PCI Cfg capability (VirtIO 1.0 spec, section 4.1.4.7)
1854 * This capability doesn't get page-MMIO mapped. Instead uBar, uOffset and uLength are intercepted
1855 * by trapping PCI configuration I/O and get modulated by consumers to locate fetch and read/write
1856 * values from any region. NOTE: The linux driver not only doesn't use this feature, it will not
1857 * even list it as present if uLength isn't non-zero and also 4-byte-aligned as the linux driver is
1858 * initializing.
1859 */
1860 pVirtio->uPciCfgDataOff = pCfg->uCapNext + RT_OFFSETOF(VIRTIO_PCI_CFG_CAP_T, uPciCfgData);
1861 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
1862 pCfg->uCfgType = VIRTIO_PCI_CAP_PCI_CFG;
1863 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1864 pCfg->uCapLen = sizeof(VIRTIO_PCI_CFG_CAP_T);
1865 pCfg->uCapNext = (pVirtio->fMsiSupport || pVirtioCC->pbDevSpecificCfg) ? CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen : 0;
1866 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
1867 pCfg->uOffset = 0;
1868 pCfg->uLength = 4;
1869 cbRegion += pCfg->uLength;
1870 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocPciCfgCap, 1);
1871 pVirtioCC->pPciCfgCap = (PVIRTIO_PCI_CFG_CAP_T)pCfg;
1872
1873 if (pVirtioCC->pbDevSpecificCfg)
1874 {
1875 /* Device specific config capability (via VirtIO 1.0, section 4.1.4.6).
1876 * Client defines the device-specific config struct and passes size to virtioCoreR3Init()
1877 * to inform this. */
1878 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
1879 pCfg->uCfgType = VIRTIO_PCI_CAP_DEVICE_CFG;
1880 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1881 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
1882 pCfg->uCapNext = pVirtio->fMsiSupport ? CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen : 0;
1883 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
1884 pCfg->uOffset = pVirtioCC->pIsrCap->uOffset + pVirtioCC->pIsrCap->uLength;
1885 pCfg->uOffset = RT_ALIGN_32(pCfg->uOffset, 4);
1886 pCfg->uLength = cbDevSpecificCfg;
1887 cbRegion += pCfg->uLength;
1888 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocDeviceCap, 4);
1889 pVirtioCC->pDeviceCap = pCfg;
1890 }
1891 else
1892 Assert(pVirtio->LocDeviceCap.cbMmio == 0 && pVirtio->LocDeviceCap.cbPci == 0);
1893
1894 if (pVirtio->fMsiSupport)
1895 {
1896 PDMMSIREG aMsiReg;
1897 RT_ZERO(aMsiReg);
1898 aMsiReg.iMsixCapOffset = pCfg->uCapNext;
1899 aMsiReg.iMsixNextOffset = 0;
1900 aMsiReg.iMsixBar = VIRTIO_REGION_MSIX_CAP;
1901 aMsiReg.cMsixVectors = VBOX_MSIX_MAX_ENTRIES;
1902 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &aMsiReg); /* see MsixR3init() */
1903 if (RT_FAILURE(rc))
1904 {
1905 /* See PDMDevHlp.cpp:pdmR3DevHlp_PCIRegisterMsi */
1906 LogFunc(("Failed to configure MSI-X (%Rrc). Reverting to INTx\n", rc));
1907 pVirtio->fMsiSupport = false;
1908 }
1909 else
1910 Log2Func(("Using MSI-X for guest driver notification\n"));
1911 }
1912 else
1913 LogFunc(("MSI-X not available for VBox, using INTx notification\n"));
1914
1915 /* Set offset to first capability and enable PCI dev capabilities */
1916 PDMPciDevSetCapabilityList(pPciDev, 0x40);
1917 PDMPciDevSetStatus(pPciDev, VBOX_PCI_STATUS_CAP_LIST);
1918
1919 size_t cbSize = RTStrPrintf(pVirtioCC->pcszMmioName, sizeof(pVirtioCC->pcszMmioName), "%s MMIO", pcszInstance);
1920 if (cbSize <= 0)
1921 return PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: out of memory allocating string")); /* can we put params in this error? */
1922
1923 /* Note: The Linux driver at drivers/virtio/virtio_pci_modern.c tries to map at least a page for the
1924 * 'unknown' device-specific capability without querying the capability to figure
1925 * out size, so pad with an extra page
1926 */
1927 rc = PDMDevHlpPCIIORegionCreateMmio(pDevIns, VIRTIO_REGION_PCI_CAP, RT_ALIGN_32(cbRegion + PAGE_SIZE, PAGE_SIZE),
1928 PCI_ADDRESS_SPACE_MEM, virtioMmioWrite, virtioMmioRead, pVirtio,
1929 IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
1930 pVirtioCC->pcszMmioName,
1931 &pVirtio->hMmioPciCap);
1932 AssertLogRelRCReturn(rc, PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: cannot register PCI Capabilities address space")));
1933 /*
1934 * Statistics.
1935 */
1936 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsAllocated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1937 "Total number of allocated descriptor chains", "DescChainsAllocated");
1938 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsFreed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1939 "Total number of freed descriptor chains", "DescChainsFreed");
1940 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsSegsIn, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1941 "Total number of inbound segments", "DescChainsSegsIn");
1942 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsSegsOut, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1943 "Total number of outbound segments", "DescChainsSegsOut");
1944
1945 return VINF_SUCCESS;
1946}
1947
1948#else /* !IN_RING3 */
1949
1950/**
1951 * Sets up the core ring-0/raw-mode virtio bits.
1952 *
1953 * @returns VBox status code.
1954 * @param pDevIns The device instance.
1955 * @param pVirtio Pointer to the shared virtio state. This must be the first
1956 * member in the shared device instance data!
1957 */
1958int virtioCoreRZInit(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio)
1959{
1960 AssertLogRelReturn(pVirtio == PDMINS_2_DATA(pDevIns, PVIRTIOCORE), VERR_STATE_CHANGED);
1961
1962#ifdef FUTURE_OPTIMIZATION
1963 int rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
1964 AssertRCReturn(rc, rc);
1965#endif
1966 int rc = PDMDevHlpMmioSetUpContext(pDevIns, pVirtio->hMmioPciCap, virtioMmioWrite, virtioMmioRead, pVirtio);
1967 AssertRCReturn(rc, rc);
1968 return rc;
1969}
1970
1971#endif /* !IN_RING3 */
1972
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette