VirtualBox

source: vbox/trunk/src/VBox/Devices/VirtIO/VirtioCore.cpp@ 85672

Last change on this file since 85672 was 85415, checked in by vboxsync, 4 years ago

Network/DevVirtioNet_1_0.cpp: Fixed pause/resume/poweroff, added more support for multiqueue (MQ) handling (see bugref:8651, Comment 91), More little cleanup, comment fixes

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 81.8 KB
Line 
1/* $Id: VirtioCore.cpp 85415 2020-07-22 14:44:19Z vboxsync $ */
2
3/** @file
4 * VirtioCore - Virtio Core (PCI, feature & config mgt, queue mgt & proxy, notification mgt)
5 */
6
7/*
8 * Copyright (C) 2009-2020 Oracle Corporation
9 *
10 * This file is part of VirtualBox Open Source Edition (OSE), as
11 * available from http://www.virtualbox.org. This file is free software;
12 * you can redistribute it and/or modify it under the terms of the GNU
13 * General Public License (GPL) as published by the Free Software
14 * Foundation, in version 2 as it comes in the "COPYING" file of the
15 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
16 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
17 */
18
19
20/*********************************************************************************************************************************
21* Header Files *
22*********************************************************************************************************************************/
23#define LOG_GROUP LOG_GROUP_DEV_VIRTIO
24
25#include <iprt/assert.h>
26#include <iprt/uuid.h>
27#include <iprt/mem.h>
28#include <iprt/sg.h>
29#include <iprt/assert.h>
30#include <iprt/string.h>
31#include <iprt/param.h>
32#include <iprt/types.h>
33#include <VBox/log.h>
34#include <VBox/msi.h>
35#include <iprt/types.h>
36#include <VBox/AssertGuest.h>
37#include <VBox/vmm/pdmdev.h>
38#include "VirtioCore.h"
39
40
41/*********************************************************************************************************************************
42* Defined Constants And Macros *
43*********************************************************************************************************************************/
/** Returns the instance name string stored in the shared virtio core state. */
#define INSTANCE(a_pVirtio)                 ((a_pVirtio)->szInstance)
/** Returns the name of virtqueue number @a a_uVirtq of @a a_pVirtio. */
#define VIRTQNAME(a_pVirtio, a_uVirtq)      ((a_pVirtio)->aVirtqueues[(a_uVirtq)].szName)

/** True when the guest driver has signalled DRIVER_OK in the device status register. */
#define IS_DRIVER_OK(a_pVirtio)             ((a_pVirtio)->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
/** True when the avail ring of @a pVirtq holds no entries the device hasn't consumed yet. */
#define IS_VIRTQ_EMPTY(pDevIns, pVirtio, pVirtq) \
            (virtioCoreVirtqAvailBufCount_inline(pDevIns, pVirtio, pVirtq) == 0)
50
/**
 * This macro returns true if the @a a_offAccess and access length (@a
 * a_cbAccess) are within the range of the mapped capability struct described by
 * @a a_LocCapData.
 *
 * Implementation note: the first comparison relies on unsigned wrap-around --
 * an @a a_offAccess below the capability start underflows to a huge uint32_t
 * and therefore fails the < cbMmio test.
 *
 * @param[in]  a_offAccess      Input:  The offset into the MMIO bar of the access.
 * @param[in]  a_cbAccess       Input:  The access size.
 * @param[out] a_offsetIntoCap  Output: uint32_t variable to return the intra-capability offset into.
 * @param[in]  a_LocCapData     Input:  The capability location info.
 */
#define MATCHES_VIRTIO_CAP_STRUCT(a_offAccess, a_cbAccess, a_offsetIntoCap, a_LocCapData) \
    (    ((a_offsetIntoCap) = (uint32_t)((a_offAccess) - (a_LocCapData).offMmio)) < (uint32_t)(a_LocCapData).cbMmio \
      && (a_offsetIntoCap) + (uint32_t)(a_cbAccess) <= (uint32_t)(a_LocCapData).cbMmio )
64
65
66/** Marks the start of the virtio saved state (just for sanity). */
67#define VIRTIO_SAVEDSTATE_MARKER UINT64_C(0x1133557799bbddff)
68/** The current saved state version for the virtio core. */
69#define VIRTIO_SAVEDSTATE_VERSION UINT32_C(1)
70
71
72/*********************************************************************************************************************************
73* Structures and Typedefs *
74*********************************************************************************************************************************/
75
76
/** @name virtq related flags (VirtIO 1.0 descriptor/ring flag bits)
 * @{ */
#define VIRTQ_DESC_F_NEXT                   1   /**< Indicates this descriptor chains to the one named in its "next" field */
#define VIRTQ_DESC_F_WRITE                  2   /**< Marks buffer as write-only for the device (default is read-only) */
#define VIRTQ_DESC_F_INDIRECT               4   /**< Buffer is itself a list of buffer descriptors */

#define VIRTQ_USED_F_NO_NOTIFY              1   /**< Dev to Drv: Don't notify (kick) when buf added to used ring */
#define VIRTQ_AVAIL_F_NO_INTERRUPT          1   /**< Drv to Dev: Don't interrupt when buf consumed from avail ring */
/** @} */
86
/**
 * virtq related structs
 * (struct names follow VirtIO 1.0 spec, typedef use VBox style)
 */
/** Descriptor table entry, VirtIO 1.0, 2.4.5 ("struct virtq_desc"). */
typedef struct virtq_desc
{
    uint64_t  GCPhysBuf;                        /**< addr       GC Phys. address of buffer     */
    uint32_t  cb;                               /**< len        Buffer length                  */
    uint16_t  fFlags;                           /**< flags      Buffer specific flags          */
    uint16_t  uDescIdxNext;                     /**< next       Idx set if VIRTIO_DESC_F_NEXT  */
} VIRTQ_DESC_T, *PVIRTQ_DESC_T;
98
/** Available ring (driver-to-device), VirtIO 1.0, 2.4.6 ("struct virtq_avail"). */
typedef struct virtq_avail
{
    uint16_t  fFlags;                           /**< flags      avail ring guest-to-host flags */
    uint16_t  uIdx;                             /**< idx        Index of next free ring slot (free-running) */
    RT_FLEXIBLE_ARRAY_EXTENSION
    uint16_t  auRing[RT_FLEXIBLE_ARRAY];        /**< ring       Ring: avail drv to dev bufs    */
    //uint16_t  uUsedEventIdx;                  /**< used_event (if VIRTQ_USED_F_EVENT_IDX)    */
} VIRTQ_AVAIL_T, *PVIRTQ_AVAIL_T;
107
/** One element of the used ring, VirtIO 1.0, 2.4.8 ("struct virtq_used_elem"). */
typedef struct virtq_used_elem
{
    uint32_t  uDescIdx;                         /**< idx        Start of used desc chain       */
    uint32_t  cbElem;                           /**< len        Total len of used desc chain   */
} VIRTQ_USED_ELEM_T;
113
114typedef struct virt_used
115{
116 uint16_t fFlags; /**< flags used ring host-to-guest flags */
117 uint16_t uIdx; /**< idx Index of next ring slot */
118 RT_FLEXIBLE_ARRAY_EXTENSION
119 VIRTQ_USED_ELEM_T aRing[RT_FLEXIBLE_ARRAY]; /**< ring Ring: used dev to drv bufs */
120 //uint16_t uAvailEventIdx; /**< avail_event if (VIRTQ_USED_F_EVENT_IDX) */
121} VIRTQ_USED_T, *PVIRTQ_USED_T;
122
123
124const char *virtioCoreGetStateChangeText(VIRTIOVMSTATECHANGED enmState)
125{
126 switch (enmState)
127 {
128 case kvirtIoVmStateChangedReset: return "VM RESET";
129 case kvirtIoVmStateChangedSuspend: return "VM SUSPEND";
130 case kvirtIoVmStateChangedPowerOff: return "VM POWER OFF";
131 case kvirtIoVmStateChangedResume: return "VM RESUME";
132 default: return "<BAD ENUM>";
133 }
134}
135
136/* Internal Functions */
137
138static void virtioCoreNotifyGuestDriver(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq);
139static int virtioKick(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint8_t uCause, uint16_t uVec);
140
141/** @name Internal queue operations
142 * @{ */
143
/**
 * Accessor for virtq descriptor
 */
#ifdef IN_RING3
/**
 * Copies descriptor table entry @a idxDesc of queue @a pVirtq from guest
 * physical memory into @a *pDesc.  The index is reduced modulo the queue
 * size, so callers may pass free-running indices.
 */
DECLINLINE(void) virtioReadDesc(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq,
                                uint32_t idxDesc, PVIRTQ_DESC_T pDesc)
{
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    RT_NOREF(pVirtio); /* only referenced by the assertion in strict builds */
    uint16_t const cVirtqItems = RT_MAX(pVirtq->uSize, 1); /* Make sure to avoid div-by-zero. */
    PDMDevHlpPCIPhysRead(pDevIns,
                         pVirtq->GCPhysVirtqDesc + sizeof(VIRTQ_DESC_T) * (idxDesc % cVirtqItems),
                         pDesc, sizeof(VIRTQ_DESC_T));
}
#endif
159
/**
 * Accessors for virtq avail ring
 */
#ifdef IN_RING3
/**
 * Reads the descriptor index stored in avail ring slot @a availIdx
 * (reduced modulo the queue size) from guest physical memory.
 */
DECLINLINE(uint16_t) virtioReadAvailDescIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint32_t availIdx)
{
    uint16_t uDescIdx;
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    RT_NOREF(pVirtio); /* only referenced by the assertion in strict builds */
    uint16_t const cVirtqItems = RT_MAX(pVirtq->uSize, 1); /* Make sure to avoid div-by-zero. */
    PDMDevHlpPCIPhysRead(pDevIns,
                         pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[availIdx % cVirtqItems]),
                         &uDescIdx, sizeof(uDescIdx));
    return uDescIdx;
}

/**
 * Reads the driver's used_event index from guest physical memory.
 */
DECLINLINE(uint16_t) virtioReadAvailUsedEvent(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
{
    uint16_t uUsedEventIdx;
    /* VirtIO 1.0 uUsedEventIdx (used_event) immediately follows ring */
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    RT_NOREF(pVirtio); /* only referenced by the assertion in strict builds */
    PDMDevHlpPCIPhysRead(pDevIns,
                         pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[pVirtq->uSize]),
                         &uUsedEventIdx, sizeof(uUsedEventIdx));
    return uUsedEventIdx;
}
#endif
188
/**
 * Reads the driver's free-running avail ring index (avail->idx) from guest
 * physical memory.
 */
DECLINLINE(uint16_t) virtioReadAvailRingIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
{
    uint16_t uIdx = 0;
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    RT_NOREF(pVirtio); /* only referenced by the assertion in strict builds */
    PDMDevHlpPCIPhysRead(pDevIns,
                         pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF(VIRTQ_AVAIL_T, uIdx),
                         &uIdx, sizeof(uIdx));
    return uIdx;
}
199
/**
 * Reads the avail ring flags word (e.g. VIRTQ_AVAIL_F_NO_INTERRUPT) from
 * guest physical memory.
 */
DECLINLINE(uint16_t) virtioReadAvailRingFlags(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
{
    uint16_t fFlags = 0;
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    RT_NOREF(pVirtio); /* only referenced by the assertion in strict builds */
    PDMDevHlpPCIPhysRead(pDevIns,
                         pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF(VIRTQ_AVAIL_T, fFlags),
                         &fFlags, sizeof(fFlags));
    return fFlags;
}
210
211/** @} */
212
213/** @name Accessors for virtq used ring
214 * @{
215 */
216
#ifdef IN_RING3
/**
 * Writes a used ring element {uDescIdx, uLen} into slot @a usedIdx
 * (reduced modulo the queue size) of the used ring in guest physical memory.
 */
DECLINLINE(void) virtioWriteUsedElem(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq,
                                     uint32_t usedIdx, uint32_t uDescIdx, uint32_t uLen)
{
    VIRTQ_USED_ELEM_T elem = { uDescIdx, uLen };
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    RT_NOREF(pVirtio); /* only referenced by the assertion in strict builds */
    uint16_t const cVirtqItems = RT_MAX(pVirtq->uSize, 1); /* Make sure to avoid div-by-zero. */
    PDMDevHlpPCIPhysWrite(pDevIns,
                          pVirtq->GCPhysVirtqUsed
                        + RT_UOFFSETOF_DYN(VIRTQ_USED_T, aRing[usedIdx % cVirtqItems]),
                          &elem, sizeof(elem));
}

/**
 * Writes the used ring flags word (e.g. VIRTQ_USED_F_NO_NOTIFY) to guest
 * physical memory.
 */
DECLINLINE(void) virtioWriteUsedRingFlags(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint16_t fFlags)
{
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    RT_NOREF(pVirtio); /* only referenced by the assertion in strict builds */
    RT_UNTRUSTED_VALIDATED_FENCE(); /* VirtIO 1.0, Section 3.2.1.4.1 */
    PDMDevHlpPCIPhysWrite(pDevIns,
                          pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
                          &fFlags, sizeof(fFlags));
}
#endif
241
/**
 * Publishes the device's used ring index (used->idx) to guest physical
 * memory, making previously written used elements visible to the driver.
 */
DECLINLINE(void) virtioWriteUsedRingIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint16_t uIdx)
{
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    RT_NOREF(pVirtio); /* only referenced by the assertion in strict builds */
    PDMDevHlpPCIPhysWrite(pDevIns,
                          pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, uIdx),
                          &uIdx, sizeof(uIdx));
}
250
251
#ifdef IN_RING3
/**
 * Reads back the device's used ring index (used->idx) from guest physical
 * memory (used for logging/debug display).
 */
DECLINLINE(uint16_t) virtioReadUsedRingIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
{
    uint16_t uIdx = 0;
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    RT_NOREF(pVirtio); /* only referenced by the assertion in strict builds */
    PDMDevHlpPCIPhysRead(pDevIns,
                         pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, uIdx),
                         &uIdx, sizeof(uIdx));
    return uIdx;
}

/**
 * Reads the used ring flags word back from guest physical memory.
 */
DECLINLINE(uint16_t) virtioReadUsedRingFlags(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
{
    uint16_t fFlags = 0;
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    RT_NOREF(pVirtio); /* only referenced by the assertion in strict builds */
    PDMDevHlpPCIPhysRead(pDevIns,
                         pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
                         &fFlags, sizeof(fFlags));
    return fFlags;
}

/**
 * Writes the device's avail_event index to guest physical memory.
 */
DECLINLINE(void) virtioWriteUsedAvailEvent(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint32_t uAvailEventIdx)
{
    /* VirtIO 1.0 uAvailEventIdx (avail_event) immediately follows ring */
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    RT_NOREF(pVirtio); /* only referenced by the assertion in strict builds */
    PDMDevHlpPCIPhysWrite(pDevIns,
                          pVirtq->GCPhysVirtqUsed
                        + RT_UOFFSETOF_DYN(VIRTQ_USED_T, aRing[pVirtq->uSize]),
                          &uAvailEventIdx, sizeof(uAvailEventIdx));
}
#endif
286
287DECLINLINE(uint16_t) virtioCoreVirtqAvailBufCount_inline(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
288{
289 uint16_t uIdx = virtioReadAvailRingIdx(pDevIns, pVirtio, pVirtq);
290 uint16_t uShadow = pVirtq->uAvailIdxShadow;
291
292 uint16_t uDelta;
293 if (uIdx < uShadow)
294 uDelta = (uIdx + VIRTQ_MAX_ENTRIES) - uShadow;
295 else
296 uDelta = uIdx - uShadow;
297
298 LogFunc(("%s has %u %s (idx=%u shadow=%u)\n",
299 pVirtq->szName, uDelta, uDelta == 1 ? "entry" : "entries",
300 uIdx, uShadow));
301
302 return uDelta;
303}
304/**
305 * Get count of new (e.g. pending) elements in available ring.
306 *
307 * @param pDevIns The device instance.
308 * @param pVirtio Pointer to the shared virtio state.
309 * @param uVirtq Virtq number
310 *
311 * @returns how many entries have been added to ring as a delta of the consumer's
312 * avail index and the queue's guest-side current avail index.
313 */
314uint16_t virtioCoreVirtqAvailBufCount(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq)
315{
316 AssertMsgReturn(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues), ("uVirtq out of range"), 0);
317 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
318 if (!IS_DRIVER_OK(pVirtio) || !pVirtq->uEnable)
319 {
320 LogRelFunc(("Driver not ready or queue not enabled\n"));
321 return 0;
322 }
323
324 return virtioCoreVirtqAvailBufCount_inline(pDevIns, pVirtio, pVirtq);
325}
326
327#ifdef IN_RING3
328
329/** API Function: See header file*/
330void virtioCorePrintFeatures(VIRTIOCORE *pVirtio, PCDBGFINFOHLP pHlp)
331{
332 static struct
333 {
334 uint64_t fFeatureBit;
335 const char *pcszDesc;
336 } const s_aFeatures[] =
337 {
338 { VIRTIO_F_RING_INDIRECT_DESC, " RING_INDIRECT_DESC Driver can use descriptors with VIRTQ_DESC_F_INDIRECT flag set\n" },
339 { VIRTIO_F_RING_EVENT_IDX, " RING_EVENT_IDX Enables use_event and avail_event fields described in 2.4.7, 2.4.8\n" },
340 { VIRTIO_F_VERSION_1, " VERSION Used to detect legacy drivers.\n" },
341 };
342
343#define MAXLINE 80
344 /* Display as a single buf to prevent interceding log messages */
345 uint16_t cbBuf = RT_ELEMENTS(s_aFeatures) * 132;
346 char *pszBuf = (char *)RTMemAllocZ(cbBuf);
347 Assert(pszBuf);
348 char *cp = pszBuf;
349 for (unsigned i = 0; i < RT_ELEMENTS(s_aFeatures); ++i)
350 {
351 bool isOffered = RT_BOOL(pVirtio->uDeviceFeatures & s_aFeatures[i].fFeatureBit);
352 bool isNegotiated = RT_BOOL(pVirtio->uDriverFeatures & s_aFeatures[i].fFeatureBit);
353 cp += RTStrPrintf(cp, cbBuf - (cp - pszBuf), " %s %s %s",
354 isOffered ? "+" : "-", isNegotiated ? "x" : " ", s_aFeatures[i].pcszDesc);
355 }
356 if (pHlp)
357 pHlp->pfnPrintf(pHlp, "VirtIO Core Features Configuration\n\n"
358 " Offered Accepted Feature Description\n"
359 " ------- -------- ------- -----------\n"
360 "%s\n", pszBuf);
361#ifdef LOG_ENABLED
362 else
363 Log3(("VirtIO Core Features Configuration\n\n"
364 " Offered Accepted Feature Description\n"
365 " ------- -------- ------- -----------\n"
366 "%s\n", pszBuf));
367#endif
368 RTMemFree(pszBuf);
369}
370#endif
371
372#ifdef LOG_ENABLED
373
/** API Function: See header file */
void virtioCoreHexDump(uint8_t *pv, uint32_t cb, uint32_t uBase, const char *pszTitle)
{
    /* Advance the output cursor and shrink the remaining-space counter in lockstep. */
#define ADJCURSOR(cb) pszOut += cb; cbRemain -= cb;
    /* 80 bytes per 16-byte row (plus one spare row) is enough for addr + hex + ASCII columns. */
    size_t cbPrint = 0, cbRemain = ((cb / 16) + 1) * 80;
    char *pszBuf = (char *)RTMemAllocZ(cbRemain), *pszOut = pszBuf;
    AssertMsgReturnVoid(pszBuf, ("Out of Memory"));
    if (pszTitle)
    {
        cbPrint = RTStrPrintf(pszOut, cbRemain, "%s [%d bytes]:\n", pszTitle, cb);
        ADJCURSOR(cbPrint);
    }
    for (uint32_t row = 0; row < RT_MAX(1, (cb / 16) + 1) && row * 16 < cb; row++)
    {
        cbPrint = RTStrPrintf(pszOut, cbRemain, "%04x: ", row * 16 + uBase); /* line address */
        ADJCURSOR(cbPrint);
        /* Hex column: real bytes, "--" placeholders past the end of the buffer. */
        for (uint8_t col = 0; col < 16; col++)
        {
            uint32_t idx = row * 16 + col;
            if (idx >= cb)
                cbPrint = RTStrPrintf(pszOut, cbRemain, "-- %s", (col + 1) % 8 ? "" : "  ");
            else
                cbPrint = RTStrPrintf(pszOut, cbRemain, "%02x %s", pv[idx], (col + 1) % 8 ? "" : "  ");
            ADJCURSOR(cbPrint);
        }
        /* ASCII column: printable chars verbatim, '.' for the rest, ' ' past the end. */
        for (uint32_t idx = row * 16; idx < row * 16 + 16; idx++)
        {
            cbPrint = RTStrPrintf(pszOut, cbRemain, "%c", (idx >= cb) ? ' ' : (pv[idx] >= 0x20 && pv[idx] <= 0x7e ? pv[idx] : '.'));
            ADJCURSOR(cbPrint);
        }
        /* NOTE(review): raw append with no remaining-space check -- relies on the
         * 80-bytes-per-row sizing above always leaving room; verify if row format grows. */
        *pszOut++ = '\n';
        --cbRemain;
    }
    Log(("%s\n", pszBuf));
    RTMemFree(pszBuf);
    RT_NOREF2(uBase, pv);
#undef ADJCURSOR
}
412
413/* API FUnction: See header file */
414void virtioCoreGCPhysHexDump(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, uint16_t cb, uint32_t uBase, const char *pszTitle)
415{
416#define ADJCURSOR(cb) pszOut += cb; cbRemain -= cb;
417 size_t cbPrint = 0, cbRemain = ((cb / 16) + 1) * 80;
418 char *pszBuf = (char *)RTMemAllocZ(cbRemain), *pszOut = pszBuf;
419 AssertMsgReturnVoid(pszBuf, ("Out of Memory"));
420 if (pszTitle)
421 {
422 cbPrint = RTStrPrintf(pszOut, cbRemain, "%s [%d bytes]:\n", pszTitle, cb);
423 ADJCURSOR(cbPrint);
424 }
425 for (uint16_t row = 0; row < (uint16_t)RT_MAX(1, (cb / 16) + 1) && row * 16 < cb; row++)
426 {
427 uint8_t c;
428 cbPrint = RTStrPrintf(pszOut, cbRemain, "%04x: ", row * 16 + uBase); /* line address */
429 ADJCURSOR(cbPrint);
430 for (uint8_t col = 0; col < 16; col++)
431 {
432 uint32_t idx = row * 16 + col;
433 PDMDevHlpPCIPhysRead(pDevIns, GCPhys + idx, &c, 1);
434 if (idx >= cb)
435 cbPrint = RTStrPrintf(pszOut, cbRemain, "-- %s", (col + 1) % 8 ? "" : " ");
436 else
437 cbPrint = RTStrPrintf(pszOut, cbRemain, "%02x %s", c, (col + 1) % 8 ? "" : " ");
438 ADJCURSOR(cbPrint);
439 }
440 for (uint16_t idx = row * 16; idx < row * 16 + 16; idx++)
441 {
442 PDMDevHlpPCIPhysRead(pDevIns, GCPhys + idx, &c, 1);
443 cbPrint = RTStrPrintf(pszOut, cbRemain, "%c", (idx >= cb) ? ' ' : (c >= 0x20 && c <= 0x7e ? c : '.'));
444 ADJCURSOR(cbPrint);
445 }
446 *pszOut++ = '\n';
447 --cbRemain;
448 }
449 Log(("%s\n", pszBuf));
450 RTMemFree(pszBuf);
451 RT_NOREF(uBase);
452#undef ADJCURSOR
453}
454#endif /* LOG_ENABLED */
455
/** API function: See header file
 *
 * Logs (Log6) a guest MMIO access to a named device/config register member,
 * formatting aligned power-of-two sized accesses as a single value and
 * anything else as an inline hex dump.  No-op unless Log6 is enabled.
 */
void virtioCoreLogMappedIoValue(const char *pszFunc, const char *pszMember, uint32_t uMemberSize,
                                const void *pv, uint32_t cb, uint32_t uOffset, int fWrite,
                                int fHasIndex, uint32_t idx)
{
    if (!LogIs6Enabled())
        return;

    /* Optional "[idx]" suffix for array-style members. */
    char szIdx[16];
    if (fHasIndex)
        RTStrPrintf(szIdx, sizeof(szIdx), "[%d]", idx);
    else
        szIdx[0] = '\0';

    if (cb == 1 || cb == 2 || cb == 4 || cb == 8)
    {
        char szDepiction[64];
        size_t cchDepiction;
        if (uOffset != 0 || cb != uMemberSize) /* display bounds if partial member access */
            cchDepiction = RTStrPrintf(szDepiction, sizeof(szDepiction), "%s%s[%d:%d]",
                                       pszMember, szIdx, uOffset, uOffset + cb - 1);
        else
            cchDepiction = RTStrPrintf(szDepiction, sizeof(szDepiction), "%s%s", pszMember, szIdx);

        /* padding (dot leaders up to column 30 so the values line up) */
        if (cchDepiction < 30)
            szDepiction[cchDepiction++] = ' ';
        while (cchDepiction < 30)
            szDepiction[cchDepiction++] = '.';
        szDepiction[cchDepiction] = '\0';

        /* Copy the accessed bytes into a zeroed 64-bit union so any width prints uniformly. */
        RTUINT64U uValue;
        uValue.u = 0;
        memcpy(uValue.au8, pv, cb);
        Log6(("%-23s: Guest %s %s %#0*RX64\n",
              pszFunc, fWrite ? "wrote" : "read ", szDepiction, 2 + cb * 2, uValue.u));
    }
    else /* odd number or oversized access, ... log inline hex-dump style */
    {
        Log6(("%-23s: Guest %s %s%s[%d:%d]: %.*Rhxs\n",
              pszFunc, fWrite ? "wrote" : "read ", pszMember,
              szIdx, uOffset, uOffset + cb, cb, pv));
    }
    RT_NOREF2(fWrite, pszFunc); /* unused when logging is compiled out */
}
501
502/**
503 * Makes the MMIO-mapped Virtio fDeviceStatus registers non-cryptic (buffers to
504 * keep the output clean during multi-threaded activity)
505 */
506DECLINLINE(void) virtioCoreFormatDeviceStatus(uint8_t bStatus, char *pszBuf, size_t uSize)
507{
508
509#define ADJCURSOR(len) cp += len; uSize -= len; sep = (char *)" | ";
510
511 memset(pszBuf, 0, uSize);
512 size_t len;
513 char *cp = pszBuf;
514 char *sep = (char *)"";
515
516 if (bStatus == 0) {
517 RTStrPrintf(cp, uSize, "RESET");
518 return;
519 }
520 if (bStatus & VIRTIO_STATUS_ACKNOWLEDGE)
521 {
522 len = RTStrPrintf(cp, uSize, "ACKNOWLEDGE");
523 ADJCURSOR(len);
524 }
525 if (bStatus & VIRTIO_STATUS_DRIVER)
526 {
527 len = RTStrPrintf(cp, uSize, "%sDRIVER", sep);
528 ADJCURSOR(len);
529 }
530 if (bStatus & VIRTIO_STATUS_FEATURES_OK)
531 {
532 len = RTStrPrintf(cp, uSize, "%sFEATURES_OK", sep);
533 ADJCURSOR(len);
534 }
535 if (bStatus & VIRTIO_STATUS_DRIVER_OK)
536 {
537 len = RTStrPrintf(cp, uSize, "%sDRIVER_OK", sep);
538 ADJCURSOR(len);
539 }
540 if (bStatus & VIRTIO_STATUS_FAILED)
541 {
542 len = RTStrPrintf(cp, uSize, "%sFAILED", sep);
543 ADJCURSOR(len);
544 }
545 if (bStatus & VIRTIO_STATUS_DEVICE_NEEDS_RESET)
546 RTStrPrintf(cp, uSize, "%sNEEDS_RESET", sep);
547
548#undef ADJCURSOR
549}
550
551#ifdef IN_RING3
552
553int virtioCoreR3VirtqAttach(PVIRTIOCORE pVirtio, uint16_t uVirtq, const char *pcszName)
554{
555 LogFunc(("%s\n", pcszName));
556 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
557 pVirtq->uVirtq = uVirtq;
558 pVirtq->uAvailIdxShadow = 0;
559 pVirtq->uUsedIdxShadow = 0;
560 pVirtq->fUsedRingEvent = false;
561 RTStrCopy(pVirtq->szName, sizeof(pVirtq->szName), pcszName);
562 return VINF_SUCCESS;
563}
564
565/** API Fuunction: See header file */
566void virtioCoreR3VirtqInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs, int uVirtq)
567{
568 RT_NOREF(pszArgs);
569 PVIRTIOCORE pVirtio = PDMDEVINS_2_DATA(pDevIns, PVIRTIOCORE);
570 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
571
572 /** @todo add ability to dump physical contents described by any descriptor (using existing VirtIO core API function) */
573// bool fDump = pszArgs && (*pszArgs == 'd' || *pszArgs == 'D'); /* "dump" (avail phys descriptor)"
574
575 uint16_t uAvailIdx = virtioReadAvailRingIdx(pDevIns, pVirtio, pVirtq);
576 uint16_t uAvailIdxShadow = pVirtq->uAvailIdxShadow;
577
578 uint16_t uUsedIdx = virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq);
579 uint16_t uUsedIdxShadow = pVirtq->uUsedIdxShadow;
580
581 PVIRTQBUF pVirtqBuf = NULL;
582
583 bool fEmpty = IS_VIRTQ_EMPTY(pDevIns, pVirtio, pVirtq);
584
585 LogFunc(("%s, empty = %s\n", pVirtq->szName, fEmpty ? "true" : "false"));
586
587 int cSendSegs = 0, cReturnSegs = 0;
588 if (!fEmpty)
589 {
590 virtioCoreR3VirtqAvailBufPeek(pDevIns, pVirtio, uVirtq, &pVirtqBuf);
591 cSendSegs = pVirtqBuf->pSgPhysSend ? pVirtqBuf->pSgPhysSend->cSegs : 0;
592 cReturnSegs = pVirtqBuf->pSgPhysReturn ? pVirtqBuf->pSgPhysReturn->cSegs : 0;
593 }
594
595 bool fAvailNoInterrupt = virtioReadAvailRingFlags(pDevIns, pVirtio, pVirtq) & VIRTQ_AVAIL_F_NO_INTERRUPT;
596 bool fUsedNoNotify = virtioReadUsedRingFlags(pDevIns, pVirtio, pVirtq) & VIRTQ_USED_F_NO_NOTIFY;
597
598
599 pHlp->pfnPrintf(pHlp, " queue enabled: ........... %s\n", pVirtq->uEnable ? "true" : "false");
600 pHlp->pfnPrintf(pHlp, " size: .................... %d\n", pVirtq->uSize);
601 pHlp->pfnPrintf(pHlp, " notify offset: ........... %d\n", pVirtq->uNotifyOffset);
602 if (pVirtio->fMsiSupport)
603 pHlp->pfnPrintf(pHlp, " MSIX vector: ....... %4.4x\n", pVirtq->uMsix);
604 pHlp->pfnPrintf(pHlp, "\n");
605 pHlp->pfnPrintf(pHlp, " avail ring (%d entries):\n", uAvailIdx - uAvailIdxShadow);
606 pHlp->pfnPrintf(pHlp, " index: ................ %d\n", uAvailIdx);
607 pHlp->pfnPrintf(pHlp, " shadow: ............... %d\n", uAvailIdxShadow);
608 pHlp->pfnPrintf(pHlp, " flags: ................ %s\n", fAvailNoInterrupt ? "NO_INTERRUPT" : "");
609 pHlp->pfnPrintf(pHlp, "\n");
610 pHlp->pfnPrintf(pHlp, " used ring (%d entries):\n", uUsedIdx - uUsedIdxShadow);
611 pHlp->pfnPrintf(pHlp, " index: ................ %d\n", uUsedIdx);
612 pHlp->pfnPrintf(pHlp, " shadow: ............... %d\n", uUsedIdxShadow);
613 pHlp->pfnPrintf(pHlp, " flags: ................ %s\n", fUsedNoNotify ? "NO_NOTIFY" : "");
614 pHlp->pfnPrintf(pHlp, "\n");
615 if (!fEmpty)
616 {
617 pHlp->pfnPrintf(pHlp, " desc chain:\n");
618 pHlp->pfnPrintf(pHlp, " head idx: ............. %d\n", uUsedIdx);
619 pHlp->pfnPrintf(pHlp, " segs: ................. %d\n", cSendSegs + cReturnSegs);
620 pHlp->pfnPrintf(pHlp, " refCnt ................ %d\n", pVirtqBuf->cRefs);
621 pHlp->pfnPrintf(pHlp, "\n");
622 pHlp->pfnPrintf(pHlp, " host-to-guest (%d bytes):\n", pVirtqBuf->cbPhysSend);
623 pHlp->pfnPrintf(pHlp, " segs: .............. %d\n", cSendSegs);
624 if (cSendSegs)
625 {
626 pHlp->pfnPrintf(pHlp, " index: ............. %d\n", pVirtqBuf->pSgPhysSend->idxSeg);
627 pHlp->pfnPrintf(pHlp, " unsent ............. %d\n", pVirtqBuf->pSgPhysSend->cbSegLeft);
628 }
629 pHlp->pfnPrintf(pHlp, "\n");
630 pHlp->pfnPrintf(pHlp, " guest-to-host (%d bytes)\n", pVirtqBuf->cbPhysReturn);
631 pHlp->pfnPrintf(pHlp, " segs: .............. %d\n", cReturnSegs);
632 if (cReturnSegs)
633 {
634 pHlp->pfnPrintf(pHlp, " index: ............. %d\n", pVirtqBuf->pSgPhysReturn->idxSeg);
635 pHlp->pfnPrintf(pHlp, " unsent ............. %d\n", pVirtqBuf->pSgPhysReturn->cbSegLeft);
636 }
637 } else
638 pHlp->pfnPrintf(pHlp, " No desc chains available\n");
639 pHlp->pfnPrintf(pHlp, "\n");
640
641}
642
/** API Function: See header file
 *
 * Atomically adds a reference to the descriptor-chain buffer.
 *
 * @returns the new reference count, or UINT32_MAX if @a pVirtqBuf is NULL or
 *          its magic is invalid (freed/corrupt buffer).
 */
uint32_t virtioCoreR3VirtqBufRetain(PVIRTQBUF pVirtqBuf)
{
    AssertReturn(pVirtqBuf, UINT32_MAX);
    AssertReturn(pVirtqBuf->u32Magic == VIRTQBUF_MAGIC, UINT32_MAX);
    uint32_t cRefs = ASMAtomicIncU32(&pVirtqBuf->cRefs);
    Assert(cRefs > 1);  /* retaining implies the caller already held a reference */
    Assert(cRefs < 16); /* sanity: counts this high suggest a reference leak */
    return cRefs;
}
653
654
655/** API Function: See header file */
656uint32_t virtioCoreR3VirtqBufRelease(PVIRTIOCORE pVirtio, PVIRTQBUF pVirtqBuf)
657{
658 if (!pVirtqBuf)
659 return 0;
660 AssertReturn(pVirtqBuf, 0);
661 AssertReturn(pVirtqBuf->u32Magic == VIRTQBUF_MAGIC, 0);
662 uint32_t cRefs = ASMAtomicDecU32(&pVirtqBuf->cRefs);
663 Assert(cRefs < 16);
664 if (cRefs == 0)
665 {
666 pVirtqBuf->u32Magic = ~VIRTQBUF_MAGIC;
667 RTMemFree(pVirtqBuf);
668 STAM_REL_COUNTER_INC(&pVirtio->StatDescChainsFreed);
669 }
670 return cRefs;
671}
672
/** API Function: See header file
 *
 * Raises a device-configuration-changed interrupt (ISR config bit / MSI-X
 * config vector) to tell the guest driver to re-read device config.
 */
void virtioCoreNotifyConfigChanged(PVIRTIOCORE pVirtio)
{
    virtioKick(pVirtio->pDevInsR3, pVirtio, VIRTIO_ISR_DEVICE_CONFIG, pVirtio->uMsixConfig);
}
678
679/** API Function: See header file */
680void virtioCoreVirtqEnableNotify(PVIRTIOCORE pVirtio, uint16_t uVirtq, bool fEnable)
681{
682
683 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
684 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
685
686 if (pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
687 {
688 uint16_t fFlags = virtioReadUsedRingFlags(pVirtio->pDevInsR3, pVirtio, pVirtq);
689
690 if (fEnable)
691 fFlags &= ~VIRTQ_USED_F_NO_NOTIFY;
692 else
693 fFlags |= VIRTQ_USED_F_NO_NOTIFY;
694
695 virtioWriteUsedRingFlags(pVirtio->pDevInsR3, pVirtio, pVirtq, fFlags);
696 }
697}
698
/** API function: See Header file
 *
 * Flags the device as needing a reset (DEVICE_NEEDS_RESET status bit) and,
 * when the driver is up, marks the config generation as pending update and
 * raises a config-change interrupt so the guest notices.
 */
void virtioCoreResetAll(PVIRTIOCORE pVirtio)
{
    LogFunc(("\n"));
    pVirtio->fDeviceStatus |= VIRTIO_STATUS_DEVICE_NEEDS_RESET;
    if (pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
    {
        pVirtio->fGenUpdatePending = true;
        virtioKick(pVirtio->pDevInsR3, pVirtio, VIRTIO_ISR_DEVICE_CONFIG, pVirtio->uMsixConfig);
    }
}
710
/** API function: See Header file
 *
 * Fetches the next available descriptor chain without advancing the avail
 * shadow index (i.e. a non-destructive "get"; see the fRemove=false below).
 */
int virtioCoreR3VirtqAvailBufPeek(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq,
                                  PPVIRTQBUF ppVirtqBuf)
{
    return virtioCoreR3VirtqAvailBufGet(pDevIns, pVirtio, uVirtq, ppVirtqBuf, false);
}
717
/** API function: See Header file
 *
 * Advances the device's avail shadow index past the current entry, i.e.
 * "consumes" the buffer a prior peek returned without re-reading it.
 *
 * @returns VINF_SUCCESS, VERR_INVALID_STATE if the driver/queue isn't ready,
 *          or VERR_NOT_AVAILABLE if the avail ring is empty.
 */
int virtioCoreR3VirtqAvailBufNext(PVIRTIOCORE pVirtio, uint16_t uVirtq)
{
    Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];

    AssertMsgReturn(IS_DRIVER_OK(pVirtio) && pVirtq->uEnable,
                    ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);

    if (IS_VIRTQ_EMPTY(pVirtio->pDevInsR3, pVirtio, pVirtq))
        return VERR_NOT_AVAILABLE;

    Log6Func(("%s avail shadow idx: %u\n", pVirtq->szName, pVirtq->uAvailIdxShadow));
    pVirtq->uAvailIdxShadow++;

    return VINF_SUCCESS;
}
735
736
/** API Function: See header file
 *
 * Walks the descriptor chain starting at descriptor table index @a uHeadIdx,
 * collecting device-writable descriptors into the "IN" segment array and
 * device-readable ones into "OUT", and returns them wrapped in a freshly
 * allocated, reference-counted VIRTQBUF (caller releases it with
 * virtioCoreR3VirtqBufRelease()).
 *
 * @returns VINF_SUCCESS, VERR_INVALID_POINTER/PARAMETER on bad args,
 *          VERR_INVALID_STATE if the driver/queue isn't ready, or
 *          VERR_NO_MEMORY if the chain structure can't be allocated.
 */
int virtioCoreR3VirtqAvailBufGet(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq,
                                 uint16_t uHeadIdx, PPVIRTQBUF ppVirtqBuf)
{
    AssertReturn(ppVirtqBuf, VERR_INVALID_POINTER);
    *ppVirtqBuf = NULL;

    AssertMsgReturn(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues),
                    ("uVirtq out of range"), VERR_INVALID_PARAMETER);

    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];

    AssertMsgReturn(IS_DRIVER_OK(pVirtio) && pVirtq->uEnable,
                    ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);

    uint16_t uDescIdx = uHeadIdx;

    Log6Func(("%s DESC CHAIN: (head) desc_idx=%u\n", pVirtio->aVirtqueues[uVirtq].szName, uHeadIdx));

    /*
     * Allocate and initialize the descriptor chain structure.
     */
    PVIRTQBUF pVirtqBuf = (PVIRTQBUF)RTMemAllocZ(sizeof(VIRTQBUF_T));
    AssertReturn(pVirtqBuf, VERR_NO_MEMORY);
    pVirtqBuf->u32Magic  = VIRTQBUF_MAGIC;
    pVirtqBuf->cRefs     = 1;           /* caller owns the initial reference */
    pVirtqBuf->uHeadIdx  = uHeadIdx;
    pVirtqBuf->uVirtq    = uVirtq;
    *ppVirtqBuf = pVirtqBuf;

    /*
     * Gather segments.
     */
    VIRTQ_DESC_T desc;

    uint32_t cbIn     = 0;              /* total bytes of device-writable space  */
    uint32_t cbOut    = 0;              /* total bytes of device-readable data   */
    uint32_t cSegsIn  = 0;
    uint32_t cSegsOut = 0;

    PVIRTIOSGSEG paSegsIn  = pVirtqBuf->aSegsIn;
    PVIRTIOSGSEG paSegsOut = pVirtqBuf->aSegsOut;

    do
    {
        PVIRTIOSGSEG pSeg;

        /*
         * Malicious guests may go beyond paSegsIn or paSegsOut boundaries by linking
         * several descriptors into a loop. Since there is no legitimate way to get a sequences of
         * linked descriptors exceeding the total number of descriptors in the ring (see @bugref{8620}),
         * the following aborts I/O if breach and employs a simple log throttling algorithm to notify.
         */
        if (cSegsIn + cSegsOut >= VIRTQ_MAX_ENTRIES)
        {
            static volatile uint32_t s_cMessages  = 0;
            static volatile uint32_t s_cThreshold = 1;
            /* Log the first occurrence, then only every 10th, 100th, ... to avoid log floods. */
            if (ASMAtomicIncU32(&s_cMessages) == ASMAtomicReadU32(&s_cThreshold))
            {
                LogRelMax(64, ("Too many linked descriptors; check if the guest arranges descriptors in a loop.\n"));
                if (ASMAtomicReadU32(&s_cMessages) != 1)
                    LogRelMax(64, ("(the above error has occured %u times so far)\n", ASMAtomicReadU32(&s_cMessages)));
                ASMAtomicWriteU32(&s_cThreshold, ASMAtomicReadU32(&s_cThreshold) * 10);
            }
            break;
        }
        RT_UNTRUSTED_VALIDATED_FENCE(); /* the loop bound above validates guest-controlled chain length */

        virtioReadDesc(pDevIns, pVirtio, pVirtq, uDescIdx, &desc);

        if (desc.fFlags & VIRTQ_DESC_F_WRITE)
        {
            /* Device-writable descriptor: guest receives data here (IN direction). */
            Log6Func(("%s IN  desc_idx=%u seg=%u addr=%RGp cb=%u\n", pVirtq->szName, uDescIdx, cSegsIn, desc.GCPhysBuf, desc.cb));
            cbIn += desc.cb;
            pSeg = &paSegsIn[cSegsIn++];
        }
        else
        {
            /* Device-readable descriptor: guest supplies data here (OUT direction). */
            Log6Func(("%s OUT desc_idx=%u seg=%u addr=%RGp cb=%u\n", pVirtq->szName, uDescIdx, cSegsOut, desc.GCPhysBuf, desc.cb));
            cbOut += desc.cb;
            pSeg = &paSegsOut[cSegsOut++];
#ifdef DEEP_DEBUG
            if (LogIs11Enabled())
            {
                virtioCoreGCPhysHexDump(pDevIns, desc.GCPhysBuf, desc.cb, 0, NULL);
                Log(("\n"));
            }
#endif
        }

        pSeg->GCPhys = desc.GCPhysBuf;
        pSeg->cbSeg  = desc.cb;

        uDescIdx = desc.uDescIdxNext;
    } while (desc.fFlags & VIRTQ_DESC_F_NEXT);

    /*
     * Add segments to the descriptor chain structure.
     */
    if (cSegsIn)
    {
        virtioCoreGCPhysChainInit(&pVirtqBuf->SgBufIn, paSegsIn, cSegsIn);
        pVirtqBuf->pSgPhysReturn = &pVirtqBuf->SgBufIn;
        pVirtqBuf->cbPhysReturn  = cbIn;
        STAM_REL_COUNTER_ADD(&pVirtio->StatDescChainsSegsIn, cSegsIn);
    }

    if (cSegsOut)
    {
        virtioCoreGCPhysChainInit(&pVirtqBuf->SgBufOut, paSegsOut, cSegsOut);
        pVirtqBuf->pSgPhysSend = &pVirtqBuf->SgBufOut;
        pVirtqBuf->cbPhysSend  = cbOut;
        STAM_REL_COUNTER_ADD(&pVirtio->StatDescChainsSegsOut, cSegsOut);
    }

    STAM_REL_COUNTER_INC(&pVirtio->StatDescChainsAllocated);
    Log6Func(("%s -- segs OUT: %u (%u bytes)   IN: %u (%u bytes) --\n",
              pVirtq->szName, cSegsOut, cbOut, cSegsIn, cbIn));

    return VINF_SUCCESS;
}
858
859/** API function: See Header file */
860int virtioCoreR3VirtqAvailBufGet(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq,
861 PPVIRTQBUF ppVirtqBuf, bool fRemove)
862{
863 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
864 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
865
866 if (IS_VIRTQ_EMPTY(pDevIns, pVirtio, pVirtq))
867 return VERR_NOT_AVAILABLE;
868
869 uint16_t uHeadIdx = virtioReadAvailDescIdx(pDevIns, pVirtio, pVirtq, pVirtq->uAvailIdxShadow);
870
871 if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
872 virtioWriteUsedAvailEvent(pDevIns,pVirtio, pVirtq, pVirtq->uAvailIdxShadow + 1);
873
874 if (fRemove)
875 pVirtq->uAvailIdxShadow++;
876
877 int rc = virtioCoreR3VirtqAvailBufGet(pDevIns, pVirtio, uVirtq, uHeadIdx, ppVirtqBuf);
878 return rc;
879}
880
/** API function: See Header file
 *
 * Copies the client's (host-virtual) return data, if any, into the
 * guest-writable ("IN") segments of the supplied descriptor chain, then
 * records the chain's head index in the used ring at the shadow slot.
 * The used ring's public index is deliberately NOT updated here; the chain
 * only becomes visible to the guest once the caller invokes
 * virtioCoreVirtqUsedRingSync().
 *
 * @param pDevIns        The device instance.
 * @param pVirtio        Pointer to the shared virtio state.
 * @param uVirtq         Index (into pVirtio->aVirtqueues) of target virtq.
 * @param pSgVirtReturn  Host-virtual S/G buffer to copy to the guest, or
 *                       NULL to return the buffer with no payload.
 * @param pVirtqBuf      The descriptor chain being returned to the guest.
 * @param fFence         Whether to emit a copy fence after the data copy.
 *
 * @returns VINF_SUCCESS, VERR_INVALID_STATE if the driver isn't ready, or
 *          VERR_BUFFER_OVERFLOW if the data exceeds the chain's IN capacity.
 */
int virtioCoreR3VirtqUsedBufPut(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq, PRTSGBUF pSgVirtReturn,
                                PVIRTQBUF pVirtqBuf, bool fFence)
{
    Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];

    /* Guest-physical S/G chain of the buffer's device-writable segments. */
    PVIRTIOSGBUF pSgPhysReturn = pVirtqBuf->pSgPhysReturn;

    Assert(pVirtqBuf->u32Magic == VIRTQBUF_MAGIC);
    Assert(pVirtqBuf->cRefs > 0);

    AssertMsgReturn(IS_DRIVER_OK(pVirtio), ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);

    Log6Func(("Copying client data to %s, desc chain (head desc_idx %d)\n",
              VIRTQNAME(pVirtio, uVirtq), virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq)));

    /* Copy s/g buf (virtual memory) to guest phys mem (IN direction). */

    size_t cbCopy = 0, cbTotal = 0, cbRemain = 0;

    if (pSgVirtReturn)
    {
        size_t cbTarget = virtioCoreGCPhysChainCalcBufSize(pSgPhysReturn);
        cbRemain = cbTotal = RTSgBufCalcTotalLength(pSgVirtReturn);
        /* Refuse to write past the end of the guest-provided IN segments. */
        AssertMsgReturn(cbTarget >= cbRemain, ("No space to write data to phys memory"), VERR_BUFFER_OVERFLOW);
        virtioCoreGCPhysChainReset(pSgPhysReturn);
        /* Walk both S/G chains in lock-step, each iteration copying as much as
         * fits in whichever side's current segment has less room left. */
        while (cbRemain)
        {
            cbCopy = RT_MIN(pSgVirtReturn->cbSegLeft, pSgPhysReturn->cbSegLeft);
            Assert(cbCopy > 0);
            PDMDevHlpPhysWrite(pDevIns, (RTGCPHYS)pSgPhysReturn->GCPhysCur, pSgVirtReturn->pvSegCur, cbCopy);
            RTSgBufAdvance(pSgVirtReturn, cbCopy);
            virtioCoreGCPhysChainAdvance(pSgPhysReturn, cbCopy);
            cbRemain -= cbCopy;
        }

        if (fFence)
            RT_UNTRUSTED_NONVOLATILE_COPY_FENCE(); /* needed? */

        /* Total is written into the 32-bit used-element length field below. */
        Assert(!(cbCopy >> 32));
    }

    /* If this write-ahead crosses threshold where the driver wants to get an event flag it */
    if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
        if (pVirtq->uUsedIdxShadow == virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq))
            pVirtq->fUsedRingEvent = true;

    /*
     * Place used buffer's descriptor in used ring but don't update used ring's slot index.
     * That will be done with a subsequent client call to virtioCoreVirtqUsedRingSync() */
    virtioWriteUsedElem(pDevIns, pVirtio, pVirtq, pVirtq->uUsedIdxShadow++, pVirtqBuf->uHeadIdx, (uint32_t)cbTotal);

    if (pSgVirtReturn)
        Log6Func((".... Copied %zu bytes in %d segs to %u byte buffer, residual=%zu\n",
                  cbTotal - cbRemain, pSgVirtReturn->cSegs, pVirtqBuf->cbPhysReturn, pVirtqBuf->cbPhysReturn - cbTotal));

    Log6Func(("Write ahead used_idx=%u, %s used_idx=%u\n",
              pVirtq->uUsedIdxShadow, VIRTQNAME(pVirtio, uVirtq), virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq)));

    return VINF_SUCCESS;
}
943
944
945#endif /* IN_RING3 */
946
947/** API function: See Header file */
948int virtioCoreVirtqUsedRingSync(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq)
949{
950 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
951 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
952
953 AssertMsgReturn(IS_DRIVER_OK(pVirtio) && pVirtq->uEnable,
954 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
955
956 Log6Func(("Updating %s used_idx to %u\n", pVirtq->szName, pVirtq->uUsedIdxShadow));
957
958 virtioWriteUsedRingIdx(pDevIns, pVirtio, pVirtq, pVirtq->uUsedIdxShadow);
959 virtioCoreNotifyGuestDriver(pDevIns, pVirtio, uVirtq);
960
961 return VINF_SUCCESS;
962}
963
964/**
965 * This is called from the MMIO callback code when the guest does an MMIO access to the
966 * mapped queue notification capability area corresponding to a particular queue, to notify
967 * the queue handler of available data in the avail ring of the queue (VirtIO 1.0, 4.1.4.4.1)
968 *
969 * @param pDevIns The device instance.
970 * @param pVirtio Pointer to the shared virtio state.
971 * @param uVirtq Virtq to check for guest interrupt handling preference
972 * @param uNotifyIdx Notification index
973 */
974static void virtioCoreVirtqNotified(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq, uint16_t uNotifyIdx)
975{
976 PVIRTIOCORECC pVirtioCC = PDMDEVINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
977
978 /* See VirtIO 1.0, section 4.1.5.2 It implies that uVirtq and uNotifyIdx should match.
979 * Disregarding this notification may cause throughput to stop, however there's no way to know
980 * which was queue was intended for wake-up if the two parameters disagree. */
981
982 AssertMsg(uNotifyIdx == uVirtq,
983 ("Guest kicked virtq %d's notify addr w/non-corresponding virtq idx %d\n",
984 uVirtq, uNotifyIdx));
985 RT_NOREF(uNotifyIdx);
986
987 AssertReturnVoid(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
988 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
989
990 Log6Func(("%s (desc chains: %u)\n", pVirtq->szName,
991 virtioCoreVirtqAvailBufCount_inline(pDevIns, pVirtio, pVirtq)));
992
993 /* Inform client */
994 pVirtioCC->pfnVirtqNotified(pDevIns, pVirtio, uVirtq);
995 RT_NOREF2(pVirtio, pVirtq);
996}
997
998/**
999 * Trigger MSI-X or INT# interrupt to notify guest of data added to used ring of
1000 * the specified virtq, depending on the interrupt configuration of the device
1001 * and depending on negotiated and realtime constraints flagged by the guest driver.
1002 *
1003 * See VirtIO 1.0 specification (section 2.4.7).
1004 *
1005 * @param pDevIns The device instance.
1006 * @param pVirtio Pointer to the shared virtio state.
1007 * @param uVirtq Virtq to check for guest interrupt handling preference
1008 */
1009static void virtioCoreNotifyGuestDriver(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq)
1010{
1011 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
1012 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
1013
1014 if (!IS_DRIVER_OK(pVirtio))
1015 {
1016 LogFunc(("Guest driver not in ready state.\n"));
1017 return;
1018 }
1019
1020 if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
1021 {
1022 if (pVirtq->fUsedRingEvent)
1023 {
1024#ifdef IN_RING3
1025 Log6Func(("...kicking guest %s, VIRTIO_F_EVENT_IDX set and threshold (%d) reached\n",
1026 pVirtq->szName, (uint16_t)virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq)));
1027#endif
1028 virtioKick(pDevIns, pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtq->uMsix);
1029 pVirtq->fUsedRingEvent = false;
1030 return;
1031 }
1032#ifdef IN_RING3
1033 Log6Func(("...skip interrupt %s, VIRTIO_F_EVENT_IDX set but threshold (%d) not reached (%d)\n",
1034 pVirtq->szName,(uint16_t)virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq), pVirtq->uUsedIdxShadow));
1035#endif
1036 }
1037 else
1038 {
1039 /** If guest driver hasn't suppressed interrupts, interrupt */
1040 if (!(virtioReadAvailRingFlags(pDevIns, pVirtio, pVirtq) & VIRTQ_AVAIL_F_NO_INTERRUPT))
1041 {
1042 virtioKick(pDevIns, pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtq->uMsix);
1043 return;
1044 }
1045 Log6Func(("...skipping interrupt for %s (guest set VIRTQ_AVAIL_F_NO_INTERRUPT)\n", pVirtq->szName));
1046 }
1047}
1048
1049/**
1050 * Raise interrupt or MSI-X
1051 *
1052 * @param pDevIns The device instance.
1053 * @param pVirtio Pointer to the shared virtio state.
1054 * @param uCause Interrupt cause bit mask to set in PCI ISR port.
1055 * @param uVec MSI-X vector, if enabled
1056 */
1057static int virtioKick(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint8_t uCause, uint16_t uMsixtor)
1058{
1059 if (uCause == VIRTIO_ISR_VIRTQ_INTERRUPT)
1060 Log6Func(("reason: buffer added to 'used' ring.\n"));
1061 else
1062 if (uCause == VIRTIO_ISR_DEVICE_CONFIG)
1063 Log6Func(("reason: device config change\n"));
1064
1065 if (!pVirtio->fMsiSupport)
1066 {
1067 pVirtio->uISR |= uCause;
1068 PDMDevHlpPCISetIrq(pDevIns, 0, PDM_IRQ_LEVEL_HIGH);
1069 }
1070 else if (uMsixtor != VIRTIO_MSI_NO_VECTOR)
1071 PDMDevHlpPCISetIrq(pDevIns, uMsixtor, 1);
1072 return VINF_SUCCESS;
1073}
1074
1075/**
1076 * Lower interrupt (Called when guest reads ISR and when resetting)
1077 *
1078 * @param pDevIns The device instance.
1079 */
1080static void virtioLowerInterrupt(PPDMDEVINS pDevIns, uint16_t uMsixtor)
1081{
1082 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1083 if (!pVirtio->fMsiSupport)
1084 PDMDevHlpPCISetIrq(pDevIns, 0, PDM_IRQ_LEVEL_LOW);
1085 else if (uMsixtor != VIRTIO_MSI_NO_VECTOR)
1086 PDMDevHlpPCISetIrq(pDevIns, pVirtio->uMsixConfig, PDM_IRQ_LEVEL_LOW);
1087}
1088
1089#ifdef IN_RING3
1090static void virtioResetVirtq(PVIRTIOCORE pVirtio, uint16_t uVirtq)
1091{
1092 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
1093 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
1094
1095 pVirtq->uAvailIdxShadow = 0;
1096 pVirtq->uUsedIdxShadow = 0;
1097 pVirtq->uEnable = false;
1098 pVirtq->uSize = VIRTQ_MAX_ENTRIES;
1099 pVirtq->uNotifyOffset = uVirtq;
1100 pVirtq->uMsix = uVirtq + 2;
1101 pVirtq->fUsedRingEvent = false;
1102
1103 if (!pVirtio->fMsiSupport) /* VirtIO 1.0, 4.1.4.3 and 4.1.5.1.2 */
1104 pVirtq->uMsix = VIRTIO_MSI_NO_VECTOR;
1105
1106 virtioLowerInterrupt(pVirtio->pDevInsR3, pVirtq->uMsix);
1107}
1108
1109static void virtioResetDevice(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio)
1110{
1111 Log2Func(("\n"));
1112 pVirtio->uDeviceFeaturesSelect = 0;
1113 pVirtio->uDriverFeaturesSelect = 0;
1114 pVirtio->uConfigGeneration = 0;
1115 pVirtio->fDeviceStatus = 0;
1116 pVirtio->uISR = 0;
1117
1118 if (!pVirtio->fMsiSupport)
1119 virtioLowerInterrupt(pDevIns, 0);
1120 else
1121 {
1122 virtioLowerInterrupt(pDevIns, pVirtio->uMsixConfig);
1123 for (int i = 0; i < VIRTQ_MAX_COUNT; i++)
1124 virtioLowerInterrupt(pDevIns, pVirtio->aVirtqueues[i].uMsix);
1125 }
1126
1127 if (!pVirtio->fMsiSupport) /* VirtIO 1.0, 4.1.4.3 and 4.1.5.1.2 */
1128 pVirtio->uMsixConfig = VIRTIO_MSI_NO_VECTOR;
1129
1130 for (uint16_t uVirtq = 0; uVirtq < VIRTQ_MAX_COUNT; uVirtq++)
1131 virtioResetVirtq(pVirtio, uVirtq);
1132}
1133
1134/**
1135 * Invoked by this implementation when guest driver resets the device.
1136 * The driver itself will not until the device has read the status change.
1137 */
1138static void virtioGuestR3WasReset(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC)
1139{
1140 LogFunc(("Guest reset the device\n"));
1141
1142 /* Let the client know */
1143 pVirtioCC->pfnStatusChanged(pVirtio, pVirtioCC, 0);
1144 virtioResetDevice(pDevIns, pVirtio);
1145}
1146#endif /* IN_RING3 */
1147
1148/**
1149 * Handle accesses to Common Configuration capability
1150 *
1151 * @returns VBox status code
1152 *
1153 * @param pDevIns The device instance.
1154 * @param pVirtio Pointer to the shared virtio state.
1155 * @param pVirtioCC Pointer to the current context virtio state.
1156 * @param fWrite Set if write access, clear if read access.
1157 * @param uOffsetOfAccess The common configuration capability offset.
1158 * @param cb Number of bytes to read or write
1159 * @param pv Pointer to location to write to or read from
1160 */
1161static int virtioCommonCfgAccessed(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC,
1162 int fWrite, uint32_t uOffsetOfAccess, unsigned cb, void *pv)
1163{
1164 uint16_t uVirtq = pVirtio->uVirtqSelect;
1165 int rc = VINF_SUCCESS;
1166 uint64_t val;
1167 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1168 {
1169 if (fWrite) /* Guest WRITE pCommonCfg>uDeviceFeatures */
1170 {
1171 /* VirtIO 1.0, 4.1.4.3 states device_feature is a (guest) driver readonly field,
1172 * yet the linux driver attempts to write/read it back twice */
1173 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1174 LogFunc(("... WARNING: Guest attempted to write readonly virtio_pci_common_cfg.device_feature (ignoring)\n"));
1175 return VINF_IOM_MMIO_UNUSED_00;
1176 }
1177 else /* Guest READ pCommonCfg->uDeviceFeatures */
1178 {
1179 switch (pVirtio->uDeviceFeaturesSelect)
1180 {
1181 case 0:
1182 val = pVirtio->uDeviceFeatures & UINT32_C(0xffffffff);
1183 memcpy(pv, &val, cb);
1184 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1185 break;
1186 case 1:
1187 val = pVirtio->uDeviceFeatures >> 32;
1188 memcpy(pv, &val, cb);
1189 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess + sizeof(uint32_t));
1190 break;
1191 default:
1192 LogFunc(("Guest read uDeviceFeatures with out of range selector (%#x), returning 0\n",
1193 pVirtio->uDeviceFeaturesSelect));
1194 return VINF_IOM_MMIO_UNUSED_00;
1195 }
1196 }
1197 }
1198 else
1199 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1200 {
1201 if (fWrite) /* Guest WRITE pCommonCfg->udriverFeatures */
1202 {
1203 switch (pVirtio->uDriverFeaturesSelect)
1204 {
1205 case 0:
1206 memcpy(&pVirtio->uDriverFeatures, pv, cb);
1207 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1208 break;
1209 case 1:
1210 memcpy((char *)&pVirtio->uDriverFeatures + sizeof(uint32_t), pv, cb);
1211 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess + sizeof(uint32_t));
1212 break;
1213 default:
1214 LogFunc(("Guest wrote uDriverFeatures with out of range selector (%#x), returning 0\n",
1215 pVirtio->uDriverFeaturesSelect));
1216 return VINF_SUCCESS;
1217 }
1218 }
1219 /* Guest READ pCommonCfg->udriverFeatures */
1220 {
1221 switch (pVirtio->uDriverFeaturesSelect)
1222 {
1223 case 0:
1224 val = pVirtio->uDriverFeatures & 0xffffffff;
1225 memcpy(pv, &val, cb);
1226 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1227 break;
1228 case 1:
1229 val = (pVirtio->uDriverFeatures >> 32) & 0xffffffff;
1230 memcpy(pv, &val, cb);
1231 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess + 4);
1232 break;
1233 default:
1234 LogFunc(("Guest read uDriverFeatures with out of range selector (%#x), returning 0\n",
1235 pVirtio->uDriverFeaturesSelect));
1236 return VINF_IOM_MMIO_UNUSED_00;
1237 }
1238 }
1239 }
1240 else
1241 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uNumVirtqs, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1242 {
1243 if (fWrite)
1244 {
1245 Log2Func(("Guest attempted to write readonly virtio_pci_common_cfg.num_queues\n"));
1246 return VINF_SUCCESS;
1247 }
1248 *(uint16_t *)pv = VIRTQ_MAX_COUNT;
1249 VIRTIO_DEV_CONFIG_LOG_ACCESS(uNumVirtqs, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1250 }
1251 else
1252 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(fDeviceStatus, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1253 {
1254 if (fWrite) /* Guest WRITE pCommonCfg->fDeviceStatus */
1255 {
1256 pVirtio->fDeviceStatus = *(uint8_t *)pv;
1257 bool fDeviceReset = pVirtio->fDeviceStatus == 0;
1258
1259 if (LogIs7Enabled())
1260 {
1261 char szOut[80] = { 0 };
1262 virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut));
1263 LogFunc(("Guest wrote fDeviceStatus ................ (%s)\n", szOut));
1264 }
1265 bool const fStatusChanged =
1266 (pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK) != (pVirtio->uPrevDeviceStatus & VIRTIO_STATUS_DRIVER_OK);
1267
1268 if (fDeviceReset || fStatusChanged)
1269 {
1270#ifdef IN_RING0
1271 /* Since VirtIO status changes are cumbersome by nature, e.g. not a benchmark priority,
1272 * handle the rest in R3 to facilitate logging or whatever dev-specific client needs to do */
1273 Log6Func(("RING0 => RING3 (demote)\n"));
1274 return VINF_IOM_R3_MMIO_WRITE;
1275#endif
1276 }
1277
1278#ifdef IN_RING3
1279 /*
1280 * Notify client only if status actually changed from last time and when we're reset.
1281 */
1282 if (fDeviceReset)
1283 virtioGuestR3WasReset(pDevIns, pVirtio, pVirtioCC);
1284
1285 if (fStatusChanged)
1286 pVirtioCC->pfnStatusChanged(pVirtio, pVirtioCC, pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK);
1287#endif
1288 /*
1289 * Save the current status for the next write so we can see what changed.
1290 */
1291 pVirtio->uPrevDeviceStatus = pVirtio->fDeviceStatus;
1292 }
1293 else /* Guest READ pCommonCfg->fDeviceStatus */
1294 {
1295 *(uint8_t *)pv = pVirtio->fDeviceStatus;
1296
1297 if (LogIs7Enabled())
1298 {
1299 char szOut[80] = { 0 };
1300 virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut));
1301 LogFunc(("Guest read fDeviceStatus ................ (%s)\n", szOut));
1302 }
1303 }
1304 }
1305 else
1306 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixConfig, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1307 VIRTIO_DEV_CONFIG_ACCESS( uMsixConfig, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1308 else
1309 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uDeviceFeaturesSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1310 VIRTIO_DEV_CONFIG_ACCESS( uDeviceFeaturesSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1311 else
1312 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uDriverFeaturesSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1313 VIRTIO_DEV_CONFIG_ACCESS( uDriverFeaturesSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1314 else
1315 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uConfigGeneration, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1316 VIRTIO_DEV_CONFIG_ACCESS( uConfigGeneration, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1317 else
1318 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uVirtqSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1319 VIRTIO_DEV_CONFIG_ACCESS( uVirtqSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1320 else
1321 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( GCPhysVirtqDesc, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1322 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( GCPhysVirtqDesc, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1323 else
1324 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( GCPhysVirtqAvail, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1325 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( GCPhysVirtqAvail, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1326 else
1327 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( GCPhysVirtqUsed, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1328 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( GCPhysVirtqUsed, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1329 else
1330 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uSize, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1331 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uSize, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1332 else
1333 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uEnable, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1334 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uEnable, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1335 else
1336 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uNotifyOffset, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1337 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uNotifyOffset, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1338 else
1339 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsix, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1340 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uMsix, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1341 else
1342 {
1343 Log2Func(("Bad guest %s access to virtio_pci_common_cfg: uOffsetOfAccess=%#x (%d), cb=%d\n",
1344 fWrite ? "write" : "read ", uOffsetOfAccess, uOffsetOfAccess, cb));
1345 return fWrite ? VINF_SUCCESS : VINF_IOM_MMIO_UNUSED_00;
1346 }
1347
1348#ifndef IN_RING3
1349 RT_NOREF(pDevIns, pVirtioCC);
1350#endif
1351 return rc;
1352}
1353
1354/**
1355 * @callback_method_impl{FNIOMMMIONEWREAD,
1356 * Memory mapped I/O Handler for PCI Capabilities read operations.}
1357 *
1358 * This MMIO handler specifically supports the VIRTIO_PCI_CAP_PCI_CFG capability defined
1359 * in the VirtIO 1.0 specification, section 4.1.4.7, and as such is restricted to reads
1360 * of 1, 2 or 4 bytes, only.
1361 *
1362 */
1363static DECLCALLBACK(VBOXSTRICTRC) virtioMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
1364{
1365 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1366 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1367 AssertReturn(cb == 1 || cb == 2 || cb == 4, VERR_INVALID_PARAMETER);
1368 Assert(pVirtio == (PVIRTIOCORE)pvUser); RT_NOREF(pvUser);
1369
1370 uint32_t uOffset;
1371 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocDeviceCap))
1372 {
1373#ifdef IN_RING3
1374 /*
1375 * Callback to client to manage device-specific configuration.
1376 */
1377 VBOXSTRICTRC rcStrict = pVirtioCC->pfnDevCapRead(pDevIns, uOffset, pv, cb);
1378
1379 /*
1380 * Additionally, anytime any part of the device-specific configuration (which our client maintains)
1381 * is READ it needs to be checked to see if it changed since the last time any part was read, in
1382 * order to maintain the config generation (see VirtIO 1.0 spec, section 4.1.4.3.1)
1383 */
1384 bool fDevSpecificFieldChanged = RT_BOOL(memcmp(pVirtioCC->pbDevSpecificCfg + uOffset,
1385 pVirtioCC->pbPrevDevSpecificCfg + uOffset,
1386 RT_MIN(cb, pVirtioCC->cbDevSpecificCfg - uOffset)));
1387
1388 memcpy(pVirtioCC->pbPrevDevSpecificCfg, pVirtioCC->pbDevSpecificCfg, pVirtioCC->cbDevSpecificCfg);
1389
1390 if (pVirtio->fGenUpdatePending || fDevSpecificFieldChanged)
1391 {
1392 ++pVirtio->uConfigGeneration;
1393 Log6Func(("Bumped cfg. generation to %d because %s%s\n",
1394 pVirtio->uConfigGeneration,
1395 fDevSpecificFieldChanged ? "<dev cfg changed> " : "",
1396 pVirtio->fGenUpdatePending ? "<update was pending>" : ""));
1397 pVirtio->fGenUpdatePending = false;
1398 }
1399
1400 virtioLowerInterrupt(pDevIns, 0);
1401 return rcStrict;
1402#else
1403 return VINF_IOM_R3_MMIO_READ;
1404#endif
1405 }
1406
1407 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocCommonCfgCap))
1408 return virtioCommonCfgAccessed(pDevIns, pVirtio, pVirtioCC, false /* fWrite */, uOffset, cb, pv);
1409
1410 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocIsrCap) && cb == sizeof(uint8_t))
1411 {
1412 *(uint8_t *)pv = pVirtio->uISR;
1413 Log6Func(("Read and clear ISR\n"));
1414 pVirtio->uISR = 0; /* VirtIO specification requires reads of ISR to clear it */
1415 virtioLowerInterrupt(pDevIns, 0);
1416 return VINF_SUCCESS;
1417 }
1418
1419 ASSERT_GUEST_MSG_FAILED(("Bad read access to mapped capabilities region: off=%RGp cb=%u\n", off, cb));
1420 return VINF_IOM_MMIO_UNUSED_00;
1421}
1422
1423/**
1424 * @callback_method_impl{FNIOMMMIONEWREAD,
1425 * Memory mapped I/O Handler for PCI Capabilities write operations.}
1426 *
1427 * This MMIO handler specifically supports the VIRTIO_PCI_CAP_PCI_CFG capability defined
1428 * in the VirtIO 1.0 specification, section 4.1.4.7, and as such is restricted to writes
1429 * of 1, 2 or 4 bytes, only.
1430 */
1431static DECLCALLBACK(VBOXSTRICTRC) virtioMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
1432{
1433 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1434 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1435
1436 AssertReturn(cb == 1 || cb == 2 || cb == 4, VERR_INVALID_PARAMETER);
1437
1438 Assert(pVirtio == (PVIRTIOCORE)pvUser); RT_NOREF(pvUser);
1439 uint32_t uOffset;
1440 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocDeviceCap))
1441 {
1442#ifdef IN_RING3
1443 /*
1444 * Foreward this MMIO write access for client to deal with.
1445 */
1446 return pVirtioCC->pfnDevCapWrite(pDevIns, uOffset, pv, cb);
1447#else
1448 return VINF_IOM_R3_MMIO_WRITE;
1449#endif
1450 }
1451
1452 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocCommonCfgCap))
1453 return virtioCommonCfgAccessed(pDevIns, pVirtio, pVirtioCC, true /* fWrite */, uOffset, cb, (void *)pv);
1454
1455 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocIsrCap) && cb == sizeof(uint8_t))
1456 {
1457 pVirtio->uISR = *(uint8_t *)pv;
1458 Log6Func(("Setting uISR = 0x%02x (virtq interrupt: %d, dev confg interrupt: %d)\n",
1459 pVirtio->uISR & 0xff,
1460 pVirtio->uISR & VIRTIO_ISR_VIRTQ_INTERRUPT,
1461 RT_BOOL(pVirtio->uISR & VIRTIO_ISR_DEVICE_CONFIG)));
1462 return VINF_SUCCESS;
1463 }
1464
1465 /* This *should* be guest driver dropping index of a new descriptor in avail ring */
1466 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocNotifyCap) && cb == sizeof(uint16_t))
1467 {
1468 virtioCoreVirtqNotified(pDevIns, pVirtio, uOffset / VIRTIO_NOTIFY_OFFSET_MULTIPLIER, *(uint16_t *)pv);
1469 return VINF_SUCCESS;
1470 }
1471
1472 ASSERT_GUEST_MSG_FAILED(("Bad write access to mapped capabilities region: off=%RGp pv=%#p{%.*Rhxs} cb=%u\n", off, pv, cb, pv, cb));
1473 return VINF_SUCCESS;
1474}
1475
1476#ifdef IN_RING3
1477
1478/**
1479 * @callback_method_impl{FNPCICONFIGREAD}
1480 */
1481static DECLCALLBACK(VBOXSTRICTRC) virtioR3PciConfigRead(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
1482 uint32_t uAddress, unsigned cb, uint32_t *pu32Value)
1483{
1484 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1485 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1486 RT_NOREF(pPciDev);
1487
1488 Log7Func((" pDevIns=%p pPciDev=%p uAddress=%#x%s cb=%u pu32Value=%p\n",
1489 pDevIns, pPciDev, uAddress, uAddress < 0x10 ? " " : "", cb, pu32Value));
1490 if (uAddress == pVirtio->uPciCfgDataOff)
1491 {
1492 /*
1493 * VirtIO 1.0 spec section 4.1.4.7 describes a required alternative access capability
1494 * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space
1495 * (the virtio_pci_cfg_cap capability), and access data items.
1496 */
1497 struct virtio_pci_cap *pPciCap = &pVirtioCC->pPciCfgCap->pciCap;
1498 uint32_t uLength = pPciCap->uLength;
1499
1500 if ( (uLength != 1 && uLength != 2 && uLength != 4)
1501 || cb != uLength
1502 || pPciCap->uBar != VIRTIO_REGION_PCI_CAP)
1503 {
1504 ASSERT_GUEST_MSG_FAILED(("Guest read virtio_pci_cfg_cap.pci_cfg_data using mismatching config. Ignoring\n"));
1505 *pu32Value = UINT32_MAX;
1506 return VINF_SUCCESS;
1507 }
1508
1509 VBOXSTRICTRC rcStrict = virtioMmioRead(pDevIns, pVirtio, pPciCap->uOffset, pu32Value, cb);
1510 Log7Func(("virtio: Guest read virtio_pci_cfg_cap.pci_cfg_data, bar=%d, offset=%d, length=%d, result=%d -> %Rrc\n",
1511 pPciCap->uBar, pPciCap->uOffset, uLength, *pu32Value, VBOXSTRICTRC_VAL(rcStrict)));
1512 return rcStrict;
1513 }
1514 return VINF_PDM_PCI_DO_DEFAULT;
1515}
1516
1517/**
1518 * @callback_method_impl{FNPCICONFIGWRITE}
1519 */
1520static DECLCALLBACK(VBOXSTRICTRC) virtioR3PciConfigWrite(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
1521 uint32_t uAddress, unsigned cb, uint32_t u32Value)
1522{
1523 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1524 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1525 RT_NOREF(pPciDev);
1526
1527 Log7Func(("pDevIns=%p pPciDev=%p uAddress=%#x %scb=%u u32Value=%#x\n", pDevIns, pPciDev, uAddress, uAddress < 0xf ? " " : "", cb, u32Value));
1528 if (uAddress == pVirtio->uPciCfgDataOff)
1529 {
1530 /* VirtIO 1.0 spec section 4.1.4.7 describes a required alternative access capability
1531 * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space
1532 * (the virtio_pci_cfg_cap capability), and access data items. */
1533
1534 struct virtio_pci_cap *pPciCap = &pVirtioCC->pPciCfgCap->pciCap;
1535 uint32_t uLength = pPciCap->uLength;
1536
1537 if ( (uLength != 1 && uLength != 2 && uLength != 4)
1538 || cb != uLength
1539 || pPciCap->uBar != VIRTIO_REGION_PCI_CAP)
1540 {
1541 ASSERT_GUEST_MSG_FAILED(("Guest write virtio_pci_cfg_cap.pci_cfg_data using mismatching config. Ignoring\n"));
1542 return VINF_SUCCESS;
1543 }
1544
1545 VBOXSTRICTRC rcStrict = virtioMmioWrite(pDevIns, pVirtio, pPciCap->uOffset, &u32Value, cb);
1546 Log2Func(("Guest wrote virtio_pci_cfg_cap.pci_cfg_data, bar=%d, offset=%x, length=%x, value=%d -> %Rrc\n",
1547 pPciCap->uBar, pPciCap->uOffset, uLength, u32Value, VBOXSTRICTRC_VAL(rcStrict)));
1548 return rcStrict;
1549 }
1550 return VINF_PDM_PCI_DO_DEFAULT;
1551}
1552
1553
1554/*********************************************************************************************************************************
1555* Saved state. *
1556*********************************************************************************************************************************/
1557
1558/**
1559 * Called from the FNSSMDEVSAVEEXEC function of the device.
1560 *
1561 * @param pVirtio Pointer to the shared virtio state.
1562 * @param pHlp The ring-3 device helpers.
1563 * @param pSSM The saved state handle.
1564 * @returns VBox status code.
1565 */
1566int virtioCoreR3SaveExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
1567{
1568 LogFunc(("\n"));
1569 pHlp->pfnSSMPutU64(pSSM, VIRTIO_SAVEDSTATE_MARKER);
1570 pHlp->pfnSSMPutU32(pSSM, VIRTIO_SAVEDSTATE_VERSION);
1571
1572 pHlp->pfnSSMPutBool(pSSM, pVirtio->fGenUpdatePending);
1573 pHlp->pfnSSMPutU8( pSSM, pVirtio->fDeviceStatus);
1574 pHlp->pfnSSMPutU8( pSSM, pVirtio->uConfigGeneration);
1575 pHlp->pfnSSMPutU8( pSSM, pVirtio->uPciCfgDataOff);
1576 pHlp->pfnSSMPutU8( pSSM, pVirtio->uISR);
1577 pHlp->pfnSSMPutU16( pSSM, pVirtio->uVirtqSelect);
1578 pHlp->pfnSSMPutU32( pSSM, pVirtio->uDeviceFeaturesSelect);
1579 pHlp->pfnSSMPutU32( pSSM, pVirtio->uDriverFeaturesSelect);
1580 pHlp->pfnSSMPutU64( pSSM, pVirtio->uDriverFeatures);
1581
1582 for (uint32_t i = 0; i < VIRTQ_MAX_COUNT; i++)
1583 {
1584 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[i];
1585
1586 pHlp->pfnSSMPutGCPhys64( pSSM, pVirtq->GCPhysVirtqDesc);
1587 pHlp->pfnSSMPutGCPhys64( pSSM, pVirtq->GCPhysVirtqAvail);
1588 pHlp->pfnSSMPutGCPhys64( pSSM, pVirtq->GCPhysVirtqUsed);
1589 pHlp->pfnSSMPutU16( pSSM, pVirtq->uNotifyOffset);
1590 pHlp->pfnSSMPutU16( pSSM, pVirtq->uMsix);
1591 pHlp->pfnSSMPutU16( pSSM, pVirtq->uEnable);
1592 pHlp->pfnSSMPutU16( pSSM, pVirtq->uSize);
1593 pHlp->pfnSSMPutU16( pSSM, pVirtq->uAvailIdxShadow);
1594 pHlp->pfnSSMPutU16( pSSM, pVirtq->uUsedIdxShadow);
1595 int rc = pHlp->pfnSSMPutMem(pSSM, pVirtq->szName, 32);
1596 AssertRCReturn(rc, rc);
1597 }
1598
1599 return VINF_SUCCESS;
1600}
1601
1602/**
1603 * Called from the FNSSMDEVLOADEXEC function of the device.
1604 *
1605 * @param pVirtio Pointer to the shared virtio state.
1606 * @param pHlp The ring-3 device helpers.
1607 * @param pSSM The saved state handle.
1608 * @returns VBox status code.
1609 */
1610int virtioCoreR3LoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
1611{
1612 LogFunc(("\n"));
1613 /*
1614 * Check the marker and (embedded) version number.
1615 */
1616 uint64_t uMarker = 0;
1617 int rc = pHlp->pfnSSMGetU64(pSSM, &uMarker);
1618 AssertRCReturn(rc, rc);
1619 if (uMarker != VIRTIO_SAVEDSTATE_MARKER)
1620 return pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
1621 N_("Expected marker value %#RX64 found %#RX64 instead"),
1622 VIRTIO_SAVEDSTATE_MARKER, uMarker);
1623 uint32_t uVersion = 0;
1624 rc = pHlp->pfnSSMGetU32(pSSM, &uVersion);
1625 AssertRCReturn(rc, rc);
1626 if (uVersion != VIRTIO_SAVEDSTATE_VERSION)
1627 return pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
1628 N_("Unsupported virtio version: %u"), uVersion);
1629 /*
1630 * Load the state.
1631 */
1632 pHlp->pfnSSMGetBool( pSSM, &pVirtio->fGenUpdatePending);
1633 pHlp->pfnSSMGetU8( pSSM, &pVirtio->fDeviceStatus);
1634 pHlp->pfnSSMGetU8( pSSM, &pVirtio->uConfigGeneration);
1635 pHlp->pfnSSMGetU8( pSSM, &pVirtio->uPciCfgDataOff);
1636 pHlp->pfnSSMGetU8( pSSM, &pVirtio->uISR);
1637 pHlp->pfnSSMGetU16( pSSM, &pVirtio->uVirtqSelect);
1638 pHlp->pfnSSMGetU32( pSSM, &pVirtio->uDeviceFeaturesSelect);
1639 pHlp->pfnSSMGetU32( pSSM, &pVirtio->uDriverFeaturesSelect);
1640 pHlp->pfnSSMGetU64( pSSM, &pVirtio->uDriverFeatures);
1641
1642 for (uint32_t i = 0; i < VIRTQ_MAX_COUNT; i++)
1643 {
1644 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[i];
1645
1646 pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqDesc);
1647 pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqAvail);
1648 pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqUsed);
1649 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uNotifyOffset);
1650 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uMsix);
1651 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uEnable);
1652 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uSize);
1653 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uAvailIdxShadow);
1654 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uUsedIdxShadow);
1655 rc = pHlp->pfnSSMGetMem( pSSM, pVirtq->szName, sizeof(pVirtq->szName));
1656 AssertRCReturn(rc, rc);
1657 }
1658
1659 return VINF_SUCCESS;
1660}
1661
1662
1663/*********************************************************************************************************************************
1664* Device Level *
1665*********************************************************************************************************************************/
1666
1667/**
1668 * This must be called by the client to handle VM state changes
1669 * after the client takes care of its device-specific tasks for the state change.
1670 * (i.e. Reset, suspend, power-off, resume)
1671 *
 * @param   pVirtio     Pointer to the shared virtio state.
 * @param   enmState    The VM state change being reported (reset, suspend, power-off or resume).
1674 */
1675void virtioCoreR3VmStateChanged(PVIRTIOCORE pVirtio, VIRTIOVMSTATECHANGED enmState)
1676{
1677 LogFunc(("State changing to %s\n",
1678 virtioCoreGetStateChangeText(enmState)));
1679
1680 switch(enmState)
1681 {
1682 case kvirtIoVmStateChangedReset:
1683 virtioCoreResetAll(pVirtio);
1684 break;
1685 case kvirtIoVmStateChangedSuspend:
1686 break;
1687 case kvirtIoVmStateChangedPowerOff:
1688 break;
1689 case kvirtIoVmStateChangedResume:
1690 for (int uVirtq = 0; uVirtq < VIRTQ_MAX_COUNT; uVirtq++)
1691 {
1692 if (pVirtio->aVirtqueues[uVirtq].uEnable)
1693 virtioCoreNotifyGuestDriver(pVirtio->pDevInsR3, pVirtio, uVirtq);
1694 }
1695 break;
1696 default:
1697 LogRelFunc(("Bad enum value"));
1698 return;
1699 }
1700}
1701
1702/**
1703 * This should be called from PDMDEVREGR3::pfnDestruct.
1704 *
1705 * @param pDevIns The device instance.
1706 * @param pVirtio Pointer to the shared virtio state.
1707 * @param pVirtioCC Pointer to the ring-3 virtio state.
1708 */
1709void virtioCoreR3Term(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC)
1710{
1711 if (pVirtioCC->pbPrevDevSpecificCfg)
1712 {
1713 RTMemFree(pVirtioCC->pbPrevDevSpecificCfg);
1714 pVirtioCC->pbPrevDevSpecificCfg = NULL;
1715 }
1716 RT_NOREF(pDevIns, pVirtio);
1717}
1718
1719/** API Function: See header file */
1720int virtioCoreR3Init(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC, PVIRTIOPCIPARAMS pPciParams,
1721 const char *pcszInstance, uint64_t fDevSpecificFeatures, void *pvDevSpecificCfg, uint16_t cbDevSpecificCfg)
1722{
1723 /*
1724 * The pVirtio state must be the first member of the shared device instance
1725 * data, otherwise we cannot get our bearings in the PCI configuration callbacks.
1726 */
1727 AssertLogRelReturn(pVirtio == PDMINS_2_DATA(pDevIns, PVIRTIOCORE), VERR_STATE_CHANGED);
1728 AssertLogRelReturn(pVirtioCC == PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC), VERR_STATE_CHANGED);
1729
1730 pVirtio->pDevInsR3 = pDevIns;
1731
1732 /*
1733 * Caller must initialize these.
1734 */
1735 AssertReturn(pVirtioCC->pfnStatusChanged, VERR_INVALID_POINTER);
1736 AssertReturn(pVirtioCC->pfnVirtqNotified, VERR_INVALID_POINTER);
1737
1738#if 0 /* Until pdmR3DvHlp_PCISetIrq() impl is fixed and Assert that limits vec to 0 is removed */
1739# ifdef VBOX_WITH_MSI_DEVICES
1740 pVirtio->fMsiSupport = true;
1741# endif
1742#endif
1743
1744 /*
1745 * The host features offered include both device-specific features
1746 * and reserved feature bits (device independent)
1747 */
1748 pVirtio->uDeviceFeatures = VIRTIO_F_VERSION_1
1749 | VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED
1750 | fDevSpecificFeatures;
1751
1752 RTStrCopy(pVirtio->szInstance, sizeof(pVirtio->szInstance), pcszInstance);
1753
1754 pVirtio->fDeviceStatus = 0;
1755 pVirtioCC->cbDevSpecificCfg = cbDevSpecificCfg;
1756 pVirtioCC->pbDevSpecificCfg = (uint8_t *)pvDevSpecificCfg;
1757 pVirtioCC->pbPrevDevSpecificCfg = (uint8_t *)RTMemDup(pvDevSpecificCfg, cbDevSpecificCfg);
1758 AssertLogRelReturn(pVirtioCC->pbPrevDevSpecificCfg, VERR_NO_MEMORY);
1759
1760 /* Set PCI config registers (assume 32-bit mode) */
1761 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
1762 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
1763
1764 PDMPciDevSetRevisionId(pPciDev, DEVICE_PCI_REVISION_ID_VIRTIO);
1765 PDMPciDevSetVendorId(pPciDev, DEVICE_PCI_VENDOR_ID_VIRTIO);
1766 PDMPciDevSetSubSystemVendorId(pPciDev, DEVICE_PCI_VENDOR_ID_VIRTIO);
1767 PDMPciDevSetDeviceId(pPciDev, pPciParams->uDeviceId);
1768 PDMPciDevSetClassBase(pPciDev, pPciParams->uClassBase);
1769 PDMPciDevSetClassSub(pPciDev, pPciParams->uClassSub);
1770 PDMPciDevSetClassProg(pPciDev, pPciParams->uClassProg);
1771 PDMPciDevSetSubSystemId(pPciDev, pPciParams->uSubsystemId);
1772 PDMPciDevSetInterruptLine(pPciDev, pPciParams->uInterruptLine);
1773 PDMPciDevSetInterruptPin(pPciDev, pPciParams->uInterruptPin);
1774
1775 /* Register PCI device */
1776 int rc = PDMDevHlpPCIRegister(pDevIns, pPciDev);
1777 if (RT_FAILURE(rc))
1778 return PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: cannot register PCI Device")); /* can we put params in this error? */
1779
1780 rc = PDMDevHlpPCIInterceptConfigAccesses(pDevIns, pPciDev, virtioR3PciConfigRead, virtioR3PciConfigWrite);
1781 AssertRCReturn(rc, rc);
1782
1783
1784 /* Construct & map PCI vendor-specific capabilities for virtio host negotiation with guest driver */
1785
1786#define CFG_ADDR_2_IDX(addr) ((uint8_t)(((uintptr_t)(addr) - (uintptr_t)&pPciDev->abConfig[0])))
1787#define SET_PCI_CAP_LOC(a_pPciDev, a_pCfg, a_LocCap, a_uMmioLengthAlign) \
1788 do { \
1789 (a_LocCap).offMmio = (a_pCfg)->uOffset; \
1790 (a_LocCap).cbMmio = RT_ALIGN_T((a_pCfg)->uLength, a_uMmioLengthAlign, uint16_t); \
1791 (a_LocCap).offPci = (uint16_t)(uintptr_t)((uint8_t *)(a_pCfg) - &(a_pPciDev)->abConfig[0]); \
1792 (a_LocCap).cbPci = (a_pCfg)->uCapLen; \
1793 } while (0)
1794
1795 PVIRTIO_PCI_CAP_T pCfg;
1796 uint32_t cbRegion = 0;
1797
1798 /* Common capability (VirtIO 1.0 spec, section 4.1.4.3) */
1799 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[0x40];
1800 pCfg->uCfgType = VIRTIO_PCI_CAP_COMMON_CFG;
1801 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1802 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
1803 pCfg->uCapNext = CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen;
1804 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
1805 pCfg->uOffset = RT_ALIGN_32(0, 4); /* Currently 0, but reminder to 32-bit align if changing this */
1806 pCfg->uLength = sizeof(VIRTIO_PCI_COMMON_CFG_T);
1807 cbRegion += pCfg->uLength;
1808 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocCommonCfgCap, 2);
1809 pVirtioCC->pCommonCfgCap = pCfg;
1810
1811 /*
1812 * Notify capability (VirtIO 1.0 spec, section 4.1.4.4). Note: uLength is based on the choice
1813 * of this implementation to make each queue's uNotifyOffset equal to (VirtqSelect) ordinal
1814 * value of the queue (different strategies are possible according to spec).
1815 */
1816 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
1817 pCfg->uCfgType = VIRTIO_PCI_CAP_NOTIFY_CFG;
1818 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1819 pCfg->uCapLen = sizeof(VIRTIO_PCI_NOTIFY_CAP_T);
1820 pCfg->uCapNext = CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen;
1821 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
1822 pCfg->uOffset = pVirtioCC->pCommonCfgCap->uOffset + pVirtioCC->pCommonCfgCap->uLength;
1823 pCfg->uOffset = RT_ALIGN_32(pCfg->uOffset, 4);
1824 pCfg->uLength = VIRTQ_MAX_COUNT * VIRTIO_NOTIFY_OFFSET_MULTIPLIER + 2; /* will change in VirtIO 1.1 */
1825 cbRegion += pCfg->uLength;
1826 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocNotifyCap, 1);
1827 pVirtioCC->pNotifyCap = (PVIRTIO_PCI_NOTIFY_CAP_T)pCfg;
1828 pVirtioCC->pNotifyCap->uNotifyOffMultiplier = VIRTIO_NOTIFY_OFFSET_MULTIPLIER;
1829
1830 /* ISR capability (VirtIO 1.0 spec, section 4.1.4.5)
1831 *
1832 * VirtIO 1.0 spec says 8-bit, unaligned in MMIO space. Example/diagram
1833 * of spec shows it as a 32-bit field with upper bits 'reserved'
1834 * Will take spec's words more literally than the diagram for now.
1835 */
1836 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
1837 pCfg->uCfgType = VIRTIO_PCI_CAP_ISR_CFG;
1838 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1839 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
1840 pCfg->uCapNext = CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen;
1841 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
1842 pCfg->uOffset = pVirtioCC->pNotifyCap->pciCap.uOffset + pVirtioCC->pNotifyCap->pciCap.uLength;
1843 pCfg->uOffset = RT_ALIGN_32(pCfg->uOffset, 4);
1844 pCfg->uLength = sizeof(uint8_t);
1845 cbRegion += pCfg->uLength;
1846 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocIsrCap, 4);
1847 pVirtioCC->pIsrCap = pCfg;
1848
1849 /* PCI Cfg capability (VirtIO 1.0 spec, section 4.1.4.7)
1850 * This capability doesn't get page-MMIO mapped. Instead uBar, uOffset and uLength are intercepted
1851 * by trapping PCI configuration I/O and get modulated by consumers to locate fetch and read/write
1852 * values from any region. NOTE: The linux driver not only doesn't use this feature, it will not
1853 * even list it as present if uLength isn't non-zero and also 4-byte-aligned as the linux driver is
1854 * initializing.
1855 */
1856 pVirtio->uPciCfgDataOff = pCfg->uCapNext + RT_OFFSETOF(VIRTIO_PCI_CFG_CAP_T, uPciCfgData);
1857 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
1858 pCfg->uCfgType = VIRTIO_PCI_CAP_PCI_CFG;
1859 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1860 pCfg->uCapLen = sizeof(VIRTIO_PCI_CFG_CAP_T);
1861 pCfg->uCapNext = (pVirtio->fMsiSupport || pVirtioCC->pbDevSpecificCfg) ? CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen : 0;
1862 pCfg->uBar = 0;
1863 pCfg->uOffset = 0;
1864 pCfg->uLength = 0;
1865 cbRegion += pCfg->uLength;
1866 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocPciCfgCap, 1);
1867 pVirtioCC->pPciCfgCap = (PVIRTIO_PCI_CFG_CAP_T)pCfg;
1868
1869 if (pVirtioCC->pbDevSpecificCfg)
1870 {
1871 /* Device specific config capability (via VirtIO 1.0, section 4.1.4.6).
1872 * Client defines the device-specific config struct and passes size to virtioCoreR3Init()
1873 * to inform this. */
1874 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
1875 pCfg->uCfgType = VIRTIO_PCI_CAP_DEVICE_CFG;
1876 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1877 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
1878 pCfg->uCapNext = pVirtio->fMsiSupport ? CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen : 0;
1879 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
1880 pCfg->uOffset = pVirtioCC->pIsrCap->uOffset + pVirtioCC->pIsrCap->uLength;
1881 pCfg->uOffset = RT_ALIGN_32(pCfg->uOffset, 4);
1882 pCfg->uLength = cbDevSpecificCfg;
1883 cbRegion += pCfg->uLength;
1884 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocDeviceCap, 4);
1885 pVirtioCC->pDeviceCap = pCfg;
1886 }
1887 else
1888 Assert(pVirtio->LocDeviceCap.cbMmio == 0 && pVirtio->LocDeviceCap.cbPci == 0);
1889
1890 if (pVirtio->fMsiSupport)
1891 {
1892 PDMMSIREG aMsiReg;
1893 RT_ZERO(aMsiReg);
1894 aMsiReg.iMsixCapOffset = pCfg->uCapNext;
1895 aMsiReg.iMsixNextOffset = 0;
1896 aMsiReg.iMsixBar = VIRTIO_REGION_MSIX_CAP;
1897 aMsiReg.cMsixVectors = VBOX_MSIX_MAX_ENTRIES;
1898 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &aMsiReg); /* see MsixR3init() */
1899 if (RT_FAILURE(rc))
1900 {
1901 /* See PDMDevHlp.cpp:pdmR3DevHlp_PCIRegisterMsi */
1902 LogFunc(("Failed to configure MSI-X (%Rrc). Reverting to INTx\n", rc));
1903 pVirtio->fMsiSupport = false;
1904 }
1905 else
1906 Log2Func(("Using MSI-X for guest driver notification\n"));
1907 }
1908 else
1909 LogFunc(("MSI-X not available for VBox, using INTx notification\n"));
1910
1911 /* Set offset to first capability and enable PCI dev capabilities */
1912 PDMPciDevSetCapabilityList(pPciDev, 0x40);
1913 PDMPciDevSetStatus(pPciDev, VBOX_PCI_STATUS_CAP_LIST);
1914
1915 size_t cbSize = RTStrPrintf(pVirtioCC->pcszMmioName, sizeof(pVirtioCC->pcszMmioName), "%s MMIO", pcszInstance);
1916 if (cbSize <= 0)
1917 return PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: out of memory allocating string")); /* can we put params in this error? */
1918
1919 /* Note: The Linux driver at drivers/virtio/virtio_pci_modern.c tries to map at least a page for the
1920 * 'unknown' device-specific capability without querying the capability to figure
1921 * out size, so pad with an extra page
1922 */
1923 rc = PDMDevHlpPCIIORegionCreateMmio(pDevIns, VIRTIO_REGION_PCI_CAP, RT_ALIGN_32(cbRegion + PAGE_SIZE, PAGE_SIZE),
1924 PCI_ADDRESS_SPACE_MEM, virtioMmioWrite, virtioMmioRead, pVirtio,
1925 IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
1926 pVirtioCC->pcszMmioName,
1927 &pVirtio->hMmioPciCap);
1928 AssertLogRelRCReturn(rc, PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: cannot register PCI Capabilities address space")));
1929 /*
1930 * Statistics.
1931 */
1932 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsAllocated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1933 "Total number of allocated descriptor chains", "DescChainsAllocated");
1934 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsFreed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1935 "Total number of freed descriptor chains", "DescChainsFreed");
1936 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsSegsIn, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1937 "Total number of inbound segments", "DescChainsSegsIn");
1938 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsSegsOut, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1939 "Total number of outbound segments", "DescChainsSegsOut");
1940
1941 return VINF_SUCCESS;
1942}
1943
1944#else /* !IN_RING3 */
1945
1946/**
1947 * Sets up the core ring-0/raw-mode virtio bits.
1948 *
1949 * @returns VBox status code.
1950 * @param pDevIns The device instance.
1951 * @param pVirtio Pointer to the shared virtio state. This must be the first
1952 * member in the shared device instance data!
1953 */
1954int virtioCoreRZInit(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio)
1955{
1956 AssertLogRelReturn(pVirtio == PDMINS_2_DATA(pDevIns, PVIRTIOCORE), VERR_STATE_CHANGED);
1957
1958#ifdef FUTURE_OPTIMIZATION
1959 int rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
1960 AssertRCReturn(rc, rc);
1961#endif
1962 int rc = PDMDevHlpMmioSetUpContext(pDevIns, pVirtio->hMmioPciCap, virtioMmioWrite, virtioMmioRead, pVirtio);
1963 AssertRCReturn(rc, rc);
1964 return rc;
1965}
1966
1967#endif /* !IN_RING3 */
1968
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette