VirtualBox

source: vbox/trunk/src/VBox/Devices/VirtIO/VirtioCore.cpp@ 91705

Last change on this file since 91705 was 91705, checked in by vboxsync, 3 years ago

fix misc Windows build box gripes

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 99.4 KB
Line 
1/* $Id: VirtioCore.cpp 91705 2021-10-13 03:51:19Z vboxsync $ */
2
3/** @file
4 * VirtioCore - Virtio Core (PCI, feature & config mgt, queue mgt & proxy, notification mgt)
5 */
6
7/*
8 * Copyright (C) 2009-2021 Oracle Corporation
9 *
10 * This file is part of VirtualBox Open Source Edition (OSE), as
11 * available from http://www.virtualbox.org. This file is free software;
12 * you can redistribute it and/or modify it under the terms of the GNU
13 * General Public License (GPL) as published by the Free Software
14 * Foundation, in version 2 as it comes in the "COPYING" file of the
15 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
16 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
17 */
18
19
20/*********************************************************************************************************************************
21* Header Files *
22*********************************************************************************************************************************/
23#define LOG_GROUP LOG_GROUP_DEV_VIRTIO
24
25#include <iprt/assert.h>
26#include <iprt/uuid.h>
27#include <iprt/mem.h>
28#include <iprt/sg.h>
29#include <iprt/assert.h>
30#include <iprt/string.h>
31#include <iprt/param.h>
32#include <iprt/types.h>
33#include <VBox/log.h>
34#include <VBox/msi.h>
35#include <iprt/types.h>
36#include <VBox/AssertGuest.h>
37#include <VBox/vmm/pdmdev.h>
38#include "VirtioCore.h"
39
40
41/*********************************************************************************************************************************
42* Defined Constants And Macros *
43*********************************************************************************************************************************/
/** Log-friendly instance name of this virtio device. */
#define INSTANCE(a_pVirtio)                 ((a_pVirtio)->szInstance)
/** Log-friendly name of virtq number @a a_uVirtq of device @a a_pVirtio. */
#define VIRTQNAME(a_pVirtio, a_uVirtq)      ((a_pVirtio)->aVirtqueues[(a_uVirtq)].szName)

/** True when the guest driver has queued no unconsumed buffers on @a pVirtq's avail ring. */
#define IS_VIRTQ_EMPTY(pDevIns, pVirtio, pVirtq) \
            (virtioCoreVirtqAvailCnt(pDevIns, pVirtio, pVirtq) == 0)

/** True when the guest driver has set DRIVER_OK in the current device status. */
#define IS_DRIVER_OK(a_pVirtio)     ((a_pVirtio)->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
/** True when DRIVER_OK was set in the previously observed device status. */
#define WAS_DRIVER_OK(a_pVirtio)    ((a_pVirtio)->fPrevDeviceStatus & VIRTIO_STATUS_DRIVER_OK)

/**
 * This macro returns true if the @a a_offAccess and access length (@a
 * a_cbAccess) are within the range of the mapped capability struct described by
 * @a a_LocCapData.
 *
 * @param[in]  a_offAccess      Input:  The offset into the MMIO bar of the access.
 * @param[in]  a_cbAccess       Input:  The access size.
 * @param[out] a_offsetIntoCap  Output: uint32_t variable to return the intra-capability offset into.
 * @param[in]  a_LocCapData     Input:  The capability location info.
 */
#define MATCHES_VIRTIO_CAP_STRUCT(a_offAccess, a_cbAccess, a_offsetIntoCap, a_LocCapData) \
    (   ((a_offsetIntoCap) = (uint32_t)((a_offAccess) - (a_LocCapData).offMmio)) < (uint32_t)(a_LocCapData).cbMmio \
     && (a_offsetIntoCap) + (uint32_t)(a_cbAccess) <= (uint32_t)(a_LocCapData).cbMmio )


/** Marks the start of the virtio saved state (just for sanity). */
#define VIRTIO_SAVEDSTATE_MARKER                        UINT64_C(0x1133557799bbddff)
/** The current saved state version for the virtio core. */
#define VIRTIO_SAVEDSTATE_VERSION                       UINT32_C(1)
74
75
76/*********************************************************************************************************************************
77* Structures and Typedefs *
78*********************************************************************************************************************************/
79
80
/** @name virtq related flags
 * @{ */
#define VIRTQ_DESC_F_NEXT                   1          /**< Indicates this descriptor chains to next  */
#define VIRTQ_DESC_F_WRITE                  2          /**< Marks buffer as write-only (default ro)   */
#define VIRTQ_DESC_F_INDIRECT               4          /**< Buffer is list of buffer descriptors      */

#define VIRTQ_USED_F_NO_NOTIFY              1          /**< Dev to Drv: Don't notify when buf added   */
#define VIRTQ_AVAIL_F_NO_INTERRUPT          1          /**< Drv to Dev: Don't notify when buf eaten   */
/** @} */

/**
 * virtq related structs
 * (struct names follow VirtIO 1.0 spec, typedef use VBox style)
 *
 * NOTE: These mirror the guest-visible ring structures that are read from /
 * written to guest physical memory by the accessors below; member order and
 * sizes must not be changed.
 */
typedef struct virtq_desc
{
    uint64_t  GCPhysBuf;                               /**< addr       GC Phys. address of buffer     */
    uint32_t  cb;                                      /**< len        Buffer length                  */
    uint16_t  fFlags;                                  /**< flags      Buffer specific flags          */
    uint16_t  uDescIdxNext;                            /**< next       Idx set if VIRTIO_DESC_F_NEXT  */
} VIRTQ_DESC_T, *PVIRTQ_DESC_T;

typedef struct virtq_avail
{
    uint16_t  fFlags;                                  /**< flags      avail ring guest-to-host flags */
    uint16_t  uIdx;                                    /**< idx        Index of next free ring slot   */
    RT_FLEXIBLE_ARRAY_EXTENSION
    uint16_t  auRing[RT_FLEXIBLE_ARRAY];               /**< ring       Ring: avail drv to dev bufs    */
    //uint16_t  uUsedEventIdx;                         /**< used_event (if VIRTQ_USED_F_EVENT_IDX)    */
} VIRTQ_AVAIL_T, *PVIRTQ_AVAIL_T;

typedef struct virtq_used_elem
{
    uint32_t  uDescIdx;                                /**< idx        Start of used desc chain       */
    uint32_t  cbElem;                                  /**< len        Total len of used desc chain   */
} VIRTQ_USED_ELEM_T;

typedef struct virt_used
{
    uint16_t  fFlags;                                  /**< flags      used ring host-to-guest flags  */
    uint16_t  uIdx;                                    /**< idx        Index of next ring slot        */
    RT_FLEXIBLE_ARRAY_EXTENSION
    VIRTQ_USED_ELEM_T aRing[RT_FLEXIBLE_ARRAY];        /**< ring       Ring: used dev to drv bufs     */
    //uint16_t  uAvailEventIdx;                        /**< avail_event if (VIRTQ_USED_F_EVENT_IDX)   */
} VIRTQ_USED_T, *PVIRTQ_USED_T;
126
127
128const char *virtioCoreGetStateChangeText(VIRTIOVMSTATECHANGED enmState)
129{
130 switch (enmState)
131 {
132 case kvirtIoVmStateChangedReset: return "VM RESET";
133 case kvirtIoVmStateChangedSuspend: return "VM SUSPEND";
134 case kvirtIoVmStateChangedPowerOff: return "VM POWER OFF";
135 case kvirtIoVmStateChangedResume: return "VM RESUME";
136 default: return "<BAD ENUM>";
137 }
138}
139
140/* Internal Functions */
141
142static void virtioCoreNotifyGuestDriver(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq);
143static int virtioKick(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint8_t uCause, uint16_t uVec);
144
145/** @name Internal queue operations
146 * @{ */
147
148/**
149 * Accessor for virtq descriptor
150 */
#ifdef IN_RING3
/**
 * Reads descriptor-table entry @a idxDesc of @a pVirtq from guest physical
 * memory into @a *pDesc.  The index is wrapped modulo the queue size, which is
 * clamped to at least 1 so an unconfigured queue cannot cause a div-by-zero.
 */
DECLINLINE(void) virtioReadDesc(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq,
                                uint32_t idxDesc, PVIRTQ_DESC_T pDesc)
{
    AssertMsg(IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
    uint16_t const cVirtqItems = RT_MAX(pVirtq->uQueueSize, 1); /* Make sure to avoid div-by-zero. */

    virtioCoreGCPhysRead(pVirtio, pDevIns,
                         pVirtq->GCPhysVirtqDesc + sizeof(VIRTQ_DESC_T) * (idxDesc % cVirtqItems),
                         pDesc, sizeof(VIRTQ_DESC_T));
}
#endif
163
164/**
165 * Accessors for virtq avail ring
166 */
167#ifdef IN_RING3
/**
 * Reads avail-ring slot @a availIdx (wrapped modulo queue size) of @a pVirtq
 * from guest physical memory and returns the descriptor-chain head index the
 * driver stored there.
 */
DECLINLINE(uint16_t) virtioReadAvailDescIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint32_t availIdx)
{
    uint16_t uDescIdx;

    AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
    uint16_t const cVirtqItems = RT_MAX(pVirtq->uQueueSize, 1); /* Make sure to avoid div-by-zero. */
    virtioCoreGCPhysRead(pVirtio, pDevIns,
                         pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[availIdx % cVirtqItems]),
                         &uDescIdx, sizeof(uDescIdx));
    return uDescIdx;
}
179
/**
 * Reads the driver-written used_event field of @a pVirtq's avail ring (the used
 * index after which the driver wants an interrupt; EVENT_IDX feature).
 */
DECLINLINE(uint16_t) virtioReadAvailUsedEvent(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
{
    uint16_t uUsedEventIdx;
    /* VirtIO 1.0 uUsedEventIdx (used_event) immediately follows ring */
    AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
    virtioCoreGCPhysRead(pVirtio, pDevIns,
                         pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[pVirtq->uQueueSize]),
                         &uUsedEventIdx, sizeof(uUsedEventIdx));
    return uUsedEventIdx;
}
190#endif
191
192DECLINLINE(uint16_t) virtioReadAvailRingIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
193{
194 uint16_t uIdx = 0;
195 AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
196 virtioCoreGCPhysRead(pVirtio, pDevIns,
197 pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF(VIRTQ_AVAIL_T, uIdx),
198 &uIdx, sizeof(uIdx));
199 return uIdx;
200}
201
202DECLINLINE(uint16_t) virtioReadAvailRingFlags(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
203{
204 uint16_t fFlags = 0;
205 AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
206 virtioCoreGCPhysRead(pVirtio, pDevIns,
207 pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF(VIRTQ_AVAIL_T, fFlags),
208 &fFlags, sizeof(fFlags));
209
210 return fFlags;
211}
212
213/** @} */
214
215/** @name Accessors for virtq used ring
216 * @{
217 */
218
219#ifdef IN_RING3
/**
 * Writes one used-ring element (descriptor-chain head @a uDescIdx and total
 * written length @a uLen) into slot @a usedIdx (wrapped modulo queue size) of
 * @a pVirtq's used ring in guest physical memory.
 */
DECLINLINE(void) virtioWriteUsedElem(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq,
                                     uint32_t usedIdx, uint32_t uDescIdx, uint32_t uLen)
{
    VIRTQ_USED_ELEM_T elem = { uDescIdx,  uLen };
    AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
    uint16_t const cVirtqItems = RT_MAX(pVirtq->uQueueSize, 1); /* Make sure to avoid div-by-zero. */
    virtioCoreGCPhysWrite(pVirtio, pDevIns,
                          pVirtq->GCPhysVirtqUsed
                        + RT_UOFFSETOF_DYN(VIRTQ_USED_T, aRing[usedIdx % cVirtqItems]),
                          &elem, sizeof(elem));
}
231
232DECLINLINE(void) virtioWriteUsedRingFlags(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint16_t fFlags)
233{
234 AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
235 RT_UNTRUSTED_VALIDATED_FENCE(); /* VirtIO 1.0, Section 3.2.1.4.1 */
236 virtioCoreGCPhysWrite(pVirtio, pDevIns,
237 pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
238 &fFlags, sizeof(fFlags));
239}
240#endif
241
242DECLINLINE(void) virtioWriteUsedRingIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint16_t uIdx)
243{
244 AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
245 RT_UNTRUSTED_VALIDATED_FENCE(); /* VirtIO 1.0, Section 3.2.1.4.1 */
246 virtioCoreGCPhysWrite(pVirtio, pDevIns,
247 pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, uIdx),
248 &uIdx, sizeof(uIdx));
249}
250
251
252#ifdef IN_RING3
253DECLINLINE(uint16_t) virtioReadUsedRingIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
254{
255 uint16_t uIdx = 0;
256 AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
257 virtioCoreGCPhysRead(pVirtio, pDevIns,
258 pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, uIdx),
259 &uIdx, sizeof(uIdx));
260 return uIdx;
261}
262
263DECLINLINE(uint16_t) virtioReadUsedRingFlags(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
264{
265 uint16_t fFlags = 0;
266 AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
267 virtioCoreGCPhysRead(pVirtio, pDevIns,
268 pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
269 &fFlags, sizeof(fFlags));
270 return fFlags;
271}
272
/**
 * Writes the device's avail_event field (the avail index after which the device
 * wants to be notified; EVENT_IDX feature) into @a pVirtq's used ring.
 */
DECLINLINE(void) virtioWriteUsedAvailEvent(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint32_t uAvailEventIdx)
{
    /* VirtIO 1.0 uAvailEventIdx (avail_event) immediately follows ring */
    AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
    virtioCoreGCPhysWrite(pVirtio, pDevIns,
                          pVirtq->GCPhysVirtqUsed
                        + RT_UOFFSETOF_DYN(VIRTQ_USED_T, aRing[pVirtq->uQueueSize]),
                          &uAvailEventIdx, sizeof(uAvailEventIdx));
}
282#endif
283
/**
 * Returns how many avail-ring entries the driver has posted on @a pVirtq that
 * this device has not yet consumed, i.e. the delta between the guest-written
 * avail 'idx' and our shadow avail index.
 */
DECLINLINE(uint16_t) virtioCoreVirtqAvailCnt(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
{
    uint16_t uIdxActual = virtioReadAvailRingIdx(pDevIns, pVirtio, pVirtq);
    uint16_t uIdxShadow = pVirtq->uAvailIdxShadow;
    uint16_t uIdxDelta;

    /* NOTE(review): the spec's avail index is free-running modulo 2^16; wrapping
     * with VIRTQ_SIZE here assumes actual and shadow never drift further than
     * VIRTQ_SIZE apart -- TODO confirm against the VirtIO 1.0 spec / callers. */
    if (uIdxActual < uIdxShadow)
        uIdxDelta = (uIdxActual + VIRTQ_SIZE) - uIdxShadow;
    else
        uIdxDelta = uIdxActual - uIdxShadow;

    LogFunc(("%s, %u %s\n",
             pVirtq->szName, uIdxDelta, uIdxDelta == 1 ? "entry" : "entries"));

    return uIdxDelta;
}
/**
 * Get count of new (e.g. pending) elements in available ring.
 *
 * @param   pDevIns     The device instance.
 * @param   pVirtio     Pointer to the shared virtio state.
 * @param   uVirtq      Virtq number
 *
 * @returns how many entries have been added to ring as a delta of the consumer's
 *          avail index and the queue's guest-side current avail index.
 *          Returns 0 (with a release-log note) when the driver is not ready or
 *          the queue is not enabled (modern drivers only).
 */
uint16_t virtioCoreVirtqAvailBufCount(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq)
{
    AssertMsgReturn(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues), ("uVirtq out of range"), 0);
    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];

    if (!IS_DRIVER_OK(pVirtio))
    {
        LogRelFunc(("Driver not ready\n"));
        return 0;
    }
    /* Legacy drivers have no per-queue enable flag; only check it for modern ones. */
    if (!pVirtio->fLegacyDriver && !pVirtq->uEnable)
    {
        LogRelFunc(("virtq: %d (%s) not enabled\n", uVirtq, VIRTQNAME(pVirtio, uVirtq)));
        return 0;
    }

    return virtioCoreVirtqAvailCnt(pDevIns, pVirtio, pVirtq);
}
328
329#ifdef IN_RING3
330
331/** API Function: See header file*/
332void virtioCorePrintFeatures(VIRTIOCORE *pVirtio, PCDBGFINFOHLP pHlp)
333{
334 static struct
335 {
336 uint64_t fFeatureBit;
337 const char *pcszDesc;
338 } const s_aFeatures[] =
339 {
340 { VIRTIO_F_RING_INDIRECT_DESC, " RING_INDIRECT_DESC Driver can use descriptors with VIRTQ_DESC_F_INDIRECT flag set\n" },
341 { VIRTIO_F_RING_EVENT_IDX, " RING_EVENT_IDX Enables use_event and avail_event fields described in 2.4.7, 2.4.8\n" },
342 { VIRTIO_F_VERSION_1, " VERSION Used to detect legacy drivers.\n" },
343 };
344
345#define MAXLINE 80
346 /* Display as a single buf to prevent interceding log messages */
347 uint16_t cbBuf = RT_ELEMENTS(s_aFeatures) * 132;
348 char *pszBuf = (char *)RTMemAllocZ(cbBuf);
349 Assert(pszBuf);
350 char *cp = pszBuf;
351 for (unsigned i = 0; i < RT_ELEMENTS(s_aFeatures); ++i)
352 {
353 bool isOffered = RT_BOOL(pVirtio->uDeviceFeatures & s_aFeatures[i].fFeatureBit);
354 bool isNegotiated = RT_BOOL(pVirtio->uDriverFeatures & s_aFeatures[i].fFeatureBit);
355 cp += RTStrPrintf(cp, cbBuf - (cp - pszBuf), " %s %s %s",
356 isOffered ? "+" : "-", isNegotiated ? "x" : " ", s_aFeatures[i].pcszDesc);
357 }
358 if (pHlp)
359 pHlp->pfnPrintf(pHlp, "VirtIO Core Features Configuration\n\n"
360 " Offered Accepted Feature Description\n"
361 " ------- -------- ------- -----------\n"
362 "%s\n", pszBuf);
363#ifdef LOG_ENABLED
364 else
365 Log3(("VirtIO Core Features Configuration\n\n"
366 " Offered Accepted Feature Description\n"
367 " ------- -------- ------- -----------\n"
368 "%s\n", pszBuf));
369#endif
370 RTMemFree(pszBuf);
371}
372#endif
373
374#ifdef LOG_ENABLED
375
/** API Function: See header file
 *
 * Renders @a cb bytes at @a pv as a classic 16-bytes-per-row hex + ASCII dump
 * into one heap buffer, then emits it as a single Log() statement so the rows
 * aren't interleaved with other log traffic.  @a uBase is only added to the
 * displayed line addresses.
 */
void virtioCoreHexDump(uint8_t *pv, uint32_t cb, uint32_t uBase, const char *pszTitle)
{
/* Advances the output cursor and shrinks the remaining-space count in lockstep. */
#define ADJCURSOR(cb) pszOut += cb; cbRemain -= cb;
    /* 80 bytes per row is enough: address (6+) + 16*3 hex + group gaps + 16 ASCII + newline. */
    size_t cbPrint = 0, cbRemain = ((cb / 16) + 1) * 80;
    char *pszBuf = (char *)RTMemAllocZ(cbRemain), *pszOut = pszBuf;
    AssertMsgReturnVoid(pszBuf, ("Out of Memory"));
    if (pszTitle)
    {
        cbPrint = RTStrPrintf(pszOut, cbRemain, "%s [%d bytes]:\n", pszTitle, cb);
        ADJCURSOR(cbPrint);
    }
    for (uint32_t row = 0; row < RT_MAX(1, (cb / 16) + 1) && row * 16 < cb; row++)
    {
        cbPrint = RTStrPrintf(pszOut, cbRemain, "%04x: ", row * 16 + uBase); /* line address */
        ADJCURSOR(cbPrint);
        /* Hex columns; offsets past cb are padded with "--". */
        for (uint8_t col = 0; col < 16; col++)
        {
            uint32_t idx = row * 16 + col;
            if (idx >= cb)
                cbPrint = RTStrPrintf(pszOut, cbRemain, "-- %s", (col + 1) % 8 ? "" : " ");
            else
                cbPrint = RTStrPrintf(pszOut, cbRemain, "%02x %s", pv[idx], (col + 1) % 8 ? "" : " ");
            ADJCURSOR(cbPrint);
        }
        /* ASCII columns; non-printables become '.'. */
        for (uint32_t idx = row * 16; idx < row * 16 + 16; idx++)
        {
            cbPrint = RTStrPrintf(pszOut, cbRemain, "%c", (idx >= cb) ? ' ' : (pv[idx] >= 0x20 && pv[idx] <= 0x7e ? pv[idx] : '.'));
            ADJCURSOR(cbPrint);
        }
        *pszOut++ = '\n';
        --cbRemain;
    }
    Log(("%s\n", pszBuf));
    RTMemFree(pszBuf);
    RT_NOREF2(uBase, pv);
#undef ADJCURSOR
}
414
415/* API FUnction: See header file */
416void virtioCoreGCPhysHexDump(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, uint16_t cb, uint32_t uBase, const char *pszTitle)
417{
418 PVIRTIOCORE pVirtio = PDMDEVINS_2_DATA(pDevIns, PVIRTIOCORE);
419#define ADJCURSOR(cb) pszOut += cb; cbRemain -= cb;
420 size_t cbPrint = 0, cbRemain = ((cb / 16) + 1) * 80;
421 char *pszBuf = (char *)RTMemAllocZ(cbRemain), *pszOut = pszBuf;
422 AssertMsgReturnVoid(pszBuf, ("Out of Memory"));
423 if (pszTitle)
424 {
425 cbPrint = RTStrPrintf(pszOut, cbRemain, "%s [%d bytes]:\n", pszTitle, cb);
426 ADJCURSOR(cbPrint);
427 }
428 for (uint16_t row = 0; row < (uint16_t)RT_MAX(1, (cb / 16) + 1) && row * 16 < cb; row++)
429 {
430 uint8_t c;
431 cbPrint = RTStrPrintf(pszOut, cbRemain, "%04x: ", row * 16 + uBase); /* line address */
432 ADJCURSOR(cbPrint);
433 for (uint8_t col = 0; col < 16; col++)
434 {
435 uint32_t idx = row * 16 + col;
436 virtioCoreGCPhysRead(pVirtio, pDevIns, GCPhys + idx, &c, 1);
437 if (idx >= cb)
438 cbPrint = RTStrPrintf(pszOut, cbRemain, "-- %s", (col + 1) % 8 ? "" : " ");
439 else
440 cbPrint = RTStrPrintf(pszOut, cbRemain, "%02x %s", c, (col + 1) % 8 ? "" : " ");
441 ADJCURSOR(cbPrint);
442 }
443 for (uint16_t idx = row * 16; idx < row * 16 + 16; idx++)
444 {
445 virtioCoreGCPhysRead(pVirtio, pDevIns, GCPhys + idx, &c, 1);
446 cbPrint = RTStrPrintf(pszOut, cbRemain, "%c", (idx >= cb) ? ' ' : (c >= 0x20 && c <= 0x7e ? c : '.'));
447 ADJCURSOR(cbPrint);
448 }
449 *pszOut++ = '\n';
450 --cbRemain;
451 }
452 Log(("%s\n", pszBuf));
453 RTMemFree(pszBuf);
454 RT_NOREF(uBase);
455#undef ADJCURSOR
456}
457#endif /* LOG_ENABLED */
458
/** API function: See header file
 *
 * Returns non-zero when the attached guest driver was detected as a legacy
 * (pre-1.0) VirtIO driver, logging the fact at level 12.
 */
int virtioCoreIsLegacyMode(PVIRTIOCORE pVirtio)
{
    Log12Func(("%s", pVirtio->fLegacyDriver ? "Legacy Guest Driver handling mode\n" : ""));
    return pVirtio->fLegacyDriver;
}
465
/** API function: See header file
 *
 * Level-6 log helper for MMIO-mapped config accesses.  Power-of-two sized
 * accesses (1/2/4/8 bytes) are logged as a single right-padded "member = value"
 * line; anything else falls back to an inline hex dump of the accessed bytes.
 *
 * @param pszFunc     Caller name for the log prefix.
 * @param pszMember   Name of the accessed config member.
 * @param uMemberSize Full size of that member (to detect partial accesses).
 * @param pv          The bytes read/written.
 * @param cb          Access length.
 * @param uOffset     Offset of the access within the member.
 * @param fWrite      Non-zero for writes, zero for reads.
 * @param fHasIndex   Non-zero when @a idx should be appended as "[idx]".
 * @param idx         Optional array index for the log text.
 */
void virtioCoreLogMappedIoValue(const char *pszFunc, const char *pszMember, uint32_t uMemberSize,
                                const void *pv, uint32_t cb, uint32_t uOffset, int fWrite,
                                int fHasIndex, uint32_t idx)
{
    if (!LogIs6Enabled())
        return;

    char szIdx[16];
    if (fHasIndex)
        RTStrPrintf(szIdx, sizeof(szIdx), "[%d]", idx);
    else
        szIdx[0] = '\0';

    if (cb == 1 || cb == 2 || cb == 4 || cb == 8)
    {
        char szDepiction[64];
        size_t cchDepiction;
        if (uOffset != 0 || cb != uMemberSize) /* display bounds if partial member access */
            cchDepiction = RTStrPrintf(szDepiction, sizeof(szDepiction), "%s%s[%d:%d]",
                                       pszMember, szIdx, uOffset, uOffset + cb - 1);
        else
            cchDepiction = RTStrPrintf(szDepiction, sizeof(szDepiction), "%s%s", pszMember, szIdx);

        /* Pad with " ..." out to column 30 so values line up across log lines. */
        if (cchDepiction < 30)
            szDepiction[cchDepiction++] = ' ';
        while (cchDepiction < 30)
            szDepiction[cchDepiction++] = '.';
        szDepiction[cchDepiction] = '\0';

        /* Widen the accessed bytes into a zero-extended 64-bit value for printing. */
        RTUINT64U uValue;
        uValue.u = 0;
        memcpy(uValue.au8, pv, cb);
        Log6(("%-23s: Guest %s %s %#0*RX64\n",
              pszFunc, fWrite ? "wrote" : "read ", szDepiction, 2 + cb * 2, uValue.u));
    }
    else /* odd number or oversized access, ... log inline hex-dump style */
    {
        Log6(("%-23s: Guest %s %s%s[%d:%d]: %.*Rhxs\n",
              pszFunc, fWrite ? "wrote" : "read ", pszMember,
              szIdx, uOffset, uOffset + cb, cb, pv));
    }
    RT_NOREF2(fWrite, pszFunc); /* referenced only when logging is compiled in */
}
511
/**
 * Makes the MMIO-mapped Virtio fDeviceStatus registers non-cryptic (buffers to
 * keep the output clean during multi-threaded activity)
 *
 * Formats @a bStatus as a " | "-separated list of status-bit names into
 * @a pszBuf (capacity @a uSize); a zero status renders as "RESET".
 */
DECLINLINE(void) virtioCoreFormatDeviceStatus(uint8_t bStatus, char *pszBuf, size_t uSize)
{

/* Advances the cursor, shrinks remaining capacity, and arms the separator for the next flag. */
#define ADJCURSOR(len) cp += len; uSize -= len; sep = (char *)" | ";

    memset(pszBuf, 0, uSize);
    size_t len;
    char *cp = pszBuf;
    char *sep = (char *)""; /* empty before the first flag, " | " afterwards */

    if (bStatus == 0) {
        RTStrPrintf(cp, uSize, "RESET");
        return;
    }
    if (bStatus & VIRTIO_STATUS_ACKNOWLEDGE)
    {
        len = RTStrPrintf(cp, uSize, "ACKNOWLEDGE");
        ADJCURSOR(len);
    }
    if (bStatus & VIRTIO_STATUS_DRIVER)
    {
        len = RTStrPrintf(cp, uSize, "%sDRIVER", sep);
        ADJCURSOR(len);
    }
    if (bStatus & VIRTIO_STATUS_FEATURES_OK)
    {
        len = RTStrPrintf(cp, uSize, "%sFEATURES_OK", sep);
        ADJCURSOR(len);
    }
    if (bStatus & VIRTIO_STATUS_DRIVER_OK)
    {
        len = RTStrPrintf(cp, uSize, "%sDRIVER_OK", sep);
        ADJCURSOR(len);
    }
    if (bStatus & VIRTIO_STATUS_FAILED)
    {
        len = RTStrPrintf(cp, uSize, "%sFAILED", sep);
        ADJCURSOR(len);
    }
    if (bStatus & VIRTIO_STATUS_DEVICE_NEEDS_RESET)
        RTStrPrintf(cp, uSize, "%sNEEDS_RESET", sep);

#undef ADJCURSOR
}
560
561#ifdef IN_RING3
562
563int virtioCoreR3VirtqAttach(PVIRTIOCORE pVirtio, uint16_t uVirtq, const char *pcszName)
564{
565 LogFunc(("Attaching %s to VirtIO core\n", pcszName));
566 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
567 pVirtq->uVirtq = uVirtq;
568 pVirtq->uAvailIdxShadow = 0;
569 pVirtq->uUsedIdxShadow = 0;
570 pVirtq->fUsedRingEvent = false;
571 RTStrCopy(pVirtq->szName, sizeof(pVirtq->szName), pcszName);
572 return VINF_SUCCESS;
573}
574
575/** API Fuunction: See header file */
576void virtioCoreR3VirtqInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs, int uVirtq)
577{
578 RT_NOREF(pszArgs);
579 PVIRTIOCORE pVirtio = PDMDEVINS_2_DATA(pDevIns, PVIRTIOCORE);
580 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
581
582 /** @todo add ability to dump physical contents described by any descriptor (using existing VirtIO core API function) */
583// bool fDump = pszArgs && (*pszArgs == 'd' || *pszArgs == 'D'); /* "dump" (avail phys descriptor)"
584
585 uint16_t uAvailIdx = virtioReadAvailRingIdx(pDevIns, pVirtio, pVirtq);
586 uint16_t uAvailIdxShadow = pVirtq->uAvailIdxShadow;
587
588 uint16_t uUsedIdx = virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq);
589 uint16_t uUsedIdxShadow = pVirtq->uUsedIdxShadow;
590
591 PVIRTQBUF pVirtqBuf = NULL;
592
593 bool fEmpty = IS_VIRTQ_EMPTY(pDevIns, pVirtio, pVirtq);
594
595 LogFunc(("%s, empty = %s\n", pVirtq->szName, fEmpty ? "true" : "false"));
596
597 int cSendSegs = 0, cReturnSegs = 0;
598 if (!fEmpty)
599 {
600 virtioCoreR3VirtqAvailBufPeek(pDevIns, pVirtio, uVirtq, &pVirtqBuf);
601 cSendSegs = pVirtqBuf->pSgPhysSend ? pVirtqBuf->pSgPhysSend->cSegs : 0;
602 cReturnSegs = pVirtqBuf->pSgPhysReturn ? pVirtqBuf->pSgPhysReturn->cSegs : 0;
603 }
604
605 bool fAvailNoInterrupt = virtioReadAvailRingFlags(pDevIns, pVirtio, pVirtq) & VIRTQ_AVAIL_F_NO_INTERRUPT;
606 bool fUsedNoNotify = virtioReadUsedRingFlags(pDevIns, pVirtio, pVirtq) & VIRTQ_USED_F_NO_NOTIFY;
607
608 pHlp->pfnPrintf(pHlp, " queue enabled: ........... %s\n", pVirtq->uEnable ? "true" : "false");
609 pHlp->pfnPrintf(pHlp, " size: .................... %d\n", pVirtq->uQueueSize);
610 pHlp->pfnPrintf(pHlp, " notify offset: ........... %d\n", pVirtq->uNotifyOffset);
611 if (pVirtio->fMsiSupport)
612 pHlp->pfnPrintf(pHlp, " MSIX vector: ....... %4.4x\n", pVirtq->uMsixVector);
613 pHlp->pfnPrintf(pHlp, "\n");
614 pHlp->pfnPrintf(pHlp, " avail ring (%d entries):\n", uAvailIdx - uAvailIdxShadow);
615 pHlp->pfnPrintf(pHlp, " index: ................ %d\n", uAvailIdx);
616 pHlp->pfnPrintf(pHlp, " shadow: ............... %d\n", uAvailIdxShadow);
617 pHlp->pfnPrintf(pHlp, " flags: ................ %s\n", fAvailNoInterrupt ? "NO_INTERRUPT" : "");
618 pHlp->pfnPrintf(pHlp, "\n");
619 pHlp->pfnPrintf(pHlp, " used ring (%d entries):\n", uUsedIdx - uUsedIdxShadow);
620 pHlp->pfnPrintf(pHlp, " index: ................ %d\n", uUsedIdx);
621 pHlp->pfnPrintf(pHlp, " shadow: ............... %d\n", uUsedIdxShadow);
622 pHlp->pfnPrintf(pHlp, " flags: ................ %s\n", fUsedNoNotify ? "NO_NOTIFY" : "");
623 pHlp->pfnPrintf(pHlp, "\n");
624 if (!fEmpty)
625 {
626 pHlp->pfnPrintf(pHlp, " desc chain:\n");
627 pHlp->pfnPrintf(pHlp, " head idx: ............. %d\n", uUsedIdx);
628 pHlp->pfnPrintf(pHlp, " segs: ................. %d\n", cSendSegs + cReturnSegs);
629 pHlp->pfnPrintf(pHlp, " refCnt ................ %d\n", pVirtqBuf->cRefs);
630 pHlp->pfnPrintf(pHlp, "\n");
631 pHlp->pfnPrintf(pHlp, " host-to-guest (%d bytes):\n", pVirtqBuf->cbPhysSend);
632 pHlp->pfnPrintf(pHlp, " segs: .............. %d\n", cSendSegs);
633 if (cSendSegs)
634 {
635 pHlp->pfnPrintf(pHlp, " index: ............. %d\n", pVirtqBuf->pSgPhysSend->idxSeg);
636 pHlp->pfnPrintf(pHlp, " unsent ............. %d\n", pVirtqBuf->pSgPhysSend->cbSegLeft);
637 }
638 pHlp->pfnPrintf(pHlp, "\n");
639 pHlp->pfnPrintf(pHlp, " guest-to-host (%d bytes)\n", pVirtqBuf->cbPhysReturn);
640 pHlp->pfnPrintf(pHlp, " segs: .............. %d\n", cReturnSegs);
641 if (cReturnSegs)
642 {
643 pHlp->pfnPrintf(pHlp, " index: ............. %d\n", pVirtqBuf->pSgPhysReturn->idxSeg);
644 pHlp->pfnPrintf(pHlp, " unsent ............. %d\n", pVirtqBuf->pSgPhysReturn->cbSegLeft);
645 }
646 } else
647 pHlp->pfnPrintf(pHlp, " No desc chains available\n");
648 pHlp->pfnPrintf(pHlp, "\n");
649
650}
651
652/** API Function: See header file */
653uint32_t virtioCoreR3VirtqBufRetain(PVIRTQBUF pVirtqBuf)
654{
655 AssertReturn(pVirtqBuf, UINT32_MAX);
656 AssertReturn(pVirtqBuf->u32Magic == VIRTQBUF_MAGIC, UINT32_MAX);
657 uint32_t cRefs = ASMAtomicIncU32(&pVirtqBuf->cRefs);
658 Assert(cRefs > 1);
659 Assert(cRefs < 16);
660 return cRefs;
661}
662
663
664/** API Function: See header file */
665uint32_t virtioCoreR3VirtqBufRelease(PVIRTIOCORE pVirtio, PVIRTQBUF pVirtqBuf)
666{
667 if (!pVirtqBuf)
668 return 0;
669 AssertReturn(pVirtqBuf, 0);
670 AssertReturn(pVirtqBuf->u32Magic == VIRTQBUF_MAGIC, 0);
671 uint32_t cRefs = ASMAtomicDecU32(&pVirtqBuf->cRefs);
672 Assert(cRefs < 16);
673 if (cRefs == 0)
674 {
675 pVirtqBuf->u32Magic = ~VIRTQBUF_MAGIC;
676 RTMemFree(pVirtqBuf);
677#ifdef VBOX_WITH_STATISTICS
678 STAM_REL_COUNTER_INC(&pVirtio->StatDescChainsFreed);
679#endif
680 }
681 RT_NOREF(pVirtio);
682 return cRefs;
683}
684
/** API Function: See header file
 *
 * Signals the guest that device configuration has changed by raising the
 * config-change interrupt (MSI-X config vector when applicable).
 */
void virtioCoreNotifyConfigChanged(PVIRTIOCORE pVirtio)
{
    virtioKick(pVirtio->pDevInsR3, pVirtio, VIRTIO_ISR_DEVICE_CONFIG, pVirtio->uMsixConfig);
}
690
691/** API Function: See header file */
692void virtioCoreVirtqEnableNotify(PVIRTIOCORE pVirtio, uint16_t uVirtq, bool fEnable)
693{
694
695 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
696 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
697
698 if (IS_DRIVER_OK(pVirtio))
699 {
700 uint16_t fFlags = virtioReadUsedRingFlags(pVirtio->pDevInsR3, pVirtio, pVirtq);
701
702 if (fEnable)
703 fFlags &= ~VIRTQ_USED_F_NO_NOTIFY;
704 else
705 fFlags |= VIRTQ_USED_F_NO_NOTIFY;
706
707 virtioWriteUsedRingFlags(pVirtio->pDevInsR3, pVirtio, pVirtq, fFlags);
708 }
709}
710
711/** API function: See Header file */
712void virtioCoreResetAll(PVIRTIOCORE pVirtio)
713{
714 LogFunc(("\n"));
715 pVirtio->fDeviceStatus |= VIRTIO_STATUS_DEVICE_NEEDS_RESET;
716 if (IS_DRIVER_OK(pVirtio))
717 {
718 if (!pVirtio->fLegacyDriver)
719 pVirtio->fGenUpdatePending = true;
720 virtioKick(pVirtio->pDevInsR3, pVirtio, VIRTIO_ISR_DEVICE_CONFIG, pVirtio->uMsixConfig);
721 }
722}
723
/** API function: See Header file
 *
 * Fetches the next available descriptor chain of @a uVirtq without advancing
 * the shadow avail index (a subsequent get/next call will see the same chain).
 */
int virtioCoreR3VirtqAvailBufPeek(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq,
                                  PPVIRTQBUF ppVirtqBuf)
{
    return virtioCoreR3VirtqAvailBufGet(pDevIns, pVirtio, uVirtq, ppVirtqBuf, false);
}
730
/** API function: See Header file
 *
 * Skips past the current avail-ring entry of @a uVirtq by advancing the shadow
 * avail index (typically after a peek).
 *
 * @returns VINF_SUCCESS, VERR_INVALID_STATE if a modern driver isn't ready or
 *          the queue isn't enabled, or VERR_NOT_AVAILABLE if the ring is empty.
 */
int virtioCoreR3VirtqAvailBufNext(PVIRTIOCORE pVirtio, uint16_t uVirtq)
{
    Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];

    if (!pVirtio->fLegacyDriver)
        AssertMsgReturn((pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK) && pVirtq->uEnable,
                        ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);

    if (IS_VIRTQ_EMPTY(pVirtio->pDevInsR3, pVirtio, pVirtq))
        return VERR_NOT_AVAILABLE;

    Log6Func(("%s avail shadow idx: %u\n", pVirtq->szName, pVirtq->uAvailIdxShadow));
    pVirtq->uAvailIdxShadow++;

    return VINF_SUCCESS;
}
749
750
/** API Function: See header file
 *
 * Walks the descriptor chain of @a uVirtq starting at table index @a uHeadIdx,
 * allocating a VIRTQBUF (ref count 1, owned by the caller) that collects the
 * chain's guest-physical segments: device-writable (VIRTQ_DESC_F_WRITE)
 * descriptors go to the IN/return S/G list, the rest to the OUT/send list.
 * The loop is bounded by VIRTQ_SIZE because a (malicious) guest could link
 * descriptors into a cycle.
 *
 * @returns VINF_SUCCESS, VERR_INVALID_POINTER/PARAMETER on bad arguments,
 *          VERR_INVALID_STATE if a modern driver isn't ready, or
 *          VERR_NO_MEMORY if the VIRTQBUF allocation fails.
 */
int virtioCoreR3VirtqAvailBufGet(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq,
                                 uint16_t uHeadIdx, PPVIRTQBUF ppVirtqBuf)
{
    AssertReturn(ppVirtqBuf, VERR_INVALID_POINTER);
    *ppVirtqBuf = NULL;

    AssertMsgReturn(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues),
                    ("uVirtq out of range"), VERR_INVALID_PARAMETER);

    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];

    if (!pVirtio->fLegacyDriver)
        AssertMsgReturn((pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK) && pVirtq->uEnable,
                        ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);

    uint16_t uDescIdx = uHeadIdx;

    Log6Func(("%s DESC CHAIN: (head idx = %u)\n", pVirtio->aVirtqueues[uVirtq].szName, uHeadIdx));

    /*
     * Allocate and initialize the descriptor chain structure.
     */
    PVIRTQBUF pVirtqBuf = (PVIRTQBUF)RTMemAllocZ(sizeof(VIRTQBUF_T));
    AssertReturn(pVirtqBuf, VERR_NO_MEMORY);
    pVirtqBuf->u32Magic  = VIRTQBUF_MAGIC;
    pVirtqBuf->cRefs     = 1;
    pVirtqBuf->uHeadIdx  = uHeadIdx;
    pVirtqBuf->uVirtq    = uVirtq;
    *ppVirtqBuf          = pVirtqBuf;

    /*
     * Gather segments.
     */
    VIRTQ_DESC_T desc;

    uint32_t cbIn = 0;      /* total bytes of device-writable (IN) descriptors     */
    uint32_t cbOut = 0;     /* total bytes of device-readable (OUT) descriptors    */
    uint32_t cSegsIn = 0;
    uint32_t cSegsOut = 0;

    PVIRTIOSGSEG paSegsIn = pVirtqBuf->aSegsIn;
    PVIRTIOSGSEG paSegsOut = pVirtqBuf->aSegsOut;

    do
    {
        PVIRTIOSGSEG pSeg;

        /*
         * Malicious guests may go beyond paSegsIn or paSegsOut boundaries by linking
         * several descriptors into a loop. Since there is no legitimate way to get a sequences of
         * linked descriptors exceeding the total number of descriptors in the ring (see @bugref{8620}),
         * the following aborts I/O if breach and employs a simple log throttling algorithm to notify.
         */
        if (cSegsIn + cSegsOut >= VIRTQ_SIZE)
        {
            static volatile uint32_t s_cMessages  = 0;
            static volatile uint32_t s_cThreshold = 1;
            if (ASMAtomicIncU32(&s_cMessages) == ASMAtomicReadU32(&s_cThreshold))
            {
                LogRelMax(64, ("Too many linked descriptors; check if the guest arranges descriptors in a loop.\n"));
                if (ASMAtomicReadU32(&s_cMessages) != 1)
                    LogRelMax(64, ("(the above error has occured %u times so far)\n", ASMAtomicReadU32(&s_cMessages)));
                ASMAtomicWriteU32(&s_cThreshold, ASMAtomicReadU32(&s_cThreshold) * 10);
            }
            break;
        }
        RT_UNTRUSTED_VALIDATED_FENCE(); /* bound check above guards untrusted-index use below */

        virtioReadDesc(pDevIns, pVirtio, pVirtq, uDescIdx, &desc);

        if (desc.fFlags & VIRTQ_DESC_F_WRITE)
        {
            Log6Func(("%s IN idx=%u seg=%u addr=%RGp cb=%u\n", pVirtq->szName, uDescIdx, cSegsIn, desc.GCPhysBuf, desc.cb));
            cbIn += desc.cb;
            pSeg = &paSegsIn[cSegsIn++];
        }
        else
        {
            Log6Func(("%s OUT desc_idx=%u seg=%u addr=%RGp cb=%u\n", pVirtq->szName, uDescIdx, cSegsOut, desc.GCPhysBuf, desc.cb));
            cbOut += desc.cb;
            pSeg = &paSegsOut[cSegsOut++];
#ifdef DEEP_DEBUG
            if (LogIs11Enabled())
            {
                virtioCoreGCPhysHexDump(pDevIns, desc.GCPhysBuf, desc.cb, 0, NULL);
                Log(("\n"));
            }
#endif
        }

        pSeg->GCPhys = desc.GCPhysBuf;
        pSeg->cbSeg = desc.cb;

        uDescIdx = desc.uDescIdxNext;
    } while (desc.fFlags & VIRTQ_DESC_F_NEXT);

    /*
     * Add segments to the descriptor chain structure.
     */
    if (cSegsIn)
    {
        virtioCoreGCPhysChainInit(&pVirtqBuf->SgBufIn, paSegsIn, cSegsIn);
        pVirtqBuf->pSgPhysReturn = &pVirtqBuf->SgBufIn;
        pVirtqBuf->cbPhysReturn  = cbIn;
#ifdef VBOX_WITH_STATISTICS
        STAM_REL_COUNTER_ADD(&pVirtio->StatDescChainsSegsIn, cSegsIn);
#endif
    }

    if (cSegsOut)
    {
        virtioCoreGCPhysChainInit(&pVirtqBuf->SgBufOut, paSegsOut, cSegsOut);
        pVirtqBuf->pSgPhysSend = &pVirtqBuf->SgBufOut;
        pVirtqBuf->cbPhysSend  = cbOut;
#ifdef VBOX_WITH_STATISTICS
        STAM_REL_COUNTER_ADD(&pVirtio->StatDescChainsSegsOut, cSegsOut);
#endif
    }

#ifdef VBOX_WITH_STATISTICS
    STAM_REL_COUNTER_INC(&pVirtio->StatDescChainsAllocated);
#endif
    Log6Func(("%s -- segs OUT: %u (%u bytes) IN: %u (%u bytes) --\n",
              pVirtq->szName, cSegsOut, cbOut, cSegsIn, cbIn));

    return VINF_SUCCESS;
}
879
880/** API function: See Header file */
881int virtioCoreR3VirtqAvailBufGet(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq,
882 PPVIRTQBUF ppVirtqBuf, bool fRemove)
883{
884 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
885 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
886
887 if (IS_VIRTQ_EMPTY(pDevIns, pVirtio, pVirtq))
888 return VERR_NOT_AVAILABLE;
889
890 uint16_t uHeadIdx = virtioReadAvailDescIdx(pDevIns, pVirtio, pVirtq, pVirtq->uAvailIdxShadow);
891
892 if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
893 virtioWriteUsedAvailEvent(pDevIns,pVirtio, pVirtq, pVirtq->uAvailIdxShadow + 1);
894
895 if (fRemove)
896 pVirtq->uAvailIdxShadow++;
897
898 int rc = virtioCoreR3VirtqAvailBufGet(pDevIns, pVirtio, uVirtq, uHeadIdx, ppVirtqBuf);
899 return rc;
900}
901
/** API function: See Header file
 *
 * Copies the device's response data (if any) into the guest-physical IN segments of the
 * descriptor chain and records the chain's head in the used ring at the shadow used index.
 * The guest-visible used index is NOT updated here; the caller publishes it later via
 * virtioCoreVirtqUsedRingSync().
 */
int virtioCoreR3VirtqUsedBufPut(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq, PRTSGBUF pSgVirtReturn,
                                PVIRTQBUF pVirtqBuf, bool fFence)
{
    Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];

    /* Guest-physical segment chain (device-to-driver, i.e. IN direction) to write results into. */
    PVIRTIOSGBUF pSgPhysReturn = pVirtqBuf->pSgPhysReturn;

    /* Sanity: buffer object must be alive and still referenced. */
    Assert(pVirtqBuf->u32Magic == VIRTQBUF_MAGIC);
    Assert(pVirtqBuf->cRefs > 0);

    AssertMsgReturn(IS_DRIVER_OK(pVirtio), ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);

    Log6Func((" Copying device data to %s (%s guest), desc chain idx %d\n",
              VIRTQNAME(pVirtio, uVirtq), pVirtio->fLegacyDriver ? "legacy" : "modern", virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq)));

    /* Copy s/g buf (virtual memory) to guest phys mem (IN direction). */

    size_t cbCopy = 0, cbTotal = 0, cbRemain = 0;

    if (pSgVirtReturn)
    {
        /* The chain's IN capacity must be able to hold everything we intend to return. */
        size_t cbTarget = virtioCoreGCPhysChainCalcBufSize(pSgPhysReturn);
        cbRemain = cbTotal = RTSgBufCalcTotalLength(pSgVirtReturn);
        AssertMsgReturn(cbTarget >= cbRemain, ("No space to write data to phys memory"), VERR_BUFFER_OVERFLOW);
        virtioCoreGCPhysChainReset(pSgPhysReturn);
        /* Advance both chains in lock-step, copying the overlap of the current segments each pass. */
        while (cbRemain)
        {
            cbCopy = RT_MIN(pSgVirtReturn->cbSegLeft, pSgPhysReturn->cbSegLeft);
            Assert(cbCopy > 0);
            virtioCoreGCPhysWrite(pVirtio, pDevIns, (RTGCPHYS)pSgPhysReturn->GCPhysCur, pSgVirtReturn->pvSegCur, cbCopy);
            RTSgBufAdvance(pSgVirtReturn, cbCopy);
            virtioCoreGCPhysChainAdvance(pSgPhysReturn, cbCopy);
            cbRemain -= cbCopy;
        }

        /* Make the data globally visible before the used-ring entry (below) can be observed. */
        if (fFence)
            RT_UNTRUSTED_NONVOLATILE_COPY_FENCE(); /* needed? */

        Assert(!(cbCopy >> 32));
    }

    /* If this write-ahead crosses threshold where the driver wants to get an event, flag it */
    if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
        if (pVirtq->uUsedIdxShadow == virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq))
            pVirtq->fUsedRingEvent = true;

    /*
     * Place used buffer's descriptor in used ring but don't update used ring's slot index.
     * That will be done with a subsequent client call to virtioCoreVirtqUsedRingSync() */
    virtioWriteUsedElem(pDevIns, pVirtio, pVirtq, pVirtq->uUsedIdxShadow++, pVirtqBuf->uHeadIdx, (uint32_t)cbTotal);

    if (pSgVirtReturn)
        Log6Func((" ... %d segs, %zu bytes, copied to %u byte buf. residual: %zu bytes\n",
                  pSgVirtReturn->cSegs, cbTotal - cbRemain, pVirtqBuf->cbPhysReturn, pVirtqBuf->cbPhysReturn - cbTotal));

    Log6Func((" %s used_idx=%u\n", VIRTQNAME(pVirtio, uVirtq), virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq)));

    return VINF_SUCCESS;
}
963
964
965#endif /* IN_RING3 */
966
967/** API function: See Header file */
968int virtioCoreVirtqUsedRingSync(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq)
969{
970 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
971 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
972
973 if (!pVirtio->fLegacyDriver)
974 AssertMsgReturn((pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK) && pVirtq->uEnable,
975 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
976
977 Log6Func((" %s ++used_idx=%u\n", pVirtq->szName, pVirtq->uUsedIdxShadow));
978
979 virtioWriteUsedRingIdx(pDevIns, pVirtio, pVirtq, pVirtq->uUsedIdxShadow);
980 virtioCoreNotifyGuestDriver(pDevIns, pVirtio, uVirtq);
981
982 return VINF_SUCCESS;
983}
984
985/**
986 * This is called from the MMIO callback code when the guest does an MMIO access to the
987 * mapped queue notification capability area corresponding to a particular queue, to notify
988 * the queue handler of available data in the avail ring of the queue (VirtIO 1.0, 4.1.4.4.1)
989 *
990 * @param pDevIns The device instance.
991 * @param pVirtio Pointer to the shared virtio state.
992 * @param uVirtq Virtq to check for guest interrupt handling preference
993 * @param uNotifyIdx Notification index
994 */
995static void virtioCoreVirtqNotified(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq, uint16_t uNotifyIdx)
996{
997 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
998
999 /* See VirtIO 1.0, section 4.1.5.2 It implies that uVirtq and uNotifyIdx should match.
1000 * Disregarding this notification may cause throughput to stop, however there's no way to know
1001 * which was queue was intended for wake-up if the two parameters disagree. */
1002
1003 AssertMsg(uNotifyIdx == uVirtq,
1004 ("Guest kicked virtq %d's notify addr w/non-corresponding virtq idx %d\n",
1005 uVirtq, uNotifyIdx));
1006 RT_NOREF(uNotifyIdx);
1007
1008 AssertReturnVoid(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
1009 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
1010
1011 Log6Func(("%s (desc chains: %u)\n", pVirtq->szName,
1012 virtioCoreVirtqAvailCnt(pDevIns, pVirtio, pVirtq)));
1013
1014 /* Inform client */
1015 pVirtioCC->pfnVirtqNotified(pDevIns, pVirtio, uVirtq);
1016 RT_NOREF2(pVirtio, pVirtq);
1017}
1018
1019/**
1020 * Trigger MSI-X or INT# interrupt to notify guest of data added to used ring of
1021 * the specified virtq, depending on the interrupt configuration of the device
1022 * and depending on negotiated and realtime constraints flagged by the guest driver.
1023 *
1024 * See VirtIO 1.0 specification (section 2.4.7).
1025 *
1026 * @param pDevIns The device instance.
1027 * @param pVirtio Pointer to the shared virtio state.
1028 * @param uVirtq Virtq to check for guest interrupt handling preference
1029 */
1030static void virtioCoreNotifyGuestDriver(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq)
1031{
1032 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
1033 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
1034
1035 if (!IS_DRIVER_OK(pVirtio))
1036 {
1037 LogFunc(("Guest driver not in ready state.\n"));
1038 return;
1039 }
1040
1041 if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
1042 {
1043 if (pVirtq->fUsedRingEvent)
1044 {
1045#ifdef IN_RING3
1046 Log6Func(("...kicking guest %s, VIRTIO_F_EVENT_IDX set and threshold (%d) reached\n",
1047 pVirtq->szName, (uint16_t)virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq)));
1048#endif
1049 virtioKick(pDevIns, pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtq->uMsixVector);
1050 pVirtq->fUsedRingEvent = false;
1051 return;
1052 }
1053#ifdef IN_RING3
1054 Log6Func(("...skip interrupt %s, VIRTIO_F_EVENT_IDX set but threshold (%d) not reached (%d)\n",
1055 pVirtq->szName,(uint16_t)virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq), pVirtq->uUsedIdxShadow));
1056#endif
1057 }
1058 else
1059 {
1060 /** If guest driver hasn't suppressed interrupts, interrupt */
1061 if (!(virtioReadAvailRingFlags(pDevIns, pVirtio, pVirtq) & VIRTQ_AVAIL_F_NO_INTERRUPT))
1062 {
1063 virtioKick(pDevIns, pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtq->uMsixVector);
1064 return;
1065 }
1066 Log6Func(("...skipping interrupt for %s (guest set VIRTQ_AVAIL_F_NO_INTERRUPT)\n", pVirtq->szName));
1067 }
1068}
1069
1070/**
1071 * Raise interrupt or MSI-X
1072 *
1073 * @param pDevIns The device instance.
1074 * @param pVirtio Pointer to the shared virtio state.
1075 * @param uCause Interrupt cause bit mask to set in PCI ISR port.
1076 * @param uVec MSI-X vector, if enabled
1077 */
1078static int virtioKick(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint8_t uCause, uint16_t uMsixVector)
1079{
1080 if (uCause == VIRTIO_ISR_VIRTQ_INTERRUPT)
1081 Log6Func(("reason: buffer added to 'used' ring.\n"));
1082 else
1083 if (uCause == VIRTIO_ISR_DEVICE_CONFIG)
1084 Log6Func(("reason: device config change\n"));
1085
1086 if (!pVirtio->fMsiSupport)
1087 {
1088 pVirtio->uISR |= uCause;
1089 PDMDevHlpPCISetIrq(pDevIns, 0, PDM_IRQ_LEVEL_HIGH);
1090 }
1091 else if (uMsixVector != VIRTIO_MSI_NO_VECTOR)
1092 PDMDevHlpPCISetIrq(pDevIns, uMsixVector, 1);
1093 return VINF_SUCCESS;
1094}
1095
1096/**
1097 * Lower interrupt (Called when guest reads ISR and when resetting)
1098 *
1099 * @param pDevIns The device instance.
1100 */
1101static void virtioLowerInterrupt(PPDMDEVINS pDevIns, uint16_t uMsixVector)
1102{
1103 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1104 if (!pVirtio->fMsiSupport)
1105 PDMDevHlpPCISetIrq(pDevIns, 0, PDM_IRQ_LEVEL_LOW);
1106 else if (uMsixVector != VIRTIO_MSI_NO_VECTOR)
1107 PDMDevHlpPCISetIrq(pDevIns, pVirtio->uMsixConfig, PDM_IRQ_LEVEL_LOW);
1108}
1109
1110#ifdef IN_RING3
1111static void virtioResetVirtq(PVIRTIOCORE pVirtio, uint16_t uVirtq)
1112{
1113 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
1114 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
1115
1116 pVirtq->uQueueSize = VIRTQ_SIZE;
1117 pVirtq->uEnable = false;
1118 pVirtq->uNotifyOffset = uVirtq;
1119 pVirtq->fUsedRingEvent = false;
1120 pVirtq->uAvailIdxShadow = 0;
1121 pVirtq->uUsedIdxShadow = 0;
1122 pVirtq->uMsixVector = uVirtq + 2;
1123
1124 if (!pVirtio->fMsiSupport) /* VirtIO 1.0, 4.1.4.3 and 4.1.5.1.2 */
1125 pVirtq->uMsixVector = VIRTIO_MSI_NO_VECTOR;
1126
1127 virtioLowerInterrupt(pVirtio->pDevInsR3, pVirtq->uMsixVector);
1128}
1129
1130static void virtioResetDevice(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio)
1131{
1132 LogFunc(("Resetting device VirtIO state\n"));
1133 pVirtio->fLegacyDriver = 1; /* Assume this. Cleared if VIRTIO_F_VERSION_1 feature ack'd */
1134 pVirtio->uDeviceFeaturesSelect = 0;
1135 pVirtio->uDriverFeaturesSelect = 0;
1136 pVirtio->uConfigGeneration = 0;
1137 pVirtio->fDeviceStatus = 0;
1138 pVirtio->uISR = 0;
1139
1140 if (!pVirtio->fMsiSupport)
1141 virtioLowerInterrupt(pDevIns, 0);
1142 else
1143 {
1144 virtioLowerInterrupt(pDevIns, pVirtio->uMsixConfig);
1145 for (int i = 0; i < VIRTQ_MAX_COUNT; i++)
1146 virtioLowerInterrupt(pDevIns, pVirtio->aVirtqueues[i].uMsixVector);
1147 }
1148
1149 if (!pVirtio->fMsiSupport) /* VirtIO 1.0, 4.1.4.3 and 4.1.5.1.2 */
1150 pVirtio->uMsixConfig = VIRTIO_MSI_NO_VECTOR;
1151
1152 for (uint16_t uVirtq = 0; uVirtq < VIRTQ_MAX_COUNT; uVirtq++)
1153 virtioResetVirtq(pVirtio, uVirtq);
1154}
1155
1156/**
1157 * Invoked by this implementation when guest driver resets the device.
1158 * The driver itself will not until the device has read the status change.
1159 */
1160static void virtioGuestR3WasReset(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC)
1161{
1162 Log(("%-23s: Guest reset the device\n", __FUNCTION__));
1163
1164 /* Let the client know */
1165 pVirtioCC->pfnStatusChanged(pVirtio, pVirtioCC, 0 /* fDriverOk */);
1166 virtioResetDevice(pDevIns, pVirtio);
1167}
1168#endif /* IN_RING3 */
1169
1170/**
1171 * Handle accesses to Common Configuration capability
1172 *
1173 * @returns VBox status code
1174 *
1175 * @param pDevIns The device instance.
1176 * @param pVirtio Pointer to the shared virtio state.
1177 * @param pVirtioCC Pointer to the current context virtio state.
1178 * @param fWrite Set if write access, clear if read access.
1179 * @param uOffsetOfAccess The common configuration capability offset.
1180 * @param cb Number of bytes to read or write
1181 * @param pv Pointer to location to write to or read from
1182 */
1183static int virtioCommonCfgAccessed(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC,
1184 int fWrite, uint32_t uOffsetOfAccess, unsigned cb, void *pv)
1185{
1186 uint16_t uVirtq = pVirtio->uVirtqSelect;
1187 int rc = VINF_SUCCESS;
1188 uint64_t val;
1189 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1190 {
1191 if (fWrite) /* Guest WRITE pCommonCfg>uDeviceFeatures */
1192 {
1193 /* VirtIO 1.0, 4.1.4.3 states device_feature is a (guest) driver readonly field,
1194 * yet the linux driver attempts to write/read it back twice */
1195 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1196 LogFunc(("... WARNING: Guest attempted to write readonly virtio_pci_common_cfg.device_feature (ignoring)\n"));
1197 return VINF_IOM_MMIO_UNUSED_00;
1198 }
1199 else /* Guest READ pCommonCfg->uDeviceFeatures */
1200 {
1201 switch (pVirtio->uDeviceFeaturesSelect)
1202 {
1203 case 0:
1204 val = pVirtio->uDeviceFeatures & UINT32_C(0xffffffff);
1205 memcpy(pv, &val, cb);
1206 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1207 break;
1208 case 1:
1209 val = pVirtio->uDeviceFeatures >> 32;
1210 memcpy(pv, &val, cb);
1211 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess + sizeof(uint32_t));
1212 break;
1213 default:
1214 LogFunc(("Guest read uDeviceFeatures with out of range selector (%#x), returning 0\n",
1215 pVirtio->uDeviceFeaturesSelect));
1216 return VINF_IOM_MMIO_UNUSED_00;
1217 }
1218 }
1219 }
1220 else
1221 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1222 {
1223 if (fWrite) /* Guest WRITE pCommonCfg->udriverFeatures */
1224 {
1225 switch (pVirtio->uDriverFeaturesSelect)
1226 {
1227 case 0:
1228 memcpy(&pVirtio->uDriverFeatures, pv, cb);
1229 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1230 break;
1231 case 1:
1232 memcpy((char *)&pVirtio->uDriverFeatures + sizeof(uint32_t), pv, cb);
1233 if (pVirtio->uDriverFeatures & VIRTIO_F_VERSION_1)
1234 pVirtio->fLegacyDriver = 0;
1235 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess + sizeof(uint32_t));
1236 break;
1237 default:
1238 LogFunc(("Guest wrote uDriverFeatures with out of range selector (%#x), returning 0\n",
1239 pVirtio->uDriverFeaturesSelect));
1240 return VINF_SUCCESS;
1241 }
1242 }
1243 else /* Guest READ pCommonCfg->udriverFeatures */
1244 {
1245 switch (pVirtio->uDriverFeaturesSelect)
1246 {
1247 case 0:
1248 val = pVirtio->uDriverFeatures & 0xffffffff;
1249 memcpy(pv, &val, cb);
1250 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1251 break;
1252 case 1:
1253 val = (pVirtio->uDriverFeatures >> 32) & 0xffffffff;
1254 memcpy(pv, &val, cb);
1255 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess + 4);
1256 break;
1257 default:
1258 LogFunc(("Guest read uDriverFeatures with out of range selector (%#x), returning 0\n",
1259 pVirtio->uDriverFeaturesSelect));
1260 return VINF_IOM_MMIO_UNUSED_00;
1261 }
1262 }
1263 }
1264 else
1265 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uNumVirtqs, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1266 {
1267 if (fWrite)
1268 {
1269 Log2Func(("Guest attempted to write readonly virtio_pci_common_cfg.num_queues\n"));
1270 return VINF_SUCCESS;
1271 }
1272 *(uint16_t *)pv = VIRTQ_MAX_COUNT;
1273 VIRTIO_DEV_CONFIG_LOG_ACCESS(uNumVirtqs, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1274 }
1275 else
1276 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(fDeviceStatus, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1277 {
1278 if (fWrite) /* Guest WRITE pCommonCfg->fDeviceStatus */
1279 {
1280 pVirtio->fDeviceStatus = *(uint8_t *)pv;
1281 bool fDeviceReset = pVirtio->fDeviceStatus == 0;
1282
1283 if (LogIs7Enabled())
1284 {
1285 char szOut[80] = { 0 };
1286 virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut));
1287 Log(("%-23s: Guest wrote fDeviceStatus ................ (%s)\n", __FUNCTION__, szOut));
1288 }
1289 bool const fStatusChanged = IS_DRIVER_OK(pVirtio) != WAS_DRIVER_OK(pVirtio);
1290
1291 if (fDeviceReset || fStatusChanged)
1292 {
1293#ifdef IN_RING0
1294 /* Since VirtIO status changes are cumbersome by nature, e.g. not a benchmark priority,
1295 * handle the rest in R3 to facilitate logging or whatever dev-specific client needs to do */
1296 Log6(("%-23s: RING0 => RING3 (demote)\n", __FUNCTION__));
1297 return VINF_IOM_R3_MMIO_WRITE;
1298#endif
1299 }
1300
1301#ifdef IN_RING3
1302 /*
1303 * Notify client only if status actually changed from last time and when we're reset.
1304 */
1305 if (fDeviceReset)
1306 virtioGuestR3WasReset(pDevIns, pVirtio, pVirtioCC);
1307
1308 if (fStatusChanged)
1309 pVirtioCC->pfnStatusChanged(pVirtio, pVirtioCC, IS_DRIVER_OK(pVirtio));
1310#endif
1311 /*
1312 * Save the current status for the next write so we can see what changed.
1313 */
1314 pVirtio->fPrevDeviceStatus = pVirtio->fDeviceStatus;
1315 }
1316 else /* Guest READ pCommonCfg->fDeviceStatus */
1317 {
1318 *(uint8_t *)pv = pVirtio->fDeviceStatus;
1319
1320 if (LogIs7Enabled())
1321 {
1322 char szOut[80] = { 0 };
1323 virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut));
1324 LogFunc(("Guest read fDeviceStatus ................ (%s)\n", szOut));
1325 }
1326 }
1327 }
1328 else
1329 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixConfig, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1330 VIRTIO_DEV_CONFIG_ACCESS( uMsixConfig, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1331 else
1332 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uDeviceFeaturesSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1333 VIRTIO_DEV_CONFIG_ACCESS( uDeviceFeaturesSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1334 else
1335 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uDriverFeaturesSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1336 VIRTIO_DEV_CONFIG_ACCESS( uDriverFeaturesSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1337 else
1338 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uConfigGeneration, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1339 VIRTIO_DEV_CONFIG_ACCESS( uConfigGeneration, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1340 else
1341 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uVirtqSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1342 VIRTIO_DEV_CONFIG_ACCESS( uVirtqSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1343 else
1344 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( GCPhysVirtqDesc, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1345 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( GCPhysVirtqDesc, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1346 else
1347 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( GCPhysVirtqAvail, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1348 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( GCPhysVirtqAvail, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1349 else
1350 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( GCPhysVirtqUsed, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1351 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( GCPhysVirtqUsed, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1352 else
1353 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uQueueSize, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1354 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uQueueSize, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1355 else
1356 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uEnable, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1357 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uEnable, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1358 else
1359 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uNotifyOffset, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1360 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uNotifyOffset, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1361 else
1362 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixVector, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1363 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uMsixVector, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1364 else
1365 {
1366 Log2Func(("Bad guest %s access to virtio_pci_common_cfg: uOffsetOfAccess=%#x (%d), cb=%d\n",
1367 fWrite ? "write" : "read ", uOffsetOfAccess, uOffsetOfAccess, cb));
1368 return fWrite ? VINF_SUCCESS : VINF_IOM_MMIO_UNUSED_00;
1369 }
1370
1371#ifndef IN_RING3
1372 RT_NOREF(pDevIns, pVirtioCC);
1373#endif
1374 return rc;
1375}
1376
1377/**
1378 * @callback_method_impl{FNIOMIOPORTNEWIN)
1379 *
1380 * This I/O handler exists only to handle access from legacy drivers.
1381 */
1382
1383static DECLCALLBACK(VBOXSTRICTRC) virtioLegacyIOPortIn(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t *pu32, unsigned cb)
1384{
1385
1386 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1387 STAM_PROFILE_ADV_START(&pVirtio->CTX_SUFF(StatRead), a);
1388
1389 RT_NOREF(pvUser);
1390// LogFunc((" Read from port offset=%RTiop cb=%#x\n", offPort, cb));
1391
1392 void *pv = pu32; /* To use existing macros */
1393 int fWrite = 0; /* To use existing macros */
1394
1395 uint16_t uVirtq = pVirtio->uVirtqSelect;
1396
1397 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDeviceFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1398 {
1399 uint32_t val = pVirtio->uDeviceFeatures & UINT32_C(0xffffffff);
1400 memcpy(pu32, &val, cb);
1401 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort);
1402 }
1403 else
1404 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDriverFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1405 {
1406 uint32_t val = pVirtio->uDriverFeatures & 0xffffffff;
1407 memcpy(pu32, &val, cb);
1408 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort);
1409 }
1410 else
1411 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(fDeviceStatus, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1412 {
1413 *(uint8_t *)pu32 = pVirtio->fDeviceStatus;
1414
1415 if (LogIs7Enabled())
1416 {
1417 char szOut[80] = { 0 };
1418 virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut));
1419 Log(("%-23s: Guest read fDeviceStatus ................ (%s)\n", __FUNCTION__, szOut));
1420 }
1421 }
1422 else
1423 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(fIsrStatus, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1424 {
1425 ASSERT_GUEST_MSG(cb == 1, ("%d\n", cb));
1426 *(uint8_t *)pu32 = pVirtio->uISR;
1427 pVirtio->uISR = 0;
1428 virtioLowerInterrupt( pDevIns, 0);
1429 Log((" ISR read and cleared\n"));
1430 }
1431 else
1432 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uVirtqSelect, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1433 VIRTIO_DEV_CONFIG_ACCESS( uVirtqSelect, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio);
1434 else
1435 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uVirtqPfn, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1436 {
1437 PVIRTQUEUE pVirtQueue = &pVirtio->aVirtqueues[uVirtq];
1438 *pu32 = pVirtQueue->GCPhysVirtqDesc >> PAGE_SHIFT;
1439 Log(("%-23s: Guest read uVirtqPfn .................... %#x\n", __FUNCTION__, *pu32));
1440 }
1441 else
1442 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uQueueSize, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1443 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uQueueSize, uVirtq, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio->aVirtqueues);
1444 else
1445 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uQueueNotify, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1446 VIRTIO_DEV_CONFIG_ACCESS( uQueueNotify, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio);
1447#ifdef LEGACY_MSIX_SUPPORTED
1448 else
1449 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixConfig, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1450 VIRTIO_DEV_CONFIG_ACCESS( uMsixConfig, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio);
1451 else
1452 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixVector, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1453 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uMsixVector, uVirtq, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio->aVirtqueues);
1454#endif
1455 else if (offPort >= sizeof(VIRTIO_LEGACY_PCI_COMMON_CFG_T))
1456 {
1457 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a);
1458#if IN_RING3
1459 /* Access device-specific configuration */
1460 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1461 int rc = pVirtioCC->pfnDevCapRead(pDevIns, offPort - sizeof(VIRTIO_LEGACY_PCI_COMMON_CFG_T), pv, cb);
1462 return rc;
1463#else
1464 return VINF_IOM_R3_IOPORT_READ;
1465#endif
1466 }
1467 else
1468 {
1469 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a);
1470 Log2Func(("Bad guest read access to virtio_legacy_pci_common_cfg: offset=%#x, cb=%x\n",
1471 offPort, cb));
1472 int rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS,
1473 "virtioLegacyIOPortIn: no valid port at offset offset=%RTiop cb=%#x\n", offPort, cb);
1474 return rc;
1475 }
1476
1477 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a);
1478 return VINF_SUCCESS;
1479}
1480
1481
1482/**
1483 * @callback_method_impl{ * @callback_method_impl{FNIOMIOPORTNEWOUT}
1484 *
1485 * This I/O Port interface exists only to handle access from legacy drivers.
1486 */
1487static DECLCALLBACK(VBOXSTRICTRC) virtioLegacyIOPortOut(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t u32, unsigned cb)
1488{
1489 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1490 STAM_PROFILE_ADV_START(&pVirtio->CTX_SUFF(StatWrite), a);
1491 RT_NOREF(pvUser);
1492
1493 uint16_t uVirtq = pVirtio->uVirtqSelect;
1494 uint32_t u32OnStack = u32; /* allows us to use this impl's MMIO parsing macros */
1495 void *pv = &u32OnStack; /* To use existing macros */
1496 int fWrite = 1; /* To use existing macros */
1497
1498// LogFunc(("Write to port offset=%RTiop, cb=%#x, u32=%#x\n", offPort, cb, u32));
1499
1500 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uVirtqSelect, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1501 VIRTIO_DEV_CONFIG_ACCESS( uVirtqSelect, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio);
1502 else
1503#ifdef LEGACY_MSIX_SUPPORTED
1504 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixConfig, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1505 VIRTIO_DEV_CONFIG_ACCESS( uMsixConfig, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio);
1506 else
1507 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixVector, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1508 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uMsixVector, uVirtq, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio->aVirtqueues);
1509 else
1510#endif
1511 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDeviceFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1512 {
1513 /* Check to see if guest acknowledged unsupported features */
1514 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort);
1515 LogFunc(("... WARNING: Guest attempted to write readonly virtio_pci_common_cfg.device_feature (ignoring)\n"));
1516 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1517 return VINF_SUCCESS;
1518 }
1519 else
1520 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDriverFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1521 {
1522 memcpy(&pVirtio->uDriverFeatures, pv, cb);
1523 if ((pVirtio->uDriverFeatures & ~VIRTIO_DEV_INDEPENDENT_LEGACY_FEATURES_OFFERED) == 0)
1524 {
1525 Log(("Guest asked for features host does not support! (host=%x guest=%x)\n",
1526 VIRTIO_DEV_INDEPENDENT_LEGACY_FEATURES_OFFERED, pVirtio->uDriverFeatures));
1527 pVirtio->uDriverFeatures &= VIRTIO_DEV_INDEPENDENT_LEGACY_FEATURES_OFFERED;
1528 }
1529 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort);
1530 }
1531 else
1532 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uQueueSize, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1533 {
1534 VIRTIO_DEV_CONFIG_LOG_ACCESS(uQueueSize, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort);
1535 LogFunc(("... WARNING: Guest attempted to write readonly device_feature (queue size) (ignoring)\n"));
1536 return VINF_SUCCESS;
1537 }
1538 else
1539 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(fDeviceStatus, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1540 {
1541 bool const fDriverInitiatedReset = (pVirtio->fDeviceStatus = (uint8_t)u32) == 0;
1542 bool const fDriverStateImproved = IS_DRIVER_OK(pVirtio) && !WAS_DRIVER_OK(pVirtio);
1543
1544 if (LogIs7Enabled())
1545 {
1546 char szOut[80] = { 0 };
1547 virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut));
1548 Log(("%-23s: Guest wrote fDeviceStatus ................ (%s)\n", __FUNCTION__, szOut));
1549 }
1550
1551 if (fDriverStateImproved || fDriverInitiatedReset)
1552 {
1553#ifdef IN_RING0
1554 Log6(("%-23s: RING0 => RING3 (demote)\n", __FUNCTION__));
1555 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1556 return VINF_IOM_R3_IOPORT_WRITE;
1557#endif
1558 }
1559
1560#ifdef IN_RING3
1561 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1562 if (fDriverInitiatedReset)
1563 virtioGuestR3WasReset(pDevIns, pVirtio, pVirtioCC);
1564
1565 else if (fDriverStateImproved)
1566 pVirtioCC->pfnStatusChanged(pVirtio, pVirtioCC, 1 /* fDriverOk */);
1567
1568#endif
1569 pVirtio->fPrevDeviceStatus = pVirtio->fDeviceStatus;
1570 }
1571 else
1572 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uVirtqPfn, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1573 {
1574 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
1575 uint64_t uVirtqPfn = (uint64_t)u32;
1576
1577 if (uVirtqPfn)
1578 {
1579 /* Transitional devices calculate ring physical addresses using rigid spec-defined formulae,
1580 * instead of guest conveying respective address of each ring, as "modern" VirtIO drivers do,
1581 * thus there is no virtq PFN or single base queue address stored in instance data for
1582 * this transitional device, but rather it is derived, when read back, from GCPhysVirtqDesc */
1583
1584 pVirtq->GCPhysVirtqDesc = uVirtqPfn * VIRTIO_PAGE_SIZE;
1585 pVirtq->GCPhysVirtqAvail = pVirtq->GCPhysVirtqDesc + sizeof(VIRTQ_DESC_T) * pVirtq->uQueueSize;
1586 pVirtq->GCPhysVirtqUsed =
1587 RT_ALIGN(pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[pVirtq->uQueueSize]), VIRTIO_PAGE_SIZE);
1588 }
1589 else
1590 {
1591 /* Don't set ring addresses for queue (to meaningless values), when guest resets the virtq's PFN */
1592 pVirtq->GCPhysVirtqDesc = 0;
1593 pVirtq->GCPhysVirtqAvail = 0;
1594 pVirtq->GCPhysVirtqUsed = 0;
1595 }
1596 Log(("%-23s: Guest wrote uVirtqPfn .................... %#x:\n"
1597 "%68s... %p -> GCPhysVirtqDesc\n%68s... %p -> GCPhysVirtqAvail\n%68s... %p -> GCPhysVirtqUsed\n",
1598 __FUNCTION__, u32, " ", pVirtq->GCPhysVirtqDesc, " ", pVirtq->GCPhysVirtqAvail, " ", pVirtq->GCPhysVirtqUsed));
1599 }
1600 else
1601 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uQueueNotify, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1602 {
1603#ifdef IN_RING3
1604 ASSERT_GUEST_MSG(cb == 2, ("cb=%u\n", cb));
1605 pVirtio->uQueueNotify = u32 & 0xFFFF;
1606 if (uVirtq < VIRTQ_MAX_COUNT)
1607 {
1608 RT_UNTRUSTED_VALIDATED_FENCE();
1609
1610 /* Need to check that queue is configured. Legacy spec didn't have a queue enabled flag */
1611 if (pVirtio->aVirtqueues[pVirtio->uQueueNotify].GCPhysVirtqDesc)
1612 virtioCoreVirtqNotified(pDevIns, pVirtio, pVirtio->uQueueNotify, pVirtio->uQueueNotify /* uNotifyIdx */);
1613 else
1614 Log(("The queue (#%d) being notified has not been initialized.\n", pVirtio->uQueueNotify));
1615 }
1616 else
1617 Log(("Invalid queue number (%d)\n", pVirtio->uQueueNotify));
1618#else
1619 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1620 return VINF_IOM_R3_IOPORT_WRITE;
1621#endif
1622 }
1623 else
1624 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(fIsrStatus, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1625 {
1626 VIRTIO_DEV_CONFIG_LOG_ACCESS( fIsrStatus, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort);
1627 LogFunc(("... WARNING: Guest attempted to write readonly device_feature (ISR status) (ignoring)\n"));
1628 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1629 return VINF_SUCCESS;
1630 }
1631 else if (offPort >= sizeof(VIRTIO_LEGACY_PCI_COMMON_CFG_T))
1632 {
1633#if IN_RING3
1634
1635 /* Access device-specific configuration */
1636 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1637 return pVirtioCC->pfnDevCapWrite(pDevIns, offPort - sizeof(VIRTIO_LEGACY_PCI_COMMON_CFG_T), pv, cb);
1638#else
1639 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1640 return VINF_IOM_R3_IOPORT_WRITE;
1641#endif
1642 }
1643 else
1644 {
1645 Log2Func(("Bad guest write access to virtio_legacy_pci_common_cfg: offset=%#x, cb=0x%x\n",
1646 offPort, cb));
1647 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1648 int rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS,
1649 "virtioLegacyIOPortOut: no valid port at offset offset=%RTiop cb=0x%#x\n", offPort, cb);
1650 return rc;
1651 }
1652
1653 RT_NOREF(uVirtq);
1654 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1655 return VINF_SUCCESS;
1656}
1657
1658
/**
 * @callback_method_impl{FNIOMMMIONEWREAD,
 * Memory mapped I/O Handler for PCI Capabilities read operations.}
 *
 * This MMIO handler specifically supports the VIRTIO_PCI_CAP_PCI_CFG capability defined
 * in the VirtIO 1.0 specification, section 4.1.4.7, and as such is restricted to reads
 * of 1, 2 or 4 bytes, only.
 *
 */
static DECLCALLBACK(VBOXSTRICTRC) virtioMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
{
    PVIRTIOCORE   pVirtio   = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
    PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
    AssertReturn(cb == 1 || cb == 2 || cb == 4, VERR_INVALID_PARAMETER);
    Assert(pVirtio == (PVIRTIOCORE)pvUser); RT_NOREF(pvUser);
    STAM_PROFILE_ADV_START(&pVirtio->CTX_SUFF(StatRead), a);


    /* Dispatch on which mapped capability structure the access falls into.
     * MATCHES_VIRTIO_CAP_STRUCT also yields uOffset relative to that structure. */
    uint32_t uOffset;
    if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocDeviceCap))
    {
#ifdef IN_RING3
        /*
         * Callback to client to manage device-specific configuration.
         */
        VBOXSTRICTRC rcStrict = pVirtioCC->pfnDevCapRead(pDevIns, uOffset, pv, cb);

        /*
         * Additionally, anytime any part of the device-specific configuration (which our client maintains)
         * is READ it needs to be checked to see if it changed since the last time any part was read, in
         * order to maintain the config generation (see VirtIO 1.0 spec, section 4.1.4.3.1)
         */
        /* NOTE(review): assumes uOffset < cbDevSpecificCfg whenever LocDeviceCap matches,
         * otherwise (cbDevSpecificCfg - uOffset) would wrap -- confirm the capability
         * length registered in virtioCoreR3Init() equals cbDevSpecificCfg. */
        bool fDevSpecificFieldChanged = RT_BOOL(memcmp(pVirtioCC->pbDevSpecificCfg + uOffset,
                                                pVirtioCC->pbPrevDevSpecificCfg + uOffset,
                                                RT_MIN(cb, pVirtioCC->cbDevSpecificCfg - uOffset)));

        /* Snapshot the whole device-specific config so the next read can detect any change. */
        memcpy(pVirtioCC->pbPrevDevSpecificCfg, pVirtioCC->pbDevSpecificCfg, pVirtioCC->cbDevSpecificCfg);

        if (pVirtio->fGenUpdatePending || fDevSpecificFieldChanged)
        {
            ++pVirtio->uConfigGeneration;
            Log6Func(("Bumped cfg. generation to %d because %s%s\n",
                      pVirtio->uConfigGeneration,
                      fDevSpecificFieldChanged ? "<dev cfg changed> " : "",
                      pVirtio->fGenUpdatePending ? "<update was pending>" : ""));
            pVirtio->fGenUpdatePending = false;
        }

        virtioLowerInterrupt(pDevIns, 0);
        STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a);
        return rcStrict;
#else
        /* Device-specific config requires the R3 client callback; defer to ring-3. */
        STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a);
        return VINF_IOM_R3_MMIO_READ;
#endif
    }

    if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocCommonCfgCap))
        return virtioCommonCfgAccessed(pDevIns, pVirtio, pVirtioCC, false /* fWrite */, uOffset, cb, pv);

    if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocIsrCap))
    {
        *(uint8_t *)pv = pVirtio->uISR;
        Log6Func(("Read and clear ISR\n"));
        pVirtio->uISR = 0; /* VirtIO spec requires reads of ISR to clear it */
        virtioLowerInterrupt(pDevIns, 0);
        STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a);
        return VINF_SUCCESS;
    }

    /* Access fell outside every registered capability structure: stop in the debugger if attached. */
    ASSERT_GUEST_MSG_FAILED(("Bad read access to mapped capabilities region: off=%RGp cb=%u\n", off, cb));
    STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a);
    int rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS,
                               "virtioMmioRead: Bad MMIO access to capabilities, offset=%RTiop cb=%08x\n", off, cb);
    return rc;
}
1735
1736/**
1737 * @callback_method_impl{FNIOMMMIONEWREAD,
1738 * Memory mapped I/O Handler for PCI Capabilities write operations.}
1739 *
1740 * This MMIO handler specifically supports the VIRTIO_PCI_CAP_PCI_CFG capability defined
1741 * in the VirtIO 1.0 specification, section 4.1.4.7, and as such is restricted to writes
1742 * of 1, 2 or 4 bytes, only.
1743 */
1744static DECLCALLBACK(VBOXSTRICTRC) virtioMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
1745{
1746 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1747 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1748 AssertReturn(cb == 1 || cb == 2 || cb == 4, VERR_INVALID_PARAMETER);
1749 Assert(pVirtio == (PVIRTIOCORE)pvUser); RT_NOREF(pvUser);
1750 STAM_PROFILE_ADV_START(&pVirtio->CTX_SUFF(StatWrite), a);
1751
1752 uint32_t uOffset;
1753 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocDeviceCap))
1754 {
1755#ifdef IN_RING3
1756 /*
1757 * Foreward this MMIO write access for client to deal with.
1758 */
1759 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1760 return pVirtioCC->pfnDevCapWrite(pDevIns, uOffset, pv, cb);
1761#else
1762 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1763 return VINF_IOM_R3_MMIO_WRITE;
1764#endif
1765 }
1766
1767 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocCommonCfgCap))
1768 {
1769 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1770 return virtioCommonCfgAccessed(pDevIns, pVirtio, pVirtioCC, true /* fWrite */, uOffset, cb, (void *)pv);
1771 }
1772
1773 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocIsrCap) && cb == sizeof(uint8_t))
1774 {
1775 pVirtio->uISR = *(uint8_t *)pv;
1776 Log6Func(("Setting uISR = 0x%02x (virtq interrupt: %d, dev confg interrupt: %d)\n",
1777 pVirtio->uISR & 0xff,
1778 pVirtio->uISR & VIRTIO_ISR_VIRTQ_INTERRUPT,
1779 RT_BOOL(pVirtio->uISR & VIRTIO_ISR_DEVICE_CONFIG)));
1780 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1781 return VINF_SUCCESS;
1782 }
1783
1784 /* This *should* be guest driver dropping index of a new descriptor in avail ring */
1785 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocNotifyCap) && cb == sizeof(uint16_t))
1786 {
1787 virtioCoreVirtqNotified(pDevIns, pVirtio, uOffset / VIRTIO_NOTIFY_OFFSET_MULTIPLIER, *(uint16_t *)pv);
1788 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1789 return VINF_SUCCESS;
1790 }
1791
1792 ASSERT_GUEST_MSG_FAILED(("Bad write access to mapped capabilities region: off=%RGp pv=%#p{%.*Rhxs} cb=%u\n", off, pv, cb, pv, cb));
1793 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1794 int rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS,
1795 "virtioMmioRead: Bad MMIO access to capabilities, offset=%RTiop cb=%08x\n", off, cb);
1796 return rc;
1797}
1798
1799#ifdef IN_RING3
1800
/**
 * @callback_method_impl{FNPCICONFIGREAD}
 *
 * Intercepts guest reads of PCI configuration space so the virtio_pci_cfg_cap
 * alternative-access window (VirtIO 1.0, 4.1.4.7) can be serviced; everything
 * else falls through to the default PCI config handling.
 */
static DECLCALLBACK(VBOXSTRICTRC) virtioR3PciConfigRead(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
                                                        uint32_t uAddress, unsigned cb, uint32_t *pu32Value)
{
    PVIRTIOCORE   pVirtio   = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
    PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
    RT_NOREF(pPciDev);

    if (uAddress == pVirtio->uPciCfgDataOff)
    {
        /*
         * VirtIO 1.0 spec section 4.1.4.7 describes a required alternative access capability
         * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space
         * (the virtio_pci_cfg_cap capability), and access data items.
         * This is used by BIOS to gain early boot access to the storage device.
         */
        struct virtio_pci_cap *pPciCap = &pVirtioCC->pPciCfgCap->pciCap;
        uint32_t uLength = pPciCap->uLength;

        Log7Func((" pDevIns=%p pPciDev=%p uAddress=%#x%s cb=%u uLength=%d, bar=%d\n",
                  pDevIns, pPciDev, uAddress, uAddress < 0x10 ? " " : "", cb, uLength, pPciCap->uBar));

        /* NOTE(review): unlike virtioR3PciConfigWrite(), cb is not validated against
         * uLength here -- confirm that mismatched-size reads are intentionally allowed. */
        if (   (uLength != 1 && uLength != 2 && uLength != 4)
            || pPciCap->uBar != VIRTIO_REGION_PCI_CAP)
        {
            ASSERT_GUEST_MSG_FAILED(("Guest read virtio_pci_cfg_cap.pci_cfg_data using mismatching config. "
                                     "Ignoring\n"));
            *pu32Value = UINT32_MAX;
            return VINF_SUCCESS;
        }

        /* Redirect through the MMIO read handler as if the guest had accessed the BAR directly. */
        VBOXSTRICTRC rcStrict = virtioMmioRead(pDevIns, pVirtio, pPciCap->uOffset, pu32Value, cb);
        Log7Func((" Guest read  virtio_pci_cfg_cap.pci_cfg_data, bar=%d, offset=%d, length=%d, result=0x%x -> %Rrc\n",
                  pPciCap->uBar, pPciCap->uOffset, uLength, *pu32Value, VBOXSTRICTRC_VAL(rcStrict)));
        return rcStrict;
    }
    Log7Func((" pDevIns=%p pPciDev=%p uAddress=%#x%s cb=%u pu32Value=%p\n",
              pDevIns, pPciDev, uAddress, uAddress < 0x10 ? " " : "", cb, pu32Value));
    return VINF_PDM_PCI_DO_DEFAULT;
}
1843
/**
 * @callback_method_impl{FNPCICONFIGWRITE}
 *
 * Intercepts guest writes to PCI configuration space so the virtio_pci_cfg_cap
 * alternative-access window (VirtIO 1.0, 4.1.4.7) can be serviced; everything
 * else falls through to the default PCI config handling.
 */
static DECLCALLBACK(VBOXSTRICTRC) virtioR3PciConfigWrite(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
                                                         uint32_t uAddress, unsigned cb, uint32_t u32Value)
{
    PVIRTIOCORE   pVirtio   = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
    PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
    RT_NOREF(pPciDev);

    Log7Func(("pDevIns=%p pPciDev=%p uAddress=%#x %scb=%u u32Value=%#x\n", pDevIns, pPciDev, uAddress, uAddress < 0xf ? " " : "", cb, u32Value));
    if (uAddress == pVirtio->uPciCfgDataOff)
    {
        /* VirtIO 1.0 spec section 4.1.4.7 describes a required alternative access capability
         * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space
         * (the virtio_pci_cfg_cap capability), and access data items.
         * This is used by BIOS to gain early boot access to the storage device.*/

        struct virtio_pci_cap *pPciCap = &pVirtioCC->pPciCfgCap->pciCap;
        uint32_t uLength = pPciCap->uLength;

        /* Window must describe a 1/2/4-byte access into the capabilities BAR, and the
         * guest's access size must match the window length exactly. */
        if (   (uLength != 1 && uLength != 2 && uLength != 4)
            || cb != uLength
            || pPciCap->uBar != VIRTIO_REGION_PCI_CAP)
        {
            ASSERT_GUEST_MSG_FAILED(("Guest write virtio_pci_cfg_cap.pci_cfg_data using mismatching config. Ignoring\n"));
            return VINF_SUCCESS;
        }

        /* Redirect through the MMIO write handler as if the guest had accessed the BAR directly. */
        VBOXSTRICTRC rcStrict = virtioMmioWrite(pDevIns, pVirtio, pPciCap->uOffset, &u32Value, cb);
        Log2Func(("Guest wrote  virtio_pci_cfg_cap.pci_cfg_data, bar=%d, offset=%x, length=%x, value=%d -> %Rrc\n",
                  pPciCap->uBar, pPciCap->uOffset, uLength, u32Value, VBOXSTRICTRC_VAL(rcStrict)));
        return rcStrict;
    }
    return VINF_PDM_PCI_DO_DEFAULT;
}
1880
1881
1882/*********************************************************************************************************************************
1883* Saved state. *
1884*********************************************************************************************************************************/
1885
1886/**
1887 * Called from the FNSSMDEVSAVEEXEC function of the device.
1888 *
1889 * @param pVirtio Pointer to the shared virtio state.
1890 * @param pHlp The ring-3 device helpers.
1891 * @param pSSM The saved state handle.
1892 * @returns VBox status code.
1893 */
1894int virtioCoreR3SaveExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
1895{
1896 LogFunc(("\n"));
1897 pHlp->pfnSSMPutU64(pSSM, VIRTIO_SAVEDSTATE_MARKER);
1898 pHlp->pfnSSMPutU32(pSSM, VIRTIO_SAVEDSTATE_VERSION);
1899
1900 pHlp->pfnSSMPutBool(pSSM, pVirtio->fGenUpdatePending);
1901 pHlp->pfnSSMPutU8( pSSM, pVirtio->fDeviceStatus);
1902 pHlp->pfnSSMPutU8( pSSM, pVirtio->uConfigGeneration);
1903 pHlp->pfnSSMPutU8( pSSM, pVirtio->uPciCfgDataOff);
1904 pHlp->pfnSSMPutU8( pSSM, pVirtio->uISR);
1905 pHlp->pfnSSMPutU16( pSSM, pVirtio->uVirtqSelect);
1906 pHlp->pfnSSMPutU32( pSSM, pVirtio->uDeviceFeaturesSelect);
1907 pHlp->pfnSSMPutU32( pSSM, pVirtio->uDriverFeaturesSelect);
1908 pHlp->pfnSSMPutU64( pSSM, pVirtio->uDriverFeatures);
1909
1910 for (uint32_t i = 0; i < VIRTQ_MAX_COUNT; i++)
1911 {
1912 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[i];
1913
1914 pHlp->pfnSSMPutGCPhys64( pSSM, pVirtq->GCPhysVirtqDesc);
1915 pHlp->pfnSSMPutGCPhys64( pSSM, pVirtq->GCPhysVirtqAvail);
1916 pHlp->pfnSSMPutGCPhys64( pSSM, pVirtq->GCPhysVirtqUsed);
1917 pHlp->pfnSSMPutU16( pSSM, pVirtq->uNotifyOffset);
1918 pHlp->pfnSSMPutU16( pSSM, pVirtq->uMsixVector);
1919 pHlp->pfnSSMPutU16( pSSM, pVirtq->uEnable);
1920 pHlp->pfnSSMPutU16( pSSM, pVirtq->uQueueSize);
1921 pHlp->pfnSSMPutU16( pSSM, pVirtq->uAvailIdxShadow);
1922 pHlp->pfnSSMPutU16( pSSM, pVirtq->uUsedIdxShadow);
1923 int rc = pHlp->pfnSSMPutMem(pSSM, pVirtq->szName, 32);
1924 AssertRCReturn(rc, rc);
1925 }
1926
1927 return VINF_SUCCESS;
1928}
1929
/**
 * Called from the FNSSMDEVLOADEXEC function of the device.
 *
 * Restores the state written by virtioCoreR3SaveExec().  The field order and
 * sizes here are the saved state format and must match the save side exactly.
 *
 * @param pVirtio   Pointer to the shared virtio state.
 * @param pHlp      The ring-3 device helpers.
 * @param pSSM      The saved state handle.
 * @returns VBox status code.
 */
int virtioCoreR3LoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
{
    LogFunc(("\n"));
    /*
     * Check the marker and (embedded) version number.
     */
    uint64_t uMarker = 0;
    int rc = pHlp->pfnSSMGetU64(pSSM, &uMarker);
    AssertRCReturn(rc, rc);
    if (uMarker != VIRTIO_SAVEDSTATE_MARKER)
        return pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
                                        N_("Expected marker value %#RX64 found %#RX64 instead"),
                                        VIRTIO_SAVEDSTATE_MARKER, uMarker);
    uint32_t uVersion = 0;
    rc = pHlp->pfnSSMGetU32(pSSM, &uVersion);
    AssertRCReturn(rc, rc);
    if (uVersion != VIRTIO_SAVEDSTATE_VERSION)
        return pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
                                        N_("Unsupported virtio version: %u"), uVersion);
    /*
     * Load the state.
     */
    /* Intermediate SSMGet* statuses are not checked individually; only the final
     * pfnSSMGetMem of each queue entry is -- presumably SSM latches the first stream
     * error internally so the AssertRCReturn below still catches failures (NOTE(review):
     * confirm against the SSM helper contract). */
    pHlp->pfnSSMGetBool( pSSM, &pVirtio->fGenUpdatePending);
    pHlp->pfnSSMGetU8(   pSSM, &pVirtio->fDeviceStatus);
    pHlp->pfnSSMGetU8(   pSSM, &pVirtio->uConfigGeneration);
    pHlp->pfnSSMGetU8(   pSSM, &pVirtio->uPciCfgDataOff);
    pHlp->pfnSSMGetU8(   pSSM, &pVirtio->uISR);
    pHlp->pfnSSMGetU16(  pSSM, &pVirtio->uVirtqSelect);
    pHlp->pfnSSMGetU32(  pSSM, &pVirtio->uDeviceFeaturesSelect);
    pHlp->pfnSSMGetU32(  pSSM, &pVirtio->uDriverFeaturesSelect);
    pHlp->pfnSSMGetU64(  pSSM, &pVirtio->uDriverFeatures);

    for (uint32_t i = 0; i < VIRTQ_MAX_COUNT; i++)
    {
        PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[i];

        pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqDesc);
        pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqAvail);
        pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqUsed);
        pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uNotifyOffset);
        pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uMsixVector);
        pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uEnable);
        pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uQueueSize);
        pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uAvailIdxShadow);
        pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uUsedIdxShadow);
        rc = pHlp->pfnSSMGetMem( pSSM, pVirtq->szName, sizeof(pVirtq->szName));
        AssertRCReturn(rc, rc);
    }

    return VINF_SUCCESS;
}
1989
1990
1991/*********************************************************************************************************************************
1992* Device Level *
1993*********************************************************************************************************************************/
1994
1995/**
1996 * This must be called by the client to handle VM state changes
1997 * after the client takes care of its device-specific tasks for the state change.
1998 * (i.e. Reset, suspend, power-off, resume)
1999 *
2000 * @param pDevIns The device instance.
2001 * @param pVirtio Pointer to the shared virtio state.
2002 */
2003void virtioCoreR3VmStateChanged(PVIRTIOCORE pVirtio, VIRTIOVMSTATECHANGED enmState)
2004{
2005 LogFunc(("State changing to %s\n",
2006 virtioCoreGetStateChangeText(enmState)));
2007
2008 switch(enmState)
2009 {
2010 case kvirtIoVmStateChangedReset:
2011 virtioCoreResetAll(pVirtio);
2012 break;
2013 case kvirtIoVmStateChangedSuspend:
2014 break;
2015 case kvirtIoVmStateChangedPowerOff:
2016 break;
2017 case kvirtIoVmStateChangedResume:
2018 for (int uVirtq = 0; uVirtq < VIRTQ_MAX_COUNT; uVirtq++)
2019 {
2020 if (!pVirtio->fLegacyDriver || pVirtio->aVirtqueues[uVirtq].uEnable)
2021 virtioCoreNotifyGuestDriver(pVirtio->pDevInsR3, pVirtio, uVirtq);
2022 }
2023 break;
2024 default:
2025 LogRelFunc(("Bad enum value"));
2026 return;
2027 }
2028}
2029
2030/**
2031 * This should be called from PDMDEVREGR3::pfnDestruct.
2032 *
2033 * @param pDevIns The device instance.
2034 * @param pVirtio Pointer to the shared virtio state.
2035 * @param pVirtioCC Pointer to the ring-3 virtio state.
2036 */
2037void virtioCoreR3Term(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC)
2038{
2039 if (pVirtioCC->pbPrevDevSpecificCfg)
2040 {
2041 RTMemFree(pVirtioCC->pbPrevDevSpecificCfg);
2042 pVirtioCC->pbPrevDevSpecificCfg = NULL;
2043 }
2044
2045 RT_NOREF(pDevIns, pVirtio);
2046}
2047
2048/** API Function: See header file */
2049int virtioCoreR3Init(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC, PVIRTIOPCIPARAMS pPciParams,
2050 const char *pcszInstance, uint64_t fDevSpecificFeatures, void *pvDevSpecificCfg, uint16_t cbDevSpecificCfg)
2051{
2052
2053
2054 /*
2055 * The pVirtio state must be the first member of the shared device instance
2056 * data, otherwise we cannot get our bearings in the PCI configuration callbacks.
2057 */
2058 AssertLogRelReturn(pVirtio == PDMINS_2_DATA(pDevIns, PVIRTIOCORE), VERR_STATE_CHANGED);
2059 AssertLogRelReturn(pVirtioCC == PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC), VERR_STATE_CHANGED);
2060
2061 pVirtio->pDevInsR3 = pDevIns;
2062
2063 /*
2064 * Caller must initialize these.
2065 */
2066 AssertReturn(pVirtioCC->pfnStatusChanged, VERR_INVALID_POINTER);
2067 AssertReturn(pVirtioCC->pfnVirtqNotified, VERR_INVALID_POINTER);
2068 AssertReturn(VIRTQ_SIZE > 0 && VIRTQ_SIZE <= 32768, VERR_OUT_OF_RANGE); /* VirtIO specification-defined limit */
2069
2070#if 0 /* Until pdmR3DvHlp_PCISetIrq() impl is fixed and Assert that limits vec to 0 is removed
2071 * The legacy MSI support has not been implemented yet
2072 */
2073# ifdef VBOX_WITH_MSI_DEVICES
2074 pVirtio->fMsiSupport = true;
2075# endif
2076#endif
2077
2078
2079 /*
2080 * The host features offered include both device-specific features
2081 * and reserved feature bits (device independent)
2082 */
2083 pVirtio->uDeviceFeatures = VIRTIO_F_VERSION_1
2084 | VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED
2085 | fDevSpecificFeatures;
2086
2087 RTStrCopy(pVirtio->szInstance, sizeof(pVirtio->szInstance), pcszInstance);
2088 pVirtioCC->cbDevSpecificCfg = cbDevSpecificCfg;
2089 pVirtioCC->pbDevSpecificCfg = (uint8_t *)pvDevSpecificCfg;
2090 pVirtioCC->pbPrevDevSpecificCfg = (uint8_t *)RTMemDup(pvDevSpecificCfg, cbDevSpecificCfg);
2091 AssertLogRelReturn(pVirtioCC->pbPrevDevSpecificCfg, VERR_NO_MEMORY);
2092
2093 /* Set PCI config registers (assume 32-bit mode) */
2094 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
2095 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
2096
2097 PDMPciDevSetRevisionId(pPciDev, DEVICE_PCI_REVISION_ID_VIRTIO);
2098 PDMPciDevSetVendorId(pPciDev, DEVICE_PCI_VENDOR_ID_VIRTIO);
2099 PDMPciDevSetDeviceId(pPciDev, pPciParams->uDeviceId);
2100 PDMPciDevSetSubSystemId(pPciDev, DEVICE_PCI_NETWORK_SUBSYSTEM);
2101 PDMPciDevSetSubSystemVendorId(pPciDev, DEVICE_PCI_VENDOR_ID_VIRTIO);
2102 PDMPciDevSetClassBase(pPciDev, pPciParams->uClassBase);
2103 PDMPciDevSetClassSub(pPciDev, pPciParams->uClassSub);
2104 PDMPciDevSetClassProg(pPciDev, pPciParams->uClassProg);
2105 PDMPciDevSetInterruptLine(pPciDev, pPciParams->uInterruptLine);
2106 PDMPciDevSetInterruptPin(pPciDev, pPciParams->uInterruptPin);
2107
2108 /* Register PCI device */
2109 int rc = PDMDevHlpPCIRegister(pDevIns, pPciDev);
2110 if (RT_FAILURE(rc))
2111 return PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: cannot register PCI Device")); /* can we put params in this error? */
2112
2113 rc = PDMDevHlpPCIInterceptConfigAccesses(pDevIns, pPciDev, virtioR3PciConfigRead, virtioR3PciConfigWrite);
2114 AssertRCReturn(rc, rc);
2115
2116 /* Construct & map PCI vendor-specific capabilities for virtio host negotiation with guest driver */
2117
2118#define CFG_ADDR_2_IDX(addr) ((uint8_t)(((uintptr_t)(addr) - (uintptr_t)&pPciDev->abConfig[0])))
2119#define SET_PCI_CAP_LOC(a_pPciDev, a_pCfg, a_LocCap, a_uMmioLengthAlign) \
2120 do { \
2121 (a_LocCap).offMmio = (a_pCfg)->uOffset; \
2122 (a_LocCap).cbMmio = RT_ALIGN_T((a_pCfg)->uLength, a_uMmioLengthAlign, uint16_t); \
2123 (a_LocCap).offPci = (uint16_t)(uintptr_t)((uint8_t *)(a_pCfg) - &(a_pPciDev)->abConfig[0]); \
2124 (a_LocCap).cbPci = (a_pCfg)->uCapLen; \
2125 } while (0)
2126
2127 PVIRTIO_PCI_CAP_T pCfg;
2128 uint32_t cbRegion = 0;
2129
2130 /* Common capability (VirtIO 1.0 spec, section 4.1.4.3) */
2131 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[0x40];
2132 pCfg->uCfgType = VIRTIO_PCI_CAP_COMMON_CFG;
2133 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
2134 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
2135 pCfg->uCapNext = CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen;
2136 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
2137 pCfg->uOffset = RT_ALIGN_32(0, 4); /* Currently 0, but reminder to 32-bit align if changing this */
2138 pCfg->uLength = sizeof(VIRTIO_PCI_COMMON_CFG_T);
2139 cbRegion += pCfg->uLength;
2140 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocCommonCfgCap, 2);
2141 pVirtioCC->pCommonCfgCap = pCfg;
2142
2143 /*
2144 * Notify capability (VirtIO 1.0 spec, section 4.1.4.4). Note: uLength is based on the choice
2145 * of this implementation to make each queue's uNotifyOffset equal to (VirtqSelect) ordinal
2146 * value of the queue (different strategies are possible according to spec).
2147 */
2148 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
2149 pCfg->uCfgType = VIRTIO_PCI_CAP_NOTIFY_CFG;
2150 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
2151 pCfg->uCapLen = sizeof(VIRTIO_PCI_NOTIFY_CAP_T);
2152 pCfg->uCapNext = CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen;
2153 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
2154 pCfg->uOffset = pVirtioCC->pCommonCfgCap->uOffset + pVirtioCC->pCommonCfgCap->uLength;
2155 pCfg->uOffset = RT_ALIGN_32(pCfg->uOffset, 4);
2156 pCfg->uLength = VIRTQ_MAX_COUNT * VIRTIO_NOTIFY_OFFSET_MULTIPLIER + 2; /* will change in VirtIO 1.1 */
2157 cbRegion += pCfg->uLength;
2158 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocNotifyCap, 1);
2159 pVirtioCC->pNotifyCap = (PVIRTIO_PCI_NOTIFY_CAP_T)pCfg;
2160 pVirtioCC->pNotifyCap->uNotifyOffMultiplier = VIRTIO_NOTIFY_OFFSET_MULTIPLIER;
2161
2162 /* ISR capability (VirtIO 1.0 spec, section 4.1.4.5)
2163 *
2164 * VirtIO 1.0 spec says 8-bit, unaligned in MMIO space. Example/diagram
2165 * of spec shows it as a 32-bit field with upper bits 'reserved'
2166 * Will take spec's words more literally than the diagram for now.
2167 */
2168 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
2169 pCfg->uCfgType = VIRTIO_PCI_CAP_ISR_CFG;
2170 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
2171 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
2172 pCfg->uCapNext = CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen;
2173 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
2174 pCfg->uOffset = pVirtioCC->pNotifyCap->pciCap.uOffset + pVirtioCC->pNotifyCap->pciCap.uLength;
2175 pCfg->uOffset = RT_ALIGN_32(pCfg->uOffset, 4);
2176 pCfg->uLength = sizeof(uint8_t);
2177 cbRegion += pCfg->uLength;
2178 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocIsrCap, 4);
2179 pVirtioCC->pIsrCap = pCfg;
2180
2181 /* PCI Cfg capability (VirtIO 1.0 spec, section 4.1.4.7)
2182 * This capability doesn't get page-MMIO mapped. Instead uBar, uOffset and uLength are intercepted
2183 * by trapping PCI configuration I/O and get modulated by consumers to locate fetch and read/write
2184 * values from any region. NOTE: The linux driver not only doesn't use this feature, it will not
2185 * even list it as present if uLength isn't non-zero and also 4-byte-aligned as the linux driver is
2186 * initializing.
2187 */
2188 pVirtio->uPciCfgDataOff = pCfg->uCapNext + RT_OFFSETOF(VIRTIO_PCI_CFG_CAP_T, uPciCfgData);
2189 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
2190 pCfg->uCfgType = VIRTIO_PCI_CAP_PCI_CFG;
2191 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
2192 pCfg->uCapLen = sizeof(VIRTIO_PCI_CFG_CAP_T);
2193 pCfg->uCapNext = (pVirtio->fMsiSupport || pVirtioCC->pbDevSpecificCfg) ? CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen : 0;
2194 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
2195 pCfg->uOffset = 0;
2196 pCfg->uLength = 4;
2197 cbRegion += pCfg->uLength;
2198 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocPciCfgCap, 1);
2199 pVirtioCC->pPciCfgCap = (PVIRTIO_PCI_CFG_CAP_T)pCfg;
2200
2201 if (pVirtioCC->pbDevSpecificCfg)
2202 {
2203 /* Device specific config capability (via VirtIO 1.0, section 4.1.4.6).
2204 * Client defines the device-specific config struct and passes size to virtioCoreR3Init()
2205 * to inform this. */
2206 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
2207 pCfg->uCfgType = VIRTIO_PCI_CAP_DEVICE_CFG;
2208 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
2209 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
2210 pCfg->uCapNext = pVirtio->fMsiSupport ? CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen : 0;
2211 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
2212 pCfg->uOffset = pVirtioCC->pIsrCap->uOffset + pVirtioCC->pIsrCap->uLength;
2213 pCfg->uOffset = RT_ALIGN_32(pCfg->uOffset, 4);
2214 pCfg->uLength = cbDevSpecificCfg;
2215 cbRegion += pCfg->uLength;
2216 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocDeviceCap, 4);
2217 pVirtioCC->pDeviceCap = pCfg;
2218 }
2219 else
2220 Assert(pVirtio->LocDeviceCap.cbMmio == 0 && pVirtio->LocDeviceCap.cbPci == 0);
2221
2222 if (pVirtio->fMsiSupport)
2223 {
2224 PDMMSIREG aMsiReg;
2225 RT_ZERO(aMsiReg);
2226 aMsiReg.iMsixCapOffset = pCfg->uCapNext;
2227 aMsiReg.iMsixNextOffset = 0;
2228 aMsiReg.iMsixBar = VIRTIO_REGION_MSIX_CAP;
2229 aMsiReg.cMsixVectors = VBOX_MSIX_MAX_ENTRIES;
2230 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &aMsiReg); /* see MsixR3init() */
2231 if (RT_FAILURE(rc))
2232 {
2233 /* See PDMDevHlp.cpp:pdmR3DevHlp_PCIRegisterMsi */
2234 LogFunc(("Failed to configure MSI-X (%Rrc). Reverting to INTx\n", rc));
2235 pVirtio->fMsiSupport = false;
2236 }
2237 else
2238 Log2Func(("Using MSI-X for guest driver notification\n"));
2239 }
2240 else
2241 LogFunc(("MSI-X not available for VBox, using INTx notification\n"));
2242
2243 /* Set offset to first capability and enable PCI dev capabilities */
2244 PDMPciDevSetCapabilityList(pPciDev, 0x40);
2245 PDMPciDevSetStatus(pPciDev, VBOX_PCI_STATUS_CAP_LIST);
2246
2247 size_t cbSize = RTStrPrintf(pVirtioCC->pcszMmioName, sizeof(pVirtioCC->pcszMmioName), "%s (modern)", pcszInstance);
2248 if (cbSize <= 0)
2249 return PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: out of memory allocating string")); /* can we put params in this error? */
2250
2251 cbSize = RTStrPrintf(pVirtioCC->pcszPortIoName, sizeof(pVirtioCC->pcszPortIoName), "%s (legacy)", pcszInstance);
2252 if (cbSize <= 0)
2253 return PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: out of memory allocating string")); /* can we put params in this error? */
2254
2255 /* As a transitional device that supports legacy VirtIO drivers, this VirtIO device generic implementation presents
2256 * legacy driver interface in I/O space at BAR0. The following maps the common (e.g. device independent)
2257 * dev config area as well as device-specific dev config area (whose size is passed to init function of this VirtIO
2258 * generic device code) for access via Port I/O, since legacy drivers (e.g. pre VirtIO 1.0) don't use MMIO callbacks.
2259 * (See VirtIO 1.1, Section 4.1.4.8).
2260 */
2261 rc = PDMDevHlpPCIIORegionCreateIo(pDevIns, VIRTIO_REGION_LEGACY_IO, sizeof(VIRTIO_LEGACY_PCI_COMMON_CFG_T) + cbDevSpecificCfg,
2262 virtioLegacyIOPortOut, virtioLegacyIOPortIn, NULL /*pvUser*/, pVirtioCC->pcszPortIoName,
2263 NULL /*paExtDescs*/, &pVirtio->hLegacyIoPorts);
2264 AssertLogRelRCReturn(rc, PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: cannot register legacy config in I/O space at BAR0 */")));
2265
2266 /* Note: The Linux driver at drivers/virtio/virtio_pci_modern.c tries to map at least a page for the
2267 * 'unknown' device-specific capability without querying the capability to figure
2268 * out size, so pad with an extra page
2269 */
2270 rc = PDMDevHlpPCIIORegionCreateMmio(pDevIns, VIRTIO_REGION_PCI_CAP, RT_ALIGN_32(cbRegion + VIRTIO_PAGE_SIZE, VIRTIO_PAGE_SIZE),
2271 PCI_ADDRESS_SPACE_MEM, virtioMmioWrite, virtioMmioRead, pVirtio,
2272 IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
2273 pVirtioCC->pcszMmioName,
2274 &pVirtio->hMmioPciCap);
2275 AssertLogRelRCReturn(rc, PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: cannot register PCI Capabilities address space")));
2276 /*
2277 * Statistics.
2278 */
2279# ifdef VBOX_WITH_STATISTICS
2280 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsAllocated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
2281 "Total number of allocated descriptor chains", "DescChainsAllocated");
2282 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsFreed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
2283 "Total number of freed descriptor chains", "DescChainsFreed");
2284 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsSegsIn, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
2285 "Total number of inbound segments", "DescChainsSegsIn");
2286 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsSegsOut, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
2287 "Total number of outbound segments", "DescChainsSegsOut");
2288 PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatReadR3, STAMTYPE_PROFILE, "IO/ReadR3", STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3");
2289 PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatReadR0, STAMTYPE_PROFILE, "IO/ReadR0", STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R0");
2290 PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatReadRC, STAMTYPE_PROFILE, "IO/ReadRC", STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RC");
2291 PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatWriteR3, STAMTYPE_PROFILE, "IO/WriteR3", STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3");
2292 PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatWriteR0, STAMTYPE_PROFILE, "IO/WriteR0", STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R0");
2293 PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatWriteRC, STAMTYPE_PROFILE, "IO/WriteRC", STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RC");
2294# endif /* VBOX_WITH_STATISTICS */
2295
2296 virtioResetDevice(pDevIns, pVirtio); /* Reset VirtIO specific state of device */
2297
2298 return VINF_SUCCESS;
2299}
2300
2301#else /* !IN_RING3 */
2302
2303/**
2304 * Sets up the core ring-0/raw-mode virtio bits.
2305 *
2306 * @returns VBox status code.
2307 * @param pDevIns The device instance.
2308 * @param pVirtio Pointer to the shared virtio state. This must be the first
2309 * member in the shared device instance data!
2310 */
2311int virtioCoreRZInit(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio)
2312{
2313 AssertLogRelReturn(pVirtio == PDMINS_2_DATA(pDevIns, PVIRTIOCORE), VERR_STATE_CHANGED);
2314
2315#ifdef FUTURE_OPTIMIZATION
2316 int rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
2317 AssertRCReturn(rc, rc);
2318#endif
2319 int rc = PDMDevHlpMmioSetUpContext(pDevIns, pVirtio->hMmioPciCap, virtioMmioWrite, virtioMmioRead, pVirtio);
2320 AssertRCReturn(rc, rc);
2321
2322 rc = PDMDevHlpIoPortSetUpContext(pDevIns, pVirtio->hLegacyIoPorts, virtioLegacyIOPortOut, virtioLegacyIOPortIn, NULL /*pvUser*/);
2323 AssertRCReturn(rc, rc);
2324
2325 return rc;
2326}
2327
2328#endif /* !IN_RING3 */
2329
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette