VirtualBox

source: vbox/trunk/src/VBox/Devices/VirtIO/VirtioCore.cpp@ 92124

Last change on this file since 92124 was 92091, checked in by vboxsync, 3 years ago

Optimize how legacy/modern driver is determined to avoid polling, and eliminate code duplication for feature logging

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 99.8 KB
Line 
1/* $Id: VirtioCore.cpp 92091 2021-10-27 05:55:32Z vboxsync $ */
2
3/** @file
4 * VirtioCore - Virtio Core (PCI, feature & config mgt, queue mgt & proxy, notification mgt)
5 */
6
7/*
8 * Copyright (C) 2009-2021 Oracle Corporation
9 *
10 * This file is part of VirtualBox Open Source Edition (OSE), as
11 * available from http://www.virtualbox.org. This file is free software;
12 * you can redistribute it and/or modify it under the terms of the GNU
13 * General Public License (GPL) as published by the Free Software
14 * Foundation, in version 2 as it comes in the "COPYING" file of the
15 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
16 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
17 */
18
19
20/*********************************************************************************************************************************
21* Header Files *
22*********************************************************************************************************************************/
23#define LOG_GROUP LOG_GROUP_DEV_VIRTIO
24
25#include <iprt/assert.h>
26#include <iprt/uuid.h>
27#include <iprt/mem.h>
28#include <iprt/sg.h>
29#include <iprt/assert.h>
30#include <iprt/string.h>
31#include <iprt/param.h>
32#include <iprt/types.h>
33#include <VBox/log.h>
34#include <VBox/msi.h>
35#include <iprt/types.h>
36#include <VBox/AssertGuest.h>
37#include <VBox/vmm/pdmdev.h>
38#include "VirtioCore.h"
39
40
41/*********************************************************************************************************************************
42* Defined Constants And Macros *
43*********************************************************************************************************************************/
/** Returns the log-friendly instance name of the VirtIO device. */
#define INSTANCE(a_pVirtio)                 ((a_pVirtio)->szInstance)
/** Returns the log-friendly name of virtq @a a_uVirtq. */
#define VIRTQNAME(a_pVirtio, a_uVirtq)      ((a_pVirtio)->aVirtqueues[(a_uVirtq)].szName)

/** True when the guest driver has added no entries to @a pVirtq's avail ring
 *  that this device has not yet consumed (see virtioCoreVirtqAvailCnt). */
#define IS_VIRTQ_EMPTY(pDevIns, pVirtio, pVirtq) \
            (virtioCoreVirtqAvailCnt(pDevIns, pVirtio, pVirtq) == 0)

/** True when the guest driver has set DRIVER_OK in the device status register. */
#define IS_DRIVER_OK(a_pVirtio)             ((a_pVirtio)->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
/** True when DRIVER_OK was set in the previously recorded device status. */
#define WAS_DRIVER_OK(a_pVirtio)            ((a_pVirtio)->fPrevDeviceStatus & VIRTIO_STATUS_DRIVER_OK)

/**
 * This macro returns true if the @a a_offAccess and access length (@a
 * a_cbAccess) are within the range of the mapped capability struct described by
 * @a a_LocCapData.
 *
 * @param[in]  a_offAccess      Input:  The offset into the MMIO bar of the access.
 * @param[in]  a_cbAccess       Input:  The access size.
 * @param[out] a_offsetIntoCap  Output: uint32_t variable to return the intra-capability offset into.
 * @param[in]  a_LocCapData     Input:  The capability location info.
 */
#define MATCHES_VIRTIO_CAP_STRUCT(a_offAccess, a_cbAccess, a_offsetIntoCap, a_LocCapData) \
    (   ((a_offsetIntoCap) = (uint32_t)((a_offAccess) - (a_LocCapData).offMmio)) < (uint32_t)(a_LocCapData).cbMmio \
     && (a_offsetIntoCap) + (uint32_t)(a_cbAccess) <= (uint32_t)(a_LocCapData).cbMmio )


/** Marks the start of the virtio saved state (just for sanity). */
#define VIRTIO_SAVEDSTATE_MARKER            UINT64_C(0x1133557799bbddff)
/** The current saved state version for the virtio core. */
#define VIRTIO_SAVEDSTATE_VERSION           UINT32_C(1)
74
75
76/*********************************************************************************************************************************
77* Structures and Typedefs *
78*********************************************************************************************************************************/
79
80
/** @name virtq related flags
 * @{ */
#define VIRTQ_DESC_F_NEXT                   1   /**< Indicates this descriptor chains to next  */
#define VIRTQ_DESC_F_WRITE                  2   /**< Marks buffer as write-only (default ro)   */
#define VIRTQ_DESC_F_INDIRECT               4   /**< Buffer is list of buffer descriptors      */

#define VIRTQ_USED_F_NO_NOTIFY              1   /**< Dev to Drv: Don't notify when buf added   */
#define VIRTQ_AVAIL_F_NO_INTERRUPT          1   /**< Drv to Dev: Don't notify when buf eaten   */
/** @} */

/**
 * virtq related structs
 * (struct names follow VirtIO 1.0 spec, typedef use VBox style)
 */
typedef struct virtq_desc
{
    uint64_t  GCPhysBuf;                        /**< addr       GC Phys. address of buffer     */
    uint32_t  cb;                               /**< len        Buffer length                  */
    uint16_t  fFlags;                           /**< flags      Buffer specific flags          */
    uint16_t  uDescIdxNext;                     /**< next       Idx set if VIRTIO_DESC_F_NEXT  */
} VIRTQ_DESC_T, *PVIRTQ_DESC_T;

typedef struct virtq_avail
{
    uint16_t  fFlags;                           /**< flags      avail ring guest-to-host flags */
    uint16_t  uIdx;                             /**< idx        Index of next free ring slot   */
    RT_FLEXIBLE_ARRAY_EXTENSION
    uint16_t  auRing[RT_FLEXIBLE_ARRAY];        /**< ring       Ring: avail drv to dev bufs    */
    //uint16_t  uUsedEventIdx;                  /**< used_event (if VIRTQ_USED_F_EVENT_IDX)    */
} VIRTQ_AVAIL_T, *PVIRTQ_AVAIL_T;

typedef struct virtq_used_elem
{
    uint32_t  uDescIdx;                         /**< idx        Start of used desc chain       */
    uint32_t  cbElem;                           /**< len        Total len of used desc chain   */
} VIRTQ_USED_ELEM_T;

typedef struct virt_used
{
    uint16_t  fFlags;                           /**< flags      used ring host-to-guest flags  */
    uint16_t  uIdx;                             /**< idx        Index of next ring slot        */
    RT_FLEXIBLE_ARRAY_EXTENSION
    VIRTQ_USED_ELEM_T aRing[RT_FLEXIBLE_ARRAY]; /**< ring       Ring: used dev to drv bufs     */
    //uint16_t  uAvailEventIdx;                 /**< avail_event if (VIRTQ_USED_F_EVENT_IDX)   */
} VIRTQ_USED_T, *PVIRTQ_USED_T;
126
127
128const char *virtioCoreGetStateChangeText(VIRTIOVMSTATECHANGED enmState)
129{
130 switch (enmState)
131 {
132 case kvirtIoVmStateChangedReset: return "VM RESET";
133 case kvirtIoVmStateChangedSuspend: return "VM SUSPEND";
134 case kvirtIoVmStateChangedPowerOff: return "VM POWER OFF";
135 case kvirtIoVmStateChangedResume: return "VM RESUME";
136 default: return "<BAD ENUM>";
137 }
138}
139
140/* Internal Functions */
141
142static void virtioCoreNotifyGuestDriver(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq);
143static int virtioKick(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint8_t uCause, uint16_t uVec);
144
145/** @name Internal queue operations
146 * @{ */
147
/**
 * Accessor for virtq descriptor: reads descriptor @a idxDesc from the guest's
 * descriptor table into @a pDesc (index wrapped to the queue size).
 */
#ifdef IN_RING3
DECLINLINE(void) virtioReadDesc(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq,
                                uint32_t idxDesc, PVIRTQ_DESC_T pDesc)
{
    AssertMsg(IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
    uint16_t const cQueueItems = RT_MAX(pVirtq->uQueueSize, 1);  /* Clamp so the modulo below can't divide by zero. */
    RTGCPHYS const GCPhysDesc  = pVirtq->GCPhysVirtqDesc
                               + sizeof(VIRTQ_DESC_T) * (idxDesc % cQueueItems);
    virtioCoreGCPhysRead(pVirtio, pDevIns, GCPhysDesc, pDesc, sizeof(VIRTQ_DESC_T));
}
#endif
163
/**
 * Accessors for virtq avail ring
 */
#ifdef IN_RING3
/** Reads the descriptor-chain head index stored in avail ring slot @a availIdx. */
DECLINLINE(uint16_t) virtioReadAvailDescIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint32_t availIdx)
{
    AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
    uint16_t const cQueueItems = RT_MAX(pVirtq->uQueueSize, 1); /* Make sure to avoid div-by-zero. */
    uint16_t uHeadDescIdx = 0;
    virtioCoreGCPhysRead(pVirtio, pDevIns,
                         pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[availIdx % cQueueItems]),
                         &uHeadDescIdx, sizeof(uHeadDescIdx));
    return uHeadDescIdx;
}

/** Reads the used_event field, which per VirtIO 1.0 immediately follows the avail ring array. */
DECLINLINE(uint16_t) virtioReadAvailUsedEvent(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
{
    AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
    uint16_t uUsedEventIdx = 0;
    virtioCoreGCPhysRead(pVirtio, pDevIns,
                         pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[pVirtq->uQueueSize]),
                         &uUsedEventIdx, sizeof(uUsedEventIdx));
    return uUsedEventIdx;
}
#endif
191
192DECLINLINE(uint16_t) virtioReadAvailRingIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
193{
194 uint16_t uIdx = 0;
195 AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
196 virtioCoreGCPhysRead(pVirtio, pDevIns,
197 pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF(VIRTQ_AVAIL_T, uIdx),
198 &uIdx, sizeof(uIdx));
199 return uIdx;
200}
201
202DECLINLINE(uint16_t) virtioReadAvailRingFlags(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
203{
204 uint16_t fFlags = 0;
205 AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
206 virtioCoreGCPhysRead(pVirtio, pDevIns,
207 pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF(VIRTQ_AVAIL_T, fFlags),
208 &fFlags, sizeof(fFlags));
209
210 return fFlags;
211}
212
213/** @} */
214
215/** @name Accessors for virtq used ring
216 * @{
217 */
218
#ifdef IN_RING3
/** Writes one used-ring element (head desc index + total length) into slot @a usedIdx. */
DECLINLINE(void) virtioWriteUsedElem(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq,
                                     uint32_t usedIdx, uint32_t uDescIdx, uint32_t uLen)
{
    VIRTQ_USED_ELEM_T elem = { uDescIdx, uLen };
    AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
    uint16_t const cQueueItems = RT_MAX(pVirtq->uQueueSize, 1); /* Make sure to avoid div-by-zero. */
    RTGCPHYS const GCPhysElem  = pVirtq->GCPhysVirtqUsed
                               + RT_UOFFSETOF_DYN(VIRTQ_USED_T, aRing[usedIdx % cQueueItems]);
    virtioCoreGCPhysWrite(pVirtio, pDevIns, GCPhysElem, &elem, sizeof(elem));
}

/** Writes the used-ring flags (e.g. VIRTQ_USED_F_NO_NOTIFY) to guest memory. */
DECLINLINE(void) virtioWriteUsedRingFlags(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint16_t fFlags)
{
    AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
    RT_UNTRUSTED_VALIDATED_FENCE(); /* VirtIO 1.0, Section 3.2.1.4.1 */
    RTGCPHYS const GCPhysFlags = pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, fFlags);
    virtioCoreGCPhysWrite(pVirtio, pDevIns, GCPhysFlags, &fFlags, sizeof(fFlags));
}
#endif
241
242DECLINLINE(void) virtioWriteUsedRingIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint16_t uIdx)
243{
244 AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
245 RT_UNTRUSTED_VALIDATED_FENCE(); /* VirtIO 1.0, Section 3.2.1.4.1 */
246 virtioCoreGCPhysWrite(pVirtio, pDevIns,
247 pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, uIdx),
248 &uIdx, sizeof(uIdx));
249}
250
251
#ifdef IN_RING3
/** Reads the device's used-ring producer index back from guest memory. */
DECLINLINE(uint16_t) virtioReadUsedRingIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
{
    AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
    uint16_t uRingIdx = 0;
    virtioCoreGCPhysRead(pVirtio, pDevIns,
                         pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, uIdx),
                         &uRingIdx, sizeof(uRingIdx));
    return uRingIdx;
}

/** Reads the used-ring flags back from guest memory. */
DECLINLINE(uint16_t) virtioReadUsedRingFlags(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
{
    AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
    uint16_t fRingFlags = 0;
    virtioCoreGCPhysRead(pVirtio, pDevIns,
                         pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
                         &fRingFlags, sizeof(fRingFlags));
    return fRingFlags;
}

/** Writes the avail_event field, which per VirtIO 1.0 immediately follows the used ring array. */
DECLINLINE(void) virtioWriteUsedAvailEvent(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint32_t uAvailEventIdx)
{
    AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
    virtioCoreGCPhysWrite(pVirtio, pDevIns,
                          pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF_DYN(VIRTQ_USED_T, aRing[pVirtq->uQueueSize]),
                          &uAvailEventIdx, sizeof(uAvailEventIdx));
}
#endif
283
/**
 * Returns the number of avail-ring entries the guest driver has produced that
 * this device has not yet consumed, i.e. the delta between the guest's avail
 * index and our shadow copy of it.
 */
DECLINLINE(uint16_t) virtioCoreVirtqAvailCnt(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
{
    uint16_t uIdxActual = virtioReadAvailRingIdx(pDevIns, pVirtio, pVirtq);
    uint16_t uIdxShadow = pVirtq->uAvailIdxShadow;
    uint16_t uIdxDelta;

    /* NOTE(review): the avail index is a free-running uint16_t, yet wraparound is
     * compensated here with VIRTQ_SIZE rather than 2^16 -- confirm VIRTQ_SIZE is
     * the intended wrap window for this comparison. */
    if (uIdxActual < uIdxShadow)
        uIdxDelta = (uIdxActual + VIRTQ_SIZE) - uIdxShadow;
    else
        uIdxDelta = uIdxActual - uIdxShadow;

    LogFunc(("%s, %u %s\n",
             pVirtq->szName, uIdxDelta, uIdxDelta == 1 ? "entry" : "entries"));

    return uIdxDelta;
}
300/**
301 * Get count of new (e.g. pending) elements in available ring.
302 *
303 * @param pDevIns The device instance.
304 * @param pVirtio Pointer to the shared virtio state.
305 * @param uVirtq Virtq number
306 *
307 * @returns how many entries have been added to ring as a delta of the consumer's
308 * avail index and the queue's guest-side current avail index.
309 */
310uint16_t virtioCoreVirtqAvailBufCount(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq)
311{
312 AssertMsgReturn(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues), ("uVirtq out of range"), 0);
313 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
314
315 if (!IS_DRIVER_OK(pVirtio))
316 {
317 LogRelFunc(("Driver not ready\n"));
318 return 0;
319 }
320 if (!pVirtio->fLegacyDriver && !pVirtq->uEnable)
321 {
322 LogRelFunc(("virtq: %d (%s) not enabled\n", uVirtq, VIRTQNAME(pVirtio, uVirtq)));
323 return 0;
324 }
325
326 return virtioCoreVirtqAvailCnt(pDevIns, pVirtio, pVirtq);
327}
328
329#ifdef IN_RING3
330
331void virtioCoreR3FeatureDump(VIRTIOCORE *pVirtio, PCDBGFINFOHLP pHlp, const VIRTIO_FEATURES_LIST *s_aFeatures, int cFeatures, int fBanner)
332{
333#define MAXLINE 80
334 /* Display as a single buf to prevent interceding log messages */
335 uint16_t cbBuf = cFeatures * 132;
336 char *pszBuf = (char *)RTMemAllocZ(cbBuf);
337 Assert(pszBuf);
338 char *cp = pszBuf;
339 for (int i = 0; i < cFeatures; ++i)
340 {
341 bool isOffered = RT_BOOL(pVirtio->uDeviceFeatures & s_aFeatures[i].fFeatureBit);
342 bool isNegotiated = RT_BOOL(pVirtio->uDriverFeatures & s_aFeatures[i].fFeatureBit);
343 cp += RTStrPrintf(cp, cbBuf - (cp - pszBuf), " %s %s %s",
344 isOffered ? "+" : "-", isNegotiated ? "x" : " ", s_aFeatures[i].pcszDesc);
345 }
346 if (pHlp) {
347 if (fBanner)
348 pHlp->pfnPrintf(pHlp, "VirtIO Features Configuration\n\n"
349 " Offered Accepted Feature Description\n"
350 " ------- -------- ------- -----------\n");
351 pHlp->pfnPrintf(pHlp, "%s\n", pszBuf);
352 }
353#ifdef LOG_ENABLED
354 else
355 {
356 if (fBanner)
357 Log(("VirtIO Features Configuration\n\n"
358 " Offered Accepted Feature Description\n"
359 " ------- -------- ------- -----------\n"));
360 Log(("%s\n", pszBuf));
361 }
362#endif
363 RTMemFree(pszBuf);
364}
365
/**
 * API Function: See header file.
 *
 * Dumps the VirtIO core feature set (with the column banner) followed by the
 * device-specific feature set of this device, via @a pHlp or the log.
 */
void virtioCorePrintDeviceFeatures(VIRTIOCORE *pVirtio, PCDBGFINFOHLP pHlp,
                                   const VIRTIO_FEATURES_LIST *s_aDevSpecificFeatures, int cFeatures) {
    virtioCoreR3FeatureDump(pVirtio, pHlp, s_aCoreFeatures, RT_ELEMENTS(s_aCoreFeatures), 1 /*fBanner */);
    virtioCoreR3FeatureDump(pVirtio, pHlp, s_aDevSpecificFeatures, cFeatures, 0 /*fBanner */);
}
372
373#endif
374
375#ifdef LOG_ENABLED
376
377/** API Function: See header file */
378void virtioCoreHexDump(uint8_t *pv, uint32_t cb, uint32_t uBase, const char *pszTitle)
379{
380#define ADJCURSOR(cb) pszOut += cb; cbRemain -= cb;
381 size_t cbPrint = 0, cbRemain = ((cb / 16) + 1) * 80;
382 char *pszBuf = (char *)RTMemAllocZ(cbRemain), *pszOut = pszBuf;
383 AssertMsgReturnVoid(pszBuf, ("Out of Memory"));
384 if (pszTitle)
385 {
386 cbPrint = RTStrPrintf(pszOut, cbRemain, "%s [%d bytes]:\n", pszTitle, cb);
387 ADJCURSOR(cbPrint);
388 }
389 for (uint32_t row = 0; row < RT_MAX(1, (cb / 16) + 1) && row * 16 < cb; row++)
390 {
391 cbPrint = RTStrPrintf(pszOut, cbRemain, "%04x: ", row * 16 + uBase); /* line address */
392 ADJCURSOR(cbPrint);
393 for (uint8_t col = 0; col < 16; col++)
394 {
395 uint32_t idx = row * 16 + col;
396 if (idx >= cb)
397 cbPrint = RTStrPrintf(pszOut, cbRemain, "-- %s", (col + 1) % 8 ? "" : " ");
398 else
399 cbPrint = RTStrPrintf(pszOut, cbRemain, "%02x %s", pv[idx], (col + 1) % 8 ? "" : " ");
400 ADJCURSOR(cbPrint);
401 }
402 for (uint32_t idx = row * 16; idx < row * 16 + 16; idx++)
403 {
404 cbPrint = RTStrPrintf(pszOut, cbRemain, "%c", (idx >= cb) ? ' ' : (pv[idx] >= 0x20 && pv[idx] <= 0x7e ? pv[idx] : '.'));
405 ADJCURSOR(cbPrint);
406 }
407 *pszOut++ = '\n';
408 --cbRemain;
409 }
410 Log(("%s\n", pszBuf));
411 RTMemFree(pszBuf);
412 RT_NOREF2(uBase, pv);
413#undef ADJCURSOR
414}
415
416/* API FUnction: See header file */
417void virtioCoreGCPhysHexDump(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, uint16_t cb, uint32_t uBase, const char *pszTitle)
418{
419 PVIRTIOCORE pVirtio = PDMDEVINS_2_DATA(pDevIns, PVIRTIOCORE);
420#define ADJCURSOR(cb) pszOut += cb; cbRemain -= cb;
421 size_t cbPrint = 0, cbRemain = ((cb / 16) + 1) * 80;
422 char *pszBuf = (char *)RTMemAllocZ(cbRemain), *pszOut = pszBuf;
423 AssertMsgReturnVoid(pszBuf, ("Out of Memory"));
424 if (pszTitle)
425 {
426 cbPrint = RTStrPrintf(pszOut, cbRemain, "%s [%d bytes]:\n", pszTitle, cb);
427 ADJCURSOR(cbPrint);
428 }
429 for (uint16_t row = 0; row < (uint16_t)RT_MAX(1, (cb / 16) + 1) && row * 16 < cb; row++)
430 {
431 uint8_t c;
432 cbPrint = RTStrPrintf(pszOut, cbRemain, "%04x: ", row * 16 + uBase); /* line address */
433 ADJCURSOR(cbPrint);
434 for (uint8_t col = 0; col < 16; col++)
435 {
436 uint32_t idx = row * 16 + col;
437 virtioCoreGCPhysRead(pVirtio, pDevIns, GCPhys + idx, &c, 1);
438 if (idx >= cb)
439 cbPrint = RTStrPrintf(pszOut, cbRemain, "-- %s", (col + 1) % 8 ? "" : " ");
440 else
441 cbPrint = RTStrPrintf(pszOut, cbRemain, "%02x %s", c, (col + 1) % 8 ? "" : " ");
442 ADJCURSOR(cbPrint);
443 }
444 for (uint16_t idx = row * 16; idx < row * 16 + 16; idx++)
445 {
446 virtioCoreGCPhysRead(pVirtio, pDevIns, GCPhys + idx, &c, 1);
447 cbPrint = RTStrPrintf(pszOut, cbRemain, "%c", (idx >= cb) ? ' ' : (c >= 0x20 && c <= 0x7e ? c : '.'));
448 ADJCURSOR(cbPrint);
449 }
450 *pszOut++ = '\n';
451 --cbRemain;
452 }
453 Log(("%s\n", pszBuf));
454 RTMemFree(pszBuf);
455 RT_NOREF(uBase);
456#undef ADJCURSOR
457}
458#endif /* LOG_ENABLED */
459
/**
 * API function: See header file.
 *
 * @returns Nonzero when the attached guest driver negotiated the legacy
 *          (pre-1.0 / transitional) VirtIO interface, zero for a modern driver.
 */
int virtioCoreIsLegacyMode(PVIRTIOCORE pVirtio)
{
    Log12Func(("%s", pVirtio->fLegacyDriver ? "Legacy Guest Driver handling mode\n" : ""));
    return pVirtio->fLegacyDriver;
}
466
467/** API function: See header file */
468void virtioCoreLogMappedIoValue(const char *pszFunc, const char *pszMember, uint32_t uMemberSize,
469 const void *pv, uint32_t cb, uint32_t uOffset, int fWrite,
470 int fHasIndex, uint32_t idx)
471{
472 if (!LogIs6Enabled())
473 return;
474
475 char szIdx[16];
476 if (fHasIndex)
477 RTStrPrintf(szIdx, sizeof(szIdx), "[%d]", idx);
478 else
479 szIdx[0] = '\0';
480
481 if (cb == 1 || cb == 2 || cb == 4 || cb == 8)
482 {
483 char szDepiction[64];
484 size_t cchDepiction;
485 if (uOffset != 0 || cb != uMemberSize) /* display bounds if partial member access */
486 cchDepiction = RTStrPrintf(szDepiction, sizeof(szDepiction), "%s%s[%d:%d]",
487 pszMember, szIdx, uOffset, uOffset + cb - 1);
488 else
489 cchDepiction = RTStrPrintf(szDepiction, sizeof(szDepiction), "%s%s", pszMember, szIdx);
490
491 /* padding */
492 if (cchDepiction < 30)
493 szDepiction[cchDepiction++] = ' ';
494 while (cchDepiction < 30)
495 szDepiction[cchDepiction++] = '.';
496 szDepiction[cchDepiction] = '\0';
497
498 RTUINT64U uValue;
499 uValue.u = 0;
500 memcpy(uValue.au8, pv, cb);
501 Log6(("%-23s: Guest %s %s %#0*RX64\n",
502 pszFunc, fWrite ? "wrote" : "read ", szDepiction, 2 + cb * 2, uValue.u));
503 }
504 else /* odd number or oversized access, ... log inline hex-dump style */
505 {
506 Log6(("%-23s: Guest %s %s%s[%d:%d]: %.*Rhxs\n",
507 pszFunc, fWrite ? "wrote" : "read ", pszMember,
508 szIdx, uOffset, uOffset + cb, cb, pv));
509 }
510 RT_NOREF2(fWrite, pszFunc);
511}
512
513/**
514 * Makes the MMIO-mapped Virtio fDeviceStatus registers non-cryptic (buffers to
515 * keep the output clean during multi-threaded activity)
516 */
517DECLINLINE(void) virtioCoreFormatDeviceStatus(uint8_t bStatus, char *pszBuf, size_t uSize)
518{
519
520#define ADJCURSOR(len) cp += len; uSize -= len; sep = (char *)" | ";
521
522 memset(pszBuf, 0, uSize);
523 size_t len;
524 char *cp = pszBuf;
525 char *sep = (char *)"";
526
527 if (bStatus == 0) {
528 RTStrPrintf(cp, uSize, "RESET");
529 return;
530 }
531 if (bStatus & VIRTIO_STATUS_ACKNOWLEDGE)
532 {
533 len = RTStrPrintf(cp, uSize, "ACKNOWLEDGE");
534 ADJCURSOR(len);
535 }
536 if (bStatus & VIRTIO_STATUS_DRIVER)
537 {
538 len = RTStrPrintf(cp, uSize, "%sDRIVER", sep);
539 ADJCURSOR(len);
540 }
541 if (bStatus & VIRTIO_STATUS_FEATURES_OK)
542 {
543 len = RTStrPrintf(cp, uSize, "%sFEATURES_OK", sep);
544 ADJCURSOR(len);
545 }
546 if (bStatus & VIRTIO_STATUS_DRIVER_OK)
547 {
548 len = RTStrPrintf(cp, uSize, "%sDRIVER_OK", sep);
549 ADJCURSOR(len);
550 }
551 if (bStatus & VIRTIO_STATUS_FAILED)
552 {
553 len = RTStrPrintf(cp, uSize, "%sFAILED", sep);
554 ADJCURSOR(len);
555 }
556 if (bStatus & VIRTIO_STATUS_DEVICE_NEEDS_RESET)
557 RTStrPrintf(cp, uSize, "%sNEEDS_RESET", sep);
558
559#undef ADJCURSOR
560}
561
562#ifdef IN_RING3
563
564int virtioCoreR3VirtqAttach(PVIRTIOCORE pVirtio, uint16_t uVirtq, const char *pcszName)
565{
566 LogFunc(("Attaching %s to VirtIO core\n", pcszName));
567 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
568 pVirtq->uVirtq = uVirtq;
569 pVirtq->uAvailIdxShadow = 0;
570 pVirtq->uUsedIdxShadow = 0;
571 pVirtq->fUsedRingEvent = false;
572 RTStrCopy(pVirtq->szName, sizeof(pVirtq->szName), pcszName);
573 return VINF_SUCCESS;
574}
575
576/** API Fuunction: See header file */
577void virtioCoreR3VirtqInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs, int uVirtq)
578{
579 RT_NOREF(pszArgs);
580 PVIRTIOCORE pVirtio = PDMDEVINS_2_DATA(pDevIns, PVIRTIOCORE);
581 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
582
583 /** @todo add ability to dump physical contents described by any descriptor (using existing VirtIO core API function) */
584// bool fDump = pszArgs && (*pszArgs == 'd' || *pszArgs == 'D'); /* "dump" (avail phys descriptor)"
585
586 uint16_t uAvailIdx = virtioReadAvailRingIdx(pDevIns, pVirtio, pVirtq);
587 uint16_t uAvailIdxShadow = pVirtq->uAvailIdxShadow;
588
589 uint16_t uUsedIdx = virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq);
590 uint16_t uUsedIdxShadow = pVirtq->uUsedIdxShadow;
591
592 PVIRTQBUF pVirtqBuf = NULL;
593
594 bool fEmpty = IS_VIRTQ_EMPTY(pDevIns, pVirtio, pVirtq);
595
596 LogFunc(("%s, empty = %s\n", pVirtq->szName, fEmpty ? "true" : "false"));
597
598 int cSendSegs = 0, cReturnSegs = 0;
599 if (!fEmpty)
600 {
601 virtioCoreR3VirtqAvailBufPeek(pDevIns, pVirtio, uVirtq, &pVirtqBuf);
602 cSendSegs = pVirtqBuf->pSgPhysSend ? pVirtqBuf->pSgPhysSend->cSegs : 0;
603 cReturnSegs = pVirtqBuf->pSgPhysReturn ? pVirtqBuf->pSgPhysReturn->cSegs : 0;
604 }
605
606 bool fAvailNoInterrupt = virtioReadAvailRingFlags(pDevIns, pVirtio, pVirtq) & VIRTQ_AVAIL_F_NO_INTERRUPT;
607 bool fUsedNoNotify = virtioReadUsedRingFlags(pDevIns, pVirtio, pVirtq) & VIRTQ_USED_F_NO_NOTIFY;
608
609 pHlp->pfnPrintf(pHlp, " queue enabled: ........... %s\n", pVirtq->uEnable ? "true" : "false");
610 pHlp->pfnPrintf(pHlp, " size: .................... %d\n", pVirtq->uQueueSize);
611 pHlp->pfnPrintf(pHlp, " notify offset: ........... %d\n", pVirtq->uNotifyOffset);
612 if (pVirtio->fMsiSupport)
613 pHlp->pfnPrintf(pHlp, " MSIX vector: ....... %4.4x\n", pVirtq->uMsixVector);
614 pHlp->pfnPrintf(pHlp, "\n");
615 pHlp->pfnPrintf(pHlp, " avail ring (%d entries):\n", uAvailIdx - uAvailIdxShadow);
616 pHlp->pfnPrintf(pHlp, " index: ................ %d\n", uAvailIdx);
617 pHlp->pfnPrintf(pHlp, " shadow: ............... %d\n", uAvailIdxShadow);
618 pHlp->pfnPrintf(pHlp, " flags: ................ %s\n", fAvailNoInterrupt ? "NO_INTERRUPT" : "");
619 pHlp->pfnPrintf(pHlp, "\n");
620 pHlp->pfnPrintf(pHlp, " used ring (%d entries):\n", uUsedIdx - uUsedIdxShadow);
621 pHlp->pfnPrintf(pHlp, " index: ................ %d\n", uUsedIdx);
622 pHlp->pfnPrintf(pHlp, " shadow: ............... %d\n", uUsedIdxShadow);
623 pHlp->pfnPrintf(pHlp, " flags: ................ %s\n", fUsedNoNotify ? "NO_NOTIFY" : "");
624 pHlp->pfnPrintf(pHlp, "\n");
625 if (!fEmpty)
626 {
627 pHlp->pfnPrintf(pHlp, " desc chain:\n");
628 pHlp->pfnPrintf(pHlp, " head idx: ............. %d\n", uUsedIdx);
629 pHlp->pfnPrintf(pHlp, " segs: ................. %d\n", cSendSegs + cReturnSegs);
630 pHlp->pfnPrintf(pHlp, " refCnt ................ %d\n", pVirtqBuf->cRefs);
631 pHlp->pfnPrintf(pHlp, "\n");
632 pHlp->pfnPrintf(pHlp, " host-to-guest (%d bytes):\n", pVirtqBuf->cbPhysSend);
633 pHlp->pfnPrintf(pHlp, " segs: .............. %d\n", cSendSegs);
634 if (cSendSegs)
635 {
636 pHlp->pfnPrintf(pHlp, " index: ............. %d\n", pVirtqBuf->pSgPhysSend->idxSeg);
637 pHlp->pfnPrintf(pHlp, " unsent ............. %d\n", pVirtqBuf->pSgPhysSend->cbSegLeft);
638 }
639 pHlp->pfnPrintf(pHlp, "\n");
640 pHlp->pfnPrintf(pHlp, " guest-to-host (%d bytes)\n", pVirtqBuf->cbPhysReturn);
641 pHlp->pfnPrintf(pHlp, " segs: .............. %d\n", cReturnSegs);
642 if (cReturnSegs)
643 {
644 pHlp->pfnPrintf(pHlp, " index: ............. %d\n", pVirtqBuf->pSgPhysReturn->idxSeg);
645 pHlp->pfnPrintf(pHlp, " unsent ............. %d\n", pVirtqBuf->pSgPhysReturn->cbSegLeft);
646 }
647 } else
648 pHlp->pfnPrintf(pHlp, " No desc chains available\n");
649 pHlp->pfnPrintf(pHlp, "\n");
650
651}
652
/**
 * API Function: See header file.
 *
 * Adds a reference to the descriptor-chain object.
 *
 * @returns The new reference count, or UINT32_MAX if @a pVirtqBuf is NULL or
 *          its magic is invalid (corrupt/freed object).
 */
uint32_t virtioCoreR3VirtqBufRetain(PVIRTQBUF pVirtqBuf)
{
    AssertReturn(pVirtqBuf, UINT32_MAX);
    AssertReturn(pVirtqBuf->u32Magic == VIRTQBUF_MAGIC, UINT32_MAX);
    uint32_t cRefs = ASMAtomicIncU32(&pVirtqBuf->cRefs);
    /* After a retain the count must exceed the creator's reference; a large
     * count indicates a refcounting bug. */
    Assert(cRefs > 1);
    Assert(cRefs < 16);
    return cRefs;
}
663
664
665/** API Function: See header file */
666uint32_t virtioCoreR3VirtqBufRelease(PVIRTIOCORE pVirtio, PVIRTQBUF pVirtqBuf)
667{
668 if (!pVirtqBuf)
669 return 0;
670 AssertReturn(pVirtqBuf, 0);
671 AssertReturn(pVirtqBuf->u32Magic == VIRTQBUF_MAGIC, 0);
672 uint32_t cRefs = ASMAtomicDecU32(&pVirtqBuf->cRefs);
673 Assert(cRefs < 16);
674 if (cRefs == 0)
675 {
676 pVirtqBuf->u32Magic = ~VIRTQBUF_MAGIC;
677 RTMemFree(pVirtqBuf);
678#ifdef VBOX_WITH_STATISTICS
679 STAM_REL_COUNTER_INC(&pVirtio->StatDescChainsFreed);
680#endif
681 }
682 RT_NOREF(pVirtio);
683 return cRefs;
684}
685
/**
 * API Function: See header file.
 *
 * Signals a device-config change to the guest by raising the config-change
 * cause (using the config MSI-X vector when configured).
 */
void virtioCoreNotifyConfigChanged(PVIRTIOCORE pVirtio)
{
    virtioKick(pVirtio->pDevInsR3, pVirtio, VIRTIO_ISR_DEVICE_CONFIG, pVirtio->uMsixConfig);
}
691
692/** API Function: See header file */
693void virtioCoreVirtqEnableNotify(PVIRTIOCORE pVirtio, uint16_t uVirtq, bool fEnable)
694{
695
696 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
697 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
698
699 if (IS_DRIVER_OK(pVirtio))
700 {
701 uint16_t fFlags = virtioReadUsedRingFlags(pVirtio->pDevInsR3, pVirtio, pVirtq);
702
703 if (fEnable)
704 fFlags &= ~VIRTQ_USED_F_NO_NOTIFY;
705 else
706 fFlags |= VIRTQ_USED_F_NO_NOTIFY;
707
708 virtioWriteUsedRingFlags(pVirtio->pDevInsR3, pVirtio, pVirtq, fFlags);
709 }
710}
711
/**
 * API function: See Header file.
 *
 * Flags the device as needing a reset and, if the driver is up, kicks it with
 * a config-change interrupt so it notices.
 */
void virtioCoreResetAll(PVIRTIOCORE pVirtio)
{
    LogFunc(("\n"));
    pVirtio->fDeviceStatus |= VIRTIO_STATUS_DEVICE_NEEDS_RESET;
    if (IS_DRIVER_OK(pVirtio))
    {
        if (!pVirtio->fLegacyDriver)
            pVirtio->fGenUpdatePending = true;  /* NOTE(review): presumably bumps the config generation the modern driver re-reads -- confirm against the MMIO handler */
        virtioKick(pVirtio->pDevInsR3, pVirtio, VIRTIO_ISR_DEVICE_CONFIG, pVirtio->uMsixConfig);
    }
}
724
/**
 * API function: See Header file.
 *
 * Same as virtioCoreR3VirtqAvailBufGet() with fRemove=false: fetches the next
 * available descriptor chain without advancing the avail-ring shadow index,
 * so the buffer remains in the queue. Caller must release *ppVirtqBuf.
 */
int virtioCoreR3VirtqAvailBufPeek(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq,
                                  PPVIRTQBUF ppVirtqBuf)
{
    return virtioCoreR3VirtqAvailBufGet(pDevIns, pVirtio, uVirtq, ppVirtqBuf, false);
}
731
732/** API function: See Header file */
733int virtioCoreR3VirtqAvailBufNext(PVIRTIOCORE pVirtio, uint16_t uVirtq)
734{
735 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
736 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
737
738 if (!pVirtio->fLegacyDriver)
739 AssertMsgReturn((pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK) && pVirtq->uEnable,
740 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
741
742 if (IS_VIRTQ_EMPTY(pVirtio->pDevInsR3, pVirtio, pVirtq))
743 return VERR_NOT_AVAILABLE;
744
745 Log6Func(("%s avail shadow idx: %u\n", pVirtq->szName, pVirtq->uAvailIdxShadow));
746 pVirtq->uAvailIdxShadow++;
747
748 return VINF_SUCCESS;
749}
750
751
/**
 * API Function: See header file.
 *
 * Builds a descriptor-chain object (VIRTQBUF) for the chain starting at
 * descriptor @a uHeadIdx of virtq @a uVirtq, separating the guest-readable
 * (device OUT) and guest-writable (device IN) segments.
 *
 * The returned object is allocated with a reference count of 1; the caller
 * owns that reference and must drop it via virtioCoreR3VirtqBufRelease().
 *
 * @returns VINF_SUCCESS, VERR_INVALID_PARAMETER (bad queue number),
 *          VERR_INVALID_STATE (modern driver/queue not ready) or
 *          VERR_NO_MEMORY.
 */
int virtioCoreR3VirtqAvailBufGet(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq,
                                 uint16_t uHeadIdx, PPVIRTQBUF ppVirtqBuf)
{
    AssertReturn(ppVirtqBuf, VERR_INVALID_POINTER);
    *ppVirtqBuf = NULL;

    AssertMsgReturn(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues),
                    ("uVirtq out of range"), VERR_INVALID_PARAMETER);

    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];

    if (!pVirtio->fLegacyDriver)
        AssertMsgReturn((pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK) && pVirtq->uEnable,
                        ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);

    uint16_t uDescIdx = uHeadIdx;

    Log6Func(("%s DESC CHAIN: (head idx = %u)\n", pVirtio->aVirtqueues[uVirtq].szName, uHeadIdx));

    /*
     * Allocate and initialize the descriptor chain structure.
     */
    PVIRTQBUF pVirtqBuf = (PVIRTQBUF)RTMemAllocZ(sizeof(VIRTQBUF_T));
    AssertReturn(pVirtqBuf, VERR_NO_MEMORY);
    pVirtqBuf->u32Magic  = VIRTQBUF_MAGIC;
    pVirtqBuf->cRefs     = 1;       /* caller's reference */
    pVirtqBuf->uHeadIdx  = uHeadIdx;
    pVirtqBuf->uVirtq    = uVirtq;
    *ppVirtqBuf = pVirtqBuf;

    /*
     * Gather segments by walking the linked descriptor chain.
     */
    VIRTQ_DESC_T desc;

    uint32_t cbIn     = 0;          /* total bytes of guest-writable (IN) space   */
    uint32_t cbOut    = 0;          /* total bytes of guest-readable (OUT) data   */
    uint32_t cSegsIn  = 0;
    uint32_t cSegsOut = 0;

    PVIRTIOSGSEG paSegsIn  = pVirtqBuf->aSegsIn;
    PVIRTIOSGSEG paSegsOut = pVirtqBuf->aSegsOut;

    do
    {
        PVIRTIOSGSEG pSeg;

        /*
         * Malicious guests may go beyond paSegsIn or paSegsOut boundaries by linking
         * several descriptors into a loop. Since there is no legitimate way to get a sequences of
         * linked descriptors exceeding the total number of descriptors in the ring (see @bugref{8620}),
         * the following aborts I/O if breach and employs a simple log throttling algorithm to notify.
         */
        if (cSegsIn + cSegsOut >= VIRTQ_SIZE)
        {
            static volatile uint32_t s_cMessages  = 0;
            static volatile uint32_t s_cThreshold = 1;
            /* Exponential back-off: log the 1st, 10th, 100th, ... occurrence only. */
            if (ASMAtomicIncU32(&s_cMessages) == ASMAtomicReadU32(&s_cThreshold))
            {
                LogRelMax(64, ("Too many linked descriptors; check if the guest arranges descriptors in a loop.\n"));
                if (ASMAtomicReadU32(&s_cMessages) != 1)
                    LogRelMax(64, ("(the above error has occured %u times so far)\n", ASMAtomicReadU32(&s_cMessages)));
                ASMAtomicWriteU32(&s_cThreshold, ASMAtomicReadU32(&s_cThreshold) * 10);
            }
            break;  /* abort the walk; the partial chain is still returned */
        }
        RT_UNTRUSTED_VALIDATED_FENCE();

        virtioReadDesc(pDevIns, pVirtio, pVirtq, uDescIdx, &desc);

        if (desc.fFlags & VIRTQ_DESC_F_WRITE)
        {
            /* Guest-writable segment (device fills it in). */
            Log6Func(("%s IN idx=%u seg=%u addr=%RGp cb=%u\n", pVirtq->szName, uDescIdx, cSegsIn, desc.GCPhysBuf, desc.cb));
            cbIn += desc.cb;
            pSeg = &paSegsIn[cSegsIn++];
        }
        else
        {
            /* Guest-readable segment (data the driver hands to the device). */
            Log6Func(("%s OUT desc_idx=%u seg=%u addr=%RGp cb=%u\n", pVirtq->szName, uDescIdx, cSegsOut, desc.GCPhysBuf, desc.cb));
            cbOut += desc.cb;
            pSeg = &paSegsOut[cSegsOut++];
#ifdef DEEP_DEBUG
            if (LogIs11Enabled())
            {
                virtioCoreGCPhysHexDump(pDevIns, desc.GCPhysBuf, desc.cb, 0, NULL);
                Log(("\n"));
            }
#endif
        }

        pSeg->GCPhys = desc.GCPhysBuf;
        pSeg->cbSeg  = desc.cb;

        uDescIdx = desc.uDescIdxNext;
    } while (desc.fFlags & VIRTQ_DESC_F_NEXT);

    /*
     * Add segments to the descriptor chain structure.
     */
    if (cSegsIn)
    {
        virtioCoreGCPhysChainInit(&pVirtqBuf->SgBufIn, paSegsIn, cSegsIn);
        pVirtqBuf->pSgPhysReturn = &pVirtqBuf->SgBufIn;
        pVirtqBuf->cbPhysReturn  = cbIn;
#ifdef VBOX_WITH_STATISTICS
        STAM_REL_COUNTER_ADD(&pVirtio->StatDescChainsSegsIn, cSegsIn);
#endif
    }

    if (cSegsOut)
    {
        virtioCoreGCPhysChainInit(&pVirtqBuf->SgBufOut, paSegsOut, cSegsOut);
        pVirtqBuf->pSgPhysSend = &pVirtqBuf->SgBufOut;
        pVirtqBuf->cbPhysSend  = cbOut;
#ifdef VBOX_WITH_STATISTICS
        STAM_REL_COUNTER_ADD(&pVirtio->StatDescChainsSegsOut, cSegsOut);
#endif
    }

#ifdef VBOX_WITH_STATISTICS
    STAM_REL_COUNTER_INC(&pVirtio->StatDescChainsAllocated);
#endif
    Log6Func(("%s -- segs OUT: %u (%u bytes) IN: %u (%u bytes) --\n",
              pVirtq->szName, cSegsOut, cbOut, cSegsIn, cbIn));

    return VINF_SUCCESS;
}
880
881/** API function: See Header file */
882int virtioCoreR3VirtqAvailBufGet(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq,
883 PPVIRTQBUF ppVirtqBuf, bool fRemove)
884{
885 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
886 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
887
888 if (IS_VIRTQ_EMPTY(pDevIns, pVirtio, pVirtq))
889 return VERR_NOT_AVAILABLE;
890
891 uint16_t uHeadIdx = virtioReadAvailDescIdx(pDevIns, pVirtio, pVirtq, pVirtq->uAvailIdxShadow);
892
893 if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
894 virtioWriteUsedAvailEvent(pDevIns,pVirtio, pVirtq, pVirtq->uAvailIdxShadow + 1);
895
896 if (fRemove)
897 pVirtq->uAvailIdxShadow++;
898
899 int rc = virtioCoreR3VirtqAvailBufGet(pDevIns, pVirtio, uVirtq, uHeadIdx, ppVirtqBuf);
900 return rc;
901}
902
/** API function: See Header file
 *
 * Copies the device's response data (pSgVirtReturn, host-virtual s/g) into the guest
 * physical buffers of the IN segments of pVirtqBuf, then records the buffer in the
 * used ring at the shadow index.  The guest-visible used-ring index is NOT advanced
 * here; the caller must follow up with virtioCoreVirtqUsedRingSync().
 */
int virtioCoreR3VirtqUsedBufPut(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq, PRTSGBUF pSgVirtReturn,
                                PVIRTQBUF pVirtqBuf, bool fFence)
{
    Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];

    PVIRTIOSGBUF pSgPhysReturn = pVirtqBuf->pSgPhysReturn;

    /* Sanity-check the buffer token before touching guest memory. */
    Assert(pVirtqBuf->u32Magic == VIRTQBUF_MAGIC);
    Assert(pVirtqBuf->cRefs > 0);

    AssertMsgReturn(IS_DRIVER_OK(pVirtio), ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);

    Log6Func((" Copying device data to %s (%s guest), desc chain idx %d\n",
              VIRTQNAME(pVirtio, uVirtq), pVirtio->fLegacyDriver ? "legacy" : "modern", virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq)));

    /* Copy s/g buf (virtual memory) to guest phys mem (IN direction). */

    size_t cbCopy = 0, cbTotal = 0, cbRemain = 0;

    if (pSgVirtReturn)
    {
        size_t cbTarget = virtioCoreGCPhysChainCalcBufSize(pSgPhysReturn);
        cbRemain = cbTotal = RTSgBufCalcTotalLength(pSgVirtReturn);
        /* Refuse to overflow the guest-supplied IN buffer space. */
        AssertMsgReturn(cbTarget >= cbRemain, ("No space to write data to phys memory"), VERR_BUFFER_OVERFLOW);
        virtioCoreGCPhysChainReset(pSgPhysReturn);
        /* Advance both chains in lock-step, copying the overlap of the current segments. */
        while (cbRemain)
        {
            cbCopy = RT_MIN(pSgVirtReturn->cbSegLeft, pSgPhysReturn->cbSegLeft);
            Assert(cbCopy > 0);
            virtioCoreGCPhysWrite(pVirtio, pDevIns, (RTGCPHYS)pSgPhysReturn->GCPhysCur, pSgVirtReturn->pvSegCur, cbCopy);
            RTSgBufAdvance(pSgVirtReturn, cbCopy);
            virtioCoreGCPhysChainAdvance(pSgPhysReturn, cbCopy);
            cbRemain -= cbCopy;
        }

        /* Optional barrier so the payload is globally visible before the used-ring entry. */
        if (fFence)
            RT_UNTRUSTED_NONVOLATILE_COPY_FENCE(); /* needed? */

        Assert(!(cbCopy >> 32));
    }

    /* If this write-ahead crosses threshold where the driver wants to get an event, flag it */
    if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
        if (pVirtq->uUsedIdxShadow == virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq))
            pVirtq->fUsedRingEvent = true;

    /*
     * Place used buffer's descriptor in used ring but don't update used ring's slot index.
     * That will be done with a subsequent client call to virtioCoreVirtqUsedRingSync() */
    virtioWriteUsedElem(pDevIns, pVirtio, pVirtq, pVirtq->uUsedIdxShadow++, pVirtqBuf->uHeadIdx, (uint32_t)cbTotal);

    if (pSgVirtReturn)
        Log6Func((" ... %d segs, %zu bytes, copied to %u byte buf. residual: %zu bytes\n",
                  pSgVirtReturn->cSegs, cbTotal - cbRemain, pVirtqBuf->cbPhysReturn, pVirtqBuf->cbPhysReturn - cbTotal));

    Log6Func((" %s used_idx=%u\n", VIRTQNAME(pVirtio, uVirtq), virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq)));

    return VINF_SUCCESS;
}
964
965
966#endif /* IN_RING3 */
967
968/** API function: See Header file */
969int virtioCoreVirtqUsedRingSync(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq)
970{
971 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
972 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
973
974 if (!pVirtio->fLegacyDriver)
975 AssertMsgReturn((pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK) && pVirtq->uEnable,
976 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
977
978 Log6Func((" %s ++used_idx=%u\n", pVirtq->szName, pVirtq->uUsedIdxShadow));
979
980 virtioWriteUsedRingIdx(pDevIns, pVirtio, pVirtq, pVirtq->uUsedIdxShadow);
981 virtioCoreNotifyGuestDriver(pDevIns, pVirtio, uVirtq);
982
983 return VINF_SUCCESS;
984}
985
986/**
987 * This is called from the MMIO callback code when the guest does an MMIO access to the
988 * mapped queue notification capability area corresponding to a particular queue, to notify
989 * the queue handler of available data in the avail ring of the queue (VirtIO 1.0, 4.1.4.4.1)
990 *
991 * @param pDevIns The device instance.
992 * @param pVirtio Pointer to the shared virtio state.
993 * @param uVirtq Virtq to check for guest interrupt handling preference
994 * @param uNotifyIdx Notification index
995 */
996static void virtioCoreVirtqNotified(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq, uint16_t uNotifyIdx)
997{
998 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
999
1000 /* See VirtIO 1.0, section 4.1.5.2 It implies that uVirtq and uNotifyIdx should match.
1001 * Disregarding this notification may cause throughput to stop, however there's no way to know
1002 * which was queue was intended for wake-up if the two parameters disagree. */
1003
1004 AssertMsg(uNotifyIdx == uVirtq,
1005 ("Guest kicked virtq %d's notify addr w/non-corresponding virtq idx %d\n",
1006 uVirtq, uNotifyIdx));
1007 RT_NOREF(uNotifyIdx);
1008
1009 AssertReturnVoid(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
1010 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
1011
1012 Log6Func(("%s (desc chains: %u)\n", pVirtq->szName,
1013 virtioCoreVirtqAvailCnt(pDevIns, pVirtio, pVirtq)));
1014
1015 /* Inform client */
1016 pVirtioCC->pfnVirtqNotified(pDevIns, pVirtio, uVirtq);
1017 RT_NOREF2(pVirtio, pVirtq);
1018}
1019
1020/**
1021 * Trigger MSI-X or INT# interrupt to notify guest of data added to used ring of
1022 * the specified virtq, depending on the interrupt configuration of the device
1023 * and depending on negotiated and realtime constraints flagged by the guest driver.
1024 *
1025 * See VirtIO 1.0 specification (section 2.4.7).
1026 *
1027 * @param pDevIns The device instance.
1028 * @param pVirtio Pointer to the shared virtio state.
1029 * @param uVirtq Virtq to check for guest interrupt handling preference
1030 */
1031static void virtioCoreNotifyGuestDriver(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq)
1032{
1033 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
1034 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
1035
1036 if (!IS_DRIVER_OK(pVirtio))
1037 {
1038 LogFunc(("Guest driver not in ready state.\n"));
1039 return;
1040 }
1041
1042 if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
1043 {
1044 if (pVirtq->fUsedRingEvent)
1045 {
1046#ifdef IN_RING3
1047 Log6Func(("...kicking guest %s, VIRTIO_F_EVENT_IDX set and threshold (%d) reached\n",
1048 pVirtq->szName, (uint16_t)virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq)));
1049#endif
1050 virtioKick(pDevIns, pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtq->uMsixVector);
1051 pVirtq->fUsedRingEvent = false;
1052 return;
1053 }
1054#ifdef IN_RING3
1055 Log6Func(("...skip interrupt %s, VIRTIO_F_EVENT_IDX set but threshold (%d) not reached (%d)\n",
1056 pVirtq->szName,(uint16_t)virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq), pVirtq->uUsedIdxShadow));
1057#endif
1058 }
1059 else
1060 {
1061 /** If guest driver hasn't suppressed interrupts, interrupt */
1062 if (!(virtioReadAvailRingFlags(pDevIns, pVirtio, pVirtq) & VIRTQ_AVAIL_F_NO_INTERRUPT))
1063 {
1064 virtioKick(pDevIns, pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtq->uMsixVector);
1065 return;
1066 }
1067 Log6Func(("...skipping interrupt for %s (guest set VIRTQ_AVAIL_F_NO_INTERRUPT)\n", pVirtq->szName));
1068 }
1069}
1070
1071/**
1072 * Raise interrupt or MSI-X
1073 *
1074 * @param pDevIns The device instance.
1075 * @param pVirtio Pointer to the shared virtio state.
1076 * @param uCause Interrupt cause bit mask to set in PCI ISR port.
1077 * @param uVec MSI-X vector, if enabled
1078 */
1079static int virtioKick(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint8_t uCause, uint16_t uMsixVector)
1080{
1081 if (uCause == VIRTIO_ISR_VIRTQ_INTERRUPT)
1082 Log6Func(("reason: buffer added to 'used' ring.\n"));
1083 else
1084 if (uCause == VIRTIO_ISR_DEVICE_CONFIG)
1085 Log6Func(("reason: device config change\n"));
1086
1087 if (!pVirtio->fMsiSupport)
1088 {
1089 pVirtio->uISR |= uCause;
1090 PDMDevHlpPCISetIrq(pDevIns, 0, PDM_IRQ_LEVEL_HIGH);
1091 }
1092 else if (uMsixVector != VIRTIO_MSI_NO_VECTOR)
1093 PDMDevHlpPCISetIrq(pDevIns, uMsixVector, 1);
1094 return VINF_SUCCESS;
1095}
1096
1097/**
1098 * Lower interrupt (Called when guest reads ISR and when resetting)
1099 *
1100 * @param pDevIns The device instance.
1101 */
static void virtioLowerInterrupt(PPDMDEVINS pDevIns, uint16_t uMsixVector)
{
    PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
    if (!pVirtio->fMsiSupport)
        PDMDevHlpPCISetIrq(pDevIns, 0, PDM_IRQ_LEVEL_LOW);
    else if (uMsixVector != VIRTIO_MSI_NO_VECTOR)
        /* NOTE(review): this MSI-X path lowers pVirtio->uMsixConfig rather than the
         * uMsixVector parameter that was just checked against NO_VECTOR (contrast with
         * virtioKick, which raises uMsixVector).  Possibly intentional, possibly a
         * copy/paste slip -- confirm against PDM MSI-X semantics before changing. */
        PDMDevHlpPCISetIrq(pDevIns, pVirtio->uMsixConfig, PDM_IRQ_LEVEL_LOW);
}
1110
1111#ifdef IN_RING3
1112static void virtioResetVirtq(PVIRTIOCORE pVirtio, uint16_t uVirtq)
1113{
1114 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
1115 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
1116
1117 pVirtq->uQueueSize = VIRTQ_SIZE;
1118 pVirtq->uEnable = false;
1119 pVirtq->uNotifyOffset = uVirtq;
1120 pVirtq->fUsedRingEvent = false;
1121 pVirtq->uAvailIdxShadow = 0;
1122 pVirtq->uUsedIdxShadow = 0;
1123 pVirtq->uMsixVector = uVirtq + 2;
1124
1125 if (!pVirtio->fMsiSupport) /* VirtIO 1.0, 4.1.4.3 and 4.1.5.1.2 */
1126 pVirtq->uMsixVector = VIRTIO_MSI_NO_VECTOR;
1127
1128 virtioLowerInterrupt(pVirtio->pDevInsR3, pVirtq->uMsixVector);
1129}
1130
1131static void virtioResetDevice(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio)
1132{
1133 LogFunc(("Resetting device VirtIO state\n"));
1134 pVirtio->fLegacyDriver = 1; /* Assume this. Cleared if VIRTIO_F_VERSION_1 feature ack'd */
1135 pVirtio->uDeviceFeaturesSelect = 0;
1136 pVirtio->uDriverFeaturesSelect = 0;
1137 pVirtio->uConfigGeneration = 0;
1138 pVirtio->fDeviceStatus = 0;
1139 pVirtio->uISR = 0;
1140
1141 if (!pVirtio->fMsiSupport)
1142 virtioLowerInterrupt(pDevIns, 0);
1143 else
1144 {
1145 virtioLowerInterrupt(pDevIns, pVirtio->uMsixConfig);
1146 for (int i = 0; i < VIRTQ_MAX_COUNT; i++)
1147 virtioLowerInterrupt(pDevIns, pVirtio->aVirtqueues[i].uMsixVector);
1148 }
1149
1150 if (!pVirtio->fMsiSupport) /* VirtIO 1.0, 4.1.4.3 and 4.1.5.1.2 */
1151 pVirtio->uMsixConfig = VIRTIO_MSI_NO_VECTOR;
1152
1153 for (uint16_t uVirtq = 0; uVirtq < VIRTQ_MAX_COUNT; uVirtq++)
1154 virtioResetVirtq(pVirtio, uVirtq);
1155}
1156
1157/**
1158 * Invoked by this implementation when guest driver resets the device.
1159 * The driver itself will not until the device has read the status change.
1160 */
static void virtioGuestR3WasReset(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC)
{
    Log(("%-23s: Guest reset the device\n", __FUNCTION__));

    /* Tell the device-specific client the driver is no longer "OK" before wiping
     * the shared VirtIO state, so it can quiesce against the old configuration. */
    pVirtioCC->pfnStatusChanged(pVirtio, pVirtioCC, 0 /* fDriverOk */);
    virtioResetDevice(pDevIns, pVirtio);
}
1169#endif /* IN_RING3 */
1170
1171/**
1172 * Handle accesses to Common Configuration capability
1173 *
1174 * @returns VBox status code
1175 *
1176 * @param pDevIns The device instance.
1177 * @param pVirtio Pointer to the shared virtio state.
1178 * @param pVirtioCC Pointer to the current context virtio state.
1179 * @param fWrite Set if write access, clear if read access.
1180 * @param uOffsetOfAccess The common configuration capability offset.
1181 * @param cb Number of bytes to read or write
1182 * @param pv Pointer to location to write to or read from
1183 */
static int virtioCommonCfgAccessed(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC,
                                   int fWrite, uint32_t uOffsetOfAccess, unsigned cb, void *pv)
{
    uint16_t uVirtq = pVirtio->uVirtqSelect;
    int rc = VINF_SUCCESS;
    uint64_t val;
    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
    {
        if (fWrite) /* Guest WRITE pCommonCfg>uDeviceFeatures */
        {
            /* VirtIO 1.0, 4.1.4.3 states device_feature is a (guest) driver readonly field,
             * yet the linux driver attempts to write/read it back twice */
            VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
            LogFunc(("... WARNING: Guest attempted to write readonly virtio_pci_common_cfg.device_feature (ignoring)\n"));
            return VINF_IOM_MMIO_UNUSED_00;
        }
        else /* Guest READ pCommonCfg->uDeviceFeatures */
        {
            /* The 64-bit feature word is exposed as two 32-bit halves via device_feature_select. */
            switch (pVirtio->uDeviceFeaturesSelect)
            {
                case 0:
                    val = pVirtio->uDeviceFeatures & UINT32_C(0xffffffff);
                    memcpy(pv, &val, cb);
                    VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
                    break;
                case 1:
                    val = pVirtio->uDeviceFeatures >> 32;
                    memcpy(pv, &val, cb);
                    VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess + sizeof(uint32_t));
                    break;
                default:
                    LogFunc(("Guest read uDeviceFeatures with out of range selector (%#x), returning 0\n",
                             pVirtio->uDeviceFeaturesSelect));
                    return VINF_IOM_MMIO_UNUSED_00;
            }
        }
    }
    else
    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
    {
        if (fWrite) /* Guest WRITE pCommonCfg->udriverFeatures */
        {
            /* Same two-half scheme as device features, selected by driver_feature_select. */
            switch (pVirtio->uDriverFeaturesSelect)
            {
                case 0:
                    memcpy(&pVirtio->uDriverFeatures, pv, cb);
                    VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
                    break;
                case 1:
                    memcpy((char *)&pVirtio->uDriverFeatures + sizeof(uint32_t), pv, cb);
                    /* VIRTIO_F_VERSION_1 (bit 32) acked => driver is "modern".  The transition
                     * is handled in ring-3 so device-specific code can react (hence the demote). */
                    if (pVirtio->uDriverFeatures & VIRTIO_F_VERSION_1)
                    {
#ifdef IN_RING0
                        return VINF_IOM_R3_MMIO_WRITE;
#endif
#ifdef IN_RING3
                        pVirtio->fLegacyDriver = 0;
                        pVirtioCC->pfnGuestVersionHandler(pVirtio, 1 /* fModern */);
#endif
                    }
                    VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess + sizeof(uint32_t));
                    break;
                default:
                    LogFunc(("Guest wrote uDriverFeatures with out of range selector (%#x), returning 0\n",
                             pVirtio->uDriverFeaturesSelect));
                    return VINF_SUCCESS;
            }
        }
        else /* Guest READ pCommonCfg->udriverFeatures */
        {
            switch (pVirtio->uDriverFeaturesSelect)
            {
                case 0:
                    val = pVirtio->uDriverFeatures & 0xffffffff;
                    memcpy(pv, &val, cb);
                    VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
                    break;
                case 1:
                    val = (pVirtio->uDriverFeatures >> 32) & 0xffffffff;
                    memcpy(pv, &val, cb);
                    VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess + 4);
                    break;
                default:
                    LogFunc(("Guest read uDriverFeatures with out of range selector (%#x), returning 0\n",
                             pVirtio->uDriverFeaturesSelect));
                    return VINF_IOM_MMIO_UNUSED_00;
            }
        }
    }
    else
    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uNumVirtqs, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
    {
        if (fWrite)
        {
            Log2Func(("Guest attempted to write readonly virtio_pci_common_cfg.num_queues\n"));
            return VINF_SUCCESS;
        }
        *(uint16_t *)pv = VIRTQ_MAX_COUNT;
        VIRTIO_DEV_CONFIG_LOG_ACCESS(uNumVirtqs, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
    }
    else
    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(fDeviceStatus, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
    {
        if (fWrite) /* Guest WRITE pCommonCfg->fDeviceStatus */
        {
            pVirtio->fDeviceStatus = *(uint8_t *)pv;
            /* Writing 0 to device_status requests a device reset (VirtIO 1.0, 2.1). */
            bool fDeviceReset = pVirtio->fDeviceStatus == 0;

            if (LogIs7Enabled())
            {
                char szOut[80] = { 0 };
                virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut));
                Log(("%-23s: Guest wrote fDeviceStatus ................ (%s)\n", __FUNCTION__, szOut));
            }
            /* Did the DRIVER_OK bit flip relative to the previously-saved status? */
            bool const fStatusChanged = IS_DRIVER_OK(pVirtio) != WAS_DRIVER_OK(pVirtio);

            if (fDeviceReset || fStatusChanged)
            {
#ifdef IN_RING0
                /* Since VirtIO status changes are cumbersome by nature, e.g. not a benchmark priority,
                 * handle the rest in R3 to facilitate logging or whatever dev-specific client needs to do */
                Log6(("%-23s: RING0 => RING3 (demote)\n", __FUNCTION__));
                return VINF_IOM_R3_MMIO_WRITE;
#endif
            }

#ifdef IN_RING3
            /*
             * Notify client only if status actually changed from last time and when we're reset.
             */
            if (fDeviceReset)
                virtioGuestR3WasReset(pDevIns, pVirtio, pVirtioCC);

            if (fStatusChanged)
                pVirtioCC->pfnStatusChanged(pVirtio, pVirtioCC, IS_DRIVER_OK(pVirtio));
#endif
            /*
             * Save the current status for the next write so we can see what changed.
             */
            pVirtio->fPrevDeviceStatus = pVirtio->fDeviceStatus;
        }
        else /* Guest READ pCommonCfg->fDeviceStatus */
        {
            *(uint8_t *)pv = pVirtio->fDeviceStatus;

            if (LogIs7Enabled())
            {
                char szOut[80] = { 0 };
                virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut));
                LogFunc(("Guest read fDeviceStatus ................ (%s)\n", szOut));
            }
        }
    }
    else
    /* Remaining fields are handled generically by the config-access macros; the
     * *_INDEXED variants address per-queue members selected by uVirtqSelect. */
    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixConfig,                VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
        VIRTIO_DEV_CONFIG_ACCESS(       uMsixConfig,                VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
    else
    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uDeviceFeaturesSelect,      VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
        VIRTIO_DEV_CONFIG_ACCESS(       uDeviceFeaturesSelect,      VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
    else
    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uDriverFeaturesSelect,      VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
        VIRTIO_DEV_CONFIG_ACCESS(       uDriverFeaturesSelect,      VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
    else
    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uConfigGeneration,          VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
        VIRTIO_DEV_CONFIG_ACCESS(       uConfigGeneration,          VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
    else
    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uVirtqSelect,               VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
        VIRTIO_DEV_CONFIG_ACCESS(       uVirtqSelect,               VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
    else
    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( GCPhysVirtqDesc,            VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
        VIRTIO_DEV_CONFIG_ACCESS_INDEXED( GCPhysVirtqDesc, uVirtq,  VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
    else
    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( GCPhysVirtqAvail,           VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
        VIRTIO_DEV_CONFIG_ACCESS_INDEXED( GCPhysVirtqAvail, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
    else
    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( GCPhysVirtqUsed,            VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
        VIRTIO_DEV_CONFIG_ACCESS_INDEXED( GCPhysVirtqUsed, uVirtq,  VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
    else
    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uQueueSize,                 VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
        VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uQueueSize, uVirtq,       VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
    else
    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uEnable,                    VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
        VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uEnable, uVirtq,          VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
    else
    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uNotifyOffset,              VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
        VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uNotifyOffset, uVirtq,    VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
    else
    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixVector,                VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
        VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uMsixVector, uVirtq,      VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
    else
    {
        Log2Func(("Bad guest %s access to virtio_pci_common_cfg: uOffsetOfAccess=%#x (%d), cb=%d\n",
                  fWrite ? "write" : "read ", uOffsetOfAccess, uOffsetOfAccess, cb));
        return fWrite ? VINF_SUCCESS : VINF_IOM_MMIO_UNUSED_00;
    }

#ifndef IN_RING3
    RT_NOREF(pDevIns, pVirtioCC);
#endif
    return rc;
}
1385
1386/**
1387 * @callback_method_impl{FNIOMIOPORTNEWIN)
1388 *
1389 * This I/O handler exists only to handle access from legacy drivers.
1390 */
1391
static DECLCALLBACK(VBOXSTRICTRC) virtioLegacyIOPortIn(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t *pu32, unsigned cb)
{

    PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
    STAM_PROFILE_ADV_START(&pVirtio->CTX_SUFF(StatRead), a);

    RT_NOREF(pvUser);

    void *pv = pu32; /* To use existing macros */
    int fWrite = 0; /* To use existing macros */

    uint16_t uVirtq = pVirtio->uVirtqSelect;

    /* Legacy drivers only see the low 32 bits of the feature words. */
    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDeviceFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
    {
        uint32_t val = pVirtio->uDeviceFeatures & UINT32_C(0xffffffff);
        memcpy(pu32, &val, cb);
        VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort);
    }
    else
    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDriverFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
    {
        uint32_t val = pVirtio->uDriverFeatures & 0xffffffff;
        memcpy(pu32, &val, cb);
        VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort);
    }
    else
    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(fDeviceStatus, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
    {
        *(uint8_t *)pu32 = pVirtio->fDeviceStatus;

        if (LogIs7Enabled())
        {
            char szOut[80] = { 0 };
            virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut));
            Log(("%-23s: Guest read fDeviceStatus ................ (%s)\n", __FUNCTION__, szOut));
        }
    }
    else
    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(fIsrStatus, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
    {
        /* ISR is read-to-clear; reading also deasserts the INTx line. */
        ASSERT_GUEST_MSG(cb == 1, ("%d\n", cb));
        *(uint8_t *)pu32 = pVirtio->uISR;
        pVirtio->uISR = 0;
        virtioLowerInterrupt( pDevIns,  0);
        Log((" ISR read and cleared\n"));
    }
    else
    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uVirtqSelect,           VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
        VIRTIO_DEV_CONFIG_ACCESS(       uVirtqSelect,           VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio);
    else
    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uVirtqPfn,              VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
    {
        /* The PFN is not stored; it is re-derived from the descriptor-ring address
         * (see the matching write handler for the layout formula). */
        PVIRTQUEUE pVirtQueue = &pVirtio->aVirtqueues[uVirtq];
        *pu32 = pVirtQueue->GCPhysVirtqDesc >> PAGE_SHIFT;
        Log(("%-23s: Guest read uVirtqPfn .................... %#x\n", __FUNCTION__, *pu32));
    }
    else
    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uQueueSize,             VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
        VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uQueueSize, uVirtq,   VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio->aVirtqueues);
    else
    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uQueueNotify,           VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
        VIRTIO_DEV_CONFIG_ACCESS(       uQueueNotify,           VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio);
#ifdef LEGACY_MSIX_SUPPORTED
    else
    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixConfig,            VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
        VIRTIO_DEV_CONFIG_ACCESS(       uMsixConfig,            VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio);
    else
    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixVector,            VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
        VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uMsixVector, uVirtq,  VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio->aVirtqueues);
#endif
    else if (offPort >= sizeof(VIRTIO_LEGACY_PCI_COMMON_CFG_T))
    {
        /* Past the common config: device-specific config region (ring-3 only). */
        STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a);
#ifdef IN_RING3
        /* Access device-specific configuration */
        PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
        int rc = pVirtioCC->pfnDevCapRead(pDevIns, offPort - sizeof(VIRTIO_LEGACY_PCI_COMMON_CFG_T), pv, cb);
        return rc;
#else
        return VINF_IOM_R3_IOPORT_READ;
#endif
    }
    else
    {
        STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a);
        Log2Func(("Bad guest read access to virtio_legacy_pci_common_cfg: offset=%#x, cb=%x\n",
                  offPort, cb));
        int rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS,
                                   "virtioLegacyIOPortIn: no valid port at offset offset=%RTiop cb=%#x\n", offPort, cb);
        return rc;
    }

    STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a);
    return VINF_SUCCESS;
}
1488
1489
1490/**
1491 * @callback_method_impl{ * @callback_method_impl{FNIOMIOPORTNEWOUT}
1492 *
1493 * This I/O Port interface exists only to handle access from legacy drivers.
1494 */
1495static DECLCALLBACK(VBOXSTRICTRC) virtioLegacyIOPortOut(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t u32, unsigned cb)
1496{
1497 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1498 STAM_PROFILE_ADV_START(&pVirtio->CTX_SUFF(StatWrite), a);
1499 RT_NOREF(pvUser);
1500
1501 uint16_t uVirtq = pVirtio->uVirtqSelect;
1502 uint32_t u32OnStack = u32; /* allows us to use this impl's MMIO parsing macros */
1503 void *pv = &u32OnStack; /* To use existing macros */
1504 int fWrite = 1; /* To use existing macros */
1505
1506// LogFunc(("Write to port offset=%RTiop, cb=%#x, u32=%#x\n", offPort, cb, u32));
1507
1508 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uVirtqSelect, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1509 VIRTIO_DEV_CONFIG_ACCESS( uVirtqSelect, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio);
1510 else
1511#ifdef LEGACY_MSIX_SUPPORTED
1512 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixConfig, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1513 VIRTIO_DEV_CONFIG_ACCESS( uMsixConfig, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio);
1514 else
1515 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixVector, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1516 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uMsixVector, uVirtq, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio->aVirtqueues);
1517 else
1518#endif
1519 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDeviceFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1520 {
1521 /* Check to see if guest acknowledged unsupported features */
1522 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort);
1523 LogFunc(("... WARNING: Guest attempted to write readonly virtio_pci_common_cfg.device_feature (ignoring)\n"));
1524 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1525 return VINF_SUCCESS;
1526 }
1527 else
1528 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDriverFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1529 {
1530 memcpy(&pVirtio->uDriverFeatures, pv, cb);
1531 if ((pVirtio->uDriverFeatures & ~VIRTIO_DEV_INDEPENDENT_LEGACY_FEATURES_OFFERED) == 0)
1532 {
1533 Log(("Guest asked for features host does not support! (host=%x guest=%x)\n",
1534 VIRTIO_DEV_INDEPENDENT_LEGACY_FEATURES_OFFERED, pVirtio->uDriverFeatures));
1535 pVirtio->uDriverFeatures &= VIRTIO_DEV_INDEPENDENT_LEGACY_FEATURES_OFFERED;
1536 }
1537 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort);
1538 }
1539 else
1540 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uQueueSize, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1541 {
1542 VIRTIO_DEV_CONFIG_LOG_ACCESS(uQueueSize, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort);
1543 LogFunc(("... WARNING: Guest attempted to write readonly device_feature (queue size) (ignoring)\n"));
1544 return VINF_SUCCESS;
1545 }
1546 else
1547 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(fDeviceStatus, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1548 {
1549 bool const fDriverInitiatedReset = (pVirtio->fDeviceStatus = (uint8_t)u32) == 0;
1550 bool const fDriverStateImproved = IS_DRIVER_OK(pVirtio) && !WAS_DRIVER_OK(pVirtio);
1551
1552 if (LogIs7Enabled())
1553 {
1554 char szOut[80] = { 0 };
1555 virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut));
1556 Log(("%-23s: Guest wrote fDeviceStatus ................ (%s)\n", __FUNCTION__, szOut));
1557 }
1558
1559 if (fDriverStateImproved || fDriverInitiatedReset)
1560 {
1561#ifdef IN_RING0
1562 Log6(("%-23s: RING0 => RING3 (demote)\n", __FUNCTION__));
1563 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1564 return VINF_IOM_R3_IOPORT_WRITE;
1565#endif
1566 }
1567
1568#ifdef IN_RING3
1569 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1570 if (fDriverInitiatedReset)
1571 virtioGuestR3WasReset(pDevIns, pVirtio, pVirtioCC);
1572
1573 else if (fDriverStateImproved)
1574 pVirtioCC->pfnStatusChanged(pVirtio, pVirtioCC, 1 /* fDriverOk */);
1575
1576#endif
1577 pVirtio->fPrevDeviceStatus = pVirtio->fDeviceStatus;
1578 }
1579 else
1580 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uVirtqPfn, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1581 {
1582 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
1583 uint64_t uVirtqPfn = (uint64_t)u32;
1584
1585 if (uVirtqPfn)
1586 {
1587 /* Transitional devices calculate ring physical addresses using rigid spec-defined formulae,
1588 * instead of guest conveying respective address of each ring, as "modern" VirtIO drivers do,
1589 * thus there is no virtq PFN or single base queue address stored in instance data for
1590 * this transitional device, but rather it is derived, when read back, from GCPhysVirtqDesc */
1591
1592 pVirtq->GCPhysVirtqDesc = uVirtqPfn * VIRTIO_PAGE_SIZE;
1593 pVirtq->GCPhysVirtqAvail = pVirtq->GCPhysVirtqDesc + sizeof(VIRTQ_DESC_T) * pVirtq->uQueueSize;
1594 pVirtq->GCPhysVirtqUsed =
1595 RT_ALIGN(pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[pVirtq->uQueueSize]), VIRTIO_PAGE_SIZE);
1596 }
1597 else
1598 {
1599 /* Don't set ring addresses for queue (to meaningless values), when guest resets the virtq's PFN */
1600 pVirtq->GCPhysVirtqDesc = 0;
1601 pVirtq->GCPhysVirtqAvail = 0;
1602 pVirtq->GCPhysVirtqUsed = 0;
1603 }
1604 Log(("%-23s: Guest wrote uVirtqPfn .................... %#x:\n"
1605 "%68s... %p -> GCPhysVirtqDesc\n%68s... %p -> GCPhysVirtqAvail\n%68s... %p -> GCPhysVirtqUsed\n",
1606 __FUNCTION__, u32, " ", pVirtq->GCPhysVirtqDesc, " ", pVirtq->GCPhysVirtqAvail, " ", pVirtq->GCPhysVirtqUsed));
1607 }
1608 else
1609 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uQueueNotify, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1610 {
1611#ifdef IN_RING3
1612 ASSERT_GUEST_MSG(cb == 2, ("cb=%u\n", cb));
1613 pVirtio->uQueueNotify = u32 & 0xFFFF;
1614 if (uVirtq < VIRTQ_MAX_COUNT)
1615 {
1616 RT_UNTRUSTED_VALIDATED_FENCE();
1617
1618 /* Need to check that queue is configured. Legacy spec didn't have a queue enabled flag */
1619 if (pVirtio->aVirtqueues[pVirtio->uQueueNotify].GCPhysVirtqDesc)
1620 virtioCoreVirtqNotified(pDevIns, pVirtio, pVirtio->uQueueNotify, pVirtio->uQueueNotify /* uNotifyIdx */);
1621 else
1622 Log(("The queue (#%d) being notified has not been initialized.\n", pVirtio->uQueueNotify));
1623 }
1624 else
1625 Log(("Invalid queue number (%d)\n", pVirtio->uQueueNotify));
1626#else
1627 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1628 return VINF_IOM_R3_IOPORT_WRITE;
1629#endif
1630 }
1631 else
1632 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(fIsrStatus, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1633 {
1634 VIRTIO_DEV_CONFIG_LOG_ACCESS( fIsrStatus, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort);
1635 LogFunc(("... WARNING: Guest attempted to write readonly device_feature (ISR status) (ignoring)\n"));
1636 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1637 return VINF_SUCCESS;
1638 }
1639 else if (offPort >= sizeof(VIRTIO_LEGACY_PCI_COMMON_CFG_T))
1640 {
1641#ifdef IN_RING3
1642
1643 /* Access device-specific configuration */
1644 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1645 return pVirtioCC->pfnDevCapWrite(pDevIns, offPort - sizeof(VIRTIO_LEGACY_PCI_COMMON_CFG_T), pv, cb);
1646#else
1647 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1648 return VINF_IOM_R3_IOPORT_WRITE;
1649#endif
1650 }
1651 else
1652 {
1653 Log2Func(("Bad guest write access to virtio_legacy_pci_common_cfg: offset=%#x, cb=0x%x\n",
1654 offPort, cb));
1655 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1656 int rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS,
1657 "virtioLegacyIOPortOut: no valid port at offset offset=%RTiop cb=0x%#x\n", offPort, cb);
1658 return rc;
1659 }
1660
1661 RT_NOREF(uVirtq);
1662 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1663 return VINF_SUCCESS;
1664}
1665
1666
/**
 * @callback_method_impl{FNIOMMMIONEWREAD,
 * Memory mapped I/O Handler for PCI Capabilities read operations.}
 *
 * This MMIO handler specifically supports the VIRTIO_PCI_CAP_PCI_CFG capability defined
 * in the VirtIO 1.0 specification, section 4.1.4.7, and as such is restricted to reads
 * of 1, 2 or 4 bytes, only.
 *
 * Dispatches the access to whichever capability region the offset falls into:
 * device-specific config (ring-3 only, via the client's pfnDevCapRead callback),
 * common config, or ISR status.  Reads outside any known region trip a debugger stop.
 */
static DECLCALLBACK(VBOXSTRICTRC) virtioMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
{
    PVIRTIOCORE   pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
    PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
    AssertReturn(cb == 1 || cb == 2 || cb == 4, VERR_INVALID_PARAMETER);
    Assert(pVirtio == (PVIRTIOCORE)pvUser); RT_NOREF(pvUser);
    STAM_PROFILE_ADV_START(&pVirtio->CTX_SUFF(StatRead), a);


    uint32_t uOffset;
    if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocDeviceCap))
    {
#ifdef IN_RING3
        /*
         * Callback to client to manage device-specific configuration.
         */
        VBOXSTRICTRC rcStrict = pVirtioCC->pfnDevCapRead(pDevIns, uOffset, pv, cb);

        /*
         * Additionally, anytime any part of the device-specific configuration (which our client maintains)
         * is READ it needs to be checked to see if it changed since the last time any part was read, in
         * order to maintain the config generation (see VirtIO 1.0 spec, section 4.1.4.3.1)
         */
        bool fDevSpecificFieldChanged = RT_BOOL(memcmp(pVirtioCC->pbDevSpecificCfg + uOffset,
                                                       pVirtioCC->pbPrevDevSpecificCfg + uOffset,
                                                       RT_MIN(cb, pVirtioCC->cbDevSpecificCfg - uOffset)));

        /* Snapshot the whole device-specific config so the next read can detect further changes. */
        memcpy(pVirtioCC->pbPrevDevSpecificCfg, pVirtioCC->pbDevSpecificCfg, pVirtioCC->cbDevSpecificCfg);

        if (pVirtio->fGenUpdatePending || fDevSpecificFieldChanged)
        {
            ++pVirtio->uConfigGeneration;
            Log6Func(("Bumped cfg. generation to %d because %s%s\n",
                      pVirtio->uConfigGeneration,
                      fDevSpecificFieldChanged ? "<dev cfg changed> " : "",
                      pVirtio->fGenUpdatePending ? "<update was pending>" : ""));
            pVirtio->fGenUpdatePending = false;
        }

        virtioLowerInterrupt(pDevIns, 0);
        STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a);
        return rcStrict;
#else
        /* Device-specific config reads require the client callback; defer to ring-3. */
        STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a);
        return VINF_IOM_R3_MMIO_READ;
#endif
    }

    if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocCommonCfgCap))
        return virtioCommonCfgAccessed(pDevIns, pVirtio, pVirtioCC, false /* fWrite */, uOffset, cb, pv);

    if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocIsrCap))
    {
        *(uint8_t *)pv = pVirtio->uISR;
        Log6Func(("Read and clear ISR\n"));
        pVirtio->uISR = 0; /* VirtIO spec requires reads of ISR to clear it */
        virtioLowerInterrupt(pDevIns, 0);
        STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a);
        return VINF_SUCCESS;
    }

    ASSERT_GUEST_MSG_FAILED(("Bad read access to mapped capabilities region: off=%RGp cb=%u\n", off, cb));
    STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a);
    int rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS,
                               "virtioMmioRead: Bad MMIO access to capabilities, offset=%RTiop cb=%08x\n", off, cb);
    return rc;
}
1743
1744/**
1745 * @callback_method_impl{FNIOMMMIONEWREAD,
1746 * Memory mapped I/O Handler for PCI Capabilities write operations.}
1747 *
1748 * This MMIO handler specifically supports the VIRTIO_PCI_CAP_PCI_CFG capability defined
1749 * in the VirtIO 1.0 specification, section 4.1.4.7, and as such is restricted to writes
1750 * of 1, 2 or 4 bytes, only.
1751 */
1752static DECLCALLBACK(VBOXSTRICTRC) virtioMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
1753{
1754 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1755 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1756 AssertReturn(cb == 1 || cb == 2 || cb == 4, VERR_INVALID_PARAMETER);
1757 Assert(pVirtio == (PVIRTIOCORE)pvUser); RT_NOREF(pvUser);
1758 STAM_PROFILE_ADV_START(&pVirtio->CTX_SUFF(StatWrite), a);
1759
1760 uint32_t uOffset;
1761 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocDeviceCap))
1762 {
1763#ifdef IN_RING3
1764 /*
1765 * Foreward this MMIO write access for client to deal with.
1766 */
1767 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1768 return pVirtioCC->pfnDevCapWrite(pDevIns, uOffset, pv, cb);
1769#else
1770 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1771 return VINF_IOM_R3_MMIO_WRITE;
1772#endif
1773 }
1774
1775 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocCommonCfgCap))
1776 {
1777 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1778 return virtioCommonCfgAccessed(pDevIns, pVirtio, pVirtioCC, true /* fWrite */, uOffset, cb, (void *)pv);
1779 }
1780
1781 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocIsrCap) && cb == sizeof(uint8_t))
1782 {
1783 pVirtio->uISR = *(uint8_t *)pv;
1784 Log6Func(("Setting uISR = 0x%02x (virtq interrupt: %d, dev confg interrupt: %d)\n",
1785 pVirtio->uISR & 0xff,
1786 pVirtio->uISR & VIRTIO_ISR_VIRTQ_INTERRUPT,
1787 RT_BOOL(pVirtio->uISR & VIRTIO_ISR_DEVICE_CONFIG)));
1788 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1789 return VINF_SUCCESS;
1790 }
1791
1792 /* This *should* be guest driver dropping index of a new descriptor in avail ring */
1793 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocNotifyCap) && cb == sizeof(uint16_t))
1794 {
1795 virtioCoreVirtqNotified(pDevIns, pVirtio, uOffset / VIRTIO_NOTIFY_OFFSET_MULTIPLIER, *(uint16_t *)pv);
1796 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1797 return VINF_SUCCESS;
1798 }
1799
1800 ASSERT_GUEST_MSG_FAILED(("Bad write access to mapped capabilities region: off=%RGp pv=%#p{%.*Rhxs} cb=%u\n", off, pv, cb, pv, cb));
1801 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1802 int rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS,
1803 "virtioMmioRead: Bad MMIO access to capabilities, offset=%RTiop cb=%08x\n", off, cb);
1804 return rc;
1805}
1806
1807#ifdef IN_RING3
1808
/**
 * @callback_method_impl{FNPCICONFIGREAD}
 *
 * Intercepts PCI config-space reads so the virtio_pci_cfg_cap window
 * (VirtIO 1.0 spec, section 4.1.4.7) can be serviced; all other addresses
 * fall through to the default PCI config handling.
 */
static DECLCALLBACK(VBOXSTRICTRC) virtioR3PciConfigRead(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
                                                        uint32_t uAddress, unsigned cb, uint32_t *pu32Value)
{
    PVIRTIOCORE   pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
    PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
    RT_NOREF(pPciDev);

    if (uAddress == pVirtio->uPciCfgDataOff)
    {
        /*
         * VirtIO 1.0 spec section 4.1.4.7 describes a required alternative access capability
         * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space
         * (the virtio_pci_cfg_cap capability), and access data items.
         * This is used by BIOS to gain early boot access to the storage device.
         */
        struct virtio_pci_cap *pPciCap = &pVirtioCC->pPciCfgCap->pciCap;
        uint32_t uLength = pPciCap->uLength;

        Log7Func((" pDevIns=%p pPciDev=%p uAddress=%#x%s cb=%u uLength=%d, bar=%d\n",
                  pDevIns, pPciDev, uAddress, uAddress < 0x10 ? " " : "", cb, uLength, pPciCap->uBar));

        /* NOTE(review): unlike the write path, cb is not cross-checked against uLength here —
         * presumably tolerated for BIOS access patterns; confirm this asymmetry is intentional. */
        if (  (uLength != 1 && uLength != 2 && uLength != 4)
            || pPciCap->uBar != VIRTIO_REGION_PCI_CAP)
        {
            ASSERT_GUEST_MSG_FAILED(("Guest read virtio_pci_cfg_cap.pci_cfg_data using mismatching config. "
                                     "Ignoring\n"));
            *pu32Value = UINT32_MAX;
            return VINF_SUCCESS;
        }

        /* Proxy the windowed access through the regular MMIO read handler. */
        VBOXSTRICTRC rcStrict = virtioMmioRead(pDevIns, pVirtio, pPciCap->uOffset, pu32Value, cb);
        Log7Func((" Guest read virtio_pci_cfg_cap.pci_cfg_data, bar=%d, offset=%d, length=%d, result=0x%x -> %Rrc\n",
                  pPciCap->uBar, pPciCap->uOffset, uLength, *pu32Value, VBOXSTRICTRC_VAL(rcStrict)));
        return rcStrict;
    }
    Log7Func((" pDevIns=%p pPciDev=%p uAddress=%#x%s cb=%u pu32Value=%p\n",
              pDevIns, pPciDev, uAddress, uAddress < 0x10 ? " " : "", cb, pu32Value));
    return VINF_PDM_PCI_DO_DEFAULT;
}
1851
/**
 * @callback_method_impl{FNPCICONFIGWRITE}
 *
 * Intercepts PCI config-space writes so the virtio_pci_cfg_cap window
 * (VirtIO 1.0 spec, section 4.1.4.7) can be serviced; all other addresses
 * fall through to the default PCI config handling.
 */
static DECLCALLBACK(VBOXSTRICTRC) virtioR3PciConfigWrite(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
                                                         uint32_t uAddress, unsigned cb, uint32_t u32Value)
{
    PVIRTIOCORE   pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
    PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
    RT_NOREF(pPciDev);

    Log7Func(("pDevIns=%p pPciDev=%p uAddress=%#x %scb=%u u32Value=%#x\n", pDevIns, pPciDev, uAddress, uAddress < 0xf ? " " : "", cb, u32Value));
    if (uAddress == pVirtio->uPciCfgDataOff)
    {
        /* VirtIO 1.0 spec section 4.1.4.7 describes a required alternative access capability
         * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space
         * (the virtio_pci_cfg_cap capability), and access data items.
         * This is used by BIOS to gain early boot access to the storage device. */

        struct virtio_pci_cap *pPciCap = &pVirtioCC->pPciCfgCap->pciCap;
        uint32_t uLength = pPciCap->uLength;

        /* Reject writes whose configured window is malformed or mismatched. */
        if (   (uLength != 1 && uLength != 2 && uLength != 4)
            || cb != uLength
            || pPciCap->uBar != VIRTIO_REGION_PCI_CAP)
        {
            ASSERT_GUEST_MSG_FAILED(("Guest write virtio_pci_cfg_cap.pci_cfg_data using mismatching config. Ignoring\n"));
            return VINF_SUCCESS;
        }

        /* Proxy the windowed access through the regular MMIO write handler. */
        VBOXSTRICTRC rcStrict = virtioMmioWrite(pDevIns, pVirtio, pPciCap->uOffset, &u32Value, cb);
        Log2Func(("Guest wrote  virtio_pci_cfg_cap.pci_cfg_data, bar=%d, offset=%x, length=%x, value=%d -> %Rrc\n",
                  pPciCap->uBar, pPciCap->uOffset, uLength, u32Value, VBOXSTRICTRC_VAL(rcStrict)));
        return rcStrict;
    }
    return VINF_PDM_PCI_DO_DEFAULT;
}
1888
1889
1890/*********************************************************************************************************************************
1891* Saved state. *
1892*********************************************************************************************************************************/
1893
1894/**
1895 * Called from the FNSSMDEVSAVEEXEC function of the device.
1896 *
1897 * @param pVirtio Pointer to the shared virtio state.
1898 * @param pHlp The ring-3 device helpers.
1899 * @param pSSM The saved state handle.
1900 * @returns VBox status code.
1901 */
1902int virtioCoreR3SaveExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
1903{
1904 LogFunc(("\n"));
1905 pHlp->pfnSSMPutU64(pSSM, VIRTIO_SAVEDSTATE_MARKER);
1906 pHlp->pfnSSMPutU32(pSSM, VIRTIO_SAVEDSTATE_VERSION);
1907
1908 pHlp->pfnSSMPutBool(pSSM, pVirtio->fGenUpdatePending);
1909 pHlp->pfnSSMPutU8( pSSM, pVirtio->fDeviceStatus);
1910 pHlp->pfnSSMPutU8( pSSM, pVirtio->uConfigGeneration);
1911 pHlp->pfnSSMPutU8( pSSM, pVirtio->uPciCfgDataOff);
1912 pHlp->pfnSSMPutU8( pSSM, pVirtio->uISR);
1913 pHlp->pfnSSMPutU16( pSSM, pVirtio->uVirtqSelect);
1914 pHlp->pfnSSMPutU32( pSSM, pVirtio->uDeviceFeaturesSelect);
1915 pHlp->pfnSSMPutU32( pSSM, pVirtio->uDriverFeaturesSelect);
1916 pHlp->pfnSSMPutU64( pSSM, pVirtio->uDriverFeatures);
1917
1918 for (uint32_t i = 0; i < VIRTQ_MAX_COUNT; i++)
1919 {
1920 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[i];
1921
1922 pHlp->pfnSSMPutGCPhys64( pSSM, pVirtq->GCPhysVirtqDesc);
1923 pHlp->pfnSSMPutGCPhys64( pSSM, pVirtq->GCPhysVirtqAvail);
1924 pHlp->pfnSSMPutGCPhys64( pSSM, pVirtq->GCPhysVirtqUsed);
1925 pHlp->pfnSSMPutU16( pSSM, pVirtq->uNotifyOffset);
1926 pHlp->pfnSSMPutU16( pSSM, pVirtq->uMsixVector);
1927 pHlp->pfnSSMPutU16( pSSM, pVirtq->uEnable);
1928 pHlp->pfnSSMPutU16( pSSM, pVirtq->uQueueSize);
1929 pHlp->pfnSSMPutU16( pSSM, pVirtq->uAvailIdxShadow);
1930 pHlp->pfnSSMPutU16( pSSM, pVirtq->uUsedIdxShadow);
1931 int rc = pHlp->pfnSSMPutMem(pSSM, pVirtq->szName, 32);
1932 AssertRCReturn(rc, rc);
1933 }
1934
1935 return VINF_SUCCESS;
1936}
1937
/**
 * Called from the FNSSMDEVLOADEXEC function of the device.
 *
 * Validates the saved-state marker and version, then restores the shared core
 * state and the per-virtqueue state in exactly the order virtioCoreR3SaveExec
 * wrote them.
 *
 * @param   pVirtio     Pointer to the shared virtio state.
 * @param   pHlp        The ring-3 device helpers.
 * @param   pSSM        The saved state handle.
 * @returns VBox status code.
 */
int virtioCoreR3LoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
{
    LogFunc(("\n"));
    /*
     * Check the marker and (embedded) version number.
     */
    uint64_t uMarker = 0;
    int rc = pHlp->pfnSSMGetU64(pSSM, &uMarker);
    AssertRCReturn(rc, rc);
    if (uMarker != VIRTIO_SAVEDSTATE_MARKER)
        return pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
                                        N_("Expected marker value %#RX64 found %#RX64 instead"),
                                        VIRTIO_SAVEDSTATE_MARKER, uMarker);
    uint32_t uVersion = 0;
    rc = pHlp->pfnSSMGetU32(pSSM, &uVersion);
    AssertRCReturn(rc, rc);
    if (uVersion != VIRTIO_SAVEDSTATE_VERSION)
        return pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
                                        N_("Unsupported virtio version: %u"), uVersion);
    /*
     * Load the state.  Field order must mirror virtioCoreR3SaveExec exactly.
     */
    pHlp->pfnSSMGetBool( pSSM, &pVirtio->fGenUpdatePending);
    pHlp->pfnSSMGetU8(   pSSM, &pVirtio->fDeviceStatus);
    pHlp->pfnSSMGetU8(   pSSM, &pVirtio->uConfigGeneration);
    pHlp->pfnSSMGetU8(   pSSM, &pVirtio->uPciCfgDataOff);
    pHlp->pfnSSMGetU8(   pSSM, &pVirtio->uISR);
    pHlp->pfnSSMGetU16(  pSSM, &pVirtio->uVirtqSelect);
    pHlp->pfnSSMGetU32(  pSSM, &pVirtio->uDeviceFeaturesSelect);
    pHlp->pfnSSMGetU32(  pSSM, &pVirtio->uDriverFeaturesSelect);
    pHlp->pfnSSMGetU64(  pSSM, &pVirtio->uDriverFeatures);

    for (uint32_t i = 0; i < VIRTQ_MAX_COUNT; i++)
    {
        PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[i];

        pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqDesc);
        pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqAvail);
        pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqUsed);
        pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uNotifyOffset);
        pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uMsixVector);
        pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uEnable);
        pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uQueueSize);
        pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uAvailIdxShadow);
        pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uUsedIdxShadow);
        rc = pHlp->pfnSSMGetMem( pSSM, pVirtq->szName, sizeof(pVirtq->szName));
        AssertRCReturn(rc, rc);
    }

    return VINF_SUCCESS;
}
1997
1998
1999/*********************************************************************************************************************************
2000* Device Level *
2001*********************************************************************************************************************************/
2002
2003/**
2004 * This must be called by the client to handle VM state changes
2005 * after the client takes care of its device-specific tasks for the state change.
2006 * (i.e. Reset, suspend, power-off, resume)
2007 *
2008 * @param pDevIns The device instance.
2009 * @param pVirtio Pointer to the shared virtio state.
2010 */
2011void virtioCoreR3VmStateChanged(PVIRTIOCORE pVirtio, VIRTIOVMSTATECHANGED enmState)
2012{
2013 LogFunc(("State changing to %s\n",
2014 virtioCoreGetStateChangeText(enmState)));
2015
2016 switch(enmState)
2017 {
2018 case kvirtIoVmStateChangedReset:
2019 virtioCoreResetAll(pVirtio);
2020 break;
2021 case kvirtIoVmStateChangedSuspend:
2022 break;
2023 case kvirtIoVmStateChangedPowerOff:
2024 break;
2025 case kvirtIoVmStateChangedResume:
2026 for (int uVirtq = 0; uVirtq < VIRTQ_MAX_COUNT; uVirtq++)
2027 {
2028 if ((!pVirtio->fLegacyDriver && pVirtio->aVirtqueues[uVirtq].uEnable)
2029 | pVirtio->aVirtqueues[uVirtq].GCPhysVirtqDesc)
2030 virtioCoreNotifyGuestDriver(pVirtio->pDevInsR3, pVirtio, uVirtq);
2031 }
2032 break;
2033 default:
2034 LogRelFunc(("Bad enum value"));
2035 return;
2036 }
2037}
2038
2039/**
2040 * This should be called from PDMDEVREGR3::pfnDestruct.
2041 *
2042 * @param pDevIns The device instance.
2043 * @param pVirtio Pointer to the shared virtio state.
2044 * @param pVirtioCC Pointer to the ring-3 virtio state.
2045 */
2046void virtioCoreR3Term(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC)
2047{
2048 if (pVirtioCC->pbPrevDevSpecificCfg)
2049 {
2050 RTMemFree(pVirtioCC->pbPrevDevSpecificCfg);
2051 pVirtioCC->pbPrevDevSpecificCfg = NULL;
2052 }
2053
2054 RT_NOREF(pDevIns, pVirtio);
2055}
2056
2057/** API Function: See header file */
2058int virtioCoreR3Init(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC, PVIRTIOPCIPARAMS pPciParams,
2059 const char *pcszInstance, uint64_t fDevSpecificFeatures, void *pvDevSpecificCfg, uint16_t cbDevSpecificCfg)
2060{
2061 /*
2062 * The pVirtio state must be the first member of the shared device instance
2063 * data, otherwise we cannot get our bearings in the PCI configuration callbacks.
2064 */
2065 AssertLogRelReturn(pVirtio == PDMINS_2_DATA(pDevIns, PVIRTIOCORE), VERR_STATE_CHANGED);
2066 AssertLogRelReturn(pVirtioCC == PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC), VERR_STATE_CHANGED);
2067
2068 pVirtio->pDevInsR3 = pDevIns;
2069
2070 /*
2071 * Caller must initialize these.
2072 */
2073 AssertReturn(pVirtioCC->pfnStatusChanged, VERR_INVALID_POINTER);
2074 AssertReturn(pVirtioCC->pfnVirtqNotified, VERR_INVALID_POINTER);
2075 AssertReturn(pVirtioCC->pfnGuestVersionHandler, VERR_INVALID_POINTER);
2076 AssertReturn(VIRTQ_SIZE > 0 && VIRTQ_SIZE <= 32768, VERR_OUT_OF_RANGE); /* VirtIO specification-defined limit */
2077
2078#if 0 /* Until pdmR3DvHlp_PCISetIrq() impl is fixed and Assert that limits vec to 0 is removed
2079 * The legacy MSI support has not been implemented yet
2080 */
2081# ifdef VBOX_WITH_MSI_DEVICES
2082 pVirtio->fMsiSupport = true;
2083# endif
2084#endif
2085
2086 /* Tell the device-specific code that guest is in legacy mode (for now) */
2087 pVirtioCC->pfnGuestVersionHandler(pVirtio, false /* fModern */);
2088
2089 /*
2090 * The host features offered include both device-specific features
2091 * and reserved feature bits (device independent)
2092 */
2093 pVirtio->uDeviceFeatures = VIRTIO_F_VERSION_1
2094 | VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED
2095 | fDevSpecificFeatures;
2096
2097 RTStrCopy(pVirtio->szInstance, sizeof(pVirtio->szInstance), pcszInstance);
2098 pVirtioCC->cbDevSpecificCfg = cbDevSpecificCfg;
2099 pVirtioCC->pbDevSpecificCfg = (uint8_t *)pvDevSpecificCfg;
2100 pVirtioCC->pbPrevDevSpecificCfg = (uint8_t *)RTMemDup(pvDevSpecificCfg, cbDevSpecificCfg);
2101 AssertLogRelReturn(pVirtioCC->pbPrevDevSpecificCfg, VERR_NO_MEMORY);
2102
2103 /* Set PCI config registers (assume 32-bit mode) */
2104 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
2105 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
2106
2107 PDMPciDevSetRevisionId(pPciDev, DEVICE_PCI_REVISION_ID_VIRTIO);
2108 PDMPciDevSetVendorId(pPciDev, DEVICE_PCI_VENDOR_ID_VIRTIO);
2109 PDMPciDevSetDeviceId(pPciDev, pPciParams->uDeviceId);
2110 PDMPciDevSetSubSystemId(pPciDev, DEVICE_PCI_NETWORK_SUBSYSTEM);
2111 PDMPciDevSetSubSystemVendorId(pPciDev, DEVICE_PCI_VENDOR_ID_VIRTIO);
2112 PDMPciDevSetClassBase(pPciDev, pPciParams->uClassBase);
2113 PDMPciDevSetClassSub(pPciDev, pPciParams->uClassSub);
2114 PDMPciDevSetClassProg(pPciDev, pPciParams->uClassProg);
2115 PDMPciDevSetInterruptLine(pPciDev, pPciParams->uInterruptLine);
2116 PDMPciDevSetInterruptPin(pPciDev, pPciParams->uInterruptPin);
2117
2118 /* Register PCI device */
2119 int rc = PDMDevHlpPCIRegister(pDevIns, pPciDev);
2120 if (RT_FAILURE(rc))
2121 return PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: cannot register PCI Device")); /* can we put params in this error? */
2122
2123 rc = PDMDevHlpPCIInterceptConfigAccesses(pDevIns, pPciDev, virtioR3PciConfigRead, virtioR3PciConfigWrite);
2124 AssertRCReturn(rc, rc);
2125
2126 /* Construct & map PCI vendor-specific capabilities for virtio host negotiation with guest driver */
2127
2128#define CFG_ADDR_2_IDX(addr) ((uint8_t)(((uintptr_t)(addr) - (uintptr_t)&pPciDev->abConfig[0])))
2129#define SET_PCI_CAP_LOC(a_pPciDev, a_pCfg, a_LocCap, a_uMmioLengthAlign) \
2130 do { \
2131 (a_LocCap).offMmio = (a_pCfg)->uOffset; \
2132 (a_LocCap).cbMmio = RT_ALIGN_T((a_pCfg)->uLength, a_uMmioLengthAlign, uint16_t); \
2133 (a_LocCap).offPci = (uint16_t)(uintptr_t)((uint8_t *)(a_pCfg) - &(a_pPciDev)->abConfig[0]); \
2134 (a_LocCap).cbPci = (a_pCfg)->uCapLen; \
2135 } while (0)
2136
2137 PVIRTIO_PCI_CAP_T pCfg;
2138 uint32_t cbRegion = 0;
2139
2140 /* Common capability (VirtIO 1.0 spec, section 4.1.4.3) */
2141 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[0x40];
2142 pCfg->uCfgType = VIRTIO_PCI_CAP_COMMON_CFG;
2143 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
2144 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
2145 pCfg->uCapNext = CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen;
2146 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
2147 pCfg->uOffset = RT_ALIGN_32(0, 4); /* Currently 0, but reminder to 32-bit align if changing this */
2148 pCfg->uLength = sizeof(VIRTIO_PCI_COMMON_CFG_T);
2149 cbRegion += pCfg->uLength;
2150 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocCommonCfgCap, 2);
2151 pVirtioCC->pCommonCfgCap = pCfg;
2152
2153 /*
2154 * Notify capability (VirtIO 1.0 spec, section 4.1.4.4). Note: uLength is based on the choice
2155 * of this implementation to make each queue's uNotifyOffset equal to (VirtqSelect) ordinal
2156 * value of the queue (different strategies are possible according to spec).
2157 */
2158 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
2159 pCfg->uCfgType = VIRTIO_PCI_CAP_NOTIFY_CFG;
2160 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
2161 pCfg->uCapLen = sizeof(VIRTIO_PCI_NOTIFY_CAP_T);
2162 pCfg->uCapNext = CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen;
2163 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
2164 pCfg->uOffset = pVirtioCC->pCommonCfgCap->uOffset + pVirtioCC->pCommonCfgCap->uLength;
2165 pCfg->uOffset = RT_ALIGN_32(pCfg->uOffset, 4);
2166 pCfg->uLength = VIRTQ_MAX_COUNT * VIRTIO_NOTIFY_OFFSET_MULTIPLIER + 2; /* will change in VirtIO 1.1 */
2167 cbRegion += pCfg->uLength;
2168 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocNotifyCap, 1);
2169 pVirtioCC->pNotifyCap = (PVIRTIO_PCI_NOTIFY_CAP_T)pCfg;
2170 pVirtioCC->pNotifyCap->uNotifyOffMultiplier = VIRTIO_NOTIFY_OFFSET_MULTIPLIER;
2171
2172 /* ISR capability (VirtIO 1.0 spec, section 4.1.4.5)
2173 *
2174 * VirtIO 1.0 spec says 8-bit, unaligned in MMIO space. Example/diagram
2175 * of spec shows it as a 32-bit field with upper bits 'reserved'
2176 * Will take spec's words more literally than the diagram for now.
2177 */
2178 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
2179 pCfg->uCfgType = VIRTIO_PCI_CAP_ISR_CFG;
2180 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
2181 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
2182 pCfg->uCapNext = CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen;
2183 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
2184 pCfg->uOffset = pVirtioCC->pNotifyCap->pciCap.uOffset + pVirtioCC->pNotifyCap->pciCap.uLength;
2185 pCfg->uOffset = RT_ALIGN_32(pCfg->uOffset, 4);
2186 pCfg->uLength = sizeof(uint8_t);
2187 cbRegion += pCfg->uLength;
2188 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocIsrCap, 4);
2189 pVirtioCC->pIsrCap = pCfg;
2190
2191 /* PCI Cfg capability (VirtIO 1.0 spec, section 4.1.4.7)
2192 * This capability doesn't get page-MMIO mapped. Instead uBar, uOffset and uLength are intercepted
2193 * by trapping PCI configuration I/O and get modulated by consumers to locate fetch and read/write
2194 * values from any region. NOTE: The linux driver not only doesn't use this feature, it will not
2195 * even list it as present if uLength isn't non-zero and also 4-byte-aligned as the linux driver is
2196 * initializing.
2197 */
2198 pVirtio->uPciCfgDataOff = pCfg->uCapNext + RT_OFFSETOF(VIRTIO_PCI_CFG_CAP_T, uPciCfgData);
2199 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
2200 pCfg->uCfgType = VIRTIO_PCI_CAP_PCI_CFG;
2201 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
2202 pCfg->uCapLen = sizeof(VIRTIO_PCI_CFG_CAP_T);
2203 pCfg->uCapNext = (pVirtio->fMsiSupport || pVirtioCC->pbDevSpecificCfg) ? CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen : 0;
2204 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
2205 pCfg->uOffset = 0;
2206 pCfg->uLength = 4;
2207 cbRegion += pCfg->uLength;
2208 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocPciCfgCap, 1);
2209 pVirtioCC->pPciCfgCap = (PVIRTIO_PCI_CFG_CAP_T)pCfg;
2210
2211 if (pVirtioCC->pbDevSpecificCfg)
2212 {
2213 /* Device specific config capability (via VirtIO 1.0, section 4.1.4.6).
2214 * Client defines the device-specific config struct and passes size to virtioCoreR3Init()
2215 * to inform this. */
2216 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
2217 pCfg->uCfgType = VIRTIO_PCI_CAP_DEVICE_CFG;
2218 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
2219 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
2220 pCfg->uCapNext = pVirtio->fMsiSupport ? CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen : 0;
2221 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
2222 pCfg->uOffset = pVirtioCC->pIsrCap->uOffset + pVirtioCC->pIsrCap->uLength;
2223 pCfg->uOffset = RT_ALIGN_32(pCfg->uOffset, 4);
2224 pCfg->uLength = cbDevSpecificCfg;
2225 cbRegion += pCfg->uLength;
2226 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocDeviceCap, 4);
2227 pVirtioCC->pDeviceCap = pCfg;
2228 }
2229 else
2230 Assert(pVirtio->LocDeviceCap.cbMmio == 0 && pVirtio->LocDeviceCap.cbPci == 0);
2231
2232 if (pVirtio->fMsiSupport)
2233 {
2234 PDMMSIREG aMsiReg;
2235 RT_ZERO(aMsiReg);
2236 aMsiReg.iMsixCapOffset = pCfg->uCapNext;
2237 aMsiReg.iMsixNextOffset = 0;
2238 aMsiReg.iMsixBar = VIRTIO_REGION_MSIX_CAP;
2239 aMsiReg.cMsixVectors = VBOX_MSIX_MAX_ENTRIES;
2240 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &aMsiReg); /* see MsixR3init() */
2241 if (RT_FAILURE(rc))
2242 {
2243 /* See PDMDevHlp.cpp:pdmR3DevHlp_PCIRegisterMsi */
2244 LogFunc(("Failed to configure MSI-X (%Rrc). Reverting to INTx\n", rc));
2245 pVirtio->fMsiSupport = false;
2246 }
2247 else
2248 Log2Func(("Using MSI-X for guest driver notification\n"));
2249 }
2250 else
2251 LogFunc(("MSI-X not available for VBox, using INTx notification\n"));
2252
2253 /* Set offset to first capability and enable PCI dev capabilities */
2254 PDMPciDevSetCapabilityList(pPciDev, 0x40);
2255 PDMPciDevSetStatus(pPciDev, VBOX_PCI_STATUS_CAP_LIST);
2256
2257 size_t cbSize = RTStrPrintf(pVirtioCC->pcszMmioName, sizeof(pVirtioCC->pcszMmioName), "%s (modern)", pcszInstance);
2258 if (cbSize <= 0)
2259 return PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: out of memory allocating string")); /* can we put params in this error? */
2260
2261 cbSize = RTStrPrintf(pVirtioCC->pcszPortIoName, sizeof(pVirtioCC->pcszPortIoName), "%s (legacy)", pcszInstance);
2262 if (cbSize <= 0)
2263 return PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: out of memory allocating string")); /* can we put params in this error? */
2264
2265 /* As a transitional device that supports legacy VirtIO drivers, this VirtIO device generic implementation presents
2266 * legacy driver interface in I/O space at BAR0. The following maps the common (e.g. device independent)
2267 * dev config area as well as device-specific dev config area (whose size is passed to init function of this VirtIO
2268 * generic device code) for access via Port I/O, since legacy drivers (e.g. pre VirtIO 1.0) don't use MMIO callbacks.
2269 * (See VirtIO 1.1, Section 4.1.4.8).
2270 */
2271 rc = PDMDevHlpPCIIORegionCreateIo(pDevIns, VIRTIO_REGION_LEGACY_IO, sizeof(VIRTIO_LEGACY_PCI_COMMON_CFG_T) + cbDevSpecificCfg,
2272 virtioLegacyIOPortOut, virtioLegacyIOPortIn, NULL /*pvUser*/, pVirtioCC->pcszPortIoName,
2273 NULL /*paExtDescs*/, &pVirtio->hLegacyIoPorts);
2274 AssertLogRelRCReturn(rc, PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: cannot register legacy config in I/O space at BAR0 */")));
2275
2276 /* Note: The Linux driver at drivers/virtio/virtio_pci_modern.c tries to map at least a page for the
2277 * 'unknown' device-specific capability without querying the capability to figure
2278 * out size, so pad with an extra page
2279 */
2280 rc = PDMDevHlpPCIIORegionCreateMmio(pDevIns, VIRTIO_REGION_PCI_CAP, RT_ALIGN_32(cbRegion + VIRTIO_PAGE_SIZE, VIRTIO_PAGE_SIZE),
2281 PCI_ADDRESS_SPACE_MEM, virtioMmioWrite, virtioMmioRead, pVirtio,
2282 IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
2283 pVirtioCC->pcszMmioName,
2284 &pVirtio->hMmioPciCap);
2285 AssertLogRelRCReturn(rc, PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: cannot register PCI Capabilities address space")));
2286 /*
2287 * Statistics.
2288 */
2289# ifdef VBOX_WITH_STATISTICS
2290 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsAllocated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
2291 "Total number of allocated descriptor chains", "DescChainsAllocated");
2292 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsFreed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
2293 "Total number of freed descriptor chains", "DescChainsFreed");
2294 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsSegsIn, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
2295 "Total number of inbound segments", "DescChainsSegsIn");
2296 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsSegsOut, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
2297 "Total number of outbound segments", "DescChainsSegsOut");
2298 PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatReadR3, STAMTYPE_PROFILE, "IO/ReadR3", STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3");
2299 PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatReadR0, STAMTYPE_PROFILE, "IO/ReadR0", STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R0");
2300 PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatReadRC, STAMTYPE_PROFILE, "IO/ReadRC", STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RC");
2301 PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatWriteR3, STAMTYPE_PROFILE, "IO/WriteR3", STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3");
2302 PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatWriteR0, STAMTYPE_PROFILE, "IO/WriteR0", STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R0");
2303 PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatWriteRC, STAMTYPE_PROFILE, "IO/WriteRC", STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RC");
2304# endif /* VBOX_WITH_STATISTICS */
2305
2306 virtioResetDevice(pDevIns, pVirtio); /* Reset VirtIO specific state of device */
2307
2308 return VINF_SUCCESS;
2309}
2310
2311#else /* !IN_RING3 */
2312
2313/**
2314 * Sets up the core ring-0/raw-mode virtio bits.
2315 *
2316 * @returns VBox status code.
2317 * @param pDevIns The device instance.
2318 * @param pVirtio Pointer to the shared virtio state. This must be the first
2319 * member in the shared device instance data!
2320 */
2321int virtioCoreRZInit(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio)
2322{
2323 AssertLogRelReturn(pVirtio == PDMINS_2_DATA(pDevIns, PVIRTIOCORE), VERR_STATE_CHANGED);
2324
2325#ifdef FUTURE_OPTIMIZATION
2326 int rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
2327 AssertRCReturn(rc, rc);
2328#endif
2329 int rc = PDMDevHlpMmioSetUpContext(pDevIns, pVirtio->hMmioPciCap, virtioMmioWrite, virtioMmioRead, pVirtio);
2330 AssertRCReturn(rc, rc);
2331
2332 rc = PDMDevHlpIoPortSetUpContext(pDevIns, pVirtio->hLegacyIoPorts, virtioLegacyIOPortOut, virtioLegacyIOPortIn, NULL /*pvUser*/);
2333 AssertRCReturn(rc, rc);
2334
2335 return rc;
2336}
2337
2338#endif /* !IN_RING3 */
2339
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette