VirtualBox

source: vbox/trunk/src/VBox/Devices/VirtIO/Virtio_1_0.cpp@ 81662

Last change on this file since 81662 was 81662, checked in by vboxsync, 5 years ago

Virtio_1_0,DevVirtioScsi: Started converting to new PDM model. (untested) bugref:9218

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 75.8 KB
Line 
1/* $Id: Virtio_1_0.cpp 81662 2019-11-05 00:09:57Z vboxsync $ */
2/** @file
3 * Virtio_1_0 - Virtio Common (PCI, feature & config mgt, queue mgt & proxy, notification mgt)
4 */
5
6/*
7 * Copyright (C) 2009-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEV_VIRTIO
23
24#include <VBox/log.h>
25#include <VBox/msi.h>
26#include <VBox/AssertGuest.h>
27#include <iprt/param.h>
28#include <iprt/assert.h>
29#include <iprt/uuid.h>
30#include <iprt/mem.h>
31#include <iprt/assert.h>
32#include <iprt/sg.h>
33#include <VBox/vmm/pdmdev.h>
34#include "Virtio_1_0.h"
35
36
37/*********************************************************************************************************************************
38* Defined Constants And Macros *
39*********************************************************************************************************************************/
40#define INSTANCE(a_pVirtio) (a_pVirtio)->szInstance
41#define QUEUENAME(a_pVirtio, a_idxQueue) ((a_pVirtio)->virtqState[(a_idxQueue)].szVirtqName)
42
43/**
44 * This macro returns true if the implied parameter GCPhysAddr address and access length are
45 * within the range of the mapped capability struct specified with the explicit parameters.
46 *
47 * @param[in] a_GCPhysCapData Pointer to MMIO mapped capability struct
48 * @param[in] a_pCfgCap Pointer to capability in PCI configuration area
49 * @param[out] a_fMatched True if GCPhysAddr is within the physically mapped capability.
50 *
51 * Implied parameters:
52 * - GCPhysAddr - [input, implied] Physical address accessed (via MMIO callback)
53 * - cb - [input, implied] Number of bytes to access
54 *
55 * @todo r=bird: Make this a predicate macro (I will probably simplify this a
56 * lot later when 'GCPhysAddr' becomes an 'off').
57 */
58#define MATCH_VIRTIO_CAP_STRUCT_OLD(a_GCPhysCapData, a_pCfgCap, a_fMatched) \
59 bool const a_fMatched = (a_GCPhysCapData) != 0 \
60 && (a_pCfgCap) != NULL \
61 && GCPhysAddr >= (RTGCPHYS)(a_GCPhysCapData) \
62 && GCPhysAddr < ((RTGCPHYS)(a_GCPhysCapData) + ((PVIRTIO_PCI_CAP_T)(a_pCfgCap))->uLength) \
63 && cb <= ((PVIRTIO_PCI_CAP_T)a_pCfgCap)->uLength
64
65#define MATCHES_VIRTIO_CAP_STRUCT(a_offAccess, a_cbAccess, a_offIntraVar, a_LocCapData) \
66 ( ((a_offIntraVar) = (uint32_t)((a_offAccess) - (a_LocCapData).offMmio)) < (uint32_t)(a_LocCapData).cbMmio \
67 && (a_offIntraVar) + (uint32_t)(a_cbAccess) <= (uint32_t)(a_LocCapData).cbMmio )
68
69#define IS_DRIVER_OK(pVirtio) (pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
70
71
72/** Marks the start of the virtio saved state (just for sanity). */
73#define VIRTIO_SAVEDSTATE_MARKER UINT64_C(0x1133557799bbddff)
74/** The current saved state version for the virtio core. */
75#define VIRTIO_SAVEDSTATE_VERSION UINT32_C(1)
76
77
78
79
80/*********************************************************************************************************************************
81* Structures and Typedefs *
82*********************************************************************************************************************************/
83/**
84 * virtq related structs
85 * (struct names follow VirtIO 1.0 spec, typedef use VBox style)
86 */
87typedef struct virtq_desc
88{
89 uint64_t GCPhysBuf; /**< addr GC Phys. address of buffer */
90 uint32_t cb; /**< len Buffer length */
91 uint16_t fFlags; /**< flags Buffer specific flags */
92 uint16_t uDescIdxNext; /**< next Idx set if VIRTIO_DESC_F_NEXT */
93} VIRTQ_DESC_T, *PVIRTQ_DESC_T;
94
95typedef struct virtq_avail
96{
97 uint16_t fFlags; /**< flags avail ring drv to dev flags */
98 uint16_t uIdx; /**< idx Index of next free ring slot */
99 uint16_t auRing[RT_FLEXIBLE_ARRAY]; /**< ring Ring: avail drv to dev bufs */
100 /* uint16_t uUsedEventIdx; - used_event (if VIRTQ_USED_F_EVENT_IDX) */
101} VIRTQ_AVAIL_T, *PVIRTQ_AVAIL_T;
102
103typedef struct virtq_used_elem
104{
105 uint32_t uDescIdx; /**< idx Start of used desc chain */
106 uint32_t cbElem; /**< len Total len of used desc chain */
107} VIRTQ_USED_ELEM_T;
108
109typedef struct virt_used
110{
111 uint16_t fFlags; /**< flags used ring host-to-guest flags */
112 uint16_t uIdx; /**< idx Index of next ring slot */
113 VIRTQ_USED_ELEM_T aRing[RT_FLEXIBLE_ARRAY]; /**< ring Ring: used dev to drv bufs */
114 /** @todo r=bird: From the usage, this member shouldn't be here and will only
115 * confuse compilers . */
116 /* uint16_t uAvailEventIdx; - avail_event if (VIRTQ_USED_F_EVENT_IDX) */
117} VIRTQ_USED_T, *PVIRTQ_USED_T;
118
119
120/*********************************************************************************************************************************
121* Internal Functions *
122*********************************************************************************************************************************/
123static void virtioNotifyGuestDriver(PVIRTIOSTATE pVirtio, uint16_t idxQueue, bool fForce);
124static int virtioKick(PVIRTIOSTATE pVirtio, uint8_t uCause, uint16_t uVec, bool fForce);
125
126/** @name Internal queue operations
127 * @{ */
128
129#if 0 /* unused */
130DECLINLINE(int) virtqIsEventNeeded(uint16_t uEventIdx, uint16_t uDescIdxNew, uint16_t uDescIdxOld)
131{
132 return (uint16_t)(uDescIdxNew - uEventIdx - 1) < (uint16_t)(uDescIdxNew - uDescIdxOld);
133}
134#endif
135
136/**
137 * Accessor for virtq descriptor
138 */
139DECLINLINE(void) virtioReadDesc(PVIRTIOSTATE pVirtio, uint16_t idxQueue, uint32_t uDescIdx, PVIRTQ_DESC_T pDesc)
140{
141 //Log(("%s virtioQueueReadDesc: ring=%p idx=%u\n", INSTANCE(pState), pVirtQ, idx));
142 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
143 PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
144 pVirtio->aGCPhysQueueDesc[idxQueue] + sizeof(VIRTQ_DESC_T) * (uDescIdx % pVirtio->uQueueSize[idxQueue]),
145 pDesc, sizeof(VIRTQ_DESC_T));
146}
147
148/**
149 * Accessors for virtq avail ring
150 */
151DECLINLINE(uint16_t) virtioReadAvailDescIdx(PVIRTIOSTATE pVirtio, uint16_t idxQueue, uint32_t availIdx)
152{
153 uint16_t uDescIdx;
154 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
155 PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
156 pVirtio->aGCPhysQueueAvail[idxQueue]
157 + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[availIdx % pVirtio->uQueueSize[idxQueue]]),
158 &uDescIdx, sizeof(uDescIdx));
159 return uDescIdx;
160}
161
162DECLINLINE(uint16_t) virtioReadAvailRingIdx(PVIRTIOSTATE pVirtio, uint16_t idxQueue)
163{
164 uint16_t uIdx = 0;
165 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
166 PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
167 pVirtio->aGCPhysQueueAvail[idxQueue] + RT_UOFFSETOF(VIRTQ_AVAIL_T, uIdx),
168 &uIdx, sizeof(uIdx));
169 return uIdx;
170}
171
172DECLINLINE(bool) virtqIsEmpty(PVIRTIOSTATE pVirtio, uint16_t idxQueue)
173{
174 return virtioReadAvailRingIdx(pVirtio, idxQueue) == pVirtio->virtqState[idxQueue].uAvailIdx;
175}
176
177#if 0 /* unused */
178DECLINLINE(uint16_t) virtioReadAvailFlags(PVIRTIOSTATE pVirtio, uint16_t idxQueue)
179{
180 uint16_t fFlags;
181 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
182 PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
183 pVirtio->aGCPhysQueueAvail[idxQueue] + RT_UOFFSETOF(VIRTQ_AVAIL_T, fFlags),
184 &fFlags, sizeof(fFlags));
185 return fFlags;
186}
187#endif
188
189DECLINLINE(uint16_t) virtioReadAvailUsedEvent(PVIRTIOSTATE pVirtio, uint16_t idxQueue)
190{
191 uint16_t uUsedEventIdx;
192 /** VirtIO 1.0 uUsedEventIdx (used_event) immediately follows ring */
193 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
194 PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
195 pVirtio->aGCPhysQueueAvail[idxQueue] + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[pVirtio->uQueueSize[idxQueue]]),
196 &uUsedEventIdx, sizeof(uUsedEventIdx));
197 return uUsedEventIdx;
198}
199/** @} */
200
201/** @name Accessors for virtq used ring
202 * @{
203 */
204DECLINLINE(void) virtioWriteUsedElem(PVIRTIOSTATE pVirtio, uint16_t idxQueue, uint32_t usedIdx, uint32_t uDescIdx, uint32_t uLen)
205{
206 VIRTQ_USED_ELEM_T elem = { uDescIdx, uLen };
207 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
208 PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
209 pVirtio->aGCPhysQueueUsed[idxQueue]
210 + RT_UOFFSETOF_DYN(VIRTQ_USED_T, aRing[usedIdx % pVirtio->uQueueSize[idxQueue]]),
211 &elem, sizeof(elem));
212}
213
214DECLINLINE(void) virtioWriteUsedRingIdx(PVIRTIOSTATE pVirtio, uint16_t idxQueue, uint16_t uIdx)
215{
216 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
217 PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
218 pVirtio->aGCPhysQueueUsed[idxQueue] + RT_UOFFSETOF(VIRTQ_USED_T, uIdx),
219 &uIdx, sizeof(uIdx));
220}
221
222#ifdef LOG_ENABLED
223DECLINLINE(uint16_t) virtioReadUsedRingIdx(PVIRTIOSTATE pVirtio, uint16_t idxQueue)
224{
225 uint16_t uIdx;
226 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
227 PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
228 pVirtio->aGCPhysQueueUsed[idxQueue] + RT_UOFFSETOF(VIRTQ_USED_T, uIdx),
229 &uIdx, sizeof(uIdx));
230 return uIdx;
231}
232#endif
233
234DECLINLINE(uint16_t) virtioReadUsedFlags(PVIRTIOSTATE pVirtio, uint16_t idxQueue)
235{
236 uint16_t fFlags;
237 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
238 PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
239 pVirtio->aGCPhysQueueUsed[idxQueue] + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
240 &fFlags, sizeof(fFlags));
241 return fFlags;
242}
243
244#if 0 /* unused */
245DECLINLINE(void) virtioWriteUsedFlags(PVIRTIOSTATE pVirtio, uint16_t idxQueue, uint32_t fFlags)
246{
247 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
248 RT_UNTRUSTED_VALIDATED_FENCE(); /* VirtIO 1.0, Section 3.2.1.4.1 */
249 PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
250 pVirtio->aGCPhysQueueUsed[idxQueue] + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
251 &fFlags, sizeof(fFlags));
252}
253#endif
254
255#if 0 /* unused */
256DECLINLINE(uint16_t) virtioReadUsedAvailEvent(PVIRTIOSTATE pVirtio, uint16_t idxQueue)
257{
258 uint16_t uAvailEventIdx;
259 RT_UNTRUSTED_VALIDATED_FENCE(); /* VirtIO 1.0, Section 3.2.1.4.1 */
260 /** VirtIO 1.0 uAvailEventIdx (avail_event) immediately follows ring */
261 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
262 PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
263 pVirtio->aGCPhysQueueUsed[idxQueue] + RT_UOFFSETOF_DYN(VIRTQ_USED_T, aRing[pVirtio->uQueueSize[idxQueue]]),
264 &uAvailEventIdx, sizeof(uAvailEventIdx));
265 return uAvailEventIdx;
266}
267#endif
268
269#if 0 /* unused */
270DECLINLINE(void) virtioWriteUsedAvailEvent(PVIRTIOSTATE pVirtio, uint16_t idxQueue, uint32_t uAvailEventIdx)
271{
272 /** VirtIO 1.0 uAvailEventIdx (avail_event) immediately follows ring */
273 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
274 PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
275 pVirtio->aGCPhysQueueUsed[idxQueue] + RT_UOFFSETOF_DYN(VIRTQ_USED_T, aRing[pVirtio->uQueueSize[idxQueue]]),
276 &uAvailEventIdx, sizeof(uAvailEventIdx));
277}
278#endif
279
280/** @} */
281
282#ifdef LOG_ENABLED
283
284/**
285 * Does a formatted hex dump using Log(()), recommend using VIRTIO_HEX_DUMP() macro to
286 * control enabling of logging efficiently.
287 *
288 * @param pv pointer to buffer to dump contents of
289 * @param cb count of characters to dump from buffer
290 * @param uBase base address of per-row address prefixing of hex output
291 * @param pszTitle Optional title. If present displays title that lists
292 * provided text with value of cb to indicate size next to it.
293 */
294void virtioHexDump(uint8_t *pv, uint32_t cb, uint32_t uBase, const char *pszTitle)
295{
296 if (pszTitle)
297 Log(("%s [%d bytes]:\n", pszTitle, cb));
298 for (uint32_t row = 0; row < RT_MAX(1, (cb / 16) + 1) && row * 16 < cb; row++)
299 {
300 Log(("%04x: ", row * 16 + uBase)); /* line address */
301 for (uint8_t col = 0; col < 16; col++)
302 {
303 uint32_t idx = row * 16 + col;
304 if (idx >= cb)
305 Log(("-- %s", (col + 1) % 8 ? "" : " "));
306 else
307 Log(("%02x %s", pv[idx], (col + 1) % 8 ? "" : " "));
308 }
309 for (uint32_t idx = row * 16; idx < row * 16 + 16; idx++)
310 Log(("%c", (idx >= cb) ? ' ' : (pv[idx] >= 0x20 && pv[idx] <= 0x7e ? pv[idx] : '.')));
311 Log(("\n"));
312 }
313 Log(("\n"));
314 RT_NOREF2(uBase, pv);
315}
316
317/**
318 * Log memory-mapped I/O input or output value.
319 *
320 * This is designed to be invoked by macros that can make contextual assumptions
321 * (e.g. implicitly derive MACRO parameters from the invoking function). It is exposed
322 * for the VirtIO client doing the device-specific implementation in order to log in a
323 * similar fashion accesses to the device-specific MMIO configuration structure. Macros
324 * that leverage this function are found in virtioCommonCfgAccessed() and can be
325 * used as an example of how to use this effectively for the device-specific
326 * code.
327 *
328 * @param pszFunc To avoid displaying this function's name via __FUNCTION__ or LogFunc()
329 * @param pszMember Name of struct member
330 * @param pv pointer to value
331 * @param cb size of value
332 * @param uOffset offset into member where value starts
333 * @param fWrite True if write I/O
334 * @param fHasIndex True if the member is indexed
335 * @param idx The index if fHasIndex
336 */
337void virtioLogMappedIoValue(const char *pszFunc, const char *pszMember, uint32_t uMemberSize,
338 const void *pv, uint32_t cb, uint32_t uOffset, int fWrite,
339 int fHasIndex, uint32_t idx)
340{
341
342#define FMTHEX(fmtout, val, cNybbles) \
343 fmtout[cNybbles] = '\0'; \
344 for (uint8_t i = 0; i < cNybbles; i++) \
345 fmtout[(cNybbles - i) - 1] = "0123456789abcdef"[(val >> (i * 4)) & 0xf];
346
347#define MAX_STRING 64
348 char pszIdx[MAX_STRING] = { 0 };
349 char pszDepiction[MAX_STRING] = { 0 };
350 char pszFormattedVal[MAX_STRING] = { 0 };
351 if (fHasIndex)
352 RTStrPrintf(pszIdx, sizeof(pszIdx), "[%d]", idx);
353 if (cb == 1 || cb == 2 || cb == 4 || cb == 8)
354 {
355 /* manually padding with 0's instead of \b due to different impl of %x precision than printf() */
356 uint64_t val = 0;
357 memcpy((char *)&val, pv, cb);
358 FMTHEX(pszFormattedVal, val, cb * 2);
359 if (uOffset != 0 || cb != uMemberSize) /* display bounds if partial member access */
360 RTStrPrintf(pszDepiction, sizeof(pszDepiction), "%s%s[%d:%d]",
361 pszMember, pszIdx, uOffset, uOffset + cb - 1);
362 else
363 RTStrPrintf(pszDepiction, sizeof(pszDepiction), "%s%s", pszMember, pszIdx);
364 RTStrPrintf(pszDepiction, sizeof(pszDepiction), "%-30s", pszDepiction);
365 uint32_t first = 0;
366 for (uint8_t i = 0; i < sizeof(pszDepiction); i++)
367 if (pszDepiction[i] == ' ' && first++)
368 pszDepiction[i] = '.';
369 Log6Func(("%s: Guest %s %s 0x%s\n",
370 pszFunc, fWrite ? "wrote" : "read ", pszDepiction, pszFormattedVal));
371 }
372 else /* odd number or oversized access, ... log inline hex-dump style */
373 {
374 Log6Func(("%s: Guest %s %s%s[%d:%d]: %.*Rhxs\n",
375 pszFunc, fWrite ? "wrote" : "read ", pszMember,
376 pszIdx, uOffset, uOffset + cb, cb, pv));
377 }
378 RT_NOREF2(fWrite, pszFunc);
379}
380
381#endif /* LOG_ENABLED */
382
383/**
384 * Makes the MMIO-mapped Virtio uDeviceStatus registers non-cryptic
385 */
386DECLINLINE(void) virtioLogDeviceStatus(uint8_t bStatus)
387{
388 if (bStatus == 0)
389 Log6(("RESET"));
390 else
391 {
392 int primed = 0;
393 if (bStatus & VIRTIO_STATUS_ACKNOWLEDGE)
394 Log6(("%sACKNOWLEDGE", primed++ ? "" : ""));
395 if (bStatus & VIRTIO_STATUS_DRIVER)
396 Log6(("%sDRIVER", primed++ ? " | " : ""));
397 if (bStatus & VIRTIO_STATUS_FEATURES_OK)
398 Log6(("%sFEATURES_OK", primed++ ? " | " : ""));
399 if (bStatus & VIRTIO_STATUS_DRIVER_OK)
400 Log6(("%sDRIVER_OK", primed++ ? " | " : ""));
401 if (bStatus & VIRTIO_STATUS_FAILED)
402 Log6(("%sFAILED", primed++ ? " | " : ""));
403 if (bStatus & VIRTIO_STATUS_DEVICE_NEEDS_RESET)
404 Log6(("%sNEEDS_RESET", primed++ ? " | " : ""));
405 (void)primed;
406 }
407}
408
409#ifdef IN_RING3
410/**
411 * Allocate client context for client to work with VirtIO-provided with queue
412 *
413 * @param pVirtio Pointer to the virtio state.
414 * @param idxQueue Queue number
415 * @param pcszName Name to give queue
416 *
417 * @returns VBox status code.
418 */
419int virtioR3QueueAttach(PVIRTIOSTATE pVirtio, uint16_t idxQueue, const char *pcszName)
420{
421 LogFunc(("%s\n", pcszName));
422 PVIRTQSTATE pVirtq = &pVirtio->virtqState[idxQueue];
423 pVirtq->uAvailIdx = 0;
424 pVirtq->uUsedIdx = 0;
425 pVirtq->fEventThresholdReached = false;
426 RTStrCopy(pVirtq->szVirtqName, sizeof(pVirtq->szVirtqName), pcszName);
427 return VINF_SUCCESS;
428}
429#endif /* IN_RING3 */
430
431#if 0 /** @todo r=bird: no prototype or docs for this one */
432/**
433 * See API comments in header file for description
434 */
435int virtioQueueSkip(PVIRTIOSTATE pVirtio, uint16_t idxQueue)
436{
437 Assert(idxQueue < RT_ELEMENTS(pVirtio->virtqState));
438 PVIRTQSTATE pVirtq = &pVirtio->virtqState[idxQueue];
439
440 AssertMsgReturn(IS_DRIVER_OK(pVirtio) && pVirtio->uQueueEnable[idxQueue],
441 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
442
443 if (virtioQueueIsEmpty(pVirtio, idxQueue))
444 return VERR_NOT_AVAILABLE;
445
446 Log2Func(("%s avail_idx=%u\n", pVirtq->szVirtqName, pVirtq->uAvailIdx));
447 pVirtq->uAvailIdx++;
448
449 return VINF_SUCCESS;
450}
451#endif
452
453/**
454 * Check if the associated queue is empty
455 *
456 * @param hVirtio Handle for VirtIO framework
457 * @param idxQueue Queue number
458 *
459 * @retval true Queue is empty or unavailable.
460 * @retval false Queue is available and has entries
461 */
462bool virtioQueueIsEmpty(PVIRTIOSTATE pVirtio, uint16_t idxQueue)
463{
464 if (pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
465 return virtqIsEmpty(pVirtio, idxQueue);
466 return true;
467}
468
469#ifdef IN_RING3
470
471/**
472 * Removes descriptor chain from avail ring of indicated queue and converts the descriptor
473 * chain into its OUT (to device) and IN to guest components.
474 *
475 * Additionally it converts the OUT desc chain data to a contiguous virtual
476 * memory buffer for easy consumption by the caller. The caller must return the
477 * descriptor chain pointer via virtioR3QueuePut() and then call virtioQueueSync()
478 * at some point to return the data to the guest and complete the transaction.
479 *
480 * @param pVirtio Pointer to the virtio state.
481 * @param idxQueue Queue number
482 * @param fRemove flags whether to remove desc chain from queue (false = peek)
483 * @param ppDescChain Address to store pointer to descriptor chain that contains the
484 * pre-processed transaction information pulled from the virtq.
485 *
486 * @returns VBox status code:
487 * @retval VINF_SUCCESS Success
488 * @retval VERR_INVALID_STATE VirtIO not in ready state (asserted).
489 * @retval VERR_NOT_AVAILABLE If the queue is empty.
490 */
491int virtioR3QueueGet(PVIRTIOSTATE pVirtio, uint16_t idxQueue, PPVIRTIO_DESC_CHAIN_T ppDescChain, bool fRemove)
492{
493 AssertReturn(ppDescChain, VERR_INVALID_PARAMETER);
494
495 Assert(idxQueue < RT_ELEMENTS(pVirtio->virtqState));
496 PVIRTQSTATE pVirtq = &pVirtio->virtqState[idxQueue];
497
498 PRTSGSEG paSegsIn = (PRTSGSEG)RTMemAlloc(VIRTQ_MAX_SIZE * sizeof(RTSGSEG));
499 AssertReturn(paSegsIn, VERR_NO_MEMORY);
500
501 PRTSGSEG paSegsOut = (PRTSGSEG)RTMemAlloc(VIRTQ_MAX_SIZE * sizeof(RTSGSEG));
502 AssertReturn(paSegsOut, VERR_NO_MEMORY);
503
504 AssertMsgReturn(IS_DRIVER_OK(pVirtio) && pVirtio->uQueueEnable[idxQueue],
505 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
506
507 if (virtqIsEmpty(pVirtio, idxQueue))
508 return VERR_NOT_AVAILABLE;
509
510 uint16_t uHeadIdx = virtioReadAvailDescIdx(pVirtio, idxQueue, pVirtq->uAvailIdx);
511 uint16_t uDescIdx = uHeadIdx;
512
513 Log3Func(("%s DESC CHAIN: (head) desc_idx=%u [avail_idx=%u]\n", pVirtq->szVirtqName, uHeadIdx, pVirtq->uAvailIdx));
514
515 if (fRemove)
516 pVirtq->uAvailIdx++;
517
518 VIRTQ_DESC_T desc;
519
520 uint32_t cbIn = 0, cbOut = 0, cSegsIn = 0, cSegsOut = 0;
521
522 do
523 {
524 RTSGSEG *pSeg;
525
526 /*
527 * Malicious guests may go beyond paSegsIn or paSegsOut boundaries by linking
528 * several descriptors into a loop. Since there is no legitimate way to get a sequences of
529 * linked descriptors exceeding the total number of descriptors in the ring (see @bugref{8620}),
530 * the following aborts I/O if breach and employs a simple log throttling algorithm to notify.
531 */
532 if (cSegsIn + cSegsOut >= VIRTQ_MAX_SIZE)
533 {
534 static volatile uint32_t s_cMessages = 0;
535 static volatile uint32_t s_cThreshold = 1;
536 if (ASMAtomicIncU32(&s_cMessages) == ASMAtomicReadU32(&s_cThreshold))
537 {
538 LogRelMax(64, ("Too many linked descriptors; check if the guest arranges descriptors in a loop.\n"));
539 if (ASMAtomicReadU32(&s_cMessages) != 1)
540 LogRelMax(64, ("(the above error has occured %u times so far)\n", ASMAtomicReadU32(&s_cMessages)));
541 ASMAtomicWriteU32(&s_cThreshold, ASMAtomicReadU32(&s_cThreshold) * 10);
542 }
543 break;
544 }
545 RT_UNTRUSTED_VALIDATED_FENCE();
546
547 virtioReadDesc(pVirtio, idxQueue, uDescIdx, &desc);
548
549 if (desc.fFlags & VIRTQ_DESC_F_WRITE)
550 {
551 Log3Func(("%s IN desc_idx=%u seg=%u addr=%RGp cb=%u\n", QUEUENAME(pVirtio, idxQueue), uDescIdx, cSegsIn, desc.GCPhysBuf, desc.cb));
552 cbIn += desc.cb;
553 pSeg = &(paSegsIn[cSegsIn++]);
554 }
555 else
556 {
557 Log3Func(("%s OUT desc_idx=%u seg=%u addr=%RGp cb=%u\n", QUEUENAME(pVirtio, idxQueue), uDescIdx, cSegsOut, desc.GCPhysBuf, desc.cb));
558 cbOut += desc.cb;
559 pSeg = &(paSegsOut[cSegsOut++]);
560 }
561
562 pSeg->pvSeg = (void *)desc.GCPhysBuf;
563 pSeg->cbSeg = desc.cb;
564
565 uDescIdx = desc.uDescIdxNext;
566 } while (desc.fFlags & VIRTQ_DESC_F_NEXT);
567
568 PRTSGBUF pSgPhysIn = (PRTSGBUF)RTMemAllocZ(sizeof(RTSGBUF));
569 AssertReturn(pSgPhysIn, VERR_NO_MEMORY);
570
571 RTSgBufInit(pSgPhysIn, (PCRTSGSEG)paSegsIn, cSegsIn);
572
573 PRTSGBUF pSgPhysOut = (PRTSGBUF)RTMemAllocZ(sizeof(RTSGBUF));
574 AssertReturn(pSgPhysOut, VERR_NO_MEMORY);
575
576 RTSgBufInit(pSgPhysOut, (PCRTSGSEG)paSegsOut, cSegsOut);
577
578 PVIRTIO_DESC_CHAIN_T pDescChain = (PVIRTIO_DESC_CHAIN_T)RTMemAllocZ(sizeof(VIRTIO_DESC_CHAIN_T));
579 AssertReturn(pDescChain, VERR_NO_MEMORY);
580
581 pDescChain->uHeadIdx = uHeadIdx;
582 pDescChain->cbPhysSend = cbOut;
583 pDescChain->pSgPhysSend = pSgPhysOut;
584 pDescChain->cbPhysReturn = cbIn;
585 pDescChain->pSgPhysReturn = pSgPhysIn;
586 *ppDescChain = pDescChain;
587
588 Log3Func(("%s -- segs OUT: %u (%u bytes) IN: %u (%u bytes) --\n", pVirtq->szVirtqName, cSegsOut, cbOut, cSegsIn, cbIn));
589
590 return VINF_SUCCESS;
591}
592
593/**
594 * Returns data to the guest to complete a transaction initiated by virtQueueGet().
595 *
596 * The caller passes in a pointer to a scatter-gather buffer of virtual memory segments
597 * and a pointer to the descriptor chain context originally derived from the pulled
598 * queue entry, and this function will write the virtual memory s/g buffer into the
599 * guest's physical memory free the descriptor chain. The caller handles the freeing
600 * (as needed) of the virtual memory buffer.
601 *
602 * @note This does a write-ahead to the used ring of the guest's queue. The data
603 * written won't be seen by the guest until the next call to virtioQueueSync()
604 *
605 *
606 * @param pVirtio Pointer to the virtio state.
607 * @param idxQueue Queue number
608 *
609 * @param pSgVirtReturn Points toscatter-gather buffer of virtual memory
610 * segments the caller is returning to the guest.
611 *
612 * @param pDescChain This contains the context of the scatter-gather
613 * buffer originally pulled from the queue.
614 *
615 * @param fFence If true, put up copy fence (memory barrier) after
616 * copying to guest phys. mem.
617 *
618 * @returns VBox status code.
619 * @retval VINF_SUCCESS Success
620 * @retval VERR_INVALID_STATE VirtIO not in ready state
621 * @retval VERR_NOT_AVAILABLE Queue is empty
622 */
623int virtioR3QueuePut(PVIRTIOSTATE pVirtio, uint16_t idxQueue, PRTSGBUF pSgVirtReturn,
624 PVIRTIO_DESC_CHAIN_T pDescChain, bool fFence)
625{
626 Assert(idxQueue < RT_ELEMENTS(pVirtio->virtqState));
627 PVIRTQSTATE pVirtq = &pVirtio->virtqState[idxQueue];
628 PRTSGBUF pSgPhysReturn = pDescChain->pSgPhysReturn;
629
630 AssertMsgReturn(IS_DRIVER_OK(pVirtio) /*&& pVirtio->uQueueEnable[idxQueue]*/,
631 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
632
633 Log3Func(("Copying client data to %s, desc chain (head desc_idx %d)\n",
634 QUEUENAME(pVirtio, idxQueue), virtioReadUsedRingIdx(pVirtio, idxQueue)));
635
636 /*
637 * Copy s/g buf (virtual memory) to guest phys mem (IN direction). This virtual memory
638 * block will be small (fixed portion of response header + sense buffer area or
639 * control commands or error return values)... The bulk of req data xfers to phys mem
640 * is handled by client */
641
642 size_t cbCopy = 0;
643 size_t cbRemain = RTSgBufCalcTotalLength(pSgVirtReturn);
644 RTSgBufReset(pSgPhysReturn); /* Reset ptr because req data may have already been written */
645 while (cbRemain)
646 {
647 PCRTSGSEG paSeg = &pSgPhysReturn->paSegs[pSgPhysReturn->idxSeg];
648 uint64_t dstSgStart = (uint64_t)paSeg->pvSeg;
649 uint64_t dstSgLen = (uint64_t)paSeg->cbSeg;
650 uint64_t dstSgCur = (uint64_t)pSgPhysReturn->pvSegCur;
651 cbCopy = RT_MIN((uint64_t)pSgVirtReturn->cbSegLeft, dstSgLen - (dstSgCur - dstSgStart));
652 PDMDevHlpPhysWrite(pVirtio->CTX_SUFF(pDevIns),
653 (RTGCPHYS)pSgPhysReturn->pvSegCur, pSgVirtReturn->pvSegCur, cbCopy);
654 RTSgBufAdvance(pSgVirtReturn, cbCopy);
655 RTSgBufAdvance(pSgPhysReturn, cbCopy);
656 cbRemain -= cbCopy;
657 }
658
659 if (fFence)
660 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE(); /* needed? */
661
662 /** If this write-ahead crosses threshold where the driver wants to get an event flag it */
663 if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
664 if (pVirtq->uUsedIdx == virtioReadAvailUsedEvent(pVirtio, idxQueue))
665 pVirtq->fEventThresholdReached = true;
666
667 Assert(!(cbCopy & UINT64_C(0xffffffff00000000)));
668
669 /*
670 * Place used buffer's descriptor in used ring but don't update used ring's slot index.
671 * That will be done with a subsequent client call to virtioQueueSync() */
672 virtioWriteUsedElem(pVirtio, idxQueue, pVirtq->uUsedIdx++, pDescChain->uHeadIdx, (uint32_t)(cbCopy & UINT32_C(0xffffffff)));
673
674 Log2Func((".... Copied %lu bytes to %lu byte buffer, residual=%lu\n",
675 cbCopy, pDescChain->cbPhysReturn, pDescChain->cbPhysReturn - cbCopy));
676
677 Log6Func(("Write ahead used_idx=%d, %s used_idx=%d\n",
678 pVirtq->uUsedIdx, QUEUENAME(pVirtio, idxQueue), virtioReadUsedRingIdx(pVirtio, idxQueue)));
679
680 RTMemFree((void *)pDescChain->pSgPhysSend->paSegs);
681 RTMemFree(pDescChain->pSgPhysSend);
682 RTMemFree((void *)pSgPhysReturn->paSegs);
683 RTMemFree(pSgPhysReturn);
684 RTMemFree(pDescChain);
685
686 return VINF_SUCCESS;
687}
688
689#endif /* IN_RING3 */
690
691/**
692 * Updates the indicated virtq's "used ring" descriptor index to match the
693 * current write-head index, thus exposing the data added to the used ring by all
694 * virtioR3QueuePut() calls since the last sync. This should be called after one or
695 * more virtQueuePut() calls to inform the guest driver there is data in the queue.
696 * Explicit notifications (e.g. interrupt or MSI-X) will be sent to the guest,
697 * depending on VirtIO features negotiated and conditions, otherwise the guest
698 * will detect the update by polling. (see VirtIO 1.0
699 * specification, Section 2.4 "Virtqueues").
700 *
701 * @param pVirtio Pointer to the virtio state.
702 * @param idxQueue Queue number
703 *
704 * @returns VBox status code.
705 * @retval VINF_SUCCESS Success
706 * @retval VERR_INVALID_STATE VirtIO not in ready state
707 */
708int virtioQueueSync(PVIRTIOSTATE pVirtio, uint16_t idxQueue)
709{
710 Assert(idxQueue < RT_ELEMENTS(pVirtio->virtqState));
711 PVIRTQSTATE pVirtq = &pVirtio->virtqState[idxQueue];
712
713 AssertMsgReturn(IS_DRIVER_OK(pVirtio) && pVirtio->uQueueEnable[idxQueue],
714 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
715
716 Log6Func(("Updating %s used_idx from %u to %u\n",
717 QUEUENAME(pVirtio, idxQueue), virtioReadUsedRingIdx(pVirtio, idxQueue), pVirtq->uUsedIdx));
718
719 virtioWriteUsedRingIdx(pVirtio, idxQueue, pVirtq->uUsedIdx);
720 virtioNotifyGuestDriver(pVirtio, idxQueue, false);
721
722 return VINF_SUCCESS;
723}
724
725#ifdef IN_RING3
726/**
727 */
728static void virtior3QueueNotified(PVIRTIOSTATE pVirtio, uint16_t idxQueue, uint16_t uNotifyIdx)
729{
730 /* See VirtIO 1.0, section 4.1.5.2 It implies that idxQueue and uNotifyIdx should match.
731 * Disregarding this notification may cause throughput to stop, however there's no way to know
732 * which was queue was intended for wake-up if the two parameters disagree. */
733
734 AssertMsg(uNotifyIdx == idxQueue,
735 ("Notification param disagreement. Guest kicked virtq %d's notify addr w/non-corresponding virtq idx %d\n",
736 idxQueue, uNotifyIdx));
737
738// AssertMsgReturn(uNotifyIdx == idxQueue,
739// ("Notification param disagreement. Guest kicked virtq %d's notify addr w/non-corresponding virtq idx %d\n",
740// idxQueue, uNotifyIdx));
741 RT_NOREF(uNotifyIdx);
742
743 AssertReturnVoid(idxQueue < RT_ELEMENTS(pVirtio->virtqState));
744 PVIRTQSTATE pVirtq = &pVirtio->virtqState[idxQueue];
745 Log6Func(("%s\n", pVirtq->szVirtqName));
746 RT_NOREF(pVirtq);
747
748 /* Inform client */
749 pVirtio->Callbacks.pfnQueueNotified(pVirtio, idxQueue);
750}
751#endif /* IN_RING3 */
752
753/**
754 * Trigger MSI-X or INT# interrupt to notify guest of data added to used ring of
755 * the specified virtq, depending on the interrupt configuration of the device
756 * and depending on negotiated and realtime constraints flagged by the guest driver.
757 *
758 * See VirtIO 1.0 specification (section 2.4.7).
759 *
760 * @param pVirtio Pointer to the virtio state.
761 * @param idxQueue Queue to check for guest interrupt handling preference
762 * @param fForce Overrides idxQueue, forcing notification regardless of driver's
763 * notification preferences. This is a safeguard to prevent
764 * stalls upon resuming the VM. VirtIO 1.0 specification Section 4.1.5.5
765 * indicates spurious interrupts are harmless to guest driver's state,
766 * as they only cause the guest driver to [re]scan queues for work to do.
767 */
768static void virtioNotifyGuestDriver(PVIRTIOSTATE pVirtio, uint16_t idxQueue, bool fForce)
769{
770 Assert(idxQueue < RT_ELEMENTS(pVirtio->virtqState));
771 PVIRTQSTATE pVirtq = &pVirtio->virtqState[idxQueue];
772
773 AssertMsgReturnVoid(IS_DRIVER_OK(pVirtio), ("Guest driver not in ready state.\n"));
774 if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
775 {
776 if (pVirtq->fEventThresholdReached)
777 {
778 virtioKick(pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtio->uQueueMsixVector[idxQueue], fForce);
779 pVirtq->fEventThresholdReached = false;
780 return;
781 }
782 Log6Func(("...skipping interrupt: VIRTIO_F_EVENT_IDX set but threshold not reached\n"));
783 }
784 else
785 {
786 /** If guest driver hasn't suppressed interrupts, interrupt */
787 if (fForce || !(virtioReadUsedFlags(pVirtio, idxQueue) & VIRTQ_AVAIL_F_NO_INTERRUPT))
788 {
789 virtioKick(pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtio->uQueueMsixVector[idxQueue], fForce);
790 return;
791 }
792 Log6Func(("...skipping interrupt. Guest flagged VIRTQ_AVAIL_F_NO_INTERRUPT for queue\n"));
793 }
794}
795
796/**
797 * Raise interrupt or MSI-X
798 *
799 * @param pVirtio The device state structure.
800 * @param uCause Interrupt cause bit mask to set in PCI ISR port.
801 * @param uVec MSI-X vector, if enabled
802 * @param uForce True of out-of-band
803 */
804static int virtioKick(PVIRTIOSTATE pVirtio, uint8_t uCause, uint16_t uMsixVector, bool fForce)
805{
806 if (fForce)
807 Log6Func(("reason: resumed after suspend\n"));
808 else
809 if (uCause == VIRTIO_ISR_VIRTQ_INTERRUPT)
810 Log6Func(("reason: buffer added to 'used' ring.\n"));
811 else
812 if (uCause == VIRTIO_ISR_DEVICE_CONFIG)
813 Log6Func(("reason: device config change\n"));
814
815 if (!pVirtio->fMsiSupport)
816 {
817 pVirtio->uISR |= uCause;
818 PDMDevHlpPCISetIrq(pVirtio->CTX_SUFF(pDevIns), 0, PDM_IRQ_LEVEL_HIGH);
819 }
820 else if (uMsixVector != VIRTIO_MSI_NO_VECTOR)
821 {
822 Log6Func(("MSI-X enabled, calling PDMDevHlpPCISetIrq with vector: 0x%x\n", uMsixVector));
823 PDMDevHlpPCISetIrq(pVirtio->CTX_SUFF(pDevIns), uMsixVector, 1);
824 }
825 return VINF_SUCCESS;
826}
827
828/**
829 * Lower interrupt. (Called when guest reads ISR)
830 *
831 * @param pVirtio The device state structure.
832 */
833static void virtioLowerInterrupt(PVIRTIOSTATE pVirtio)
834{
835 PDMDevHlpPCISetIrq(pVirtio->CTX_SUFF(pDevIns), 0, PDM_IRQ_LEVEL_LOW);
836}
837
838static void virtioResetQueue(PVIRTIOSTATE pVirtio, uint16_t idxQueue)
839{
840 Assert(idxQueue < RT_ELEMENTS(pVirtio->virtqState));
841 PVIRTQSTATE pVirtQ = &pVirtio->virtqState[idxQueue];
842 pVirtQ->uAvailIdx = 0;
843 pVirtQ->uUsedIdx = 0;
844 pVirtio->uQueueEnable[idxQueue] = false;
845 pVirtio->uQueueSize[idxQueue] = VIRTQ_MAX_SIZE;
846 pVirtio->uQueueNotifyOff[idxQueue] = idxQueue;
847
848 pVirtio->uQueueMsixVector[idxQueue] = idxQueue + 2;
849 if (!pVirtio->fMsiSupport) /* VirtIO 1.0, 4.1.4.3 and 4.1.5.1.2 */
850 pVirtio->uQueueMsixVector[idxQueue] = VIRTIO_MSI_NO_VECTOR;
851}
852
853static void virtioResetDevice(PVIRTIOSTATE pVirtio)
854{
855 Log2Func(("\n"));
856 pVirtio->uDeviceFeaturesSelect = 0;
857 pVirtio->uDriverFeaturesSelect = 0;
858 pVirtio->uConfigGeneration = 0;
859 pVirtio->uDeviceStatus = 0;
860 pVirtio->uISR = 0;
861
862 virtioLowerInterrupt(pVirtio);
863
864 if (!pVirtio->fMsiSupport) /* VirtIO 1.0, 4.1.4.3 and 4.1.5.1.2 */
865 pVirtio->uMsixConfig = VIRTIO_MSI_NO_VECTOR;
866
867 pVirtio->uNumQueues = VIRTQ_MAX_CNT;
868 for (uint16_t idxQueue = 0; idxQueue < pVirtio->uNumQueues; idxQueue++)
869 virtioResetQueue(pVirtio, idxQueue);
870}
871
872#if 0 /** @todo r=bird: Probably not needed. */
873/**
874 * Enable or disable queue
875 *
876 * @param pVirtio Pointer to the virtio state.
877 * @param idxQueue Queue number
878 * @param fEnabled Flag indicating whether to enable queue or not
879 */
880void virtioQueueEnable(PVIRTIOSTATE pVirtio, uint16_t idxQueue, bool fEnabled)
881{
882 if (fEnabled)
883 pVirtio->uQueueSize[idxQueue] = VIRTQ_MAX_SIZE;
884 else
885 pVirtio->uQueueSize[idxQueue] = 0;
886}
887#endif
888
889#if 0 /** @todo r=bird: This isn't invoked by anyone. Why? */
890/**
891 * Initiate orderly reset procedure.
892 * Invoked by client to reset the device and driver (see VirtIO 1.0 section 2.1.1/2.1.2)
893 */
894void virtioResetAll(PVIRTIOSTATE pVirtio)
895{
896 LogFunc(("VIRTIO RESET REQUESTED!!!\n"));
897 pVirtio->uDeviceStatus |= VIRTIO_STATUS_DEVICE_NEEDS_RESET;
898 if (pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
899 {
900 pVirtio->fGenUpdatePending = true;
901 virtioKick(pVirtio, VIRTIO_ISR_DEVICE_CONFIG, pVirtio->uMsixConfig, false /* fForce */);
902 }
903}
904#endif
905
906#ifdef IN_RING3
907/**
908 * Invoked by this implementation when guest driver resets the device.
909 * The driver itself will not until the device has read the status change.
910 */
911static void virtioGuestR3Resetted(PVIRTIOSTATE pVirtio)
912{
913 LogFunc(("Guest reset the device\n"));
914
915 /* Let the client know */
916 pVirtio->Callbacks.pfnStatusChanged(pVirtio, 0);
917 virtioResetDevice(pVirtio);
918}
919#endif /* IN_RING3 */
920
921/**
922 * Handle accesses to Common Configuration capability
923 *
924 * @returns VBox status code
925 *
926 * @param pVirtio Virtio instance state
927 * @param fWrite Set if write access, clear if read access.
928 * @param offCfg The common configuration capability offset.
929 * @param cb Number of bytes to read or write
930 * @param pv Pointer to location to write to or read from
931 */
932static int virtioCommonCfgAccessed(PVIRTIOSTATE pVirtio, int fWrite, off_t offCfg, unsigned cb, void *pv)
933{
934/**
935 * This macro resolves to boolean true if the implied parameters, offCfg and cb,
936 * match the field offset and size of a field in the Common Cfg struct, (or if
937 * it is a 64-bit field, if it accesses either 32-bit part as a 32-bit access)
938 * This is mandated by section 4.1.3.1 of the VirtIO 1.0 specification)
939 *
940 * @param member Member of VIRTIO_PCI_COMMON_CFG_T
941 * @param offCfg Implied parameter: Offset into VIRTIO_PCI_COMMON_CFG_T
942 * @param cb Implied parameter: Number of bytes to access
943 * @result true or false
944 */
945#define MATCH_COMMON_CFG(member) \
946 ( ( RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member) == 8 \
947 && ( offCfg == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member) \
948 || offCfg == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member) + sizeof(uint32_t)) \
949 && cb == sizeof(uint32_t)) \
950 || ( offCfg == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member) \
951 && cb == RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member)) )
952
953#ifdef LOG_ENABLED
954# define LOG_COMMON_CFG_ACCESS(member, a_offIntra) \
955 virtioLogMappedIoValue(__FUNCTION__, #member, RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member), \
956 pv, cb, a_offIntra, fWrite, false, 0);
957# define LOG_COMMON_CFG_ACCESS_INDEXED(member, idx, a_offIntra) \
958 virtioLogMappedIoValue(__FUNCTION__, #member, RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member), \
959 pv, cb, a_offIntra, fWrite, true, idx);
960#else
961# define LOG_COMMON_CFG_ACCESS(member, a_offIntra) do { } while (0)
962# define LOG_COMMON_CFG_ACCESS_INDEXED(member, idx, a_offIntra) do { } while (0)
963#endif
964
965#define COMMON_CFG_ACCESSOR(member) \
966 do \
967 { \
968 uint32_t offIntra = offCfg - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
969 if (fWrite) \
970 memcpy((char *)&pVirtio->member + offIntra, (const char *)pv, cb); \
971 else \
972 memcpy(pv, (const char *)&pVirtio->member + offIntra, cb); \
973 LOG_COMMON_CFG_ACCESS(member, offIntra); \
974 } while(0)
975
976#define COMMON_CFG_ACCESSOR_INDEXED(member, idx) \
977 do \
978 { \
979 uint32_t offIntra = offCfg - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
980 if (fWrite) \
981 memcpy((char *)&pVirtio->member[idx] + offIntra, pv, cb); \
982 else \
983 memcpy(pv, (const char *)&pVirtio->member[idx] + offIntra, cb); \
984 LOG_COMMON_CFG_ACCESS_INDEXED(member, idx, offIntra); \
985 } while(0)
986
987#define COMMON_CFG_ACCESSOR_READONLY(member) \
988 do \
989 { \
990 uint32_t offIntra = offCfg - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
991 if (fWrite) \
992 LogFunc(("Guest attempted to write readonly virtio_pci_common_cfg.%s\n", #member)); \
993 else \
994 { \
995 memcpy(pv, (const char *)&pVirtio->member + offIntra, cb); \
996 LOG_COMMON_CFG_ACCESS(member, offIntra); \
997 } \
998 } while(0)
999
1000#define COMMON_CFG_ACCESSOR_INDEXED_READONLY(member, idx) \
1001 do \
1002 { \
1003 uint32_t offIntra = offCfg - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
1004 if (fWrite) \
1005 LogFunc(("Guest attempted to write readonly virtio_pci_common_cfg.%s[%d]\n", #member, idx)); \
1006 else \
1007 { \
1008 memcpy(pv, (char const *)&pVirtio->member[idx] + offIntra, cb); \
1009 LOG_COMMON_CFG_ACCESS_INDEXED(member, idx, offIntra); \
1010 } \
1011 } while(0)
1012
1013
1014 int rc = VINF_SUCCESS;
1015 uint64_t val;
1016 if (MATCH_COMMON_CFG(uDeviceFeatures))
1017 {
1018 if (fWrite) /* Guest WRITE pCommonCfg>uDeviceFeatures */
1019 {
1020 LogFunc(("Guest attempted to write readonly virtio_pci_common_cfg.device_feature\n"));
1021 return VINF_SUCCESS;
1022 }
1023 else /* Guest READ pCommonCfg->uDeviceFeatures */
1024 {
1025 switch (pVirtio->uDeviceFeaturesSelect)
1026 {
1027 case 0:
1028 val = pVirtio->uDeviceFeatures & UINT32_C(0xffffffff);
1029 memcpy(pv, &val, cb);
1030 LOG_COMMON_CFG_ACCESS(uDeviceFeatures, offCfg - RT_UOFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uDeviceFeatures));
1031 break;
1032 case 1:
1033 val = pVirtio->uDeviceFeatures >> 32;
1034 memcpy(pv, &val, cb);
1035 LOG_COMMON_CFG_ACCESS(uDeviceFeatures, offCfg - RT_UOFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uDeviceFeatures) + 4);
1036 break;
1037 default:
1038 LogFunc(("Guest read uDeviceFeatures with out of range selector (%#x), returning 0\n",
1039 pVirtio->uDeviceFeaturesSelect));
1040 return VINF_IOM_MMIO_UNUSED_00;
1041 }
1042 }
1043 }
1044 else if (MATCH_COMMON_CFG(uDriverFeatures))
1045 {
1046 if (fWrite) /* Guest WRITE pCommonCfg->udriverFeatures */
1047 {
1048 switch (pVirtio->uDriverFeaturesSelect)
1049 {
1050 case 0:
1051 memcpy(&pVirtio->uDriverFeatures, pv, cb);
1052 LOG_COMMON_CFG_ACCESS(uDriverFeatures, offCfg - RT_UOFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uDriverFeatures));
1053 break;
1054 case 1:
1055 memcpy((char *)&pVirtio->uDriverFeatures + sizeof(uint32_t), pv, cb);
1056 LOG_COMMON_CFG_ACCESS(uDriverFeatures, offCfg - RT_UOFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uDriverFeatures) + 4);
1057 break;
1058 default:
1059 LogFunc(("Guest wrote uDriverFeatures with out of range selector (%#x), returning 0\n",
1060 pVirtio->uDriverFeaturesSelect));
1061 return VINF_SUCCESS;
1062 }
1063 }
1064 else /* Guest READ pCommonCfg->udriverFeatures */
1065 {
1066 switch (pVirtio->uDriverFeaturesSelect)
1067 {
1068 case 0:
1069 val = pVirtio->uDriverFeatures & 0xffffffff;
1070 memcpy(pv, &val, cb);
1071 LOG_COMMON_CFG_ACCESS(uDriverFeatures, offCfg - RT_UOFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uDriverFeatures));
1072 break;
1073 case 1:
1074 val = (pVirtio->uDriverFeatures >> 32) & 0xffffffff;
1075 memcpy(pv, &val, cb);
1076 LOG_COMMON_CFG_ACCESS(uDriverFeatures, offCfg - RT_UOFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uDriverFeatures) + 4);
1077 break;
1078 default:
1079 LogFunc(("Guest read uDriverFeatures with out of range selector (%#x), returning 0\n",
1080 pVirtio->uDriverFeaturesSelect));
1081 return VINF_IOM_MMIO_UNUSED_00;
1082 }
1083 }
1084 }
1085 else if (MATCH_COMMON_CFG(uNumQueues))
1086 {
1087 if (fWrite)
1088 {
1089 Log2Func(("Guest attempted to write readonly virtio_pci_common_cfg.num_queues\n"));
1090 return VINF_SUCCESS;
1091 }
1092 else
1093 {
1094 *(uint16_t *)pv = VIRTQ_MAX_CNT;
1095 LOG_COMMON_CFG_ACCESS(uNumQueues, 0);
1096 }
1097 }
1098 else if (MATCH_COMMON_CFG(uDeviceStatus))
1099 {
1100 if (fWrite) /* Guest WRITE pCommonCfg->uDeviceStatus */
1101 {
1102 pVirtio->uDeviceStatus = *(uint8_t *)pv;
1103 Log6Func(("Guest wrote uDeviceStatus ................ ("));
1104 virtioLogDeviceStatus(pVirtio->uDeviceStatus);
1105 Log6((")\n"));
1106 if (pVirtio->uDeviceStatus == 0)
1107 virtioGuestR3Resetted(pVirtio);
1108 /*
1109 * Notify client only if status actually changed from last time.
1110 */
1111 uint32_t const fOkayNow = pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK;
1112 uint32_t const fWasOkay = pVirtio->uPrevDeviceStatus & VIRTIO_STATUS_DRIVER_OK;
1113 if (fOkayNow != fWasOkay)
1114 pVirtio->Callbacks.pfnStatusChanged(pVirtio, fOkayNow);
1115 pVirtio->uPrevDeviceStatus = pVirtio->uDeviceStatus;
1116 }
1117 else /* Guest READ pCommonCfg->uDeviceStatus */
1118 {
1119 Log6Func(("Guest read uDeviceStatus ................ ("));
1120 *(uint32_t *)pv = pVirtio->uDeviceStatus; /** @todo r=bird: Why 32-bit write here, the field is 8-bit? */
1121 virtioLogDeviceStatus(pVirtio->uDeviceStatus);
1122 Log6((")\n"));
1123 }
1124 }
1125 else
1126 if (MATCH_COMMON_CFG(uMsixConfig))
1127 COMMON_CFG_ACCESSOR(uMsixConfig);
1128 else
1129 if (MATCH_COMMON_CFG(uDeviceFeaturesSelect))
1130 COMMON_CFG_ACCESSOR(uDeviceFeaturesSelect);
1131 else
1132 if (MATCH_COMMON_CFG(uDriverFeaturesSelect))
1133 COMMON_CFG_ACCESSOR(uDriverFeaturesSelect);
1134 else
1135 if (MATCH_COMMON_CFG(uConfigGeneration))
1136 COMMON_CFG_ACCESSOR_READONLY(uConfigGeneration);
1137 else
1138 if (MATCH_COMMON_CFG(uQueueSelect))
1139 COMMON_CFG_ACCESSOR(uQueueSelect);
1140 else
1141 if (MATCH_COMMON_CFG(uQueueSize))
1142 COMMON_CFG_ACCESSOR_INDEXED(uQueueSize, pVirtio->uQueueSelect);
1143 else
1144 if (MATCH_COMMON_CFG(uQueueMsixVector))
1145 COMMON_CFG_ACCESSOR_INDEXED(uQueueMsixVector, pVirtio->uQueueSelect);
1146 else
1147 if (MATCH_COMMON_CFG(uQueueEnable))
1148 COMMON_CFG_ACCESSOR_INDEXED(uQueueEnable, pVirtio->uQueueSelect);
1149 else
1150 if (MATCH_COMMON_CFG(uQueueNotifyOff))
1151 COMMON_CFG_ACCESSOR_INDEXED_READONLY(uQueueNotifyOff, pVirtio->uQueueSelect);
1152 else
1153 if (MATCH_COMMON_CFG(aGCPhysQueueDesc))
1154 COMMON_CFG_ACCESSOR_INDEXED(aGCPhysQueueDesc, pVirtio->uQueueSelect);
1155 else
1156 if (MATCH_COMMON_CFG(aGCPhysQueueAvail))
1157 COMMON_CFG_ACCESSOR_INDEXED(aGCPhysQueueAvail, pVirtio->uQueueSelect);
1158 else
1159 if (MATCH_COMMON_CFG(aGCPhysQueueUsed))
1160 COMMON_CFG_ACCESSOR_INDEXED(aGCPhysQueueUsed, pVirtio->uQueueSelect);
1161 else
1162 {
1163 Log2Func(("Bad guest %s access to virtio_pci_common_cfg: offCfg=%#x (%d), cb=%d\n",
1164 fWrite ? "write" : "read ", offCfg, offCfg, cb));
1165 return fWrite ? VINF_SUCCESS : VINF_IOM_MMIO_UNUSED_00;
1166 }
1167
1168#undef COMMON_CFG_ACCESSOR_READONLY
1169#undef COMMON_CFG_ACCESSOR_INDEXED_READONLY
1170#undef COMMON_CFG_ACCESSOR_INDEXED
1171#undef COMMON_CFG_ACCESSOR
1172#undef LOG_COMMON_CFG_ACCESS_INDEXED
1173#undef LOG_COMMON_CFG_ACCESS
1174#undef MATCH_COMMON_CFG
1175 return rc;
1176}
1177
1178/**
1179 * @callback_method_impl{FNIOMMMIONEWREAD,
1180 * Memory mapped I/O Handler for PCI Capabilities read operations.}
1181 */
1182static DECLCALLBACK(VBOXSTRICTRC) virtioMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
1183{
1184 PVIRTIOSTATE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOSTATE);
1185 Assert(pVirtio == (PVIRTIOSTATE)pvUser); RT_NOREF(pvUser);
1186
1187 /** @todo r=bird: This code does not handle reads spanning more than one
1188 * capability structure/area. How does that match the spec? For instance
1189 * if the guest uses a 64-bit MOV instruction on this MMIO region, you'll
1190 * see cb=8 here. Same if it uses 16 or 32 byte reads. Intel allows all
1191 * this, so question is how it's supposed to be handled. At a minimum there
1192 * must be an explanation of that here.
1193 */
1194
1195 uint32_t offIntra;
1196 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, offIntra, pVirtio->LocDeviceCap))
1197 {
1198#ifdef IN_RING3
1199 /*
1200 * Callback to client to manage device-specific configuration.
1201 */
1202 VBOXSTRICTRC rcStrict = pVirtio->Callbacks.pfnDevCapRead(pDevIns, offIntra, pv, cb);
1203
1204 /*
1205 * Additionally, anytime any part of the device-specific configuration (which our client maintains)
1206 * is READ it needs to be checked to see if it changed since the last time any part was read, in
1207 * order to maintain the config generation (see VirtIO 1.0 spec, section 4.1.4.3.1)
1208 */
1209 bool fDevSpecificFieldChanged = !!memcmp((char *)pVirtio->pvDevSpecificCfg + offIntra,
1210 (char *)pVirtio->pvPrevDevSpecificCfg + offIntra,
1211 RT_MIN(cb, pVirtio->cbDevSpecificCfg - offIntra));
1212
1213 memcpy(pVirtio->pvPrevDevSpecificCfg, pVirtio->pvDevSpecificCfg, pVirtio->cbDevSpecificCfg);
1214
1215 if (pVirtio->fGenUpdatePending || fDevSpecificFieldChanged)
1216 {
1217 ++pVirtio->uConfigGeneration;
1218 Log6Func(("Bumped cfg. generation to %d because %s%s\n",
1219 pVirtio->uConfigGeneration,
1220 fDevSpecificFieldChanged ? "<dev cfg changed> " : "",
1221 pVirtio->fGenUpdatePending ? "<update was pending>" : ""));
1222 pVirtio->fGenUpdatePending = false;
1223 }
1224 return rcStrict;
1225#else
1226 return VINF_IOM_R3_MMIO_READ;
1227#endif
1228 }
1229
1230 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, offIntra, pVirtio->LocCommonCfgCap))
1231 return virtioCommonCfgAccessed(pVirtio, false /* fWrite */, offIntra, cb, pv);
1232
1233 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, offIntra, pVirtio->LocIsrCap) && cb == sizeof(uint8_t))
1234 {
1235 *(uint8_t *)pv = pVirtio->uISR;
1236 Log6Func(("Read and clear ISR\n"));
1237 pVirtio->uISR = 0; /* VirtIO specification requires reads of ISR to clear it */
1238 virtioLowerInterrupt(pVirtio);
1239 return VINF_SUCCESS;
1240 }
1241
1242 ASSERT_GUEST_MSG_FAILED(("Bad read access to mapped capabilities region: off=%RGp cb=%u\n", off, cb));
1243 return VINF_IOM_MMIO_UNUSED_00;
1244}
1245
1246/**
1247 * @callback_method_impl{FNIOMMMIONEWREAD,
1248 * Memory mapped I/O Handler for PCI Capabilities write operations.}
1249 */
1250static DECLCALLBACK(VBOXSTRICTRC) virtioMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
1251{
1252 PVIRTIOSTATE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOSTATE);
1253 Assert(pVirtio == (PVIRTIOSTATE)pvUser); RT_NOREF(pvUser);
1254
1255 /** @todo r=bird: This code does not handle writes spanning more than one
1256 * capability structure/area. How does that match the spec? For instance
1257 * if the guest uses a 64-bit MOV instruction on this MMIO region, you'll
1258 * see cb=8 here. Same if it uses 16 or 32 byte reads. Intel allows all
1259 * this, so question is how it's supposed to be handled. At a minimum there
1260 * must be an explanation of that here.
1261 */
1262
1263 uint32_t offIntra;
1264 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, offIntra, pVirtio->LocDeviceCap))
1265 {
1266#ifdef IN_RING3
1267 /*
1268 * Pass this MMIO write access back to the client to handle
1269 */
1270 return pVirtio->Callbacks.pfnDevCapWrite(pDevIns, offIntra, pv, cb);
1271#else
1272 return VINF_IOM_R3_MMIO_WRITE;
1273#endif
1274 }
1275
1276 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, offIntra, pVirtio->LocCommonCfgCap))
1277 return virtioCommonCfgAccessed(pVirtio, true /* fWrite */, offIntra, cb, (void *)pv);
1278
1279 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, offIntra, pVirtio->LocIsrCap) && cb == sizeof(uint8_t))
1280 {
1281 pVirtio->uISR = *(uint8_t *)pv;
1282 Log6Func(("Setting uISR = 0x%02x (virtq interrupt: %d, dev confg interrupt: %d)\n",
1283 pVirtio->uISR & 0xff,
1284 pVirtio->uISR & VIRTIO_ISR_VIRTQ_INTERRUPT,
1285 RT_BOOL(pVirtio->uISR & VIRTIO_ISR_DEVICE_CONFIG)));
1286 return VINF_SUCCESS;
1287 }
1288
1289 /* This *should* be guest driver dropping index of a new descriptor in avail ring */
1290 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, offIntra, pVirtio->LocNotifyCap) && cb == sizeof(uint16_t))
1291 {
1292#ifdef IN_RING3
1293 virtior3QueueNotified(pVirtio, offIntra / VIRTIO_NOTIFY_OFFSET_MULTIPLIER, *(uint16_t *)pv);
1294#else
1295 return VINF_IOM_R3_MMIO_WRITE;
1296#endif
1297 }
1298
1299 ASSERT_GUEST_MSG_FAILED(("Bad write access to mapped capabilities region: off=%RGp pv=%#p{%.*Rhxs} cb=%u\n", off, pv, cb, pv, cb));
1300 return VINF_SUCCESS;
1301}
1302
1303#ifdef IN_RING3
1304
1305/**
1306 * @callback_method_impl{FNPCICONFIGREAD}
1307 */
1308static DECLCALLBACK(VBOXSTRICTRC) virtioR3PciConfigRead(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
1309 uint32_t uAddress, unsigned cb, uint32_t *pu32Value)
1310{
1311 PVIRTIOSTATE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOSTATE);
1312 RT_NOREF(pPciDev);
1313
1314 LogFlowFunc(("pDevIns=%p pPciDev=%p uAddress=%#x cb=%u pu32Value=%p\n",
1315 pDevIns, pPciDev, uAddress, cb, pu32Value));
1316 if (uAddress == pVirtio->uPciCfgDataOff)
1317 {
1318 /*
1319 * VirtIO 1.0 spec section 4.1.4.7 describes a required alternative access capability
1320 * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space
1321 * (the virtio_pci_cfg_cap capability), and access data items.
1322 */
1323 uint32_t uLength = pVirtio->pPciCfgCap->pciCap.uLength;
1324 uint32_t uOffset = pVirtio->pPciCfgCap->pciCap.uOffset;
1325 uint8_t uBar = pVirtio->pPciCfgCap->pciCap.uBar;
1326
1327 if ( (uLength != 1 && uLength != 2 && uLength != 4)
1328 || cb != uLength
1329 || uBar != VIRTIO_REGION_PCI_CAP)
1330 {
1331 ASSERT_GUEST_MSG_FAILED(("Guest read virtio_pci_cfg_cap.pci_cfg_data using mismatching config. Ignoring\n"));
1332 *pu32Value = UINT32_MAX;
1333 return VINF_SUCCESS;
1334 }
1335
1336 VBOXSTRICTRC rcStrict = virtioMmioRead(pDevIns, pVirtio, uOffset, pu32Value, cb);
1337 Log2Func(("virtio: Guest read virtio_pci_cfg_cap.pci_cfg_data, bar=%d, offset=%d, length=%d, result=%d -> %Rrc\n",
1338 uBar, uOffset, uLength, *pu32Value, VBOXSTRICTRC_VAL(rcStrict)));
1339 return rcStrict;
1340 }
1341 return VINF_PDM_PCI_DO_DEFAULT;
1342}
1343
1344/**
1345 * @callback_method_impl{FNPCICONFIGWRITE}
1346 */
1347static DECLCALLBACK(VBOXSTRICTRC) virtioR3PciConfigWrite(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
1348 uint32_t uAddress, unsigned cb, uint32_t u32Value)
1349{
1350 PVIRTIOSTATE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOSTATE);
1351 RT_NOREF(pPciDev);
1352
1353 LogFlowFunc(("pDevIns=%p pPciDev=%p uAddress=%#x cb=%u u32Value=%#x\n", pDevIns, pPciDev, uAddress, cb, u32Value));
1354 if (uAddress == pVirtio->uPciCfgDataOff)
1355 {
1356 /* VirtIO 1.0 spec section 4.1.4.7 describes a required alternative access capability
1357 * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space
1358 * (the virtio_pci_cfg_cap capability), and access data items. */
1359
1360 uint32_t uLength = pVirtio->pPciCfgCap->pciCap.uLength;
1361 uint32_t uOffset = pVirtio->pPciCfgCap->pciCap.uOffset;
1362 uint8_t uBar = pVirtio->pPciCfgCap->pciCap.uBar;
1363
1364 if ( (uLength != 1 && uLength != 2 && uLength != 4)
1365 || cb != uLength
1366 || uBar != VIRTIO_REGION_PCI_CAP)
1367 {
1368 ASSERT_GUEST_MSG_FAILED(("Guest write virtio_pci_cfg_cap.pci_cfg_data using mismatching config. Ignoring\n"));
1369 return VINF_SUCCESS;
1370 }
1371
1372 VBOXSTRICTRC rcStrict = virtioMmioWrite(pDevIns, pVirtio, uOffset, &u32Value, cb);
1373 Log2Func(("Guest wrote virtio_pci_cfg_cap.pci_cfg_data, bar=%d, offset=%x, length=%x, value=%d -> %Rrc\n",
1374 uBar, uOffset, uLength, u32Value, VBOXSTRICTRC_VAL(rcStrict)));
1375 return rcStrict;
1376 }
1377 return VINF_PDM_PCI_DO_DEFAULT;
1378}
1379
1380
1381/*********************************************************************************************************************************
1382* Saved state. *
1383*********************************************************************************************************************************/
1384
1385/**
1386 * Called from the FNSSMDEVSAVEEXEC function of the device.
1387 *
1388 * @param pVirtio Pointer to the virtio state.
1389 * @param pHlp The ring-3 device helpers.
1390 * @param pSSM The saved state handle.
1391 * @returns VBox status code.
1392 */
1393int virtioR3SaveExec(PVIRTIOSTATE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
1394{
1395 pHlp->pfnSSMPutU64(pSSM, VIRTIO_SAVEDSTATE_MARKER);
1396 pHlp->pfnSSMPutU32(pSSM, VIRTIO_SAVEDSTATE_VERSION);
1397
1398 pHlp->pfnSSMPutBool(pSSM, pVirtio->fGenUpdatePending);
1399 pHlp->pfnSSMPutU8(pSSM, pVirtio->uDeviceStatus);
1400 pHlp->pfnSSMPutU8(pSSM, pVirtio->uConfigGeneration);
1401 pHlp->pfnSSMPutU8(pSSM, pVirtio->uPciCfgDataOff);
1402 pHlp->pfnSSMPutU8(pSSM, pVirtio->uISR);
1403 pHlp->pfnSSMPutU16(pSSM, pVirtio->uQueueSelect);
1404 pHlp->pfnSSMPutU32(pSSM, pVirtio->uDeviceFeaturesSelect);
1405 pHlp->pfnSSMPutU32(pSSM, pVirtio->uDriverFeaturesSelect);
1406 pHlp->pfnSSMPutU64(pSSM, pVirtio->uDriverFeatures);
1407 Assert(pVirtio->uNumQueues == VIRTQ_MAX_CNT); /** @todo r=bird: See todo in struct & virtioR3LoadExec. */
1408 pHlp->pfnSSMPutU32(pSSM, pVirtio->uNumQueues);
1409
1410 for (uint32_t i = 0; i < pVirtio->uNumQueues; i++)
1411 {
1412 pHlp->pfnSSMPutGCPhys64(pSSM, pVirtio->aGCPhysQueueDesc[i]);
1413 pHlp->pfnSSMPutGCPhys64(pSSM, pVirtio->aGCPhysQueueAvail[i]);
1414 pHlp->pfnSSMPutGCPhys64(pSSM, pVirtio->aGCPhysQueueUsed[i]);
1415 pHlp->pfnSSMPutU16(pSSM, pVirtio->uQueueNotifyOff[i]);
1416 pHlp->pfnSSMPutU16(pSSM, pVirtio->uQueueMsixVector[i]);
1417 pHlp->pfnSSMPutU16(pSSM, pVirtio->uQueueEnable[i]);
1418 pHlp->pfnSSMPutU16(pSSM, pVirtio->uQueueSize[i]);
1419 pHlp->pfnSSMPutU16(pSSM, pVirtio->virtqState[i].uAvailIdx);
1420 pHlp->pfnSSMPutU16(pSSM, pVirtio->virtqState[i].uUsedIdx);
1421 int rc = pHlp->pfnSSMPutMem(pSSM, pVirtio->virtqState[i].szVirtqName, 32);
1422 AssertRCReturn(rc, rc);
1423 }
1424
1425 return VINF_SUCCESS;
1426}
1427
1428/**
1429 * Called from the FNSSMDEVLOADEXEC function of the device.
1430 *
1431 * @param pVirtio Pointer to the virtio state.
1432 * @param pHlp The ring-3 device helpers.
1433 * @param pSSM The saved state handle.
1434 * @returns VBox status code.
1435 */
1436int virtioR3LoadExec(PVIRTIOSTATE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
1437{
1438 /*
1439 * Check the marker and (embedded) version number.
1440 */
1441 uint64_t uMarker = 0;
1442 int rc = pHlp->pfnSSMGetU64(pSSM, &uMarker);
1443 AssertRCReturn(rc, rc);
1444 if (uMarker != VIRTIO_SAVEDSTATE_MARKER)
1445 return pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
1446 N_("Expected marker value %#RX64 found %#RX64 instead"),
1447 VIRTIO_SAVEDSTATE_MARKER, uMarker);
1448 uint32_t uVersion = 0;
1449 rc = pHlp->pfnSSMGetU32(pSSM, &uVersion);
1450 AssertRCReturn(rc, rc);
1451 if (uVersion != VIRTIO_SAVEDSTATE_VERSION)
1452 return pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
1453 N_("Unsupported virtio version: %u"), uVersion);
1454
1455 /*
1456 * Load the state.
1457 */
1458 pHlp->pfnSSMGetBool(pSSM, &pVirtio->fGenUpdatePending);
1459 pHlp->pfnSSMGetU8(pSSM, &pVirtio->uDeviceStatus);
1460 pHlp->pfnSSMGetU8(pSSM, &pVirtio->uConfigGeneration);
1461 pHlp->pfnSSMGetU8(pSSM, &pVirtio->uPciCfgDataOff);
1462 pHlp->pfnSSMGetU8(pSSM, &pVirtio->uISR);
1463 pHlp->pfnSSMGetU16(pSSM, &pVirtio->uQueueSelect);
1464 pHlp->pfnSSMGetU32(pSSM, &pVirtio->uDeviceFeaturesSelect);
1465 pHlp->pfnSSMGetU32(pSSM, &pVirtio->uDriverFeaturesSelect);
1466 pHlp->pfnSSMGetU64(pSSM, &pVirtio->uDriverFeatures);
1467
1468 /* Make sure the queue count is within expectations. */
1469 /** @todo r=bird: Turns out the expectations are exactly VIRTQ_MAX_CNT, bug? */
1470 rc = pHlp->pfnSSMGetU32(pSSM, &pVirtio->uNumQueues);
1471 AssertRCReturn(rc, rc);
1472 AssertReturn(pVirtio->uNumQueues == VIRTQ_MAX_CNT,
1473 pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
1474 N_("Saved queue count %u, expected %u"), uVersion, VIRTQ_MAX_CNT));
1475 AssertCompile(RT_ELEMENTS(pVirtio->virtqState) == VIRTQ_MAX_CNT);
1476 AssertCompile(RT_ELEMENTS(pVirtio->aGCPhysQueueDesc) == VIRTQ_MAX_CNT);
1477
1478 for (uint32_t idxQueue = 0; idxQueue < pVirtio->uNumQueues; idxQueue++)
1479 {
1480 pHlp->pfnSSMGetGCPhys64(pSSM, &pVirtio->aGCPhysQueueDesc[idxQueue]);
1481 pHlp->pfnSSMGetGCPhys64(pSSM, &pVirtio->aGCPhysQueueAvail[idxQueue]);
1482 pHlp->pfnSSMGetGCPhys64(pSSM, &pVirtio->aGCPhysQueueUsed[idxQueue]);
1483 pHlp->pfnSSMGetU16(pSSM, &pVirtio->uQueueNotifyOff[idxQueue]);
1484 pHlp->pfnSSMGetU16(pSSM, &pVirtio->uQueueMsixVector[idxQueue]);
1485 pHlp->pfnSSMGetU16(pSSM, &pVirtio->uQueueEnable[idxQueue]);
1486 pHlp->pfnSSMGetU16(pSSM, &pVirtio->uQueueSize[idxQueue]);
1487 pHlp->pfnSSMGetU16(pSSM, &pVirtio->virtqState[idxQueue].uAvailIdx);
1488 pHlp->pfnSSMGetU16(pSSM, &pVirtio->virtqState[idxQueue].uUsedIdx);
1489 rc = pHlp->pfnSSMGetMem(pSSM, pVirtio->virtqState[idxQueue].szVirtqName,
1490 sizeof(pVirtio->virtqState[idxQueue].szVirtqName));
1491 AssertRCReturn(rc, rc);
1492 }
1493
1494 return VINF_SUCCESS;
1495}
1496
1497
1498/*********************************************************************************************************************************
1499* Device Level *
1500*********************************************************************************************************************************/
1501
1502/**
1503 * This should be called from PDMDEVREGR3::pfnReset.
1504 *
1505 * @param pVirtio Pointer to the virtio state.
1506 */
1507void virtioR3PropagateResetNotification(PVIRTIOSTATE pVirtio)
1508{
1509 /** @todo r=bird: You probably need to do something here. See
1510 * virtioScsiR3Reset. */
1511 RT_NOREF(pVirtio);
1512}
1513
1514
1515/**
1516 * This sends notification ('kicks') guest driver to check queues for any new
1517 * elements in the used queue to process.
1518 *
1519 * It should be called after resuming in case anything was added to the queues
1520 * during suspend/quiescing and a notification was missed, to prevent the guest
1521 * from stalling after suspend.
1522 */
1523void virtioR3PropagateResumeNotification(PVIRTIOSTATE pVirtio)
1524{
1525 virtioNotifyGuestDriver(pVirtio, (uint16_t)0 /* idxQueue */, true /* fForce */);
1526}
1527
1528
1529/**
1530 * This should be called from PDMDEVREGR3::pfnDestruct.
1531 *
1532 * @param pVirtio Pointer to the virtio state.
1533 * @param pDevIns The device instance.
1534 */
1535void virtioR3Term(PVIRTIOSTATE pVirtio, PPDMDEVINS pDevIns)
1536{
1537 if (pVirtio->pvPrevDevSpecificCfg)
1538 {
1539 RTMemFree(pVirtio->pvPrevDevSpecificCfg);
1540 pVirtio->pvPrevDevSpecificCfg = NULL;
1541 }
1542 RT_NOREF(pDevIns);
1543}
1544
1545
/**
 * Setup PCI device controller and Virtio state
 *
 * This should be called from PDMDEVREGR3::pfnConstruct.
 *
 * @returns VBox status code.
 * @param   pVirtio             Pointer to the virtio state. This must be
 *                              the first member in the shared device
 *                              instance data!
 * @param   pDevIns             The device instance.
 * @param   pPciParams          Values to populate industry standard PCI Configuration Space data structure
 * @param   pcszInstance        Device instance name (format-specifier)
 * @param   fDevSpecificFeatures VirtIO device-specific features offered by
 *                              client
 * @param   pvDevSpecificCfg    Address of client's dev-specific
 *                              configuration struct.
 * @param   cbDevSpecificCfg    Size of virtio_pci_device_cap device-specific struct
 */
int virtioR3Init(PVIRTIOSTATE pVirtio, PPDMDEVINS pDevIns, PVIRTIOPCIPARAMS pPciParams, const char *pcszInstance,
                 uint64_t fDevSpecificFeatures, void *pvDevSpecificCfg, uint16_t cbDevSpecificCfg)
{
    /*
     * The pVirtio state must be the first member of the shared device instance
     * data, otherwise we cannot get our bearings in the PCI configuration callbacks.
     */
    AssertLogRelReturn(pVirtio == PDMINS_2_DATA(pDevIns, PVIRTIOSTATE), VERR_STATE_CHANGED);


#if 0 /* Until pdmR3DvHlp_PCISetIrq() impl is fixed and Assert that limits vec to 0 is removed */
# ifdef VBOX_WITH_MSI_DEVICES
    pVirtio->fMsiSupport = true;
# endif
#endif

    /*
     * The host features offered include both device-specific features
     * and reserved feature bits (device independent)
     */
    pVirtio->uDeviceFeatures = VIRTIO_F_VERSION_1
                             | VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED
                             | fDevSpecificFeatures;

    RTStrCopy(pVirtio->szInstance, sizeof(pVirtio->szInstance), pcszInstance);

    pVirtio->pDevInsR3 = pDevIns;
    pVirtio->uDeviceStatus = 0;
    pVirtio->cbDevSpecificCfg = cbDevSpecificCfg;
    pVirtio->pvDevSpecificCfg = pvDevSpecificCfg;
    /* Shadow copy of the device-specific config -- presumably used to detect
     * guest-visible config changes (freed again in virtioR3Term).
     * NOTE(review): this path assumes pvDevSpecificCfg is non-NULL, yet the
     * capability setup below explicitly tolerates a NULL pvDevSpecificCfg; a
     * client passing NULL would trip the VERR_NO_MEMORY assertion here --
     * confirm all callers supply a device-specific config. */
    pVirtio->pvPrevDevSpecificCfg = RTMemDup(pvDevSpecificCfg, cbDevSpecificCfg);
    AssertLogRelReturn(pVirtio->pvPrevDevSpecificCfg, VERR_NO_MEMORY);

    /* Set PCI config registers (assume 32-bit mode) */
    PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
    PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);

    PDMPciDevSetRevisionId(pPciDev,         DEVICE_PCI_REVISION_ID_VIRTIO);
    PDMPciDevSetVendorId(pPciDev,           DEVICE_PCI_VENDOR_ID_VIRTIO);
    PDMPciDevSetSubSystemVendorId(pPciDev,  DEVICE_PCI_VENDOR_ID_VIRTIO);
    PDMPciDevSetDeviceId(pPciDev,           pPciParams->uDeviceId);
    PDMPciDevSetClassBase(pPciDev,          pPciParams->uClassBase);
    PDMPciDevSetClassSub(pPciDev,           pPciParams->uClassSub);
    PDMPciDevSetClassProg(pPciDev,          pPciParams->uClassProg);
    PDMPciDevSetSubSystemId(pPciDev,        pPciParams->uSubsystemId);
    PDMPciDevSetInterruptLine(pPciDev,      pPciParams->uInterruptLine);
    PDMPciDevSetInterruptPin(pPciDev,       pPciParams->uInterruptPin);

    /* Register PCI device */
    int rc = PDMDevHlpPCIRegister(pDevIns, pPciDev);
    if (RT_FAILURE(rc))
        return PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: cannot register PCI Device")); /* can we put params in this error? */

    /* Intercept PCI config space accesses so the VIRTIO_PCI_CAP_PCI_CFG window can be emulated. */
    rc = PDMDevHlpPCIInterceptConfigAccesses(pDevIns, pPciDev, virtioR3PciConfigRead, virtioR3PciConfigWrite);
    AssertRCReturn(rc, rc);


    /* Construct & map PCI vendor-specific capabilities for virtio host negotiation with guest driver */

    /* The following capability, mapped via VirtIO 1.0: struct virtio_pci_cfg_cap (VIRTIO_PCI_CFG_CAP_T),
     * is a mandatory (but suboptimal) alternative interface to the host device capabilities, allowing
     * the guest to access the memory of any BAR through the PCI configuration space.  Unlike the Common,
     * Notify, ISR and Device capabilities it is accessed directly via the PCI config region, and it
     * therefore does not contribute to the capabilities region (BAR) the other capabilities use.
     * (The VirtIO driver on Linux doesn't use it.)
     */
#define CFG_ADDR_2_IDX(addr) ((uint8_t)(((uintptr_t)(addr) - (uintptr_t)&pPciDev->abConfig[0])))
#define SET_PCI_CAP_LOC(a_pPciDev, a_pCfg, a_LocCap, a_uMmioLengthAlign) \
        do { \
            (a_LocCap).offMmio = (a_pCfg)->uOffset; \
            (a_LocCap).cbMmio  = RT_ALIGN_T((a_pCfg)->uLength, a_uMmioLengthAlign, uint16_t); \
            (a_LocCap).offPci  = (uint16_t)(uintptr_t)((uint8_t *)(a_pCfg) - &(a_pPciDev)->abConfig[0]); \
            (a_LocCap).cbPci   = (a_pCfg)->uCapLen; \
        } while (0)

    PVIRTIO_PCI_CAP_T pCfg;
    uint32_t cbRegion = 0;  /* running total of MMIO space used by the capabilities in VIRTIO_REGION_PCI_CAP */

    /* Common capability (VirtIO 1.0 spec, section 4.1.4.3) */
    pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[0x40];
    pCfg->uCfgType = VIRTIO_PCI_CAP_COMMON_CFG;
    pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
    pCfg->uCapLen  = sizeof(VIRTIO_PCI_CAP_T);
    pCfg->uCapNext = CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen;
    pCfg->uBar     = VIRTIO_REGION_PCI_CAP;
    pCfg->uOffset  = RT_ALIGN_32(0, 4); /* reminder, in case someone changes offset */
    pCfg->uLength  = sizeof(VIRTIO_PCI_COMMON_CFG_T);
    cbRegion += pCfg->uLength;
    SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocCommonCfgCap, 2);
    pVirtio->pCommonCfgCap = pCfg;

    /*
     * Notify capability (VirtIO 1.0 spec, section 4.1.4.4). Note: uLength is based the choice
     * of this implementation that each queue's uQueueNotifyOff is set equal to (QueueSelect) ordinal
     * value of the queue */
    pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
    pCfg->uCfgType = VIRTIO_PCI_CAP_NOTIFY_CFG;
    pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
    pCfg->uCapLen  = sizeof(VIRTIO_PCI_NOTIFY_CAP_T);
    pCfg->uCapNext = CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen;
    pCfg->uBar     = VIRTIO_REGION_PCI_CAP;
    pCfg->uOffset  = pVirtio->pCommonCfgCap->uOffset + pVirtio->pCommonCfgCap->uLength;
    pCfg->uOffset  = RT_ALIGN_32(pCfg->uOffset, 2); /** @todo r=bird: Why is this word aligned rather than dword?  If there is a
                                                     * theoretical chance we won't allways be on a dword boundrary here, the
                                                     * read/write really will need to handle cross capability reads. */
    pCfg->uLength  = VIRTQ_MAX_CNT * VIRTIO_NOTIFY_OFFSET_MULTIPLIER + 2;  /* will change in VirtIO 1.1 */
    cbRegion += pCfg->uLength;
    SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocNotifyCap, 1);
    pVirtio->pNotifyCap = (PVIRTIO_PCI_NOTIFY_CAP_T)pCfg;
    pVirtio->pNotifyCap->uNotifyOffMultiplier = VIRTIO_NOTIFY_OFFSET_MULTIPLIER;

    /* ISR capability (VirtIO 1.0 spec, section 4.1.4.5)
     *
     * VirtIO 1.0 spec says 8-bit, unaligned in MMIO space. Example/diagram
     * of spec shows it as a 32-bit field with upper bits 'reserved'
     * Will take spec words more literally than the diagram for now.
     */
    pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
    pCfg->uCfgType = VIRTIO_PCI_CAP_ISR_CFG;
    pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
    pCfg->uCapLen  = sizeof(VIRTIO_PCI_CAP_T);
    pCfg->uCapNext = CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen;
    pCfg->uBar     = VIRTIO_REGION_PCI_CAP;
    pCfg->uOffset  = pVirtio->pNotifyCap->pciCap.uOffset + pVirtio->pNotifyCap->pciCap.uLength; /** @todo r=bird: This probably is _not_ dword aligned, given that the previous structure is 0x32 (50) bytes long. */
    pCfg->uLength  = sizeof(uint8_t);
    cbRegion += pCfg->uLength;
    SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocIsrCap, 4);
    pVirtio->pIsrCap = pCfg;

    /* PCI Cfg capability (VirtIO 1.0 spec, section 4.1.4.7)
     * This capability doesn't get page-MMIO mapped. Instead uBar, uOffset and uLength are intercepted
     * by trapping PCI configuration I/O and get modulated by consumers to locate fetch and read/write
     * values from any region. NOTE: The linux driver not only doesn't use this feature, it will not
     * even list it as present if uLength isn't non-zero and 4-byte-aligned as the linux driver is
     * initializing. */

    /* Remember where the guest-writable uPciCfgData window lives in config space; the
       config-access interceptors registered above need this offset. */
    pVirtio->uPciCfgDataOff = pCfg->uCapNext + RT_OFFSETOF(VIRTIO_PCI_CFG_CAP_T, uPciCfgData);
    pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
    pCfg->uCfgType = VIRTIO_PCI_CAP_PCI_CFG;
    pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
    pCfg->uCapLen  = sizeof(VIRTIO_PCI_CFG_CAP_T);
    pCfg->uCapNext = (pVirtio->fMsiSupport || pVirtio->pvDevSpecificCfg) ? CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen : 0;
    pCfg->uBar     = 0;
    pCfg->uOffset  = 0;
    pCfg->uLength  = 0;
    cbRegion += pCfg->uLength;  /* uLength is zero here, so this is a no-op kept for symmetry */
    SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocPciCfgCap, 1);
    pVirtio->pPciCfgCap = (PVIRTIO_PCI_CFG_CAP_T)pCfg;

    if (pVirtio->pvDevSpecificCfg)
    {
        /* Following capability (via VirtIO 1.0, section 4.1.4.6). Client defines the
         * device-specific config fields struct and passes size to this constructor */
        pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
        pCfg->uCfgType = VIRTIO_PCI_CAP_DEVICE_CFG;
        pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
        pCfg->uCapLen  = sizeof(VIRTIO_PCI_CAP_T);
        pCfg->uCapNext = pVirtio->fMsiSupport ? CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen : 0;
        pCfg->uBar     = VIRTIO_REGION_PCI_CAP;
        pCfg->uOffset  = pVirtio->pIsrCap->uOffset + pVirtio->pIsrCap->uLength;
        pCfg->uOffset  = RT_ALIGN_32(pCfg->uOffset, 4);
        pCfg->uLength  = cbDevSpecificCfg;
        cbRegion += pCfg->uLength;
        SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocDeviceCap, 4);
        //pVirtio->pDeviceCap = pCfg;
    }
    else
        Assert(pVirtio->LocDeviceCap.cbMmio == 0 && pVirtio->LocDeviceCap.cbPci == 0);

    if (pVirtio->fMsiSupport)
    {
        PDMMSIREG aMsiReg;
        RT_ZERO(aMsiReg);
        aMsiReg.iMsixCapOffset  = pCfg->uCapNext;
        aMsiReg.iMsixNextOffset = 0;
        aMsiReg.iMsixBar        = VIRTIO_REGION_MSIX_CAP;
        aMsiReg.cMsixVectors    = VBOX_MSIX_MAX_ENTRIES;
        rc = PDMDevHlpPCIRegisterMsi(pDevIns, &aMsiReg); /* see MsixR3init() */
        if (RT_FAILURE(rc))
        {
            /* See PDMDevHlp.cpp:pdmR3DevHlp_PCIRegisterMsi */
            LogFunc(("Failed to configure MSI-X (%Rrc). Reverting to INTx\n", rc));
            pVirtio->fMsiSupport = false;
        }
        else
            Log2Func(("Using MSI-X for guest driver notification\n"));
    }
    else
        LogFunc(("MSI-X not available for VBox, using INTx notification\n"));


    /* Set offset to first capability and enable PCI dev capabilities */
    PDMPciDevSetCapabilityList(pPciDev, 0x40);
    PDMPciDevSetStatus(pPciDev, VBOX_PCI_STATUS_CAP_LIST);

    /* Linux drivers/virtio/virtio_pci_modern.c tries to map at least a page for the
     * 'unknown' device-specific capability without querying the capability to figure
     * out size, so pad with an extra page */

    /* NOTE(review): the region description string says "virtio-scsi" although this is
     * generic virtio code shared by other device types -- consider a neutral name. */
    rc = PDMDevHlpPCIIORegionCreateMmio(pDevIns, VIRTIO_REGION_PCI_CAP, RT_ALIGN_32(cbRegion + PAGE_SIZE, PAGE_SIZE),
                                        PCI_ADDRESS_SPACE_MEM, virtioMmioWrite, virtioMmioRead, pVirtio,
                                        IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU, "virtio-scsi MMIO",
                                        &pVirtio->hMmioPciCap);
    AssertLogRelRCReturn(rc, PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: cannot register PCI Capabilities address space")));

    return rc;
}
1769
1770#else /* !IN_RING3 */
1771
1772/**
1773 * Sets up the core ring-0/raw-mode virtio bits.
1774 *
1775 * @returns VBox status code.
1776 * @param pVirtio Pointer to the virtio state. This must be the first
1777 * member in the shared device instance data!
1778 * @param pDevIns The device instance.
1779 */
1780int virtioRZInit(PVIRTIOSTATE pVirtio, PPDMDEVINS pDevIns)
1781{
1782 int rc = PDMDevHlpMmioSetUpContext(pDevIns, pVirtio->hMmioPciCap, virtioMmioWrite, virtioMmioRead, pVirtio);
1783 AssertRCReturn(rc, rc);
1784 return rc;
1785}
1786
1787#endif /* !IN_RING3 */
1788
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette