VirtualBox

source: vbox/trunk/src/VBox/Devices/VirtIO/Virtio_1_0.cpp@81661

Last change on this file since 81661 was 81661, checked in by vboxsync, 5 years ago

Virtio_1_0,DevVirtioScsi: More common virtio code cleanups. bugref:9218 bugref:9440

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 75.0 KB
/* $Id: Virtio_1_0.cpp 81661 2019-11-04 21:50:26Z vboxsync $ */
/** @file
 * Virtio_1_0 - Virtio Common (PCI, feature & config mgt, queue mgt & proxy, notification mgt)
 */

/*
 * Copyright (C) 2009-2019 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_DEV_VIRTIO

#include <VBox/log.h>
#include <VBox/msi.h>
#include <iprt/param.h>
#include <iprt/assert.h>
#include <iprt/uuid.h>
#include <iprt/mem.h>
#include <iprt/assert.h>
#include <iprt/sg.h>
#include <VBox/vmm/pdmdev.h>
#include "Virtio_1_0.h"


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#define INSTANCE(a_pVirtio)                 (a_pVirtio)->szInstance
#define QUEUENAME(a_pVirtio, a_idxQueue)    ((a_pVirtio)->virtqState[(a_idxQueue)].szVirtqName)

/**
 * This macro returns true if the implied parameters, GCPhysAddr and cb (the access length),
 * are within the range of the mapped capability struct specified with the explicit parameters.
 *
 * @param[in]  a_GCPhysCapData  Pointer to MMIO mapped capability struct
 * @param[in]  a_pCfgCap        Pointer to capability in PCI configuration area
 * @param[out] a_fMatched       True if GCPhysAddr is within the physically mapped capability.
 *
 * Implied parameters:
 *     - GCPhysAddr - [input, implied] Physical address accessed (via MMIO callback)
 *     - cb         - [input, implied] Number of bytes to access
 *
 * @todo r=bird: Make this a predicate macro (I will probably simplify this a
 *       lot later when 'GCPhysAddr' becomes an 'off').
 */
#define MATCH_VIRTIO_CAP_STRUCT(a_GCPhysCapData, a_pCfgCap, a_fMatched) \
    bool const a_fMatched = (a_GCPhysCapData) != 0 \
                         && (a_pCfgCap) != NULL \
                         && GCPhysAddr >= (RTGCPHYS)(a_GCPhysCapData) \
                         && GCPhysAddr < ((RTGCPHYS)(a_GCPhysCapData) + ((PVIRTIO_PCI_CAP_T)(a_pCfgCap))->uLength) \
                         && cb <= ((PVIRTIO_PCI_CAP_T)a_pCfgCap)->uLength
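
#if 0 /* Illustrative sketch (not compiled): how an MMIO handler uses the matcher; 'GCPhysAddr'
       * and 'cb' are the implied parameters of the invoking callback (cf. virtioR3MmioRead below). */
    MATCH_VIRTIO_CAP_STRUCT(pVirtio->GCPhysIsrCap, pVirtio->pIsrCap, fIsr);
    if (fIsr && cb == sizeof(uint8_t))
    { /* ... service the one-byte ISR access ... */ }
#endif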

#define IS_DRIVER_OK(pVirtio) (pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK)


/** Marks the start of the virtio saved state (just for sanity). */
#define VIRTIO_SAVEDSTATE_MARKER                UINT64_C(0x1133557799bbddff)
/** The current saved state version for the virtio core. */
#define VIRTIO_SAVEDSTATE_VERSION               UINT32_C(1)




/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * virtq related structs
 * (struct names follow VirtIO 1.0 spec, typedef use VBox style)
 */
typedef struct virtq_desc
{
    uint64_t  GCPhysBuf;                        /**< addr       GC Phys. address of buffer     */
    uint32_t  cb;                               /**< len        Buffer length                  */
    uint16_t  fFlags;                           /**< flags      Buffer specific flags          */
    uint16_t  uDescIdxNext;                     /**< next       Idx set if VIRTIO_DESC_F_NEXT  */
} VIRTQ_DESC_T, *PVIRTQ_DESC_T;

typedef struct virtq_avail
{
    uint16_t  fFlags;                           /**< flags      avail ring drv to dev flags    */
    uint16_t  uIdx;                             /**< idx        Index of next free ring slot   */
    uint16_t  auRing[RT_FLEXIBLE_ARRAY];        /**< ring       Ring: avail drv to dev bufs    */
    /* uint16_t  uUsedEventIdx; - used_event (if VIRTQ_USED_F_EVENT_IDX) */
} VIRTQ_AVAIL_T, *PVIRTQ_AVAIL_T;

typedef struct virtq_used_elem
{
    uint32_t  uDescIdx;                         /**< idx        Start of used desc chain       */
    uint32_t  cbElem;                           /**< len        Total len of used desc chain   */
} VIRTQ_USED_ELEM_T;

typedef struct virt_used
{
    uint16_t  fFlags;                           /**< flags      used ring host-to-guest flags  */
    uint16_t  uIdx;                             /**< idx        Index of next ring slot        */
    VIRTQ_USED_ELEM_T aRing[RT_FLEXIBLE_ARRAY]; /**< ring       Ring: used dev to drv bufs     */
    /** @todo r=bird: From the usage, this member shouldn't be here and will only
     *        confuse compilers. */
    /* uint16_t  uAvailEventIdx; - avail_event if (VIRTQ_USED_F_EVENT_IDX) */
} VIRTQ_USED_T, *PVIRTQ_USED_T;
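
#if 0 /* Illustrative sketch (not compiled): the guest-physical offset arithmetic the ring
       * accessors below rely on, e.g. locating avail ring entry N with modulo wrap-around. */
    RTGCPHYS GCPhysAvailEntryN = pVirtio->aGCPhysQueueAvail[idxQueue]
                               + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[N % pVirtio->uQueueSize[idxQueue]]);
#endif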


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static void virtioNotifyGuestDriver(PVIRTIOSTATE pVirtio, uint16_t idxQueue, bool fForce);
static int  virtioKick(PVIRTIOSTATE pVirtio, uint8_t uCause, uint16_t uVec, bool fForce);

/** @name Internal queue operations
 * @{ */

#if 0 /* unused */
DECLINLINE(int) virtqIsEventNeeded(uint16_t uEventIdx, uint16_t uDescIdxNew, uint16_t uDescIdxOld)
{
    return (uint16_t)(uDescIdxNew - uEventIdx - 1) < (uint16_t)(uDescIdxNew - uDescIdxOld);
}
#endif

/**
 * Accessor for virtq descriptor
 */
DECLINLINE(void) virtioReadDesc(PVIRTIOSTATE pVirtio, uint16_t idxQueue, uint32_t uDescIdx, PVIRTQ_DESC_T pDesc)
{
    //Log(("%s virtioQueueReadDesc: ring=%p idx=%u\n", INSTANCE(pState), pVirtQ, idx));
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->aGCPhysQueueDesc[idxQueue] + sizeof(VIRTQ_DESC_T) * (uDescIdx % pVirtio->uQueueSize[idxQueue]),
                      pDesc, sizeof(VIRTQ_DESC_T));
}

/**
 * Accessors for virtq avail ring
 */
DECLINLINE(uint16_t) virtioReadAvailDescIdx(PVIRTIOSTATE pVirtio, uint16_t idxQueue, uint32_t availIdx)
{
    uint16_t uDescIdx;
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->aGCPhysQueueAvail[idxQueue]
                    + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[availIdx % pVirtio->uQueueSize[idxQueue]]),
                      &uDescIdx, sizeof(uDescIdx));
    return uDescIdx;
}

DECLINLINE(uint16_t) virtioReadAvailRingIdx(PVIRTIOSTATE pVirtio, uint16_t idxQueue)
{
    uint16_t uIdx = 0;
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->aGCPhysQueueAvail[idxQueue] + RT_UOFFSETOF(VIRTQ_AVAIL_T, uIdx),
                      &uIdx, sizeof(uIdx));
    return uIdx;
}

DECLINLINE(bool) virtqIsEmpty(PVIRTIOSTATE pVirtio, uint16_t idxQueue)
{
    return virtioReadAvailRingIdx(pVirtio, idxQueue) == pVirtio->virtqState[idxQueue].uAvailIdx;
}

#if 0 /* unused */
DECLINLINE(uint16_t) virtioReadAvailFlags(PVIRTIOSTATE pVirtio, uint16_t idxQueue)
{
    uint16_t fFlags;
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->aGCPhysQueueAvail[idxQueue] + RT_UOFFSETOF(VIRTQ_AVAIL_T, fFlags),
                      &fFlags, sizeof(fFlags));
    return fFlags;
}
#endif

DECLINLINE(uint16_t) virtioReadAvailUsedEvent(PVIRTIOSTATE pVirtio, uint16_t idxQueue)
{
    uint16_t uUsedEventIdx;
    /** VirtIO 1.0 uUsedEventIdx (used_event) immediately follows ring */
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->aGCPhysQueueAvail[idxQueue] + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[pVirtio->uQueueSize[idxQueue]]),
                      &uUsedEventIdx, sizeof(uUsedEventIdx));
    return uUsedEventIdx;
}
/** @} */

/** @name Accessors for virtq used ring
 * @{
 */
DECLINLINE(void) virtioWriteUsedElem(PVIRTIOSTATE pVirtio, uint16_t idxQueue, uint32_t usedIdx, uint32_t uDescIdx, uint32_t uLen)
{
    VIRTQ_USED_ELEM_T elem = { uDescIdx, uLen };
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
                          pVirtio->aGCPhysQueueUsed[idxQueue]
                        + RT_UOFFSETOF_DYN(VIRTQ_USED_T, aRing[usedIdx % pVirtio->uQueueSize[idxQueue]]),
                          &elem, sizeof(elem));
}

DECLINLINE(void) virtioWriteUsedRingIdx(PVIRTIOSTATE pVirtio, uint16_t idxQueue, uint16_t uIdx)
{
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
                          pVirtio->aGCPhysQueueUsed[idxQueue] + RT_UOFFSETOF(VIRTQ_USED_T, uIdx),
                          &uIdx, sizeof(uIdx));
}

#ifdef LOG_ENABLED
DECLINLINE(uint16_t) virtioReadUsedRingIdx(PVIRTIOSTATE pVirtio, uint16_t idxQueue)
{
    uint16_t uIdx;
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->aGCPhysQueueUsed[idxQueue] + RT_UOFFSETOF(VIRTQ_USED_T, uIdx),
                      &uIdx, sizeof(uIdx));
    return uIdx;
}
#endif

DECLINLINE(uint16_t) virtioReadUsedFlags(PVIRTIOSTATE pVirtio, uint16_t idxQueue)
{
    uint16_t fFlags;
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->aGCPhysQueueUsed[idxQueue] + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
                      &fFlags, sizeof(fFlags));
    return fFlags;
}

#if 0 /* unused */
DECLINLINE(void) virtioWriteUsedFlags(PVIRTIOSTATE pVirtio, uint16_t idxQueue, uint32_t fFlags)
{
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    RT_UNTRUSTED_VALIDATED_FENCE(); /* VirtIO 1.0, Section 3.2.1.4.1 */
    PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
                          pVirtio->aGCPhysQueueUsed[idxQueue] + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
                          &fFlags, sizeof(fFlags));
}
#endif

#if 0 /* unused */
DECLINLINE(uint16_t) virtioReadUsedAvailEvent(PVIRTIOSTATE pVirtio, uint16_t idxQueue)
{
    uint16_t uAvailEventIdx;
    RT_UNTRUSTED_VALIDATED_FENCE(); /* VirtIO 1.0, Section 3.2.1.4.1 */
    /** VirtIO 1.0 uAvailEventIdx (avail_event) immediately follows ring */
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->aGCPhysQueueUsed[idxQueue] + RT_UOFFSETOF_DYN(VIRTQ_USED_T, aRing[pVirtio->uQueueSize[idxQueue]]),
                      &uAvailEventIdx, sizeof(uAvailEventIdx));
    return uAvailEventIdx;
}
#endif

#if 0 /* unused */
DECLINLINE(void) virtioWriteUsedAvailEvent(PVIRTIOSTATE pVirtio, uint16_t idxQueue, uint32_t uAvailEventIdx)
{
    /** VirtIO 1.0 uAvailEventIdx (avail_event) immediately follows ring */
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
                          pVirtio->aGCPhysQueueUsed[idxQueue] + RT_UOFFSETOF_DYN(VIRTQ_USED_T, aRing[pVirtio->uQueueSize[idxQueue]]),
                          &uAvailEventIdx, sizeof(uAvailEventIdx));
}
#endif

/** @} */

#ifdef LOG_ENABLED

/**
 * Does a formatted hex dump using Log(()); use of the VIRTIO_HEX_DUMP() macro is
 * recommended so that logging can be enabled and disabled efficiently.
 *
 * @param pv        Pointer to the buffer whose contents are dumped.
 * @param cb        Count of bytes to dump from the buffer.
 * @param uBase     Base address used to prefix each row of hex output.
 * @param pszTitle  Optional title; if present, it is displayed with the value of
 *                  cb next to it to indicate the size.
 */
void virtioHexDump(uint8_t *pv, uint32_t cb, uint32_t uBase, const char *pszTitle)
{
    if (pszTitle)
        Log(("%s [%d bytes]:\n", pszTitle, cb));
    for (uint32_t row = 0; row < RT_MAX(1, (cb / 16) + 1) && row * 16 < cb; row++)
    {
        Log(("%04x: ", row * 16 + uBase)); /* line address */
        for (uint8_t col = 0; col < 16; col++)
        {
            uint32_t idx = row * 16 + col;
            if (idx >= cb)
                Log(("-- %s", (col + 1) % 8 ? "" : " "));
            else
                Log(("%02x %s", pv[idx], (col + 1) % 8 ? "" : " "));
        }
        for (uint32_t idx = row * 16; idx < row * 16 + 16; idx++)
            Log(("%c", (idx >= cb) ? ' ' : (pv[idx] >= 0x20 && pv[idx] <= 0x7e ? pv[idx] : '.')));
        Log(("\n"));
    }
    Log(("\n"));
    RT_NOREF2(uBase, pv);
}
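
#if 0 /* Illustrative sketch (not compiled): dumping a buffer at a caller-chosen base
       * address with a title; 'pv' and 'cb' come from the surrounding access handler. */
    virtioHexDump((uint8_t *)pv, cb, 0 /* uBase */, "device-specific config");
#endif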

/**
 * Log memory-mapped I/O input or output value.
 *
 * This is designed to be invoked by macros that can make contextual assumptions
 * (e.g. implicitly derive macro parameters from the invoking function). It is exposed
 * for the VirtIO client doing the device-specific implementation in order to log, in a
 * similar fashion, accesses to the device-specific MMIO configuration structure. Macros
 * that leverage this function are found in virtioCommonCfgAccessed() and can be
 * used as an example of how to use this effectively for the device-specific
 * code.
 *
 * @param pszFunc      Caller's function name (avoids displaying this function's
 *                     name via __FUNCTION__ or LogFunc())
 * @param pszMember    Name of struct member
 * @param uMemberSize  Size of the struct member
 * @param pv           Pointer to value
 * @param cb           Size of value
 * @param uOffset      Offset into member where value starts
 * @param fWrite       True if write I/O
 * @param fHasIndex    True if the member is indexed
 * @param idx          The index, if fHasIndex
 */
void virtioLogMappedIoValue(const char *pszFunc, const char *pszMember, uint32_t uMemberSize,
                            const void *pv, uint32_t cb, uint32_t uOffset, int fWrite,
                            int fHasIndex, uint32_t idx)
{

#define FMTHEX(fmtout, val, cNybbles) \
    fmtout[cNybbles] = '\0'; \
    for (uint8_t i = 0; i < cNybbles; i++) \
        fmtout[(cNybbles - i) - 1] = "0123456789abcdef"[(val >> (i * 4)) & 0xf];

#define MAX_STRING 64
    char pszIdx[MAX_STRING] = { 0 };
    char pszDepiction[MAX_STRING] = { 0 };
    char pszFormattedVal[MAX_STRING] = { 0 };
    if (fHasIndex)
        RTStrPrintf(pszIdx, sizeof(pszIdx), "[%d]", idx);
    if (cb == 1 || cb == 2 || cb == 4 || cb == 8)
    {
        /* manually padding with 0's instead of \b due to different impl of %x precision than printf() */
        uint64_t val = 0;
        memcpy((char *)&val, pv, cb);
        FMTHEX(pszFormattedVal, val, cb * 2);
        if (uOffset != 0 || cb != uMemberSize) /* display bounds if partial member access */
            RTStrPrintf(pszDepiction, sizeof(pszDepiction), "%s%s[%d:%d]",
                        pszMember, pszIdx, uOffset, uOffset + cb - 1);
        else
            RTStrPrintf(pszDepiction, sizeof(pszDepiction), "%s%s", pszMember, pszIdx);
        RTStrPrintf(pszDepiction, sizeof(pszDepiction), "%-30s", pszDepiction);
        uint32_t first = 0;
        for (uint8_t i = 0; i < sizeof(pszDepiction); i++)
            if (pszDepiction[i] == ' ' && first++)
                pszDepiction[i] = '.';
        Log6Func(("%s: Guest %s %s 0x%s\n",
                  pszFunc, fWrite ? "wrote" : "read ", pszDepiction, pszFormattedVal));
    }
    else /* odd number or oversized access, ... log inline hex-dump style */
    {
        Log6Func(("%s: Guest %s %s%s[%d:%d]: %.*Rhxs\n",
                  pszFunc, fWrite ? "wrote" : "read ", pszMember,
                  pszIdx, uOffset, uOffset + cb, cb, pv));
    }
    RT_NOREF2(fWrite, pszFunc);
}
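
#if 0 /* Illustrative sketch (not compiled): logging a 2-byte read of an indexed member, in
       * the style of the LOG_COMMON_CFG_ACCESS* macros defined in virtioCommonCfgAccessed(). */
    virtioLogMappedIoValue(__FUNCTION__, "uQueueSize", sizeof(uint16_t), pv, cb,
                           0 /* uOffset */, false /* fWrite */, true /* fHasIndex */,
                           pVirtio->uQueueSelect);
#endif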

#endif /* LOG_ENABLED */

/**
 * Makes the MMIO-mapped Virtio uDeviceStatus registers non-cryptic
 */
DECLINLINE(void) virtioLogDeviceStatus(uint8_t bStatus)
{
    if (bStatus == 0)
        Log6(("RESET"));
    else
    {
        int primed = 0;
        if (bStatus & VIRTIO_STATUS_ACKNOWLEDGE)
            Log6(("%sACKNOWLEDGE", primed++ ? "" : ""));
        if (bStatus & VIRTIO_STATUS_DRIVER)
            Log6(("%sDRIVER", primed++ ? " | " : ""));
        if (bStatus & VIRTIO_STATUS_FEATURES_OK)
            Log6(("%sFEATURES_OK", primed++ ? " | " : ""));
        if (bStatus & VIRTIO_STATUS_DRIVER_OK)
            Log6(("%sDRIVER_OK", primed++ ? " | " : ""));
        if (bStatus & VIRTIO_STATUS_FAILED)
            Log6(("%sFAILED", primed++ ? " | " : ""));
        if (bStatus & VIRTIO_STATUS_DEVICE_NEEDS_RESET)
            Log6(("%sNEEDS_RESET", primed++ ? " | " : ""));
        (void)primed;
    }
}

#ifdef IN_RING3
/**
 * Allocate client context so the client can work with a VirtIO-provided queue
 *
 * @param pVirtio   Pointer to the virtio state.
 * @param idxQueue  Queue number
 * @param pcszName  Name to give queue
 *
 * @returns VBox status code.
 */
int virtioR3QueueAttach(PVIRTIOSTATE pVirtio, uint16_t idxQueue, const char *pcszName)
{
    LogFunc(("%s\n", pcszName));
    PVIRTQSTATE pVirtq = &pVirtio->virtqState[idxQueue];
    pVirtq->uAvailIdx = 0;
    pVirtq->uUsedIdx  = 0;
    pVirtq->fEventThresholdReached = false;
    RTStrCopy(pVirtq->szVirtqName, sizeof(pVirtq->szVirtqName), pcszName);
    return VINF_SUCCESS;
}
#endif /* IN_RING3 */

#if 0 /** @todo r=bird: no prototype or docs for this one */
/**
 * See API comments in header file for description
 */
int virtioQueueSkip(PVIRTIOSTATE pVirtio, uint16_t idxQueue)
{
    Assert(idxQueue < RT_ELEMENTS(pVirtio->virtqState));
    PVIRTQSTATE pVirtq = &pVirtio->virtqState[idxQueue];

    AssertMsgReturn(IS_DRIVER_OK(pVirtio) && pVirtio->uQueueEnable[idxQueue],
                    ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);

    if (virtioQueueIsEmpty(pVirtio, idxQueue))
        return VERR_NOT_AVAILABLE;

    Log2Func(("%s avail_idx=%u\n", pVirtq->szVirtqName, pVirtq->uAvailIdx));
    pVirtq->uAvailIdx++;

    return VINF_SUCCESS;
}
#endif

/**
 * Check if the associated queue is empty
 *
 * @param pVirtio   Pointer to the virtio state.
 * @param idxQueue  Queue number
 *
 * @retval true   Queue is empty or unavailable.
 * @retval false  Queue is available and has entries
 */
bool virtioQueueIsEmpty(PVIRTIOSTATE pVirtio, uint16_t idxQueue)
{
    if (pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
        return virtqIsEmpty(pVirtio, idxQueue);
    return true;
}

#ifdef IN_RING3

/**
 * Removes descriptor chain from avail ring of indicated queue and converts the descriptor
 * chain into its OUT (to device) and IN (to guest) components.
 *
 * Additionally it converts the OUT desc chain data to a contiguous virtual
 * memory buffer for easy consumption by the caller. The caller must return the
 * descriptor chain pointer via virtioR3QueuePut() and then call virtioQueueSync()
 * at some point to return the data to the guest and complete the transaction.
 *
 * @param pVirtio      Pointer to the virtio state.
 * @param idxQueue     Queue number
 * @param fRemove      Flags whether to remove desc chain from queue (false = peek)
 * @param ppDescChain  Address to store pointer to descriptor chain that contains the
 *                     pre-processed transaction information pulled from the virtq.
 *
 * @returns VBox status code:
 * @retval VINF_SUCCESS        Success
 * @retval VERR_INVALID_STATE  VirtIO not in ready state (asserted).
 * @retval VERR_NOT_AVAILABLE  If the queue is empty.
 */
int virtioR3QueueGet(PVIRTIOSTATE pVirtio, uint16_t idxQueue, PPVIRTIO_DESC_CHAIN_T ppDescChain, bool fRemove)
{
    AssertReturn(ppDescChain, VERR_INVALID_PARAMETER);

    Assert(idxQueue < RT_ELEMENTS(pVirtio->virtqState));
    PVIRTQSTATE pVirtq = &pVirtio->virtqState[idxQueue];

    PRTSGSEG paSegsIn = (PRTSGSEG)RTMemAlloc(VIRTQ_MAX_SIZE * sizeof(RTSGSEG));
    AssertReturn(paSegsIn, VERR_NO_MEMORY);

    PRTSGSEG paSegsOut = (PRTSGSEG)RTMemAlloc(VIRTQ_MAX_SIZE * sizeof(RTSGSEG));
    AssertReturn(paSegsOut, VERR_NO_MEMORY);

    AssertMsgReturn(IS_DRIVER_OK(pVirtio) && pVirtio->uQueueEnable[idxQueue],
                    ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);

    if (virtqIsEmpty(pVirtio, idxQueue))
        return VERR_NOT_AVAILABLE;

    uint16_t uHeadIdx = virtioReadAvailDescIdx(pVirtio, idxQueue, pVirtq->uAvailIdx);
    uint16_t uDescIdx = uHeadIdx;

    Log3Func(("%s DESC CHAIN: (head) desc_idx=%u [avail_idx=%u]\n", pVirtq->szVirtqName, uHeadIdx, pVirtq->uAvailIdx));

    if (fRemove)
        pVirtq->uAvailIdx++;

    VIRTQ_DESC_T desc;

    uint32_t cbIn = 0, cbOut = 0, cSegsIn = 0, cSegsOut = 0;

    do
    {
        RTSGSEG *pSeg;

        /*
         * Malicious guests may go beyond paSegsIn or paSegsOut boundaries by linking
         * several descriptors into a loop. Since there is no legitimate way to get a sequence of
         * linked descriptors exceeding the total number of descriptors in the ring (see @bugref{8620}),
         * the following aborts I/O if breached and employs a simple log throttling algorithm to notify.
         */
        if (cSegsIn + cSegsOut >= VIRTQ_MAX_SIZE)
        {
            static volatile uint32_t s_cMessages  = 0;
            static volatile uint32_t s_cThreshold = 1;
            if (ASMAtomicIncU32(&s_cMessages) == ASMAtomicReadU32(&s_cThreshold))
            {
                LogRelMax(64, ("Too many linked descriptors; check if the guest arranges descriptors in a loop.\n"));
                if (ASMAtomicReadU32(&s_cMessages) != 1)
                    LogRelMax(64, ("(the above error has occurred %u times so far)\n", ASMAtomicReadU32(&s_cMessages)));
                ASMAtomicWriteU32(&s_cThreshold, ASMAtomicReadU32(&s_cThreshold) * 10);
            }
            break;
        }
        RT_UNTRUSTED_VALIDATED_FENCE();

        virtioReadDesc(pVirtio, idxQueue, uDescIdx, &desc);

        if (desc.fFlags & VIRTQ_DESC_F_WRITE)
        {
            Log3Func(("%s IN  desc_idx=%u seg=%u addr=%RGp cb=%u\n", QUEUENAME(pVirtio, idxQueue), uDescIdx, cSegsIn, desc.GCPhysBuf, desc.cb));
            cbIn += desc.cb;
            pSeg = &(paSegsIn[cSegsIn++]);
        }
        else
        {
            Log3Func(("%s OUT desc_idx=%u seg=%u addr=%RGp cb=%u\n", QUEUENAME(pVirtio, idxQueue), uDescIdx, cSegsOut, desc.GCPhysBuf, desc.cb));
            cbOut += desc.cb;
            pSeg = &(paSegsOut[cSegsOut++]);
        }

        pSeg->pvSeg = (void *)desc.GCPhysBuf;
        pSeg->cbSeg = desc.cb;

        uDescIdx = desc.uDescIdxNext;
    } while (desc.fFlags & VIRTQ_DESC_F_NEXT);

    PRTSGBUF pSgPhysIn = (PRTSGBUF)RTMemAllocZ(sizeof(RTSGBUF));
    AssertReturn(pSgPhysIn, VERR_NO_MEMORY);

    RTSgBufInit(pSgPhysIn, (PCRTSGSEG)paSegsIn, cSegsIn);

    PRTSGBUF pSgPhysOut = (PRTSGBUF)RTMemAllocZ(sizeof(RTSGBUF));
    AssertReturn(pSgPhysOut, VERR_NO_MEMORY);

    RTSgBufInit(pSgPhysOut, (PCRTSGSEG)paSegsOut, cSegsOut);

    PVIRTIO_DESC_CHAIN_T pDescChain = (PVIRTIO_DESC_CHAIN_T)RTMemAllocZ(sizeof(VIRTIO_DESC_CHAIN_T));
    AssertReturn(pDescChain, VERR_NO_MEMORY);

    pDescChain->uHeadIdx      = uHeadIdx;
    pDescChain->cbPhysSend    = cbOut;
    pDescChain->pSgPhysSend   = pSgPhysOut;
    pDescChain->cbPhysReturn  = cbIn;
    pDescChain->pSgPhysReturn = pSgPhysIn;
    *ppDescChain = pDescChain;

    Log3Func(("%s -- segs OUT: %u (%u bytes)   IN: %u (%u bytes) --\n", pVirtq->szVirtqName, cSegsOut, cbOut, cSegsIn, cbIn));

    return VINF_SUCCESS;
}

/**
 * Returns data to the guest to complete a transaction initiated by virtioR3QueueGet().
 *
 * The caller passes in a pointer to a scatter-gather buffer of virtual memory segments
 * and a pointer to the descriptor chain context originally derived from the pulled
 * queue entry, and this function will write the virtual memory s/g buffer into the
 * guest's physical memory and free the descriptor chain. The caller handles the freeing
 * (as needed) of the virtual memory buffer.
 *
 * @note This does a write-ahead to the used ring of the guest's queue. The data
 *       written won't be seen by the guest until the next call to virtioQueueSync()
 *
 *
 * @param pVirtio        Pointer to the virtio state.
 * @param idxQueue       Queue number
 *
 * @param pSgVirtReturn  Points to scatter-gather buffer of virtual memory
 *                       segments the caller is returning to the guest.
 *
 * @param pDescChain     This contains the context of the scatter-gather
 *                       buffer originally pulled from the queue.
 *
 * @param fFence         If true, put up copy fence (memory barrier) after
 *                       copying to guest phys. mem.
 *
 * @returns VBox status code.
 * @retval VINF_SUCCESS        Success
 * @retval VERR_INVALID_STATE  VirtIO not in ready state
 * @retval VERR_NOT_AVAILABLE  Queue is empty
 */
int virtioR3QueuePut(PVIRTIOSTATE pVirtio, uint16_t idxQueue, PRTSGBUF pSgVirtReturn,
                     PVIRTIO_DESC_CHAIN_T pDescChain, bool fFence)
{
    Assert(idxQueue < RT_ELEMENTS(pVirtio->virtqState));
    PVIRTQSTATE pVirtq = &pVirtio->virtqState[idxQueue];
    PRTSGBUF pSgPhysReturn = pDescChain->pSgPhysReturn;

    AssertMsgReturn(IS_DRIVER_OK(pVirtio) /*&& pVirtio->uQueueEnable[idxQueue]*/,
                    ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);

    Log3Func(("Copying client data to %s, desc chain (head desc_idx %d)\n",
              QUEUENAME(pVirtio, idxQueue), virtioReadUsedRingIdx(pVirtio, idxQueue)));

    /*
     * Copy s/g buf (virtual memory) to guest phys mem (IN direction). This virtual memory
     * block will be small (fixed portion of response header + sense buffer area or
     * control commands or error return values)... The bulk of req data xfers to phys mem
     * is handled by client */

    size_t cbCopy = 0;
    size_t cbRemain = RTSgBufCalcTotalLength(pSgVirtReturn);
    RTSgBufReset(pSgPhysReturn); /* Reset ptr because req data may have already been written */
    while (cbRemain)
    {
        PCRTSGSEG paSeg = &pSgPhysReturn->paSegs[pSgPhysReturn->idxSeg];
        uint64_t dstSgStart = (uint64_t)paSeg->pvSeg;
        uint64_t dstSgLen   = (uint64_t)paSeg->cbSeg;
        uint64_t dstSgCur   = (uint64_t)pSgPhysReturn->pvSegCur;
        cbCopy = RT_MIN((uint64_t)pSgVirtReturn->cbSegLeft, dstSgLen - (dstSgCur - dstSgStart));
        PDMDevHlpPhysWrite(pVirtio->CTX_SUFF(pDevIns),
                           (RTGCPHYS)pSgPhysReturn->pvSegCur, pSgVirtReturn->pvSegCur, cbCopy);
        RTSgBufAdvance(pSgVirtReturn, cbCopy);
        RTSgBufAdvance(pSgPhysReturn, cbCopy);
        cbRemain -= cbCopy;
    }

    if (fFence)
        RT_UNTRUSTED_NONVOLATILE_COPY_FENCE(); /* needed? */

    /** If this write-ahead crosses the threshold where the driver wants an event, flag it */
    if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
        if (pVirtq->uUsedIdx == virtioReadAvailUsedEvent(pVirtio, idxQueue))
            pVirtq->fEventThresholdReached = true;

    Assert(!(cbCopy & UINT64_C(0xffffffff00000000)));

    /*
     * Place used buffer's descriptor in used ring but don't update used ring's slot index.
     * That will be done with a subsequent client call to virtioQueueSync() */
    virtioWriteUsedElem(pVirtio, idxQueue, pVirtq->uUsedIdx++, pDescChain->uHeadIdx, (uint32_t)(cbCopy & UINT32_C(0xffffffff)));

    Log2Func((".... Copied %lu bytes to %lu byte buffer, residual=%lu\n",
              cbCopy, pDescChain->cbPhysReturn, pDescChain->cbPhysReturn - cbCopy));

    Log6Func(("Write ahead used_idx=%d, %s used_idx=%d\n",
              pVirtq->uUsedIdx, QUEUENAME(pVirtio, idxQueue), virtioReadUsedRingIdx(pVirtio, idxQueue)));

    RTMemFree((void *)pDescChain->pSgPhysSend->paSegs);
    RTMemFree(pDescChain->pSgPhysSend);
    RTMemFree((void *)pSgPhysReturn->paSegs);
    RTMemFree(pSgPhysReturn);
    RTMemFree(pDescChain);

    return VINF_SUCCESS;
}
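
#if 0 /* Illustrative sketch (not compiled): the get/put/sync transaction cycle described in
       * the doc comments above; 'deviceProcessRequest' is a hypothetical device-specific
       * handler that fills the reply s/g buffer. */
    PVIRTIO_DESC_CHAIN_T pDescChain;
    if (virtioR3QueueGet(pVirtio, idxQueue, &pDescChain, true /* fRemove */) == VINF_SUCCESS)
    {
        RTSGBUF SgBufReply;
        deviceProcessRequest(pDescChain, &SgBufReply);               /* hypothetical */
        virtioR3QueuePut(pVirtio, idxQueue, &SgBufReply, pDescChain, true /* fFence */);
        virtioQueueSync(pVirtio, idxQueue);  /* expose the used ring update to the guest */
    }
#endif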

#endif /* IN_RING3 */

/**
 * Updates the indicated virtq's "used ring" descriptor index to match the
 * current write-head index, thus exposing the data added to the used ring by all
 * virtioR3QueuePut() calls since the last sync. This should be called after one or
 * more virtioR3QueuePut() calls to inform the guest driver there is data in the queue.
 * Explicit notifications (e.g. interrupt or MSI-X) will be sent to the guest,
 * depending on VirtIO features negotiated and conditions, otherwise the guest
 * will detect the update by polling. (see VirtIO 1.0
 * specification, Section 2.4 "Virtqueues").
 *
 * @param pVirtio   Pointer to the virtio state.
 * @param idxQueue  Queue number
 *
 * @returns VBox status code.
 * @retval VINF_SUCCESS        Success
 * @retval VERR_INVALID_STATE  VirtIO not in ready state
 */
int virtioQueueSync(PVIRTIOSTATE pVirtio, uint16_t idxQueue)
{
    Assert(idxQueue < RT_ELEMENTS(pVirtio->virtqState));
    PVIRTQSTATE pVirtq = &pVirtio->virtqState[idxQueue];

    AssertMsgReturn(IS_DRIVER_OK(pVirtio) && pVirtio->uQueueEnable[idxQueue],
                    ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);

    Log6Func(("Updating %s used_idx from %u to %u\n",
              QUEUENAME(pVirtio, idxQueue), virtioReadUsedRingIdx(pVirtio, idxQueue), pVirtq->uUsedIdx));

    virtioWriteUsedRingIdx(pVirtio, idxQueue, pVirtq->uUsedIdx);
    virtioNotifyGuestDriver(pVirtio, idxQueue, false);

    return VINF_SUCCESS;
}

#ifdef IN_RING3
/**
 * Handles a guest notification that the avail ring has entries (see virtioR3MmioWrite).
 */
static void virtior3QueueNotified(PVIRTIOSTATE pVirtio, uint16_t idxQueue, uint16_t uNotifyIdx)
{
    /* See VirtIO 1.0, section 4.1.5.2. It implies that idxQueue and uNotifyIdx should match.
     * Disregarding this notification may cause throughput to stop, however there's no way to know
     * which queue was intended for wake-up if the two parameters disagree. */

    AssertMsg(uNotifyIdx == idxQueue,
              ("Notification param disagreement. Guest kicked virtq %d's notify addr w/non-corresponding virtq idx %d\n",
               idxQueue, uNotifyIdx));

//    AssertMsgReturn(uNotifyIdx == idxQueue,
//                    ("Notification param disagreement. Guest kicked virtq %d's notify addr w/non-corresponding virtq idx %d\n",
//                     idxQueue, uNotifyIdx));
    RT_NOREF(uNotifyIdx);

    PVIRTQSTATE pVirtq = &pVirtio->virtqState[idxQueue];
    Log6Func(("%s\n", pVirtq->szVirtqName));
    RT_NOREF(pVirtq);

    /* Inform client */
    pVirtio->Callbacks.pfnQueueNotified(pVirtio, idxQueue);
}
#endif /* IN_RING3 */

/**
 * Trigger MSI-X or INT# interrupt to notify guest of data added to used ring of
 * the specified virtq, depending on the interrupt configuration of the device
 * and depending on negotiated and realtime constraints flagged by the guest driver.
 *
 * See VirtIO 1.0 specification (section 2.4.7).
 *
 * @param pVirtio   Pointer to the virtio state.
 * @param idxQueue  Queue to check for guest interrupt handling preference
 * @param fForce    Overrides the driver's notification preferences, forcing
 *                  notification regardless. This is a safeguard to prevent
 *                  stalls upon resuming the VM. VirtIO 1.0 specification Section 4.1.5.5
 *                  indicates spurious interrupts are harmless to guest driver's state,
 *                  as they only cause the guest driver to [re]scan queues for work to do.
 */
static void virtioNotifyGuestDriver(PVIRTIOSTATE pVirtio, uint16_t idxQueue, bool fForce)
{
    Assert(idxQueue < RT_ELEMENTS(pVirtio->virtqState));
    PVIRTQSTATE pVirtq = &pVirtio->virtqState[idxQueue];

    AssertMsgReturnVoid(IS_DRIVER_OK(pVirtio), ("Guest driver not in ready state.\n"));
    if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
    {
        if (pVirtq->fEventThresholdReached)
        {
            virtioKick(pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtio->uQueueMsixVector[idxQueue], fForce);
            pVirtq->fEventThresholdReached = false;
            return;
        }
        Log6Func(("...skipping interrupt: VIRTIO_F_EVENT_IDX set but threshold not reached\n"));
    }
    else
    {
        /** If guest driver hasn't suppressed interrupts, interrupt */
        if (fForce || !(virtioReadUsedFlags(pVirtio, idxQueue) & VIRTQ_AVAIL_F_NO_INTERRUPT))
        {
            virtioKick(pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtio->uQueueMsixVector[idxQueue], fForce);
            return;
        }
        Log6Func(("...skipping interrupt. Guest flagged VIRTQ_AVAIL_F_NO_INTERRUPT for queue\n"));
    }
}

/**
 * Raise interrupt or MSI-X
 *
 * @param pVirtio      The device state structure.
 * @param uCause       Interrupt cause bit mask to set in PCI ISR port.
 * @param uMsixVector  MSI-X vector, if enabled
 * @param fForce       True if out-of-band (forced)
 */
static int virtioKick(PVIRTIOSTATE pVirtio, uint8_t uCause, uint16_t uMsixVector, bool fForce)
{
    if (fForce)
        Log6Func(("reason: resumed after suspend\n"));
    else
    if (uCause == VIRTIO_ISR_VIRTQ_INTERRUPT)
        Log6Func(("reason: buffer added to 'used' ring.\n"));
    else
    if (uCause == VIRTIO_ISR_DEVICE_CONFIG)
        Log6Func(("reason: device config change\n"));

    if (!pVirtio->fMsiSupport)
    {
        pVirtio->uISR |= uCause;
        PDMDevHlpPCISetIrq(pVirtio->CTX_SUFF(pDevIns), 0, PDM_IRQ_LEVEL_HIGH);
    }
    else if (uMsixVector != VIRTIO_MSI_NO_VECTOR)
    {
        Log6Func(("MSI-X enabled, calling PDMDevHlpPCISetIrq with vector: 0x%x\n", uMsixVector));
        PDMDevHlpPCISetIrq(pVirtio->CTX_SUFF(pDevIns), uMsixVector, 1);
    }
    return VINF_SUCCESS;
}

/**
 * Lower interrupt. (Called when guest reads ISR)
 *
 * @param pVirtio  The device state structure.
 */
static void virtioLowerInterrupt(PVIRTIOSTATE pVirtio)
{
    PDMDevHlpPCISetIrq(pVirtio->CTX_SUFF(pDevIns), 0, PDM_IRQ_LEVEL_LOW);
}

static void virtioResetQueue(PVIRTIOSTATE pVirtio, uint16_t idxQueue)
{
    Assert(idxQueue < RT_ELEMENTS(pVirtio->virtqState));
    PVIRTQSTATE pVirtQ = &pVirtio->virtqState[idxQueue];
    pVirtQ->uAvailIdx = 0;
    pVirtQ->uUsedIdx  = 0;
    pVirtio->uQueueEnable[idxQueue] = false;
    pVirtio->uQueueSize[idxQueue] = VIRTQ_MAX_SIZE;
    pVirtio->uQueueNotifyOff[idxQueue] = idxQueue;

    pVirtio->uQueueMsixVector[idxQueue] = idxQueue + 2;
    if (!pVirtio->fMsiSupport) /* VirtIO 1.0, 4.1.4.3 and 4.1.5.1.2 */
        pVirtio->uQueueMsixVector[idxQueue] = VIRTIO_MSI_NO_VECTOR;
}

static void virtioResetDevice(PVIRTIOSTATE pVirtio)
{
    Log2Func(("\n"));
    pVirtio->uDeviceFeaturesSelect = 0;
    pVirtio->uDriverFeaturesSelect = 0;
    pVirtio->uConfigGeneration     = 0;
    pVirtio->uDeviceStatus         = 0;
    pVirtio->uISR                  = 0;

    virtioLowerInterrupt(pVirtio);

    if (!pVirtio->fMsiSupport) /* VirtIO 1.0, 4.1.4.3 and 4.1.5.1.2 */
        pVirtio->uMsixConfig = VIRTIO_MSI_NO_VECTOR;

    pVirtio->uNumQueues = VIRTQ_MAX_CNT;
    for (uint16_t idxQueue = 0; idxQueue < pVirtio->uNumQueues; idxQueue++)
        virtioResetQueue(pVirtio, idxQueue);
}

#if 0 /** @todo r=bird: Probably not needed. */
/**
 * Enable or disable queue
 *
 * @param pVirtio   Pointer to the virtio state.
 * @param idxQueue  Queue number
 * @param fEnabled  Flag indicating whether to enable queue or not
 */
void virtioQueueEnable(PVIRTIOSTATE pVirtio, uint16_t idxQueue, bool fEnabled)
{
    if (fEnabled)
        pVirtio->uQueueSize[idxQueue] = VIRTQ_MAX_SIZE;
    else
        pVirtio->uQueueSize[idxQueue] = 0;
}
#endif

#if 0 /** @todo r=bird: This isn't invoked by anyone. Why? */
/**
 * Initiate orderly reset procedure.
 * Invoked by client to reset the device and driver (see VirtIO 1.0 section 2.1.1/2.1.2)
 */
void virtioResetAll(PVIRTIOSTATE pVirtio)
{
    LogFunc(("VIRTIO RESET REQUESTED!!!\n"));
    pVirtio->uDeviceStatus |= VIRTIO_STATUS_DEVICE_NEEDS_RESET;
    if (pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
    {
        pVirtio->fGenUpdatePending = true;
        virtioKick(pVirtio, VIRTIO_ISR_DEVICE_CONFIG, pVirtio->uMsixConfig, false /* fForce */);
    }
}
#endif

#ifdef IN_RING3
/**
 * Invoked by this implementation when the guest driver resets the device.
 * The driver itself will not proceed until the device has read the status change.
 */
static void virtioGuestR3Resetted(PVIRTIOSTATE pVirtio)
{
    LogFunc(("Guest reset the device\n"));

    /* Let the client know */
    pVirtio->Callbacks.pfnStatusChanged(pVirtio, 0);
    virtioResetDevice(pVirtio);
}
#endif /* IN_RING3 */

/**
 * Handle accesses to Common Configuration capability
 *
 * @returns VBox status code
 *
 * @param pVirtio  Virtio instance state
 * @param fWrite   Set if write access, clear if read access.
 * @param offCfg   The common configuration capability offset.
 * @param cb       Number of bytes to read or write
 * @param pv       Pointer to location to write to or read from
 */
static int virtioCommonCfgAccessed(PVIRTIOSTATE pVirtio, int fWrite, off_t offCfg, unsigned cb, void *pv)
{
/**
 * This macro resolves to boolean true if the implied parameters, offCfg and cb,
 * match the field offset and size of a field in the Common Cfg struct (or, if it is
 * a 64-bit field, if the access covers either 32-bit part of it as a 32-bit access).
 * This is mandated by section 4.1.3.1 of the VirtIO 1.0 specification.
 *
 * @param member  Member of VIRTIO_PCI_COMMON_CFG_T
 * @param offCfg  Implied parameter: Offset into VIRTIO_PCI_COMMON_CFG_T
 * @param cb      Implied parameter: Number of bytes to access
 * @result true or false
 */
#define MATCH_COMMON_CFG(member) \
    (   (   RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member) == 8 \
         && (   offCfg == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member) \
             || offCfg == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member) + sizeof(uint32_t)) \
         && cb == sizeof(uint32_t)) \
     || (   offCfg == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member) \
         && cb == RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member)) )

#ifdef LOG_ENABLED
# define LOG_COMMON_CFG_ACCESS(member, a_offIntra) \
    virtioLogMappedIoValue(__FUNCTION__, #member, RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member), \
                           pv, cb, a_offIntra, fWrite, false, 0);
# define LOG_COMMON_CFG_ACCESS_INDEXED(member, idx, a_offIntra) \
    virtioLogMappedIoValue(__FUNCTION__, #member, RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member), \
                           pv, cb, a_offIntra, fWrite, true, idx);
#else
# define LOG_COMMON_CFG_ACCESS(member, a_offIntra)              do { } while (0)
# define LOG_COMMON_CFG_ACCESS_INDEXED(member, idx, a_offIntra) do { } while (0)
#endif

#define COMMON_CFG_ACCESSOR(member) \
    do \
    { \
        uint32_t offIntra = offCfg - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
        if (fWrite) \
            memcpy((char *)&pVirtio->member + offIntra, (const char *)pv, cb); \
        else \
            memcpy(pv, (const char *)&pVirtio->member + offIntra, cb); \
        LOG_COMMON_CFG_ACCESS(member, offIntra); \
    } while(0)

#define COMMON_CFG_ACCESSOR_INDEXED(member, idx) \
    do \
    { \
        uint32_t offIntra = offCfg - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
        if (fWrite) \
            memcpy((char *)&pVirtio->member[idx] + offIntra, pv, cb); \
        else \
            memcpy(pv, (const char *)&pVirtio->member[idx] + offIntra, cb); \
        LOG_COMMON_CFG_ACCESS_INDEXED(member, idx, offIntra); \
    } while(0)

#define COMMON_CFG_ACCESSOR_READONLY(member) \
    do \
    { \
        uint32_t offIntra = offCfg - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
        if (fWrite) \
            LogFunc(("Guest attempted to write readonly virtio_pci_common_cfg.%s\n", #member)); \
        else \
        { \
            memcpy(pv, (const char *)&pVirtio->member + offIntra, cb); \
            LOG_COMMON_CFG_ACCESS(member, offIntra); \
        } \
    } while(0)

#define COMMON_CFG_ACCESSOR_INDEXED_READONLY(member, idx) \
    do \
    { \
        uint32_t offIntra = offCfg - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
        if (fWrite) \
            LogFunc(("Guest attempted to write readonly virtio_pci_common_cfg.%s[%d]\n", #member, idx)); \
        else \
        { \
            memcpy(pv, (char const *)&pVirtio->member[idx] + offIntra, cb); \
            LOG_COMMON_CFG_ACCESS_INDEXED(member, idx, offIntra); \
        } \
    } while(0)


    int rc = VINF_SUCCESS;
    uint64_t val;
    if (MATCH_COMMON_CFG(uDeviceFeatures))
    {
        if (fWrite) /* Guest WRITE pCommonCfg->uDeviceFeatures */
        {
            LogFunc(("Guest attempted to write readonly virtio_pci_common_cfg.device_feature\n"));
            return VINF_SUCCESS;
        }
        else /* Guest READ pCommonCfg->uDeviceFeatures */
        {
            switch (pVirtio->uDeviceFeaturesSelect)
            {
                case 0:
                    val = pVirtio->uDeviceFeatures & UINT32_C(0xffffffff);
                    memcpy(pv, &val, cb);
                    LOG_COMMON_CFG_ACCESS(uDeviceFeatures, offCfg - RT_UOFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uDeviceFeatures));
                    break;
                case 1:
                    val = pVirtio->uDeviceFeatures >> 32;
                    memcpy(pv, &val, cb);
                    LOG_COMMON_CFG_ACCESS(uDeviceFeatures, offCfg - RT_UOFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uDeviceFeatures) + 4);
                    break;
                default:
                    LogFunc(("Guest read uDeviceFeatures with out of range selector (%#x), returning 0\n",
                             pVirtio->uDeviceFeaturesSelect));
                    return VINF_IOM_MMIO_UNUSED_00;
            }
        }
    }
    else if (MATCH_COMMON_CFG(uDriverFeatures))
    {
        if (fWrite) /* Guest WRITE pCommonCfg->uDriverFeatures */
        {
            switch (pVirtio->uDriverFeaturesSelect)
            {
                case 0:
                    memcpy(&pVirtio->uDriverFeatures, pv, cb);
                    LOG_COMMON_CFG_ACCESS(uDriverFeatures, offCfg - RT_UOFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uDriverFeatures));
                    break;
                case 1:
                    memcpy((char *)&pVirtio->uDriverFeatures + sizeof(uint32_t), pv, cb);
                    LOG_COMMON_CFG_ACCESS(uDriverFeatures, offCfg - RT_UOFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uDriverFeatures) + 4);
                    break;
                default:
                    LogFunc(("Guest wrote uDriverFeatures with out of range selector (%#x), returning 0\n",
                             pVirtio->uDriverFeaturesSelect));
                    return VINF_SUCCESS;
            }
        }
        else /* Guest READ pCommonCfg->uDriverFeatures */
        {
            switch (pVirtio->uDriverFeaturesSelect)
            {
                case 0:
                    val = pVirtio->uDriverFeatures & 0xffffffff;
                    memcpy(pv, &val, cb);
                    LOG_COMMON_CFG_ACCESS(uDriverFeatures, offCfg - RT_UOFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uDriverFeatures));
                    break;
                case 1:
                    val = (pVirtio->uDriverFeatures >> 32) & 0xffffffff;
                    memcpy(pv, &val, cb);
                    LOG_COMMON_CFG_ACCESS(uDriverFeatures, offCfg - RT_UOFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uDriverFeatures) + 4);
                    break;
                default:
                    LogFunc(("Guest read uDriverFeatures with out of range selector (%#x), returning 0\n",
                             pVirtio->uDriverFeaturesSelect));
                    return VINF_IOM_MMIO_UNUSED_00;
            }
        }
    }
    else if (MATCH_COMMON_CFG(uNumQueues))
    {
        if (fWrite)
        {
            Log2Func(("Guest attempted to write readonly virtio_pci_common_cfg.num_queues\n"));
            return VINF_SUCCESS;
        }
        else
        {
            *(uint16_t *)pv = VIRTQ_MAX_CNT;
            LOG_COMMON_CFG_ACCESS(uNumQueues, 0);
        }
    }
    else if (MATCH_COMMON_CFG(uDeviceStatus))
    {
        if (fWrite) /* Guest WRITE pCommonCfg->uDeviceStatus */
        {
            pVirtio->uDeviceStatus = *(uint8_t *)pv;
            Log6Func(("Guest wrote uDeviceStatus ................ ("));
            virtioLogDeviceStatus(pVirtio->uDeviceStatus);
            Log6((")\n"));
            if (pVirtio->uDeviceStatus == 0)
                virtioGuestR3Resetted(pVirtio);
            /*
             * Notify client only if status actually changed from last time.
             */
            uint32_t const fOkayNow = pVirtio->uDeviceStatus     & VIRTIO_STATUS_DRIVER_OK;
            uint32_t const fWasOkay = pVirtio->uPrevDeviceStatus & VIRTIO_STATUS_DRIVER_OK;
            if (fOkayNow != fWasOkay)
                pVirtio->Callbacks.pfnStatusChanged(pVirtio, fOkayNow);
            pVirtio->uPrevDeviceStatus = pVirtio->uDeviceStatus;
        }
        else /* Guest READ pCommonCfg->uDeviceStatus */
        {
            Log6Func(("Guest read uDeviceStatus ................ ("));
            *(uint32_t *)pv = pVirtio->uDeviceStatus; /** @todo r=bird: Why 32-bit write here, the field is 8-bit? */
            virtioLogDeviceStatus(pVirtio->uDeviceStatus);
            Log6((")\n"));
        }
    }
    else
    if (MATCH_COMMON_CFG(uMsixConfig))
        COMMON_CFG_ACCESSOR(uMsixConfig);
    else
    if (MATCH_COMMON_CFG(uDeviceFeaturesSelect))
        COMMON_CFG_ACCESSOR(uDeviceFeaturesSelect);
    else
    if (MATCH_COMMON_CFG(uDriverFeaturesSelect))
        COMMON_CFG_ACCESSOR(uDriverFeaturesSelect);
    else
    if (MATCH_COMMON_CFG(uConfigGeneration))
        COMMON_CFG_ACCESSOR_READONLY(uConfigGeneration);
    else
    if (MATCH_COMMON_CFG(uQueueSelect))
        COMMON_CFG_ACCESSOR(uQueueSelect);
    else
    if (MATCH_COMMON_CFG(uQueueSize))
        COMMON_CFG_ACCESSOR_INDEXED(uQueueSize, pVirtio->uQueueSelect);
    else
    if (MATCH_COMMON_CFG(uQueueMsixVector))
        COMMON_CFG_ACCESSOR_INDEXED(uQueueMsixVector, pVirtio->uQueueSelect);
    else
    if (MATCH_COMMON_CFG(uQueueEnable))
        COMMON_CFG_ACCESSOR_INDEXED(uQueueEnable, pVirtio->uQueueSelect);
    else
    if (MATCH_COMMON_CFG(uQueueNotifyOff))
        COMMON_CFG_ACCESSOR_INDEXED_READONLY(uQueueNotifyOff, pVirtio->uQueueSelect);
    else
    if (MATCH_COMMON_CFG(aGCPhysQueueDesc))
        COMMON_CFG_ACCESSOR_INDEXED(aGCPhysQueueDesc, pVirtio->uQueueSelect);
    else
    if (MATCH_COMMON_CFG(aGCPhysQueueAvail))
        COMMON_CFG_ACCESSOR_INDEXED(aGCPhysQueueAvail, pVirtio->uQueueSelect);
    else
    if (MATCH_COMMON_CFG(aGCPhysQueueUsed))
        COMMON_CFG_ACCESSOR_INDEXED(aGCPhysQueueUsed, pVirtio->uQueueSelect);
    else
    {
        Log2Func(("Bad guest %s access to virtio_pci_common_cfg: offCfg=%#x (%d), cb=%d\n",
                  fWrite ? "write" : "read ", offCfg, offCfg, cb));
        return fWrite ? VINF_SUCCESS : VINF_IOM_MMIO_UNUSED_00;
    }

#undef COMMON_CFG_ACCESSOR_READONLY
#undef COMMON_CFG_ACCESSOR_INDEXED_READONLY
#undef COMMON_CFG_ACCESSOR_INDEXED
#undef COMMON_CFG_ACCESSOR
#undef LOG_COMMON_CFG_ACCESS_INDEXED
#undef LOG_COMMON_CFG_ACCESS
#undef MATCH_COMMON_CFG
    return rc;
}

/**
 * Memory mapped I/O Handler for PCI Capabilities read operations.
 *
 * @returns VBox status code.
 *
 * @param pDevIns     The device instance.
 * @param pvUser      User argument.
 * @param GCPhysAddr  Physical address (in GC) where the read starts.
 * @param pv          Where to store the result.
 * @param cb          Number of bytes read.
 */
PDMBOTHCBDECL(int) virtioR3MmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
{
    PVIRTIOSTATE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOSTATE);
    Assert(pVirtio == (PVIRTIOSTATE)pvUser); RT_NOREF(pvUser);
    int rc = VINF_SUCCESS;

    MATCH_VIRTIO_CAP_STRUCT(pVirtio->GCPhysDeviceCap, pVirtio->pDeviceCap,    fDevSpecific);
    MATCH_VIRTIO_CAP_STRUCT(pVirtio->GCPhysCommonCfg, pVirtio->pCommonCfgCap, fCommonCfg);
    MATCH_VIRTIO_CAP_STRUCT(pVirtio->GCPhysIsrCap,    pVirtio->pIsrCap,       fIsr);

    if (fDevSpecific)
    {
        uint32_t uOffset = GCPhysAddr - pVirtio->GCPhysDeviceCap;
        /*
         * Callback to client to manage device-specific configuration.
         */
        rc = pVirtio->Callbacks.pfnDevCapRead(pDevIns, uOffset, pv, cb);

        /*
         * Additionally, anytime any part of the device-specific configuration (which our client maintains)
         * is READ it needs to be checked to see if it changed since the last time any part was read, in
         * order to maintain the config generation (see VirtIO 1.0 spec, section 4.1.4.3.1)
         */
        bool fDevSpecificFieldChanged = !!memcmp((char *)pVirtio->pvDevSpecificCfg + uOffset,
                                                 (char *)pVirtio->pvPrevDevSpecificCfg + uOffset, cb);

        memcpy(pVirtio->pvPrevDevSpecificCfg, pVirtio->pvDevSpecificCfg, pVirtio->cbDevSpecificCfg);

        if (pVirtio->fGenUpdatePending || fDevSpecificFieldChanged)
        {
            ++pVirtio->uConfigGeneration;
            Log6Func(("Bumped cfg. generation to %d because %s%s\n",
                      pVirtio->uConfigGeneration,
                      fDevSpecificFieldChanged ? "<dev cfg changed> " : "",
                      pVirtio->fGenUpdatePending ? "<update was pending>" : ""));
            pVirtio->fGenUpdatePending = false;
        }
    }
    else
    if (fCommonCfg)
    {
        uint32_t uOffset = GCPhysAddr - pVirtio->GCPhysCommonCfg;
        rc = virtioCommonCfgAccessed(pVirtio, false /* fWrite */, uOffset, cb, pv);
    }
    else
    if (fIsr && cb == sizeof(uint8_t))
    {
        *(uint8_t *)pv = pVirtio->uISR;
        Log6Func(("Read and clear ISR\n"));
        pVirtio->uISR = 0; /* VirtIO specification requires reads of ISR to clear it */
        virtioLowerInterrupt(pVirtio);
    }
    else
    {
        LogFunc(("Bad read access to mapped capabilities region:\n"
                 "    pVirtio=%#p GCPhysAddr=%RGp cb=%u\n",
                 pVirtio, GCPhysAddr, cb));
        return VINF_IOM_MMIO_UNUSED_00;
    }
    return rc;
}

/**
 * Memory mapped I/O Handler for PCI Capabilities write operations.
 *
 * @returns VBox status code.
 *
 * @param pDevIns     The device instance.
 * @param pvUser      User argument.
 * @param GCPhysAddr  Physical address (in GC) where the write starts.
 * @param pv          Where to fetch the data to write.
 * @param cb          Number of bytes to write.
 */
PDMBOTHCBDECL(int) virtioR3MmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
{
    PVIRTIOSTATE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOSTATE);
    Assert(pVirtio == (PVIRTIOSTATE)pvUser); RT_NOREF(pvUser);

    MATCH_VIRTIO_CAP_STRUCT(pVirtio->GCPhysDeviceCap, pVirtio->pDeviceCap,    fDevSpecific);
    MATCH_VIRTIO_CAP_STRUCT(pVirtio->GCPhysCommonCfg, pVirtio->pCommonCfgCap, fCommonCfg);
    MATCH_VIRTIO_CAP_STRUCT(pVirtio->GCPhysIsrCap,    pVirtio->pIsrCap,       fIsr);
    MATCH_VIRTIO_CAP_STRUCT(pVirtio->GCPhysNotifyCap, pVirtio->pNotifyCap,    fNotify);

    if (fDevSpecific)
    {
        uint32_t uOffset = GCPhysAddr - pVirtio->GCPhysDeviceCap;
        /*
         * Pass this MMIO write access back to the client to handle
         */
        (void)pVirtio->Callbacks.pfnDevCapWrite(pDevIns, uOffset, pv, cb);
    }
    else
    if (fCommonCfg)
    {
        uint32_t uOffset = GCPhysAddr - pVirtio->GCPhysCommonCfg;
        (void)virtioCommonCfgAccessed(pVirtio, true /* fWrite */, uOffset, cb, (void *)pv);
    }
    else
    if (fIsr && cb == sizeof(uint8_t))
    {
        pVirtio->uISR = *(uint8_t *)pv;
        Log6Func(("Setting uISR = 0x%02x (virtq interrupt: %d, dev config interrupt: %d)\n",
                  pVirtio->uISR & 0xff,
                  pVirtio->uISR & VIRTIO_ISR_VIRTQ_INTERRUPT,
                  RT_BOOL(pVirtio->uISR & VIRTIO_ISR_DEVICE_CONFIG)));
    }
    else
    /* This *should* be guest driver dropping index of a new descriptor in avail ring */
    if (fNotify && cb == sizeof(uint16_t))
    {
        uint32_t uNotifyBaseOffset = GCPhysAddr - pVirtio->GCPhysNotifyCap;
        uint16_t idxQueue = uNotifyBaseOffset / VIRTIO_NOTIFY_OFFSET_MULTIPLIER;
        uint16_t uAvailDescIdx = *(uint16_t *)pv;

        virtior3QueueNotified(pVirtio, idxQueue, uAvailDescIdx);
    }
    else
    {
        Log2Func(("Bad write access to mapped capabilities region:\n"
                  "    pVirtio=%#p GCPhysAddr=%RGp pv=%#p{%.*Rhxs} cb=%u\n",
                  pVirtio, GCPhysAddr, pv, cb, pv, cb));
    }
    return VINF_SUCCESS;
}

#ifdef IN_RING3

/**
 * @callback_method_impl{FNPCIIOREGIONMAP}
 */
static DECLCALLBACK(int) virtioR3Map(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t iRegion,
                                     RTGCPHYS GCPhysAddress, RTGCPHYS cb, PCIADDRESSSPACE enmType)
{
    PVIRTIOSTATE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOSTATE);
    int rc = VINF_SUCCESS;

    Assert(pPciDev == pDevIns->apPciDevs[0]);
    Assert(cb >= 32);
    RT_NOREF3(pPciDev, iRegion, enmType);

    if (iRegion == VIRTIO_REGION_PCI_CAP)
    {
        /* We use the assigned size here, because we currently only support page aligned MMIO ranges. */
        rc = PDMDevHlpMMIORegister(pDevIns, GCPhysAddress, cb, pVirtio,
                                   IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
                                   virtioR3MmioWrite, virtioR3MmioRead,
                                   "virtio-scsi MMIO");

        if (RT_FAILURE(rc))
        {
            Log2Func(("virtio: PCI Capabilities failed to map GCPhysAddr=%RGp cb=%RGp, region=%d\n", GCPhysAddress, cb, iRegion));
            return rc;
        }
        Log2Func(("virtio: PCI Capabilities mapped at GCPhysAddr=%RGp cb=%RGp, region=%d\n", GCPhysAddress, cb, iRegion));
        pVirtio->GCPhysPciCapBase = GCPhysAddress;
        pVirtio->GCPhysCommonCfg  = GCPhysAddress + pVirtio->pCommonCfgCap->uOffset;
        pVirtio->GCPhysNotifyCap  = GCPhysAddress + pVirtio->pNotifyCap->pciCap.uOffset;
        pVirtio->GCPhysIsrCap     = GCPhysAddress + pVirtio->pIsrCap->uOffset;
        if (pVirtio->pvPrevDevSpecificCfg)
            pVirtio->GCPhysDeviceCap = GCPhysAddress + pVirtio->pDeviceCap->uOffset;
    }
    return rc;
}

/**
 * @callback_method_impl{FNPCICONFIGREAD}
 */
static DECLCALLBACK(VBOXSTRICTRC) virtioR3PciConfigRead(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
                                                        uint32_t uAddress, unsigned cb, uint32_t *pu32Value)
{
    PVIRTIOSTATE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOSTATE);
    RT_NOREF(pPciDev);

    LogFlowFunc(("pDevIns=%p pPciDev=%p uAddress=%#x cb=%u pu32Value=%p\n",
                 pDevIns, pPciDev, uAddress, cb, pu32Value));
    if (uAddress == pVirtio->uPciCfgDataOff)
    {
        /*
         * VirtIO 1.0 spec section 4.1.4.7 describes a required alternative access capability
         * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space
         * (the virtio_pci_cfg_cap capability), and access data items.
         */
        uint32_t uLength = pVirtio->pPciCfgCap->pciCap.uLength;
        uint32_t uOffset = pVirtio->pPciCfgCap->pciCap.uOffset;
        uint8_t  uBar    = pVirtio->pPciCfgCap->pciCap.uBar;

        if (   (uLength != 1 && uLength != 2 && uLength != 4)
            || cb != uLength
            || uBar != VIRTIO_REGION_PCI_CAP)
        {
            Log2Func(("Guest read virtio_pci_cfg_cap.pci_cfg_data using mismatching config. Ignoring\n"));
            *pu32Value = UINT32_MAX;
            return VINF_SUCCESS;
        }

        int rc = virtioR3MmioRead(pDevIns, pVirtio, pVirtio->GCPhysPciCapBase + uOffset, pu32Value, cb);
        Log2Func(("virtio: Guest read virtio_pci_cfg_cap.pci_cfg_data, bar=%d, offset=%d, length=%d, result=%d -> %Rrc\n",
                  uBar, uOffset, uLength, *pu32Value, rc));
        return rc;
    }
    return VINF_PDM_PCI_DO_DEFAULT;
}

/**
 * @callback_method_impl{FNPCICONFIGWRITE}
 */
static DECLCALLBACK(VBOXSTRICTRC) virtioR3PciConfigWrite(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
                                                         uint32_t uAddress, unsigned cb, uint32_t u32Value)
{
    PVIRTIOSTATE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOSTATE);
    RT_NOREF(pPciDev);

    LogFlowFunc(("pDevIns=%p pPciDev=%p uAddress=%#x cb=%u u32Value=%#x\n", pDevIns, pPciDev, uAddress, cb, u32Value));
    if (uAddress == pVirtio->uPciCfgDataOff)
    {
        /* VirtIO 1.0 spec section 4.1.4.7 describes a required alternative access capability
         * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space
         * (the virtio_pci_cfg_cap capability), and access data items. */

        uint32_t uLength = pVirtio->pPciCfgCap->pciCap.uLength;
        uint32_t uOffset = pVirtio->pPciCfgCap->pciCap.uOffset;
        uint8_t  uBar    = pVirtio->pPciCfgCap->pciCap.uBar;

        if (   (uLength != 1 && uLength != 2 && uLength != 4)
            || cb != uLength
            || uBar != VIRTIO_REGION_PCI_CAP)
        {
            Log2Func(("Guest write virtio_pci_cfg_cap.pci_cfg_data using mismatching config. Ignoring\n"));
            return VINF_SUCCESS;
        }

        int rc = virtioR3MmioWrite(pDevIns, pVirtio, pVirtio->GCPhysPciCapBase + uOffset, &u32Value, cb);
        Log2Func(("Guest wrote virtio_pci_cfg_cap.pci_cfg_data, bar=%d, offset=%x, length=%x, value=%d -> %Rrc\n",
                  uBar, uOffset, uLength, u32Value, rc));
        return rc;
    }
    return VINF_PDM_PCI_DO_DEFAULT;
}
1421
1422
1423/*********************************************************************************************************************************
1424* Saved state. *
1425*********************************************************************************************************************************/
1426
1427/**
1428 * Called from the FNSSMDEVSAVEEXEC function of the device.
1429 *
1430 * @param pVirtio Pointer to the virtio state.
1431 * @param pHlp The ring-3 device helpers.
1432 * @param pSSM The saved state handle.
1433 * @returns VBox status code.
1434 */
1435int virtioR3SaveExec(PVIRTIOSTATE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
1436{
1437 pHlp->pfnSSMPutU64(pSSM, VIRTIO_SAVEDSTATE_MARKER);
1438 pHlp->pfnSSMPutU32(pSSM, VIRTIO_SAVEDSTATE_VERSION);
1439
1440 pHlp->pfnSSMPutBool(pSSM, pVirtio->fGenUpdatePending);
1441 pHlp->pfnSSMPutU8(pSSM, pVirtio->uDeviceStatus);
1442 pHlp->pfnSSMPutU8(pSSM, pVirtio->uConfigGeneration);
1443 pHlp->pfnSSMPutU8(pSSM, pVirtio->uPciCfgDataOff);
1444 pHlp->pfnSSMPutU8(pSSM, pVirtio->uISR);
1445 pHlp->pfnSSMPutU16(pSSM, pVirtio->uQueueSelect);
1446 pHlp->pfnSSMPutU32(pSSM, pVirtio->uDeviceFeaturesSelect);
1447 pHlp->pfnSSMPutU32(pSSM, pVirtio->uDriverFeaturesSelect);
1448 pHlp->pfnSSMPutU64(pSSM, pVirtio->uDriverFeatures);
1449 Assert(pVirtio->uNumQueues == VIRTQ_MAX_CNT); /** @todo r=bird: See todo in struct & virtioR3LoadExec. */
1450 pHlp->pfnSSMPutU32(pSSM, pVirtio->uNumQueues);
1451
1452 for (uint32_t i = 0; i < pVirtio->uNumQueues; i++)
1453 {
1454 pHlp->pfnSSMPutGCPhys64(pSSM, pVirtio->aGCPhysQueueDesc[i]);
1455 pHlp->pfnSSMPutGCPhys64(pSSM, pVirtio->aGCPhysQueueAvail[i]);
1456 pHlp->pfnSSMPutGCPhys64(pSSM, pVirtio->aGCPhysQueueUsed[i]);
1457 pHlp->pfnSSMPutU16(pSSM, pVirtio->uQueueNotifyOff[i]);
1458 pHlp->pfnSSMPutU16(pSSM, pVirtio->uQueueMsixVector[i]);
1459 pHlp->pfnSSMPutU16(pSSM, pVirtio->uQueueEnable[i]);
1460 pHlp->pfnSSMPutU16(pSSM, pVirtio->uQueueSize[i]);
1461 pHlp->pfnSSMPutU16(pSSM, pVirtio->virtqState[i].uAvailIdx);
1462 pHlp->pfnSSMPutU16(pSSM, pVirtio->virtqState[i].uUsedIdx);
1463        int rc = pHlp->pfnSSMPutMem(pSSM, pVirtio->virtqState[i].szVirtqName, sizeof(pVirtio->virtqState[i].szVirtqName));
1464 AssertRCReturn(rc, rc);
1465 }
1466
1467 return VINF_SUCCESS;
1468}
1469
1470/**
1471 * Called from the FNSSMDEVLOADEXEC function of the device.
1472 *
1473 * @param pVirtio Pointer to the virtio state.
1474 * @param pHlp The ring-3 device helpers.
1475 * @param pSSM The saved state handle.
1476 * @returns VBox status code.
1477 */
1478int virtioR3LoadExec(PVIRTIOSTATE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
1479{
1480 /*
1481 * Check the marker and (embedded) version number.
1482 */
1483 uint64_t uMarker = 0;
1484 int rc = pHlp->pfnSSMGetU64(pSSM, &uMarker);
1485 AssertRCReturn(rc, rc);
1486 if (uMarker != VIRTIO_SAVEDSTATE_MARKER)
1487 return pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
1488                                        N_("Expected marker value %#RX64, found %#RX64 instead"),
1489 VIRTIO_SAVEDSTATE_MARKER, uMarker);
1490 uint32_t uVersion = 0;
1491 rc = pHlp->pfnSSMGetU32(pSSM, &uVersion);
1492 AssertRCReturn(rc, rc);
1493 if (uVersion != VIRTIO_SAVEDSTATE_VERSION)
1494 return pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
1495 N_("Unsupported virtio version: %u"), uVersion);
1496
1497 /*
1498 * Load the state.
1499 */
1500 pHlp->pfnSSMGetBool(pSSM, &pVirtio->fGenUpdatePending);
1501 pHlp->pfnSSMGetU8(pSSM, &pVirtio->uDeviceStatus);
1502 pHlp->pfnSSMGetU8(pSSM, &pVirtio->uConfigGeneration);
1503 pHlp->pfnSSMGetU8(pSSM, &pVirtio->uPciCfgDataOff);
1504 pHlp->pfnSSMGetU8(pSSM, &pVirtio->uISR);
1505 pHlp->pfnSSMGetU16(pSSM, &pVirtio->uQueueSelect);
1506 pHlp->pfnSSMGetU32(pSSM, &pVirtio->uDeviceFeaturesSelect);
1507 pHlp->pfnSSMGetU32(pSSM, &pVirtio->uDriverFeaturesSelect);
1508 pHlp->pfnSSMGetU64(pSSM, &pVirtio->uDriverFeatures);
1509
1510 /* Make sure the queue count is within expectations. */
1511 /** @todo r=bird: Turns out the expectations are exactly VIRTQ_MAX_CNT, bug? */
1512 rc = pHlp->pfnSSMGetU32(pSSM, &pVirtio->uNumQueues);
1513 AssertRCReturn(rc, rc);
1514 AssertReturn(pVirtio->uNumQueues == VIRTQ_MAX_CNT,
1515 pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
1516                                              N_("Saved queue count %u, expected %u"), pVirtio->uNumQueues, VIRTQ_MAX_CNT));
1517 AssertCompile(RT_ELEMENTS(pVirtio->virtqState) == VIRTQ_MAX_CNT);
1518 AssertCompile(RT_ELEMENTS(pVirtio->aGCPhysQueueDesc) == VIRTQ_MAX_CNT);
1519
1520 for (uint32_t idxQueue = 0; idxQueue < pVirtio->uNumQueues; idxQueue++)
1521 {
1522 pHlp->pfnSSMGetGCPhys64(pSSM, &pVirtio->aGCPhysQueueDesc[idxQueue]);
1523 pHlp->pfnSSMGetGCPhys64(pSSM, &pVirtio->aGCPhysQueueAvail[idxQueue]);
1524 pHlp->pfnSSMGetGCPhys64(pSSM, &pVirtio->aGCPhysQueueUsed[idxQueue]);
1525 pHlp->pfnSSMGetU16(pSSM, &pVirtio->uQueueNotifyOff[idxQueue]);
1526 pHlp->pfnSSMGetU16(pSSM, &pVirtio->uQueueMsixVector[idxQueue]);
1527 pHlp->pfnSSMGetU16(pSSM, &pVirtio->uQueueEnable[idxQueue]);
1528 pHlp->pfnSSMGetU16(pSSM, &pVirtio->uQueueSize[idxQueue]);
1529 pHlp->pfnSSMGetU16(pSSM, &pVirtio->virtqState[idxQueue].uAvailIdx);
1530 pHlp->pfnSSMGetU16(pSSM, &pVirtio->virtqState[idxQueue].uUsedIdx);
1531 pHlp->pfnSSMGetMem(pSSM, pVirtio->virtqState[idxQueue].szVirtqName, sizeof(pVirtio->virtqState[idxQueue].szVirtqName));
1532 }
1533
1534 return VINF_SUCCESS;
1535}
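
/*
 * Illustrative sketch (disabled): how a device built on this core might delegate its
 * FNSSMDEVSAVEEXEC/FNSSMDEVLOADEXEC callbacks to the two helpers above.  MYDEVICE and
 * its Virtio member are hypothetical; a real device (e.g. DevVirtioScsi) would save and
 * load its own device-specific state around the core's.
 */
#if 0
static DECLCALLBACK(int) myDeviceR3SaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
{
    PMYDEVICE pThis = PDMINS_2_DATA(pDevIns, PMYDEVICE);
    return virtioR3SaveExec(&pThis->Virtio, pDevIns->pHlpR3, pSSM);
}

static DECLCALLBACK(int) myDeviceR3LoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    PMYDEVICE pThis = PDMINS_2_DATA(pDevIns, PMYDEVICE);
    RT_NOREF(uVersion, uPass); /* the virtio core embeds its own marker and version */
    return virtioR3LoadExec(&pThis->Virtio, pDevIns->pHlpR3, pSSM);
}
#endif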
1536
1537
1538/*********************************************************************************************************************************
1539* Device Level *
1540*********************************************************************************************************************************/
1541
1542/**
1543 * This should be called from PDMDEVREGR3::pfnReset.
1544 *
1545 * @param pVirtio Pointer to the virtio state.
1546 */
1547void virtioR3PropagateResetNotification(PVIRTIOSTATE pVirtio)
1548{
1549 /** @todo r=bird: You probably need to do something here. See
1550 * virtioScsiR3Reset. */
1551 RT_NOREF(pVirtio);
1552}
1553
1554
1555/**
1556 * This notifies ('kicks') the guest driver to check the queues for any new
1557 * elements in the used ring to process.
1558 *
1559 * It should be called after resuming in case anything was added to the queues
1560 * during suspend/quiescing and a notification was missed, to prevent the guest
1561 * from stalling after suspend.
1562 */
1563void virtioR3PropagateResumeNotification(PVIRTIOSTATE pVirtio)
1564{
1565 virtioNotifyGuestDriver(pVirtio, (uint16_t)0 /* idxQueue */, true /* fForce */);
1566}
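
/*
 * Illustrative sketch (disabled): wiring the helper above into a device's
 * PDMDEVREGR3::pfnResume callback.  MYDEVICE and its Virtio member are hypothetical.
 */
#if 0
static DECLCALLBACK(void) myDeviceR3Resume(PPDMDEVINS pDevIns)
{
    PMYDEVICE pThis = PDMINS_2_DATA(pDevIns, PMYDEVICE);
    virtioR3PropagateResumeNotification(&pThis->Virtio);
}
#endif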
1567
1568
1569/**
1570 * This should be called from PDMDEVREGR3::pfnDestruct.
1571 *
1572 * @param pVirtio Pointer to the virtio state.
1573 * @param pDevIns The device instance.
1574 */
1575void virtioR3Term(PVIRTIOSTATE pVirtio, PPDMDEVINS pDevIns)
1576{
1577 if (pVirtio->pvPrevDevSpecificCfg)
1578 {
1579 RTMemFree(pVirtio->pvPrevDevSpecificCfg);
1580 pVirtio->pvPrevDevSpecificCfg = NULL;
1581 }
1582 RT_NOREF(pDevIns);
1583}
1584
1585
1586/**
1587 * Sets up the PCI device controller and Virtio state.
1588 *
1589 * This should be called from PDMDEVREGR3::pfnConstruct.
1590 *
1591 * @param pVirtio Pointer to the virtio state. This must be
1592 * the first member in the shared device
1593 * instance data!
1594 * @param pDevIns The device instance.
1595 * @param pPciParams Values to populate industry standard PCI Configuration Space data structure
1596 * @param pcszInstance Device instance name (format-specifier)
1597 * @param fDevSpecificFeatures VirtIO device-specific features offered by
1598 * client
1599 * @param cbDevSpecificCfg Size of virtio_pci_device_cap device-specific struct
1600 * @param pvDevSpecificCfg Address of client's dev-specific
1601 * configuration struct.
1602 */
1603int virtioR3Init(PVIRTIOSTATE pVirtio, PPDMDEVINS pDevIns, PVIRTIOPCIPARAMS pPciParams, const char *pcszInstance,
1604 uint64_t fDevSpecificFeatures, void *pvDevSpecificCfg, uint16_t cbDevSpecificCfg)
1605{
1606 /*
1607 * The pVirtio state must be the first member of the shared device instance
1608 * data, otherwise we cannot get our bearings in the PCI configuration callbacks.
1609 */
1610 AssertLogRelReturn(pVirtio == PDMINS_2_DATA(pDevIns, PVIRTIOSTATE), VERR_STATE_CHANGED);
1611
1612
1613#if 0 /* Until pdmR3DvHlp_PCISetIrq() impl is fixed and Assert that limits vec to 0 is removed */
1614# ifdef VBOX_WITH_MSI_DEVICES
1615 pVirtio->fMsiSupport = true;
1616# endif
1617#endif
1618
1619 /*
1620 * The host features offered include both device-specific features
1621 * and reserved feature bits (device independent)
1622 */
1623 pVirtio->uDeviceFeatures = VIRTIO_F_VERSION_1
1624 | VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED
1625 | fDevSpecificFeatures;
1626
1627 RTStrCopy(pVirtio->szInstance, sizeof(pVirtio->szInstance), pcszInstance);
1628
1629 pVirtio->pDevInsR3 = pDevIns;
1630 pVirtio->uDeviceStatus = 0;
1631 pVirtio->cbDevSpecificCfg = cbDevSpecificCfg;
1632 pVirtio->pvDevSpecificCfg = pvDevSpecificCfg;
1633 pVirtio->pvPrevDevSpecificCfg = RTMemDup(pvDevSpecificCfg, cbDevSpecificCfg);
1634 AssertLogRelReturn(pVirtio->pvPrevDevSpecificCfg, VERR_NO_MEMORY);
1635
1636 /* Set PCI config registers (assume 32-bit mode) */
1637 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
1638 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
1639
1640 PDMPciDevSetRevisionId(pPciDev, DEVICE_PCI_REVISION_ID_VIRTIO);
1641 PDMPciDevSetVendorId(pPciDev, DEVICE_PCI_VENDOR_ID_VIRTIO);
1642 PDMPciDevSetSubSystemVendorId(pPciDev, DEVICE_PCI_VENDOR_ID_VIRTIO);
1643 PDMPciDevSetDeviceId(pPciDev, pPciParams->uDeviceId);
1644 PDMPciDevSetClassBase(pPciDev, pPciParams->uClassBase);
1645 PDMPciDevSetClassSub(pPciDev, pPciParams->uClassSub);
1646 PDMPciDevSetClassProg(pPciDev, pPciParams->uClassProg);
1647 PDMPciDevSetSubSystemId(pPciDev, pPciParams->uSubsystemId);
1648 PDMPciDevSetInterruptLine(pPciDev, pPciParams->uInterruptLine);
1649 PDMPciDevSetInterruptPin(pPciDev, pPciParams->uInterruptPin);
1650
1651 /* Register PCI device */
1652 int rc = PDMDevHlpPCIRegister(pDevIns, pPciDev);
1653 if (RT_FAILURE(rc))
1654 return PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: cannot register PCI Device")); /* can we put params in this error? */
1655
1656 rc = PDMDevHlpPCIInterceptConfigAccesses(pDevIns, pPciDev, virtioR3PciConfigRead, virtioR3PciConfigWrite);
1657 AssertRCReturn(rc, rc);
1658
1659
1660 /* Construct & map PCI vendor-specific capabilities for virtio host negotiation with guest driver */
1661
1662    /* The following capability, mapped via VirtIO 1.0 as struct virtio_pci_cfg_cap (VIRTIO_PCI_CFG_CAP_T),
1663     * is a mandatory but suboptimal alternative interface to the host device capabilities, facilitating
1664     * access to the memory of any BAR.  Unlike the Common, Notify, ISR and Device capabilities, it is
1665     * accessed directly via the PCI configuration region (the Linux VirtIO driver doesn't use it), and
1666     * therefore does not contribute to the capabilities region (BAR) the other capabilities use.
1667     */
1668#define CFGADDR2IDX(addr) ((uint8_t)(((uintptr_t)(addr) - (uintptr_t)&pPciDev->abConfig[0])))
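    /* CFGADDR2IDX converts a pointer into the PCI configuration space shadow (pPciDev->abConfig)
     * into the corresponding byte offset; that offset is what links the capabilities together
     * via their uCapNext fields below. */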
1669
1670 PVIRTIO_PCI_CAP_T pCfg;
1671 uint32_t cbRegion = 0;
1672
1673 /* Common capability (VirtIO 1.0 spec, section 4.1.4.3) */
1674 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[0x40];
1675 pCfg->uCfgType = VIRTIO_PCI_CAP_COMMON_CFG;
1676 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1677 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
1678 pCfg->uCapNext = CFGADDR2IDX(pCfg) + pCfg->uCapLen;
1679 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
1680 pCfg->uOffset = RT_ALIGN_32(0, 4); /* reminder, in case someone changes offset */
1681 pCfg->uLength = sizeof(VIRTIO_PCI_COMMON_CFG_T);
1682 cbRegion += pCfg->uLength;
1683 pVirtio->pCommonCfgCap = pCfg;
1684
1685 /*
1686     * Notify capability (VirtIO 1.0 spec, section 4.1.4.4).  Note: uLength is based on this
1687     * implementation's choice to set each queue's uQueueNotifyOff equal to the queue's
1688     * ordinal (QueueSelect) value. */
1689 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
1690 pCfg->uCfgType = VIRTIO_PCI_CAP_NOTIFY_CFG;
1691 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1692 pCfg->uCapLen = sizeof(VIRTIO_PCI_NOTIFY_CAP_T);
1693 pCfg->uCapNext = CFGADDR2IDX(pCfg) + pCfg->uCapLen;
1694 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
1695 pCfg->uOffset = pVirtio->pCommonCfgCap->uOffset + pVirtio->pCommonCfgCap->uLength;
1696 pCfg->uOffset = RT_ALIGN_32(pCfg->uOffset, 2);
1697 pCfg->uLength = VIRTQ_MAX_CNT * VIRTIO_NOTIFY_OFFSET_MULTIPLIER + 2; /* will change in VirtIO 1.1 */
1698 cbRegion += pCfg->uLength;
1699 pVirtio->pNotifyCap = (PVIRTIO_PCI_NOTIFY_CAP_T)pCfg;
1700 pVirtio->pNotifyCap->uNotifyOffMultiplier = VIRTIO_NOTIFY_OFFSET_MULTIPLIER;
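    /* Worked example: with each queue's uQueueNotifyOff set to its ordinal (see note above),
     * the guest computes queue i's notify address per VirtIO 1.0 spec, section 4.1.4.4, as:
     *     BAR base + pciCap.uOffset + i * uNotifyOffMultiplier */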
1701
1702 /* ISR capability (VirtIO 1.0 spec, section 4.1.4.5)
1703 *
1704     * The VirtIO 1.0 spec says this is an 8-bit field, unaligned, in MMIO space, while the
1705     * spec's example/diagram shows it as a 32-bit field with the upper bits 'reserved'.
1706     * We take the spec's words more literally than the diagram for now.
1707 */
1708 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
1709 pCfg->uCfgType = VIRTIO_PCI_CAP_ISR_CFG;
1710 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1711 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
1712 pCfg->uCapNext = CFGADDR2IDX(pCfg) + pCfg->uCapLen;
1713 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
1714 pCfg->uOffset = pVirtio->pNotifyCap->pciCap.uOffset + pVirtio->pNotifyCap->pciCap.uLength;
1715 pCfg->uLength = sizeof(uint8_t);
1716 cbRegion += pCfg->uLength;
1717 pVirtio->pIsrCap = pCfg;
1718
1719 /* PCI Cfg capability (VirtIO 1.0 spec, section 4.1.4.7)
1720     * This capability doesn't get page-MMIO mapped.  Instead uBar, uOffset and uLength are intercepted
1721     * by trapping PCI configuration I/O, and are programmed by the guest to locate and read/write
1722     * values in any region.  NOTE: The Linux driver not only doesn't use this feature, during
1723     * initialization it will not even list the capability as present unless uLength is non-zero
1724     * and 4-byte-aligned. */
1725
1726 pVirtio->uPciCfgDataOff = pCfg->uCapNext + RT_OFFSETOF(VIRTIO_PCI_CFG_CAP_T, uPciCfgData);
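    /* Note: uPciCfgDataOff is the PCI config space offset that virtioR3PciConfigRead/Write
     * above compare the access address against to detect pci_cfg_data window accesses. */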
1727 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
1728 pCfg->uCfgType = VIRTIO_PCI_CAP_PCI_CFG;
1729 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1730 pCfg->uCapLen = sizeof(VIRTIO_PCI_CFG_CAP_T);
1731 pCfg->uCapNext = (pVirtio->fMsiSupport || pVirtio->pvDevSpecificCfg) ? CFGADDR2IDX(pCfg) + pCfg->uCapLen : 0;
1732 pCfg->uBar = 0;
1733 pCfg->uOffset = 0;
1734 pCfg->uLength = 0;
1735 cbRegion += pCfg->uLength;
1736 pVirtio->pPciCfgCap = (PVIRTIO_PCI_CFG_CAP_T)pCfg;
1737
1738 if (pVirtio->pvDevSpecificCfg)
1739 {
1740        /* Device-specific capability (VirtIO 1.0 spec, section 4.1.4.6).  The client defines the
1741         * device-specific config fields struct and passes its size to this constructor. */
1742 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
1743 pCfg->uCfgType = VIRTIO_PCI_CAP_DEVICE_CFG;
1744 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1745 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
1746 pCfg->uCapNext = pVirtio->fMsiSupport ? CFGADDR2IDX(pCfg) + pCfg->uCapLen : 0;
1747 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
1748 pCfg->uOffset = pVirtio->pIsrCap->uOffset + pVirtio->pIsrCap->uLength;
1749 pCfg->uOffset = RT_ALIGN_32(pCfg->uOffset, 4);
1750 pCfg->uLength = cbDevSpecificCfg;
1751 cbRegion += pCfg->uLength;
1752 pVirtio->pDeviceCap = pCfg;
1753 }
1754
1755 if (pVirtio->fMsiSupport)
1756 {
1757 PDMMSIREG aMsiReg;
1758 RT_ZERO(aMsiReg);
1759 aMsiReg.iMsixCapOffset = pCfg->uCapNext;
1760 aMsiReg.iMsixNextOffset = 0;
1761 aMsiReg.iMsixBar = VIRTIO_REGION_MSIX_CAP;
1762 aMsiReg.cMsixVectors = VBOX_MSIX_MAX_ENTRIES;
1763 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &aMsiReg); /* see MsixR3init() */
1764 if (RT_FAILURE(rc))
1765 {
1766 /* See PDMDevHlp.cpp:pdmR3DevHlp_PCIRegisterMsi */
1767 LogFunc(("Failed to configure MSI-X (%Rrc). Reverting to INTx\n", rc));
1768 pVirtio->fMsiSupport = false;
1769 }
1770 else
1771 Log2Func(("Using MSI-X for guest driver notification\n"));
1772 }
1773 else
1774 LogFunc(("MSI-X not available for VBox, using INTx notification\n"));
1775
1776
1777 /* Set offset to first capability and enable PCI dev capabilities */
1778 PDMPciDevSetCapabilityList(pPciDev, 0x40);
1779 PDMPciDevSetStatus(pPciDev, VBOX_PCI_STATUS_CAP_LIST);
1780
1781 /* Linux drivers/virtio/virtio_pci_modern.c tries to map at least a page for the
1782 * 'unknown' device-specific capability without querying the capability to figure
1783     * out its size, so pad with an extra page. */
1784
1785 rc = PDMDevHlpPCIIORegionRegister(pDevIns, VIRTIO_REGION_PCI_CAP, RT_ALIGN_32(cbRegion + PAGE_SIZE, PAGE_SIZE),
1786 PCI_ADDRESS_SPACE_MEM, virtioR3Map);
1787 if (RT_FAILURE(rc))
1788 return PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: cannot register PCI Capabilities address space"));
1789
1790 return rc;
1791}
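
/*
 * Illustrative sketch (disabled): how a client device's PDMDEVREGR3::pfnConstruct might
 * call virtioR3Init above.  MYDEVICE, its members, MYDEV_FEATURES_OFFERED and the PCI ID
 * values are hypothetical placeholders; see DevVirtioScsi.cpp for a real caller.
 */
#if 0
static DECLCALLBACK(int) myDeviceR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
{
    PMYDEVICE pThis = PDMINS_2_DATA(pDevIns, PMYDEVICE); /* Virtio must be the first member! */
    RT_NOREF(iInstance, pCfg);

    VIRTIOPCIPARAMS PciParams;
    RT_ZERO(PciParams);
    PciParams.uDeviceId     = 0x1048;   /* hypothetical device ID */
    PciParams.uClassBase    = 0x01;     /* hypothetical: mass storage */
    PciParams.uSubsystemId  = 0x1048;   /* hypothetical */
    PciParams.uInterruptPin = 0x01;

    return virtioR3Init(&pThis->Virtio, pDevIns, &PciParams, "my-virtio-dev",
                        MYDEV_FEATURES_OFFERED /* hypothetical feature mask */,
                        &pThis->DevSpecificCfg, sizeof(pThis->DevSpecificCfg));
}
#endif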
1792
1793#endif /* IN_RING3 */