VirtualBox source: vbox/trunk/src/VBox/Devices/VirtIO/VirtioCore.h @ 92091

Last change on this file since 92091 was 92091, checked in by vboxsync, 3 years ago:
Optimize how legacy/modern driver is determined to avoid polling, and eliminate code duplication for feature logging.

/* $Id: VirtioCore.h 92091 2021-10-27 05:55:32Z vboxsync $ */

/** @file
 * VirtioCore.h - Virtio Declarations
 */

/*
 * Copyright (C) 2009-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifndef VBOX_INCLUDED_SRC_VirtIO_VirtioCore_h
#define VBOX_INCLUDED_SRC_VirtIO_VirtioCore_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#include <iprt/ctype.h>
#include <iprt/sg.h>
#include <iprt/types.h>

#ifdef LOG_ENABLED
# define VIRTIO_HEX_DUMP(logLevel, pv, cb, base, title) \
    do { \
        if (LogIsItEnabled(logLevel, LOG_GROUP)) \
            virtioCoreHexDump((pv), (cb), (base), (title)); \
    } while (0)
#else
# define VIRTIO_HEX_DUMP(logLevel, pv, cb, base, title) do { } while (0)
#endif
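
/**
 * Usage sketch (illustrative, not from the original source): dump a just-received
 * buffer only when level-2 logging is enabled for the current LOG_GROUP. The level
 * flag RTLOGGRPFLAGS_LEVEL_2 and the pvFrame/cbFrame names are assumptions made for
 * the example.
 *
 * @code
 *  VIRTIO_HEX_DUMP(RTLOGGRPFLAGS_LEVEL_2, pvFrame, cbFrame, 0, "rx frame");  // uBase = 0
 * @endcode
 */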

/** Pointer to the shared VirtIO state. */
typedef struct VIRTIOCORE *PVIRTIOCORE;
/** Pointer to the ring-3 VirtIO state. */
typedef struct VIRTIOCORER3 *PVIRTIOCORER3;
/** Pointer to the ring-0 VirtIO state. */
typedef struct VIRTIOCORER0 *PVIRTIOCORER0;
/** Pointer to the raw-mode VirtIO state. */
typedef struct VIRTIOCORERC *PVIRTIOCORERC;
/** Pointer to the instance data for the current context. */
typedef CTX_SUFF(PVIRTIOCORE) PVIRTIOCORECC;

#define VIRTIO_MAX_VIRTQ_NAME_SIZE         32   /**< Maximum length of a queue name           */
#define VIRTQ_SIZE                       1024   /**< Max size (# entries) of a virtq          */
#define VIRTQ_MAX_COUNT                    24   /**< Max queues we allow guest to create      */
#define VIRTIO_NOTIFY_OFFSET_MULTIPLIER     2   /**< VirtIO Notify Cap. MMIO config param     */
#define VIRTIO_REGION_LEGACY_IO             0   /**< BAR for VirtIO legacy drivers MBZ        */
#define VIRTIO_REGION_PCI_CAP               2   /**< BAR for VirtIO Cap. MMIO (impl specific) */
#define VIRTIO_REGION_MSIX_CAP              0   /**< BAR for MSI-X handling                   */
#define VIRTIO_PAGE_SIZE                 4096   /**< Page size used by VirtIO specification   */

/* Note: The VirtIO specification, particularly rev. 0.95, clarified in rev. 1.0 for transitional
 * devices, says the page size used for Queue Size calculations is usually 4096 bytes, but depends
 * on the transport. An appendix of the 0.95 spec describes an 'mmio device' (never implemented by
 * VirtualBox's legacy device) for which the guest must report the page size. For now the page size
 * is fixed at 4096, following the original VirtualBox legacy VirtIO implementation, which tied it
 * to PAGE_SIZE; that appears to work (or at least well enough for most practical purposes). */


/** The following virtioCoreGCPhysChain*() functions mimic the functionality of the related RT s/g
 * functions, except they work with the data type GCPhys rather than void *.
 */
typedef struct VIRTIOSGSEG          /**< An S/G entry                                  */
{
    RTGCPHYS GCPhys;                /**< Guest physical address of the segment buffer  */
    size_t   cbSeg;                 /**< Size of the segment buffer                    */
} VIRTIOSGSEG;

typedef VIRTIOSGSEG *PVIRTIOSGSEG, **PPVIRTIOSGSEG;
typedef const VIRTIOSGSEG *PCVIRTIOSGSEG;

typedef struct VIRTIOSGBUF
{
    PVIRTIOSGSEG paSegs;            /**< Pointer to the scatter/gather array           */
    unsigned     cSegs;             /**< Number of segs in scatter/gather array        */
    unsigned     idxSeg;            /**< Current segment we are in                     */
    RTGCPHYS     GCPhysCur;         /**< Guest physical address within the current seg */
    size_t       cbSegLeft;         /**< # of bytes left in the current segment        */
} VIRTIOSGBUF;

typedef VIRTIOSGBUF *PVIRTIOSGBUF, **PPVIRTIOSGBUF;
typedef const VIRTIOSGBUF *PCVIRTIOSGBUF;
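
/**
 * Usage sketch (illustrative only): building and walking a two-segment GCPhys chain
 * with the virtioCoreGCPhysChain*() helpers defined later in this file. The guest
 * physical addresses and sizes are made up for the example.
 *
 * @code
 *  VIRTIOSGSEG aSegs[2] = { { 0x10000, 0x200 },    // GCPhys, cbSeg (invented values)
 *                           { 0x20000, 0x100 } };
 *  VIRTIOSGBUF SgBuf;
 *  virtioCoreGCPhysChainInit(&SgBuf, aSegs, RT_ELEMENTS(aSegs));
 *  while (SgBuf.idxSeg < SgBuf.cSegs)
 *  {
 *      size_t cbSeg = 0;                           // 0 = take the remainder of the current segment
 *      RTGCPHYS GCPhys = virtioCoreGCPhysChainGetNextSeg(&SgBuf, &cbSeg);
 *      // ... access cbSeg bytes of guest memory at GCPhys ...
 *  }
 * @endcode
 */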

/**
 * VirtIO buffers are descriptor chains (scatter-gather vectors). Each buffer is described
 * by the index of its head descriptor, which optionally chains to another descriptor,
 * and so on.
 *
 * Each descriptor, a [len, GCPhys] pair in the chain, represents either an OUT segment
 * (e.g. guest-to-host) or an IN segment (host-to-guest). A VIRTQBUF is created and returned
 * from a call to virtioCoreR3VirtqAvailBufPeek() or virtioCoreR3VirtqAvailBufGet(). That function
 * consolidates the VirtIO descriptor chain into a single representation, where pSgPhysSend is a
 * GCPhys s/g buffer containing all of the OUT descriptors and pSgPhysReturn is a GCPhys s/g
 * buffer containing all of the IN descriptors to be filled with data on the host to return to
 * the guest.
 */
typedef struct VIRTQBUF
{
    uint32_t            u32Magic;       /**< Magic value, VIRTQBUF_MAGIC.           */
    uint16_t            uVirtq;         /**< VirtIO index of associated virtq       */
    uint16_t            pad;
    uint32_t volatile   cRefs;          /**< Reference counter.                     */
    uint32_t            uHeadIdx;       /**< Head idx of associated desc chain      */
    size_t              cbPhysSend;     /**< Total size of src buffer               */
    PVIRTIOSGBUF        pSgPhysSend;    /**< Phys S/G buf for data from guest       */
    size_t              cbPhysReturn;   /**< Total size of dst buffer               */
    PVIRTIOSGBUF        pSgPhysReturn;  /**< Phys S/G buf to store result for guest */

    /** @name Internal (bird combined 5 allocations into a single), fingers off.
     * @{ */
    VIRTIOSGBUF         SgBufIn;
    VIRTIOSGBUF         SgBufOut;
    VIRTIOSGSEG         aSegsIn[VIRTQ_SIZE];
    VIRTIOSGSEG         aSegsOut[VIRTQ_SIZE];
    /** @} */
} VIRTQBUF_T;

/** Pointers to a Virtio descriptor chain. */
typedef VIRTQBUF_T *PVIRTQBUF, **PPVIRTQBUF;

/** Magic value for VIRTQBUF_T::u32Magic. */
#define VIRTQBUF_MAGIC      UINT32_C(0x19600219)

typedef struct VIRTIOPCIPARAMS
{
    uint16_t uDeviceId;         /**< PCI Cfg Device ID                   */
    uint16_t uClassBase;        /**< PCI Cfg Base Class                  */
    uint16_t uClassSub;         /**< PCI Cfg Subclass                    */
    uint16_t uClassProg;        /**< PCI Cfg Programming Interface Class */
    uint16_t uSubsystemId;      /**< PCI Cfg Card Manufacturer Vendor ID */
    uint16_t uInterruptLine;    /**< PCI Cfg Interrupt line              */
    uint16_t uInterruptPin;     /**< PCI Cfg Interrupt pin               */
} VIRTIOPCIPARAMS, *PVIRTIOPCIPARAMS;
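
/**
 * Fill-in sketch (values are illustrative, not authoritative): how a network device
 * might populate VIRTIOPCIPARAMS before handing it to virtioCoreR3Init(). 0x1000 is
 * the transitional virtio-net PCI device ID; consult the VirtIO spec for other types.
 *
 * @code
 *  VIRTIOPCIPARAMS PciParams;
 *  RT_ZERO(PciParams);
 *  PciParams.uDeviceId      = 0x1000;                        // transitional virtio-net (example)
 *  PciParams.uClassBase     = 0x02;                          // PCI base class: network controller
 *  PciParams.uClassSub      = 0x00;                          // ethernet
 *  PciParams.uClassProg     = 0x00;
 *  PciParams.uSubsystemId   = DEVICE_PCI_NETWORK_SUBSYSTEM;  // 1 = network card, per legacy spec
 *  PciParams.uInterruptLine = 0x00;
 *  PciParams.uInterruptPin  = 0x01;                          // INTA#
 * @endcode
 */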


/* Virtio Platform Independent Reserved Feature Bits (see section 6 of the VirtIO 1.1 specification) */

#define VIRTIO_F_NOTIFY_ON_EMPTY        RT_BIT_64(24)   /**< Legacy feature: Force intr if no AVAIL    */
#define VIRTIO_F_ANY_LAYOUT             RT_BIT_64(27)   /**< Doc bug: Goes under two names in spec     */
#define VIRTIO_F_RING_INDIRECT_DESC     RT_BIT_64(28)   /**< Doc bug: Goes under two names in spec     */
#define VIRTIO_F_INDIRECT_DESC          RT_BIT_64(28)   /**< Allow descs to point to list of descs     */
#define VIRTIO_F_RING_EVENT_IDX         RT_BIT_64(29)   /**< Doc bug: Goes under two names in spec     */
#define VIRTIO_F_EVENT_IDX              RT_BIT_64(29)   /**< Allow notification disable for n elems    */
#define VIRTIO_F_BAD_FEATURE            RT_BIT_64(30)   /**< QEMU kludge. UNUSED as of >= VirtIO 1.0   */
#define VIRTIO_F_VERSION_1              RT_BIT_64(32)   /**< Required feature bit for 1.0 devices      */
#define VIRTIO_F_ACCESS_PLATFORM        RT_BIT_64(33)   /**< Funky guest mem access   (VirtIO 1.1 NYI) */
#define VIRTIO_F_RING_PACKED            RT_BIT_64(34)   /**< Packed Queue Layout      (VirtIO 1.1 NYI) */
#define VIRTIO_F_IN_ORDER               RT_BIT_64(35)   /**< Honor guest buf order    (VirtIO 1.1 NYI) */
#define VIRTIO_F_ORDER_PLATFORM         RT_BIT_64(36)   /**< Host mem access honored  (VirtIO 1.1 NYI) */
#define VIRTIO_F_SR_IOV                 RT_BIT_64(37)   /**< Dev Single Root I/O virt (VirtIO 1.1 NYI) */
#define VIRTIO_F_NOTIFICATION_DATA      RT_BIT_64(38)   /**< Driver passes extra data (VirtIO 1.1 NYI) */
typedef struct VIRTIO_FEATURES_LIST
{
    uint64_t    fFeatureBit;
    const char *pcszDesc;
} VIRTIO_FEATURES_LIST, *PVIRTIO_FEATURES_LIST;

static const VIRTIO_FEATURES_LIST s_aCoreFeatures[] =
{
    { VIRTIO_F_RING_INDIRECT_DESC, "   RING_INDIRECT_DESC   Driver can use descriptors with VIRTQ_DESC_F_INDIRECT flag set\n" },
    { VIRTIO_F_RING_EVENT_IDX,     "   RING_EVENT_IDX       Enables use_event and avail_event fields described in 2.4.7, 2.4.8\n" },
    { VIRTIO_F_VERSION_1,          "   VERSION              Used to detect legacy drivers.\n" },
};
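
/**
 * Usage sketch: after feature negotiation completes, device-specific code can gate
 * optional behavior on the accepted feature set (virtioCoreGetNegotiatedFeatures()
 * is declared further below). Illustrative only; pVirtio is assumed in scope.
 *
 * @code
 *  uint64_t const fFeatures = virtioCoreGetNegotiatedFeatures(pVirtio);
 *  bool const fEventIdx = RT_BOOL(fFeatures & VIRTIO_F_RING_EVENT_IDX);
 *  // ... consult avail_event/used_event fields only when fEventIdx is true ...
 * @endcode
 */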


#define VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED        ( 0 )   /**< TBD: Add VIRTIO_F_INDIRECT_DESC */
#define VIRTIO_DEV_INDEPENDENT_LEGACY_FEATURES_OFFERED ( 0 )   /**< Only offered to legacy drivers  */

#define VIRTIO_ISR_VIRTQ_INTERRUPT      RT_BIT_32(0)    /**< Virtq interrupt bit of ISR register       */
#define VIRTIO_ISR_DEVICE_CONFIG        RT_BIT_32(1)    /**< Device configuration changed bit of ISR   */
#define DEVICE_PCI_NETWORK_SUBSYSTEM    1               /**< Network Card, per VirtIO legacy spec.     */
#define DEVICE_PCI_VENDOR_ID_VIRTIO     0x1AF4          /**< Guest driver locates dev via (mandatory)  */
#define DEVICE_PCI_REVISION_ID_VIRTIO   0               /**< VirtIO Modern Transitional driver rev MBZ */

/** Reserved (*negotiated*) Feature Bits (e.g. device independent features, VirtIO 1.0 spec, section 6) */

#define VIRTIO_MSI_NO_VECTOR            0xffff          /**< Vector value to disable MSI for queue     */

/** Device Status field constants (from Virtio 1.0 spec) */
#define VIRTIO_STATUS_ACKNOWLEDGE           0x01        /**< Guest driver: Located this VirtIO device  */
#define VIRTIO_STATUS_DRIVER                0x02        /**< Guest driver: Can drive this VirtIO dev.  */
#define VIRTIO_STATUS_DRIVER_OK             0x04        /**< Guest driver: Driver set-up and ready     */
#define VIRTIO_STATUS_FEATURES_OK           0x08        /**< Guest driver: Feature negotiation done    */
#define VIRTIO_STATUS_FAILED                0x80        /**< Guest driver: Fatal error, gave up        */
#define VIRTIO_STATUS_DEVICE_NEEDS_RESET    0x40        /**< Device experienced unrecoverable error    */

typedef enum VIRTIOVMSTATECHANGED
{
    kvirtIoVmStateChangedInvalid = 0,
    kvirtIoVmStateChangedReset,
    kvirtIoVmStateChangedSuspend,
    kvirtIoVmStateChangedPowerOff,
    kvirtIoVmStateChangedResume,
    kvirtIoVmStateChangedFor32BitHack = 0x7fffffff
} VIRTIOVMSTATECHANGED;


/** Virtio Device PCI Capabilities type codes */
#define VIRTIO_PCI_CAP_COMMON_CFG       1               /**< Common configuration PCI capability ID */
#define VIRTIO_PCI_CAP_NOTIFY_CFG       2               /**< Notification area PCI capability ID    */
#define VIRTIO_PCI_CAP_ISR_CFG          3               /**< ISR PCI capability ID                  */
#define VIRTIO_PCI_CAP_DEVICE_CFG       4               /**< Device-specific PCI cfg capability ID  */
#define VIRTIO_PCI_CAP_PCI_CFG          5               /**< PCI CFG capability ID                  */

#define VIRTIO_PCI_CAP_ID_VENDOR        0x09            /**< Vendor-specific PCI CFG Device Cap. ID */

/**
 * The following is the PCI capability struct common to all VirtIO capability types.
 */
typedef struct virtio_pci_cap
{
    /* All little-endian */
    uint8_t  uCapVndr;                  /**< Generic PCI field: PCI_CAP_ID_VNDR   */
    uint8_t  uCapNext;                  /**< Generic PCI field: next ptr.         */
    uint8_t  uCapLen;                   /**< Generic PCI field: capability length */
    uint8_t  uCfgType;                  /**< Identifies the structure.            */
    uint8_t  uBar;                      /**< Where to find it.                    */
    uint8_t  uPadding[3];               /**< Pad to full dword.                   */
    uint32_t uOffset;                   /**< Offset within bar.  (L.E.)           */
    uint32_t uLength;                   /**< Length of struct, in bytes. (L.E.)   */
} VIRTIO_PCI_CAP_T, *PVIRTIO_PCI_CAP_T;

/**
 * VirtIO Legacy Capabilities' related MMIO-mapped structs (see virtio-0.9.5 spec)
 *
 * Note: virtio_pci_device_cap is dev-specific, implemented by client. Definition unknown here.
 */
typedef struct virtio_legacy_pci_common_cfg
{
    /* Device-specific fields */
    uint32_t uDeviceFeatures;           /**< RO (device reports features to driver)    */
    uint32_t uDriverFeatures;           /**< RW (driver-accepted device features)      */
    uint32_t uVirtqPfn;                 /**< RW (driver writes queue page number)      */
    uint16_t uQueueSize;                /**< RW (queue size, 0 - 2^n)                  */
    uint16_t uVirtqSelect;              /**< RW (selects queue focus for these fields) */
    uint16_t uQueueNotify;              /**< RO (offset into virtqueue; see spec)      */
    uint8_t  fDeviceStatus;             /**< RW (driver writes device status, 0=reset) */
    uint8_t  fIsrStatus;                /**< RW (driver writes ISR status, 0=reset)    */
#ifdef LEGACY_MSIX_SUPPORTED
    uint16_t uMsixConfig;               /**< RW (driver sets MSI-X config vector)      */
    uint16_t uMsixVector;               /**< RW (driver sets MSI-X queue vector)       */
#endif
} VIRTIO_LEGACY_PCI_COMMON_CFG_T, *PVIRTIO_LEGACY_PCI_COMMON_CFG_T;

/**
 * VirtIO 1.0 Capabilities' related MMIO-mapped structs:
 *
 * Note: virtio_pci_device_cap is dev-specific, implemented by client. Definition unknown here.
 */
typedef struct virtio_pci_common_cfg
{
    /* Device-specific fields */
    uint32_t uDeviceFeaturesSelect;     /**< RW (driver selects device features)       */
    uint32_t uDeviceFeatures;           /**< RO (device reports features to driver)    */
    uint32_t uDriverFeaturesSelect;     /**< RW (driver selects driver features)       */
    uint32_t uDriverFeatures;           /**< RW (driver-accepted device features)      */
    uint16_t uMsixConfig;               /**< RW (driver sets MSI-X config vector)      */
    uint16_t uNumVirtqs;                /**< RO (device specifies max queues)          */
    uint8_t  fDeviceStatus;             /**< RW (driver writes device status, 0=reset) */
    uint8_t  uConfigGeneration;         /**< RO (device changes when changing configs) */

    /* Virtq-specific fields (values reflect, via MMIO, info related to the queue indicated by uVirtqSelect). */
    uint16_t uVirtqSelect;              /**< RW (selects queue focus for these fields) */
    uint16_t uQueueSize;                /**< RW (queue size, 0 - 2^n)                  */
    uint16_t uMsixVector;               /**< RW (driver selects MSI-X queue vector)    */
    uint16_t uEnable;                   /**< RW (driver controls usability of queue)   */
    uint16_t uNotifyOffset;             /**< RO (offset into virtqueue; see spec)      */
    uint64_t GCPhysVirtqDesc;           /**< RW (driver writes desc table phys addr)   */
    uint64_t GCPhysVirtqAvail;          /**< RW (driver writes avail ring phys addr)   */
    uint64_t GCPhysVirtqUsed;           /**< RW (driver writes used ring phys addr)    */
} VIRTIO_PCI_COMMON_CFG_T, *PVIRTIO_PCI_COMMON_CFG_T;

typedef struct virtio_pci_notify_cap
{
    struct virtio_pci_cap pciCap;       /**< Notification MMIO mapping capability      */
    uint32_t uNotifyOffMultiplier;      /**< notify_off_multiplier                     */
} VIRTIO_PCI_NOTIFY_CAP_T, *PVIRTIO_PCI_NOTIFY_CAP_T;

typedef struct virtio_pci_cfg_cap
{
    struct virtio_pci_cap pciCap;       /**< Cap. defines the BAR/off/len to access    */
    uint8_t uPciCfgData[4];             /**< I/O buf for above cap.                    */
} VIRTIO_PCI_CFG_CAP_T, *PVIRTIO_PCI_CFG_CAP_T;

/**
 * PCI capability data locations (PCI CFG and MMIO).
 */
typedef struct VIRTIO_PCI_CAP_LOCATIONS_T
{
    uint16_t offMmio;
    uint16_t cbMmio;
    uint16_t offPci;
    uint16_t cbPci;
} VIRTIO_PCI_CAP_LOCATIONS_T;

typedef struct VIRTQUEUE
{
    RTGCPHYS GCPhysVirtqDesc;           /**< (MMIO) PhysAdr per-Q desc structs   GUEST */
    RTGCPHYS GCPhysVirtqAvail;          /**< (MMIO) PhysAdr per-Q avail structs  GUEST */
    RTGCPHYS GCPhysVirtqUsed;           /**< (MMIO) PhysAdr per-Q used structs   GUEST */
    uint16_t uMsixVector;               /**< (MMIO) Per-queue vector for MSI-X   GUEST */
    uint16_t uEnable;                   /**< (MMIO) Per-queue enable             GUEST */
    uint16_t uNotifyOffset;             /**< (MMIO) per-Q notify offset           HOST */
    uint16_t uQueueSize;                /**< (MMIO) Per-queue size          HOST/GUEST */
    uint16_t uAvailIdxShadow;           /**< Consumer's position in avail ring         */
    uint16_t uUsedIdxShadow;            /**< Consumer's position in used ring          */
    uint16_t uVirtq;                    /**< Index of this queue                       */
    char     szName[32];                /**< Dev-specific name of queue                */
    bool     fUsedRingEvent;            /**< Set when used idx for guest notification has been reached */
    uint8_t  padding[3];
} VIRTQUEUE, *PVIRTQUEUE;

/**
 * The core/common state of the VirtIO PCI devices, shared edition.
 */
typedef struct VIRTIOCORE
{
    char szInstance[16];                        /**< Instance name, e.g. "VIRTIOSCSI0"         */
    PPDMDEVINS pDevInsR0;                       /**< Client device instance                    */
    PPDMDEVINS pDevInsR3;                       /**< Client device instance                    */
    VIRTQUEUE aVirtqueues[VIRTQ_MAX_COUNT];     /**< (MMIO) VirtIO contexts for queues         */
    uint64_t uDeviceFeatures;                   /**< (MMIO) Host features offered         HOST */
    uint64_t uDriverFeatures;                   /**< (MMIO) Host features accepted       GUEST */
    uint32_t uDeviceFeaturesSelect;             /**< (MMIO) hi/lo select uDeviceFeatures GUEST */
    uint32_t uDriverFeaturesSelect;             /**< (MMIO) hi/lo select uDriverFeatures GUEST */
    uint32_t uMsixConfig;                       /**< (MMIO) MSI-X vector                 GUEST */
    uint8_t  fDeviceStatus;                     /**< (MMIO) Device Status                GUEST */
    uint8_t  fPrevDeviceStatus;                 /**< (MMIO) Prev Device Status           GUEST */
    uint8_t  uConfigGeneration;                 /**< (MMIO) Device config sequencer       HOST */
    uint16_t uQueueNotify;                      /**< Caches queue idx in legacy mode     GUEST */
    bool     fGenUpdatePending;                 /**< If set, update cfg gen after driver reads */
    uint8_t  uPciCfgDataOff;                    /**< Offset to PCI configuration data area     */
    uint8_t  uISR;                              /**< Interrupt Status Register.                */
    uint8_t  fMsiSupport;                       /**< Flag set if using MSI instead of ISR      */
    uint16_t uVirtqSelect;                      /**< (MMIO) queue selector               GUEST */
    uint32_t fLegacyDriver;                     /**< Set if guest driver < VirtIO 1.0          */

    /** @name The locations of the capability structures in PCI config space and the BAR.
     * @{ */
    VIRTIO_PCI_CAP_LOCATIONS_T LocPciCfgCap;    /**< VIRTIO_PCI_CFG_CAP_T                      */
    VIRTIO_PCI_CAP_LOCATIONS_T LocNotifyCap;    /**< VIRTIO_PCI_NOTIFY_CAP_T                   */
    VIRTIO_PCI_CAP_LOCATIONS_T LocCommonCfgCap; /**< VIRTIO_PCI_CAP_T                          */
    VIRTIO_PCI_CAP_LOCATIONS_T LocIsrCap;       /**< VIRTIO_PCI_CAP_T                          */
    VIRTIO_PCI_CAP_LOCATIONS_T LocDeviceCap;    /**< VIRTIO_PCI_CAP_T + custom data.           */
    /** @} */

    IOMMMIOHANDLE   hMmioPciCap;                /**< MMIO handle of PCI cap. region (\#2)      */
    IOMIOPORTHANDLE hLegacyIoPorts;             /**< Handle of legacy I/O port range.          */

#ifdef VBOX_WITH_STATISTICS
    /** @name Statistics
     * @{ */
    STAMCOUNTER    StatDescChainsAllocated;
    STAMCOUNTER    StatDescChainsFreed;
    STAMCOUNTER    StatDescChainsSegsIn;
    STAMCOUNTER    StatDescChainsSegsOut;
    STAMPROFILEADV StatReadR3;                  /**< I/O port and MMIO R3 Read profiling       */
    STAMPROFILEADV StatReadR0;                  /**< I/O port and MMIO R0 Read profiling       */
    STAMPROFILEADV StatReadRC;                  /**< I/O port and MMIO RC Read profiling       */
    STAMPROFILEADV StatWriteR3;                 /**< I/O port and MMIO R3 Write profiling      */
    STAMPROFILEADV StatWriteR0;                 /**< I/O port and MMIO R0 Write profiling      */
    STAMPROFILEADV StatWriteRC;                 /**< I/O port and MMIO RC Write profiling      */
    /** @} */
#endif
} VIRTIOCORE;

#define MAX_NAME 64

/**
 * The core/common state of the VirtIO PCI devices, ring-3 edition.
 */
typedef struct VIRTIOCORER3
{
    /** @name Callbacks filled by the device before calling virtioCoreR3Init.
     * @{ */
    /**
     * Implementation-specific client callback to report whether the guest driver is
     * modern or legacy VirtIO. That is the only meaningful version distinction in the
     * VirtIO specification; beyond it, versioning is only loosely discernible through
     * feature negotiation. The callback is invoked twice: the first call indicates that
     * the guest driver is initially considered legacy VirtIO, as it is critical to assume
     * that at the outset. The second call occurs during feature negotiation and reports
     * the guest as modern if it acknowledges the VIRTIO_F_VERSION_1 feature, or legacy if
     * the feature isn't negotiated. The second callback lets the device-specific code
     * configure its behavior in terms of both guest version and features.
     *
     * @param pVirtio    Pointer to the shared virtio state.
     * @param fModern    True if guest driver identified itself as modern (e.g. VirtIO 1.0 featured)
     */
    DECLCALLBACKMEMBER(void, pfnGuestVersionHandler,(PVIRTIOCORE pVirtio, uint32_t fModern));

    /**
     * Implementation-specific client callback to notify client of significant device status
     * changes.
     *
     * @param pVirtio    Pointer to the shared virtio state.
     * @param pVirtioCC  Pointer to the ring-3 virtio state.
     * @param fDriverOk  True if guest driver is okay (thus queues, etc... are
     *                   valid)
     */
    DECLCALLBACKMEMBER(void, pfnStatusChanged,(PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC, uint32_t fDriverOk));

    /**
     * Implementation-specific client callback to access VirtIO Device-specific capabilities
     * (other VirtIO capabilities and features are handled in VirtIO implementation)
     *
     * @param pDevIns    The device instance.
     * @param offCap     Offset within device specific capabilities struct.
     * @param pvBuf      Buffer in which to save read data.
     * @param cbToRead   Number of bytes to read.
     */
    DECLCALLBACKMEMBER(int, pfnDevCapRead,(PPDMDEVINS pDevIns, uint32_t offCap, void *pvBuf, uint32_t cbToRead));

    /**
     * Implementation-specific client callback to access VirtIO Device-specific capabilities
     * (other VirtIO capabilities and features are handled in VirtIO implementation)
     *
     * @param pDevIns    The device instance.
     * @param offCap     Offset within device specific capabilities struct.
     * @param pvBuf      Buffer with the bytes to write.
     * @param cbWrite    Number of bytes to write.
     */
    DECLCALLBACKMEMBER(int, pfnDevCapWrite,(PPDMDEVINS pDevIns, uint32_t offCap, const void *pvBuf, uint32_t cbWrite));

    /**
     * When guest-to-host queue notifications are enabled, the guest driver notifies the host
     * that the avail queue has buffers, and this callback informs the client.
     *
     * @param pDevIns    The device instance.
     * @param pVirtio    Pointer to the shared virtio state.
     * @param uVirtqNbr  Index of the notified queue
     */
    DECLCALLBACKMEMBER(void, pfnVirtqNotified,(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr));

    /** @} */

    R3PTRTYPE(PVIRTIO_PCI_CFG_CAP_T)    pPciCfgCap;     /**< Pointer to struct in PCI config area.     */
    R3PTRTYPE(PVIRTIO_PCI_NOTIFY_CAP_T) pNotifyCap;     /**< Pointer to struct in PCI config area.     */
    R3PTRTYPE(PVIRTIO_PCI_CAP_T)        pCommonCfgCap;  /**< Pointer to struct in PCI config area.     */
    R3PTRTYPE(PVIRTIO_PCI_CAP_T)        pIsrCap;        /**< Pointer to struct in PCI config area.     */
    R3PTRTYPE(PVIRTIO_PCI_CAP_T)        pDeviceCap;     /**< Pointer to struct in PCI config area.     */

    uint32_t             cbDevSpecificCfg;              /**< Size of client's dev-specific config data  */
    R3PTRTYPE(uint8_t *) pbDevSpecificCfg;              /**< Pointer to client's struct                 */
    R3PTRTYPE(uint8_t *) pbPrevDevSpecificCfg;          /**< Previously read dev-specific cfg of client */
    bool                 fGenUpdatePending;             /**< If set, update cfg gen after driver reads  */
    char                 pcszMmioName[MAX_NAME];        /**< MMIO mapping name                          */
    char                 pcszPortIoName[MAX_NAME];      /**< PORT mapping name                          */
} VIRTIOCORER3;

/**
 * The core/common state of the VirtIO PCI devices, ring-0 edition.
 */
typedef struct VIRTIOCORER0
{
    /**
     * When guest-to-host queue notifications are enabled, the guest driver notifies the host
     * that the avail queue has buffers, and this callback informs the client.
     *
     * @param pDevIns    The device instance.
     * @param pVirtio    Pointer to the shared virtio state.
     * @param uVirtqNbr  Index of the notified queue
     */
    DECLCALLBACKMEMBER(void, pfnVirtqNotified,(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr));

} VIRTIOCORER0;

/**
 * The core/common state of the VirtIO PCI devices, raw-mode edition.
 */
typedef struct VIRTIOCORERC
{
    uint64_t uUnusedAtTheMoment;
} VIRTIOCORERC;


/** @typedef VIRTIOCORECC
 * The instance data for the current context. */
typedef CTX_SUFF(VIRTIOCORE) VIRTIOCORECC;


/** @name API for VirtIO parent device
 * @{ */

/**
 * Sets up the PCI device controller and VirtIO state.
 *
 * This should be called from PDMDEVREGR3::pfnConstruct.
 *
 * @param pDevIns                The device instance.
 * @param pVirtio                Pointer to the shared virtio state. This
 *                               must be the first member in the shared
 *                               device instance data!
 * @param pVirtioCC              Pointer to the ring-3 virtio state. This
 *                               must be the first member in the ring-3
 *                               device instance data!
 * @param pPciParams             Values to populate industry-standard PCI Configuration Space data structure
 * @param pcszInstance           Device instance name (format-specifier)
 * @param fDevSpecificFeatures   VirtIO device-specific features offered by
 *                               client
 * @param pvDevSpecificCfg       Address of client's dev-specific
 *                               configuration struct.
 * @param cbDevSpecificCfg       Size of virtio_pci_device_cap device-specific struct
 */
int virtioCoreR3Init(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC,
                     PVIRTIOPCIPARAMS pPciParams, const char *pcszInstance,
                     uint64_t fDevSpecificFeatures, void *pvDevSpecificCfg, uint16_t cbDevSpecificCfg);
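
/**
 * Constructor sketch (hypothetical device, all names invented): note the requirement
 * stated above that the core structs be the first members of the device's shared and
 * ring-3 instance data.
 *
 * @code
 *  typedef struct MYDEV   { VIRTIOCORE   Virtio; MYDEVCONFIG Config; } MYDEV;    // VIRTIOCORE first!
 *  typedef struct MYDEVR3 { VIRTIOCORER3 Virtio; }                     MYDEVR3;  // VIRTIOCORER3 first!
 *
 *  VIRTIOPCIPARAMS PciParams;  // populated as sketched earlier
 *  int rc = virtioCoreR3Init(pDevIns, &pThis->Virtio, &pThisCC->Virtio, &PciParams, "MYDEV0",
 *                            MYDEV_FEATURES_OFFERED,  // hypothetical device-specific feature bits
 *                            &pThis->Config, sizeof(pThis->Config));
 * @endcode
 */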

/**
 * Initiates the orderly reset procedure. This is an exposed API for clients that might need it.
 * Invoked by the client to reset the device and driver (see VirtIO 1.0 section 2.1.1/2.1.2).
 *
 * @param pVirtio    Pointer to the virtio state.
 */
void virtioCoreResetAll(PVIRTIOCORE pVirtio);

/**
 * 'Attaches' the host device-specific implementation's queue state to the host VirtIO core
 * virtqueue management infrastructure, informing the VirtIO core of the name of the queue
 * associated with the queue number. uVirtqNbr is used as the 'handle' for virtqueues in this
 * API (and is opaquely the index into the VirtIO core's array of queue states).
 *
 * Virtqueue numbers are defined by the VirtIO specification (i.e. they are unique within each
 * VirtIO device type).
 *
 * @param pVirtio    Pointer to the shared virtio state.
 * @param uVirtqNbr  Virtq number
 * @param pcszName   Name to give queue
 *
 * @returns VBox status code.
 */
int virtioCoreR3VirtqAttach(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, const char *pcszName);
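
/**
 * Usage sketch: attaching named queues right after virtioCoreR3Init(); the queue
 * numbers follow the device type's spec-defined layout (virtio-net shown here,
 * purely as an illustration).
 *
 * @code
 *  virtioCoreR3VirtqAttach(&pThis->Virtio, 0, "receiveq");
 *  virtioCoreR3VirtqAttach(&pThis->Virtio, 1, "transmitq");
 *  virtioCoreR3VirtqAttach(&pThis->Virtio, 2, "controlq");
 * @endcode
 */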

/**
 * Enables or disables a virtq.
 *
 * @param pVirtio    Pointer to the shared virtio state.
 * @param uVirtqNbr  Virtq number
 * @param fEnable    Flags whether to enable or disable the virtq
 */
void virtioCoreVirtqEnable(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, bool fEnable);

/**
 * Enables or disables notification for the specified queue.
 *
 * With notification enabled, the guest driver notifies the host device (via MMIO
 * to the queue notification offset described in VirtIO 1.0, 4.1.4.4 "Notification Structure Layout")
 * whenever the guest driver adds a new entry to the avail ring of the respective queue.
 *
 * Note: In the VirtIO world, the device sets flags in the used ring to communicate to the driver how to
 * handle notifications for the avail ring, and the driver sets flags in the avail ring to communicate
 * to the device how to handle sending interrupts for the used ring.
 *
 * @param pVirtio    Pointer to the shared virtio state.
 * @param uVirtqNbr  Virtq number
 * @param fEnable    Selects notification mode (enabled or disabled)
 */
void virtioCoreVirtqEnableNotify(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, bool fEnable);
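
/**
 * Usage sketch: a common pattern is to throttle doorbells while a worker thread is
 * actively emptying the avail ring, then re-arm notifications before sleeping
 * (illustrative; pVirtio and uVirtqNbr assumed in scope).
 *
 * @code
 *  virtioCoreVirtqEnableNotify(pVirtio, uVirtqNbr, false);  // quiesce notifications while draining
 *  // ... pull and process all pending avail-ring buffers ...
 *  virtioCoreVirtqEnableNotify(pVirtio, uVirtqNbr, true);   // re-arm before waiting again
 * @endcode
 */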

/**
 * Notifies guest (via ISR or MSI-X) of device configuration change
 *
 * @param pVirtio    Pointer to the shared virtio state.
 */
void virtioCoreNotifyConfigChanged(PVIRTIOCORE pVirtio);

/**
 * Displays the VirtIO spec-related features offered by the VirtIO core and the device-specific
 * code (which invokes this function), along with their accepted/declined status. The result is
 * a comprehensive list of the features the VirtIO specification defines, which ones were
 * actually offered by the device, and which ones were accepted by the guest driver, providing
 * a legible summary view of the configuration the device is operating with.
 *
 * @param pVirtio    Pointer to the shared virtio state.
 * @param pHlp       Pointer to the debug info hlp struct
 * @param aDevSpecificFeatures
 *                   Features specification lists for device-specific implementation
 *                   (i.e: net controller, scsi controller ...)
 * @param cFeatures  Number of features in aDevSpecificFeatures
 */
void virtioCorePrintDeviceFeatures(VIRTIOCORE *pVirtio, PCDBGFINFOHLP pHlp,
                                   const VIRTIO_FEATURES_LIST *aDevSpecificFeatures, int cFeatures);

/**
 * Debugging assist feature that displays the state of the VirtIO core code, which includes
 * an overview of the state of all of the queues.
 *
 * This can be invoked when running the VirtualBox debugger, or from the command line
 * using the command: "VBoxManage debugvm <VM name or id> info <device name> [args]"
 *
 * Example: VBoxManage debugvm myVnetVm info "virtio-net" help
 *
 * This is implemented currently to be invoked by the inheriting device-specific code
 * (see DevVirtioNet for an example, which receives the debugvm callback directly).
 * DevVirtioNet lists the available sub-options if no arguments are provided. In that
 * example this virtq info related function is invoked hierarchically when virtio-net
 * displays its device-specific queue info.
 *
 * @param pDevIns    The device instance.
 * @param pHlp       Pointer to the debug info hlp struct
 * @param pszArgs    Arguments to function
 * @param uVirtqNbr  Index of the queue to display info for
 */
void virtioCoreR3VirtqInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs, int uVirtqNbr);

/**
 * Returns the number of avail bufs in the virtq.
 *
 * @param pDevIns    The device instance.
 * @param pVirtio    Pointer to the shared virtio state.
 * @param uVirtqNbr  Virtqueue to return the count of buffers available for.
 */
uint16_t virtioCoreVirtqAvailBufCount(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr);

/**
 * This function is identical to virtioCoreR3VirtqAvailBufGet(), except it doesn't 'consume'
 * the buffer from the avail ring of the virtq. The peek operation becomes identical to a get
 * operation if virtioCoreR3VirtqAvailBufNext() is called to consume the buffer from the avail
 * ring, at which point virtioCoreR3VirtqUsedBufPut() must be called to complete the roundtrip
 * transaction by putting the descriptor on the used ring.
 *
 * @param pDevIns     The device instance.
 * @param pVirtio     Pointer to the shared virtio state.
 * @param uVirtqNbr   Virtq number
 * @param ppVirtqBuf  Address to store pointer to descriptor chain that contains the
 *                    pre-processed transaction information pulled from the virtq.
 *
 * @returns VBox status code:
 * @retval  VINF_SUCCESS         Success
 * @retval  VERR_INVALID_STATE   VirtIO not in ready state (asserted).
 * @retval  VERR_NOT_AVAILABLE   If the queue is empty.
 */
int virtioCoreR3VirtqAvailBufPeek(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr,
                                  PPVIRTQBUF ppVirtqBuf);

/**
 * This function fetches the next buffer (descriptor chain) from the VirtIO "avail" ring of
 * the indicated queue, and converts the buffer's s/g vectors into OUT (e.g. guest-to-host)
 * components and IN (host-to-guest) components.
 *
 * The caller is responsible for GCPhys to host virtual memory conversions. If the
 * virtq buffer being peeked at is "consumed", virtioCoreR3VirtqAvailBufNext() must
 * be called, and in that case virtioCoreR3VirtqUsedBufPut() must be called to
 * complete the roundtrip virtq transaction.
 *
 * @param pDevIns     The device instance.
 * @param pVirtio     Pointer to the shared virtio state.
 * @param uVirtqNbr   Virtq number
 * @param ppVirtqBuf  Address to store pointer to descriptor chain that contains the
 *                    pre-processed transaction information pulled from the virtq.
 *                    Returned reference must be released by calling
 *                    virtioCoreR3VirtqBufRelease().
 * @param fRemove     Flags whether to remove desc chain from queue (false = peek)
 *
 * @returns VBox status code:
 * @retval  VINF_SUCCESS         Success
 * @retval  VERR_INVALID_STATE   VirtIO not in ready state (asserted).
 * @retval  VERR_NOT_AVAILABLE   If the queue is empty.
 */
int virtioCoreR3VirtqAvailBufGet(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr,
                                 PPVIRTQBUF ppVirtqBuf, bool fRemove);
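
/**
 * Roundtrip sketch (hypothetical worker code): consume one request from a virtq,
 * drain the guest's OUT data, and return a one-byte status via the used ring. The
 * queue number, request size and status layout are invented for the example.
 *
 * @code
 *  PVIRTQBUF pVirtqBuf = NULL;
 *  int rc = virtioCoreR3VirtqAvailBufGet(pDevIns, pVirtio, 0, &pVirtqBuf, true);  // fRemove=true
 *  if (rc == VINF_SUCCESS)
 *  {
 *      uint8_t abReq[64];                                      // example request size
 *      virtioCoreR3VirtqBufDrain(pVirtio, pVirtqBuf, abReq, sizeof(abReq));
 *      uint8_t uStatus = 0;                                    // ... process request, set status ...
 *      RTSGSEG aSegs[] = { { &uStatus, sizeof(uStatus) } };
 *      RTSGBUF SgBuf;
 *      RTSgBufInit(&SgBuf, aSegs, RT_ELEMENTS(aSegs));
 *      virtioCoreR3VirtqUsedBufPut(pDevIns, pVirtio, 0, &SgBuf, pVirtqBuf, true); // fFence=true
 *      virtioCoreVirtqUsedRingSync(pDevIns, pVirtio, 0);
 *      virtioCoreR3VirtqBufRelease(pVirtio, pVirtqBuf);
 *  }
 * @endcode
 */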

/**
 * Fetches a specific descriptor chain using the avail ring of the indicated queue and converts
 * the descriptor chain into its OUT (to device) and IN (to guest) components.
 *
 * The caller is responsible for GCPhys to host virtual memory conversions and *must*
 * return the virtq buffer using virtioCoreR3VirtqUsedBufPut() to complete the roundtrip
 * virtq transaction.
 *
 * @param pDevIns     The device instance.
 * @param pVirtio     Pointer to the shared virtio state.
 * @param uVirtqNbr   Virtq number
 * @param uHeadIdx    Head index of the descriptor chain to fetch.
 * @param ppVirtqBuf  Address to store pointer to descriptor chain that contains the
 *                    pre-processed transaction information pulled from the virtq.
 *                    Returned reference must be released by calling
 *                    virtioCoreR3VirtqBufRelease().
 *
 * @returns VBox status code:
 * @retval  VINF_SUCCESS         Success
 * @retval  VERR_INVALID_STATE   VirtIO not in ready state (asserted).
 * @retval  VERR_NOT_AVAILABLE   If the queue is empty.
 */
int virtioCoreR3VirtqAvailBufGet(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr,
                                 uint16_t uHeadIdx, PPVIRTQBUF ppVirtqBuf);

/**
 * Returns data to the guest to complete a transaction initiated by virtioCoreR3VirtqAvailBufGet(),
 * or by virtioCoreR3VirtqAvailBufPeek()/virtioCoreR3VirtqAvailBufNext() call pairs, ultimately
 * putting each descriptor chain pulled from the avail ring of a queue onto the used ring of the
 * queue. In the VirtIO protocol, I/O transactions are always initiated by the guest and completed
 * by the host; in other words, for the host to send any data to the guest, the guest must provide
 * buffers for the host to fill, via the avail ring of the virtq.
 *
 * At some point virtioCoreVirtqUsedRingSync() must be called to expose the data to the guest,
 * completing all pending virtioCoreR3VirtqUsedBufPut() transactions that have accumulated since
 * the last call to virtioCoreVirtqUsedRingSync().
 *
 * @note This does a write-ahead to the used ring of the guest's queue. The data
 *       written won't be seen by the guest until the next call to virtioCoreVirtqUsedRingSync().
 *
 * @param pDevIns        The device instance (for reading).
 * @param pVirtio        Pointer to the shared virtio state.
 * @param uVirtqNbr      Virtq number
 *
 * @param pSgVirtReturn  Points to scatter-gather buffer of virtual memory
 *                       segments the caller is returning to the guest.
 *
 * @param pVirtqBuf      This contains the context of the scatter-gather
 *                       buffer originally pulled from the queue.
 *
 * @param fFence         If true, put up copy fence (memory barrier) after
 *                       copying to guest phys. mem.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS         Success
 * @retval  VERR_INVALID_STATE   VirtIO not in ready state
 * @retval  VERR_NOT_AVAILABLE   Virtq is empty
 *
 * @note This function will not release any reference to pVirtqBuf. The
 *       caller must take care of that.
 */
int virtioCoreR3VirtqUsedBufPut(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, PRTSGBUF pSgVirtReturn,
                                PVIRTQBUF pVirtqBuf, bool fFence);

/**
 * Advances the index of the avail ring to the next entry in the specified virtq
 * (see virtioCoreR3VirtqAvailBufPeek()).
 *
 * @param pVirtio    Pointer to the virtio state.
 * @param uVirtqNbr  Index of queue
 */
int virtioCoreR3VirtqAvailBufNext(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr);

/**
 * Checks to see if the guest has acknowledged the device's VIRTIO_F_VERSION_1 feature.
 * If not, it's presumed to be a VirtIO legacy guest driver. Note that legacy drivers
 * may start using the device prematurely, as opposed to the rigorously sane protocol
 * prescribed by the "modern" VirtIO spec. Early access implies a legacy driver.
 * Therefore legacy mode is the assumption until feature negotiation.
 *
 * @param pVirtio    Pointer to the virtio state.
 */
int virtioCoreIsLegacyMode(PVIRTIOCORE pVirtio);

DECLINLINE(void) virtioCoreGCPhysChainInit(PVIRTIOSGBUF pGcSgBuf, PVIRTIOSGSEG paSegs, size_t cSegs)
{
    AssertPtr(pGcSgBuf);
    Assert((cSegs > 0 && RT_VALID_PTR(paSegs)) || (!cSegs && !paSegs));
    Assert(cSegs < (~(unsigned)0 >> 1));

    pGcSgBuf->paSegs = paSegs;
    pGcSgBuf->cSegs  = (unsigned)cSegs;
    pGcSgBuf->idxSeg = 0;
    if (cSegs && paSegs)
    {
        pGcSgBuf->GCPhysCur = paSegs[0].GCPhys;
        pGcSgBuf->cbSegLeft = paSegs[0].cbSeg;
    }
    else
    {
        pGcSgBuf->GCPhysCur = 0;
        pGcSgBuf->cbSegLeft = 0;
    }
}

DECLINLINE(RTGCPHYS) virtioCoreGCPhysChainGet(PVIRTIOSGBUF pGcSgBuf, size_t *pcbData)
{
    size_t   cbData;
    RTGCPHYS pGcBuf;

    /* Check that the S/G buffer has memory left. */
    if (RT_LIKELY(pGcSgBuf->idxSeg < pGcSgBuf->cSegs && pGcSgBuf->cbSegLeft))
    { /* likely */ }
    else
    {
        *pcbData = 0;
        return 0;
    }

    AssertMsg(   pGcSgBuf->cbSegLeft <= 128 * _1M
              && (RTGCPHYS)pGcSgBuf->GCPhysCur >= (RTGCPHYS)pGcSgBuf->paSegs[pGcSgBuf->idxSeg].GCPhys
              && (RTGCPHYS)pGcSgBuf->GCPhysCur + pGcSgBuf->cbSegLeft <=
                     (RTGCPHYS)pGcSgBuf->paSegs[pGcSgBuf->idxSeg].GCPhys + pGcSgBuf->paSegs[pGcSgBuf->idxSeg].cbSeg,
              ("pGcSgBuf->idxSeg=%d pGcSgBuf->cSegs=%d pGcSgBuf->GCPhysCur=%p pGcSgBuf->cbSegLeft=%zd "
               "pGcSgBuf->paSegs[%d].GCPhys=%p pGcSgBuf->paSegs[%d].cbSeg=%zd\n",
               pGcSgBuf->idxSeg, pGcSgBuf->cSegs, pGcSgBuf->GCPhysCur, pGcSgBuf->cbSegLeft,
               pGcSgBuf->idxSeg, pGcSgBuf->paSegs[pGcSgBuf->idxSeg].GCPhys, pGcSgBuf->idxSeg,
               pGcSgBuf->paSegs[pGcSgBuf->idxSeg].cbSeg));

    cbData = RT_MIN(*pcbData, pGcSgBuf->cbSegLeft);
    pGcBuf = pGcSgBuf->GCPhysCur;
    pGcSgBuf->cbSegLeft -= cbData;
    if (!pGcSgBuf->cbSegLeft)
    {
        pGcSgBuf->idxSeg++;

        if (pGcSgBuf->idxSeg < pGcSgBuf->cSegs)
        {
            pGcSgBuf->GCPhysCur = pGcSgBuf->paSegs[pGcSgBuf->idxSeg].GCPhys;
            pGcSgBuf->cbSegLeft = pGcSgBuf->paSegs[pGcSgBuf->idxSeg].cbSeg;
        }
        *pcbData = cbData;
    }
    else
        pGcSgBuf->GCPhysCur = pGcSgBuf->GCPhysCur + cbData;

    return pGcBuf;
}

DECLINLINE(void) virtioCoreGCPhysChainReset(PVIRTIOSGBUF pGcSgBuf)
{
    AssertPtrReturnVoid(pGcSgBuf);

    pGcSgBuf->idxSeg = 0;
    if (pGcSgBuf->cSegs)
    {
        pGcSgBuf->GCPhysCur = pGcSgBuf->paSegs[0].GCPhys;
        pGcSgBuf->cbSegLeft = pGcSgBuf->paSegs[0].cbSeg;
    }
    else
    {
        pGcSgBuf->GCPhysCur = 0;
        pGcSgBuf->cbSegLeft = 0;
    }
}

DECLINLINE(RTGCPHYS) virtioCoreGCPhysChainAdvance(PVIRTIOSGBUF pGcSgBuf, size_t cbAdvance)
{
    AssertReturn(pGcSgBuf, 0);

    size_t cbLeft = cbAdvance;
    while (cbLeft)
    {
        size_t cbThisAdvance = cbLeft;
        virtioCoreGCPhysChainGet(pGcSgBuf, &cbThisAdvance);
        if (!cbThisAdvance)
            break;

        cbLeft -= cbThisAdvance;
    }
    return cbAdvance - cbLeft;
}

DECLINLINE(RTGCPHYS) virtioCoreGCPhysChainGetNextSeg(PVIRTIOSGBUF pGcSgBuf, size_t *pcbSeg)
{
    AssertReturn(pGcSgBuf, 0);
    AssertPtrReturn(pcbSeg, 0);

    if (!*pcbSeg)
        *pcbSeg = pGcSgBuf->cbSegLeft;

    return virtioCoreGCPhysChainGet(pGcSgBuf, pcbSeg);
}

#define VIRTQNAME(a_pVirtio, a_uVirtq) ((a_pVirtio)->aVirtqueues[(a_uVirtq)].szName)

/**
 * Adds some bytes to a virtq (s/g) buffer, converting them from virtual memory to GCPhys.
 *
 * To be performant it is left to the caller to validate the size of the buffer with regard
 * to data being pulled from it to avoid overruns/underruns.
 *
 * @param pVirtio    Pointer to the shared virtio state.
 * @param pVirtqBuf  Output: virtq buffer that receives the bytes
 * @param pv         Input: virtual memory buffer containing the bytes to add
 * @param cb         Number of bytes to add to the s/g buffer.
 */
DECLINLINE(void) virtioCoreR3VirtqBufFill(PVIRTIOCORE pVirtio, PVIRTQBUF pVirtqBuf, void *pv, size_t cb)
{
    uint8_t *pb = (uint8_t *)pv;
    size_t cbLim = RT_MIN(pVirtqBuf->cbPhysReturn, cb);
    while (cbLim)
    {
        size_t cbSeg = cbLim;
        RTGCPHYS GCPhys = virtioCoreGCPhysChainGetNextSeg(pVirtqBuf->pSgPhysReturn, &cbSeg);
        PDMDevHlpPCIPhysWrite(pVirtio->pDevInsR3, GCPhys, pb, cbSeg);
        pb    += cbSeg;
        cbLim -= cbSeg;
        pVirtqBuf->cbPhysReturn -= cbSeg;   /* track remaining capacity of the return (IN) buffer */
    }
    LogFunc(("Added %d/%d bytes to %s buffer, head idx: %u (%d bytes remain)\n",
             cb - cbLim, cb, VIRTQNAME(pVirtio, pVirtqBuf->uVirtq),
             pVirtqBuf->uHeadIdx, pVirtqBuf->cbPhysReturn));
}

/**
 * Extracts some bytes out of a virtq (s/g) buffer, converting them from GCPhys to virtual memory.
 *
 * To be performant it is left to the caller to validate the size of the buffer with regard
 * to data being pulled from it to avoid overruns/underruns.
 *
 * @param pVirtio    Pointer to the shared virtio state.
 * @param pVirtqBuf  Input: virtq buffer
 * @param pv         Output: virtual memory buffer to receive bytes
 * @param cb         Number of bytes to drain from buffer
 */
DECLINLINE(void) virtioCoreR3VirtqBufDrain(PVIRTIOCORE pVirtio, PVIRTQBUF pVirtqBuf, void *pv, size_t cb)
{
    uint8_t *pb = (uint8_t *)pv;
    size_t cbLim = RT_MIN(pVirtqBuf->cbPhysSend, cb);
    while (cbLim)
    {
        size_t cbSeg = cbLim;
        RTGCPHYS GCPhys = virtioCoreGCPhysChainGetNextSeg(pVirtqBuf->pSgPhysSend, &cbSeg);
        PDMDevHlpPCIPhysRead(pVirtio->pDevInsR3, GCPhys, pb, cbSeg);
        pb    += cbSeg;
        cbLim -= cbSeg;
        pVirtqBuf->cbPhysSend -= cbSeg;
    }
    LogFunc(("Drained %d/%d bytes from %s buffer, head idx: %u (%d bytes left)\n",
             cb - cbLim, cb, VIRTQNAME(pVirtio, pVirtqBuf->uVirtq),
             pVirtqBuf->uHeadIdx, pVirtqBuf->cbPhysSend));
}

#undef VIRTQNAME
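
/**
 * Usage sketch (illustrative): a simple request/response exchange in which the device
 * drains a guest-written header from the OUT portion of a virtq buffer and fills a
 * completion status into its IN portion. MYREQHDR and MYDEV_STATUS_OK are hypothetical.
 *
 * @code
 *  MYREQHDR ReqHdr;
 *  virtioCoreR3VirtqBufDrain(pVirtio, pVirtqBuf, &ReqHdr, sizeof(ReqHdr));
 *  // ... handle the request described by ReqHdr ...
 *  uint8_t uStatus = MYDEV_STATUS_OK;
 *  virtioCoreR3VirtqBufFill(pVirtio, pVirtqBuf, &uStatus, sizeof(uStatus));
 * @endcode
 */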

/**
 * Updates the indicated virtq's "used ring" descriptor index to match the "shadow" index that
 * tracks pending buffers added to the used ring, thus exposing all the data added by
 * virtioCoreR3VirtqUsedBufPut() to the "used ring" since the last virtioCoreVirtqUsedRingSync().
 *
 * This *must* be invoked after one or more virtioCoreR3VirtqUsedBufPut() calls to inform the
 * guest driver there is data in the queue. If enabled by the guest, IRQ or MSI-X signalling
 * will notify the guest proactively; otherwise the guest detects updates by polling
 * (see VirtIO 1.0, Section 2.4 "Virtqueues").
 *
 * @param pDevIns    The device instance.
 * @param pVirtio    Pointer to the shared virtio state.
 * @param uVirtqNbr  Virtq number
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS         Success
 * @retval  VERR_INVALID_STATE   VirtIO not in ready state
 */
int virtioCoreVirtqUsedRingSync(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr);

/**
 * Retains a reference to the given descriptor chain.
 *
 * @param pVirtqBuf  The descriptor chain to reference.
 *
 * @returns New reference count.
 * @retval  UINT32_MAX on invalid parameter.
 */
uint32_t virtioCoreR3VirtqBufRetain(PVIRTQBUF pVirtqBuf);

/**
 * Releases a reference to the given descriptor chain.
 *
 * @param pVirtio    Pointer to the shared virtio state.
 * @param pVirtqBuf  The descriptor chain to reference. NULL is quietly
 *                   ignored (returns 0).
 * @returns New reference count.
 * @retval  0 if freed or invalid parameter.
 */
uint32_t virtioCoreR3VirtqBufRelease(PVIRTIOCORE pVirtio, PVIRTQBUF pVirtqBuf);

/**
 * Returns the queue enable state.
 *
 * @param pVirtio    Pointer to the virtio state.
 * @param uVirtqNbr  Virtq number.
 *
 * @returns true if the queue is enabled, false otherwise.
 */
DECLINLINE(bool) virtioCoreIsVirtqEnabled(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
{
    Assert(uVirtqNbr < RT_ELEMENTS(pVirtio->aVirtqueues));
    if (pVirtio->fLegacyDriver)
        return pVirtio->aVirtqueues[uVirtqNbr].GCPhysVirtqDesc != 0;
    return pVirtio->aVirtqueues[uVirtqNbr].uEnable != 0;
}

/**
 * Gets the name of the queue, via uVirtqNbr, assigned during virtioCoreR3VirtqAttach().
 *
 * @param pVirtio    Pointer to the virtio state.
 * @param uVirtqNbr  Virtq number.
 *
 * @returns Pointer to read-only queue name.
 */
DECLINLINE(const char *) virtioCoreVirtqGetName(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
{
    Assert((size_t)uVirtqNbr < RT_ELEMENTS(pVirtio->aVirtqueues));
    return pVirtio->aVirtqueues[uVirtqNbr].szName;
}

/**
 * Gets the bitmask of features VirtIO is running with. This is called by the device-specific
 * VirtIO implementation to identify this device's operational configuration after features
 * have been negotiated with the guest VirtIO driver. Feature negotiation entails the host
 * indicating to the guest which features it supports, then the guest accepting, from among
 * those offered, which features it will enable. That becomes the agreement between the host
 * and the guest. The bitmask containing VirtIO core features plus device-specific features
 * is provided as a parameter to virtioCoreR3Init() by the host-side device-specific virtio
 * implementation.
 *
 * @param pVirtio    Pointer to the virtio state.
 *
 * @returns Features the guest driver has accepted, finalizing the operational features.
 */
DECLINLINE(uint64_t) virtioCoreGetNegotiatedFeatures(PVIRTIOCORE pVirtio)
{
    return pVirtio->uDriverFeatures;
}

/**
 * Gets the name of the VM state change associated with the enumeration variable.
 *
 * @param enmState   VM state (enumeration value)
 *
 * @returns Associated text.
 */
const char *virtioCoreGetStateChangeText(VIRTIOVMSTATECHANGED enmState);

/**
 * Debug assist code for any consumer that inherits VIRTIOCORE.
 * Logs a memory-mapped I/O input or output value.
 *
 * This is to be invoked by macros that assume they are invoked in functions with
 * the relevant arguments. (See VirtioCore.cpp).
 *
 * It is exposed via the API so inheriting device-specific clients can provide similar
 * logging capabilities for a consistent look-and-feel.
 *
 * @param pszFunc      To avoid displaying this function's name via __FUNCTION__ or LogFunc()
 * @param pszMember    Name of struct member
 * @param uMemberSize  Size of the struct member
 * @param pv           Pointer to value
 * @param cb           Size of value
 * @param uOffset      Offset into member where value starts
 * @param fWrite       True if write I/O
 * @param fHasIndex    True if the member is indexed
 * @param idx          The index if fHasIndex
 */
void virtioCoreLogMappedIoValue(const char *pszFunc, const char *pszMember, uint32_t uMemberSize,
                                const void *pv, uint32_t cb, uint32_t uOffset,
                                int fWrite, int fHasIndex, uint32_t idx);

/**
 * Debug assist for any consumer.
 *
 * Does a formatted hex dump using Log(()); recommend using the VIRTIO_HEX_DUMP() macro to
 * control enabling of logging efficiently.
 *
 * @param pv        Pointer to buffer to dump contents of
 * @param cb        Count of characters to dump from buffer
 * @param uBase     Base address of per-row address prefixing of hex output
 * @param pszTitle  Optional title. If present displays title that lists
 *                  provided text with value of cb to indicate size next to it.
 */
void virtioCoreHexDump(uint8_t *pv, uint32_t cb, uint32_t uBase, const char *pszTitle);

/**
 * Debug assist for any consumer device code.
 *
 * Does a hex dump of memory in guest physical context.
 *
 * @param pDevIns   The device instance.
 * @param GCPhys    Guest physical address of buffer to dump contents of
 * @param cb        Count of characters to dump from buffer
 * @param uBase     Base address of per-row address prefixing of hex output
 * @param pszTitle  Optional title. If present displays title that lists
 *                  provided text with value of cb to indicate size next to it.
 */
void virtioCoreGCPhysHexDump(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, uint16_t cb, uint32_t uBase, const char *pszTitle);

/**
 * The following API functions identically to the similarly-named calls pertaining to the RTSGBUF.
 */

/**
 * Calculates the length of a GCPhys s/g buffer by tallying the size of each segment.
 *
 * @param pGcSgBuf   Guest Context (GCPhys) S/G buffer to calculate length of
 */
DECLINLINE(size_t) virtioCoreGCPhysChainCalcBufSize(PCVIRTIOSGBUF pGcSgBuf)
{
    size_t   cb = 0;
    unsigned i  = pGcSgBuf->cSegs;
    while (i-- > 0)
        cb += pGcSgBuf->paSegs[i].cbSeg;
    return cb;
}

/**
 * This VirtIO transitional device supports "modern" (rev 1.0+) as well as "legacy" (e.g. < 1.0) VirtIO drivers.
 * Some legacy guest drivers are known to mishandle PCI bus mastering, wherein the PCI flavor of GC phys
 * access functions can't be used. The following wrappers select the memory access method based on whether the
 * device is operating in legacy mode or not.
 */
DECLINLINE(int) virtioCoreGCPhysWrite(PVIRTIOCORE pVirtio, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbWrite)
{
    int rc;
    if (virtioCoreIsLegacyMode(pVirtio))
        rc = PDMDevHlpPhysWrite(pDevIns, GCPhys, pvBuf, cbWrite);
    else
        rc = PDMDevHlpPCIPhysWrite(pDevIns, GCPhys, pvBuf, cbWrite);
    return rc;
}

DECLINLINE(int) virtioCoreGCPhysRead(PVIRTIOCORE pVirtio, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
{
    int rc;
    if (virtioCoreIsLegacyMode(pVirtio))
        rc = PDMDevHlpPhysRead(pDevIns, GCPhys, pvBuf, cbRead);
    else
        rc = PDMDevHlpPCIPhysRead(pDevIns, GCPhys, pvBuf, cbRead);
    return rc;
}

/** Misc VM and PDM boilerplate */
int  virtioCoreR3SaveExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM);
int  virtioCoreR3LoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM);
void virtioCoreR3VmStateChanged(PVIRTIOCORE pVirtio, VIRTIOVMSTATECHANGED enmState);
void virtioCoreR3Term(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC);
int  virtioCoreRZInit(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio);

/*
 * The following macros assist with handling/logging MMIO accesses to the VirtIO dev-specific
 * config area, in a way that enhances code readability and debug logging consistency.
 *
 * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
 */

#ifdef LOG_ENABLED

# define VIRTIO_DEV_CONFIG_LOG_ACCESS(member, tCfgStruct, uOffsetOfAccess) \
    if (LogIs7Enabled()) { \
        uint32_t uMbrOffset = uOffsetOfAccess - RT_UOFFSETOF(tCfgStruct, member); \
        uint32_t uMbrSize   = RT_SIZEOFMEMB(tCfgStruct, member); \
        virtioCoreLogMappedIoValue(__FUNCTION__, #member, uMbrSize, pv, cb, uMbrOffset, fWrite, false, 0); \
    }

# define VIRTIO_DEV_CONFIG_LOG_INDEXED_ACCESS(member, tCfgStruct, uOffsetOfAccess, uIdx) \
    if (LogIs7Enabled()) { \
        uint32_t uMbrOffset = uOffsetOfAccess - RT_UOFFSETOF(tCfgStruct, member); \
        uint32_t uMbrSize   = RT_SIZEOFMEMB(tCfgStruct, member); \
        virtioCoreLogMappedIoValue(__FUNCTION__, #member, uMbrSize, pv, cb, uMbrOffset, fWrite, true, uIdx); \
    }
#else
# define VIRTIO_DEV_CONFIG_LOG_ACCESS(member, tCfgStruct, uOffsetOfAccess)               do { } while (0)
# define VIRTIO_DEV_CONFIG_LOG_INDEXED_ACCESS(member, tCfgStruct, uOffsetOfAccess, uIdx) do { } while (0)
#endif

DECLINLINE(bool) virtioCoreMatchMember(uint32_t uOffset, uint32_t cb, uint32_t uMemberOff,
                                       size_t uMemberSize, bool fSubFieldMatch)
{
    /* Test for 8-byte field (always accessed as two 32-bit components) */
    if (uMemberSize == 8)
        return (cb == sizeof(uint32_t)) && (uOffset == uMemberOff || uOffset == (uMemberOff + sizeof(uint32_t)));

    if (fSubFieldMatch)
        return (uOffset >= uMemberOff) && (cb <= uMemberSize - (uOffset - uMemberOff));

    /* Test for exact match */
    return (uOffset == uMemberOff) && (cb == uMemberSize);
}

/**
 * Yields boolean true if uOffsetOfAccess falls within bytes of specified member of config struct.
 */
#define VIRTIO_DEV_CONFIG_SUBMATCH_MEMBER(member, tCfgStruct, uOffsetOfAccess) \
    virtioCoreMatchMember(uOffsetOfAccess, cb, \
                          RT_UOFFSETOF(tCfgStruct, member), \
                          RT_SIZEOFMEMB(tCfgStruct, member), true /* fSubFieldMatch */)

#define VIRTIO_DEV_CONFIG_MATCH_MEMBER(member, tCfgStruct, uOffsetOfAccess) \
    virtioCoreMatchMember(uOffsetOfAccess, cb, \
                          RT_UOFFSETOF(tCfgStruct, member), \
                          RT_SIZEOFMEMB(tCfgStruct, member), false /* fSubFieldMatch */)


/**
 * Copies bytes into or out of the specified member field of the config struct (based on fWrite),
 * using the memory described by cb and pv.
 *
 * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
 */
#define VIRTIO_DEV_CONFIG_ACCESS(member, tCfgStruct, uOffsetOfAccess, pCfgStruct) \
    do \
    { \
        uint32_t uOffsetInMember = uOffsetOfAccess - RT_UOFFSETOF(tCfgStruct, member); \
        if (fWrite) \
            memcpy(((char *)&(pCfgStruct)->member) + uOffsetInMember, pv, cb); \
        else \
            memcpy(pv, ((const char *)&(pCfgStruct)->member) + uOffsetInMember, cb); \
        VIRTIO_DEV_CONFIG_LOG_ACCESS(member, tCfgStruct, uOffsetOfAccess); \
    } while(0)

/**
 * Copies bytes into the memory described by cb, pv from the specified member field of the
 * config struct. The operation is a nop and logs an error if the implied parameter fWrite
 * is true.
 *
 * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
 */
#define VIRTIO_DEV_CONFIG_ACCESS_READONLY(member, tCfgStruct, uOffsetOfAccess, pCfgStruct) \
    do \
    { \
        uint32_t uOffsetInMember = uOffsetOfAccess - RT_UOFFSETOF(tCfgStruct, member); \
        if (fWrite) \
            LogFunc(("Guest attempted to write readonly virtio config struct (member %s)\n", #member)); \
        else \
        { \
            memcpy(pv, ((const char *)&(pCfgStruct)->member) + uOffsetInMember, cb); \
            VIRTIO_DEV_CONFIG_LOG_ACCESS(member, tCfgStruct, uOffsetOfAccess); \
        } \
    } while(0)
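
/**
 * Dispatch sketch (hypothetical config struct MYDEVCONFIG and members, illustrative):
 * how a pfnDevCapRead/pfnDevCapWrite handler can route an access with the matching and
 * access macros above. Per the macro contracts, cb, pv and fWrite are defined by the
 * handler.
 *
 * @code
 *  if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uMtu, MYDEVCONFIG, uOffsetOfAccess))
 *      VIRTIO_DEV_CONFIG_ACCESS(uMtu, MYDEVCONFIG, uOffsetOfAccess, pConfig);
 *  else if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uStatus, MYDEVCONFIG, uOffsetOfAccess))
 *      VIRTIO_DEV_CONFIG_ACCESS_READONLY(uStatus, MYDEVCONFIG, uOffsetOfAccess, pConfig);
 *  else
 *      LogFunc(("Bad guest access to MYDEVCONFIG: off=%u cb=%u\n", uOffsetOfAccess, cb));
 * @endcode
 */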

/**
 * Copies bytes into or out of the specified indexed member field of the config struct (based
 * on fWrite), using the memory described by cb and pv.
 *
 * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
 */
#define VIRTIO_DEV_CONFIG_ACCESS_INDEXED(member, uIdx, tCfgStruct, uOffsetOfAccess, pCfgStruct) \
    do \
    { \
        uint32_t uOffsetInMember = uOffsetOfAccess - RT_UOFFSETOF(tCfgStruct, member); \
        if (fWrite) \
            memcpy(((char *)&(pCfgStruct[uIdx].member)) + uOffsetInMember, pv, cb); \
        else \
            memcpy(pv, ((const char *)&(pCfgStruct[uIdx].member)) + uOffsetInMember, cb); \
        VIRTIO_DEV_CONFIG_LOG_INDEXED_ACCESS(member, tCfgStruct, uOffsetOfAccess, uIdx); \
    } while(0)

/**
 * Copies bytes into the memory described by cb, pv from the specified indexed member field
 * of the config struct. The operation is a nop and logs an error if the implied parameter
 * fWrite is true.
 *
 * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
 */
#define VIRTIO_DEV_CONFIG_ACCESS_INDEXED_READONLY(member, uIdx, tCfgStruct, uOffsetOfAccess, pCfgStruct) \
    do \
    { \
        uint32_t uOffsetInMember = uOffsetOfAccess - RT_UOFFSETOF(tCfgStruct, member); \
        if (fWrite) \
            LogFunc(("Guest attempted to write readonly virtio config struct (member %s)\n", #member)); \
        else \
        { \
            memcpy(pv, ((const char *)&(pCfgStruct[uIdx].member)) + uOffsetInMember, cb); \
            VIRTIO_DEV_CONFIG_LOG_INDEXED_ACCESS(member, tCfgStruct, uOffsetOfAccess, uIdx); \
        } \
    } while(0)

/** @} */

#endif /* !VBOX_INCLUDED_SRC_VirtIO_VirtioCore_h */