VirtualBox

source: vbox/trunk/src/VBox/Devices/VirtIO/VirtioCore.h@ 92939

Last change on this file since 92939 was 92939, checked in by vboxsync, 3 years ago

Improve transitional behavior, and save/load exec code. Some Rx buffer handling code optimization for speed, and make it easier to understand and maintain. Add missing function comments and improve others. Try to make debug logging even clearer and more succinct. And any other miscellaneous small improvements I could find. See BugRef(8651) Comment #171

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 65.6 KB
Line 
1/* $Id: VirtioCore.h 92939 2021-12-15 15:51:28Z vboxsync $ */
2
3/** @file
4 * VirtioCore.h - Virtio Declarations
5 */
6
7/*
8 * Copyright (C) 2009-2020 Oracle Corporation
9 *
10 * This file is part of VirtualBox Open Source Edition (OSE), as
11 * available from http://www.virtualbox.org. This file is free software;
12 * you can redistribute it and/or modify it under the terms of the GNU
13 * General Public License (GPL) as published by the Free Software
14 * Foundation, in version 2 as it comes in the "COPYING" file of the
15 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
16 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
17 */
18
19#ifndef VBOX_INCLUDED_SRC_VirtIO_VirtioCore_h
20#define VBOX_INCLUDED_SRC_VirtIO_VirtioCore_h
21#ifndef RT_WITHOUT_PRAGMA_ONCE
22# pragma once
23#endif
24
25#include <iprt/ctype.h>
26#include <iprt/sg.h>
27#include <iprt/types.h>
28
#ifdef LOG_ENABLED
/** Hex-dumps @a cb bytes at @a pv via virtioCoreHexDump(), but only when @a logLevel
 *  is enabled for the current LOG_GROUP.  Compiles to a no-op in non-logging builds. */
# define VIRTIO_HEX_DUMP(logLevel, pv, cb, base, title) \
    do { \
        if (LogIsItEnabled(logLevel, LOG_GROUP)) \
            virtioCoreHexDump((pv), (cb), (base), (title)); \
    } while (0)
#else
# define VIRTIO_HEX_DUMP(logLevel, pv, cb, base, title) do { } while (0)
#endif
38
/** Marks the start of the virtio saved state (just for sanity). */
#define VIRTIO_SAVEDSTATE_MARKER                        UINT64_C(0x1133557799bbddff)

/** Pointer to the shared VirtIO state. */
typedef struct VIRTIOCORE *PVIRTIOCORE;
/** Pointer to the ring-3 VirtIO state. */
typedef struct VIRTIOCORER3 *PVIRTIOCORER3;
/** Pointer to the ring-0 VirtIO state. */
typedef struct VIRTIOCORER0 *PVIRTIOCORER0;
/** Pointer to the raw-mode VirtIO state. */
typedef struct VIRTIOCORERC *PVIRTIOCORERC;
/** Pointer to the instance data for the current context. */
typedef CTX_SUFF(PVIRTIOCORE) PVIRTIOCORECC;

#define VIRTIO_MAX_VIRTQ_NAME_SIZE          32          /**< Maximum length of a queue name */
#define VIRTQ_SIZE                        1024          /**< Max size (# entries) of a virtq */
#define VIRTQ_MAX_COUNT                     24          /**< Max queues we allow guest to create */
#define VIRTIO_NOTIFY_OFFSET_MULTIPLIER      2          /**< VirtIO Notify Cap. MMIO config param */
#define VIRTIO_REGION_LEGACY_IO              0          /**< BAR for VirtIO legacy drivers MBZ */
#define VIRTIO_REGION_PCI_CAP                2          /**< BAR for VirtIO Cap. MMIO (impl specific) */
#define VIRTIO_REGION_MSIX_CAP               0          /**< BAR for MSI-X handling */
#define VIRTIO_PAGE_SIZE                  4096          /**< Page size used by VirtIO specification */
61
/**
 * @todo Move the following virtioCoreGCPhysChain*() functions, which mimic the
 * functionality of the related S/G utilities in the RT library, into some common
 * location in the VirtualBox source tree and out of this code.
 *
 * They behave identically to the S/G utilities in the RT library, except they work with the
 * GCPhys data type specifically instead of void *, to avoid potentially disastrous mismatch
 * between sizeof(void *) and sizeof(GCPhys).
 */
typedef struct VIRTIOSGSEG                              /**< An S/G entry */
{
    RTGCPHYS  GCPhys;                                   /**< Guest-physical address of the segment buffer */
    size_t    cbSeg;                                    /**< Size of the segment buffer */
} VIRTIOSGSEG;

typedef VIRTIOSGSEG *PVIRTIOSGSEG, **PPVIRTIOSGSEG;
typedef const VIRTIOSGSEG *PCVIRTIOSGSEG;

/** A guest-physical scatter/gather buffer: an array of VIRTIOSGSEG entries plus
 *  a cursor (idxSeg/GCPhysCur/cbSegLeft) tracking the current read/write position. */
typedef struct VIRTIOSGBUF
{
    PVIRTIOSGSEG paSegs;                                /**< Pointer to the scatter/gather array */
    unsigned  cSegs;                                    /**< Number of segs in scatter/gather array */
    unsigned  idxSeg;                                   /**< Current segment we are in */
    RTGCPHYS  GCPhysCur;                                /**< Ptr to byte within the current seg */
    size_t    cbSegLeft;                                /**< # of bytes left in the current segment */
} VIRTIOSGBUF;

typedef VIRTIOSGBUF *PVIRTIOSGBUF, **PPVIRTIOSGBUF;
typedef const VIRTIOSGBUF *PCVIRTIOSGBUF;
91
/**
 * VirtIO buffers are descriptor chains (e.g. scatter-gather vectors).  A VirtIO buffer is referred to by the index
 * of its head descriptor.  Each descriptor optionally chains to another descriptor, and so on.
 *
 * For any given descriptor, each length and GCPhys pair in the chain represents either an OUT segment (e.g. guest-to-host)
 * or an IN segment (host-to-guest).
 *
 * A VIRTQBUF is created and returned from a call to either virtioCoreR3VirtqAvailBufPeek() or virtioCoreR3VirtqAvailBufGet().
 *
 * Those functions consolidate the VirtIO descriptor chain into a single representation where:
 *
 *     pSgPhysSend    GCPhys s/g buffer containing all of the (VirtIO) OUT descriptors
 *     pSgPhysReturn  GCPhys s/g buffer containing all of the (VirtIO) IN  descriptors
 *
 * The OUT descriptors are data sent from guest to host (dev-specific commands and/or data).
 * The IN are to be filled with data (converted to physical) on host, to be returned to guest.
 */
typedef struct VIRTQBUF
{
    uint32_t            u32Magic;                       /**< Magic value, VIRTQBUF_MAGIC. */
    uint16_t            uVirtq;                         /**< VirtIO index of associated virtq */
    uint16_t            pad;                            /**< Explicit structure alignment padding */
    uint32_t volatile   cRefs;                          /**< Reference counter. */
    uint32_t            uHeadIdx;                       /**< Head idx of associated desc chain */
    size_t              cbPhysSend;                     /**< Total size of src buffer */
    PVIRTIOSGBUF        pSgPhysSend;                    /**< Phys S/G buf for data from guest */
    size_t              cbPhysReturn;                   /**< Total size of dst buffer */
    PVIRTIOSGBUF        pSgPhysReturn;                  /**< Phys S/G buf to store result for guest */

    /** @name Internal (bird combined 5 allocations into a single), fingers off.
     * @{ */
    VIRTIOSGBUF         SgBufIn;                        /**< S/G buffer backing pSgPhysReturn (IN) */
    VIRTIOSGBUF         SgBufOut;                       /**< S/G buffer backing pSgPhysSend (OUT) */
    VIRTIOSGSEG         aSegsIn[VIRTQ_SIZE];            /**< Segment storage for SgBufIn */
    VIRTIOSGSEG         aSegsOut[VIRTQ_SIZE];           /**< Segment storage for SgBufOut */
    /** @} */
} VIRTQBUF_T;

/** Pointers to a Virtio descriptor chain. */
typedef VIRTQBUF_T *PVIRTQBUF, **PPVIRTQBUF;

/** Magic value for VIRTQBUF_T::u32Magic. */
#define VIRTQBUF_MAGIC             UINT32_C(0x19600219)
136
/** PCI configuration space values the parent device supplies to virtioCoreR3Init(). */
typedef struct VIRTIOPCIPARAMS
{
    uint16_t  uDeviceId;                            /**< PCI Cfg Device ID */
    uint16_t  uClassBase;                           /**< PCI Cfg Base Class */
    uint16_t  uClassSub;                            /**< PCI Cfg Subclass */
    uint16_t  uClassProg;                           /**< PCI Cfg Programming Interface Class */
    uint16_t  uSubsystemId;                         /**< PCI Cfg Card Manufacturer Vendor ID */
    uint16_t  uInterruptLine;                       /**< PCI Cfg Interrupt line (NOTE(review): 8-bit PCI register; uint16_t here — confirm intended) */
    uint16_t  uInterruptPin;                        /**< PCI Cfg Interrupt pin  (NOTE(review): 8-bit PCI register; uint16_t here — confirm intended) */
} VIRTIOPCIPARAMS, *PVIRTIOPCIPARAMS;
147
148
/* Virtio Platform Independent Reserved Feature Bits (see 1.1 specification section 6) */

#define VIRTIO_F_NOTIFY_ON_EMPTY            RT_BIT_64(24)   /**< Legacy feature: Force intr if no AVAIL */
#define VIRTIO_F_ANY_LAYOUT                 RT_BIT_64(27)   /**< Doc bug: Goes under two names in spec */
#define VIRTIO_F_RING_INDIRECT_DESC         RT_BIT_64(28)   /**< Doc bug: Goes under two names in spec */
#define VIRTIO_F_INDIRECT_DESC              RT_BIT_64(28)   /**< Allow descs to point to list of descs */
#define VIRTIO_F_RING_EVENT_IDX             RT_BIT_64(29)   /**< Doc bug: Goes under two names in spec */
#define VIRTIO_F_EVENT_IDX                  RT_BIT_64(29)   /**< Allow notification disable for n elems */
#define VIRTIO_F_BAD_FEATURE                RT_BIT_64(30)   /**< QEMU kludge.  UNUSED as of >= VirtIO 1.0 */
#define VIRTIO_F_VERSION_1                  RT_BIT_64(32)   /**< Required feature bit for 1.0 devices */
#define VIRTIO_F_ACCESS_PLATFORM            RT_BIT_64(33)   /**< Funky guest mem access   (VirtIO 1.1 NYI) */
#define VIRTIO_F_RING_PACKED                RT_BIT_64(34)   /**< Packed Queue Layout      (VirtIO 1.1 NYI) */
#define VIRTIO_F_IN_ORDER                   RT_BIT_64(35)   /**< Honor guest buf order    (VirtIO 1.1 NYI) */
#define VIRTIO_F_ORDER_PLATFORM             RT_BIT_64(36)   /**< Host mem access honored  (VirtIO 1.1 NYI) */
#define VIRTIO_F_SR_IOV                     RT_BIT_64(37)   /**< Dev Single Root I/O virt (VirtIO 1.1 NYI) */
#define VIRTIO_F_NOTIFICAITON_DATA          RT_BIT_64(38)   /**< Driver passes extra data (VirtIO 1.1 NYI).
                                                             *   Misspelled original name; kept for source
                                                             *   compatibility -- prefer the alias below. */
/** Correctly spelled alias for the (misspelled) VIRTIO_F_NOTIFICAITON_DATA above. */
#define VIRTIO_F_NOTIFICATION_DATA          VIRTIO_F_NOTIFICAITON_DATA
165
/** Feature-bit/description pair used for logging negotiated VirtIO features. */
typedef struct VIRTIO_FEATURES_LIST
{
    uint64_t fFeatureBit;                           /**< Feature bit mask (a VIRTIO_F_* value) */
    const char *pcszDesc;                           /**< Human-readable description of the feature */
} VIRTIO_FEATURES_LIST, *PVIRTIO_FEATURES_LIST;

/** Device-independent features the core itself can describe (see virtioCorePrintDeviceFeatures()).
 *  NOTE(review): 'static const' in a header duplicates this table in every including TU —
 *  presumably acceptable for a small debug/logging table; confirm intent. */
static const VIRTIO_FEATURES_LIST s_aCoreFeatures[] =
{
    { VIRTIO_F_VERSION_1,               " VERSION_1 Guest driver supports VirtIO specification V1.0+ (e.g. \"modern\")\n" },
    { VIRTIO_F_RING_EVENT_IDX,          " RING_EVENT_IDX Enables use_event and avail_event fields described in 2.4.7, 2.4.8\n" },
    { VIRTIO_F_RING_INDIRECT_DESC,      " RING_INDIRECT_DESC Driver can use descriptors with VIRTQ_DESC_F_INDIRECT flag set\n" },
};

#define VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED         ( 0 )   /**< TBD: Add VIRTIO_F_INDIRECT_DESC */
#define VIRTIO_DEV_INDEPENDENT_LEGACY_FEATURES_OFFERED  ( 0 )   /**< Only offered to legacy drivers */
181
#define VIRTIO_ISR_VIRTQ_INTERRUPT          RT_BIT_32(0)    /**< Virtq interrupt bit of ISR register */
#define VIRTIO_ISR_DEVICE_CONFIG            RT_BIT_32(1)    /**< Device configuration changed bit of ISR */
#define DEVICE_PCI_NETWORK_SUBSYSTEM        1               /**< Network Card, per VirtIO legacy spec. */
#define DEVICE_PCI_VENDOR_ID_VIRTIO         0x1AF4          /**< Guest driver locates dev via (mandatory) */
#define DEVICE_PCI_REVISION_ID_VIRTIO       0               /**< VirtIO Modern Transitional driver rev MBZ */

/** Reserved (*negotiated*) Feature Bits (e.g. device independent features, VirtIO 1.0 spec, section 6) */

#define VIRTIO_MSI_NO_VECTOR                0xffff          /**< Vector value to disable MSI for queue */

/** @name Device Status field constants (from Virtio 1.0 spec)
 * @{ */
#define VIRTIO_STATUS_ACKNOWLEDGE           0x01            /**< Guest driver: Located this VirtIO device */
#define VIRTIO_STATUS_DRIVER                0x02            /**< Guest driver: Can drive this VirtIO dev. */
#define VIRTIO_STATUS_DRIVER_OK             0x04            /**< Guest driver: Driver set-up and ready */
#define VIRTIO_STATUS_FEATURES_OK           0x08            /**< Guest driver: Feature negotiation done */
#define VIRTIO_STATUS_FAILED                0x80            /**< Guest driver: Fatal error, gave up */
#define VIRTIO_STATUS_DEVICE_NEEDS_RESET    0x40            /**< Device experienced unrecoverable error */
/** @} */
199
/** VM lifecycle transitions reported to the VirtIO core (handler not visible in this header). */
typedef enum VIRTIOVMSTATECHANGED
{
    kvirtIoVmStateChangedInvalid = 0,               /**< Customary invalid zero value */
    kvirtIoVmStateChangedReset,                     /**< VM reset */
    kvirtIoVmStateChangedSuspend,                   /**< VM suspend */
    kvirtIoVmStateChangedPowerOff,                  /**< VM power-off */
    kvirtIoVmStateChangedResume,                    /**< VM resume */
    kvirtIoVmStateChangedFor32BitHack = 0x7fffffff  /**< Forces the compiler to use 32 bits for the enum */
} VIRTIOVMSTATECHANGED;
209
/* Virtio Device PCI capability type codes (values of VIRTIO_PCI_CAP_T::uCfgType) */
#define VIRTIO_PCI_CAP_COMMON_CFG           1               /**< Common configuration PCI capability ID */
#define VIRTIO_PCI_CAP_NOTIFY_CFG           2               /**< Notification area PCI capability ID */
#define VIRTIO_PCI_CAP_ISR_CFG              3               /**< ISR PCI capability id */
#define VIRTIO_PCI_CAP_DEVICE_CFG           4               /**< Device-specific PCI cfg capability ID */
#define VIRTIO_PCI_CAP_PCI_CFG              5               /**< PCI CFG capability ID */

#define VIRTIO_PCI_CAP_ID_VENDOR            0x09            /**< Vendor-specific PCI CFG Device Cap. ID */
218
/**
 * The following is the PCI capability struct common to all VirtIO capability types.
 */
typedef struct virtio_pci_cap
{
    /* All little-endian */
    uint8_t   uCapVndr;                             /**< Generic PCI field: PCI_CAP_ID_VNDR */
    uint8_t   uCapNext;                             /**< Generic PCI field: next ptr. */
    uint8_t   uCapLen;                              /**< Generic PCI field: capability length */
    uint8_t   uCfgType;                             /**< Identifies the structure (VIRTIO_PCI_CAP_*_CFG). */
    uint8_t   uBar;                                 /**< Where to find it (BAR index). */
    uint8_t   uPadding[3];                          /**< Pad to full dword. */
    uint32_t  uOffset;                              /**< Offset within bar.  (L.E.) */
    uint32_t  uLength;                              /**< Length of struct, in bytes. (L.E.) */
} VIRTIO_PCI_CAP_T, *PVIRTIO_PCI_CAP_T;
234
/**
 * VirtIO Legacy Capabilities' related MMIO-mapped structs (see virtio-0.9.5 spec).
 *
 * Note: virtio_pci_device_cap is dev-specific, implemented by client.  Definition unknown here.
 */
typedef struct virtio_legacy_pci_common_cfg
{
    /* Device-specific fields */
    uint32_t  uDeviceFeatures;                      /**< RO (device reports features to driver) */
    uint32_t  uDriverFeatures;                      /**< RW (driver-accepted device features) */
    uint32_t  uVirtqPfn;                            /**< RW (driver writes queue page number) */
    uint16_t  uQueueSize;                           /**< RW (queue size, 0 - 2^n) */
    uint16_t  uVirtqSelect;                         /**< RW (selects queue focus for these fields) */
    uint16_t  uQueueNotify;                         /**< RO (offset into virtqueue; see spec) */
    uint8_t   fDeviceStatus;                        /**< RW (driver writes device status, 0=reset) */
    uint8_t   fIsrStatus;                           /**< RW (driver writes ISR status, 0=reset) */
#ifdef LEGACY_MSIX_SUPPORTED
    uint16_t  uMsixConfig;                          /**< RW (driver sets MSI-X config vector) */
    uint16_t  uMsixVector;                          /**< RW (driver sets MSI-X queue vector) */
#endif
} VIRTIO_LEGACY_PCI_COMMON_CFG_T, *PVIRTIO_LEGACY_PCI_COMMON_CFG_T;
256
/**
 * VirtIO 1.0 Capabilities' related MMIO-mapped structs:
 *
 * Note: virtio_pci_device_cap is dev-specific, implemented by client.  Definition unknown here.
 */
typedef struct virtio_pci_common_cfg
{
    /* Device-specific fields */
    uint32_t  uDeviceFeaturesSelect;                /**< RW (driver selects device features) */
    uint32_t  uDeviceFeatures;                      /**< RO (device reports features to driver) */
    uint32_t  uDriverFeaturesSelect;                /**< RW (driver selects driver features) */
    uint32_t  uDriverFeatures;                      /**< RW (driver-accepted device features) */
    uint16_t  uMsixConfig;                          /**< RW (driver sets MSI-X config vector) */
    uint16_t  uNumVirtqs;                           /**< RO (device specifies max queues) */
    uint8_t   fDeviceStatus;                        /**< RW (driver writes device status, 0=reset) */
    uint8_t   uConfigGeneration;                    /**< RO (device changes when changing configs) */

    /* Virtq-specific fields (values reflect (via MMIO) info related to queue indicated by uVirtqSelect. */
    uint16_t  uVirtqSelect;                         /**< RW (selects queue focus for these fields) */
    uint16_t  uQueueSize;                           /**< RW (queue size, 0 - 2^n) */
    uint16_t  uMsixVector;                          /**< RW (driver selects MSI-X queue vector) */
    uint16_t  uEnable;                              /**< RW (driver controls usability of queue) */
    uint16_t  uNotifyOffset;                        /**< RO (offset into virtqueue; see spec) */
    uint64_t  GCPhysVirtqDesc;                      /**< RW (driver writes desc table phys addr) */
    uint64_t  GCPhysVirtqAvail;                     /**< RW (driver writes avail ring phys addr) */
    uint64_t  GCPhysVirtqUsed;                      /**< RW (driver writes used ring phys addr) */
} VIRTIO_PCI_COMMON_CFG_T, *PVIRTIO_PCI_COMMON_CFG_T;
284
/** Notification-area capability (see VirtIO 1.0, 4.1.4.4 "Notification Structure Layout"). */
typedef struct virtio_pci_notify_cap
{
    struct virtio_pci_cap pciCap;                   /**< Notification MMIO mapping capability */
    uint32_t uNotifyOffMultiplier;                  /**< notify_off_multiplier */
} VIRTIO_PCI_NOTIFY_CAP_T, *PVIRTIO_PCI_NOTIFY_CAP_T;
290
/** PCI-config-space access window capability (VIRTIO_PCI_CAP_PCI_CFG). */
typedef struct virtio_pci_cfg_cap
{
    struct virtio_pci_cap pciCap;                   /**< Cap. defines the BAR/off/len to access */
    uint8_t uPciCfgData[4];                         /**< I/O buf for above cap. */
} VIRTIO_PCI_CFG_CAP_T, *PVIRTIO_PCI_CFG_CAP_T;
296
/**
 * PCI capability data locations (PCI CFG and MMIO).
 */
typedef struct VIRTIO_PCI_CAP_LOCATIONS_T
{
    uint16_t        offMmio;                        /**< Offset of the capability's MMIO-mapped struct */
    uint16_t        cbMmio;                         /**< Size of the capability's MMIO-mapped struct */
    uint16_t        offPci;                         /**< Offset of the capability in PCI config space */
    uint16_t        cbPci;                          /**< Size of the capability in PCI config space */
} VIRTIO_PCI_CAP_LOCATIONS_T;
307
/** Per-queue state tracked by the VirtIO core (one entry per virtq in VIRTIOCORE::aVirtqueues). */
typedef struct VIRTQUEUE
{
    RTGCPHYS    GCPhysVirtqDesc;                    /**< (MMIO) Addr of virtq's desc  ring   GUEST */
    RTGCPHYS    GCPhysVirtqAvail;                   /**< (MMIO) Addr of virtq's avail ring   GUEST */
    RTGCPHYS    GCPhysVirtqUsed;                    /**< (MMIO) Addr of virtq's used  ring   GUEST */
    uint16_t    uMsixVector;                        /**< (MMIO) MSI-X vector                 GUEST */
    uint16_t    uEnable;                            /**< (MMIO) Queue enable flag            GUEST */
    uint16_t    uNotifyOffset;                      /**< (MMIO) Notification offset for queue HOST */
    uint16_t    uQueueSize;                         /**< (MMIO) Size of queue           HOST/GUEST */
    uint16_t    uAvailIdxShadow;                    /**< Consumer's position in avail ring */
    uint16_t    uUsedIdxShadow;                     /**< Consumer's position in used ring */
    uint16_t    uVirtq;                             /**< Index of this queue */
    char        szName[32];                         /**< Dev-specific name of queue (size should track VIRTIO_MAX_VIRTQ_NAME_SIZE) */
    bool        fUsedRingEvent;                     /**< Flags if used idx to notify guest reached */
    bool        fAttached;                          /**< Flags if dev-specific client attached */
} VIRTQUEUE, *PVIRTQUEUE;
324
/**
 * The core/common state of the VirtIO PCI devices, shared edition.
 */
typedef struct VIRTIOCORE
{
    char        szInstance[16];                     /**< Instance name, e.g. "VIRTIOSCSI0" */
    PPDMDEVINS  pDevInsR0;                          /**< Client device instance (ring-0) */
    PPDMDEVINS  pDevInsR3;                          /**< Client device instance (ring-3) */
    VIRTQUEUE   aVirtqueues[VIRTQ_MAX_COUNT];       /**< (MMIO) VirtIO contexts for queues */
    uint64_t    uDeviceFeatures;                    /**< (MMIO) Host features offered         HOST */
    uint64_t    uDriverFeatures;                    /**< (MMIO) Host features accepted       GUEST */
    uint32_t    fDriverFeaturesWritten;             /**< (MMIO) Host features complete tracking */
    uint32_t    uDeviceFeaturesSelect;              /**< (MMIO) hi/lo select uDeviceFeatures GUEST */
    uint32_t    uDriverFeaturesSelect;              /**< (MMIO) hi/lo select uDriverFeatures GUEST */
    uint32_t    uMsixConfig;                        /**< (MMIO) MSI-X vector                 GUEST */
    uint8_t     fDeviceStatus;                      /**< (MMIO) Device Status                GUEST */
    uint8_t     fPrevDeviceStatus;                  /**< (MMIO) Prev Device Status           GUEST */
    uint8_t     uConfigGeneration;                  /**< (MMIO) Device config sequencer       HOST */
    uint16_t    uQueueNotify;                       /**< Caches queue idx in legacy mode     GUEST */
    bool        fGenUpdatePending;                  /**< If set, update cfg gen after driver reads */
    uint8_t     uPciCfgDataOff;                     /**< Offset to PCI configuration data area */
    uint8_t     uISR;                               /**< Interrupt Status Register. */
    uint8_t     fMsiSupport;                        /**< Flag set if using MSI instead of ISR */
    uint16_t    uVirtqSelect;                       /**< (MMIO) queue selector               GUEST */
    uint32_t    fLegacyDriver;                      /**< Set if guest drv < VirtIO 1.0 and allowed */
    uint32_t    fOfferLegacy;                       /**< Set at init call from dev-specific code */

    /** @name The locations of the capability structures in PCI config space and the BAR.
     * @{ */
    VIRTIO_PCI_CAP_LOCATIONS_T LocPciCfgCap;        /**< VIRTIO_PCI_CFG_CAP_T */
    VIRTIO_PCI_CAP_LOCATIONS_T LocNotifyCap;        /**< VIRTIO_PCI_NOTIFY_CAP_T */
    VIRTIO_PCI_CAP_LOCATIONS_T LocCommonCfgCap;     /**< VIRTIO_PCI_CAP_T */
    VIRTIO_PCI_CAP_LOCATIONS_T LocIsrCap;           /**< VIRTIO_PCI_CAP_T */
    VIRTIO_PCI_CAP_LOCATIONS_T LocDeviceCap;        /**< VIRTIO_PCI_CAP_T + custom data. */
    /** @} */

    IOMMMIOHANDLE   hMmioPciCap;                    /**< MMIO handle of PCI cap. region (\#2) */
    IOMIOPORTHANDLE hLegacyIoPorts;                 /**< Handle of legacy I/O port range. */

#ifdef VBOX_WITH_STATISTICS
    /** @name Statistics
     * @{ */
    STAMCOUNTER     StatDescChainsAllocated;        /**< Desc chains (VIRTQBUF) allocated */
    STAMCOUNTER     StatDescChainsFreed;            /**< Desc chains (VIRTQBUF) freed */
    STAMCOUNTER     StatDescChainsSegsIn;           /**< IN  (host-to-guest) segments counted */
    STAMCOUNTER     StatDescChainsSegsOut;          /**< OUT (guest-to-host) segments counted */
    STAMPROFILEADV  StatReadR3;                     /**< I/O port and MMIO R3 Read profiling */
    STAMPROFILEADV  StatReadR0;                     /**< I/O port and MMIO R0 Read profiling */
    STAMPROFILEADV  StatReadRC;                     /**< I/O port and MMIO RC Read profiling */
    STAMPROFILEADV  StatWriteR3;                    /**< I/O port and MMIO R3 Write profiling */
    STAMPROFILEADV  StatWriteR0;                    /**< I/O port and MMIO R0 Write profiling */
    STAMPROFILEADV  StatWriteRC;                    /**< I/O port and MMIO RC Write profiling */
    /** @} */
#endif

} VIRTIOCORE;
381
382#define MAX_NAME 64
383
/**
 * The core/common state of the VirtIO PCI devices, ring-3 edition.
 */
typedef struct VIRTIOCORER3
{
    /** @name Callbacks filled by the device before calling virtioCoreR3Init.
     * @{ */
    /**
     * Implementation-specific client callback to report to the client when feature
     * negotiation is complete.  It should be invoked by the VirtIO core only once.
     *
     * @param   pVirtio          Pointer to the shared virtio state.
     * @param   fDriverFeatures  Bitmask of features the guest driver has accepted/declined.
     * @param   fLegacy          true if legacy mode offered and until guest driver identifies itself
     *                           as modern (e.g. VirtIO 1.0 featured)
     */
    DECLCALLBACKMEMBER(void, pfnFeatureNegotiationComplete, (PVIRTIOCORE pVirtio, uint64_t fDriverFeatures, uint32_t fLegacy));

    /**
     * Implementation-specific client callback to notify client of significant device status
     * changes.
     *
     * @param   pVirtio     Pointer to the shared virtio state.
     * @param   pVirtioCC   Pointer to the ring-3 virtio state.
     * @param   fDriverOk   True if guest driver is okay (thus queues, etc... are
     *                      valid)
     */
    DECLCALLBACKMEMBER(void, pfnStatusChanged,(PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC, uint32_t fDriverOk));

    /**
     * Implementation-specific client callback to access VirtIO Device-specific capabilities
     * (other VirtIO capabilities and features are handled in VirtIO implementation).
     *
     * @param   pDevIns     The device instance.
     * @param   offCap      Offset within device specific capabilities struct.
     * @param   pvBuf       Buffer in which to save read data.
     * @param   cbToRead    Number of bytes to read.
     */
    DECLCALLBACKMEMBER(int, pfnDevCapRead,(PPDMDEVINS pDevIns, uint32_t offCap, void *pvBuf, uint32_t cbToRead));

    /**
     * Implementation-specific client callback to access VirtIO Device-specific capabilities
     * (other VirtIO capabilities and features are handled in VirtIO implementation).
     *
     * @param   pDevIns     The device instance.
     * @param   offCap      Offset within device specific capabilities struct.
     * @param   pvBuf       Buffer with the bytes to write.
     * @param   cbWrite     Number of bytes to write.
     */
    DECLCALLBACKMEMBER(int, pfnDevCapWrite,(PPDMDEVINS pDevIns, uint32_t offCap, const void *pvBuf, uint32_t cbWrite));

    /**
     * When guest-to-host queue notifications are enabled, the guest driver notifies the host
     * that the avail queue has buffers, and this callback informs the client.
     *
     * @param   pDevIns     The device instance.
     * @param   pVirtio     Pointer to the shared virtio state.
     * @param   uVirtqNbr   Index of the notified queue
     */
    DECLCALLBACKMEMBER(void, pfnVirtqNotified,(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr));

    /** @} */

    R3PTRTYPE(PVIRTIO_PCI_CFG_CAP_T)    pPciCfgCap;     /**< Pointer to struct in PCI config area. */
    R3PTRTYPE(PVIRTIO_PCI_NOTIFY_CAP_T) pNotifyCap;     /**< Pointer to struct in PCI config area. */
    R3PTRTYPE(PVIRTIO_PCI_CAP_T)        pCommonCfgCap;  /**< Pointer to struct in PCI config area. */
    R3PTRTYPE(PVIRTIO_PCI_CAP_T)        pIsrCap;        /**< Pointer to struct in PCI config area. */
    R3PTRTYPE(PVIRTIO_PCI_CAP_T)        pDeviceCap;     /**< Pointer to struct in PCI config area. */

    uint32_t                    cbDevSpecificCfg;       /**< Size of client's dev-specific config data */
    R3PTRTYPE(uint8_t *)        pbDevSpecificCfg;       /**< Pointer to client's struct */
    R3PTRTYPE(uint8_t *)        pbPrevDevSpecificCfg;   /**< Previous read dev-specific cfg of client */
    bool                        fGenUpdatePending;      /**< If set, update cfg gen after driver reads */
    char                        pcszMmioName[MAX_NAME]; /**< MMIO mapping name (array despite pcsz prefix) */
    char                        pcszPortIoName[MAX_NAME]; /**< PORT mapping name (array despite pcsz prefix) */
} VIRTIOCORER3;
460
/**
 * The core/common state of the VirtIO PCI devices, ring-0 edition.
 */
typedef struct VIRTIOCORER0
{
    /**
     * This callback notifies the device-specific portion of this device implementation (if guest-to-host
     * queue notifications are enabled), that the guest driver has notified the host (this device)
     * that the VirtIO "avail" ring of a queue has some new s/g buffers added by the guest VirtIO driver.
     *
     * @param   pDevIns     The device instance.
     * @param   pVirtio     Pointer to the shared virtio state.
     * @param   uVirtqNbr   Index of the notified queue
     */
    DECLCALLBACKMEMBER(void, pfnVirtqNotified,(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr));

} VIRTIOCORER0;
478
/**
 * The core/common state of the VirtIO PCI devices, raw-mode edition.
 */
typedef struct VIRTIOCORERC
{
    uint64_t                    uUnusedAtTheMoment;     /**< Placeholder; no raw-mode state needed yet */
} VIRTIOCORERC;

/** @typedef VIRTIOCORECC
 * The instance data for the current context. */
typedef CTX_SUFF(VIRTIOCORE) VIRTIOCORECC;
490
/** @name API for VirtIO parent device
 * @{ */

/**
 * Setup PCI device controller and Virtio state.
 *
 * This should be called from PDMDEVREGR3::pfnConstruct.
 *
 * @param   pDevIns                 Device instance.
 * @param   pVirtio                 Pointer to the shared virtio state.  This
 *                                  must be the first member in the shared
 *                                  device instance data!
 * @param   pVirtioCC               Pointer to the ring-3 virtio state.  This
 *                                  must be the first member in the ring-3
 *                                  device instance data!
 * @param   pPciParams              Values to populate industry standard PCI Configuration Space data structure
 * @param   pcszInstance            Device instance name (format-specifier)
 * @param   fDevSpecificFeatures    VirtIO device-specific features offered by
 *                                  client
 * @param   fOfferLegacy            Whether to also offer the device to legacy (pre-1.0)
 *                                  guest drivers (stored in VIRTIOCORE::fOfferLegacy)
 * @param   pvDevSpecificCfg        Address of client's dev-specific
 *                                  configuration struct.
 * @param   cbDevSpecificCfg        Size of virtio_pci_device_cap device-specific struct
 *
 * @returns VBox status code.
 */
int virtioCoreR3Init(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC,
                     PVIRTIOPCIPARAMS pPciParams, const char *pcszInstance,
                     uint64_t fDevSpecificFeatures, uint32_t fOfferLegacy, void *pvDevSpecificCfg, uint16_t cbDevSpecificCfg);
/**
 * Initiate orderly reset procedure.  This is an exposed API for clients that might need it.
 * Invoked by client to reset the device and driver (see VirtIO 1.0 section 2.1.1/2.1.2).
 *
 * @param   pVirtio     Pointer to the virtio state.
 */
void virtioCoreResetAll(PVIRTIOCORE pVirtio);
524
/**
 * 'Attaches' host device-specific implementation's queue state to host VirtIO core
 * virtqueue management infrastructure, informing the virtio core of the name of the
 * queue to associate with the queue number.
 *
 * Note: uVirtqNbr (ordinal index) is used as the 'handle' for virtqs in this VirtioCore
 * implementation's API (as an opaque selector into the VirtIO core's array of queues' states).
 *
 * Virtqueue numbers are actually VirtIO-specification defined device-specifically
 * (i.e. they are unique within each VirtIO device type), but are in some cases scalable
 * so only the pattern of queue numbers is defined by the spec and implementations may contain
 * a self-determined plurality of queues.
 *
 * @param   pVirtio     Pointer to the shared virtio state.
 * @param   uVirtqNbr   Virtq number
 * @param   pcszName    Name to give queue
 *
 * @returns VBox status code.
 */
int virtioCoreR3VirtqAttach(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, const char *pcszName);
545
/**
 * Detaches host device-specific implementation's queue state from the host VirtIO core
 * virtqueue management infrastructure, informing the VirtIO core that the queue is
 * not utilized by the device-specific code.
 *
 * @param   pVirtio     Pointer to the shared virtio state.
 * @param   uVirtqNbr   Virtq number
 *
 * @returns VBox status code.
 */
int virtioCoreR3VirtqDetach(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr);
558
/**
 * Checks to see whether queue is attached to core.
 *
 * @param   pVirtio     Pointer to the shared virtio state.
 * @param   uVirtqNbr   Virtq number
 *
 * @returns true if the device-specific reflection of the queue is attached to the
 *          core, false otherwise.
 */
bool virtioCoreR3VirtqIsAttached(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr);
569
/**
 * Checks to see whether queue is enabled.
 *
 * @param   pVirtio     Pointer to the shared virtio state.
 * @param   uVirtqNbr   Virtq number
 *
 * @returns true or false indicating core queue enable state.
 *
 * There is no API function to enable the queue, because the actual enabling is handled
 * by the guest via MMIO.
 *
 * NOTE: Guest VirtIO driver's claim over this state is overridden (which violates VirtIO 1.0 spec
 * in a carefully controlled manner) in the case where the queue MUST be disabled, due to observed
 * control queue corruption (e.g. null GCPhys virtq base addr) while restoring a legacy-only device's
 * (DevVirtioNet.cpp) state, as a way to flag that the queue is unusable-as-saved and must be removed.
 * That is all handled in the load/save exec logic.  Device reset could potentially, depending on
 * parameters passed from host VirtIO device to guest VirtIO driver, result in guest re-establishing
 * the queue, except, in that situation, the queue operational state would be valid.
 */
bool virtioCoreR3VirtqIsEnabled(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr);
589
/**
 * Enable or disable notification for the specified queue.
 *
 * When queue notifications are enabled, the guest VirtIO driver notifies host VirtIO device
 * (via MMIO, see VirtIO 1.0, 4.1.4.4 "Notification Structure Layout") whenever guest driver adds
 * a new s/g buffer to the "avail" ring of the queue.
 *
 * Note: VirtIO queue layout includes flags the device controls in "used" ring to inform guest
 * driver if it should notify host of guest's buffer additions to the "avail" ring, and
 * conversely, the guest driver sets flags in the "avail" ring to communicate to host device
 * whether or not to interrupt guest when it adds buffers to used ring.
 *
 * @param   pVirtio     Pointer to the shared virtio state.
 * @param   uVirtqNbr   Virtq number
 * @param   fEnable     Selects notification mode (enabled or disabled)
 */
void virtioCoreVirtqEnableNotify(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, bool fEnable);
607
/**
 * Notifies guest (via ISR or MSI-X) of device configuration change.
 *
 * @param   pVirtio     Pointer to the shared virtio state.
 */
void virtioCoreNotifyConfigChanged(PVIRTIOCORE pVirtio);
614
/**
 * Displays a well-formatted human-readable translation of otherwise inscrutable bitmasks
 * that embody features VirtIO specification definitions, indicating: Totality of features
 * that can be implemented by host and guest, which features were offered by the host, and
 * which were actually accepted by the guest.  It displays it as a summary view of the device's
 * finalized operational state (host-guest negotiated architecture) in such a way that shows
 * which options are available for implementing or enabling.
 *
 * The non-device-specific VirtIO features list are managed by core API (e.g. implied).
 * Only dev-specific features must be passed as parameter.
 *
 * @param   pVirtio              Pointer to the shared virtio state.
 * @param   pHlp                 Pointer to the debug info hlp struct
 * @param   aDevSpecificFeatures Dev-specific features (virtio-net, virtio-scsi...)
 * @param   cFeatures            Number of features in aDevSpecificFeatures
 */
void virtioCorePrintDeviceFeatures(VIRTIOCORE *pVirtio, PCDBGFINFOHLP pHlp,
                                   const VIRTIO_FEATURES_LIST *aDevSpecificFeatures, int cFeatures);
633
/**
 * Debug-assist utility function to display state of the VirtIO core code, including
 * an overview of the state of all of the queues.
 *
 * This can be invoked when running the VirtualBox debugger, or from the command line
 * using the command: "VboxManage debugvm <VM name or id> info <device name> [args]"
 *
 * Example: VBoxManage debugvm myVnetVm info "virtio-net" help
 *
 * This is implemented currently to be invoked by the inheriting device-specific code
 * (see the VirtualBox virtio-net (VirtIO network controller device implementation)
 * for an example of code that receives debugvm callback directly).
 *
 * DevVirtioNet lists available sub-options if no arguments are provided.  In that
 * example this virtq info related function is invoked hierarchically when virtio-net
 * displays its device-specific queue info.
 *
 * @param   pDevIns     The device instance.
 * @param   pHlp        Pointer to the debug info hlp struct
 * @param   pszArgs     Arguments to function
 * @param   uVirtqNbr   Virtq number (NOTE(review): exact semantics not visible in this
 *                      header; confirm against the implementation)
 */
void virtioCoreR3VirtqInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs, int uVirtqNbr);
656
657/**
658 * Returns the number of avail bufs in the virtq.
659 *
660 * @param pDevIns The device instance.
661 * @param pVirtio Pointer to the shared virtio state.
662 * @param uVirtqNbr Virtqueue to return the count of buffers available for.
663 */
664uint16_t virtioCoreVirtqAvailBufCount(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr);
665
666/**
667 * This function is identical to virtioCoreR3VirtqAvailBufGet(), *except* it doesn't consume
668 * peeked buffer from avail ring of the virtq. The function *becomes* identical to the
669 * virtioCoreR3VirtqAvailBufGet() only if virtioCoreR3VirtqAvailRingNext() is invoked to
670 * consume buf from the queue's avail ring, followed by invocation of virtioCoreR3VirtqUsedBufPut(),
671 * to hand host-processed buffer back to guest, which completes guest-initiated virtq buffer circuit.
672 *
673 * @param pDevIns The device instance.
674 * @param pVirtio Pointer to the shared virtio state.
675 * @param uVirtqNbr Virtq number
676 * @param ppVirtqBuf Address to store pointer to descriptor chain that contains the
677 * pre-processed transaction information pulled from the virtq.
678 *
679 * @returns VBox status code:
680 * @retval VINF_SUCCESS Success
681 * @retval VERR_INVALID_STATE VirtIO not in ready state (asserted).
682 * @retval VERR_NOT_AVAILABLE If the queue is empty.
683 */
684int virtioCoreR3VirtqAvailBufPeek(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr,
685 PPVIRTQBUF ppVirtqBuf);
686
687/**
688 * This function fetches the next buffer (descriptor chain) from the VirtIO "avail" ring of
689 * indicated queue, separating the buf's s/g vectors into OUT (e.g. guest-to-host)
690 * components and and IN (host-to-guest) components.
691 *
692 * Caller is responsible for GCPhys to host virtual memory conversions. If the
693 * virtq buffer being peeked at is "consumed", virtioCoreR3VirtqAvailRingNext() must
694 * be called, and after that virtioCoreR3VirtqUsedBufPut() must be called to
695 * complete the buffer transfer cycle with the guest.
696 *
697 * @param pDevIns The device instance.
698 * @param pVirtio Pointer to the shared virtio state.
699 * @param uVirtqNbr Virtq number
700 * @param ppVirtqBuf Address to store pointer to descriptor chain that contains the
701 * pre-processed transaction information pulled from the virtq.
702 * Returned reference must be released by calling
703 * virtioCoreR3VirtqBufRelease().
704 * @param fRemove flags whether to remove desc chain from queue (false = peek)
705 *
706 * @returns VBox status code:
707 * @retval VINF_SUCCESS Success
708 * @retval VERR_INVALID_STATE VirtIO not in ready state (asserted).
709 * @retval VERR_NOT_AVAILABLE If the queue is empty.
710 */
711int virtioCoreR3VirtqAvailBufGet(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr,
712 PPVIRTQBUF ppVirtqBuf, bool fRemove);
713
714/**
715 * Fetches a specific descriptor chain using avail ring of indicated queue and converts the
716 * descriptor chain into its OUT (to device) and IN (to guest) components.
717 *
718 * The caller is responsible for GCPhys to host virtual memory conversions and *must*
719 * return the virtq buffer using virtioCoreR3VirtqUsedBufPut() to complete the roundtrip
720 * virtq transaction.
 *
722 * @param pDevIns The device instance.
723 * @param pVirtio Pointer to the shared virtio state.
724 * @param uVirtqNbr Virtq number
725 * @param ppVirtqBuf Address to store pointer to descriptor chain that contains the
726 * pre-processed transaction information pulled from the virtq.
727 * Returned reference must be released by calling
728 * virtioCoreR3VirtqBufRelease().
729 * @param fRemove flags whether to remove desc chain from queue (false = peek)
730 *
731 * @returns VBox status code:
732 * @retval VINF_SUCCESS Success
733 * @retval VERR_INVALID_STATE VirtIO not in ready state (asserted).
734 * @retval VERR_NOT_AVAILABLE If the queue is empty.
735 */
736int virtioCoreR3VirtqAvailBufGet(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr,
737 uint16_t uHeadIdx, PPVIRTQBUF ppVirtqBuf);
738
739/**
740 * Returns data to the guest to complete a transaction initiated by virtioCoreR3VirtqAvailBufGet(),
741 * (or virtioCoreR3VirtqAvailBufPeek()/virtioCoreR3VirtqBufSync() call pair), to complete each
742 * buffer transfer transaction (guest-host buffer cycle), ultimately moving each descriptor chain
743 * from the avail ring of a queue onto the used ring of the queue. Note that VirtIO buffer
744 * transactions are *always* initiated by the guest and completed by the host. In other words,
745 * for the host to send any I/O related data to the guest (and in some cases configuration data),
746 * the guest must provide buffers via the virtq's avail ring, for the host to fill.
747 *
 * At some point virtioCoreR3VirtqUsedRingSync() must be called to return data to the guest,
 * completing all pending virtioCoreR3VirtqUsedBufPut() operations that have accumulated since
 * the last call to virtioCoreR3VirtqUsedRingSync().
 *
752 * @note This function effectively performs write-ahead to the used ring of the virtq.
753 * Data written won't be seen by the guest until the next call to virtioCoreVirtqUsedRingSync()
754 *
755 * @param pDevIns The device instance (for reading).
756 * @param pVirtio Pointer to the shared virtio state.
757 * @param uVirtqNbr Virtq number
758 *
759 * @param pSgVirtReturn Points to scatter-gather buffer of virtual memory
760 * segments the caller is returning to the guest.
761 *
762 * @param pVirtqBuf This contains the context of the scatter-gather
763 * buffer originally pulled from the queue.
764 *
765 * @param fFence If true (default), put up copy-fence (memory barrier) after
766 * copying to guest phys. mem.
767 *
768 * @returns VBox status code.
769 * @retval VINF_SUCCESS Success
770 * @retval VERR_INVALID_STATE VirtIO not in ready state
771 * @retval VERR_NOT_AVAILABLE Virtq is empty
772 *
773 * @note This function will not release any reference to pVirtqBuf. The
774 * caller must take care of that.
775 */
776int virtioCoreR3VirtqUsedBufPut(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, PRTSGBUF pSgVirtReturn,
777 PVIRTQBUF pVirtqBuf, bool fFence = true);
778
779
/**
 * Quicker variant of the same-named function (directly above) that it overloads.
 * This variant accepts as input a pointer to a buffer and a count, rather than an
 * S/G buffer, and thus doesn't have to copy between two S/G buffers, avoiding some overhead.
784 *
785 * @param pDevIns The device instance (for reading).
786 * @param pVirtio Pointer to the shared virtio state.
787 * @param uVirtqNbr Virtq number
788 * @param cb Number of bytes to add to copy to phys. buf.
789 * @param pv Virtual mem buf to copy to phys buf.
790 * @param cbEnqueue How many bytes in packet to enqueue (0 = don't enqueue)
791 * @param fFence If true (default), put up copy-fence (memory barrier) after
792 * copying to guest phys. mem.
793 *
794 * @returns VBox status code.
795 * @retval VINF_SUCCESS Success
796 * @retval VERR_INVALID_STATE VirtIO not in ready state
797 * @retval VERR_NOT_AVAILABLE Virtq is empty
798 *
799 * @note This function will not release any reference to pVirtqBuf. The
800 * caller must take care of that.
801 */
802int virtioCoreR3VirtqUsedBufPut(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq, size_t cb, const void *pv,
803 PVIRTQBUF pVirtqBuf, uint32_t cbEnqueue, bool fFence = true);
804
805
806/**
807 * Advance index of avail ring to next entry in specified virtq (see virtioCoreR3VirtqAvailBufPeek())
808 *
809 * @param pVirtio Pointer to the virtio state.
810 * @param uVirtqNbr Index of queue
811 */
812int virtioCoreR3VirtqAvailBufNext(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr);
813
814/**
815 * Checks to see if guest has accepted host device's VIRTIO_F_VERSION_1 (i.e. "modern")
816 * behavioral modeling, indicating guest agreed to comply with the modern VirtIO 1.0+ specification.
817 * Otherwise unavoidable presumption is that the host device is dealing with legacy VirtIO
818 * guest drive, thus must be prepared to cope with less mature architecture and behaviors
819 * from prototype era of VirtIO. (see comments in PDM-invoked device constructor for more information).
820 *
821 * @param pVirtio Pointer to the virtio state.
822 */
823int virtioCoreIsLegacyMode(PVIRTIOCORE pVirtio);
824
825/**
826 * This VirtIO transitional device supports "modern" (rev 1.0+) as well as "legacy" (e.g. < 1.0) VirtIO drivers.
827 * Some legacy guest drivers are known to mishandle PCI bus mastering wherein the PCI flavor of GC phys
828 * access functions can't be used. The following wrappers select the memory access method based on whether the
829 * device is operating in legacy mode or not.
830 */
831DECLINLINE(int) virtioCoreGCPhysWrite(PVIRTIOCORE pVirtio, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbWrite)
832{
833 int rc;
834 if (virtioCoreIsLegacyMode(pVirtio))
835 rc = PDMDevHlpPhysWrite(pDevIns, GCPhys, pvBuf, cbWrite);
836 else
837 rc = PDMDevHlpPCIPhysWrite(pDevIns, GCPhys, pvBuf, cbWrite);
838 return rc;
839}
840
841DECLINLINE(int) virtioCoreGCPhysRead(PVIRTIOCORE pVirtio, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
842{
843 int rc;
844 if (virtioCoreIsLegacyMode(pVirtio))
845 rc = PDMDevHlpPhysRead(pDevIns, GCPhys, pvBuf, cbRead);
846 else
847 rc = PDMDevHlpPCIPhysRead(pDevIns, GCPhys, pvBuf, cbRead);
848 return rc;
849}
850
851/*
852 * (See comments for corresponding function in sg.h)
853 */
854DECLINLINE(void) virtioCoreGCPhysChainInit(PVIRTIOSGBUF pGcSgBuf, PVIRTIOSGSEG paSegs, size_t cSegs)
855{
856 AssertPtr(pGcSgBuf);
857 Assert((cSegs > 0 && RT_VALID_PTR(paSegs)) || (!cSegs && !paSegs));
858 Assert(cSegs < (~(unsigned)0 >> 1));
859
860 pGcSgBuf->paSegs = paSegs;
861 pGcSgBuf->cSegs = (unsigned)cSegs;
862 pGcSgBuf->idxSeg = 0;
863 if (cSegs && paSegs)
864 {
865 pGcSgBuf->GCPhysCur = paSegs[0].GCPhys;
866 pGcSgBuf->cbSegLeft = paSegs[0].cbSeg;
867 }
868 else
869 {
870 pGcSgBuf->GCPhysCur = 0;
871 pGcSgBuf->cbSegLeft = 0;
872 }
873}
874
/*
 * (See comments for corresponding function in sg.h)
 *
 * Returns the current guest-physical address of the chain and consumes up to
 * *pcbData bytes from it, bounded by what remains in the current segment.
 * On return *pcbData holds the number of bytes actually consumed.
 */
DECLINLINE(RTGCPHYS) virtioCoreGCPhysChainGet(PVIRTIOSGBUF pGcSgBuf, size_t *pcbData)
{
    size_t cbData;
    RTGCPHYS pGcBuf;

    /* Check that the S/G buffer has memory left. */
    if (RT_LIKELY(pGcSgBuf->idxSeg < pGcSgBuf->cSegs && pGcSgBuf->cbSegLeft))
    { /* likely */ }
    else
    {
        *pcbData = 0;
        return 0;
    }

    /* Sanity: the cursor must lie inside the current segment (segments capped at 128MB). */
    AssertMsg(    pGcSgBuf->cbSegLeft <= 128 * _1M
              && (RTGCPHYS)pGcSgBuf->GCPhysCur >= (RTGCPHYS)pGcSgBuf->paSegs[pGcSgBuf->idxSeg].GCPhys
              && (RTGCPHYS)pGcSgBuf->GCPhysCur + pGcSgBuf->cbSegLeft <=
                   (RTGCPHYS)pGcSgBuf->paSegs[pGcSgBuf->idxSeg].GCPhys + pGcSgBuf->paSegs[pGcSgBuf->idxSeg].cbSeg,
                 ("pGcSgBuf->idxSeg=%d pGcSgBuf->cSegs=%d pGcSgBuf->GCPhysCur=%p pGcSgBuf->cbSegLeft=%zd "
                  "pGcSgBuf->paSegs[%d].GCPhys=%p pGcSgBuf->paSegs[%d].cbSeg=%zd\n",
                  pGcSgBuf->idxSeg, pGcSgBuf->cSegs, pGcSgBuf->GCPhysCur, pGcSgBuf->cbSegLeft,
                  pGcSgBuf->idxSeg, pGcSgBuf->paSegs[pGcSgBuf->idxSeg].GCPhys, pGcSgBuf->idxSeg,
                  pGcSgBuf->paSegs[pGcSgBuf->idxSeg].cbSeg));

    /* Consume as much as requested, limited to what remains in the current segment. */
    cbData = RT_MIN(*pcbData, pGcSgBuf->cbSegLeft);
    pGcBuf = pGcSgBuf->GCPhysCur;
    pGcSgBuf->cbSegLeft -= cbData;
    if (!pGcSgBuf->cbSegLeft)
    {
        /* Segment exhausted: advance to the next one, if any. */
        pGcSgBuf->idxSeg++;

        if (pGcSgBuf->idxSeg < pGcSgBuf->cSegs)
        {
            pGcSgBuf->GCPhysCur = pGcSgBuf->paSegs[pGcSgBuf->idxSeg].GCPhys;
            pGcSgBuf->cbSegLeft = pGcSgBuf->paSegs[pGcSgBuf->idxSeg].cbSeg;
        }
        /* NOTE(review): *pcbData is only written on this segment-exhausted path.  That is
         * still correct: on the other path cbData == *pcbData already, because cbData is
         * only clamped below *pcbData when the request empties the segment. */
        *pcbData = cbData;
    }
    else
        /* Segment not exhausted: just move the cursor forward within it. */
        pGcSgBuf->GCPhysCur = pGcSgBuf->GCPhysCur + cbData;

    return pGcBuf;
}
921
922/*
923 * (See comments for corresponding function in sg.h)
924 */
925DECLINLINE(void) virtioCoreGCPhysChainReset(PVIRTIOSGBUF pGcSgBuf)
926{
927 AssertPtrReturnVoid(pGcSgBuf);
928
929 pGcSgBuf->idxSeg = 0;
930 if (pGcSgBuf->cSegs)
931 {
932 pGcSgBuf->GCPhysCur = pGcSgBuf->paSegs[0].GCPhys;
933 pGcSgBuf->cbSegLeft = pGcSgBuf->paSegs[0].cbSeg;
934 }
935 else
936 {
937 pGcSgBuf->GCPhysCur = 0;
938 pGcSgBuf->cbSegLeft = 0;
939 }
940}
941
942/*
943 * (See comments for corresponding function in sg.h)
944 */
945DECLINLINE(RTGCPHYS) virtioCoreGCPhysChainAdvance(PVIRTIOSGBUF pGcSgBuf, size_t cbAdvance)
946{
947 AssertReturn(pGcSgBuf, 0);
948
949 size_t cbLeft = cbAdvance;
950 while (cbLeft)
951 {
952 size_t cbThisAdvance = cbLeft;
953 virtioCoreGCPhysChainGet(pGcSgBuf, &cbThisAdvance);
954 if (!cbThisAdvance)
955 break;
956
957 cbLeft -= cbThisAdvance;
958 }
959 return cbAdvance - cbLeft;
960}
961
962/*
963 * (See comments for corresponding function in sg.h)
964 */
965DECLINLINE(RTGCPHYS) virtioCoreGCPhysChainGetNextSeg(PVIRTIOSGBUF pGcSgBuf, size_t *pcbSeg)
966{
967 AssertReturn(pGcSgBuf, 0);
968 AssertPtrReturn(pcbSeg, 0);
969
970 if (!*pcbSeg)
971 *pcbSeg = pGcSgBuf->cbSegLeft;
972
973 return virtioCoreGCPhysChainGet(pGcSgBuf, pcbSeg);
974}
975
976/**
977 * Calculate the length of a GCPhys s/g buffer by tallying the size of each segment.
978 *
979 * @param pGcSgBuf Guest Context (GCPhys) S/G buffer to calculate length of
980 */
981DECLINLINE(size_t) virtioCoreGCPhysChainCalcBufSize(PCVIRTIOSGBUF pGcSgBuf)
982{
983 size_t cb = 0;
984 unsigned i = pGcSgBuf->cSegs;
985 while (i-- > 0)
986 cb += pGcSgBuf->paSegs[i].cbSeg;
987 return cb;
988}
989
990/*
991 * (See comments for corresponding function in sg.h)
992 */
993DECLINLINE(size_t) virtioCoreGCPhysChainCalcLengthLeft(PVIRTIOSGBUF pGcSgBuf)
994{
995 size_t cb = pGcSgBuf->cbSegLeft;
996 unsigned i = pGcSgBuf->cSegs;
997 while (i-- > pGcSgBuf->idxSeg + 1)
998 cb += pGcSgBuf->paSegs[i].cbSeg;
999 return cb;
1000}
1001#define VIRTQNAME(a_pVirtio, a_uVirtq) ((a_pVirtio)->aVirtqueues[(a_uVirtq)].szName)
1002
1003/**
1004 * Convert and append bytes from a virtual-memory simple buffer to VirtIO guest's
1005 * physical memory described by a buffer pulled form the avail ring of a virtq.
1006 *
1007 * @param pVirtio Pointer to the shared virtio state.
1008 * @param pVirtqBuf VirtIO buffer to fill
1009 * @param pv input: virtual memory buffer to receive bytes
1010 * @param cb number of bytes to add to the s/g buffer.
1011 */
1012DECLINLINE(void) virtioCoreR3VirqBufFill(PVIRTIOCORE pVirtio, PVIRTQBUF pVirtqBuf, void *pv, size_t cb)
1013{
1014 uint8_t *pvBuf = (uint8_t *)pv;
1015 size_t cbRemain = cb, cbTotal = 0;
1016 PVIRTIOSGBUF pSgPhysReturn = pVirtqBuf->pSgPhysReturn;
1017 while (cbRemain)
1018 {
1019 uint32_t cbBounded = RT_MIN(pSgPhysReturn->cbSegLeft, cbRemain);
1020 Assert(cbBounded > 0);
1021 virtioCoreGCPhysWrite(pVirtio, CTX_SUFF(pVirtio->pDevIns), (RTGCPHYS)pSgPhysReturn->GCPhysCur, pvBuf, cbBounded);
1022 virtioCoreGCPhysChainAdvance(pSgPhysReturn, cbBounded);
1023 pvBuf += cbBounded;
1024 cbRemain -= cbBounded;
1025 cbTotal += cbBounded;
1026 }
1027 LogFunc(("Appended %d bytes to guest phys buf [head: %u]. %d bytes unused in buf.)\n",
1028 cbTotal, pVirtqBuf->uHeadIdx, virtioCoreGCPhysChainCalcLengthLeft(pSgPhysReturn)));
1029}
1030
1031/**
1032 * Extract some bytes from of a virtq s/g buffer, converting them from GCPhys space to
1033 * to ordinary virtual memory (i.e. making data directly accessible to host device code)
1034 *
1035 * As a performance optimization, it is left to the caller to validate buffer size.
1036 *
1037 * @param pVirtio Pointer to the shared virtio state.
1038 * @param pVirtqBuf input: virtq buffer
1039 * @param pv output: virtual memory buffer to receive bytes
1040 * @param cb number of bytes to Drain from buffer
1041 */
1042DECLINLINE(void) virtioCoreR3VirtqBufDrain(PVIRTIOCORE pVirtio, PVIRTQBUF pVirtqBuf, void *pv, size_t cb)
1043{
1044 uint8_t *pb = (uint8_t *)pv;
1045 size_t cbLim = RT_MIN(pVirtqBuf->cbPhysSend, cb);
1046 while (cbLim)
1047 {
1048 size_t cbSeg = cbLim;
1049 RTGCPHYS GCPhys = virtioCoreGCPhysChainGetNextSeg(pVirtqBuf->pSgPhysSend, &cbSeg);
1050 PDMDevHlpPCIPhysRead(pVirtio->pDevInsR3, GCPhys, pb, cbSeg);
1051 pb += cbSeg;
1052 cbLim -= cbSeg;
1053 pVirtqBuf->cbPhysSend -= cbSeg;
1054 }
1055 LogFunc(("Drained %d/%d bytes from %s buffer, head idx: %u (%d bytes left)\n",
1056 cb - cbLim, cb, VIRTQNAME(pVirtio, pVirtqBuf->uVirtq),
1057 pVirtqBuf->uHeadIdx, virtioCoreGCPhysChainCalcLengthLeft(pVirtqBuf->pSgPhysReturn)));
1058}
1059
1060#undef VIRTQNAME
1061
1062/**
1063 * Updates indicated virtq's "used ring" descriptor index to match "shadow" index that tracks
1064 * pending buffers added to the used ring, thus exposing all the data added by virtioCoreR3VirtqUsedBufPut()
1065 * to the "used ring" since the last virtioCoreVirtqUsedRingSync().
1066 *
1067 * This *must* be invoked after one or more virtioCoreR3VirtqUsedBufPut() calls to inform guest driver
1068 * there is data in the queue. If enabled by guest, IRQ or MSI-X signalling will notify guest
1069 * proactively, otherwise guest detects updates by polling. (see VirtIO 1.0, Section 2.4 "Virtqueues").
1070 *
1071 * @param pDevIns The device instance.
1072 * @param pVirtio Pointer to the shared virtio state.
1073 * @param uVirtqNbr Virtq number
1074 *
1075 * @returns VBox status code.
1076 * @retval VINF_SUCCESS Success
1077 * @retval VERR_INVALID_STATE VirtIO not in ready state
1078 */
1079int virtioCoreVirtqUsedRingSync(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr);
1080
1081/**
1082 * Retains a reference to the given descriptor chain.
1083 *
1084 * @param pVirtqBuf The descriptor chain to reference.
1085 *
1086 * @returns New reference count.
1087 * @retval UINT32_MAX on invalid parameter.
1088 */
1089uint32_t virtioCoreR3VirtqBufRetain(PVIRTQBUF pVirtqBuf);
1090
1091/**
1092 * Releases a reference to the given descriptor chain.
1093 *
1094 * @param pVirtio Pointer to the shared virtio state.
1095 * @param pVirtqBuf The descriptor chain to reference. NULL is quietly
1096 * ignored (returns 0).
1097 * @returns New reference count.
1098 * @retval 0 if freed or invalid parameter.
1099 */
1100uint32_t virtioCoreR3VirtqBufRelease(PVIRTIOCORE pVirtio, PVIRTQBUF pVirtqBuf);
1101
1102/**
1103 * Return queue enable state
1104 *
1105 * @param pVirtio Pointer to the virtio state.
1106 * @param uVirtqNbr Virtq number.
1107 *
1108 * @returns true or false indicating queue is enabled or not.
1109 */
1110DECLINLINE(bool) virtioCoreIsVirtqEnabled(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
1111{
1112 Assert(uVirtqNbr < RT_ELEMENTS(pVirtio->aVirtqueues));
1113 if (pVirtio->fLegacyDriver)
1114 return pVirtio->aVirtqueues[uVirtqNbr].GCPhysVirtqDesc != 0;
1115 return pVirtio->aVirtqueues[uVirtqNbr].uEnable != 0;
1116}
1117
1118/**
1119 * Get name of queue, via uVirtqNbr, assigned during virtioCoreR3VirtqAttach()
1120 *
1121 * @param pVirtio Pointer to the virtio state.
1122 * @param uVirtqNbr Virtq number.
1123 *
1124 * @returns Pointer to read-only queue name.
1125 */
1126DECLINLINE(const char *) virtioCoreVirtqGetName(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
1127{
1128 Assert((size_t)uVirtqNbr < RT_ELEMENTS(pVirtio->aVirtqueues));
1129 return pVirtio->aVirtqueues[uVirtqNbr].szName;
1130}
1131
1132/**
1133 * Get the bitmask of features VirtIO is running with. This is called by the device-specific
1134 * VirtIO implementation to identify this device's operational configuration after features
1135 * have been negotiated with guest VirtIO driver. Feature negotiation entails host indicating
1136 * to guest which features it supports, then guest accepting from among the offered, which features
1137 * it will enable. That becomes the agreement between the host and guest. The bitmask containing
1138 * virtio core features plus device-specific features is provided as a parameter to virtioCoreR3Init()
1139 * by the host side device-specific virtio implementation.
1140 *
1141 * @param pVirtio Pointer to the virtio state.
1142 *
1143 * @returns Features the guest driver has accepted, finalizing the operational features
1144 */
1145DECLINLINE(uint64_t) virtioCoreGetNegotiatedFeatures(PVIRTIOCORE pVirtio)
1146{
1147 return pVirtio->uDriverFeatures;
1148}
1149
1150/**
1151 * Get name of the VM state change associated with the enumeration variable
1152 *
1153 * @param enmState VM state (enumeration value)
1154 *
1155 * @returns associated text.
1156 */
1157const char *virtioCoreGetStateChangeText(VIRTIOVMSTATECHANGED enmState);
1158
1159/**
1160 * Debug assist code for any consumer that inherits VIRTIOCORE.
1161 * Log memory-mapped I/O input or output value.
1162 *
1163 * This is to be invoked by macros that assume they are invoked in functions with
1164 * the relevant arguments. (See Virtio_1_0.cpp).
1165 *
1166 * It is exposed via the API so inheriting device-specific clients can provide similar
1167 * logging capabilities for a consistent look-and-feel.
1168 *
1169 * @param pszFunc To avoid displaying this function's name via __FUNCTION__ or LogFunc()
1170 * @param pszMember Name of struct member
1171 * @param pv pointer to value
1172 * @param cb size of value
1173 * @param uOffset offset into member where value starts
1174 * @param fWrite True if write I/O
1175 * @param fHasIndex True if the member is indexed
1176 * @param idx The index if fHasIndex
1177 */
1178void virtioCoreLogMappedIoValue(const char *pszFunc, const char *pszMember, uint32_t uMemberSize,
1179 const void *pv, uint32_t cb, uint32_t uOffset,
1180 int fWrite, int fHasIndex, uint32_t idx);
1181
1182/**
1183 * Debug assist for any consumer
1184 *
1185 * Does a formatted hex dump using Log(()), recommend using VIRTIO_HEX_DUMP() macro to
1186 * control enabling of logging efficiently.
1187 *
1188 * @param pv pointer to buffer to dump contents of
1189 * @param cb count of characters to dump from buffer
1190 * @param uBase base address of per-row address prefixing of hex output
1191 * @param pszTitle Optional title. If present displays title that lists
1192 * provided text with value of cb to indicate VIRTQ_SIZE next to it.
1193 */
1194void virtioCoreHexDump(uint8_t *pv, uint32_t cb, uint32_t uBase, const char *pszTitle);
1195
1196/**
1197 * Debug assist for any consumer device code
1198 * Do a hex dump of memory in guest physical context
1199 *
1200 * @param GCPhys pointer to buffer to dump contents of
1201 * @param cb count of characters to dump from buffer
1202 * @param uBase base address of per-row address prefixing of hex output
1203 * @param pszTitle Optional title. If present displays title that lists
1204 * provided text with value of cb to indicate size next to it.
1205 */
1206void virtioCoreGCPhysHexDump(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, uint16_t cb, uint32_t uBase, const char *pszTitle);
1207
/**
 * The following API functions identically to the similarly-named calls pertaining to the RTSGBUF
 */
1211
1212/** Misc VM and PDM boilerplate */
1213int virtioCoreR3SaveExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t cQueues);
1214int virtioCoreR3ModernDeviceLoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uTestVersion, uint32_t cQueues);
1215int virtioCoreR3LegacyDeviceLoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uVirtioLegacy_3_1_Beta);
1216void virtioCoreR3VmStateChanged(PVIRTIOCORE pVirtio, VIRTIOVMSTATECHANGED enmState);
1217void virtioCoreR3Term(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC);
1218int virtioCoreRZInit(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio);
1219const char *virtioCoreGetStateChangeText(VIRTIOVMSTATECHANGED enmState);
1220
1221/*
1222 * The following macros assist with handling/logging MMIO accesses to VirtIO dev-specific config area,
1223 * in a way that enhances code readability and debug logging consistency.
1224 *
1225 * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
1226 */
#ifdef LOG_ENABLED

/* Wrapped in do { } while (0) so each macro expands to a single statement (safe in
 * unbraced if/else bodies — a bare `if` here would capture a following `else`),
 * consistent with the no-op variants in the #else branch below. */
# define VIRTIO_DEV_CONFIG_LOG_ACCESS(member, tCfgStruct, uOffsetOfAccess) \
    do { \
        if (LogIs7Enabled()) { \
            uint32_t uMbrOffset = uOffsetOfAccess - RT_UOFFSETOF(tCfgStruct, member); \
            uint32_t uMbrSize   = RT_SIZEOFMEMB(tCfgStruct, member); \
            virtioCoreLogMappedIoValue(__FUNCTION__, #member, uMbrSize, pv, cb, uMbrOffset, fWrite, false, 0); \
        } \
    } while (0)

# define VIRTIO_DEV_CONFIG_LOG_INDEXED_ACCESS(member, tCfgStruct, uOffsetOfAccess, uIdx) \
    do { \
        if (LogIs7Enabled()) { \
            uint32_t uMbrOffset = uOffsetOfAccess - RT_UOFFSETOF(tCfgStruct, member); \
            uint32_t uMbrSize   = RT_SIZEOFMEMB(tCfgStruct, member); \
            virtioCoreLogMappedIoValue(__FUNCTION__, #member, uMbrSize, pv, cb, uMbrOffset, fWrite, true, uIdx); \
        } \
    } while (0)
#else
# define VIRTIO_DEV_CONFIG_LOG_ACCESS(member, tCfgStruct, uOffsetOfAccess) do { } while (0)
# define VIRTIO_DEV_CONFIG_LOG_INDEXED_ACCESS(member, tCfgStruct, uOffsetOfAccess, uIdx) do { } while (0)
#endif
1246
1247DECLINLINE(bool) virtioCoreMatchMember(uint32_t uOffset, uint32_t cb, uint32_t uMemberOff,
1248 size_t uMemberSize, bool fSubFieldMatch)
1249{
1250 /* Test for 8-byte field (always accessed as two 32-bit components) */
1251 if (uMemberSize == 8)
1252 return (cb == sizeof(uint32_t)) && (uOffset == uMemberOff || uOffset == (uMemberOff + sizeof(uint32_t)));
1253
1254 if (fSubFieldMatch)
1255 return (uOffset >= uMemberOff) && (cb <= uMemberSize - (uOffset - uMemberOff));
1256
1257 /* Test for exact match */
1258 return (uOffset == uMemberOff) && (cb == uMemberSize);
1259}
1260
/**
 * Evaluates to true if the uOffsetOfAccess/cb access window falls anywhere within
 * the bytes of the specified member of the config struct (sub-field matches allowed).
 * cb is an implicit parameter and must be defined by the invoker.
 */
#define VIRTIO_DEV_CONFIG_SUBMATCH_MEMBER(member, tCfgStruct, uOffsetOfAccess) \
            virtioCoreMatchMember(uOffsetOfAccess, cb, \
                                  RT_UOFFSETOF(tCfgStruct, member), \
                                  RT_SIZEOFMEMB(tCfgStruct, member), true /* fSubfieldMatch */)

/**
 * Evaluates to true only if the access window exactly covers the specified member
 * (or, for a 64-bit member, one of its two aligned 32-bit halves).
 * cb is an implicit parameter and must be defined by the invoker.
 */
#define VIRTIO_DEV_CONFIG_MATCH_MEMBER(member, tCfgStruct, uOffsetOfAccess) \
            virtioCoreMatchMember(uOffsetOfAccess, cb, \
                                  RT_UOFFSETOF(tCfgStruct, member), \
                                  RT_SIZEOFMEMB(tCfgStruct, member), false /* fSubfieldMatch */)
1273
1274
1275
/**
 * Copy-reads or copy-writes the specified member field of the config struct,
 * direction chosen by fWrite: on write, guest data described by pv/cb is copied
 * into the member; on read, the member's bytes are copied out to pv.
 *
 * cb, pv and fWrite are implicit parameters and must be defined by invoker.
 */
#define VIRTIO_DEV_CONFIG_ACCESS(member, tCfgStruct, uOffsetOfAccess, pCfgStruct) \
    do \
    { \
        uint32_t uOffsetInMember = uOffsetOfAccess - RT_UOFFSETOF(tCfgStruct, member); \
        if (fWrite) \
            memcpy(((char *)&(pCfgStruct)->member) + uOffsetInMember, pv, cb); \
        else \
            memcpy(pv, ((const char *)&(pCfgStruct)->member) + uOffsetInMember, cb); \
        VIRTIO_DEV_CONFIG_LOG_ACCESS(member, tCfgStruct, uOffsetOfAccess); \
    } while(0)
1292
/**
 * Copies bytes into memory described by cb, pv from the specified member field of
 * the config struct.  The operation is a NOP, logging an error, if the implied
 * parameter fWrite is boolean true (the member is read-only to the guest).
 *
 * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
 */
#define VIRTIO_DEV_CONFIG_ACCESS_READONLY(member, tCfgStruct, uOffsetOfAccess, pCfgStruct) \
    do \
    { \
        uint32_t uOffsetInMember = uOffsetOfAccess - RT_UOFFSETOF(tCfgStruct, member); \
        if (fWrite) \
            LogFunc(("Guest attempted to write readonly virtio config struct (member %s)\n", #member)); \
        else \
        { \
            memcpy(pv, ((const char *)&(pCfgStruct)->member) + uOffsetInMember, cb); \
            VIRTIO_DEV_CONFIG_LOG_ACCESS(member, tCfgStruct, uOffsetOfAccess); \
        } \
    } while(0)
1311
/**
 * Copies into or out of the uIdx'th element's member field of an array of config
 * structs (direction chosen by fWrite), the memory described by cb and pv.
 *
 * cb, pv and fWrite are implicit parameters and must be defined by invoker.
 */
#define VIRTIO_DEV_CONFIG_ACCESS_INDEXED(member, uIdx, tCfgStruct, uOffsetOfAccess, pCfgStruct) \
    do \
    { \
        uint32_t uOffsetInMember = uOffsetOfAccess - RT_UOFFSETOF(tCfgStruct, member); \
        if (fWrite) \
            memcpy(((char *)&(pCfgStruct[uIdx].member)) + uOffsetInMember, pv, cb); \
        else \
            memcpy(pv, ((const char *)&(pCfgStruct[uIdx].member)) + uOffsetInMember, cb); \
        VIRTIO_DEV_CONFIG_LOG_INDEXED_ACCESS(member, tCfgStruct, uOffsetOfAccess, uIdx); \
    } while(0)
1328
/**
 * Copies bytes into memory described by cb, pv from the uIdx'th element's member
 * field of an array of config structs.  The operation is a nop and logs an error
 * if the implied parameter fWrite is true (the member is read-only to the guest).
 *
 * cb, pv and fWrite are implicit parameters and must be defined by invoker.
 */
/* Fix: the macro parameter was declared 'uidx' while the body references 'uIdx',
 * so the argument was never substituted and the body silently captured whatever
 * 'uIdx' happened to be in scope at the expansion site. */
#define VIRTIO_DEV_CONFIG_ACCESS_INDEXED_READONLY(member, uIdx, tCfgStruct, uOffsetOfAccess, pCfgStruct) \
    do \
    { \
        uint32_t uOffsetInMember = uOffsetOfAccess - RT_UOFFSETOF(tCfgStruct, member); \
        if (fWrite) \
            LogFunc(("Guest attempted to write readonly virtio config struct (member %s)\n", #member)); \
        else \
        { \
            memcpy(pv, ((const char *)&(pCfgStruct[uIdx].member)) + uOffsetInMember, cb); \
            VIRTIO_DEV_CONFIG_LOG_INDEXED_ACCESS(member, tCfgStruct, uOffsetOfAccess, uIdx); \
        } \
    } while(0)
1347
1348/** @} */
1349
1350/** @name API for VirtIO parent device
1351 * @{ */
1352
1353#endif /* !VBOX_INCLUDED_SRC_VirtIO_VirtioCore_h */
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette