VirtualBox

source: vbox/trunk/src/VBox/Devices/VirtIO/VirtioCore.h@ 85415

Last change on this file since 85415 was 85415, checked in by vboxsync, 4 years ago

Network/DevVirtioNet_1_0.cpp: Fixed pause/resume/poweroff, added more support for multiqueue (MQ) handling (see bugref:8651, Comment 91), More little cleanup, comment fixes

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 52.7 KB
Line 
1/* $Id: VirtioCore.h 85415 2020-07-22 14:44:19Z vboxsync $ */
2
3/** @file
4 * VirtioCore.h - Virtio Declarations
5 */
6
7/*
8 * Copyright (C) 2009-2020 Oracle Corporation
9 *
10 * This file is part of VirtualBox Open Source Edition (OSE), as
11 * available from http://www.virtualbox.org. This file is free software;
12 * you can redistribute it and/or modify it under the terms of the GNU
13 * General Public License (GPL) as published by the Free Software
14 * Foundation, in version 2 as it comes in the "COPYING" file of the
15 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
16 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
17 */
18
19#ifndef VBOX_INCLUDED_SRC_VirtIO_VirtioCore_h
20#define VBOX_INCLUDED_SRC_VirtIO_VirtioCore_h
21#ifndef RT_WITHOUT_PRAGMA_ONCE
22# pragma once
23#endif
24
25#include <iprt/ctype.h>
26#include <iprt/sg.h>
27
/**
 * VIRTIO_HEX_DUMP - Conditionally hex-dump a buffer to the log.
 *
 * Dumps only when the given log level is enabled for LOG_GROUP; compiles to an
 * empty statement in builds without LOG_ENABLED, so callers may use it freely.
 *
 * @param logLevel  Log level tested via LogIsItEnabled() before dumping.
 * @param pv        Pointer to the buffer to dump.
 * @param cb        Number of bytes to dump.
 * @param base      Base value passed through to virtioCoreHexDump()
 *                  (presumably the display base address — confirm at the impl).
 * @param title     Title passed through to virtioCoreHexDump().
 */
#ifdef LOG_ENABLED
# define VIRTIO_HEX_DUMP(logLevel, pv, cb, base, title) \
    do { \
        if (LogIsItEnabled(logLevel, LOG_GROUP)) \
            virtioCoreHexDump((pv), (cb), (base), (title)); \
    } while (0)
#else
# define VIRTIO_HEX_DUMP(logLevel, pv, cb, base, title) do { } while (0)
#endif
37
38/** Pointer to the shared VirtIO state. */
39typedef struct VIRTIOCORE *PVIRTIOCORE;
40/** Pointer to the ring-3 VirtIO state. */
41typedef struct VIRTIOCORER3 *PVIRTIOCORER3;
42/** Pointer to the ring-0 VirtIO state. */
43typedef struct VIRTIOCORER0 *PVIRTIOCORER0;
44/** Pointer to the raw-mode VirtIO state. */
45typedef struct VIRTIOCORERC *PVIRTIOCORERC;
46/** Pointer to the instance data for the current context. */
47typedef CTX_SUFF(PVIRTIOCORE) PVIRTIOCORECC;
48
49#define VIRTIO_MAX_VIRTQ_NAME_SIZE 32 /**< Maximum length of a queue name */
50#define VIRTQ_MAX_ENTRIES 1024 /**< Max size (# desc elements) of a virtq */
51#define VIRTQ_MAX_COUNT 24 /**< Max queues we allow guest to create */
52#define VIRTIO_NOTIFY_OFFSET_MULTIPLIER 2 /**< VirtIO Notify Cap. MMIO config param */
53#define VIRTIO_REGION_PCI_CAP 2 /**< BAR for VirtIO Cap. MMIO (impl specific) */
54#define VIRTIO_REGION_MSIX_CAP 0 /**< Bar for MSI-X handling */
55
56
57/** The following virtioCoreGCPhysChain*() functions mimic the functionality of the related RT s/g functions,
58 * except they work with the data type GCPhys rather than void *
59 */
/**
 * One scatter/gather entry: a contiguous span of guest-physical memory.
 * Counterpart to RTSGSEG, but addressed by RTGCPHYS rather than a host pointer.
 */
typedef struct VIRTIOSGSEG
{
    RTGCPHYS GCPhys;                                /**< Guest-physical address of the segment buffer */
    size_t   cbSeg;                                 /**< Size of the segment buffer, in bytes         */
} VIRTIOSGSEG;

typedef VIRTIOSGSEG *PVIRTIOSGSEG, **PPVIRTIOSGSEG;
typedef const VIRTIOSGSEG *PCVIRTIOSGSEG;
68
/**
 * A guest-physical scatter/gather buffer: a segment array plus a cursor
 * (idxSeg / GCPhysCur / cbSegLeft) tracking the current position.  Counterpart
 * to RTSGBUF for RTGCPHYS-addressed memory; manipulated by the
 * virtioCoreGCPhysChain*() helpers below.
 */
typedef struct VIRTIOSGBUF
{
    PVIRTIOSGSEG paSegs;                            /**< Pointer to the scatter/gather array          */
    unsigned     cSegs;                             /**< Number of segs in scatter/gather array       */
    unsigned     idxSeg;                            /**< Current segment we are in                    */
    RTGCPHYS     GCPhysCur;                         /**< Guest-physical address within current seg    */
    size_t       cbSegLeft;                         /**< # of bytes left in the current segment       */
} VIRTIOSGBUF;

typedef VIRTIOSGBUF *PVIRTIOSGBUF, **PPVIRTIOSGBUF;
typedef const VIRTIOSGBUF *PCVIRTIOSGBUF;
80
/**
 * VirtIO buffers are descriptor chains (scatter-gather vectors). Each buffer is described
 * by the index of its head descriptor, which optionally chains to another descriptor
 * and so on.
 *
 * Each descriptor, [len, GCPhys] pair in the chain represents either an OUT segment (e.g. guest-to-host)
 * or an IN segment (host-to-guest). A VIRTQBUF is created and returned from a call to
 * virtioCoreR3VirtqAvailBufPeek() or virtioCoreR3VirtqAvailBufGet(). That function consolidates
 * the VirtIO descriptor chain into a representation, where pSgPhysSend is a GCPhys s/g buffer containing
 * all of the OUT descriptors and pSgPhysReturn is a GCPhys s/g buffer containing all of the IN descriptors
 * to be filled with data on the host to return to the guest.
 */
typedef struct VIRTQBUF
{
    uint32_t            u32Magic;                   /**< Magic value, VIRTQBUF_MAGIC.              */
    uint16_t            uVirtq;                     /**< VirtIO index of associated virtq          */
    uint16_t            pad;                        /**< Explicit padding for field alignment.     */
    uint32_t volatile   cRefs;                      /**< Reference counter.                        */
    uint32_t            uHeadIdx;                   /**< Head idx of associated desc chain         */
    size_t              cbPhysSend;                 /**< Total size of src buffer                  */
    PVIRTIOSGBUF        pSgPhysSend;                /**< Phys S/G buf for data from guest          */
    size_t              cbPhysReturn;               /**< Total size of dst buffer                  */
    PVIRTIOSGBUF        pSgPhysReturn;              /**< Phys S/G buf to store result for guest    */

    /** @name Internal (bird combined 5 allocations into a single), fingers off.
     * @{ */
    VIRTIOSGBUF         SgBufIn;                    /**< Backing storage for *pSgPhysReturn        */
    VIRTIOSGBUF         SgBufOut;                   /**< Backing storage for *pSgPhysSend          */
    VIRTIOSGSEG         aSegsIn[VIRTQ_MAX_ENTRIES]; /**< Segment storage for IN (host-to-guest)    */
    VIRTIOSGSEG         aSegsOut[VIRTQ_MAX_ENTRIES];/**< Segment storage for OUT (guest-to-host)   */
    /** @} */
} VIRTQBUF_T;

/** Pointers to a Virtio descriptor chain. */
typedef VIRTQBUF_T *PVIRTQBUF, **PPVIRTQBUF;

/** Magic value for VIRTQBUF_T::u32Magic. */
#define VIRTQBUF_MAGIC      UINT32_C(0x19600219)
119
/**
 * PCI configuration-space identity values supplied by the device-specific code
 * to virtioCoreR3Init() for populating the standard PCI config header.
 *
 * NOTE(review): class/prog-IF fields are 8-bit in PCI config space but stored
 * as uint16_t here — confirm the narrowing is handled at the point of use.
 */
typedef struct VIRTIOPCIPARAMS
{
    uint16_t  uDeviceId;                            /**< PCI Cfg Device ID                         */
    uint16_t  uClassBase;                           /**< PCI Cfg Base Class                        */
    uint16_t  uClassSub;                            /**< PCI Cfg Subclass                          */
    uint16_t  uClassProg;                           /**< PCI Cfg Programming Interface Class       */
    uint16_t  uSubsystemId;                         /**< PCI Cfg Card Manufacturer Vendor ID       */
    uint16_t  uInterruptLine;                       /**< PCI Cfg Interrupt line                    */
    uint16_t  uInterruptPin;                        /**< PCI Cfg Interrupt pin                     */
} VIRTIOPCIPARAMS, *PVIRTIOPCIPARAMS;
130
131#define VIRTIO_F_VERSION_1 RT_BIT_64(32) /**< Required feature bit for 1.0 devices */
132#define VIRTIO_F_INDIRECT_DESC RT_BIT_64(28) /**< Allow descs to point to list of descs */
133#define VIRTIO_F_EVENT_IDX RT_BIT_64(29) /**< Allow notification disable for n elems */
134#define VIRTIO_F_RING_INDIRECT_DESC RT_BIT_64(28) /**< Doc bug: Goes under two names in spec */
135#define VIRTIO_F_RING_EVENT_IDX RT_BIT_64(29) /**< Doc bug: Goes under two names in spec */
136
137#define VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED ( 0 ) /**< TBD: Add VIRTIO_F_INDIRECT_DESC */
138
139#define VIRTIO_ISR_VIRTQ_INTERRUPT RT_BIT_32(0) /**< Virtq interrupt bit of ISR register */
140#define VIRTIO_ISR_DEVICE_CONFIG RT_BIT_32(1) /**< Device configuration changed bit of ISR */
141#define DEVICE_PCI_VENDOR_ID_VIRTIO 0x1AF4 /**< Guest driver locates dev via (mandatory) */
142#define DEVICE_PCI_REVISION_ID_VIRTIO 1 /**< VirtIO 1.0 non-transitional drivers >= 1 */
143
144/** Reserved (*negotiated*) Feature Bits (e.g. device independent features, VirtIO 1.0 spec,section 6) */
145
146#define VIRTIO_MSI_NO_VECTOR 0xffff /**< Vector value to disable MSI for queue */
147
148/** Device Status field constants (from Virtio 1.0 spec) */
149#define VIRTIO_STATUS_ACKNOWLEDGE 0x01 /**< Guest driver: Located this VirtIO device */
150#define VIRTIO_STATUS_DRIVER 0x02 /**< Guest driver: Can drive this VirtIO dev. */
151#define VIRTIO_STATUS_DRIVER_OK 0x04 /**< Guest driver: Driver set-up and ready */
152#define VIRTIO_STATUS_FEATURES_OK 0x08 /**< Guest driver: Feature negotiation done */
153#define VIRTIO_STATUS_FAILED 0x80 /**< Guest driver: Fatal error, gave up */
154#define VIRTIO_STATUS_DEVICE_NEEDS_RESET 0x40 /**< Device experienced unrecoverable error */
155
/**
 * VM lifecycle state-change events relayed to the VirtIO core so it can adjust
 * device state across reset / suspend / power-off / resume transitions.
 */
typedef enum VIRTIOVMSTATECHANGED
{
    kvirtIoVmStateChangedInvalid = 0,               /**< Customary invalid zero value              */
    kvirtIoVmStateChangedReset,                     /**< VM is being reset                         */
    kvirtIoVmStateChangedSuspend,                   /**< VM is being suspended                     */
    kvirtIoVmStateChangedPowerOff,                  /**< VM is being powered off                   */
    kvirtIoVmStateChangedResume,                    /**< VM is resuming                            */
    kvirtIoVmStateChangedFor32BitHack = 0x7fffffff  /**< Forces the enum to a 32-bit type          */
} VIRTIOVMSTATECHANGED;
165
166/** @def Virtio Device PCI Capabilities type codes */
167#define VIRTIO_PCI_CAP_COMMON_CFG 1 /**< Common configuration PCI capability ID */
168#define VIRTIO_PCI_CAP_NOTIFY_CFG 2 /**< Notification area PCI capability ID */
169#define VIRTIO_PCI_CAP_ISR_CFG 3 /**< ISR PCI capability id */
170#define VIRTIO_PCI_CAP_DEVICE_CFG 4 /**< Device-specific PCI cfg capability ID */
171#define VIRTIO_PCI_CAP_PCI_CFG 5 /**< PCI CFG capability ID */
172
173#define VIRTIO_PCI_CAP_ID_VENDOR 0x09 /**< Vendor-specific PCI CFG Device Cap. ID */
174
/**
 * The following is the PCI capability struct common to all VirtIO capability types.
 *
 * NOTE(review): the guest reads this from PCI config space, so the layout must
 * track struct virtio_pci_cap in the VirtIO 1.0 spec (section 4.1.4) — verify
 * against the spec when changing.
 */
typedef struct virtio_pci_cap
{
    /* All little-endian */
    uint8_t   uCapVndr;                             /**< Generic PCI field: PCI_CAP_ID_VNDR        */
    uint8_t   uCapNext;                             /**< Generic PCI field: next ptr.              */
    uint8_t   uCapLen;                              /**< Generic PCI field: capability length      */
    uint8_t   uCfgType;                             /**< Identifies the structure.                 */
    uint8_t   uBar;                                 /**< Where to find it.                         */
    uint8_t   uPadding[3];                          /**< Pad to full dword.                        */
    uint32_t  uOffset;                              /**< Offset within bar.  (L.E.)                */
    uint32_t  uLength;                              /**< Length of struct, in bytes. (L.E.)        */
} VIRTIO_PCI_CAP_T, *PVIRTIO_PCI_CAP_T;
190
/**
 * VirtIO 1.0 Capabilities' related MMIO-mapped structs:
 *
 * Note: virtio_pci_device_cap is dev-specific, implemented by client. Definition unknown here.
 *
 * NOTE(review): the guest accesses these fields via MMIO, so widths and order
 * must track struct virtio_pci_common_cfg in the VirtIO 1.0 spec (4.1.4.3).
 */
typedef struct virtio_pci_common_cfg
{
    /* Device-specific fields */
    uint32_t uDeviceFeaturesSelect;                 /**< RW (driver selects device features)       */
    uint32_t uDeviceFeatures;                       /**< RO (device reports features to driver)    */
    uint32_t uDriverFeaturesSelect;                 /**< RW (driver selects driver features)       */
    uint32_t uDriverFeatures;                       /**< RW (driver-accepted device features)      */
    uint16_t uMsixConfig;                           /**< RW (driver sets MSI-X config vector)      */
    uint16_t uNumVirtqs;                            /**< RO (device specifies max queues)          */
    uint8_t  fDeviceStatus;                         /**< RW (driver writes device status, 0=reset) */
    uint8_t  uConfigGeneration;                     /**< RO (device changes when changing configs) */

    /* Virtq-specific fields (values reflect (via MMIO) info related to queue indicated by uVirtqSelect. */
    uint16_t uVirtqSelect;                          /**< RW (selects queue focus for these fields) */
    uint16_t uSize;                                 /**< RW (queue size, 0 - 2^n)                  */
    uint16_t uMsix;                                 /**< RW (driver selects MSI-X queue vector)    */
    uint16_t uEnable;                               /**< RW (driver controls usability of queue)   */
    uint16_t uNotifyOffset;                         /**< RO (offset into virtqueue; see spec)      */
    uint64_t GCPhysVirtqDesc;                       /**< RW (driver writes desc table phys addr)   */
    uint64_t GCPhysVirtqAvail;                      /**< RW (driver writes avail ring phys addr)   */
    uint64_t GCPhysVirtqUsed;                       /**< RW (driver writes used ring phys addr)    */
} VIRTIO_PCI_COMMON_CFG_T, *PVIRTIO_PCI_COMMON_CFG_T;
218
/**
 * Notification capability: the common capability header plus the
 * notify_off_multiplier used to compute per-queue notification addresses.
 */
typedef struct virtio_pci_notify_cap
{
    struct virtio_pci_cap pciCap;                   /**< Notification MMIO mapping capability      */
    uint32_t uNotifyOffMultiplier;                  /**< notify_off_multiplier                     */
} VIRTIO_PCI_NOTIFY_CAP_T, *PVIRTIO_PCI_NOTIFY_CAP_T;
224
/**
 * PCI configuration access capability: the common header names a BAR/offset/
 * length window, and uPciCfgData is the data buffer for accessing it through
 * PCI config space.
 */
typedef struct virtio_pci_cfg_cap
{
    struct virtio_pci_cap pciCap;                   /**< Cap. defines the BAR/off/len to access    */
    uint8_t uPciCfgData[4];                         /**< I/O buf for above cap.                    */
} VIRTIO_PCI_CFG_CAP_T, *PVIRTIO_PCI_CFG_CAP_T;
230
/**
 * PCI capability data locations (PCI CFG and MMIO).
 */
typedef struct VIRTIO_PCI_CAP_LOCATIONS_T
{
    uint16_t        offMmio;                        /**< Offset of the capability data in the MMIO region   */
    uint16_t        cbMmio;                         /**< Size of the capability data in the MMIO region     */
    uint16_t        offPci;                         /**< Offset of the capability struct in PCI config space */
    uint16_t        cbPci;                          /**< Size of the capability struct in PCI config space   */
} VIRTIO_PCI_CAP_LOCATIONS_T;
241
242typedef struct VIRTQUEUE
243{
244 RTGCPHYS GCPhysVirtqDesc; /**< (MMIO) PhysAdr per-Q desc structs GUEST */
245 RTGCPHYS GCPhysVirtqAvail; /**< (MMIO) PhysAdr per-Q avail structs GUEST */
246 RTGCPHYS GCPhysVirtqUsed; /**< (MMIO) PhysAdr per-Q used structs GUEST */
247 uint16_t uMsix; /**< (MMIO) Per-queue vector for MSI-X GUEST */
248 uint16_t uEnable; /**< (MMIO) Per-queue enable GUEST */
249 uint16_t uNotifyOffset; /**< (MMIO) per-Q notify offset HOST */
250 uint16_t uSize; /**< (MMIO) Per-queue size HOST/GUEST */
251 uint16_t uAvailIdxShadow; /**< Consumer's position in avail ring */
252 uint16_t uUsedIdxShadow; /**< Consumer's position in used ring */
253 uint16_t uVirtq; /**< Index of this queue */
254 char szName[32]; /**< Dev-specific name of queue */
255 bool fUsedRingEvent; /**< Flags if used idx to notify guest reached */
256 uint8_t padding[3];
257} VIRTQUEUE, *PVIRTQUEUE;
258
/**
 * The core/common state of the VirtIO PCI devices, shared edition.
 */
typedef struct VIRTIOCORE
{
    char                        szInstance[16];     /**< Instance name, e.g. "VIRTIOSCSI0"         */
    PPDMDEVINS                  pDevInsR0;          /**< Client device instance, ring-0 pointer    */
    PPDMDEVINS                  pDevInsR3;          /**< Client device instance, ring-3 pointer    */
    VIRTQUEUE                   aVirtqueues[VIRTQ_MAX_COUNT]; /**< (MMIO) VirtIO contexts for queues */
    uint64_t                    uDeviceFeatures;    /**< (MMIO) Host features offered         HOST */
    uint64_t                    uDriverFeatures;    /**< (MMIO) Host features accepted       GUEST */
    uint32_t                    uDeviceFeaturesSelect; /**< (MMIO) hi/lo select uDeviceFeatures GUEST */
    uint32_t                    uDriverFeaturesSelect; /**< (MMIO) hi/lo select uDriverFeatures GUEST */
    uint32_t                    uMsixConfig;        /**< (MMIO) MSI-X vector                 GUEST */
    uint8_t                     fDeviceStatus;      /**< (MMIO) Device Status                GUEST */
    uint8_t                     uPrevDeviceStatus;  /**< (MMIO) Prev Device Status           GUEST */
    uint8_t                     uConfigGeneration;  /**< (MMIO) Device config sequencer       HOST */

    /** @name The locations of the capability structures in PCI config space and the BAR.
     * @{ */
    VIRTIO_PCI_CAP_LOCATIONS_T  LocPciCfgCap;       /**< VIRTIO_PCI_CFG_CAP_T                      */
    VIRTIO_PCI_CAP_LOCATIONS_T  LocNotifyCap;       /**< VIRTIO_PCI_NOTIFY_CAP_T                   */
    VIRTIO_PCI_CAP_LOCATIONS_T  LocCommonCfgCap;    /**< VIRTIO_PCI_CAP_T                          */
    VIRTIO_PCI_CAP_LOCATIONS_T  LocIsrCap;          /**< VIRTIO_PCI_CAP_T                          */
    VIRTIO_PCI_CAP_LOCATIONS_T  LocDeviceCap;       /**< VIRTIO_PCI_CAP_T + custom data.           */
    /** @} */

    uint16_t                    uVirtqSelect;       /**< (MMIO) queue selector               GUEST */
    bool                        fGenUpdatePending;  /**< If set, update cfg gen after driver reads */
    uint8_t                     uPciCfgDataOff;     /**< Offset to PCI configuration data area     */
    uint8_t                     uISR;               /**< Interrupt Status Register.                */
    uint8_t                     fMsiSupport;        /**< Flag set if using MSI instead of ISR      */
    /** The MMIO handle for the PCI capability region (\#2). */
    IOMMMIOHANDLE               hMmioPciCap;

    /** @name Statistics
     * @{ */
    STAMCOUNTER                 StatDescChainsAllocated;    /**< Descriptor chains (VIRTQBUF) allocated  */
    STAMCOUNTER                 StatDescChainsFreed;        /**< Descriptor chains (VIRTQBUF) freed      */
    STAMCOUNTER                 StatDescChainsSegsIn;       /**< IN (host-to-guest) segments processed   */
    STAMCOUNTER                 StatDescChainsSegsOut;      /**< OUT (guest-to-host) segments processed  */
    /** @} */
} VIRTIOCORE;
302
303#define MAX_NAME 64
304
/**
 * The core/common state of the VirtIO PCI devices, ring-3 edition.
 */
typedef struct VIRTIOCORER3
{
    /** @name Callbacks filled by the device before calling virtioCoreR3Init.
     * @{ */
    /**
     * Implementation-specific client callback to notify client of significant device status
     * changes.
     *
     * @param pVirtio    Pointer to the shared virtio state.
     * @param pVirtioCC  Pointer to the ring-3 virtio state.
     * @param fDriverOk  True if guest driver is okay (thus queues, etc... are
     *                   valid)
     */
    DECLCALLBACKMEMBER(void, pfnStatusChanged,(PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC, uint32_t fDriverOk));

    /**
     * Implementation-specific client callback to access VirtIO Device-specific capabilities
     * (other VirtIO capabilities and features are handled in VirtIO implementation)
     *
     * @param pDevIns   The device instance.
     * @param offCap    Offset within device specific capabilities struct.
     * @param pvBuf     Buffer in which to save read data.
     * @param cbToRead  Number of bytes to read.
     */
    DECLCALLBACKMEMBER(int, pfnDevCapRead,(PPDMDEVINS pDevIns, uint32_t offCap, void *pvBuf, uint32_t cbToRead));

    /**
     * Implementation-specific client callback to access VirtIO Device-specific capabilities
     * (other VirtIO capabilities and features are handled in VirtIO implementation)
     *
     * @param pDevIns  The device instance.
     * @param offCap   Offset within device specific capabilities struct.
     * @param pvBuf    Buffer with the bytes to write.
     * @param cbWrite  Number of bytes to write.
     */
    DECLCALLBACKMEMBER(int, pfnDevCapWrite,(PPDMDEVINS pDevIns, uint32_t offCap, const void *pvBuf, uint32_t cbWrite));


    /**
     * When guest-to-host queue notifications are enabled, the guest driver notifies the host
     * that the avail queue has buffers, and this callback informs the client.
     *
     * @param pDevIns    The device instance.
     * @param pVirtio    Pointer to the shared virtio state.
     * @param uVirtqNbr  Index of the notified queue
     */
    DECLCALLBACKMEMBER(void, pfnVirtqNotified,(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr));

    /** @} */

    R3PTRTYPE(PVIRTIO_PCI_CFG_CAP_T)    pPciCfgCap;     /**< Pointer to struct in PCI config area. */
    R3PTRTYPE(PVIRTIO_PCI_NOTIFY_CAP_T) pNotifyCap;     /**< Pointer to struct in PCI config area. */
    R3PTRTYPE(PVIRTIO_PCI_CAP_T)        pCommonCfgCap;  /**< Pointer to struct in PCI config area. */
    R3PTRTYPE(PVIRTIO_PCI_CAP_T)        pIsrCap;        /**< Pointer to struct in PCI config area. */
    R3PTRTYPE(PVIRTIO_PCI_CAP_T)        pDeviceCap;     /**< Pointer to struct in PCI config area. */

    uint32_t                            cbDevSpecificCfg;       /**< Size of client's dev-specific config data */
    R3PTRTYPE(uint8_t *)                pbDevSpecificCfg;       /**< Pointer to client's struct                */
    R3PTRTYPE(uint8_t *)                pbPrevDevSpecificCfg;   /**< Previous read dev-specific cfg of client  */
    bool                                fGenUpdatePending;      /**< If set, update cfg gen after driver reads */
    /* NOTE(review): embedded char buffer despite the 'pcsz' (pointer-to-const-string) prefix. */
    char                                pcszMmioName[MAX_NAME]; /**< MMIO mapping name                         */
} VIRTIOCORER3;
370
/**
 * The core/common state of the VirtIO PCI devices, ring-0 edition.
 */
typedef struct VIRTIOCORER0
{
    /**
     * When guest-to-host queue notifications are enabled, the guest driver notifies the host
     * that the avail queue has buffers, and this callback informs the client.
     *
     * @param pDevIns    The device instance.
     * @param pVirtio    Pointer to the shared virtio state.
     * @param uVirtqNbr  Index of the notified queue
     */
    DECLCALLBACKMEMBER(void, pfnVirtqNotified,(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr));

} VIRTIOCORER0;
387
/**
 * The core/common state of the VirtIO PCI devices, raw-mode edition.
 */
typedef struct VIRTIOCORERC
{
    uint64_t uUnusedAtTheMoment;                    /**< Placeholder; raw-mode holds no state yet. */
} VIRTIOCORERC;
395
396
397/** @typedef VIRTIOCORECC
398 * The instance data for the current context. */
399typedef CTX_SUFF(VIRTIOCORE) VIRTIOCORECC;
400
401
402/** @name API for VirtIO parent device
403 * @{ */
404
405/**
406 * Setup PCI device controller and Virtio state
407 *
408 * This should be called from PDMDEVREGR3::pfnConstruct.
409 *
410 * @param pDevIns The device instance.
411 * @param pVirtio Pointer to the shared virtio state. This
412 * must be the first member in the shared
413 * device instance data!
414 * @param pVirtioCC Pointer to the ring-3 virtio state. This
415 * must be the first member in the ring-3
416 * device instance data!
417 * @param pPciParams Values to populate industry standard PCI Configuration Space data structure
418 * @param pcszInstance Device instance name (format-specifier)
419 * @param fDevSpecificFeatures VirtIO device-specific features offered by
420 * client
421 * @param cbDevSpecificCfg Size of virtio_pci_device_cap device-specific struct
422 * @param pvDevSpecificCfg Address of client's dev-specific
423 * configuration struct.
424 */
425int virtioCoreR3Init(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC,
426 PVIRTIOPCIPARAMS pPciParams, const char *pcszInstance,
427 uint64_t fDevSpecificFeatures, void *pvDevSpecificCfg, uint16_t cbDevSpecificCfg);
428
429/**
430 * Initiate orderly reset procedure. This is an exposed API for clients that might need it.
431 * Invoked by client to reset the device and driver (see VirtIO 1.0 section 2.1.1/2.1.2)
432 *
433 * @param pVirtio Pointer to the virtio state.
434 */
435void virtioCoreResetAll(PVIRTIOCORE pVirtio);
436
437/**
438 * 'Attaches' host device-specific implementation's queue state to host VirtIO core
439 * virtqueue management infrastructure, informing the virtio core of the name of the
440 * queue associated with the queue number. uVirtqNbr is used as the 'handle' for virt queues
441 * in this API (and is opaquely the index into the VirtIO core's array of queue state).
442 *
443 * Virtqueue numbers are VirtIO specification defined (i.e. they are unique within each
444 * VirtIO device type).
445 *
446 * @param pVirtio Pointer to the shared virtio state.
447 * @param uVirtqNbr Virtq number
448 * @param pcszName Name to give queue
449 *
450 * @returns VBox status code.
451 */
452int virtioCoreR3VirtqAttach(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, const char *pcszName);
453
454/**
455 * Enables or disables a virtq
456 *
457 * @param pVirtio Pointer to the shared virtio state.
458 * @param uVirtqNbr Virtq number
459 * @param fEnable Flags whether to enable or disable the virtq
460 *
461 */
462void virtioCoreVirtqEnable(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, bool fEnable);
463
464/**
465 * Enable or disable notification for the specified queue.
466 *
467 * With notification enabled, the guest driver notifies the host device (via MMIO
468 * to the queue notification offset describe in VirtIO 1.0, 4.1.4.4 "Notification Structure Layout")
469 * whenever the guest driver adds a new entry to the avail ring of the respective queue.
470 *
471 * Note: In the VirtIO world, the device sets flags in the used ring to communicate to the driver how to
472 * handle notifications for the avail ring and the drivers sets flags in the avail ring to communicate
473 * to the device how to handle sending interrupts for the used ring.
474 *
475 * @param pVirtio Pointer to the shared virtio state.
476 * @param uVirtqNbr Virtq number
477 * @param fEnable Selects notification mode (enabled or disabled)
478 */
479void virtioCoreVirtqEnableNotify(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, bool fEnable);
480
481/**
482 * Notifies guest (via ISR or MSI-X) of device configuration change
483 *
484 * @param pVirtio Pointer to the shared virtio state.
485 */
486void virtioCoreNotifyConfigChanged(PVIRTIOCORE pVirtio);
487
488/**
489 * Displays the VirtIO spec-related features offered by the core component,
490 * as well as which features have been negotiated and accepted or declined by the guest driver,
491 * providing a summary view of the configuration the device is operating with.
492 *
493 * @param pVirtio Pointer to the shared virtio state.
494 * @param pHlp Pointer to the debug info hlp struct
495 */
496void virtioCorePrintFeatures(VIRTIOCORE *pVirtio, PCDBGFINFOHLP pHlp);
497
/**
 * Debugging assist feature that displays the state of the VirtIO core code, which includes
500 * an overview of the state of all of the queues.
501 *
502 * This can be invoked when running the VirtualBox debugger, or from the command line
503 * using the command: "VboxManage debugvm <VM name or id> info <device name> [args]"
504 *
505 * Example: VBoxManage debugvm myVnetVm info "virtio-net" help
506 *
507 * This is implemented currently to be invoked by the inheriting device-specific code
508 * (see DevVirtioNet for an example, which receives the debugvm callback directly).
509 * DevVirtioNet lists the available sub-options if no arguments are provided. In that
510 * example this virtq info related function is invoked hierarchically when virtio-net
511 * displays its device-specific queue info.
512 *
513 * @param pDevIns The device instance.
514 * @param pHlp Pointer to the debug info hlp struct
515 * @param pszArgs Arguments to function
516 */
517void virtioCoreR3VirtqInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs, int uVirtqNbr);
518
519/**
520 * Returns the number of avail bufs in the virtq.
521 *
522 * @param pDevIns The device instance.
523 * @param pVirtio Pointer to the shared virtio state.
524 * @param uVirtqNbr Virtqueue to return the count of buffers available for.
525 */
526uint16_t virtioCoreVirtqAvailBufCount(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr);
527
528/**
 * This function is identical to virtioCoreR3VirtqAvailBufGet(), except it doesn't 'consume'
 * the buffer from the avail ring of the virtq. The peek operation becomes identical to a get
 * operation if virtioCoreR3VirtqAvailBufNext() is called to consume the buffer from the avail ring,
 * at which point virtioCoreR3VirtqUsedBufPut() must be called to complete the roundtrip
 * transaction by putting the descriptor on the used ring.
534 *
535 *
536 * @param pDevIns The device instance.
537 * @param pVirtio Pointer to the shared virtio state.
538 * @param uVirtqNbr Virtq number
539 * @param ppVirtqBuf Address to store pointer to descriptor chain that contains the
540 * pre-processed transaction information pulled from the virtq.
541 *
542 * @returns VBox status code:
543 * @retval VINF_SUCCESS Success
544 * @retval VERR_INVALID_STATE VirtIO not in ready state (asserted).
545 * @retval VERR_NOT_AVAILABLE If the queue is empty.
546 */
547int virtioCoreR3VirtqAvailBufPeek(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr,
548 PPVIRTQBUF ppVirtqBuf);
549
550/**
551 * This function fetches the next buffer (descriptor chain) from the VirtIO "avail" ring of
552 * indicated queue, and converts the buf's s/g vectors into OUT (e.g. guest-to-host)
553 * components and and IN (host-to-guest) components.
554 *
 * The caller is responsible for GCPhys to host virtual memory conversions. If the
 * virtq buffer being peeked at is "consumed", virtioCoreR3VirtqAvailBufNext() must
 * be called and in that case virtioCoreR3VirtqUsedBufPut() must be called to
 * complete the roundtrip virtq transaction.
559 *
560 * @param pDevIns The device instance.
561 * @param pVirtio Pointer to the shared virtio state.
562 * @param uVirtqNbr Virtq number
563 * @param ppVirtqBuf Address to store pointer to descriptor chain that contains the
564 * pre-processed transaction information pulled from the virtq.
565 * Returned reference must be released by calling
566 * virtioCoreR3VirtqBufRelease().
567 * @param fRemove flags whether to remove desc chain from queue (false = peek)
568 *
569 * @returns VBox status code:
570 * @retval VINF_SUCCESS Success
571 * @retval VERR_INVALID_STATE VirtIO not in ready state (asserted).
572 * @retval VERR_NOT_AVAILABLE If the queue is empty.
573 */
574int virtioCoreR3VirtqAvailBufGet(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr,
575 PPVIRTQBUF ppVirtqBuf, bool fRemove);
576
577/**
578 * Fetches a specific descriptor chain using avail ring of indicated queue and converts the descriptor
579 * chain into its OUT (to device) and IN (to guest) components.
580 *
581 * The caller is responsible for GCPhys to host virtual memory conversions and *must*
582 * return the virtq buffer using virtioCoreR3VirtqUsedBufPut() to complete the roundtrip
583 * virtq transaction.
 *
585 * @param pDevIns The device instance.
586 * @param pVirtio Pointer to the shared virtio state.
587 * @param uVirtqNbr Virtq number
588 * @param ppVirtqBuf Address to store pointer to descriptor chain that contains the
589 * pre-processed transaction information pulled from the virtq.
590 * Returned reference must be released by calling
591 * virtioCoreR3VirtqBufRelease().
592 * @param fRemove flags whether to remove desc chain from queue (false = peek)
593 *
594 * @returns VBox status code:
595 * @retval VINF_SUCCESS Success
596 * @retval VERR_INVALID_STATE VirtIO not in ready state (asserted).
597 * @retval VERR_NOT_AVAILABLE If the queue is empty.
598 */
599int virtioCoreR3VirtqAvailBufGet(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr,
600 uint16_t uHeadIdx, PPVIRTQBUF ppVirtqBuf);
601
602/**
603 * Returns data to the guest to complete a transaction initiated by virtioCoreR3VirtqAvailBufGet(),
604 * or virtioCoreR3VirtqAvailBufPeek()/virtioCoreR3VirtqBufSync() call pairs to complete each
605 * intervening a roundtrip transaction, ultimately putting each descriptor chain pulled from the
606 * avail ring of a queue onto the used ring of the queue. wherein I/O transactions are always
607 * initiated by the guest and completed by the host. In other words, for the host to send any
608 * data to the guest, the guest must provide buffers, for the host to fill, via the avail ring
609 * of the virtq.
610 *
 * At some point virtioCoreR3VirtqUsedRingSync() must be called to return data to the guest,
 * completing all pending virtioCoreR3VirtqUsedBufPut() transactions that have accumulated since
 * the last call to virtioCoreR3VirtqUsedRingSync().
614
615 * @note This does a write-ahead to the used ring of the guest's queue. The data
616 * written won't be seen by the guest until the next call to virtioCoreVirtqUsedRingSync()
617 *
618 *
619 * @param pDevIns The device instance (for reading).
620 * @param pVirtio Pointer to the shared virtio state.
621 * @param uVirtqNbr Virtq number
622 *
623 * @param pSgVirtReturn Points to scatter-gather buffer of virtual memory
624 * segments the caller is returning to the guest.
625 *
626 * @param pVirtqBuf This contains the context of the scatter-gather
627 * buffer originally pulled from the queue.
628 *
629 * @param fFence If true, put up copy fence (memory barrier) after
630 * copying to guest phys. mem.
631 *
632 * @returns VBox status code.
633 * @retval VINF_SUCCESS Success
634 * @retval VERR_INVALID_STATE VirtIO not in ready state
635 * @retval VERR_NOT_AVAILABLE Virtq is empty
636 *
637 * @note This function will not release any reference to pVirtqBuf. The
638 * caller must take care of that.
639 */
640int virtioCoreR3VirtqUsedBufPut(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, PRTSGBUF pSgVirtReturn,
641 PVIRTQBUF pVirtqBuf, bool fFence);
642/**
643 * Advance index of avail ring to next entry in specified virtq (see virtioCoreR3VirtqAvailBufPeek())
644 *
645 * @param pVirtio Pointer to the virtio state.
646 * @param uVirtqNbr Index of queue
647 */
648int virtioCoreR3VirtqAvailBufNext(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr);
649
650
651DECLINLINE(void) virtioCoreGCPhysChainInit(PVIRTIOSGBUF pGcSgBuf, PVIRTIOSGSEG paSegs, size_t cSegs)
652{
653 AssertPtr(pGcSgBuf);
654 Assert((cSegs > 0 && VALID_PTR(paSegs)) || (!cSegs && !paSegs));
655 Assert(cSegs < (~(unsigned)0 >> 1));
656
657 pGcSgBuf->paSegs = paSegs;
658 pGcSgBuf->cSegs = (unsigned)cSegs;
659 pGcSgBuf->idxSeg = 0;
660 if (cSegs && paSegs)
661 {
662 pGcSgBuf->GCPhysCur = paSegs[0].GCPhys;
663 pGcSgBuf->cbSegLeft = paSegs[0].cbSeg;
664 }
665 else
666 {
667 pGcSgBuf->GCPhysCur = 0;
668 pGcSgBuf->cbSegLeft = 0;
669 }
670}
671
/**
 * Fetch the next contiguous chunk from a GCPhys s/g chain and advance its cursor.
 * (GCPhys analog of RTSgBufGet.)
 *
 * @param pGcSgBuf  The GCPhys s/g chain to read from / advance.
 * @param pcbData   in:  maximum number of bytes wanted;
 *                  out: number of contiguous bytes available at the returned address.
 * @returns Guest-physical address of the chunk, or 0 (with *pcbData = 0) if the
 *          chain is exhausted.
 */
DECLINLINE(RTGCPHYS) virtioCoreGCPhysChainGet(PVIRTIOSGBUF pGcSgBuf, size_t *pcbData)
{
    size_t cbData;
    RTGCPHYS pGcBuf;

    /* Check that the S/G buffer has memory left. */
    if (RT_LIKELY(pGcSgBuf->idxSeg < pGcSgBuf->cSegs && pGcSgBuf->cbSegLeft))
    { /* likely */ }
    else
    {
        *pcbData = 0;
        return 0;
    }

    /* Sanity: the cursor must lie within the bounds of the current segment
     * (and no segment is expected to exceed 128MB). */
    AssertMsg(    pGcSgBuf->cbSegLeft <= 128 * _1M
              && (RTGCPHYS)pGcSgBuf->GCPhysCur >= (RTGCPHYS)pGcSgBuf->paSegs[pGcSgBuf->idxSeg].GCPhys
              && (RTGCPHYS)pGcSgBuf->GCPhysCur + pGcSgBuf->cbSegLeft <=
                    (RTGCPHYS)pGcSgBuf->paSegs[pGcSgBuf->idxSeg].GCPhys + pGcSgBuf->paSegs[pGcSgBuf->idxSeg].cbSeg,
                 ("pGcSgBuf->idxSeg=%d pGcSgBuf->cSegs=%d pGcSgBuf->GCPhysCur=%p pGcSgBuf->cbSegLeft=%zd "
                  "pGcSgBuf->paSegs[%d].GCPhys=%p pGcSgBuf->paSegs[%d].cbSeg=%zd\n",
                  pGcSgBuf->idxSeg, pGcSgBuf->cSegs, pGcSgBuf->GCPhysCur, pGcSgBuf->cbSegLeft,
                  pGcSgBuf->idxSeg, pGcSgBuf->paSegs[pGcSgBuf->idxSeg].GCPhys, pGcSgBuf->idxSeg,
                  pGcSgBuf->paSegs[pGcSgBuf->idxSeg].cbSeg));

    /* Clamp the request to what remains of the current segment. */
    cbData = RT_MIN(*pcbData, pGcSgBuf->cbSegLeft);
    pGcBuf = pGcSgBuf->GCPhysCur;
    pGcSgBuf->cbSegLeft -= cbData;
    if (!pGcSgBuf->cbSegLeft)
    {
        /* Current segment exhausted: step to the next one (if any). */
        pGcSgBuf->idxSeg++;

        if (pGcSgBuf->idxSeg < pGcSgBuf->cSegs)
        {
            pGcSgBuf->GCPhysCur = pGcSgBuf->paSegs[pGcSgBuf->idxSeg].GCPhys;
            pGcSgBuf->cbSegLeft = pGcSgBuf->paSegs[pGcSgBuf->idxSeg].cbSeg;
        }
        /* Only this path needs to write back the (clamped) count: in the else path
         * below the segment was NOT exhausted, which means the RT_MIN() above chose
         * *pcbData itself, so *pcbData already equals cbData. */
        *pcbData = cbData;
    }
    else
        pGcSgBuf->GCPhysCur = pGcSgBuf->GCPhysCur + cbData;

    return pGcBuf;
}
715
716DECLINLINE(void) virtioCoreGCPhysChainReset(PVIRTIOSGBUF pGcSgBuf)
717{
718 AssertPtrReturnVoid(pGcSgBuf);
719
720 pGcSgBuf->idxSeg = 0;
721 if (pGcSgBuf->cSegs)
722 {
723 pGcSgBuf->GCPhysCur = pGcSgBuf->paSegs[0].GCPhys;
724 pGcSgBuf->cbSegLeft = pGcSgBuf->paSegs[0].cbSeg;
725 }
726 else
727 {
728 pGcSgBuf->GCPhysCur = 0;
729 pGcSgBuf->cbSegLeft = 0;
730 }
731}
732
733DECLINLINE(RTGCPHYS) virtioCoreGCPhysChainAdvance(PVIRTIOSGBUF pGcSgBuf, size_t cbAdvance)
734{
735 AssertReturn(pGcSgBuf, 0);
736
737 size_t cbLeft = cbAdvance;
738 while (cbLeft)
739 {
740 size_t cbThisAdvance = cbLeft;
741 virtioCoreGCPhysChainGet(pGcSgBuf, &cbThisAdvance);
742 if (!cbThisAdvance)
743 break;
744
745 cbLeft -= cbThisAdvance;
746 }
747 return cbAdvance - cbLeft;
748}
749
750DECLINLINE(RTGCPHYS) virtioCoreGCPhysChainGetNextSeg(PVIRTIOSGBUF pGcSgBuf, size_t *pcbSeg)
751{
752 AssertReturn(pGcSgBuf, 0);
753 AssertPtrReturn(pcbSeg, 0);
754
755 if (!*pcbSeg)
756 *pcbSeg = pGcSgBuf->cbSegLeft;
757
758 return virtioCoreGCPhysChainGet(pGcSgBuf, pcbSeg);
759}
760
/* Note: virtioCoreGCPhysChainCalcBufSize() is defined once, further down in this file,
 * taking a const-qualified (PCVIRTIOSGBUF) chain pointer.  An identical duplicate
 * definition previously lived here; it was removed because it was redundant (and, when
 * this header is compiled as C rather than C++, a conflicting redefinition of the
 * same symbol). */
769
/** Convenience accessor for the name assigned to queue a_uVirtq of a_pVirtio
 * (names are assigned during virtioCoreR3VirtqAttach(); see virtioCoreVirtqGetName()). */
#define VIRTQNAME(a_pVirtio, a_uVirtq) ((a_pVirtio)->aVirtqueues[(a_uVirtq)].szName)
771
772/**
773 * Add some bytes to a virtq (s/g) buffer, converting them from virtual memory to GCPhys
774 *
775 * To be performant it is left to the caller to validate the size of the buffer with regard
776 * to data being pulled from it to avoid overruns/underruns.
777 *
778 * @param pVirtio Pointer to the shared virtio state.
779 * @param pVirtqBuf output: virtq buffer
780 * @param pv input: virtual memory buffer to receive bytes
781 * @param cb number of bytes to add to the s/g buffer.
782 */
783DECLINLINE(void) virtioCoreR3VirqBufFill(PVIRTIOCORE pVirtio, PVIRTQBUF pVirtqBuf, void *pv, size_t cb)
784{
785 uint8_t *pb = (uint8_t *)pv;
786 size_t cbLim = RT_MIN(pVirtqBuf->cbPhysReturn, cb);
787 while (cbLim)
788 {
789 size_t cbSeg = cbLim;
790 RTGCPHYS GCPhys = virtioCoreGCPhysChainGetNextSeg(pVirtqBuf->pSgPhysReturn, &cbSeg);
791 PDMDevHlpPCIPhysWrite(pVirtio->pDevInsR3, GCPhys, pb, cbSeg);
792 pb += cbSeg;
793 cbLim -= cbSeg;
794 pVirtqBuf->cbPhysSend -= cbSeg;
795 }
796 LogFunc(("Added %d/%d bytes to %s buffer, head idx: %u (%d bytes remain)\n",
797 cb - cbLim, cb, VIRTQNAME(pVirtio, pVirtqBuf->uVirtq),
798 pVirtqBuf->uHeadIdx, pVirtqBuf->cbPhysReturn));
799}
800
801/**
802 * Extract some bytes out of a virtq (s/g) buffer, converting them from GCPhys to virtual memory
803 *
804 * To be performant it is left to the caller to validate the size of the buffer with regard
805 * to data being pulled from it to avoid overruns/underruns.
806 *
807 * @param pVirtio Pointer to the shared virtio state.
808 * @param pVirtqBuf input: virtq buffer
809 * @param pv output: virtual memory buffer to receive bytes
810 * @param cb number of bytes to Drain from buffer
811 */
812DECLINLINE(void) virtioCoreR3VirtqBufDrain(PVIRTIOCORE pVirtio, PVIRTQBUF pVirtqBuf, void *pv, size_t cb)
813{
814 uint8_t *pb = (uint8_t *)pv;
815 size_t cbLim = RT_MIN(pVirtqBuf->cbPhysSend, cb);
816 while (cbLim)
817 {
818 size_t cbSeg = cbLim;
819 RTGCPHYS GCPhys = virtioCoreGCPhysChainGetNextSeg(pVirtqBuf->pSgPhysSend, &cbSeg);
820 PDMDevHlpPCIPhysRead(pVirtio->pDevInsR3, GCPhys, pb, cbSeg);
821 pb += cbSeg;
822 cbLim -= cbSeg;
823 pVirtqBuf->cbPhysSend -= cbSeg;
824 }
825 LogFunc(("Drained %d/%d bytes from %s buffer, head idx: %u (%d bytes left)\n",
826 cb - cbLim, cb, VIRTQNAME(pVirtio, pVirtqBuf->uVirtq),
827 pVirtqBuf->uHeadIdx, pVirtqBuf->cbPhysSend));
828}
829
830#undef VIRTQNAME
831
832/**
833 * Updates indicated virtq's "used ring" descriptor index to match "shadow" index that tracks
834 * pending buffers added to the used ring, thus exposing all the data added by virtioCoreR3VirtqUsedBufPut()
835 * to the "used ring" since the last virtioCoreVirtqUsedRingSync().
836 *
837 * This *must* be invoked after one or more virtioCoreR3VirtqUsedBufPut() calls to inform guest driver
838 * there is data in the queue. If enabled by guest, IRQ or MSI-X signalling will notify guest
839 * proactively, otherwise guest detects updates by polling. (see VirtIO 1.0, Section 2.4 "Virtqueues").
840 *
841 * @param pDevIns The device instance.
842 * @param pVirtio Pointer to the shared virtio state.
843 * @param uVirtqNbr Virtq number
844 *
845 * @returns VBox status code.
846 * @retval VINF_SUCCESS Success
847 * @retval VERR_INVALID_STATE VirtIO not in ready state
848 */
849int virtioCoreVirtqUsedRingSync(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr);
850
851/**
852 * Retains a reference to the given descriptor chain.
853 *
854 * @param pVirtqBuf The descriptor chain to reference.
855 *
856 * @returns New reference count.
857 * @retval UINT32_MAX on invalid parameter.
858 */
859uint32_t virtioCoreR3VirtqBufRetain(PVIRTQBUF pVirtqBuf);
860
861/**
862 * Releases a reference to the given descriptor chain.
863 *
864 * @param pVirtio Pointer to the shared virtio state.
865 * @param pVirtqBuf The descriptor chain to reference. NULL is quietly
866 * ignored (returns 0).
867 * @returns New reference count.
868 * @retval 0 if freed or invalid parameter.
869 */
870uint32_t virtioCoreR3VirtqBufRelease(PVIRTIOCORE pVirtio, PVIRTQBUF pVirtqBuf);
871
872/**
873 * Return queue enable state
874 *
875 * @param pVirtio Pointer to the virtio state.
876 * @param uVirtqNbr Virtq number.
877 *
878 * @returns true or false indicating queue is enabled or not.
879 */
880DECLINLINE(bool) virtioCoreIsVirtqEnabled(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
881{
882 Assert(uVirtqNbr < RT_ELEMENTS(pVirtio->aVirtqueues));
883 return pVirtio->aVirtqueues[uVirtqNbr].uEnable != 0;
884}
885
886/**
887 * Get name of queue, via uVirtqNbr, assigned during virtioCoreR3VirtqAttach()
888 *
889 * @param pVirtio Pointer to the virtio state.
890 * @param uVirtqNbr Virtq number.
891 *
892 * @returns Pointer to read-only queue name.
893 */
894DECLINLINE(const char *) virtioCoreVirtqGetName(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
895{
896 Assert((size_t)uVirtqNbr < RT_ELEMENTS(pVirtio->aVirtqueues));
897 return pVirtio->aVirtqueues[uVirtqNbr].szName;
898}
899
900/**
901 * Get the bitmask of features VirtIO is running with. This is called by the device-specific
902 * VirtIO implementation to identify this device's operational configuration after features
903 * have been negotiated with guest VirtIO driver. Feature negotiation entails host indicating
904 * to guest which features it supports, then guest accepting among those offered which features
905 * it will enable. That becomes the agreement between the host and guest. The bitmask containing
906 * virtio core features plus device-specific features is provided as a parameter to virtioCoreR3Init()
907 * by the host side device-specific virtio implementation.
908 *
909 * @param pVirtio Pointer to the virtio state.
910 *
911 * @returns Features the guest driver has accepted, finalizing the operational features
912 */
913DECLINLINE(uint64_t) virtioCoreGetNegotiatedFeatures(PVIRTIOCORE pVirtio)
914{
915 return pVirtio->uDriverFeatures;
916}
917
918/**
 * Get the name of the VM state change associated with the enumeration variable
920 *
921 * @param enmState VM state (enumeration value)
922 *
923 * @returns associated text.
924 */
925const char *virtioCoreGetStateChangeText(VIRTIOVMSTATECHANGED enmState);
926
927/**
928 * Debug assist code for any consumer that inherits VIRTIOCORE.
929 * Log memory-mapped I/O input or output value.
930 *
931 * This is to be invoked by macros that assume they are invoked in functions with
932 * the relevant arguments. (See Virtio_1_0.cpp).
933 *
934 * It is exposed via the API so inheriting device-specific clients can provide similar
935 * logging capabilities for a consistent look-and-feel.
936 *
937 * @param pszFunc To avoid displaying this function's name via __FUNCTION__ or LogFunc()
 * @param pszMember Name of struct member
 * @param uMemberSize Size of the member, in bytes
939 * @param pv pointer to value
940 * @param cb size of value
941 * @param uOffset offset into member where value starts
942 * @param fWrite True if write I/O
943 * @param fHasIndex True if the member is indexed
944 * @param idx The index if fHasIndex
945 */
946void virtioCoreLogMappedIoValue(const char *pszFunc, const char *pszMember, uint32_t uMemberSize,
947 const void *pv, uint32_t cb, uint32_t uOffset,
948 int fWrite, int fHasIndex, uint32_t idx);
949
950/**
951 * Debug assist for any consumer
952 *
953 * Does a formatted hex dump using Log(()), recommend using VIRTIO_HEX_DUMP() macro to
954 * control enabling of logging efficiently.
955 *
956 * @param pv pointer to buffer to dump contents of
957 * @param cb count of characters to dump from buffer
958 * @param uBase base address of per-row address prefixing of hex output
959 * @param pszTitle Optional title. If present displays title that lists
960 * provided text with value of cb to indicate size next to it.
961 */
962void virtioCoreHexDump(uint8_t *pv, uint32_t cb, uint32_t uBase, const char *pszTitle);
963
964/**
965 * Debug assist for any consumer device code
 *
967 * Do a hex dump of memory in guest physical context
968 *
969 * @param GCPhys pointer to buffer to dump contents of
970 * @param cb count of characters to dump from buffer
971 * @param uBase base address of per-row address prefixing of hex output
972 * @param pszTitle Optional title. If present displays title that lists
973 * provided text with value of cb to indicate size next to it.
974 */
975void virtioCoreGCPhysHexDump(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, uint16_t cb, uint32_t uBase, const char *pszTitle);
976
/**
 * The following API functions identically to the similarly-named calls pertaining to the RTSGBUF
 */
980
981/**
982 * Calculate the length of a GCPhys s/g buffer by tallying the size of each segment.
983 *
984 * @param pGcSgBuf Guest Context (GCPhys) S/G buffer to calculate length of
985 */
986DECLINLINE(size_t) virtioCoreGCPhysChainCalcBufSize(PCVIRTIOSGBUF pGcSgBuf)
987{
988 size_t cb = 0;
989 unsigned i = pGcSgBuf->cSegs;
990 while (i-- > 0)
991 cb += pGcSgBuf->paSegs[i].cbSeg;
992 return cb;
993}
994
995
996/** Misc VM and PDM boilerplate */
997int virtioCoreR3SaveExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM);
998int virtioCoreR3LoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM);
999void virtioCoreR3VmStateChanged(PVIRTIOCORE pVirtio, VIRTIOVMSTATECHANGED enmState);
1000void virtioCoreR3Term(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC);
1001int virtioCoreRZInit(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio);
1002const char *virtioCoreGetStateChangeText(VIRTIOVMSTATECHANGED enmState);
1003
1004/*
1005 * The following macros assist with handling/logging MMIO accesses to VirtIO dev-specific config area,
1006 * in a way that enhances code readability and debug logging consistency.
1007 *
1008 * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
1009 */
1010
#ifdef LOG_ENABLED

/* Both logging macros are wrapped in do { } while (0) so that an invocation followed by a
 * semicolon forms exactly one statement -- matching the !LOG_ENABLED stubs below and avoiding
 * dangling-else surprises when the macros are used inside unbraced if/else bodies. */
# define VIRTIO_DEV_CONFIG_LOG_ACCESS(member, tCfgStruct, uOffsetOfAccess) \
    do { \
        if (LogIs7Enabled()) { \
            uint32_t uMbrOffset = uOffsetOfAccess - RT_UOFFSETOF(tCfgStruct, member); \
            uint32_t uMbrSize   = RT_SIZEOFMEMB(tCfgStruct, member); \
            virtioCoreLogMappedIoValue(__FUNCTION__, #member, uMbrSize, pv, cb, uMbrOffset, fWrite, false, 0); \
        } \
    } while (0)

# define VIRTIO_DEV_CONFIG_LOG_INDEXED_ACCESS(member, tCfgStruct, uOffsetOfAccess, uIdx) \
    do { \
        if (LogIs7Enabled()) { \
            uint32_t uMbrOffset = uOffsetOfAccess - RT_UOFFSETOF(tCfgStruct, member); \
            uint32_t uMbrSize   = RT_SIZEOFMEMB(tCfgStruct, member); \
            virtioCoreLogMappedIoValue(__FUNCTION__, #member, uMbrSize, pv, cb, uMbrOffset, fWrite, true, uIdx); \
        } \
    } while (0)
#else
# define VIRTIO_DEV_CONFIG_LOG_ACCESS(member, tCfgStruct, uMbrOffset) do { } while (0)
# define VIRTIO_DEV_CONFIG_LOG_INDEXED_ACCESS(member, tCfgStruct, uMbrOffset, uIdx) do { } while (0)
#endif
1030
1031DECLINLINE(bool) virtioCoreMatchMember(uint32_t uOffset, uint32_t cb, uint32_t uMemberOff,
1032 size_t uMemberSize, bool fSubFieldMatch)
1033{
1034 /* Test for 8-byte field (always accessed as two 32-bit components) */
1035 if (uMemberSize == 8)
1036 return (cb == sizeof(uint32_t)) && (uOffset == uMemberOff || uOffset == (uMemberOff + sizeof(uint32_t)));
1037
1038 if (fSubFieldMatch)
1039 return (uOffset >= uMemberOff) && (cb <= uMemberSize - (uOffset - uMemberOff));
1040
1041 /* Test for exact match */
1042 return (uOffset == uMemberOff) && (cb == uMemberSize);
1043}
1044
/**
 * Yields boolean true if uOffsetOfAccess falls within bytes of specified member of config struct
 * (cb is an implicit parameter and must be defined by the invoker)
 */
#define VIRTIO_DEV_CONFIG_SUBMATCH_MEMBER(member, tCfgStruct, uOffsetOfAccess) \
            virtioCoreMatchMember(uOffsetOfAccess, cb, \
                                  RT_UOFFSETOF(tCfgStruct, member), \
                                  RT_SIZEOFMEMB(tCfgStruct, member), true /* fSubfieldMatch */)

/**
 * Yields boolean true only if uOffsetOfAccess/cb exactly cover the specified member of the
 * config struct (same offset and same size; cb is an implicit parameter defined by the invoker)
 */
#define VIRTIO_DEV_CONFIG_MATCH_MEMBER(member, tCfgStruct, uOffsetOfAccess) \
            virtioCoreMatchMember(uOffsetOfAccess, cb, \
                                  RT_UOFFSETOF(tCfgStruct, member), \
                                  RT_SIZEOFMEMB(tCfgStruct, member), false /* fSubfieldMatch */)
1057
/**
 * Copy reads or copy writes specified member field of config struct (based on fWrite),
 * the memory described by cb and pv.
 *
 * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
 *
 * NOTE(review): no bounds are re-checked here -- presumably the caller has already
 * established, e.g. via VIRTIO_DEV_CONFIG_[SUB]MATCH_MEMBER(), that the access lies
 * within the member; verify at call sites.
 */
#define VIRTIO_DEV_CONFIG_ACCESS(member, tCfgStruct, uOffsetOfAccess, pCfgStruct) \
    do \
    { \
        uint32_t uOffsetInMember = uOffsetOfAccess - RT_UOFFSETOF(tCfgStruct, member); \
        if (fWrite) \
            memcpy(((char *)&(pCfgStruct)->member) + uOffsetInMember, pv, cb); \
        else \
            memcpy(pv, ((const char *)&(pCfgStruct)->member) + uOffsetInMember, cb); \
        VIRTIO_DEV_CONFIG_LOG_ACCESS(member, tCfgStruct, uOffsetOfAccess); \
    } while(0)
1074
/**
 * Copies bytes into memory described by cb, pv from the specified member field of the config struct.
 * The operation is a nop and logs error if implied parameter fWrite is true
 * (the attempted guest write is only logged; the struct is left unmodified).
 *
 * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
 */
#define VIRTIO_DEV_CONFIG_ACCESS_READONLY(member, tCfgStruct, uOffsetOfAccess, pCfgStruct) \
    do \
    { \
        uint32_t uOffsetInMember = uOffsetOfAccess - RT_UOFFSETOF(tCfgStruct, member); \
        if (fWrite) \
            LogFunc(("Guest attempted to write readonly virtio config struct (member %s)\n", #member)); \
        else \
        { \
            memcpy(pv, ((const char *)&(pCfgStruct)->member) + uOffsetInMember, cb); \
            VIRTIO_DEV_CONFIG_LOG_ACCESS(member, tCfgStruct, uOffsetOfAccess); \
        } \
    } while(0)
1093
/**
 * Copies into or out of specified member field of config struct (based on fWrite),
 * the memory described by cb and pv.  uIdx selects which element of the pCfgStruct
 * array the member is accessed in.
 *
 * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
 */
#define VIRTIO_DEV_CONFIG_ACCESS_INDEXED(member, uIdx, tCfgStruct, uOffsetOfAccess, pCfgStruct) \
    do \
    { \
        uint32_t uOffsetInMember = uOffsetOfAccess - RT_UOFFSETOF(tCfgStruct, member); \
        if (fWrite) \
            memcpy(((char *)&(pCfgStruct[uIdx].member)) + uOffsetInMember, pv, cb); \
        else \
            memcpy(pv, ((const char *)&(pCfgStruct[uIdx].member)) + uOffsetInMember, cb); \
        VIRTIO_DEV_CONFIG_LOG_INDEXED_ACCESS(member, tCfgStruct, uOffsetOfAccess, uIdx); \
    } while(0)
1110
/**
 * Copies bytes into memory described by cb, pv from the specified member field of element
 * uIdx of the config struct array.  The operation is a nop and logs error if implied
 * parameter fWrite is true.
 *
 * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
 *
 * Bug fix: the parameter was previously declared "uidx" while the body referenced "uIdx",
 * so the macro silently captured whatever variable named uIdx happened to be in the
 * caller's scope instead of using its own argument.
 */
#define VIRTIO_DEV_CONFIG_ACCESS_INDEXED_READONLY(member, uIdx, tCfgStruct, uOffsetOfAccess, pCfgStruct) \
    do \
    { \
        uint32_t uOffsetInMember = uOffsetOfAccess - RT_UOFFSETOF(tCfgStruct, member); \
        if (fWrite) \
            LogFunc(("Guest attempted to write readonly virtio config struct (member %s)\n", #member)); \
        else \
        { \
            memcpy(pv, ((const char *)&(pCfgStruct[uIdx].member)) + uOffsetInMember, cb); \
            VIRTIO_DEV_CONFIG_LOG_INDEXED_ACCESS(member, tCfgStruct, uOffsetOfAccess, uIdx); \
        } \
    } while(0)
1129
1130/** @} */
1131
1132/** @name API for VirtIO parent device
1133 * @{ */
1134
1135#endif /* !VBOX_INCLUDED_SRC_VirtIO_VirtioCore_h */
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette