VirtualBox

source: vbox/trunk/src/VBox/Devices/VirtIO/Virtio_1_0_impl.h@ 80308

Last change on this file since 80308 was 80308, checked in by vboxsync, 5 years ago

Storage/DevVirtioSCSI.cpp: Some cleanup of r132764

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 29.2 KB
Line 
1/* $Id: Virtio_1_0_impl.h 80308 2019-08-15 19:48:27Z vboxsync $ $Revision: 80308 $ $Date: 2019-08-15 19:48:27 +0000 (Thu, 15 Aug 2019) $ $Author: vboxsync $ */
2/** @file
3 * Virtio_1_0_impl.h - Virtio Declarations
4 */
5
6/*
7 * Copyright (C) 2009-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef VBOX_INCLUDED_SRC_VirtIO_Virtio_1_0_impl_h
19#define VBOX_INCLUDED_SRC_VirtIO_Virtio_1_0_impl_h
20#ifndef RT_WITHOUT_PRAGMA_ONCE
21# pragma once
22#endif
23
24#include "Virtio_1_0.h"
25
/** @name Saved state versions.
 * The saved state version is changed if either common or any of specific
 * parts are changed. That is, it is perfectly possible that the version
 * of saved vnet state will increase as a result of change in vblk structure
 * for example.
 * @{ */
#define VIRTIO_SAVEDSTATE_VERSION                   1
/** @} */

#define VIRTIO_F_VERSION_1                  RT_BIT_64(32)  /**< Required feature bit for 1.0 devices      */

#define VIRTIO_F_INDIRECT_DESC              RT_BIT_64(28)  /**< Allow descs to point to list of descs     */
#define VIRTIO_F_EVENT_IDX                  RT_BIT_64(29)  /**< Allow notification disable for n elems    */
#define VIRTIO_F_RING_INDIRECT_DESC         RT_BIT_64(28)  /**< Doc bug: Goes under two names in spec     */
#define VIRTIO_F_RING_EVENT_IDX             RT_BIT_64(29)  /**< Doc bug: Goes under two names in spec     */

/** Device-independent features this implementation offers to the guest driver. */
#define VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED ( VIRTIO_F_EVENT_IDX )  /**< TBD: Add VIRTIO_F_INDIRECT_DESC */

#define VIRTIO_ISR_VIRTQ_INTERRUPT          RT_BIT_32(0)   /**< Virtq interrupt bit of ISR register       */
#define VIRTIO_ISR_DEVICE_CONFIG            RT_BIT_32(1)   /**< Device configuration changed bit of ISR   */
#define DEVICE_PCI_VENDOR_ID_VIRTIO         0x1AF4         /**< Guest driver locates dev via (mandatory)  */
#define DEVICE_PCI_REVISION_ID_VIRTIO       1              /**< VirtIO 1.0 non-transitional drivers >= 1  */

/** Reserved (*negotiated*) Feature Bits (e.g. device independent features, VirtIO 1.0 spec, section 6) */

#define VIRTIO_MSI_NO_VECTOR                0xffff         /**< Vector value to disable MSI for queue     */

/** @name Device Status field constants (from Virtio 1.0 spec)
 * @{ */
#define VIRTIO_STATUS_ACKNOWLEDGE           0x01           /**< Guest driver: Located this VirtIO device  */
#define VIRTIO_STATUS_DRIVER                0x02           /**< Guest driver: Can drive this VirtIO dev.  */
#define VIRTIO_STATUS_DRIVER_OK             0x04           /**< Guest driver: Driver set-up and ready     */
#define VIRTIO_STATUS_FEATURES_OK           0x08           /**< Guest driver: Feature negotiation done    */
#define VIRTIO_STATUS_FAILED                0x80           /**< Guest driver: Fatal error, gave up        */
#define VIRTIO_STATUS_DEVICE_NEEDS_RESET    0x40           /**< Device experienced unrecoverable error    */
/** @} */

/** @name Virtio Device PCI Capabilities type codes (VirtIO 1.0, 4.1.4 "cfg_type")
 * @{ */
#define VIRTIO_PCI_CAP_COMMON_CFG           1              /**< Common configuration PCI capability ID    */
#define VIRTIO_PCI_CAP_NOTIFY_CFG           2              /**< Notification area PCI capability ID       */
#define VIRTIO_PCI_CAP_ISR_CFG              3              /**< ISR PCI capability id                     */
#define VIRTIO_PCI_CAP_DEVICE_CFG           4              /**< Device-specific PCI cfg capability ID     */
#define VIRTIO_PCI_CAP_PCI_CFG              5              /**< PCI CFG capability ID                     */
/** @} */

#define VIRTIO_PCI_CAP_ID_VENDOR            0x09           /**< Vendor-specific PCI CFG Device Cap. ID    */
/**
 * The following is the PCI capability struct common to all VirtIO capability types.
 * This layout is placed verbatim in PCI configuration space, hence the generic
 * PCI capability fields at the front (VirtIO 1.0, section 4.1.4).
 */
typedef struct virtio_pci_cap
{
    /* All little-endian */
    uint8_t   uCapVndr;                    /**< Generic PCI field: PCI_CAP_ID_VNDR        */
    uint8_t   uCapNext;                    /**< Generic PCI field: next ptr.              */
    uint8_t   uCapLen;                     /**< Generic PCI field: capability length      */
    uint8_t   uCfgType;                    /**< Identifies the structure.                 */
    uint8_t   uBar;                        /**< Where to find it.                         */
    uint8_t   uPadding[3];                 /**< Pad to full dword.                        */
    uint32_t  uOffset;                     /**< Offset within bar.  (L.E.)                */
    uint32_t  uLength;                     /**< Length of struct, in bytes. (L.E.)        */
} VIRTIO_PCI_CAP_T, *PVIRTIO_PCI_CAP_T;
85
/**
 * Local implementation's usage context of a queue (e.g. not part of VirtIO specification).
 * One of these shadows each guest virtqueue, tracking the device-side (consumer)
 * ring positions between guest notifications.
 */
typedef struct VIRTQ_SHADOW
{
    const char *pcszName[32];              /**< Dev-specific name of queue.
                                            *   NOTE(review): declared as an array of 32 pointers;
                                            *   presumably a fixed 32-char buffer (char szName[32])
                                            *   or a single pointer was intended -- confirm against
                                            *   the code that populates this field. */
    uint16_t    uAvailIdx;                 /**< Consumer's position in avail ring         */
    uint16_t    uUsedIdx;                  /**< Consumer's position in used ring          */
    bool        fEventThresholdReached;    /**< Don't lose track while queueing ahead     */
} VIRTQ_SHADOW_T, *PVIRTQ_SHADOW_T;
96
/**
 * VirtIO 1.0 Capabilities' related MMIO-mapped structs:
 *
 * Note: virtio_pci_device_cap is dev-specific, implemented by client. Definition unknown here.
 *
 * Host mirror of the guest-visible common configuration area (VirtIO 1.0,
 * section 4.1.4.3).  All fields are little-endian on the wire; RO/RW is from
 * the guest driver's point of view.
 */
typedef struct virtio_pci_common_cfg
{
    /* Per device fields */
    uint32_t  uDeviceFeaturesSelect;       /**< RW (driver selects device features)       */
    uint32_t  uDeviceFeatures;             /**< RO (device reports features to driver)    */
    uint32_t  uDriverFeaturesSelect;       /**< RW (driver selects driver features)       */
    uint32_t  uDriverFeatures;             /**< RW (driver accepts device features)       */
    uint16_t  uMsixConfig;                 /**< RW (driver sets MSI-X config vector)      */
    uint16_t  uNumQueues;                  /**< RO (device specifies max queues)          */
    uint8_t   uDeviceStatus;               /**< RW (driver writes device status, 0 resets)*/
    uint8_t   uConfigGeneration;           /**< RO (device changes when changing configs) */

    /* Per virtqueue fields (as determined by uQueueSelect) */
    uint16_t  uQueueSelect;                /**< RW (selects queue focus for these fields) */
    uint16_t  uQueueSize;                  /**< RW (queue size, 0 - 2^n)                  */
    uint16_t  uQueueMsixVector;            /**< RW (driver selects MSI-X queue vector)    */
    uint16_t  uQueueEnable;                /**< RW (driver controls usability of queue)   */
    uint16_t  uQueueNotifyOff;             /**< RO (offset into virtqueue; see spec)      */
    uint64_t  pGcPhysQueueDesc;            /**< RW (driver writes desc table phys addr)   */
    uint64_t  pGcPhysQueueAvail;           /**< RW (driver writes avail ring phys addr)   */
    uint64_t  pGcPhysQueueUsed;            /**< RW (driver writes used ring phys addr)    */
} VIRTIO_PCI_COMMON_CFG_T, *PVIRTIO_PCI_COMMON_CFG_T;
124
/** Notification capability: generic VirtIO cap plus the queue-notify address multiplier. */
typedef struct virtio_pci_notify_cap
{
    struct virtio_pci_cap pciCap;          /**< Notification MMIO mapping capability      */
    uint32_t uNotifyOffMultiplier;         /**< notify_off_multiplier                     */
} VIRTIO_PCI_NOTIFY_CAP_T, *PVIRTIO_PCI_NOTIFY_CAP_T;
130
/** PCI_CFG capability: alternate access window into a BAR via PCI config space. */
typedef struct virtio_pci_cfg_cap
{
    struct virtio_pci_cap pciCap;          /**< Cap. defines the BAR/off/len to access    */
    uint8_t uPciCfgData[4];                /**< I/O buf for above cap.                    */
} VIRTIO_PCI_CFG_CAP_T, *PVIRTIO_PCI_CFG_CAP_T;
136
/** For ISR, spec says min. 1 byte. Diagram shows 32-bits, mostly reserved. */
typedef uint32_t VIRTIO_PCI_ISR_CAP_T, *PVIRTIO_PCI_ISR_CAP_T;
139
/**
 * The core (/common) state of the VirtIO PCI device
 *
 * @implements  PDMILEDPORTS
 */
typedef struct VIRTIOSTATE
{
    PDMPCIDEV                  dev;                              /**< PCI device                                */
    char                       szInstance[16];                   /**< Instance name, e.g. "VIRTIOSCSI0"         */

    PPDMDEVINSR3               pDevInsR3;                        /**< Device instance - R3                      */
    PPDMDEVINSR0               pDevInsR0;                        /**< Device instance - R0                      */
    PPDMDEVINSRC               pDevInsRC;                        /**< Device instance - RC                      */

    RTGCPHYS                   pGcPhysPciCapBase;                /**< Pointer to MMIO mapped capability data    */
    RTGCPHYS                   pGcPhysCommonCfg;                 /**< Pointer to MMIO mapped capability data    */
    RTGCPHYS                   pGcPhysNotifyCap;                 /**< Pointer to MMIO mapped capability data    */
    RTGCPHYS                   pGcPhysIsrCap;                    /**< Pointer to MMIO mapped capability data    */
    RTGCPHYS                   pGcPhysDeviceCap;                 /**< Pointer to MMIO mapped capability data    */

    RTGCPHYS                   pGcPhysQueueDesc[VIRTQ_MAX_CNT];  /**< (MMIO) PhysAdr per-Q desc structs   GUEST */
    RTGCPHYS                   pGcPhysQueueAvail[VIRTQ_MAX_CNT]; /**< (MMIO) PhysAdr per-Q avail structs  GUEST */
    RTGCPHYS                   pGcPhysQueueUsed[VIRTQ_MAX_CNT];  /**< (MMIO) PhysAdr per-Q used structs   GUEST */
    uint16_t                   uQueueNotifyOff[VIRTQ_MAX_CNT];   /**< (MMIO) per-Q notify offset           HOST */
    uint16_t                   uQueueMsixVector[VIRTQ_MAX_CNT];  /**< (MMIO) Per-queue vector for MSI-X   GUEST */
    uint16_t                   uQueueEnable[VIRTQ_MAX_CNT];      /**< (MMIO) Per-queue enable             GUEST */
    uint16_t                   uQueueSize[VIRTQ_MAX_CNT];        /**< (MMIO) Per-queue size          HOST/GUEST */
    uint16_t                   uQueueSelect;                     /**< (MMIO) queue selector               GUEST */
    uint16_t                   padding;                          /**< Explicit alignment padding (unused)       */
    uint64_t                   uDeviceFeatures;                  /**< (MMIO) Host features offered         HOST */
    uint64_t                   uDriverFeatures;                  /**< (MMIO) Host features accepted       GUEST */
    uint32_t                   uDeviceFeaturesSelect;            /**< (MMIO) hi/lo select uDeviceFeatures GUEST */
    uint32_t                   uDriverFeaturesSelect;            /**< (MMIO) hi/lo select uDriverFeatures GUEST */
    uint32_t                   uMsixConfig;                      /**< (MMIO) MSI-X vector                 GUEST */
    uint32_t                   uNumQueues;                       /**< (MMIO) Actual number of queues      GUEST */
    uint8_t                    uDeviceStatus;                    /**< (MMIO) Device Status                GUEST */
    uint8_t                    uPrevDeviceStatus;                /**< (MMIO) Prev Device Status           GUEST */
    uint8_t                    uConfigGeneration;                /**< (MMIO) Device config sequencer       HOST */

    VIRTQ_SHADOW_T             virtqShadow[VIRTQ_MAX_CNT];       /**< Local impl-specific queue context         */
    VIRTIOCALLBACKS            virtioCallbacks;                  /**< Callback vectors to client                */

    PFNPCICONFIGREAD           pfnPciConfigReadOld;              /**< Prev rd. cb. intercepting PCI Cfg I/O     */
    PFNPCICONFIGWRITE          pfnPciConfigWriteOld;             /**< Prev wr. cb. intercepting PCI Cfg I/O     */

    PVIRTIO_PCI_CFG_CAP_T      pPciCfgCap;                       /**< Pointer to struct in configuration area   */
    PVIRTIO_PCI_NOTIFY_CAP_T   pNotifyCap;                       /**< Pointer to struct in configuration area   */
    PVIRTIO_PCI_CAP_T          pCommonCfgCap;                    /**< Pointer to struct in configuration area   */
    PVIRTIO_PCI_CAP_T          pIsrCap;                          /**< Pointer to struct in configuration area   */
    PVIRTIO_PCI_CAP_T          pDeviceCap;                       /**< Pointer to struct in configuration area   */

    uint32_t                   cbDevSpecificCap;                 /**< Size of client's dev-specific config data */
    void                      *pDevSpecificCap;                  /**< Pointer to client's struct                */
    void                      *pPrevDevSpecificCap;              /**< Previous read dev-specific cfg of client  */
    bool                       fGenUpdatePending;                /**< If set, update cfg gen after driver reads */
    uint8_t                    uPciCfgDataOff;                   /**< Presumably offset into the PCI_CFG
                                                                  *   capability data window -- confirm against
                                                                  *   the PCI config intercept code. */
    uint8_t                    uISR;                             /**< Interrupt Status Register.                */

} VIRTIOSTATE, *PVIRTIOSTATE;
199
/** @name virtq related flags (VirtIO 1.0, sections 2.4.5 and 2.4.7)
 * @{ */
#define VIRTQ_DESC_F_NEXT                   1   /**< Indicates this descriptor chains to next  */
#define VIRTQ_DESC_F_WRITE                  2   /**< Marks buffer as write-only (default ro)   */
#define VIRTQ_DESC_F_INDIRECT               4   /**< Buffer is list of buffer descriptors      */

#define VIRTQ_USED_F_NO_NOTIFY              1   /**< Dev to Drv: Don't notify when buf added   */
#define VIRTQ_AVAIL_F_NO_INTERRUPT          1   /**< Drv to Dev: Don't notify when buf eaten   */
/** @} */
207
/**
 * virtq related structs
 * (struct names follow VirtIO 1.0 spec, typedef use VBox style)
 *
 * One entry of the guest's descriptor table (spec: struct virtq_desc).
 */
typedef struct virtq_desc
{
    uint64_t  pGcPhysBuf;                  /**< addr       GC Phys. address of buffer     */
    uint32_t  cb;                          /**< len        Buffer length                  */
    uint16_t  fFlags;                      /**< flags      Buffer specific flags          */
    uint16_t  uDescIdxNext;                /**< next       Idx set if VIRTIO_DESC_F_NEXT  */
} VIRTQ_DESC_T, *PVIRTQ_DESC_T;
219
/**
 * Driver-to-device (avail) ring header (spec: struct virtq_avail).
 *
 * NOTE: auRing[1] is a placeholder -- the real ring has queue-size entries, so
 * uUsedEventIdx does NOT sit at its declared offset; the accessors below locate
 * used_event dynamically via RT_UOFFSETOF_DYN and the actual queue size.
 */
typedef struct virtq_avail
{
    uint16_t  fFlags;                      /**< flags      avail ring drv to dev flags    */
    uint16_t  uDescIdx;                    /**< idx        Index of next free ring slot   */
    uint16_t  auRing[1];                   /**< ring       Ring: avail drv to dev bufs    */
    uint16_t  uUsedEventIdx;               /**< used_event (per spec, only meaningful when
                                            *   VIRTIO_F_EVENT_IDX was negotiated)        */
} VIRTQ_AVAIL_T, *PVIRTQ_AVAIL_T;
227
/** One element of the used ring (spec: struct virtq_used_elem). */
typedef struct virtq_used_elem
{
    uint32_t  uDescIdx;                    /**< idx        Start of used desc chain       */
    uint32_t  cbElem;                      /**< len        Total len of used desc chain   */
} VIRTQ_USED_ELEM_T;
233
/**
 * Device-to-driver (used) ring header (spec: struct virtq_used; the struct tag
 * here is missing a 'q' -- harmless, only the typedef names are used).
 *
 * NOTE: as with VIRTQ_AVAIL_T, auRing[1] is a placeholder; uAvailEventIdx is
 * located dynamically by the accessors below, after queue-size ring entries.
 */
typedef struct virt_used
{
    uint16_t  fFlags;                      /**< flags      used ring host-to-guest flags  */
    uint16_t  uDescIdx;                    /**< idx        Index of next ring slot        */
    VIRTQ_USED_ELEM_T auRing[1];           /**< ring       Ring: used dev to drv bufs     */
    uint16_t  uAvailEventIdx;              /**< avail_event (per spec, only meaningful when
                                            *   VIRTIO_F_EVENT_IDX was negotiated)        */
} VIRTQ_USED_T, *PVIRTQ_USED_T;
241
/**
 * This macro declares fMatched and sets it true iff the guest physical access
 * [GCPhysAddr, GCPhysAddr + cb) lies entirely within the MMIO-mapped capability struct.
 *
 * Actual parameters:
 * @param pGcPhysCapData - [input]  Pointer to MMIO mapped capability struct
 * @param pCfgCap        - [input]  Pointer to capability in PCI configuration area
 * @param fMatched       - [output] True if the access is within the physically mapped capability.
 *
 * Implied parameters:
 * @param GCPhysAddr - [input, implied] Physical address accessed (via MMIO callback)
 * @param cb         - [input, implied] Number of bytes to access
 */
#define MATCH_VIRTIO_CAP_STRUCT(pGcPhysCapData, pCfgCap, fMatched) \
    bool fMatched = false; \
    if (   pGcPhysCapData \
        && pCfgCap \
        && GCPhysAddr >= (RTGCPHYS)pGcPhysCapData \
        /* Entire access must fit inside the region; the previous check only      \
         * required the START to be in-range and cb <= region size independently, \
         * which allowed an access to run past the end of the capability. */      \
        && GCPhysAddr + cb <= (RTGCPHYS)pGcPhysCapData + ((PVIRTIO_PCI_CAP_T)pCfgCap)->uLength) \
        fMatched = true;
260
/**
 * This macro resolves to boolean true if uOffset matches a field offset and size exactly,
 * (or if it is a 64-bit field, if it accesses either 32-bit part as a 32-bit access).
 * This is mandated by section 4.1.3.1 of the VirtIO 1.0 specification.
 *
 * The whole expansion is parenthesized: previously it expanded to (A) || (B)
 * bare, so `!MATCH_COMMON_CFG(x)` or `MATCH_COMMON_CFG(x) && y` would bind to
 * only the first operand.
 *
 * @param member  - Member of VIRTIO_PCI_COMMON_CFG_T
 * @param uOffset - Implied parameter: Offset into VIRTIO_PCI_COMMON_CFG_T
 * @param cb      - Implied parameter: Number of bytes to access
 * @result        - true or false
 */
#define MATCH_COMMON_CFG(member) \
    (   (   RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member) == 8 \
         && (   uOffset == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member) \
             || uOffset == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member) + sizeof(uint32_t)) \
         && cb == sizeof(uint32_t)) \
     || (   uOffset == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member) \
         && cb == RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member)))
278
/**
 * Logs a guest access to common-config field @a member via virtioLogMappedIoValue.
 * Implied parameters at the expansion site: pv, cb, fWrite, and uIntraOff
 * (which is declared by the COMMON_CFG_ACCESSOR* macros -- this macro is only
 * valid inside their expansions or where uIntraOff is otherwise in scope).
 */
#define LOG_COMMON_CFG_ACCESS(member) \
            virtioLogMappedIoValue(__FUNCTION__, #member, RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member), \
                                   pv, cb, uIntraOff, fWrite, false, 0);
282
/**
 * As LOG_COMMON_CFG_ACCESS, but for array fields: also logs the element index @a idx.
 * Implied parameters: pv, cb, fWrite, and uIntraOff (see LOG_COMMON_CFG_ACCESS).
 */
#define LOG_COMMON_CFG_ACCESS_INDEXED(member, idx) \
            virtioLogMappedIoValue(__FUNCTION__, #member, RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member), \
                                   pv, cb, uIntraOff, fWrite, true, idx);
286
/**
 * Services a guest read or write of scalar common-config field @a member by
 * copying between the guest buffer and the mirror field in pVirtio, then logging it.
 *
 * Implied parameters (must be in scope at the expansion site):
 *   pVirtio - PVIRTIOSTATE whose mirror field is accessed
 *   uOffset - guest's byte offset into VIRTIO_PCI_COMMON_CFG_T
 *   pv, cb  - guest data pointer and access length
 *   fWrite  - true: guest writes the field; false: guest reads it
 *
 * NOTE: uIntraOff is deliberately leaked to LOG_COMMON_CFG_ACCESS -- do not rename it.
 * NOTE(review): multi-statement macro uses a bare { } block rather than
 * do { } while (0); changing that would alter semicolon requirements at
 * unseen call sites -- confirm call-site style before converting.
 */
#define COMMON_CFG_ACCESSOR(member) \
    { \
        uint32_t uIntraOff = uOffset - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
        if (fWrite) \
            memcpy(((char *)&pVirtio->member) + uIntraOff, (const char *)pv, cb); \
        else \
            memcpy((char *)pv, (const char *)(((char *)&pVirtio->member) + uIntraOff), cb); \
        LOG_COMMON_CFG_ACCESS(member); \
    }
296
/**
 * As COMMON_CFG_ACCESSOR, but for per-queue array field @a member, accessing
 * element @a idx (note: 'member + idx' scales by element size, so @a member
 * must be an array of the accessed element type).
 *
 * Implied parameters: pVirtio, uOffset, pv, cb, fWrite -- see COMMON_CFG_ACCESSOR.
 * NOTE: uIntraOff is consumed by LOG_COMMON_CFG_ACCESS_INDEXED -- do not rename it.
 */
#define COMMON_CFG_ACCESSOR_INDEXED(member, idx) \
    { \
        uint32_t uIntraOff = uOffset - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
        if (fWrite) \
            memcpy(((char *)(pVirtio->member + idx)) + uIntraOff, (const char *)pv, cb); \
        else \
            memcpy((char *)pv, (const char *)(((char *)(pVirtio->member + idx)) + uIntraOff), cb); \
        LOG_COMMON_CFG_ACCESS_INDEXED(member, idx); \
    }
306
/**
 * As COMMON_CFG_ACCESSOR, but for fields that are read-only to the guest:
 * reads copy the mirror field out; write attempts are logged and discarded.
 *
 * Implied parameters: pVirtio, uOffset, pv, cb, fWrite -- see COMMON_CFG_ACCESSOR.
 * NOTE: uIntraOff is consumed by LOG_COMMON_CFG_ACCESS -- do not rename it.
 */
#define COMMON_CFG_ACCESSOR_READONLY(member) \
    { \
        uint32_t uIntraOff = uOffset - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
        if (fWrite) \
            LogFunc(("Guest attempted to write readonly virtio_pci_common_cfg.%s\n", #member)); \
        else \
        { \
            memcpy((char *)pv, (const char *)(((char *)&pVirtio->member) + uIntraOff), cb); \
            LOG_COMMON_CFG_ACCESS(member); \
        } \
    }
318
/**
 * As COMMON_CFG_ACCESSOR_INDEXED, but read-only to the guest: reads copy
 * element @a idx of array field @a member out; write attempts are logged
 * and discarded.
 *
 * Implied parameters: pVirtio, uOffset, pv, cb, fWrite -- see COMMON_CFG_ACCESSOR.
 * NOTE: uIntraOff is consumed by LOG_COMMON_CFG_ACCESS_INDEXED -- do not rename it.
 */
#define COMMON_CFG_ACCESSOR_INDEXED_READONLY(member, idx) \
    { \
        uint32_t uIntraOff = uOffset - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
        if (fWrite) \
            LogFunc(("Guest attempted to write readonly virtio_pci_common_cfg.%s[%d]\n", #member, idx)); \
        else \
        { \
            memcpy((char *)pv, ((char *)(pVirtio->member + idx)) + uIntraOff, cb); \
            LOG_COMMON_CFG_ACCESS_INDEXED(member, idx); \
        } \
    }
330
/**
 * Internal queue operations.
 *
 * NOTE(review): these are declared 'static' but defined below as DECLINLINE --
 * presumably DECLINLINE expands to a compatible static-inline form; confirm.
 * vqReset is declared again further down with the other .cpp-implemented
 * prototypes; one of the two declarations is redundant.
 */

static int      vqIsEventNeeded          (uint16_t uEventIdx, uint16_t uDescIdxNew, uint16_t uDescIdxOld);
static bool     vqIsEmpty                (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static void     vqReset                  (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static void     vqReadDesc               (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t uDescIdx, PVIRTQ_DESC_T pDesc);
static uint16_t vqReadAvailRingDescIdx   (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t availIdx);
static uint16_t vqReadAvailDescIdx       (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static uint16_t vqReadAvailFlags         (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static uint16_t vqReadAvailUsedEvent     (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static void     vqWriteUsedElem          (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t usedIdx, uint32_t uDescIdx, uint32_t uLen);
static void     vqWriteUsedRingDescIdx   (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint16_t uDescIdx);
static uint16_t vqReadUsedDescIdx        (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static uint16_t vqReadUsedFlags          (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static void     vqWriteUsedFlags         (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t fFlags);
static uint16_t vqReadUsedAvailEvent     (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static void     vqWriteUsedAvailEvent    (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t uAvailEventIdx);
350
/**
 * Event-index notification test, equivalent to the VirtIO 1.0 spec's
 * vring_need_event() (section 2.4.7.2): returns nonzero iff the 16-bit
 * wrapping event index uEventIdx lies in the half-open interval
 * (uDescIdxOld, uDescIdxNew], i.e. the other side asked to be notified
 * at a point the ring index has just passed.
 */
DECLINLINE(int) vqIsEventNeeded(uint16_t uEventIdx, uint16_t uDescIdxNew, uint16_t uDescIdxOld)
{
    return (uint16_t)(uDescIdxNew - uEventIdx - 1) < (uint16_t)(uDescIdxNew - uDescIdxOld);
}
355
356DECLINLINE(bool) vqIsEmpty(PVIRTIOSTATE pVirtio, uint16_t qIdx)
357{
358 return vqReadAvailDescIdx(pVirtio, qIdx) == pVirtio->virtqShadow->uAvailIdx;
359}
360
361/**
362 * Accessor for virtq descspVirtio
363 */
364DECLINLINE(void) vqReadDesc(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t uDescIdx, PVIRTQ_DESC_T pDesc)
365{
366 //Log(("%s virtioQueueReadDesc: ring=%p idx=%u\n", INSTANCE(pState), pVirtQ, idx));
367 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
368 PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
369 pVirtio->pGcPhysQueueDesc[qIdx]
370 + sizeof(VIRTQ_DESC_T) * (uDescIdx % pVirtio->uQueueSize[qIdx]),
371 pDesc, sizeof(VIRTQ_DESC_T));
372}
373
374/**
375 * Accessors for virtq avail ring
376 */
377DECLINLINE(uint16_t) vqReadAvailRingDescIdx(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t availIdx)
378{
379 uint16_t uDescIdx;
380 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
381 PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
382 pVirtio->pGcPhysQueueAvail[qIdx]
383 + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[availIdx % pVirtio->uQueueSize[qIdx]]),
384 &uDescIdx, sizeof(uDescIdx));
385 return uDescIdx;
386}
387
388DECLINLINE(uint16_t) vqReadAvailDescIdx(PVIRTIOSTATE pVirtio, uint16_t qIdx)
389{
390 uint16_t uDescIdx;
391 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
392 PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
393 pVirtio->pGcPhysQueueAvail[qIdx] + RT_UOFFSETOF(VIRTQ_AVAIL_T, uDescIdx),
394 &uDescIdx, sizeof(uDescIdx));
395 return uDescIdx;
396}
397
398DECLINLINE(uint16_t) vqReadAvailFlags(PVIRTIOSTATE pVirtio, uint16_t qIdx)
399{
400 uint16_t fFlags;
401 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
402 PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
403 pVirtio->pGcPhysQueueAvail[qIdx] + RT_UOFFSETOF(VIRTQ_AVAIL_T, fFlags),
404 &fFlags, sizeof(fFlags));
405 return fFlags;
406}
407
408DECLINLINE(uint16_t) vqReadAvailUsedEvent(PVIRTIOSTATE pVirtio, uint16_t qIdx)
409{
410 uint16_t uUsedEventIdx;
411 /** VirtIO 1.0 uUsedEventIdx (used_event) immediately follows ring */
412 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
413 PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
414 + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[pVirtio->uQueueSize[qIdx]]),
415 &uUsedEventIdx, sizeof(uUsedEventIdx));
416 return uUsedEventIdx;
417}
418
419/**
420 * Accessors for virtq used ring
421 */
422DECLINLINE(void) vqWriteUsedElem(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t usedIdx, uint32_t uDescIdx, uint32_t uLen)
423{
424 VIRTQ_USED_ELEM_T elem = { uDescIdx, uLen };
425 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
426 PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
427 pVirtio->pGcPhysQueueUsed[qIdx]
428 + RT_UOFFSETOF_DYN(VIRTQ_USED_T, auRing[usedIdx % pVirtio->uQueueSize[qIdx]]),
429 &elem, sizeof(elem));
430}
431
432DECLINLINE(void) vqWriteUsedRingDescIdx(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint16_t uDescIdx)
433{
434 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
435 PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
436 pVirtio->pGcPhysQueueUsed[qIdx] + RT_UOFFSETOF(VIRTQ_USED_T, uDescIdx),
437 &uDescIdx, sizeof(uDescIdx));
438}
439
440DECLINLINE(uint16_t)vqReadUsedDescIdx(PVIRTIOSTATE pVirtio, uint16_t qIdx)
441{
442 uint16_t uDescIdx;
443 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
444 PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
445 pVirtio->pGcPhysQueueUsed[qIdx] + RT_UOFFSETOF(VIRTQ_USED_T, uDescIdx),
446 &uDescIdx, sizeof(uDescIdx));
447 return uDescIdx;
448}
449
450DECLINLINE(uint16_t) vqReadUsedFlags(PVIRTIOSTATE pVirtio, uint16_t qIdx)
451{
452 uint16_t fFlags;
453 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
454 PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
455 pVirtio->pGcPhysQueueUsed[qIdx] + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
456 &fFlags, sizeof(fFlags));
457 return fFlags;
458}
459
460DECLINLINE(void) vqWriteUsedFlags(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t fFlags)
461{
462 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
463 PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
464 pVirtio->pGcPhysQueueUsed[qIdx] + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
465 &fFlags, sizeof(fFlags));
466}
467
468DECLINLINE(uint16_t) vqReadUsedAvailEvent(PVIRTIOSTATE pVirtio, uint16_t qIdx)
469{
470 uint16_t uAvailEventIdx;
471 /** VirtIO 1.0 uAvailEventIdx (avail_event) immediately follows ring */
472 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
473 PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
474 + RT_UOFFSETOF_DYN(VIRTQ_USED_T, auRing[pVirtio->uQueueSize[qIdx]]),
475 &uAvailEventIdx, sizeof(uAvailEventIdx));
476 return uAvailEventIdx;
477}
478
479DECLINLINE(void) vqWriteUsedAvailEvent(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t uAvailEventIdx)
480{
481 /** VirtIO 1.0 uAvailEventIdx (avail_event) immediately follows ring */
482 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
483 PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
484 pVirtio->pGcPhysQueueUsed[qIdx]
485 + RT_UOFFSETOF_DYN(VIRTQ_USED_T, auRing[pVirtio->uQueueSize[qIdx]]),
486 &uAvailEventIdx, sizeof(uAvailEventIdx));
487}
488
489/**
490 * Makes the MMIO-mapped Virtio uDeviceStatus registers non-cryptic */
491DECLINLINE(void) virtioLogDeviceStatus( uint8_t status)
492{
493 if (status == 0)
494 Log(("RESET"));
495 else
496 {
497 int primed = 0;
498 if (status & VIRTIO_STATUS_ACKNOWLEDGE)
499 Log(("ACKNOWLEDGE", primed++));
500 if (status & VIRTIO_STATUS_DRIVER)
501 Log(("%sDRIVER", primed++ ? " | " : ""));
502 if (status & VIRTIO_STATUS_FEATURES_OK)
503 Log(("%sFEATURES_OK", primed++ ? " | " : ""));
504 if (status & VIRTIO_STATUS_DRIVER_OK)
505 Log(("%sDRIVER_OK", primed++ ? " | " : ""));
506 if (status & VIRTIO_STATUS_FAILED)
507 Log(("%sFAILED", primed++ ? " | " : ""));
508 if (status & VIRTIO_STATUS_DEVICE_NEEDS_RESET)
509 Log(("%sNEEDS_RESET", primed++ ? " | " : ""));
510 }
511}
512
/**
 * Prototypes for the remaining internal functions and the saved-state
 * callbacks -- presumably implemented in the companion Virtio_1_0.cpp; confirm.
 * NOTE(review): vqReset is also declared with the queue accessors above;
 * one of the two declarations is redundant.
 */
static void vqReset                     (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static void vqDeviceNotified            (PVIRTIOSTATE pVirtio, uint16_t qidx, uint16_t uDescIdx);
static void vqNotifyDriver              (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static int  virtioRaiseInterrupt        (PVIRTIOSTATE pVirtio, uint8_t uCause);
static void virtioLowerInterrupt        (PVIRTIOSTATE pVirtio);
static int  virtioCommonCfgAccessed     (PVIRTIOSTATE pVirtio, int fWrite, off_t uOffset, unsigned cb, void const *pv);
static void virtioGuestResetted         (PVIRTIOSTATE pVirtio);

/* Saved-state (SSM) callbacks. */
static DECLCALLBACK(int) virtioR3SaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM);
static DECLCALLBACK(int) virtioR3LoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
static DECLCALLBACK(int) virtioR3LoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM);
static DECLCALLBACK(int) virtioR3LiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass);
526#endif /* !VBOX_INCLUDED_SRC_VirtIO_Virtio_1_0_impl_h */
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette