VirtualBox

source: vbox/trunk/src/VBox/Devices/VirtIO/Virtio_1_0_impl.h@ 80351

Last change on this file since 80351 was 80340, checked in by vboxsync, 6 years ago

Fixed error in MMIO handling of cfg gen check/increment. Seems to have resolved the stack corruption issue caused by putting structs that allocated 1.5MB on the stack in temporary functions to test the notification callback and to test de-queuing data. See bugref:9440 Comment #53 for more info

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 29.3 KB
Line 
1/* $Id: Virtio_1_0_impl.h 80340 2019-08-19 07:43:37Z vboxsync $ $Revision: 80340 $ $Date: 2019-08-19 07:43:37 +0000 (Mon, 19 Aug 2019) $ $Author: vboxsync $ */
2/** @file
3 * Virtio_1_0_impl.h - Virtio Declarations
4 */
5
6/*
7 * Copyright (C) 2009-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef VBOX_INCLUDED_SRC_VirtIO_Virtio_1_0_impl_h
19#define VBOX_INCLUDED_SRC_VirtIO_Virtio_1_0_impl_h
20#ifndef RT_WITHOUT_PRAGMA_ONCE
21# pragma once
22#endif
23
24#include "Virtio_1_0.h"
25
26/** @name Saved state versions.
27 * The saved state version is changed if either common or any of specific
28 * parts are changed. That is, it is perfectly possible that the version
29 * of saved vnet state will increase as a result of change in vblk structure
30 * for example.
31 */
32#define VIRTIO_SAVEDSTATE_VERSION 1
33/** @} */
34
35#define VIRTIO_F_VERSION_1 RT_BIT_64(32) /**< Required feature bit for 1.0 devices */
36
37#define VIRTIO_F_INDIRECT_DESC RT_BIT_64(28) /**< Allow descs to point to list of descs */
38#define VIRTIO_F_EVENT_IDX RT_BIT_64(29) /**< Allow notification disable for n elems */
39#define VIRTIO_F_RING_INDIRECT_DESC RT_BIT_64(28) /**< Doc bug: Goes under two names in spec */
40#define VIRTIO_F_RING_EVENT_IDX RT_BIT_64(29) /**< Doc bug: Goes under two names in spec */
41
42#define VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED ( VIRTIO_F_EVENT_IDX ) /**< TBD: Add VIRTIO_F_INDIRECT_DESC */
43
44#define VIRTIO_ISR_VIRTQ_INTERRUPT RT_BIT_32(0) /**< Virtq interrupt bit of ISR register */
45#define VIRTIO_ISR_DEVICE_CONFIG RT_BIT_32(1) /**< Device configuration changed bit of ISR */
46#define DEVICE_PCI_VENDOR_ID_VIRTIO 0x1AF4 /**< Guest driver locates dev via (mandatory) */
47#define DEVICE_PCI_REVISION_ID_VIRTIO 1 /**< VirtIO 1.0 non-transitional drivers >= 1 */
48
49/** Reserved (*negotiated*) Feature Bits (e.g. device independent features, VirtIO 1.0 spec,section 6) */
50
51#define VIRTIO_MSI_NO_VECTOR 0xffff /**< Vector value to disable MSI for queue */
52
53/** Device Status field constants (from Virtio 1.0 spec) */
54#define VIRTIO_STATUS_ACKNOWLEDGE 0x01 /**< Guest driver: Located this VirtIO device */
55#define VIRTIO_STATUS_DRIVER 0x02 /**< Guest driver: Can drive this VirtIO dev. */
56#define VIRTIO_STATUS_DRIVER_OK 0x04 /**< Guest driver: Driver set-up and ready */
57#define VIRTIO_STATUS_FEATURES_OK 0x08 /**< Guest driver: Feature negotiation done */
58#define VIRTIO_STATUS_FAILED 0x80 /**< Guest driver: Fatal error, gave up */
59#define VIRTIO_STATUS_DEVICE_NEEDS_RESET 0x40 /**< Device experienced unrecoverable error */
60
/** Virtio Device PCI Capability type codes (VIRTIO_PCI_CAP_*, VirtIO 1.0 spec section 4.1.4) */
62#define VIRTIO_PCI_CAP_COMMON_CFG 1 /**< Common configuration PCI capability ID */
63#define VIRTIO_PCI_CAP_NOTIFY_CFG 2 /**< Notification area PCI capability ID */
64#define VIRTIO_PCI_CAP_ISR_CFG 3 /**< ISR PCI capability id */
65#define VIRTIO_PCI_CAP_DEVICE_CFG 4 /**< Device-specific PCI cfg capability ID */
66#define VIRTIO_PCI_CAP_PCI_CFG 5 /**< PCI CFG capability ID */
67
68#define VIRTIO_PCI_CAP_ID_VENDOR 0x09 /**< Vendor-specific PCI CFG Device Cap. ID */
69
70/**
71 * The following is the PCI capability struct common to all VirtIO capability types
72 */
/**
 * Generic PCI capability header common to all VirtIO capability types
 * (VirtIO 1.0 spec, section 4.1.4). Instances of this struct (or structs
 * embedding it) are placed in PCI config space; uBar/uOffset/uLength tell
 * the guest driver where the corresponding register block lives in MMIO.
 */
typedef struct virtio_pci_cap
{
    /* All little-endian */
    uint8_t   uCapVndr;                              /**< Generic PCI field: PCI_CAP_ID_VNDR */
    uint8_t   uCapNext;                              /**< Generic PCI field: next ptr. */
    uint8_t   uCapLen;                               /**< Generic PCI field: capability length */
    uint8_t   uCfgType;                              /**< Identifies the structure (VIRTIO_PCI_CAP_*_CFG). */
    uint8_t   uBar;                                  /**< Where to find it: BAR holding the MMIO region. */
    uint8_t   uPadding[3];                           /**< Pad to full dword. */
    uint32_t  uOffset;                               /**< Offset within bar. (L.E.) */
    uint32_t  uLength;                               /**< Length of struct, in bytes. (L.E.) */
} VIRTIO_PCI_CAP_T, *PVIRTIO_PCI_CAP_T;
85
/**
 * Local implementation's usage context of a queue (e.g. not part of VirtIO specification)
 *
 * Shadow state the device keeps per virtqueue: the host-side consumer positions
 * in the guest's avail/used rings plus scratch buffers for in-flight s/g work.
 */
typedef struct VIRTQ_PROXY
{
    const char          szName[32];                  /**< Dev-specific name of queue */
    PVIRTQ_BUF_VECTOR_T pBufVec;                     /**< Per-queue s/g data. Serialize access! */
    uint16_t            uAvailIdx;                   /**< Consumer's position in avail ring */
    uint16_t            uUsedIdx;                    /**< Consumer's position in used ring */
    bool                fEventThresholdReached;      /**< Don't lose track while queueing ahead */
} VIRTQ_PROXY_T, *PVIRTQ_PROXY_T;
97
/**
 * VirtIO 1.0 Capabilities' related MMIO-mapped structs:
 *
 * Layout of the "common configuration" register block (VirtIO 1.0 spec,
 * section 4.1.4.3). Field offsets define the MMIO register map the guest
 * driver accesses; do not reorder or repack.
 *
 * Note: virtio_pci_device_cap is dev-specific, implemented by client. Definition unknown here.
 */
typedef struct virtio_pci_common_cfg
{
    /* Per device fields */
    uint32_t  uDeviceFeaturesSelect;                 /**< RW (driver selects device features) */
    uint32_t  uDeviceFeatures;                       /**< RO (device reports features to driver) */
    uint32_t  uDriverFeaturesSelect;                 /**< RW (driver selects driver features) */
    uint32_t  uDriverFeatures;                       /**< RW (driver accepts device features) */
    uint16_t  uMsixConfig;                           /**< RW (driver sets MSI-X config vector) */
    uint16_t  uNumQueues;                            /**< RO (device specifies max queues) */
    uint8_t   uDeviceStatus;                         /**< RW (driver writes device status, 0 resets)*/
    uint8_t   uConfigGeneration;                     /**< RO (device changes when changing configs) */

    /* Per virtqueue fields (as determined by uQueueSelect) */
    uint16_t  uQueueSelect;                          /**< RW (selects queue focus for these fields) */
    uint16_t  uQueueSize;                            /**< RW (queue size, 0 - 2^n) */
    uint16_t  uQueueMsixVector;                      /**< RW (driver selects MSI-X queue vector) */
    uint16_t  uQueueEnable;                          /**< RW (driver controls usability of queue) */
    uint16_t  uQueueNotifyOff;                       /**< RO (offset into virtqueue; see spec) */
    uint64_t  pGcPhysQueueDesc;                      /**< RW (driver writes desc table phys addr) */
    uint64_t  pGcPhysQueueAvail;                     /**< RW (driver writes avail ring phys addr) */
    uint64_t  pGcPhysQueueUsed;                      /**< RW (driver writes used ring phys addr) */
} VIRTIO_PCI_COMMON_CFG_T, *PVIRTIO_PCI_COMMON_CFG_T;
125
/**
 * Notification capability (VirtIO 1.0 spec, section 4.1.4.4): extends the
 * generic capability header with the multiplier used to compute each queue's
 * notification address (cap.uOffset + uQueueNotifyOff * uNotifyOffMultiplier).
 */
typedef struct virtio_pci_notify_cap
{
    struct virtio_pci_cap pciCap;                    /**< Notification MMIO mapping capability */
    uint32_t uNotifyOffMultiplier;                   /**< notify_off_multiplier */
} VIRTIO_PCI_NOTIFY_CAP_T, *PVIRTIO_PCI_NOTIFY_CAP_T;
131
/**
 * PCI configuration access capability (VirtIO 1.0 spec, section 4.1.4.8):
 * lets the guest reach the MMIO register blocks through PCI config space
 * by programming BAR/offset/length and reading/writing uPciCfgData.
 */
typedef struct virtio_pci_cfg_cap
{
    struct virtio_pci_cap pciCap;                    /**< Cap. defines the BAR/off/len to access */
    uint8_t uPciCfgData[4];                          /**< I/O buf for above cap. */
} VIRTIO_PCI_CFG_CAP_T, *PVIRTIO_PCI_CFG_CAP_T;
137
138
/**
 * The core (/common) state of the VirtIO PCI device
 *
 * Holds the device-side copies of the MMIO-visible registers (marked (MMIO)),
 * the guest-programmed physical addresses of each virtqueue's rings, and the
 * host-local bookkeeping (queue proxies, client callbacks, capability ptrs).
 *
 * @implements PDMILEDPORTS
 */
typedef struct VIRTIOSTATE
{
    PDMPCIDEV                  dev;                              /**< PCI device */
    char                       szInstance[16];                   /**< Instance name, e.g. "VIRTIOSCSI0" */
    void *                     pClientContext;                   /**< Client callback returned on callbacks */

    PPDMDEVINSR3               pDevInsR3;                        /**< Device instance - R3 */
    PPDMDEVINSR0               pDevInsR0;                        /**< Device instance - R0 */
    PPDMDEVINSRC               pDevInsRC;                        /**< Device instance - RC */

    RTGCPHYS                   pGcPhysPciCapBase;                /**< Pointer to MMIO mapped capability data */
    RTGCPHYS                   pGcPhysCommonCfg;                 /**< Pointer to MMIO mapped capability data */
    RTGCPHYS                   pGcPhysNotifyCap;                 /**< Pointer to MMIO mapped capability data */
    RTGCPHYS                   pGcPhysIsrCap;                    /**< Pointer to MMIO mapped capability data */
    RTGCPHYS                   pGcPhysDeviceCap;                 /**< Pointer to MMIO mapped capability data */

    RTGCPHYS                   pGcPhysQueueDesc[VIRTQ_MAX_CNT];  /**< (MMIO) PhysAdr per-Q desc structs   GUEST */
    RTGCPHYS                   pGcPhysQueueAvail[VIRTQ_MAX_CNT]; /**< (MMIO) PhysAdr per-Q avail structs  GUEST */
    RTGCPHYS                   pGcPhysQueueUsed[VIRTQ_MAX_CNT];  /**< (MMIO) PhysAdr per-Q used structs   GUEST */
    uint16_t                   uQueueNotifyOff[VIRTQ_MAX_CNT];   /**< (MMIO) per-Q notify offset           HOST */
    uint16_t                   uQueueMsixVector[VIRTQ_MAX_CNT];  /**< (MMIO) Per-queue vector for MSI-X   GUEST */
    uint16_t                   uQueueEnable[VIRTQ_MAX_CNT];      /**< (MMIO) Per-queue enable             GUEST */
    uint16_t                   uQueueSize[VIRTQ_MAX_CNT];        /**< (MMIO) Per-queue size          HOST/GUEST */
    uint16_t                   uQueueSelect;                     /**< (MMIO) queue selector               GUEST */
    uint16_t                   padding;                          /**< Explicit alignment filler (keeps layout stable) */
    uint64_t                   uDeviceFeatures;                  /**< (MMIO) Host features offered         HOST */
    uint64_t                   uDriverFeatures;                  /**< (MMIO) Host features accepted       GUEST */
    uint32_t                   uDeviceFeaturesSelect;            /**< (MMIO) hi/lo select uDeviceFeatures GUEST */
    uint32_t                   uDriverFeaturesSelect;            /**< (MMIO) hi/lo select uDriverFeatures GUEST */
    uint32_t                   uMsixConfig;                      /**< (MMIO) MSI-X vector                 GUEST */
    uint32_t                   uNumQueues;                       /**< (MMIO) Actual number of queues      GUEST */
    uint8_t                    uDeviceStatus;                    /**< (MMIO) Device Status                GUEST */
    uint8_t                    uPrevDeviceStatus;                /**< (MMIO) Prev Device Status           GUEST */
    uint8_t                    uConfigGeneration;                /**< (MMIO) Device config sequencer       HOST */

    VIRTQ_PROXY_T              virtqProxy[VIRTQ_MAX_CNT];        /**< Local impl-specific queue context */
    VIRTIOCALLBACKS            virtioCallbacks;                  /**< Callback vectors to client */

    PFNPCICONFIGREAD           pfnPciConfigReadOld;              /**< Prev rd. cb. intercepting PCI Cfg I/O */
    PFNPCICONFIGWRITE          pfnPciConfigWriteOld;             /**< Prev wr. cb. intercepting PCI Cfg I/O */

    PVIRTIO_PCI_CFG_CAP_T      pPciCfgCap;                       /**< Pointer to struct in configuration area */
    PVIRTIO_PCI_NOTIFY_CAP_T   pNotifyCap;                       /**< Pointer to struct in configuration area */
    PVIRTIO_PCI_CAP_T          pCommonCfgCap;                    /**< Pointer to struct in configuration area */
    PVIRTIO_PCI_CAP_T          pIsrCap;                          /**< Pointer to struct in configuration area */
    PVIRTIO_PCI_CAP_T          pDeviceCap;                       /**< Pointer to struct in configuration area */

    uint32_t                   cbDevSpecificCfg;                 /**< Size of client's dev-specific config data */
    void                      *pDevSpecificCfg;                  /**< Pointer to client's struct */
    void                      *pPrevDevSpecificCfg;              /**< Previous read dev-specific cfg of client */
    bool                       fGenUpdatePending;                /**< If set, update cfg gen after driver reads */
    uint8_t                    uPciCfgDataOff;                   /**< Offset into PCI cfg data window (see pPciCfgCap) */
    uint8_t                    uISR;                             /**< Interrupt Status Register. */

} VIRTIOSTATE, *PVIRTIOSTATE;
199
200/** virtq related flags */
201#define VIRTQ_DESC_F_NEXT 1 /**< Indicates this descriptor chains to next */
202#define VIRTQ_DESC_F_WRITE 2 /**< Marks buffer as write-only (default ro) */
203#define VIRTQ_DESC_F_INDIRECT 4 /**< Buffer is list of buffer descriptors */
204
205#define VIRTQ_USED_F_NO_NOTIFY 1 /**< Dev to Drv: Don't notify when buf added */
206#define VIRTQ_AVAIL_F_NO_INTERRUPT 1 /**< Drv to Dev: Don't notify when buf eaten */
207
208/**
209 * virtq related structs
210 * (struct names follow VirtIO 1.0 spec, typedef use VBox style)
211 */
/** Guest-visible virtqueue descriptor table entry (VirtIO 1.0, section 2.4.5).
 *  Layout is fixed by the spec; read from guest memory via vqReadDesc(). */
typedef struct virtq_desc
{
    uint64_t  pGcPhysBuf;                            /**< addr       GC Phys. address of buffer */
    uint32_t  cb;                                    /**< len        Buffer length */
    uint16_t  fFlags;                                /**< flags      Buffer specific flags (VIRTQ_DESC_F_*) */
    uint16_t  uDescIdxNext;                          /**< next       Idx set if VIRTIO_DESC_F_NEXT */
} VIRTQ_DESC_T, *PVIRTQ_DESC_T;
219
/** Guest-visible avail (driver) ring header (VirtIO 1.0, section 2.4.6).
 *  NOTE: auRing[] is really queue-size entries long; the used_event field
 *  actually follows the ring, so accessors locate it with RT_UOFFSETOF_DYN
 *  rather than through this notional member. */
typedef struct virtq_avail
{
    uint16_t  fFlags;                                /**< flags      avail ring drv to dev flags */
    uint16_t  uDescIdx;                              /**< idx        Index of next free ring slot */
    uint16_t  auRing[1];                             /**< ring       Ring: avail drv to dev bufs */
    uint16_t  uUsedEventIdx;                         /**< used_event (if VIRTQ_USED_F_NO_NOTIFY) */
} VIRTQ_AVAIL_T, *PVIRTQ_AVAIL_T;
227
/** One used-ring entry: identifies a completed descriptor chain and how many
 *  bytes the device wrote into it (VirtIO 1.0, section 2.4.8). */
typedef struct virtq_used_elem
{
    uint32_t  uDescIdx;                              /**< idx         Start of used desc chain */
    uint32_t  cbElem;                                /**< len         Total len of used desc chain */
} VIRTQ_USED_ELEM_T;
233
/** Guest-visible used (device) ring header (VirtIO 1.0, section 2.4.8).
 *  As with VIRTQ_AVAIL_T, auRing[] is notionally 1 entry; avail_event really
 *  follows the variable-length ring and is addressed via RT_UOFFSETOF_DYN. */
typedef struct virt_used
{
    uint16_t  fFlags;                                /**< flags       used ring host-to-guest flags */
    uint16_t  uDescIdx;                              /**< idx         Index of next ring slot */
    VIRTQ_USED_ELEM_T auRing[1];                     /**< ring        Ring: used dev to drv bufs */
    uint16_t  uAvailEventIdx;                        /**< avail_event if (VIRTQ_USED_F_NO_NOTIFY) */
} VIRTQ_USED_T, *PVIRTQ_USED_T;
241
/**
 * This macro declares and sets @a fMatched: true if the physical address and access
 * length fall entirely within the MMIO-mapped capability struct.
 *
 * Rewritten as a single declaration+initializer so the macro expands to exactly
 * one statement (the old two-statement form was unsafe in unbraced contexts).
 *
 * Actual parameters:
 * @param pGcPhysCapData - [input]  Physical address of MMIO mapped capability struct
 * @param pCfgCap        - [input]  Pointer to capability in PCI configuration area
 * @param fMatched       - [output] Name of the bool declared; true if GCPhysAddr is
 *                                  within the physically mapped capability.
 *
 * Implied parameters:
 * @param GCPhysAddr - [input, implied] Physical address accessed (via MMIO callback)
 * @param cb         - [input, implied] Number of bytes to access
 */
#define MATCH_VIRTIO_CAP_STRUCT(pGcPhysCapData, pCfgCap, fMatched) \
    bool fMatched = (pGcPhysCapData) && (pCfgCap) \
        && GCPhysAddr >= (RTGCPHYS)(pGcPhysCapData) \
        && GCPhysAddr <  ((RTGCPHYS)(pGcPhysCapData) + ((PVIRTIO_PCI_CAP_T)(pCfgCap))->uLength) \
        && cb <= ((PVIRTIO_PCI_CAP_T)(pCfgCap))->uLength;
260
/**
 * This macro resolves to boolean true if uOffset matches a field offset and size exactly,
 * (or if it is a 64-bit field, if it accesses either 32-bit part as a 32-bit access)
 * This is mandated by section 4.1.3.1 of the VirtIO 1.0 specification)
 *
 * Fix: the whole expansion is now parenthesized. Previously `!MATCH_COMMON_CFG(m)`
 * (or any operator of higher precedence than ||) bound only to the first
 * sub-expression, silently producing the wrong result.
 *
 * @param member  - Member of VIRTIO_PCI_COMMON_CFG_T
 * @param uOffset - Implied parameter: Offset into VIRTIO_PCI_COMMON_CFG_T
 * @param cb      - Implied parameter: Number of bytes to access
 * @result        - true or false
 */
#define MATCH_COMMON_CFG(member) \
    (   (   RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member) == 8 \
         && (   uOffset == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member) \
             || uOffset == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member) + sizeof(uint32_t)) \
         && cb == sizeof(uint32_t)) \
     || (   uOffset == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member) \
         && cb == RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member)))
278
/**
 * Logs a guest access to the named virtio_pci_common_cfg member.
 * Implied parameters from the enclosing MMIO handler scope: pv, cb, uIntraOff, fWrite.
 */
#define LOG_COMMON_CFG_ACCESS(member) \
            virtioLogMappedIoValue(__FUNCTION__, #member, RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member), \
                                   pv, cb, uIntraOff, fWrite, false, 0);
282
/**
 * Same as LOG_COMMON_CFG_ACCESS, but for an array member; idx selects the element logged.
 * Implied parameters from the enclosing MMIO handler scope: pv, cb, uIntraOff, fWrite.
 */
#define LOG_COMMON_CFG_ACCESS_INDEXED(member, idx) \
            virtioLogMappedIoValue(__FUNCTION__, #member, RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member), \
                                   pv, cb, uIntraOff, fWrite, true, idx);
286
/**
 * Read/write accessor for a scalar VIRTIOSTATE member mirrored in virtio_pci_common_cfg.
 * Copies cb bytes between the guest buffer (pv) and pVirtio->member at intra-field
 * offset uIntraOff (supports the partial 32-bit accesses to 64-bit fields allowed
 * by MATCH_COMMON_CFG), then logs the access.
 * Implied parameters: pVirtio, uOffset, pv, cb, fWrite.
 */
#define COMMON_CFG_ACCESSOR(member) \
    { \
        uint32_t uIntraOff = uOffset - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
        if (fWrite) /* Guest driver mode to update VirtIO device */ \
            memcpy(((char *)&pVirtio->member) + uIntraOff, (const char *)pv, cb); \
        else /* Guest driver mode to read VirtIO device */ \
            memcpy((char *)pv, (const char *)(((char *)&pVirtio->member) + uIntraOff), cb); \
        LOG_COMMON_CFG_ACCESS(member); \
    }
296
/**
 * As COMMON_CFG_ACCESSOR, but for per-queue array members of VIRTIOSTATE;
 * idx selects the element (note: member + idx relies on member being an array,
 * so the addend scales by element size).
 * Implied parameters: pVirtio, uOffset, pv, cb, fWrite.
 */
#define COMMON_CFG_ACCESSOR_INDEXED(member, idx) \
    { \
        uint32_t uIntraOff = uOffset - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
        if (fWrite) /* Guest driver mode to update VirtIO device */ \
            memcpy(((char *)(pVirtio->member + idx)) + uIntraOff, (const char *)pv, cb); \
        else /* Guest driver mode to read VirtIO device */ \
            memcpy((char *)pv, (const char *)(((char *)(pVirtio->member + idx)) + uIntraOff), cb); \
        LOG_COMMON_CFG_ACCESS_INDEXED(member, idx); \
    }
306
/**
 * Read-only variant of COMMON_CFG_ACCESSOR: reads are served from pVirtio->member;
 * guest write attempts are logged and dropped (field is RO per the spec).
 * Implied parameters: pVirtio, uOffset, pv, cb, fWrite.
 */
#define COMMON_CFG_ACCESSOR_READONLY(member) \
    { \
        uint32_t uIntraOff = uOffset - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
        if (fWrite) /* Guest driver attempted to write a read-only field */ \
            LogFunc(("Guest attempted to write readonly virtio_pci_common_cfg.%s\n", #member)); \
        else /* Guest driver mode to read VirtIO device */ \
        { \
            memcpy((char *)pv, (const char *)(((char *)&pVirtio->member) + uIntraOff), cb); \
            LOG_COMMON_CFG_ACCESS(member); \
        } \
    }
318
/**
 * Read-only variant of COMMON_CFG_ACCESSOR_INDEXED: reads come from
 * pVirtio->member[idx]; guest write attempts are logged and dropped.
 * Implied parameters: pVirtio, uOffset, pv, cb, fWrite.
 */
#define COMMON_CFG_ACCESSOR_INDEXED_READONLY(member, idx) \
    { \
        uint32_t uIntraOff = uOffset - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
        if (fWrite) /* Guest driver attempted to write a read-only field */ \
            LogFunc(("Guest attempted to write readonly virtio_pci_common_cfg.%s[%d]\n", #member, idx)); \
        else /* Guest driver mode to read VirtIO device */ \
        { \
            memcpy((char *)pv, ((char *)(pVirtio->member + idx)) + uIntraOff, cb); \
            LOG_COMMON_CFG_ACCESS_INDEXED(member, idx); \
        } \
    }
330
331#define DRIVER_OK(pVirtio) (pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
332
333/**
334 * Internal queue operations
335 */
336
337static int vqIsEventNeeded(uint16_t uEventIdx, uint16_t uDescIdxNew, uint16_t uDescIdxOld);
338static bool vqIsEmpty (PVIRTIOSTATE pVirtio, uint16_t qIdx);
339static void vqReadDesc (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t uDescIdx, PVIRTQ_DESC_T pDesc);
340static uint16_t vqReadAvailRingDescIdx (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t availIdx);
341static uint16_t vqReadAvailDescIdx (PVIRTIOSTATE pVirtio, uint16_t qIdx);
342static uint16_t vqReadAvailFlags (PVIRTIOSTATE pVirtio, uint16_t qIdx);
343static uint16_t vqReadAvailUsedEvent (PVIRTIOSTATE pVirtio, uint16_t qIdx);
344static void vqWriteUsedElem (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t usedIdx, uint32_t uDescIdx, uint32_t uLen);
345static void vqWriteUsedRingDescIdx (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint16_t uDescIdx);
346static uint16_t vqReadUsedDescIdx (PVIRTIOSTATE pVirtio, uint16_t qIdx);
347static uint16_t vqReadUsedFlags (PVIRTIOSTATE pVirtio, uint16_t qIdx);
348static void vqWriteUsedFlags (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t fFlags);
349static uint16_t vqReadUsedAvailEvent (PVIRTIOSTATE pVirtio, uint16_t qIdx);
350static void vqWriteUsedAvailEvent (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t uAvailEventIdx);
351
352DECLINLINE(int) vqIsEventNeeded(uint16_t uEventIdx, uint16_t uDescIdxNew, uint16_t uDescIdxOld)
353{
354 return (uint16_t)(uDescIdxNew - uEventIdx - 1) < (uint16_t)(uDescIdxNew - uDescIdxOld);
355}
356
357DECLINLINE(bool) vqIsEmpty(PVIRTIOSTATE pVirtio, uint16_t qIdx)
358{
359 return vqReadAvailDescIdx(pVirtio, qIdx) == pVirtio->virtqProxy->uAvailIdx;
360}
361
/**
 * Accessor for the virtq descriptor table: reads descriptor uDescIdx of queue
 * qIdx from guest physical memory into *pDesc. The index is taken modulo the
 * queue size, matching the ring-wrap convention used by the other accessors.
 */
DECLINLINE(void) vqReadDesc(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t uDescIdx, PVIRTQ_DESC_T pDesc)
{
    //Log(("%s virtioQueueReadDesc: ring=%p idx=%u\n", INSTANCE(pState), pVirtQ, idx));
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->pGcPhysQueueDesc[qIdx]
                    + sizeof(VIRTQ_DESC_T) * (uDescIdx % pVirtio->uQueueSize[qIdx]),
                      pDesc, sizeof(VIRTQ_DESC_T));
}
374
/**
 * Accessors for virtq avail ring
 *
 * Reads the descriptor index stored in avail ring slot availIdx (modulo queue
 * size) of queue qIdx from guest physical memory.
 */
DECLINLINE(uint16_t) vqReadAvailRingDescIdx(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t availIdx)
{
    uint16_t uDescIdx;
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->pGcPhysQueueAvail[qIdx]
                    + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[availIdx % pVirtio->uQueueSize[qIdx]]),
                      &uDescIdx, sizeof(uDescIdx));
    return uDescIdx;
}
388
/** Reads the guest driver's avail ring producer index (virtq_avail.idx) of queue qIdx. */
DECLINLINE(uint16_t) vqReadAvailDescIdx(PVIRTIOSTATE pVirtio, uint16_t qIdx)
{
    uint16_t uDescIdx;
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->pGcPhysQueueAvail[qIdx] + RT_UOFFSETOF(VIRTQ_AVAIL_T, uDescIdx),
                      &uDescIdx, sizeof(uDescIdx));
    return uDescIdx;
}
398
/** Reads the guest driver's avail ring flags (virtq_avail.flags, e.g. VIRTQ_AVAIL_F_NO_INTERRUPT) of queue qIdx. */
DECLINLINE(uint16_t) vqReadAvailFlags(PVIRTIOSTATE pVirtio, uint16_t qIdx)
{
    uint16_t fFlags;
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->pGcPhysQueueAvail[qIdx] + RT_UOFFSETOF(VIRTQ_AVAIL_T, fFlags),
                      &fFlags, sizeof(fFlags));
    return fFlags;
}
408
409DECLINLINE(uint16_t) vqReadAvailUsedEvent(PVIRTIOSTATE pVirtio, uint16_t qIdx)
410{
411 uint16_t uUsedEventIdx;
412 /** VirtIO 1.0 uUsedEventIdx (used_event) immediately follows ring */
413 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
414 PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
415 + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[pVirtio->uQueueSize[qIdx]]),
416 &uUsedEventIdx, sizeof(uUsedEventIdx));
417 return uUsedEventIdx;
418}
419
/**
 * Accessors for virtq used ring
 *
 * Writes one used-ring element {uDescIdx, uLen} into slot usedIdx (modulo
 * queue size) of queue qIdx in guest physical memory.
 */
DECLINLINE(void) vqWriteUsedElem(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t usedIdx, uint32_t uDescIdx, uint32_t uLen)
{
    VIRTQ_USED_ELEM_T elem = { uDescIdx,  uLen };
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
                          pVirtio->pGcPhysQueueUsed[qIdx]
                        + RT_UOFFSETOF_DYN(VIRTQ_USED_T, auRing[usedIdx % pVirtio->uQueueSize[qIdx]]),
                          &elem, sizeof(elem));
}
432
/** Publishes the device's used ring producer index (virtq_used.idx) of queue qIdx to the guest. */
DECLINLINE(void) vqWriteUsedRingDescIdx(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint16_t uDescIdx)
{
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
                          pVirtio->pGcPhysQueueUsed[qIdx] + RT_UOFFSETOF(VIRTQ_USED_T, uDescIdx),
                          &uDescIdx, sizeof(uDescIdx));
}
440
441DECLINLINE(uint16_t)vqReadUsedDescIdx(PVIRTIOSTATE pVirtio, uint16_t qIdx)
442{
443 uint16_t uDescIdx;
444 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
445 PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
446 pVirtio->pGcPhysQueueUsed[qIdx] + RT_UOFFSETOF(VIRTQ_USED_T, uDescIdx),
447 &uDescIdx, sizeof(uDescIdx));
448 return uDescIdx;
449}
450
/** Reads the device-side used ring flags (virtq_used.flags, e.g. VIRTQ_USED_F_NO_NOTIFY) of queue qIdx. */
DECLINLINE(uint16_t) vqReadUsedFlags(PVIRTIOSTATE pVirtio, uint16_t qIdx)
{
    uint16_t fFlags;
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->pGcPhysQueueUsed[qIdx] + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
                      &fFlags, sizeof(fFlags));
    return fFlags;
}
460
461DECLINLINE(void) vqWriteUsedFlags(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t fFlags)
462{
463 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
464 PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
465 pVirtio->pGcPhysQueueUsed[qIdx] + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
466 &fFlags, sizeof(fFlags));
467}
468
469DECLINLINE(uint16_t) vqReadUsedAvailEvent(PVIRTIOSTATE pVirtio, uint16_t qIdx)
470{
471 uint16_t uAvailEventIdx;
472 /** VirtIO 1.0 uAvailEventIdx (avail_event) immediately follows ring */
473 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
474 PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
475 + RT_UOFFSETOF_DYN(VIRTQ_USED_T, auRing[pVirtio->uQueueSize[qIdx]]),
476 &uAvailEventIdx, sizeof(uAvailEventIdx));
477 return uAvailEventIdx;
478}
479
480DECLINLINE(void) vqWriteUsedAvailEvent(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t uAvailEventIdx)
481{
482 /** VirtIO 1.0 uAvailEventIdx (avail_event) immediately follows ring */
483 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
484 PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
485 pVirtio->pGcPhysQueueUsed[qIdx]
486 + RT_UOFFSETOF_DYN(VIRTQ_USED_T, auRing[pVirtio->uQueueSize[qIdx]]),
487 &uAvailEventIdx, sizeof(uAvailEventIdx));
488}
489
490/**
491 * Makes the MMIO-mapped Virtio uDeviceStatus registers non-cryptic */
492DECLINLINE(void) virtioLogDeviceStatus( uint8_t status)
493{
494 if (status == 0)
495 Log(("RESET"));
496 else
497 {
498 int primed = 0;
499 if (status & VIRTIO_STATUS_ACKNOWLEDGE)
500 Log(("ACKNOWLEDGE", primed++));
501 if (status & VIRTIO_STATUS_DRIVER)
502 Log(("%sDRIVER", primed++ ? " | " : ""));
503 if (status & VIRTIO_STATUS_FEATURES_OK)
504 Log(("%sFEATURES_OK", primed++ ? " | " : ""));
505 if (status & VIRTIO_STATUS_DRIVER_OK)
506 Log(("%sDRIVER_OK", primed++ ? " | " : ""));
507 if (status & VIRTIO_STATUS_FAILED)
508 Log(("%sFAILED", primed++ ? " | " : ""));
509 if (status & VIRTIO_STATUS_DEVICE_NEEDS_RESET)
510 Log(("%sNEEDS_RESET", primed++ ? " | " : ""));
511 }
512}
513
514static void virtioResetQueue (PVIRTIOSTATE pVirtio, uint16_t qIdx);
515static void vqNotifyDriver (PVIRTIOSTATE pVirtio, uint16_t qIdx);
516static int virtioRaiseInterrupt (PVIRTIOSTATE pVirtio, uint8_t uCause);
517static void virtioLowerInterrupt (PVIRTIOSTATE pVirtio);
518static void virtioQueueNotified (PVIRTIOSTATE pVirtio, uint16_t qidx, uint16_t uDescIdx);
519static int virtioCommonCfgAccessed (PVIRTIOSTATE pVirtio, int fWrite, off_t uOffset, unsigned cb, void const *pv);
520static void virtioGuestResetted (PVIRTIOSTATE pVirtio);
521
522static DECLCALLBACK(int) virtioR3SaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM);
523static DECLCALLBACK(int) virtioR3LoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
524static DECLCALLBACK(int) virtioR3LoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM);
525static DECLCALLBACK(int) virtioR3LiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass);
526
527#endif /* !VBOX_INCLUDED_SRC_VirtIO_Virtio_1_0_impl_h */
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette