VirtualBox

source: vbox/trunk/src/VBox/Devices/VirtIO/Virtio_1_0.cpp@ 80692

Last change on this file since 80692 was 80683, checked in by vboxsync, 6 years ago

Storage:DevVirtioSCSI.cpp: suspend/resume/reset implemented and seems to be working. See bugref:9440, Comment #84

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 62.3 KB
Line 
1/* $Id: Virtio_1_0.cpp 80683 2019-09-09 19:57:50Z vboxsync $ */
2/** @file
3 * Virtio_1_0 - Virtio Common (PCI, feature & config mgt, queue mgt & proxy, notification mgt)
4 */
5
6/*
7 * Copyright (C) 2009-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEV_VIRTIO
23
24#include <VBox/log.h>
25#include <iprt/param.h>
26#include <iprt/assert.h>
27#include <iprt/uuid.h>
28#include <iprt/mem.h>
29#include <iprt/assert.h>
30#include <iprt/sg.h>
31#include <VBox/vmm/pdmdev.h>
32#include "Virtio_1_0_impl.h"
33#include "Virtio_1_0.h"
34
35#define INSTANCE(pVirtio) pVirtio->szInstance
36#define QUEUENAME(qIdx) (pVirtio->virtqProxy[qIdx].szVirtqName)
37
38
39/**
40 * See API comments in header file for description
41 */
42int virtioQueueAttach(VIRTIOHANDLE hVirtio, uint16_t qIdx, const char *pcszName)
43{
44 LogFunc(("%s\n", pcszName));
45 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)hVirtio;
46 PVIRTQ_PROXY_T pVirtqProxy = &(pVirtio->virtqProxy[qIdx]);
47 pVirtqProxy->pDescChain = (PVIRTQ_DESC_CHAIN_T)RTMemAllocZ(sizeof(VIRTQ_DESC_CHAIN_T));
48 if (!pVirtqProxy->pDescChain)
49 {
50 Log(("Out of memory!"));
51 return VERR_NO_MEMORY;
52 }
53 pVirtqProxy->uAvailIdx = 0;
54 pVirtqProxy->uUsedIdx = 0;
55 pVirtqProxy->fEventThresholdReached = false;
56 RTStrCopy((char *)pVirtqProxy->szVirtqName, sizeof(pVirtqProxy->szVirtqName), pcszName);
57 return VINF_SUCCESS;
58
59}
60
61/**
62 * See API comments in header file for description
63 */
64const char *virtioQueueGetName(VIRTIOHANDLE hVirtio, uint16_t qIdx)
65{
66 return (const char *)((PVIRTIOSTATE)hVirtio)->virtqProxy[qIdx].szVirtqName;
67}
68
69/**
70 * See API comments in header file for description
71 */
72int virtioQueueSkip(VIRTIOHANDLE hVirtio, uint16_t qIdx)
73{
74 Assert(qIdx < sizeof(VIRTQ_PROXY_T));
75
76 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)hVirtio;
77 PVIRTQ_PROXY_T pVirtqProxy = &pVirtio->virtqProxy[qIdx];
78
79 AssertMsgReturn(DRIVER_OK(pVirtio) && pVirtio->uQueueEnable[qIdx],
80 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
81
82 if (virtioQueueIsEmpty(pVirtio, qIdx))
83 return VERR_NOT_AVAILABLE;
84
85 Log2Func(("%s avail_idx=%u\n", pVirtqProxy->szVirtqName, pVirtqProxy->uAvailIdx));
86 pVirtqProxy->uAvailIdx++;
87
88 return VINF_SUCCESS;
89}
90
91
92/**
93 * See API comments in header file for description
94 */
95uint64_t virtioGetNegotiatedFeatures(VIRTIOHANDLE hVirtio)
96{
97 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)hVirtio;
98 return pVirtio->uDriverFeatures;
99}
100
101/**
102 * See API comments in header file for description
103 */
104bool virtioQueueIsEmpty(VIRTIOHANDLE hVirtio, uint16_t qIdx)
105{
106 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)hVirtio;
107 if (!(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK))
108 return true;
109 return virtqIsEmpty(pVirtio, qIdx);
110}
111
112/**
113 * See API comments in header file for description
114 */
115int virtioQueuePeek(VIRTIOHANDLE hVirtio, uint16_t qIdx, PPRTSGBUF ppInSegs, PPRTSGBUF ppOutSegs)
116{
117 return virtioQueueGet(hVirtio, qIdx, false /* fRemove */, ppInSegs, ppOutSegs);
118}
119
/**
 * Fetches the descriptor chain at the head of the queue's avail ring, converting it into
 * two host-side segment lists: device-writable segments (IN, toward the guest) and
 * device-readable segments (OUT, from the guest).  Optionally advances the shadow avail
 * cursor so the chain is consumed.  See API comments in header file for description.
 *
 * @param hVirtio    Handle to the virtio device instance state.
 * @param qIdx       Index of the queue to pull a chain from.
 * @param fRemove    When true the shadow avail index is advanced (chain is consumed);
 *                   when false this behaves as a peek.
 * @param ppInSegs   Where to return the device-writable S/G buffer (optional, may be NULL).
 * @param ppOutSegs  Where to return the device-readable S/G buffer (optional, may be NULL).
 *
 * @returns VINF_SUCCESS, VERR_INVALID_STATE if driver/queue not ready, or
 *          VERR_NOT_AVAILABLE if the avail ring is empty.
 */
int virtioQueueGet(VIRTIOHANDLE hVirtio, uint16_t qIdx, bool fRemove,
                   PPRTSGBUF ppInSegs, PPRTSGBUF ppOutSegs)
{
    PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)hVirtio;
    PVIRTQ_PROXY_T pVirtqProxy = &pVirtio->virtqProxy[qIdx];
    PVIRTQ_DESC_CHAIN_T pDescChain = pVirtqProxy->pDescChain;

    AssertMsgReturn(DRIVER_OK(pVirtio) && pVirtio->uQueueEnable[qIdx],
                    ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);

    if (virtqIsEmpty(pVirtio, qIdx))
        return VERR_NOT_AVAILABLE;

    /* Restart segment accounting for this chain. */
    pDescChain->cSegsIn = pDescChain->cSegsOut = 0;

    pDescChain->uHeadIdx = virtioReadAvailDescIdx(pVirtio, qIdx, pVirtqProxy->uAvailIdx);
    uint16_t uDescIdx = pDescChain->uHeadIdx;

    Log6Func(("%s DESC CHAIN: (head) desc_idx=%u [avail_idx=%u]\n",
              pVirtqProxy->szVirtqName, pDescChain->uHeadIdx, pVirtqProxy->uAvailIdx));

    if (fRemove)
        pVirtqProxy->uAvailIdx++;

    uint32_t cbIn = 0, cbOut = 0;
    VIRTQ_DESC_T desc;
    do
    {
        RTSGSEG *pSeg;

        /*
         * Malicious guests may go beyond aSegsIn or aSegsOut boundaries by linking
         * several descriptors into a loop.  Since there is no legitimate way to get a sequence
         * of linked descriptors exceeding the total number of descriptors in the ring (see
         * @bugref{8620}), the following aborts I/O on breach and applies a simple
         * exponentially-throttled release-log warning.
         */
        if (pDescChain->cSegsIn + pDescChain->cSegsOut >= VIRTQ_MAX_SIZE)
        {
            static volatile uint32_t s_cMessages  = 0;
            static volatile uint32_t s_cThreshold = 1;
            if (ASMAtomicIncU32(&s_cMessages) == ASMAtomicReadU32(&s_cThreshold))
            {
                LogRel(("Too many linked descriptors; "
                        "check if the guest arranges descriptors in a loop.\n"));
                if (ASMAtomicReadU32(&s_cMessages) != 1)
                    LogRel(("(the above error has occured %u times so far)\n",
                            ASMAtomicReadU32(&s_cMessages)));
                /* Log again only after 10x as many occurrences. */
                ASMAtomicWriteU32(&s_cThreshold, ASMAtomicReadU32(&s_cThreshold) * 10);
            }
            break;
        }
        RT_UNTRUSTED_VALIDATED_FENCE(); /* guard against speculative use of the unvalidated count */

        virtioReadDesc(pVirtio, qIdx, uDescIdx, &desc);

        /* VIRTQ_DESC_F_WRITE means the DEVICE writes this buffer (guest-IN direction). */
        if (desc.fFlags & VIRTQ_DESC_F_WRITE)
        {
            Log6Func(("%s IN  desc_idx=%u seg=%u addr=%RGp cb=%u\n",
                      QUEUENAME(qIdx), uDescIdx, pDescChain->cSegsIn, desc.pGcPhysBuf, desc.cb));
            cbIn += desc.cb;
            pSeg = &(pDescChain->aSegsIn[pDescChain->cSegsIn++]);
        }
        else
        {
            Log6Func(("%s OUT desc_idx=%u seg=%u addr=%RGp cb=%u\n",
                      QUEUENAME(qIdx), uDescIdx, pDescChain->cSegsOut, desc.pGcPhysBuf, desc.cb));
            cbOut += desc.cb;
            pSeg = &(pDescChain->aSegsOut[pDescChain->cSegsOut++]);
        }

        /* NOTE: pvSeg holds a GUEST-PHYSICAL address cast to a host pointer; it is
         * not directly dereferenceable (see the PDMDevHlpPhysWrite use in virtioQueuePut). */
        pSeg->pvSeg = (void *)desc.pGcPhysBuf;
        pSeg->cbSeg = desc.cb;

        uDescIdx = desc.uDescIdxNext;
    } while (desc.fFlags & VIRTQ_DESC_F_NEXT);

    /* Wrap the collected segments in reusable S/G buffers owned by the queue proxy. */
    RTSgBufInit(&pVirtqProxy->inSgBuf,  (PCRTSGSEG)&pDescChain->aSegsIn,  pDescChain->cSegsIn);
    RTSgBufInit(&pVirtqProxy->outSgBuf, (PCRTSGSEG)&pDescChain->aSegsOut, pDescChain->cSegsOut);

    if (ppInSegs)
        *ppInSegs = &pVirtqProxy->inSgBuf;

    if (ppOutSegs)
        *ppOutSegs = &pVirtqProxy->outSgBuf;

    Log6Func(("%s -- segs OUT: %u (%u bytes)   IN: %u (%u bytes) --\n",
              pVirtqProxy->szVirtqName, pDescChain->cSegsOut, cbOut, pDescChain->cSegsIn, cbIn));

    return VINF_SUCCESS;
}
213
214 /** See API comments in header file prototype for description */
215int virtioQueuePut(VIRTIOHANDLE hVirtio, uint16_t qIdx, PRTSGBUF pSgBuf, bool fFence)
216{
217
218 Assert(qIdx < sizeof(VIRTQ_PROXY_T));
219
220 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)hVirtio;
221 PVIRTQ_PROXY_T pVirtqProxy = &pVirtio->virtqProxy[qIdx];
222 PVIRTQ_DESC_CHAIN_T pDescChain = pVirtqProxy->pDescChain;
223
224 AssertMsgReturn(DRIVER_OK(pVirtio) /*&& pVirtio->uQueueEnable[qIdx]*/,
225 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
226 /**
227 * Copy caller's virtual memory sg buffer to physical memory
228 */
229 PRTSGBUF pBufSrc = pSgBuf;
230 PRTSGBUF pBufDst = &pVirtqProxy->inSgBuf;
231
232 size_t cbRemain = RTSgBufCalcTotalLength(pBufSrc);
233 uint16_t uUsedIdx = virtioReadUsedRingIdx(pVirtio, qIdx);
234 Log6Func(("Copying client data to %s, desc chain (head desc_idx %d)\n",
235 QUEUENAME(qIdx), uUsedIdx));
236
237 while (cbRemain)
238 {
239 uint64_t dstSgStart = (uint64_t)pBufDst->paSegs[pBufDst->idxSeg].pvSeg;
240 uint64_t dstSgLen = (uint64_t)pBufDst->paSegs[pBufDst->idxSeg].cbSeg;
241 uint64_t dstSgCur = (uint64_t)pBufDst->pvSegCur;
242 size_t cbCopy = RT_MIN(pBufSrc->cbSegLeft, dstSgLen - (dstSgCur - dstSgStart));
243 PDMDevHlpPhysWrite(pVirtio->CTX_SUFF(pDevIns),
244 (RTGCPHYS)pBufDst->pvSegCur, pBufSrc->pvSegCur, cbCopy);
245 RTSgBufAdvance(pBufSrc, cbCopy);
246 RTSgBufAdvance(pBufDst, cbCopy);
247 cbRemain -= cbCopy;
248 }
249
250 if (fFence)
251 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
252
253 /** If this write-ahead crosses threshold where the driver wants to get an event flag it */
254 if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
255 if (pVirtqProxy->uUsedIdx == virtioReadAvailUsedEvent(pVirtio, qIdx))
256 pVirtqProxy->fEventThresholdReached = true;
257
258 /**
259 * Place used buffer's descriptor in used ring but don't update used ring's slot index.
260 * That will be done with a subsequent client call to virtioQueueSync() */
261 virtioWriteUsedElem(pVirtio, qIdx,
262 pVirtqProxy->uUsedIdx++,
263 pDescChain->uHeadIdx,
264 pDescChain->cSegsIn);
265
266 if (LogIs2Enabled())
267 {
268 size_t cbInSgBuf = RTSgBufCalcTotalLength(pBufDst);
269 size_t cbWritten = cbInSgBuf - RTSgBufCalcLengthLeft(pBufDst);
270 Log2Func((".... Copied %u bytes to %u byte buffer, residual=%d\n",
271 cbWritten, cbInSgBuf, cbInSgBuf - cbWritten));
272 }
273 Log6Func(("Write ahead used_idx=%d, %s used_idx=%d\n",
274 pVirtqProxy->uUsedIdx, QUEUENAME(qIdx), uUsedIdx));
275
276 return VINF_SUCCESS;
277}
278
279/**
280 * See API comments in header file for description
281 */
282int virtioQueueSync(VIRTIOHANDLE hVirtio, uint16_t qIdx)
283{
284 Assert(qIdx < sizeof(VIRTQ_PROXY_T));
285
286 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)hVirtio;
287 PVIRTQ_PROXY_T pVirtqProxy = &pVirtio->virtqProxy[qIdx];
288
289 AssertMsgReturn(DRIVER_OK(pVirtio) && pVirtio->uQueueEnable[qIdx],
290 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
291
292 uint16_t uIdx = virtioReadUsedRingIdx(pVirtio, qIdx);
293 Log6Func(("Updating %s used_idx from %u to %u\n",
294 QUEUENAME(qIdx), uIdx, pVirtqProxy->uUsedIdx));
295
296 virtioWriteUsedRingIdx(pVirtio, qIdx, pVirtqProxy->uUsedIdx);
297 virtioNotifyGuestDriver(pVirtio, qIdx, false);
298
299 return VINF_SUCCESS;
300}
301
302/**
303 * See API comments in header file for description
304 */
305static void virtioQueueNotified(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint16_t uNotifyIdx)
306{
307 Assert(uNotifyIdx == qIdx);
308
309 PVIRTQ_PROXY_T pVirtqProxy = &pVirtio->virtqProxy[qIdx];
310 Log6Func(("%s\n", pVirtqProxy->szVirtqName));
311
312 /** Inform client */
313 pVirtio->virtioCallbacks.pfnVirtioQueueNotified((VIRTIOHANDLE)pVirtio, pVirtio->pClientContext, qIdx);
314}
315
316/**
317 * See API comments in header file for description
318 */
319void virtioPropagateResumeNotification(VIRTIOHANDLE hVirtio)
320{
321 virtioNotifyGuestDriver((PVIRTIOSTATE)hVirtio, (uint16_t)NULL /* qIdx */, true /* fForce */);
322}
323
/**
 * Trigger MSI-X or INT# interrupt to notify guest of data added to used ring of
 * the specified virtq, depending on the interrupt configuration of the device
 * and depending on negotiated and realtime constraints flagged by the guest driver.
 * See VirtIO 1.0 specification (section 2.4.7).
 *
 * @param pVirtio - Instance state
 * @param qIdx    - Queue to check for guest interrupt handling preference
 * @param fForce  - Overrides qIdx, forcing notification, regardless of driver's
 *                  notification preferences.  This is a safeguard to prevent
 *                  stalls upon resuming the VM.  VirtIO 1.0 specification Section 4.1.5.5
 *                  indicates spurious interrupts are harmless to guest driver's state,
 *                  as they only cause the guest driver to scan queues for work to do.
 *
 * NOTE(review): when VIRTIO_F_EVENT_IDX is negotiated, fForce is only honored if the
 * event threshold was reached — a forced resume notification may be skipped in that
 * mode.  Verify against the intent of virtioPropagateResumeNotification().
 */
static void virtioNotifyGuestDriver(PVIRTIOSTATE pVirtio, uint16_t qIdx, bool fForce)
{
    PVIRTQ_PROXY_T pVirtqProxy = &pVirtio->virtqProxy[qIdx];

    AssertMsgReturnVoid(DRIVER_OK(pVirtio), ("Guest driver not in ready state.\n"));

    /* No MSI-X vector configured: fall back to the INT#/ISR notification path. */
    if (pVirtio->uMsixConfig == VIRTIO_MSI_NO_VECTOR)
    {
        if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
        {
            /* EVENT_IDX negotiated: interrupt only when the used index crossed the
             * driver-specified event threshold (latched in virtioQueuePut()). */
            if (pVirtqProxy->fEventThresholdReached)
            {
                virtioRaiseInterrupt(pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, fForce);
                pVirtqProxy->fEventThresholdReached = false;
                return;
            }
            Log6Func(("...skipping interrupt: VIRTIO_F_EVENT_IDX set but threshold not reached\n"));
        }
        else
        {
            /* If guest driver hasn't suppressed interrupts, interrupt. */
            if (fForce || !(virtioReadUsedFlags(pVirtio, qIdx) & VIRTQ_AVAIL_F_NO_INTERRUPT))
            {
                virtioRaiseInterrupt(pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, fForce);
                return;
            }
            Log6Func(("...skipping interrupt. Guest flagged VIRTQ_AVAIL_F_NO_INTERRUPT for queue\n"));
        }
    }
    else
    {
        /* TBD, do MSI notification if criteria met */
    }
}
372
373/**
374 * NOTE: The consumer (PDM device) must call this function to 'forward' a relocation call.
375 *
376 * Device relocation callback.
377 *
378 * When this callback is called the device instance data, and if the
379 * device have a GC component, is being relocated, or/and the selectors
380 * have been changed. The device must use the chance to perform the
381 * necessary pointer relocations and data updates.
382 *
383 * Before the GC code is executed the first time, this function will be
384 * called with a 0 delta so GC pointer calculations can be one in one place.
385 *
386 * @param pDevIns Pointer to the device instance.
387 * @param offDelta The relocation delta relative to the old location.
388 *
389 * @remark A relocation CANNOT fail.
390 */
391void virtioRelocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
392{
393 RT_NOREF(offDelta);
394 PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);
395 LogFunc(("\n"));
396
397 pVirtio->pDevInsR3 = pDevIns;
398 pVirtio->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
399 pVirtio->pDevInsR0 = PDMDEVINS_2_R0PTR(pDevIns);
400}
401
402/**
403 * Raise interrupt.
404 *
405 * @param pVirtio The device state structure.
406 * @param uCause Interrupt cause bit mask to set in PCI ISR port.
407 */
408static int virtioRaiseInterrupt(PVIRTIOSTATE pVirtio, uint8_t uCause, bool fForce)
409{
410
411 if (fForce)
412 Log6Func(("reason: resumed after suspend\n"));
413 else
414 if (uCause == VIRTIO_ISR_VIRTQ_INTERRUPT)
415 Log6Func(("reason: buffer added to 'used' ring.\n"));
416 else
417 if (uCause == VIRTIO_ISR_DEVICE_CONFIG)
418 Log6Func(("reason: device config change\n"));
419
420 pVirtio->uISR |= uCause;
421 PDMDevHlpPCISetIrq(pVirtio->CTX_SUFF(pDevIns), 0, 1);
422 return VINF_SUCCESS;
423}
424
425/**
426 * Lower interrupt. (Called when guest reads ISR)
427 *
428 * @param pVirtio The device state structure.
429 */
430static void virtioLowerInterrupt(PVIRTIOSTATE pVirtio)
431{
432 PDMDevHlpPCISetIrq(pVirtio->CTX_SUFF(pDevIns), 0, 0);
433}
434
435static void virtioResetQueue(PVIRTIOSTATE pVirtio, uint16_t qIdx)
436{
437 PVIRTQ_PROXY_T pVirtQ = &pVirtio->virtqProxy[qIdx];
438 pVirtQ->uAvailIdx = 0;
439 pVirtQ->uUsedIdx = 0;
440 pVirtio->uQueueEnable[qIdx] = false;
441 pVirtio->uQueueSize[qIdx] = VIRTQ_MAX_SIZE;
442 pVirtio->uQueueNotifyOff[qIdx] = qIdx;
443}
444
445
446static void virtioResetDevice(PVIRTIOSTATE pVirtio)
447{
448 Log2Func(("\n"));
449 pVirtio->uDeviceFeaturesSelect = 0;
450 pVirtio->uDriverFeaturesSelect = 0;
451 pVirtio->uConfigGeneration = 0;
452 pVirtio->uDeviceStatus = 0;
453 pVirtio->uISR = 0;
454
455#ifndef MSIX_SUPPORT
456 /** This is required by VirtIO 1.0 specification, section 4.1.5.1.2 */
457 pVirtio->uMsixConfig = VIRTIO_MSI_NO_VECTOR;
458 for (int i = 0; i < VIRTQ_MAX_CNT; i++)
459 pVirtio->uQueueMsixVector[i] = VIRTIO_MSI_NO_VECTOR;
460#endif
461
462 pVirtio->uNumQueues = VIRTQ_MAX_CNT;
463 for (uint16_t qIdx = 0; qIdx < pVirtio->uNumQueues; qIdx++)
464 virtioResetQueue(pVirtio, qIdx);
465}
466
467/**
468 * See API comments in header file for description
469 */
470bool virtioIsQueueEnabled(VIRTIOHANDLE hVirtio, uint16_t qIdx)
471{
472 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)hVirtio;
473 return pVirtio->uQueueEnable[qIdx];
474}
475
476/**
477 * See API comments in header file for description
478 */
479void virtioQueueEnable(VIRTIOHANDLE hVirtio, uint16_t qIdx, bool fEnabled)
480{
481 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)hVirtio;
482 if (fEnabled)
483 pVirtio->uQueueSize[qIdx] = VIRTQ_MAX_SIZE;
484 else
485 pVirtio->uQueueSize[qIdx] = 0;
486}
487
488/**
489 * Initiate orderly reset procedure.
490 * Invoked by client to reset the device and driver (see VirtIO 1.0 section 2.1.1/2.1.2)
491 */
492void virtioResetAll(VIRTIOHANDLE hVirtio)
493{
494 LogFunc(("VIRTIO RESET REQUESTED!!!\n"));
495 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)hVirtio;
496 pVirtio->uDeviceStatus |= VIRTIO_STATUS_DEVICE_NEEDS_RESET;
497 if (pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
498 {
499 pVirtio->fGenUpdatePending = true;
500 virtioRaiseInterrupt(pVirtio, VIRTIO_ISR_DEVICE_CONFIG, false /* fForce */);
501 }
502}
503
504/**
505 * Invoked by this implementation when guest driver resets the device.
506 * The driver itself will not reset until the device has read the status change.
507 */
508static void virtioGuestResetted(PVIRTIOSTATE pVirtio)
509{
510 LogFunc(("Guest reset the device\n"));
511
512 /** Let the client know */
513 pVirtio->virtioCallbacks.pfnVirtioStatusChanged((VIRTIOHANDLE)pVirtio, pVirtio->pClientContext, false);
514 virtioResetDevice(pVirtio);
515}
516
/**
 * Handle accesses to the Common Configuration capability (virtio_pci_common_cfg).
 * Dispatches on the register offset via the MATCH_COMMON_CFG / COMMON_CFG_ACCESSOR*
 * macros (defined in Virtio_1_0_impl.h; presumably they consume the local
 * uIntraOff/val/pv/cb variables — verify against the header).
 *
 * @returns VBox status code (VERR_ACCESS_DENIED on invalid/readonly access).
 *
 * @param pVirtio   Virtio instance state
 * @param fWrite    If write access (otherwise read access)
 * @param uOffset   Offset of the access within the common-config structure
 * @param cb        Number of bytes to read or write
 * @param pv        Pointer to location to write to or read from
 */
static int virtioCommonCfgAccessed(PVIRTIOSTATE pVirtio, int fWrite, off_t uOffset, unsigned cb, void const *pv)
{
    int rc = VINF_SUCCESS;
    uint64_t val;
    if (MATCH_COMMON_CFG(uDeviceFeatures))
    {
        if (fWrite) /* Guest WRITE pCommonCfg->uDeviceFeatures — readonly, ignored */
            LogFunc(("Guest attempted to write readonly virtio_pci_common_cfg.device_feature\n"));
        else /* Guest READ pCommonCfg->uDeviceFeatures — 64-bit value windowed by the selector */
        {
            uint32_t uIntraOff = uOffset - RT_UOFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uDeviceFeatures);
            switch(pVirtio->uDeviceFeaturesSelect)
            {
                case 0: /* low 32 feature bits */
                    val = pVirtio->uDeviceFeatures & 0xffffffff;
                    memcpy((void *)pv, (const void *)&val, cb);
                    LOG_COMMON_CFG_ACCESS(uDeviceFeatures);
                    break;
                case 1: /* high 32 feature bits */
                    val = (pVirtio->uDeviceFeatures >> 32) & 0xffffffff;
                    uIntraOff += 4; /* adjust logged offset into the 64-bit field */
                    memcpy((void *)pv, (const void *)&val, cb);
                    LOG_COMMON_CFG_ACCESS(uDeviceFeatures);
                    break;
                default:
                    LogFunc(("Guest read uDeviceFeatures with out of range selector (%d), returning 0\n",
                             pVirtio->uDeviceFeaturesSelect));
                    return VERR_ACCESS_DENIED;
            }
        }
    }
    else if (MATCH_COMMON_CFG(uDriverFeatures))
    {
        if (fWrite) /* Guest WRITE pCommonCfg->uDriverFeatures — negotiated features, selector-windowed */
        {
            uint32_t uIntraOff = uOffset - RT_UOFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uDriverFeatures);
            switch(pVirtio->uDriverFeaturesSelect)
            {
                case 0: /* low 32 bits */
                    memcpy(&pVirtio->uDriverFeatures, pv, cb);
                    LOG_COMMON_CFG_ACCESS(uDriverFeatures);
                    break;
                case 1: /* high 32 bits */
                    memcpy(((char *)&pVirtio->uDriverFeatures) + sizeof(uint32_t), pv, cb);
                    uIntraOff += 4;
                    LOG_COMMON_CFG_ACCESS(uDriverFeatures);
                    break;
                default:
                    LogFunc(("Guest wrote uDriverFeatures with out of range selector (%d), returning 0\n",
                             pVirtio->uDriverFeaturesSelect));
                    return VERR_ACCESS_DENIED;
            }
        }
        else /* Guest READ pCommonCfg->uDriverFeatures */
        {
            uint32_t uIntraOff = uOffset - RT_UOFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uDriverFeatures);
            switch(pVirtio->uDriverFeaturesSelect)
            {
                case 0: /* low 32 bits */
                    val = pVirtio->uDriverFeatures & 0xffffffff;
                    memcpy((void *)pv, (const void *)&val, cb);
                    LOG_COMMON_CFG_ACCESS(uDriverFeatures);
                    break;
                case 1: /* high 32 bits */
                    val = (pVirtio->uDriverFeatures >> 32) & 0xffffffff;
                    uIntraOff += 4;
                    memcpy((void *)pv, (const void *)&val, cb);
                    LOG_COMMON_CFG_ACCESS(uDriverFeatures);
                    break;
                default:
                    LogFunc(("Guest read uDriverFeatures with out of range selector (%d), returning 0\n",
                             pVirtio->uDriverFeaturesSelect));
                    return VERR_ACCESS_DENIED;
            }
        }
    }
    else if (MATCH_COMMON_CFG(uNumQueues))
    {
        if (fWrite) /* num_queues is readonly */
        {
            Log2Func(("Guest attempted to write readonly virtio_pci_common_cfg.num_queues\n"));
            return VERR_ACCESS_DENIED;
        }
        else
        {
            uint32_t uIntraOff = 0;
            *(uint16_t *)pv = VIRTQ_MAX_CNT;
            LOG_COMMON_CFG_ACCESS(uNumQueues);
        }
    }
    else if (MATCH_COMMON_CFG(uDeviceStatus))
    {
        if (fWrite) /* Guest WRITE pCommonCfg->uDeviceStatus */
        {
            pVirtio->uDeviceStatus = *(uint8_t *)pv;
            Log6Func(("Guest wrote uDeviceStatus ................ ("));
            virtioLogDeviceStatus(pVirtio->uDeviceStatus);
            Log6((")\n"));
            /* Writing zero status means the guest is resetting the device. */
            if (pVirtio->uDeviceStatus == 0)
                virtioGuestResetted(pVirtio);
            /*
             * Notify the client only if the DRIVER_OK bit actually toggled since last write.
             */
            bool fOkayNow = pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK;
            bool fWasOkay = pVirtio->uPrevDeviceStatus & VIRTIO_STATUS_DRIVER_OK;
            if ((fOkayNow && !fWasOkay) || (!fOkayNow && fWasOkay))
                pVirtio->virtioCallbacks.pfnVirtioStatusChanged((VIRTIOHANDLE)pVirtio, pVirtio->pClientContext,
                        pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK);
            pVirtio->uPrevDeviceStatus = pVirtio->uDeviceStatus;
        }
        else /* Guest READ pCommonCfg->uDeviceStatus */
        {
            Log6Func(("Guest read  uDeviceStatus ................ ("));
            *(uint32_t *)pv = pVirtio->uDeviceStatus;
            virtioLogDeviceStatus(pVirtio->uDeviceStatus);
            Log6((")\n"));
        }
    }
    /* Remaining simple registers are handled uniformly by the accessor macros. */
    else if (MATCH_COMMON_CFG(uMsixConfig))
        COMMON_CFG_ACCESSOR(uMsixConfig);
    else if (MATCH_COMMON_CFG(uDeviceFeaturesSelect))
        COMMON_CFG_ACCESSOR(uDeviceFeaturesSelect);
    else if (MATCH_COMMON_CFG(uDriverFeaturesSelect))
        COMMON_CFG_ACCESSOR(uDriverFeaturesSelect);
    else if (MATCH_COMMON_CFG(uConfigGeneration))
        COMMON_CFG_ACCESSOR_READONLY(uConfigGeneration);
    else if (MATCH_COMMON_CFG(uQueueSelect))
        COMMON_CFG_ACCESSOR(uQueueSelect);
    else if (MATCH_COMMON_CFG(uQueueSize))
        COMMON_CFG_ACCESSOR_INDEXED(uQueueSize, pVirtio->uQueueSelect);
    else if (MATCH_COMMON_CFG(uQueueMsixVector))
        COMMON_CFG_ACCESSOR_INDEXED(uQueueMsixVector, pVirtio->uQueueSelect);
    else if (MATCH_COMMON_CFG(uQueueEnable))
        COMMON_CFG_ACCESSOR_INDEXED(uQueueEnable, pVirtio->uQueueSelect);
    else if (MATCH_COMMON_CFG(uQueueNotifyOff))
        COMMON_CFG_ACCESSOR_INDEXED_READONLY(uQueueNotifyOff, pVirtio->uQueueSelect);
    else if (MATCH_COMMON_CFG(pGcPhysQueueDesc))
        COMMON_CFG_ACCESSOR_INDEXED(pGcPhysQueueDesc, pVirtio->uQueueSelect);
    else if (MATCH_COMMON_CFG(pGcPhysQueueAvail))
        COMMON_CFG_ACCESSOR_INDEXED(pGcPhysQueueAvail, pVirtio->uQueueSelect);
    else if (MATCH_COMMON_CFG(pGcPhysQueueUsed))
        COMMON_CFG_ACCESSOR_INDEXED(pGcPhysQueueUsed, pVirtio->uQueueSelect);
    else
    {
        Log2Func(("Bad guest %s access to virtio_pci_common_cfg: uOffset=%d, cb=%d\n",
                  fWrite ? "write" : "read ", uOffset, cb));
        rc = VERR_ACCESS_DENIED;
    }
    return rc;
}
689
690/**
691 * Memory mapped I/O Handler for PCI Capabilities read operations.
692 *
693 * @returns VBox status code.
694 *
695 * @param pDevIns The device instance.
696 * @param pvUser User argument.
697 * @param GCPhysAddr Physical address (in GC) where the read starts.
698 * @param pv Where to store the result.
699 * @param cb Number of bytes read.
700 */
701PDMBOTHCBDECL(int) virtioR3MmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
702{
703 RT_NOREF(pvUser);
704 PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);
705 int rc = VINF_SUCCESS;
706
707 MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysDeviceCap, pVirtio->pDeviceCap, fDevSpecific);
708 MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysCommonCfg, pVirtio->pCommonCfgCap, fCommonCfg);
709 MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysIsrCap, pVirtio->pIsrCap, fIsr);
710
711 if (fDevSpecific)
712 {
713 uint32_t uOffset = GCPhysAddr - pVirtio->pGcPhysDeviceCap;
714 /**
715 * Callback to client to manage device-specific configuration.
716 */
717 rc = pVirtio->virtioCallbacks.pfnVirtioDevCapRead(pDevIns, uOffset, pv, cb);
718
719 /**
720 * Additionally, anytime any part of the device-specific configuration (which our client maintains)
721 * is READ it needs to be checked to see if it changed since the last time any part was read, in
722 * order to maintain the config generation (see VirtIO 1.0 spec, section 4.1.4.3.1)
723 */
724 bool fDevSpecificFieldChanged = !!memcmp((char *)pVirtio->pDevSpecificCfg + uOffset,
725 (char *)pVirtio->pPrevDevSpecificCfg + uOffset, cb);
726
727 memcpy(pVirtio->pPrevDevSpecificCfg, pVirtio->pDevSpecificCfg, pVirtio->cbDevSpecificCfg);
728
729 if (pVirtio->fGenUpdatePending || fDevSpecificFieldChanged)
730 {
731 ++pVirtio->uConfigGeneration;
732 Log6Func(("Bumped cfg. generation to %d because %s%s\n",
733 pVirtio->uConfigGeneration,
734 fDevSpecificFieldChanged ? "<dev cfg changed> " : "",
735 pVirtio->fGenUpdatePending ? "<update was pending>" : ""));
736 pVirtio->fGenUpdatePending = false;
737 }
738 }
739 else
740 if (fCommonCfg)
741 {
742 uint32_t uOffset = GCPhysAddr - pVirtio->pGcPhysCommonCfg;
743 virtioCommonCfgAccessed(pVirtio, 0 /* fWrite */, uOffset, cb, pv);
744 }
745 else
746 if (fIsr && cb == sizeof(uint8_t))
747 {
748 *(uint8_t *)pv = pVirtio->uISR;
749 Log6Func(("Read and clear ISR\n"));
750 pVirtio->uISR = 0; /** VirtIO specification requires reads of ISR to clear it */
751 virtioLowerInterrupt(pVirtio);
752 }
753 else {
754 LogFunc(("Bad read access to mapped capabilities region:\n"
755 " pVirtio=%#p GCPhysAddr=%RGp cb=%u\n",
756 pVirtio, GCPhysAddr, pv, cb, pv, cb));
757 }
758 return rc;
759}
/**
 * Memory mapped I/O Handler for PCI Capabilities write operations.
 *
 * @returns VBox status code.
 *
 * @param pDevIns     The device instance.
 * @param pvUser      User argument (unused).
 * @param GCPhysAddr  Physical address (in GC) where the write starts.
 * @param pv          Where to fetch the data being written.
 * @param cb          Number of bytes to write.
 */
PDMBOTHCBDECL(int) virtioR3MmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
{
    RT_NOREF(pvUser);
    PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);
    int rc = VINF_SUCCESS;

    /* Classify the access by which mapped capability region it falls into. */
    MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysDeviceCap, pVirtio->pDeviceCap, fDevSpecific);
    MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysCommonCfg, pVirtio->pCommonCfgCap, fCommonCfg);
    MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysIsrCap, pVirtio->pIsrCap, fIsr);
    MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysNotifyCap, pVirtio->pNotifyCap, fNotify);

    if (fDevSpecific)
    {
        uint32_t uOffset = GCPhysAddr - pVirtio->pGcPhysDeviceCap;
        /*
         * Pass this MMIO write access back to the client to handle
         */
        rc = pVirtio->virtioCallbacks.pfnVirtioDevCapWrite(pDevIns, uOffset, pv, cb);
    }
    else
    if (fCommonCfg)
    {
        uint32_t uOffset = GCPhysAddr - pVirtio->pGcPhysCommonCfg;
        virtioCommonCfgAccessed(pVirtio, 1 /* fWrite */, uOffset, cb, pv);
    }
    else
    if (fIsr && cb == sizeof(uint8_t))
    {
        /* Guest writes the ISR directly (uncommon; reads normally clear it). */
        pVirtio->uISR = *(uint8_t *)pv;
        Log6Func(("Setting uISR = 0x%02x (virtq interrupt: %d, dev confg interrupt: %d)\n",
                  pVirtio->uISR & 0xff,
                  pVirtio->uISR & VIRTIO_ISR_VIRTQ_INTERRUPT,
                  !!(pVirtio->uISR & VIRTIO_ISR_DEVICE_CONFIG)));
    }
    else
    /* This *should* be the guest driver dropping the index of a new descriptor in the avail ring. */
    if (fNotify && cb == sizeof(uint16_t))
    {
        /* Each queue has its own notify slot, spaced VIRTIO_NOTIFY_OFFSET_MULTIPLIER apart. */
        uint32_t uNotifyBaseOffset = GCPhysAddr - pVirtio->pGcPhysNotifyCap;
        uint16_t qIdx = uNotifyBaseOffset / VIRTIO_NOTIFY_OFFSET_MULTIPLIER;
        uint16_t uAvailDescIdx = *(uint16_t *)pv;
        virtioQueueNotified(pVirtio, qIdx, uAvailDescIdx);
    }
    else
    {
        Log2Func(("Bad write access to mapped capabilities region:\n"
                  "  pVirtio=%#p GCPhysAddr=%RGp pv=%#p{%.*Rhxs} cb=%u\n",
                  pVirtio, GCPhysAddr, pv, cb, pv, cb));
    }
    return rc;
}
822
/**
 * @callback_method_impl{FNPCIIOREGIONMAP}
 *
 * Registers the MMIO handlers for the capabilities BAR and caches the mapped
 * guest-physical base addresses of each capability structure for the fast
 * region matching done in virtioR3MmioRead()/virtioR3MmioWrite().
 */
static DECLCALLBACK(int) virtioR3Map(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t iRegion,
                                     RTGCPHYS GCPhysAddress, RTGCPHYS cb, PCIADDRESSSPACE enmType)
{
    RT_NOREF3(pPciDev, iRegion, enmType);
    PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);
    int rc = VINF_SUCCESS;

    Assert(cb >= 32);

    if (iRegion == VIRTIOSCSI_REGION_PCI_CAP)
    {
        /* We use the assigned size here, because we currently only support page aligned MMIO ranges. */
        rc = PDMDevHlpMMIORegister(pDevIns, GCPhysAddress, cb, NULL /*pvUser*/,
                                   IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
                                   virtioR3MmioWrite, virtioR3MmioRead,
                                   "virtio-scsi MMIO");

        if (RT_FAILURE(rc))
        {
            Log2Func(("virtio: PCI Capabilities failed to map GCPhysAddr=%RGp cb=%RGp, region=%d\n",
                      GCPhysAddress, cb, iRegion));
            return rc;
        }
        Log2Func(("virtio: PCI Capabilities mapped at GCPhysAddr=%RGp cb=%RGp, region=%d\n",
                  GCPhysAddress, cb, iRegion));
        /* Cache guest-physical addresses of each capability struct within the BAR. */
        pVirtio->pGcPhysPciCapBase = GCPhysAddress;
        pVirtio->pGcPhysCommonCfg  = GCPhysAddress + pVirtio->pCommonCfgCap->uOffset;
        pVirtio->pGcPhysNotifyCap  = GCPhysAddress + pVirtio->pNotifyCap->pciCap.uOffset;
        pVirtio->pGcPhysIsrCap     = GCPhysAddress + pVirtio->pIsrCap->uOffset;
        /* Device-specific config region exists only when the client supplied one. */
        if (pVirtio->pPrevDevSpecificCfg)
            pVirtio->pGcPhysDeviceCap = GCPhysAddress + pVirtio->pDeviceCap->uOffset;
    }
    return rc;
}
860
861/**
862 * Callback function for reading from the PCI configuration space.
863 *
864 * @returns The register value.
865 * @param pDevIns Pointer to the device instance the PCI device
866 * belongs to.
867 * @param pPciDev Pointer to PCI device. Use pPciDev->pDevIns to get the device instance.
868 * @param uAddress The configuration space register address. [0..4096]
869 * @param cb The register size. [1,2,4]
870 *
871 * @remarks Called with the PDM lock held. The device lock is NOT take because
872 * that is very likely be a lock order violation.
873 */
874static DECLCALLBACK(uint32_t) virtioPciConfigRead(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
875 uint32_t uAddress, unsigned cb)
876{
877 PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);
878
879 if (uAddress == (uint64_t)&pVirtio->pPciCfgCap->uPciCfgData)
880 {
881 /* VirtIO 1.0 spec section 4.1.4.7 describes a required alternative access capability
882 * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space
883 * (the virtio_pci_cfg_cap capability), and access data items. */
884 uint32_t uLength = pVirtio->pPciCfgCap->pciCap.uLength;
885 uint32_t uOffset = pVirtio->pPciCfgCap->pciCap.uOffset;
886 uint8_t uBar = pVirtio->pPciCfgCap->pciCap.uBar;
887 uint32_t pv = 0;
888 if (uBar == VIRTIOSCSI_REGION_PCI_CAP)
889 (void)virtioR3MmioRead(pDevIns, NULL, (RTGCPHYS)((uint32_t)pVirtio->pGcPhysPciCapBase + uOffset),
890 &pv, uLength);
891 else
892 {
893 Log2Func(("Guest read virtio_pci_cfg_cap.pci_cfg_data using unconfigured BAR. Ignoring"));
894 return 0;
895 }
896 Log2Func(("virtio: Guest read virtio_pci_cfg_cap.pci_cfg_data, bar=%d, offset=%d, length=%d, result=%d\n",
897 uBar, uOffset, uLength, pv));
898 return pv;
899 }
900 return pVirtio->pfnPciConfigReadOld(pDevIns, pPciDev, uAddress, cb);
901}
902
903/**
904 * Callback function for writing to the PCI configuration space.
905 *
906 * @returns VINF_SUCCESS or PDMDevHlpDBGFStop status.
907 *
908 * @param pDevIns Pointer to the device instance the PCI device
909 * belongs to.
910 * @param pPciDev Pointer to PCI device. Use pPciDev->pDevIns to get the device instance.
911 * @param uAddress The configuration space register address. [0..4096]
912 * @param u32Value The value that's being written. The number of bits actually used from
913 * this value is determined by the cb parameter.
914 * @param cb The register size. [1,2,4]
915 *
916 * @remarks Called with the PDM lock held. The device lock is NOT take because
917 * that is very likely be a lock order violation.
918 */
919static DECLCALLBACK(VBOXSTRICTRC) virtioPciConfigWrite(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
920 uint32_t uAddress, uint32_t u32Value, unsigned cb)
921{
922 PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);
923
924 if (uAddress == pVirtio->uPciCfgDataOff)
925 {
926 /* VirtIO 1.0 spec section 4.1.4.7 describes a required alternative access capability
927 * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space
928 * (the virtio_pci_cfg_cap capability), and access data items. */
929 uint32_t uLength = pVirtio->pPciCfgCap->pciCap.uLength;
930 uint32_t uOffset = pVirtio->pPciCfgCap->pciCap.uOffset;
931 uint8_t uBar = pVirtio->pPciCfgCap->pciCap.uBar;
932 if (uBar == VIRTIOSCSI_REGION_PCI_CAP)
933 (void)virtioR3MmioWrite(pDevIns, NULL, (RTGCPHYS)((uint32_t)pVirtio->pGcPhysPciCapBase + uOffset),
934 (void *)&u32Value, uLength);
935 else
936 {
937 Log2Func(("Guest wrote virtio_pci_cfg_cap.pci_cfg_data using unconfigured BAR. Ignoring"));
938 return VINF_SUCCESS;
939 }
940 Log2Func(("Guest wrote virtio_pci_cfg_cap.pci_cfg_data, bar=%d, offset=%x, length=%x, value=%d\n",
941 uBar, uOffset, uLength, u32Value));
942 return VINF_SUCCESS;
943 }
944 return pVirtio->pfnPciConfigWriteOld(pDevIns, pPciDev, uAddress, u32Value, cb);
945}
946
/**
 * Get the VirtIO feature bits that have been negotiated with (accepted by)
 * the guest driver.
 *
 * @returns Feature bits accepted by the guest driver.
 *
 * @param   pVirtio     Virtio state
 */
uint64_t virtioGetAcceptedFeatures(PVIRTIOSTATE pVirtio)
{
    return pVirtio->uDriverFeatures;
}
958
/**
 * Destruct PCI-related part of device.
 *
 * We need to free non-VM resources only.
 *
 * @returns VBox status code.
 * @param   pVirtio     The device state structure.
 */
int virtioDestruct(PVIRTIOSTATE pVirtio)
{
    /* pVirtio is only consumed by Log() below, which compiles out in release
     * builds -- RT_NOREF suppresses the unused-parameter warning there. */
    RT_NOREF(pVirtio);
    Log(("%s Destroying PCI instance\n", INSTANCE(pVirtio)));
    return VINF_SUCCESS;
}
973
974/**
975 * Setup PCI device controller and Virtio state
976 *
977 * @param pDevIns Device instance data
978 * @param pClientContext Opaque client context (such as state struct, ...)
979 * @param pVirtio Device State
980 * @param pPciParams Values to populate industry standard PCI Configuration Space data structure
981 * @param pcszInstance Device instance name (format-specifier)
982 * @param uDevSpecificFeatures VirtIO device-specific features offered by client
983 * @param devCapReadCallback Client handler to call upon guest read to device specific capabilities.
984 * @param devCapWriteCallback Client handler to call upon guest write to device specific capabilities.
985 * @param devStatusChangedCallback Client handler to call for major device status changes
986 * @param queueNotifiedCallback Client handler for guest-to-host notifications that avail queue has ring data
987 * @param ssmLiveExecCallback Client handler for SSM live exec
988 * @param ssmSaveExecCallback Client handler for SSM save exec
989 * @param ssmLoadExecCallback Client handler for SSM load exec
990 * @param ssmLoadDoneCallback Client handler for SSM load done
991 * @param cbDevSpecificCfg Size of virtio_pci_device_cap device-specific struct
992 * @param pDevSpecificCfg Address of client's dev-specific configuration struct.
993 */
994int virtioConstruct(PPDMDEVINS pDevIns,
995 void *pClientContext,
996 VIRTIOHANDLE *phVirtio,
997 PVIRTIOPCIPARAMS pPciParams,
998 const char *pcszInstance,
999 uint64_t uDevSpecificFeatures,
1000 PFNVIRTIODEVCAPREAD devCapReadCallback,
1001 PFNVIRTIODEVCAPWRITE devCapWriteCallback,
1002 PFNVIRTIOSTATUSCHANGED devStatusChangedCallback,
1003 PFNVIRTIOQUEUENOTIFIED queueNotifiedCallback,
1004 PFNSSMDEVLIVEEXEC ssmLiveExecCallback,
1005 PFNSSMDEVSAVEEXEC ssmSaveExecCallback,
1006 PFNSSMDEVLOADEXEC ssmLoadExecCallback,
1007 PFNSSMDEVLOADDONE ssmLoadDoneCallback,
1008 uint16_t cbDevSpecificCfg,
1009 void *pDevSpecificCfg)
1010{
1011
1012 int rc = VINF_SUCCESS;
1013
1014 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)RTMemAllocZ(sizeof(VIRTIOSTATE));
1015 if (!pVirtio)
1016 {
1017 PDMDEV_SET_ERROR(pDevIns, VERR_NO_MEMORY, N_("virtio: out of memory"));
1018 return VERR_NO_MEMORY;
1019 }
1020
1021 pVirtio->pClientContext = pClientContext;
1022
1023 /**
1024 * The host features offered include both device-specific features
1025 * and reserved feature bits (device independent)
1026 */
1027 pVirtio->uDeviceFeatures = VIRTIO_F_VERSION_1
1028 | VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED
1029 | uDevSpecificFeatures;
1030
1031 RTStrCopy(pVirtio->szInstance, sizeof(pVirtio->szInstance), pcszInstance);
1032
1033 pVirtio->pDevInsR3 = pDevIns;
1034 pVirtio->pDevInsR0 = PDMDEVINS_2_R0PTR(pDevIns);
1035 pVirtio->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
1036 pVirtio->uDeviceStatus = 0;
1037 pVirtio->cbDevSpecificCfg = cbDevSpecificCfg;
1038 pVirtio->pDevSpecificCfg = pDevSpecificCfg;
1039
1040 pVirtio->pPrevDevSpecificCfg = RTMemAllocZ(cbDevSpecificCfg);
1041 if (!pVirtio->pPrevDevSpecificCfg)
1042 {
1043 RTMemFree(pVirtio);
1044 PDMDEV_SET_ERROR(pDevIns, VERR_NO_MEMORY, N_("virtio: out of memory"));
1045 return VERR_NO_MEMORY;
1046 }
1047
1048 memcpy(pVirtio->pPrevDevSpecificCfg, pVirtio->pDevSpecificCfg, cbDevSpecificCfg);
1049 pVirtio->virtioCallbacks.pfnVirtioDevCapRead = devCapReadCallback;
1050 pVirtio->virtioCallbacks.pfnVirtioDevCapWrite = devCapWriteCallback;
1051 pVirtio->virtioCallbacks.pfnVirtioStatusChanged = devStatusChangedCallback;
1052 pVirtio->virtioCallbacks.pfnVirtioQueueNotified = queueNotifiedCallback;
1053 pVirtio->virtioCallbacks.pfnSSMDevLiveExec = ssmLiveExecCallback;
1054 pVirtio->virtioCallbacks.pfnSSMDevSaveExec = ssmSaveExecCallback;
1055 pVirtio->virtioCallbacks.pfnSSMDevLoadExec = ssmLoadExecCallback;
1056 pVirtio->virtioCallbacks.pfnSSMDevLoadDone = ssmLoadDoneCallback;
1057
1058
1059 /* Set PCI config registers (assume 32-bit mode) */
1060 PCIDevSetRevisionId (&pVirtio->dev, DEVICE_PCI_REVISION_ID_VIRTIO);
1061 PCIDevSetVendorId (&pVirtio->dev, DEVICE_PCI_VENDOR_ID_VIRTIO);
1062 PCIDevSetSubSystemVendorId (&pVirtio->dev, DEVICE_PCI_VENDOR_ID_VIRTIO);
1063 PCIDevSetDeviceId (&pVirtio->dev, pPciParams->uDeviceId);
1064 PCIDevSetClassBase (&pVirtio->dev, pPciParams->uClassBase);
1065 PCIDevSetClassSub (&pVirtio->dev, pPciParams->uClassSub);
1066 PCIDevSetClassProg (&pVirtio->dev, pPciParams->uClassProg);
1067 PCIDevSetSubSystemId (&pVirtio->dev, pPciParams->uSubsystemId);
1068 PCIDevSetInterruptLine (&pVirtio->dev, pPciParams->uInterruptLine);
1069 PCIDevSetInterruptPin (&pVirtio->dev, pPciParams->uInterruptPin);
1070
1071 /* Register PCI device */
1072 rc = PDMDevHlpPCIRegister(pDevIns, &pVirtio->dev);
1073 if (RT_FAILURE(rc))
1074 {
1075 RTMemFree(pVirtio);
1076 return PDMDEV_SET_ERROR(pDevIns, rc,
1077 N_("virtio: cannot register PCI Device")); /* can we put params in this error? */
1078 }
1079
1080 rc = PDMDevHlpSSMRegisterEx(pDevIns, VIRTIO_SAVEDSTATE_VERSION, sizeof(*pVirtio), NULL,
1081 NULL, virtioR3LiveExec, NULL, NULL, virtioR3SaveExec, NULL,
1082 NULL, virtioR3LoadExec, virtioR3LoadDone);
1083 if (RT_FAILURE(rc))
1084 {
1085 RTMemFree(pVirtio);
1086 return PDMDEV_SET_ERROR(pDevIns, rc,
1087 N_("virtio: cannot register SSM callbacks"));
1088 }
1089
1090 PDMDevHlpPCISetConfigCallbacks(pDevIns, &pVirtio->dev,
1091 virtioPciConfigRead, &pVirtio->pfnPciConfigReadOld,
1092 virtioPciConfigWrite, &pVirtio->pfnPciConfigWriteOld);
1093
1094
1095 /** Construct & map PCI vendor-specific capabilities for virtio host negotiation with guest driver */
1096
1097#if 0 && defined(VBOX_WITH_MSI_DEVICES) /* T.B.D. */
1098 uint8_t fMsiSupport = true;
1099#else
1100 uint8_t fMsiSupport = false;
1101#endif
1102
1103 /** The following capability mapped via VirtIO 1.0: struct virtio_pci_cfg_cap (VIRTIO_PCI_CFG_CAP_T)
1104 * as a mandatory but suboptimal alternative interface to host device capabilities, facilitating
1105 * access the memory of any BAR. If the guest uses it (the VirtIO driver on Linux doesn't),
1106 * Unlike Common, Notify, ISR and Device capabilities, it is accessed directly via PCI Config region.
1107 * therefore does not contribute to the capabilities region (BAR) the other capabilities use.
1108 */
1109#define CFGADDR2IDX(addr) ((uint64_t)addr - (uint64_t)&pVirtio->dev.abConfig)
1110
1111 PVIRTIO_PCI_CAP_T pCfg;
1112 uint32_t cbRegion = 0;
1113
1114 /* Common capability (VirtIO 1.0 spec, section 4.1.4.3) */
1115 pCfg = (PVIRTIO_PCI_CAP_T)&pVirtio->dev.abConfig[0x40];
1116 pCfg->uCfgType = VIRTIO_PCI_CAP_COMMON_CFG;
1117 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1118 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
1119 pCfg->uCapNext = CFGADDR2IDX(pCfg) + pCfg->uCapLen;
1120 pCfg->uBar = VIRTIOSCSI_REGION_PCI_CAP;
1121 pCfg->uOffset = RT_ALIGN_32(0, 4); /* reminder, in case someone changes offset */
1122 pCfg->uLength = sizeof(VIRTIO_PCI_COMMON_CFG_T);
1123 cbRegion += pCfg->uLength;
1124 pVirtio->pCommonCfgCap = pCfg;
1125
1126 /**
1127 * Notify capability (VirtIO 1.0 spec, section 4.1.4.4). Note: uLength is based the choice
1128 * of this implementation that each queue's uQueueNotifyOff is set equal to (QueueSelect) ordinal
1129 * value of the queue */
1130 pCfg = (PVIRTIO_PCI_CAP_T)&pVirtio->dev.abConfig[pCfg->uCapNext];
1131 pCfg->uCfgType = VIRTIO_PCI_CAP_NOTIFY_CFG;
1132 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1133 pCfg->uCapLen = sizeof(VIRTIO_PCI_NOTIFY_CAP_T);
1134 pCfg->uCapNext = CFGADDR2IDX(pCfg) + pCfg->uCapLen;
1135 pCfg->uBar = VIRTIOSCSI_REGION_PCI_CAP;
1136 pCfg->uOffset = pVirtio->pCommonCfgCap->uOffset + pVirtio->pCommonCfgCap->uLength;
1137 pCfg->uOffset = RT_ALIGN_32(pCfg->uOffset, 2);
1138 pCfg->uLength = VIRTQ_MAX_CNT * VIRTIO_NOTIFY_OFFSET_MULTIPLIER + 2; /* will change in VirtIO 1.1 */
1139 cbRegion += pCfg->uLength;
1140 pVirtio->pNotifyCap = (PVIRTIO_PCI_NOTIFY_CAP_T)pCfg;
1141 pVirtio->pNotifyCap->uNotifyOffMultiplier = VIRTIO_NOTIFY_OFFSET_MULTIPLIER;
1142
1143 /** ISR capability (VirtIO 1.0 spec, section 4.1.4.5)
1144 *
1145 * VirtIO 1.0 spec says 8-bit, unaligned in MMIO space. Example/diagram
1146 * of spec shows it as a 32-bit field with upper bits 'reserved'
1147 * Will take spec words more literally than the diagram for now.
1148 */
1149 pCfg = (PVIRTIO_PCI_CAP_T)&pVirtio->dev.abConfig[pCfg->uCapNext];
1150 pCfg->uCfgType = VIRTIO_PCI_CAP_ISR_CFG;
1151 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1152 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
1153 pCfg->uCapNext = CFGADDR2IDX(pCfg) + pCfg->uCapLen;
1154 pCfg->uBar = VIRTIOSCSI_REGION_PCI_CAP;
1155 pCfg->uOffset = pVirtio->pNotifyCap->pciCap.uOffset + pVirtio->pNotifyCap->pciCap.uLength;
1156 pCfg->uLength = sizeof(uint8_t);
1157 cbRegion += pCfg->uLength;
1158 pVirtio->pIsrCap = pCfg;
1159
1160 /** PCI Cfg capability (VirtIO 1.0 spec, section 4.1.4.7)
1161 * This capability doesn't get page-MMIO mapped. Instead uBar, uOffset and uLength are intercepted
1162 * by trapping PCI configuration I/O and get modulated by consumers to locate fetch and read/write
1163 * values from any region. NOTE: The linux driver not only doesn't use this feature, and will not
1164 * even list it as present if uLength isn't non-zero and 4-byte-aligned as the linux driver is
1165 * initializing. */
1166
1167 pCfg = (PVIRTIO_PCI_CAP_T)&pVirtio->dev.abConfig[pCfg->uCapNext];
1168 pCfg->uCfgType = VIRTIO_PCI_CAP_PCI_CFG;
1169 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1170 pCfg->uCapLen = sizeof(VIRTIO_PCI_CFG_CAP_T);
1171 pCfg->uCapNext = (fMsiSupport || pVirtio->pDevSpecificCfg) ? CFGADDR2IDX(pCfg) + pCfg->uCapLen : 0;
1172 pCfg->uBar = 0;
1173 pCfg->uOffset = 0;
1174 pCfg->uLength = 0;
1175 cbRegion += pCfg->uLength;
1176 pVirtio->pPciCfgCap = (PVIRTIO_PCI_CFG_CAP_T)pCfg;
1177
1178 if (pVirtio->pDevSpecificCfg)
1179 {
1180 /** Following capability (via VirtIO 1.0, section 4.1.4.6). Client defines the
1181 * device-specific config fields struct and passes size to this constructor */
1182 pCfg = (PVIRTIO_PCI_CAP_T)&pVirtio->dev.abConfig[pCfg->uCapNext];
1183 pCfg->uCfgType = VIRTIO_PCI_CAP_DEVICE_CFG;
1184 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1185 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
1186 pCfg->uCapNext = fMsiSupport ? CFGADDR2IDX(pCfg) + pCfg->uCapLen : 0;
1187 pCfg->uBar = VIRTIOSCSI_REGION_PCI_CAP;
1188 pCfg->uOffset = pVirtio->pIsrCap->uOffset + pVirtio->pIsrCap->uLength;
1189 pCfg->uOffset = RT_ALIGN_32(pCfg->uOffset, 4);
1190 pCfg->uLength = cbDevSpecificCfg;
1191 cbRegion += pCfg->uLength;
1192 pVirtio->pDeviceCap = pCfg;
1193 }
1194
1195 /** Set offset to first capability and enable PCI dev capabilities */
1196 PCIDevSetCapabilityList (&pVirtio->dev, 0x40);
1197 PCIDevSetStatus (&pVirtio->dev, VBOX_PCI_STATUS_CAP_LIST);
1198
1199 if (fMsiSupport)
1200 {
1201 PDMMSIREG aMsiReg;
1202 RT_ZERO(aMsiReg);
1203 aMsiReg.iMsixCapOffset = pCfg->uCapNext;
1204 aMsiReg.iMsixNextOffset = 0;
1205 aMsiReg.iMsixBar = 0;
1206 aMsiReg.cMsixVectors = 1;
1207 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &aMsiReg); /* see MsixR3init() */
1208 if (RT_FAILURE (rc))
1209 /** The following is moot, we need to flag no MSI-X support */
1210 PCIDevSetCapabilityList(&pVirtio->dev, 0x40);
1211 }
1212
1213 /** Linux drivers/virtio/virtio_pci_modern.c tries to map at least a page for the
1214 * 'unknown' device-specific capability without querying the capability to figure
1215 * out size, so pad with an extra page */
1216
1217 rc = PDMDevHlpPCIIORegionRegister(pDevIns, VIRTIOSCSI_REGION_PCI_CAP, RT_ALIGN_32(cbRegion + 0x1000, 0x1000),
1218 PCI_ADDRESS_SPACE_MEM, virtioR3Map);
1219 if (RT_FAILURE(rc))
1220 {
1221 RTMemFree(pVirtio->pPrevDevSpecificCfg);
1222 RTMemFree(pVirtio);
1223 return PDMDEV_SET_ERROR(pDevIns, rc,
1224 N_("virtio: cannot register PCI Capabilities address space"));
1225 }
1226 *phVirtio = (VIRTIOHANDLE)pVirtio;
1227 return rc;
1228}
1229
1230#ifdef VBOX_DEVICE_STRUCT_TESTCASE
1231# define virtioDumpState(x, s) do {} while (0)
1232#else
1233# ifdef DEBUG
1234
1235static void virtioDumpState(PVIRTIOSTATE pVirtio, const char *pcszCaller)
1236{
1237 Log2Func(("(called from %s)\n"
1238 " uDeviceFeatures = 0x%08x\n uDriverFeatures = 0x%08x\n"
1239 " uDeviceFeaturesSelect = 0x%04x\n uGuestFeaturesSelect = 0x%04x\n"
1240 " uDeviceStatus = 0x%02x\n uConfigGeneration = 0x%02x\n"
1241 " uQueueSelect = 0x%04x\n uNumQueues = 0x%04x\n"
1242 " uISR = 0x%02x\n fGenUpdatePending = 0x%02x\n"
1243 " uPciCfgDataOff = 0x%02x\n pGcPhysPciCapBase = %RGp\n"
1244 " pGcPhysCommonCfg = %RGp\n pGcPhysNotifyCap = %RGp\n"
1245 " pGcPhysIsrCap = %RGp\n pGcPhysDeviceCap = %RGp\n"
1246 " pDevSpecificCap = %p\n cbDevSpecificCap = 0x%04x\n"
1247 " pfnVirtioStatusChanged = %p\n pfnVirtioQueueNotified = %p\n"
1248 " pfnVirtioDevCapRead = %p\n pfnVirtioDevCapWrite = %p\n"
1249 " pfnSSMDevLiveExec = %p\n pfnSSMDevSaveExec = %p\n"
1250 " pfnSSMDevLoadExec = %p\n pfnSSMDevLoadDone = %p\n"
1251 " pfnPciConfigReadOld = %p\n pfnPciConfigWriteOld = %p\n",
1252 pcszCaller ? pcszCaller : "<unspecified>",
1253 pVirtio->uDeviceFeatures, pVirtio->uDriverFeatures, pVirtio->uDeviceFeaturesSelect,
1254 pVirtio->uDriverFeaturesSelect, pVirtio->uDeviceStatus, pVirtio->uConfigGeneration,
1255 pVirtio->uQueueSelect, pVirtio->uNumQueues, pVirtio->uISR, pVirtio->fGenUpdatePending,
1256 pVirtio->uPciCfgDataOff, pVirtio->pGcPhysPciCapBase, pVirtio->pGcPhysCommonCfg,
1257 pVirtio->pGcPhysNotifyCap, pVirtio->pGcPhysIsrCap, pVirtio->pGcPhysDeviceCap,
1258 pVirtio->pDevSpecificCfg, pVirtio->cbDevSpecificCfg, pVirtio->virtioCallbacks.pfnVirtioStatusChanged,
1259 pVirtio->virtioCallbacks.pfnVirtioQueueNotified, pVirtio->virtioCallbacks.pfnVirtioDevCapRead,
1260 pVirtio->virtioCallbacks.pfnVirtioDevCapWrite, pVirtio->virtioCallbacks.pfnSSMDevLiveExec,
1261 pVirtio->virtioCallbacks.pfnSSMDevSaveExec, pVirtio->virtioCallbacks.pfnSSMDevLoadExec,
1262 pVirtio->virtioCallbacks.pfnSSMDevLoadDone, pVirtio->pfnPciConfigReadOld,
1263 pVirtio->pfnPciConfigWriteOld
1264 ));
1265
1266 for (uint16_t i = 0; i < pVirtio->uNumQueues; i++)
1267 {
1268 Log2Func(("%s queue:\n",
1269 " virtqProxy[%u].uAvailIdx = %u\n virtqProxy[%u].uUsedIdx = %u\n"
1270 " uQueueSize[%u] = %u\n uQueueNotifyOff[%u] = %04x\n"
1271 " uQueueMsixVector[%u] = %04x\n uQueueEnable[%u] = %04x\n"
1272 " pGcPhysQueueDesc[%u] = %RGp\n pGcPhysQueueAvail[%u] = %RGp\n"
1273 " pGcPhysQueueUsed[%u] = %RGp\n",
1274 i, pVirtio->virtqProxy[i].szVirtqName, i, pVirtio->virtqProxy[i].uAvailIdx,
1275 i, pVirtio->virtqProxy[i].uUsedIdx, i, pVirtio->uQueueSize[i],
1276 i, pVirtio->uQueueNotifyOff[i],i, pVirtio->uQueueMsixVector[i],
1277 i, pVirtio->uQueueEnable[i], i, pVirtio->pGcPhysQueueDesc[i],
1278 i, pVirtio->pGcPhysQueueAvail[i], i, pVirtio->pGcPhysQueueUsed[i]
1279 ));
1280 }
1281}
1282# endif
1283#endif
1284
1285#ifdef IN_RING3
1286
 /** @callback_method_impl{FNSSMDEVSAVEEXEC}
  *
  * Serializes the common VirtIO state (status, feature negotiation, capability
  * physical addresses, and per-queue proxy state) to the saved-state stream,
  * then delegates to the client's save-exec callback for device-specific state.
  *
  * NOTE(review): each SSMR3Put* call overwrites rc, so intermediate failures
  * are not detected -- only the status of the client callback is returned.
  *
  * NOTE(review): host pointers (pDevSpecificCfg, the callback function
  * pointers, pfnPciConfigReadOld/WriteOld) are written to the stream as raw
  * 64-bit values; they cannot be meaningfully restored in another process
  * image -- TODO confirm the load path discards or re-derives them.
  */
static DECLCALLBACK(int) virtioR3SaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
{
    PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);

    int rc = VINF_SUCCESS;
    virtioDumpState(pVirtio, "virtioSaveExec");

    /* Scalar common state; order here defines the saved-state wire format and
     * must match virtioR3LoadExec exactly. */
    rc = SSMR3PutBool(pSSM,   pVirtio->fGenUpdatePending);
    rc = SSMR3PutU8(pSSM,     pVirtio->uDeviceStatus);
    rc = SSMR3PutU8(pSSM,     pVirtio->uConfigGeneration);
    rc = SSMR3PutU8(pSSM,     pVirtio->uPciCfgDataOff);
    rc = SSMR3PutU8(pSSM,     pVirtio->uISR);
    rc = SSMR3PutU16(pSSM,    pVirtio->uQueueSelect);
    rc = SSMR3PutU32(pSSM,    pVirtio->uDeviceFeaturesSelect);
    rc = SSMR3PutU32(pSSM,    pVirtio->uDriverFeaturesSelect);
    rc = SSMR3PutU32(pSSM,    pVirtio->uNumQueues);
    rc = SSMR3PutU32(pSSM,    pVirtio->cbDevSpecificCfg);
    rc = SSMR3PutU64(pSSM,    pVirtio->uDeviceFeatures);
    rc = SSMR3PutU64(pSSM,    pVirtio->uDriverFeatures);
    rc = SSMR3PutU64(pSSM,    (uint64_t)pVirtio->pDevSpecificCfg);
    rc = SSMR3PutU64(pSSM,    (uint64_t)pVirtio->virtioCallbacks.pfnVirtioStatusChanged);
    rc = SSMR3PutU64(pSSM,    (uint64_t)pVirtio->virtioCallbacks.pfnVirtioQueueNotified);
    rc = SSMR3PutU64(pSSM,    (uint64_t)pVirtio->virtioCallbacks.pfnVirtioDevCapRead);
    rc = SSMR3PutU64(pSSM,    (uint64_t)pVirtio->virtioCallbacks.pfnVirtioDevCapWrite);
    rc = SSMR3PutU64(pSSM,    (uint64_t)pVirtio->virtioCallbacks.pfnSSMDevLiveExec);
    rc = SSMR3PutU64(pSSM,    (uint64_t)pVirtio->virtioCallbacks.pfnSSMDevSaveExec);
    rc = SSMR3PutU64(pSSM,    (uint64_t)pVirtio->virtioCallbacks.pfnSSMDevLoadExec);
    rc = SSMR3PutU64(pSSM,    (uint64_t)pVirtio->virtioCallbacks.pfnSSMDevLoadDone);
    rc = SSMR3PutU64(pSSM,    (uint64_t)pVirtio->pfnPciConfigReadOld);
    rc = SSMR3PutU64(pSSM,    (uint64_t)pVirtio->pfnPciConfigWriteOld);
    rc = SSMR3PutGCPhys(pSSM, pVirtio->pGcPhysCommonCfg);
    rc = SSMR3PutGCPhys(pSSM, pVirtio->pGcPhysNotifyCap);
    rc = SSMR3PutGCPhys(pSSM, pVirtio->pGcPhysIsrCap);
    rc = SSMR3PutGCPhys(pSSM, pVirtio->pGcPhysDeviceCap);
    rc = SSMR3PutGCPhys(pSSM, pVirtio->pGcPhysPciCapBase);

    /* Per-queue ring addresses, configuration and proxy indices. */
    for (uint16_t i = 0; i < pVirtio->uNumQueues; i++)
    {
        rc = SSMR3PutGCPhys64(pSSM, pVirtio->pGcPhysQueueDesc[i]);
        rc = SSMR3PutGCPhys64(pSSM, pVirtio->pGcPhysQueueAvail[i]);
        rc = SSMR3PutGCPhys64(pSSM, pVirtio->pGcPhysQueueUsed[i]);
        rc = SSMR3PutU16(pSSM,      pVirtio->uQueueNotifyOff[i]);
        rc = SSMR3PutU16(pSSM,      pVirtio->uQueueMsixVector[i]);
        rc = SSMR3PutU16(pSSM,      pVirtio->uQueueEnable[i]);
        rc = SSMR3PutU16(pSSM,      pVirtio->uQueueSize[i]);
        rc = SSMR3PutU16(pSSM,      pVirtio->virtqProxy[i].uAvailIdx);
        rc = SSMR3PutU16(pSSM,      pVirtio->virtqProxy[i].uUsedIdx);
        rc = SSMR3PutMem(pSSM,      pVirtio->virtqProxy[i].szVirtqName, 32);
    }

    /* Let the client save its device-specific state. */
    rc = pVirtio->virtioCallbacks.pfnSSMDevSaveExec(pDevIns, pSSM);
    return rc;
}
1341
 /** @callback_method_impl{FNSSMDEVLOADEXEC}
  *
  * Restores the common VirtIO state written by virtioR3SaveExec (same field
  * order), then delegates to the client's load-exec callback.  The common
  * state is only present in the final pass.
  *
  * NOTE(review): each SSMR3Get* call overwrites rc, so intermediate failures
  * are not detected -- only the client callback's status is returned.
  *
  * NOTE(review): this reads saved host pointers (pDevSpecificCfg, callback
  * and old PCI-config function pointers) straight into the live state,
  * clobbering the values set up by virtioConstruct -- valid only if the
  * process image is identical; TODO confirm this is intentional.
  */
static DECLCALLBACK(int) virtioR3LoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    RT_NOREF(uVersion);

    PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);

    int rc = VINF_SUCCESS;
    virtioDumpState(pVirtio, "virtioLoadExec");

    if (uPass == SSM_PASS_FINAL)
    {
        /* Field order must mirror virtioR3SaveExec exactly. */
        rc = SSMR3GetBool(pSSM,  &pVirtio->fGenUpdatePending);
        rc = SSMR3GetU8(pSSM,    &pVirtio->uDeviceStatus);
        rc = SSMR3GetU8(pSSM,    &pVirtio->uConfigGeneration);
        rc = SSMR3GetU8(pSSM,    &pVirtio->uPciCfgDataOff);
        rc = SSMR3GetU8(pSSM,    &pVirtio->uISR);
        rc = SSMR3GetU16(pSSM,   &pVirtio->uQueueSelect);
        rc = SSMR3GetU32(pSSM,   &pVirtio->uDeviceFeaturesSelect);
        rc = SSMR3GetU32(pSSM,   &pVirtio->uDriverFeaturesSelect);
        rc = SSMR3GetU32(pSSM,   &pVirtio->uNumQueues);
        rc = SSMR3GetU32(pSSM,   &pVirtio->cbDevSpecificCfg);
        rc = SSMR3GetU64(pSSM,   &pVirtio->uDeviceFeatures);
        rc = SSMR3GetU64(pSSM,   &pVirtio->uDriverFeatures);
        rc = SSMR3GetU64(pSSM,   (uint64_t *)&pVirtio->pDevSpecificCfg);
        rc = SSMR3GetU64(pSSM,   (uint64_t *)&pVirtio->virtioCallbacks.pfnVirtioStatusChanged);
        rc = SSMR3GetU64(pSSM,   (uint64_t *)&pVirtio->virtioCallbacks.pfnVirtioQueueNotified);
        rc = SSMR3GetU64(pSSM,   (uint64_t *)&pVirtio->virtioCallbacks.pfnVirtioDevCapRead);
        rc = SSMR3GetU64(pSSM,   (uint64_t *)&pVirtio->virtioCallbacks.pfnVirtioDevCapWrite);
        rc = SSMR3GetU64(pSSM,   (uint64_t *)&pVirtio->virtioCallbacks.pfnSSMDevLiveExec);
        rc = SSMR3GetU64(pSSM,   (uint64_t *)&pVirtio->virtioCallbacks.pfnSSMDevSaveExec);
        rc = SSMR3GetU64(pSSM,   (uint64_t *)&pVirtio->virtioCallbacks.pfnSSMDevLoadExec);
        rc = SSMR3GetU64(pSSM,   (uint64_t *)&pVirtio->virtioCallbacks.pfnSSMDevLoadDone);
        rc = SSMR3GetU64(pSSM,   (uint64_t *)&pVirtio->pfnPciConfigReadOld);
        rc = SSMR3GetU64(pSSM,   (uint64_t *)&pVirtio->pfnPciConfigWriteOld);
        rc = SSMR3GetGCPhys(pSSM, &pVirtio->pGcPhysCommonCfg);
        rc = SSMR3GetGCPhys(pSSM, &pVirtio->pGcPhysNotifyCap);
        rc = SSMR3GetGCPhys(pSSM, &pVirtio->pGcPhysIsrCap);
        rc = SSMR3GetGCPhys(pSSM, &pVirtio->pGcPhysDeviceCap);
        rc = SSMR3GetGCPhys(pSSM, &pVirtio->pGcPhysPciCapBase);

        /* Per-queue ring addresses, configuration and proxy indices.
         * NOTE(review): uNumQueues was just read from the stream and is used
         * unvalidated as the loop bound -- TODO confirm it is range-checked
         * elsewhere before indexing the fixed-size arrays. */
        for (uint16_t i = 0; i < pVirtio->uNumQueues; i++)
        {
            rc = SSMR3GetGCPhys64(pSSM, &pVirtio->pGcPhysQueueDesc[i]);
            rc = SSMR3GetGCPhys64(pSSM, &pVirtio->pGcPhysQueueAvail[i]);
            rc = SSMR3GetGCPhys64(pSSM, &pVirtio->pGcPhysQueueUsed[i]);
            rc = SSMR3GetU16(pSSM,      &pVirtio->uQueueNotifyOff[i]);
            rc = SSMR3GetU16(pSSM,      &pVirtio->uQueueMsixVector[i]);
            rc = SSMR3GetU16(pSSM,      &pVirtio->uQueueEnable[i]);
            rc = SSMR3GetU16(pSSM,      &pVirtio->uQueueSize[i]);
            rc = SSMR3GetU16(pSSM,      &pVirtio->virtqProxy[i].uAvailIdx);
            rc = SSMR3GetU16(pSSM,      &pVirtio->virtqProxy[i].uUsedIdx);
            rc = SSMR3GetMem(pSSM,      (void *)&pVirtio->virtqProxy[i].szVirtqName, 32);
        }
    }

    /* Let the client restore its device-specific state (all passes). */
    rc = pVirtio->virtioCallbacks.pfnSSMDevLoadExec(pDevIns, pSSM, uVersion, uPass);

    return rc;
}
1402
1403/** @callback_method_impl{FNSSMDEVLOADDONE} */
1404static DECLCALLBACK(int) virtioR3LoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
1405{
1406 PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);
1407
1408 int rc = VINF_SUCCESS;
1409 virtioDumpState(pVirtio, "virtioLoadDone");
1410
1411 rc = pVirtio->virtioCallbacks.pfnSSMDevLoadDone(pDevIns, pSSM);
1412
1413 return rc;
1414}
1415
1416/** @callback_method_impl{FNSSMDEVLIVEEXEC} */
1417static DECLCALLBACK(int) virtioR3LiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass)
1418{
1419 PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);
1420
1421 int rc = VINF_SUCCESS;
1422 virtioDumpState(pVirtio, "virtioLiveExec");
1423
1424 rc = pVirtio->virtioCallbacks.pfnSSMDevLiveExec(pDevIns, pSSM, uPass);
1425
1426 return rc;
1427}
1428
1429 /**
1430 * Do a hex dump of a buffer
1431 *
1432 * @param pv Pointer to array to dump
1433 * @param cb Number of characters to dump
1434 * @param uBase Base address of offset addresses displayed
1435 * @param pszTitle Header line/title for the dump
1436 *
1437 */
1438 void virtioHexDump(uint8_t *pv, size_t cb, uint32_t uBase, const char *pszTitle)
1439 {
1440 if (pszTitle)
1441 Log(("%s [%d bytes]:\n", pszTitle, cb));
1442 for (uint32_t row = 0; row < RT_MAX(1, (cb / 16) + 1) && row * 16 < cb; row++)
1443 {
1444 Log(("%04x: ", row * 16 + uBase)); /* line address */
1445 for (uint8_t col = 0; col < 16; col++)
1446 {
1447 uint32_t idx = row * 16 + col;
1448 if (idx >= cb)
1449 Log(("-- %s", (col + 1) % 8 ? "" : " "));
1450 else
1451 Log(("%02x %s", pv[idx], (col + 1) % 8 ? "" : " "));
1452 }
1453 for (uint32_t idx = row * 16; idx < row * 16 + 16; idx++)
1454 Log(("%c", (idx >= cb) ? ' ' : (pv[idx] >= 0x20 && pv[idx] <= 0x7e ? pv[idx] : '.')));
1455 Log(("\n"));
1456 }
1457 Log(("\n"));
1458 }
1459
1460
/**
 * Formats the logging of a memory-mapped I/O input or output value
 *
 * @param   pszFunc     - To avoid displaying this function's name via __FUNCTION__ or Log2Func()
 * @param   pszMember   - Name of struct member
 * @param   uMemberSize - Size of the full struct member, used to detect partial accesses
 * @param   pv          - Pointer to value
 * @param   cb          - Size of value
 * @param   uOffset     - Offset into member where value starts
 * @param   fWrite      - True if write I/O
 * @param   fHasIndex   - True if the member is indexed
 * @param   idx         - The index, if fHasIndex is true
 */
void virtioLogMappedIoValue(const char *pszFunc, const char *pszMember, size_t uMemberSize,
                        const void *pv, uint32_t cb, uint32_t uOffset, bool fWrite,
                        bool fHasIndex, uint32_t idx)
{

    /* Render 'val' as exactly cNybbles lowercase hex digits into fmtout (zero-padded). */
#define FMTHEX(fmtout, val, cNybbles) \
    fmtout[cNybbles] = '\0'; \
    for (uint8_t i = 0; i < cNybbles; i++) \
        fmtout[(cNybbles - i) - 1] = "0123456789abcdef"[(val >> (i * 4)) & 0xf];

#define MAX_STRING   64
    char pszIdx[MAX_STRING] = { 0 };
    char pszDepiction[MAX_STRING] = { 0 };
    char pszFormattedVal[MAX_STRING] = { 0 };
    if (fHasIndex)
        RTStrPrintf(pszIdx, sizeof(pszIdx), "[%d]", idx);
    if (cb == 1 || cb == 2 || cb == 4 || cb == 8)
    {
        /* manually padding with 0's instead of \b due to different impl of %x precision than printf() */
        uint64_t val = 0;
        memcpy((char *)&val, pv, cb);
        FMTHEX(pszFormattedVal, val, cb * 2)
        if (uOffset != 0 || cb != uMemberSize) /* display bounds if partial member access */
            RTStrPrintf(pszDepiction, sizeof(pszDepiction), "%s%s[%d:%d]",
                        pszMember, pszIdx, uOffset, uOffset + cb - 1);
        else
            RTStrPrintf(pszDepiction, sizeof(pszDepiction), "%s%s", pszMember, pszIdx);
        RTStrPrintf(pszDepiction, sizeof(pszDepiction), "%-30s", pszDepiction);
        /* Replace all padding spaces after the first with dots, to visually
         * align the value column in the log. */
        uint32_t first = 0;
        for (uint8_t i = 0; i < sizeof(pszDepiction); i++)
            if (pszDepiction[i] == ' ' && first++)
                pszDepiction[i] = '.';
        Log6Func(("%s: Guest %s %s 0x%s\n",
                  pszFunc, fWrite ? "wrote" : "read ", pszDepiction, pszFormattedVal));
    }
    else /* odd number or oversized access, ... log inline hex-dump style */
    {
        Log6Func(("%s: Guest %s %s%s[%d:%d]: %.*Rhxs\n",
                  pszFunc, fWrite ? "wrote" : "read ", pszMember,
                  pszIdx, uOffset, uOffset + cb, cb, pv));
    }
}
1515
1516#endif /* IN_RING3 */
1517
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette