VirtualBox

source: vbox/trunk/src/VBox/Devices/VirtIO/Virtio_1_0.cpp@ 80802

Last change on this file since 80802 was 80762, checked in by vboxsync, 6 years ago

Storage/DevVirtioSCSI.cpp: Added multi-target support. See bugref:9440, Comment #90

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 63.2 KB
Line 
1/* $Id: Virtio_1_0.cpp 80762 2019-09-13 02:33:47Z vboxsync $ */
2/** @file
3 * Virtio_1_0 - Virtio Common (PCI, feature & config mgt, queue mgt & proxy, notification mgt)
4 */
5
6/*
7 * Copyright (C) 2009-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEV_VIRTIO
23
24#include <VBox/log.h>
25#include <iprt/param.h>
26#include <iprt/assert.h>
27#include <iprt/uuid.h>
28#include <iprt/mem.h>
29#include <iprt/assert.h>
30#include <iprt/sg.h>
31#include <VBox/vmm/pdmdev.h>
32#include "Virtio_1_0_impl.h"
33#include "Virtio_1_0.h"
34
35#define INSTANCE(pVirtio) pVirtio->szInstance
36#define QUEUENAME(qIdx) (pVirtio->virtqState[qIdx].szVirtqName)
37
38/**
39 * See API comments in header file for description
40 */
41void virtioVirtToSgPhys(VIRTIOHANDLE hVirtio, PRTSGBUF pSgDst, void *pvSrc, size_t cb)
42{
43 while (cb)
44 {
45 size_t cbSeg = cb;
46 RTGCPHYS GCPhys = (RTGCPHYS)RTSgBufGetNextSegment(pSgDst, &cbSeg);
47 PDMDevHlpPhysWrite(((PVIRTIOSTATE)hVirtio)->CTX_SUFF(pDevIns), GCPhys, pvSrc, cbSeg);
48 pvSrc = ((uint8_t *)pvSrc) + cbSeg;
49 cb -= cbSeg;
50 }
51}
52
53/**
54 * See API comments in header file for description
55 */
56void virtioSgPhysToVirt(VIRTIOHANDLE hVirtio, PRTSGBUF pSgSrc, void *pvDst, size_t cb)
57{
58 while (cb)
59 {
60 size_t cbSeg = cb;
61 RTGCPHYS GCPhys = (RTGCPHYS)RTSgBufGetNextSegment(pSgSrc, &cbSeg);
62 PDMDevHlpPhysRead(((PVIRTIOSTATE)hVirtio)->CTX_SUFF(pDevIns), GCPhys, pvDst, cbSeg);
63 pvDst = ((uint8_t *)pvDst) + cbSeg;
64 cb -= cbSeg;
65 }
66}
67
68/**
69 * See API comments in header file for description
70 */
71int virtioQueueAttach(VIRTIOHANDLE hVirtio, uint16_t qIdx, const char *pcszName)
72{
73 LogFunc(("%s\n", pcszName));
74 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)hVirtio;
75 PVIRTQSTATE pVirtq = &(pVirtio->virtqState[qIdx]);
76 pVirtq->uAvailIdx = 0;
77 pVirtq->uUsedIdx = 0;
78 pVirtq->fEventThresholdReached = false;
79 RTStrCopy((char *)pVirtq->szVirtqName, sizeof(pVirtq->szVirtqName), pcszName);
80 return VINF_SUCCESS;
81
82}
83
84/**
85 * See API comments in header file for description
86 */
87const char *virtioQueueGetName(VIRTIOHANDLE hVirtio, uint16_t qIdx)
88{
89 return (const char *)((PVIRTIOSTATE)hVirtio)->virtqState[qIdx].szVirtqName;
90}
91
92/**
93 * See API comments in header file for description
94 */
95int virtioQueueSkip(VIRTIOHANDLE hVirtio, uint16_t qIdx)
96{
97 Assert(qIdx < sizeof(VIRTQSTATE));
98
99 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)hVirtio;
100 PVIRTQSTATE pVirtq = &pVirtio->virtqState[qIdx];
101
102 AssertMsgReturn(DRIVER_OK(pVirtio) && pVirtio->uQueueEnable[qIdx],
103 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
104
105 if (virtioQueueIsEmpty(pVirtio, qIdx))
106 return VERR_NOT_AVAILABLE;
107
108 Log2Func(("%s avail_idx=%u\n", pVirtq->szVirtqName, pVirtq->uAvailIdx));
109 pVirtq->uAvailIdx++;
110
111 return VINF_SUCCESS;
112}
113
114/**
115 * See API comments in header file for description
116 */
117uint64_t virtioGetNegotiatedFeatures(VIRTIOHANDLE hVirtio)
118{
119 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)hVirtio;
120 return pVirtio->uDriverFeatures;
121}
122
123/**
124 * See API comments in header file for description
125 */
126bool virtioQueueIsEmpty(VIRTIOHANDLE hVirtio, uint16_t qIdx)
127{
128 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)hVirtio;
129 if (!(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK))
130 return true;
131 return virtqIsEmpty(pVirtio, qIdx);
132}
133
134/**
135 * See API comments in header file for description
136 */
137int virtioQueueGet(VIRTIOHANDLE hVirtio, uint16_t qIdx, PPVIRTIO_DESC_CHAIN_T ppDescChain, bool fRemove)
138{
139 Assert(ppDescChain);
140
141 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)hVirtio;
142 PVIRTQSTATE pVirtq = &pVirtio->virtqState[qIdx];
143
144 PRTSGSEG paSegsIn = (PRTSGSEG)RTMemAlloc(VIRTQ_MAX_SIZE * sizeof(RTSGSEG));
145 AssertReturn(paSegsIn, VERR_NO_MEMORY);
146
147 PRTSGSEG paSegsOut = (PRTSGSEG)RTMemAlloc(VIRTQ_MAX_SIZE * sizeof(RTSGSEG));
148 AssertReturn(paSegsOut, VERR_NO_MEMORY);
149
150 AssertMsgReturn(DRIVER_OK(pVirtio) && pVirtio->uQueueEnable[qIdx],
151 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
152
153 if (virtqIsEmpty(pVirtio, qIdx))
154 return VERR_NOT_AVAILABLE;
155
156 uint16_t uHeadIdx = virtioReadAvailDescIdx(pVirtio, qIdx, pVirtq->uAvailIdx);
157 uint16_t uDescIdx = uHeadIdx;
158
159 Log6Func(("%s DESC CHAIN: (head) desc_idx=%u [avail_idx=%u]\n",
160 pVirtq->szVirtqName, uHeadIdx, pVirtq->uAvailIdx));
161
162 if (fRemove)
163 pVirtq->uAvailIdx++;
164
165 VIRTQ_DESC_T desc;
166
167 uint32_t cbIn = 0, cbOut = 0, cSegsIn = 0, cSegsOut = 0;
168
169 do
170 {
171 RTSGSEG *pSeg;
172
173 /**
174 * Malicious guests may go beyond paSegsIn or paSegsOut boundaries by linking
175 * several descriptors into a loop. Since there is no legitimate way to get a sequences of
176 * linked descriptors exceeding the total number of descriptors in the ring (see @bugref{8620}),
177 * the following aborts I/O if breach and employs a simple log throttling algorithm to notify.
178 */
179 if (cSegsIn + cSegsOut >= VIRTQ_MAX_SIZE)
180 {
181 static volatile uint32_t s_cMessages = 0;
182 static volatile uint32_t s_cThreshold = 1;
183 if (ASMAtomicIncU32(&s_cMessages) == ASMAtomicReadU32(&s_cThreshold))
184 {
185 LogRel(("Too many linked descriptors; "
186 "check if the guest arranges descriptors in a loop.\n"));
187 if (ASMAtomicReadU32(&s_cMessages) != 1)
188 LogRel(("(the above error has occured %u times so far)\n",
189 ASMAtomicReadU32(&s_cMessages)));
190 ASMAtomicWriteU32(&s_cThreshold, ASMAtomicReadU32(&s_cThreshold) * 10);
191 }
192 break;
193 }
194 RT_UNTRUSTED_VALIDATED_FENCE();
195
196 virtioReadDesc(pVirtio, qIdx, uDescIdx, &desc);
197
198 if (desc.fFlags & VIRTQ_DESC_F_WRITE)
199 {
200 Log6Func(("%s IN desc_idx=%u seg=%u addr=%RGp cb=%u\n",
201 QUEUENAME(qIdx), uDescIdx, cSegsIn, desc.pGcPhysBuf, desc.cb));
202 cbIn += desc.cb;
203 pSeg = &(paSegsIn[cSegsIn++]);
204 }
205 else
206 {
207 Log6Func(("%s OUT desc_idx=%u seg=%u addr=%RGp cb=%u\n",
208 QUEUENAME(qIdx), uDescIdx, cSegsOut, desc.pGcPhysBuf, desc.cb));
209 cbOut += desc.cb;
210 pSeg = &(paSegsOut[cSegsOut++]);
211 }
212
213 pSeg->pvSeg = (void *)desc.pGcPhysBuf;
214 pSeg->cbSeg = desc.cb;
215
216 uDescIdx = desc.uDescIdxNext;
217 } while (desc.fFlags & VIRTQ_DESC_F_NEXT);
218
219
220 PRTSGBUF pSgPhysIn = (PRTSGBUF)RTMemAllocZ(sizeof(RTSGBUF));
221 AssertReturn(pSgPhysIn, VERR_NO_MEMORY);
222
223 RTSgBufInit(pSgPhysIn, (PCRTSGSEG)paSegsIn, cSegsIn);
224
225 void *pSgVirtOut = RTMemAlloc(cbOut);
226 AssertReturn(pSgVirtOut, VERR_NO_MEMORY);
227
228 if (cSegsOut)
229 {
230 RTSGBUF outSgPhys;
231 RTSgBufInit(&outSgPhys, (PCRTSGSEG)paSegsOut, cSegsOut);
232 virtioSgPhysToVirt((PVIRTIOSTATE)hVirtio, &outSgPhys, pSgVirtOut, cbOut);
233 RTMemFree(paSegsOut);
234 }
235
236 PVIRTIO_DESC_CHAIN_T pDescChain = (PVIRTIO_DESC_CHAIN_T)RTMemAllocZ(sizeof(VIRTIO_DESC_CHAIN_T));
237 AssertReturn(pDescChain, VERR_NO_MEMORY);
238
239 pDescChain->uHeadIdx = uHeadIdx;
240 pDescChain->cbVirtSrc = cbOut;
241 pDescChain->pVirtSrc = pSgVirtOut;
242 pDescChain->cbPhysDst = cbIn;
243 pDescChain->pSgPhysDst = pSgPhysIn;
244 *ppDescChain = pDescChain;
245
246 Log6Func(("%s -- segs OUT: %u (%u bytes) IN: %u (%u bytes) --\n",
247 pVirtq->szVirtqName, cSegsOut, cbOut, cSegsIn, cbIn));
248
249 return VINF_SUCCESS;
250}
251
252 /** See API comments in header file prototype for description */
253int virtioQueuePut(VIRTIOHANDLE hVirtio, uint16_t qIdx, PRTSGBUF pSgVirtReturn,
254 PVIRTIO_DESC_CHAIN_T pDescChain, bool fFence)
255{
256 Assert(qIdx < VIRTQ_MAX_CNT);
257
258 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)hVirtio;
259 PVIRTQSTATE pVirtq = &pVirtio->virtqState[qIdx];
260 PRTSGBUF pSgPhysReturn = pDescChain->pSgPhysDst;
261
262 AssertMsgReturn(DRIVER_OK(pVirtio) /*&& pVirtio->uQueueEnable[qIdx]*/,
263 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
264
265 uint16_t uUsedIdx = virtioReadUsedRingIdx(pVirtio, qIdx);
266 Log6Func(("Copying client data to %s, desc chain (head desc_idx %d)\n",
267 QUEUENAME(qIdx), uUsedIdx));
268
269 /*
270 * Copy virtual memory s/g buffer containing data to return to the guest
271 * to phys. memory described by (IN direction ) s/g buffer of the descriptor chain
272 * original pulled from the queue, to 'send back' to the guest driver.
273 */
274 size_t cbRemain = RTSgBufCalcTotalLength(pSgVirtReturn);
275 size_t cbCopy = 0;
276 while (cbRemain)
277 {
278 PCRTSGSEG paSeg = &pSgPhysReturn->paSegs[pSgPhysReturn->idxSeg];
279 uint64_t dstSgStart = (uint64_t)paSeg->pvSeg;
280 uint64_t dstSgLen = (uint64_t)paSeg->cbSeg;
281 uint64_t dstSgCur = (uint64_t)pSgPhysReturn->pvSegCur;
282 cbCopy = RT_MIN(pSgVirtReturn->cbSegLeft, dstSgLen - (dstSgCur - dstSgStart));
283 PDMDevHlpPhysWrite(pVirtio->CTX_SUFF(pDevIns),
284 (RTGCPHYS)pSgPhysReturn->pvSegCur, pSgVirtReturn->pvSegCur, cbCopy);
285 RTSgBufAdvance(pSgVirtReturn, cbCopy);
286 RTSgBufAdvance(pSgPhysReturn, cbCopy);
287 cbRemain -= cbCopy;
288 }
289
290
291 if (fFence)
292 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
293
294 /** If this write-ahead crosses threshold where the driver wants to get an event flag it */
295 if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
296 if (pVirtq->uUsedIdx == virtioReadAvailUsedEvent(pVirtio, qIdx))
297 pVirtq->fEventThresholdReached = true;
298
299 /**
300 * Place used buffer's descriptor in used ring but don't update used ring's slot index.
301 * That will be done with a subsequent client call to virtioQueueSync() */
302 virtioWriteUsedElem(pVirtio, qIdx, pVirtq->uUsedIdx++, pDescChain->uHeadIdx, cbCopy);
303
304
305 if (LogIs2Enabled())
306 {
307 Log2Func((".... Copied %u bytes to %u byte buffer, residual=%d\n",
308 cbCopy, pDescChain->cbPhysDst, pDescChain->cbPhysDst - cbCopy));
309 }
310 Log6Func(("Write ahead used_idx=%d, %s used_idx=%d\n",
311 pVirtq->uUsedIdx, QUEUENAME(qIdx), uUsedIdx));
312
313 RTMemFree((void *)pSgPhysReturn->paSegs);
314 RTMemFree(pSgPhysReturn);
315 RTMemFree(pDescChain);
316
317 return VINF_SUCCESS;
318}
319
320/**
321 * See API comments in header file for description
322 */
323int virtioQueueSync(VIRTIOHANDLE hVirtio, uint16_t qIdx)
324{
325 Assert(qIdx < sizeof(VIRTQSTATE));
326
327 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)hVirtio;
328 PVIRTQSTATE pVirtq = &pVirtio->virtqState[qIdx];
329
330 AssertMsgReturn(DRIVER_OK(pVirtio) && pVirtio->uQueueEnable[qIdx],
331 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
332
333 uint16_t uIdx = virtioReadUsedRingIdx(pVirtio, qIdx);
334 Log6Func(("Updating %s used_idx from %u to %u\n",
335 QUEUENAME(qIdx), uIdx, pVirtq->uUsedIdx));
336
337 virtioWriteUsedRingIdx(pVirtio, qIdx, pVirtq->uUsedIdx);
338 virtioNotifyGuestDriver(pVirtio, qIdx, false);
339
340 return VINF_SUCCESS;
341}
342
343/**
344 * See API comments in header file for description
345 */
346static void virtioQueueNotified(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint16_t uNotifyIdx)
347{
348 Assert(uNotifyIdx == qIdx);
349
350 PVIRTQSTATE pVirtq = &pVirtio->virtqState[qIdx];
351 Log6Func(("%s\n", pVirtq->szVirtqName));
352
353 /** Inform client */
354 pVirtio->virtioCallbacks.pfnVirtioQueueNotified((VIRTIOHANDLE)pVirtio, pVirtio->pClientContext, qIdx);
355}
356
357/**
358 * See API comments in header file for description
359 */
360void virtioPropagateResumeNotification(VIRTIOHANDLE hVirtio)
361{
362 virtioNotifyGuestDriver((PVIRTIOSTATE)hVirtio, (uint16_t)NULL /* qIdx */, true /* fForce */);
363}
364
/**
 * Trigger MSI-X or INT# interrupt to notify guest of data added to used ring of
 * the specified virtq, depending on the interrupt configuration of the device
 * and depending on negotiated and realtime constraints flagged by the guest driver.
 * See VirtIO 1.0 specification (section 2.4.7).
 *
 * @param pVirtio - Instance state
 * @param qIdx - Queue to check for guest interrupt handling preference
 * @param fForce - Overrides qIdx, forcing notification regardless of driver's
 *                 notification preferences. This is a safeguard to prevent
 *                 stalls upon resuming the VM. VirtIO 1.0 specification Section 4.1.5.5
 *                 indicates spurious interrupts are harmless to guest driver's state,
 *                 as they only cause the guest driver to [re]scan queues for work to do.
 */
static void virtioNotifyGuestDriver(PVIRTIOSTATE pVirtio, uint16_t qIdx, bool fForce)
{
    PVIRTQSTATE pVirtq = &pVirtio->virtqState[qIdx];

    AssertMsgReturnVoid(DRIVER_OK(pVirtio), ("Guest driver not in ready state.\n"));

    /* MSI-X not configured for the device: use the PCI INT# / ISR path. */
    if (pVirtio->uMsixConfig == VIRTIO_MSI_NO_VECTOR)
    {
        if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
        {
            /* EVENT_IDX negotiated: interrupt only when the used index crossed the
             * threshold the driver published (flag set over in virtioQueuePut()). */
            if (pVirtq->fEventThresholdReached)
            {
                virtioRaiseInterrupt(pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, fForce);
                pVirtq->fEventThresholdReached = false;
                return;
            }
            Log6Func(("...skipping interrupt: VIRTIO_F_EVENT_IDX set but threshold not reached\n"));
        }
        else
        {
            /* If guest driver hasn't suppressed interrupts, interrupt.
             * NOTE(review): this reads the *used* ring flags but tests
             * VIRTQ_AVAIL_F_NO_INTERRUPT, a bit the driver publishes in the
             * *avail* ring flags per VirtIO 1.0 — verify virtioReadUsedFlags
             * is really the intended accessor here. */
            if (fForce || !(virtioReadUsedFlags(pVirtio, qIdx) & VIRTQ_AVAIL_F_NO_INTERRUPT))
            {
                virtioRaiseInterrupt(pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, fForce);
                return;
            }
            Log6Func(("...skipping interrupt. Guest flagged VIRTQ_AVAIL_F_NO_INTERRUPT for queue\n"));
        }
    }
    else
    {
        /* TBD, do MSI notification if criteria met */
    }
}
413
/**
 * NOTE: The consumer (PDM device) must call this function to 'forward' a relocation call.
 *
 * Device relocation callback.
 *
 * When this callback is called the device instance data, and if the
 * device have a GC component, is being relocated, or/and the selectors
 * have been changed. The device must use the chance to perform the
 * necessary pointer relocations and data updates.
 *
 * Before the GC code is executed the first time, this function will be
 * called with a 0 delta so GC pointer calculations can be one in one place.
 *
 * @param pDevIns Pointer to the device instance.
 * @param offDelta The relocation delta relative to the old location.
 *
 * @remark A relocation CANNOT fail.
 */
void virtioRelocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
{
    RT_NOREF(offDelta); /* delta not needed: context pointers are recomputed from pDevIns */
    PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);
    LogFunc(("\n"));

    /* Refresh the per-context (R3/RC/R0) device instance pointers. */
    pVirtio->pDevInsR3 = pDevIns;
    pVirtio->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
    pVirtio->pDevInsR0 = PDMDEVINS_2_R0PTR(pDevIns);
}
442
443/**
444 * Raise interrupt.
445 *
446 * @param pVirtio The device state structure.
447 * @param uCause Interrupt cause bit mask to set in PCI ISR port.
448 */
449static int virtioRaiseInterrupt(PVIRTIOSTATE pVirtio, uint8_t uCause, bool fForce)
450{
451
452 if (fForce)
453 Log6Func(("reason: resumed after suspend\n"));
454 else
455 if (uCause == VIRTIO_ISR_VIRTQ_INTERRUPT)
456 Log6Func(("reason: buffer added to 'used' ring.\n"));
457 else
458 if (uCause == VIRTIO_ISR_DEVICE_CONFIG)
459 Log6Func(("reason: device config change\n"));
460
461 pVirtio->uISR |= uCause;
462 PDMDevHlpPCISetIrq(pVirtio->CTX_SUFF(pDevIns), 0, 1);
463 return VINF_SUCCESS;
464}
465
/**
 * Lower interrupt. (Called when guest reads ISR)
 *
 * De-asserts PCI INT#; the ISR register itself is cleared by the MMIO read handler.
 *
 * @param pVirtio The device state structure.
 */
static void virtioLowerInterrupt(PVIRTIOSTATE pVirtio)
{
    PDMDevHlpPCISetIrq(pVirtio->CTX_SUFF(pDevIns), 0, 0);
}
475
476static void virtioResetQueue(PVIRTIOSTATE pVirtio, uint16_t qIdx)
477{
478 PVIRTQSTATE pVirtQ = &pVirtio->virtqState[qIdx];
479 pVirtQ->uAvailIdx = 0;
480 pVirtQ->uUsedIdx = 0;
481 pVirtio->uQueueEnable[qIdx] = false;
482 pVirtio->uQueueSize[qIdx] = VIRTQ_MAX_SIZE;
483 pVirtio->uQueueNotifyOff[qIdx] = qIdx;
484}
485
/**
 * Reset the whole device to power-on defaults: clear feature selectors,
 * config generation, status and ISR, park MSI-X vectors (when MSI-X is not
 * compiled in), and reset every queue.
 */
static void virtioResetDevice(PVIRTIOSTATE pVirtio)
{
    Log2Func(("\n"));
    pVirtio->uDeviceFeaturesSelect = 0;
    pVirtio->uDriverFeaturesSelect = 0;
    pVirtio->uConfigGeneration     = 0;
    pVirtio->uDeviceStatus         = 0;
    pVirtio->uISR                  = 0;

#ifndef MSIX_SUPPORT
    /* This is required by VirtIO 1.0 specification, section 4.1.5.1.2 */
    pVirtio->uMsixConfig = VIRTIO_MSI_NO_VECTOR;
    for (int i = 0; i < VIRTQ_MAX_CNT; i++)
        pVirtio->uQueueMsixVector[i] = VIRTIO_MSI_NO_VECTOR;
#endif

    pVirtio->uNumQueues = VIRTQ_MAX_CNT;
    for (uint16_t qIdx = 0; qIdx < pVirtio->uNumQueues; qIdx++)
        virtioResetQueue(pVirtio, qIdx);
}
507
508/**
509 * See API comments in header file for description
510 */
511bool virtioIsQueueEnabled(VIRTIOHANDLE hVirtio, uint16_t qIdx)
512{
513 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)hVirtio;
514 return pVirtio->uQueueEnable[qIdx];
515}
516
517/**
518 * See API comments in header file for description
519 */
520void virtioQueueEnable(VIRTIOHANDLE hVirtio, uint16_t qIdx, bool fEnabled)
521{
522 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)hVirtio;
523 if (fEnabled)
524 pVirtio->uQueueSize[qIdx] = VIRTQ_MAX_SIZE;
525 else
526 pVirtio->uQueueSize[qIdx] = 0;
527}
528
529/**
530 * Initiate orderly reset procedure.
531 * Invoked by client to reset the device and driver (see VirtIO 1.0 section 2.1.1/2.1.2)
532 */
533void virtioResetAll(VIRTIOHANDLE hVirtio)
534{
535 LogFunc(("VIRTIO RESET REQUESTED!!!\n"));
536 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)hVirtio;
537 pVirtio->uDeviceStatus |= VIRTIO_STATUS_DEVICE_NEEDS_RESET;
538 if (pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
539 {
540 pVirtio->fGenUpdatePending = true;
541 virtioRaiseInterrupt(pVirtio, VIRTIO_ISR_DEVICE_CONFIG, false /* fForce */);
542 }
543}
544
545/**
546 * Invoked by this implementation when guest driver resets the device.
547 * The driver itself will not reset until the device has read the status change.
548 */
549static void virtioGuestResetted(PVIRTIOSTATE pVirtio)
550{
551 LogFunc(("Guest reset the device\n"));
552
553 /** Let the client know */
554 pVirtio->virtioCallbacks.pfnVirtioStatusChanged((VIRTIOHANDLE)pVirtio, pVirtio->pClientContext, false);
555 virtioResetDevice(pVirtio);
556}
557
/**
 * Handle accesses to Common Configuration capability
 *
 * Dispatches on which virtio_pci_common_cfg field the offset falls into
 * (via the MATCH_COMMON_CFG() macro) and performs the read or write.
 *
 * @returns VBox status code (VERR_ACCESS_DENIED for writes to read-only
 *          fields, out-of-range selectors and unrecognized offsets)
 *
 * @param pVirtio Virtio instance state
 * @param fWrite If write access (otherwise read access)
 * @param uOffset Byte offset of the access within the common config structure
 * @param cb Number of bytes to read or write
 * @param pv Pointer to location to write to or read from
 */
static int virtioCommonCfgAccessed(PVIRTIOSTATE pVirtio, int fWrite, off_t uOffset, unsigned cb, void const *pv)
{
    int rc = VINF_SUCCESS;
    uint64_t val;
    if (MATCH_COMMON_CFG(uDeviceFeatures))
    {
        if (fWrite) /* Guest WRITE pCommonCfg>uDeviceFeatures */
            LogFunc(("Guest attempted to write readonly virtio_pci_common_cfg.device_feature\n"));
        else /* Guest READ pCommonCfg->uDeviceFeatures */
        {
            /* uIntraOff is consumed by the LOG_COMMON_CFG_ACCESS() macro. */
            uint32_t uIntraOff = uOffset - RT_UOFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uDeviceFeatures);
            /* The 64-bit feature word is exposed as two 32-bit halves chosen
             * by the device_feature_select register (VirtIO 1.0, 4.1.4.3). */
            switch(pVirtio->uDeviceFeaturesSelect)
            {
                case 0:
                    val = pVirtio->uDeviceFeatures & 0xffffffff;
                    memcpy((void *)pv, (const void *)&val, cb);
                    LOG_COMMON_CFG_ACCESS(uDeviceFeatures);
                    break;
                case 1:
                    val = (pVirtio->uDeviceFeatures >> 32) & 0xffffffff;
                    uIntraOff += 4;
                    memcpy((void *)pv, (const void *)&val, cb);
                    LOG_COMMON_CFG_ACCESS(uDeviceFeatures);
                    break;
                default:
                    LogFunc(("Guest read uDeviceFeatures with out of range selector (%d), returning 0\n",
                             pVirtio->uDeviceFeaturesSelect));
                    return VERR_ACCESS_DENIED;
            }
        }
    }
    else if (MATCH_COMMON_CFG(uDriverFeatures))
    {
        if (fWrite) /* Guest WRITE pCommonCfg->udriverFeatures */
        {
            uint32_t uIntraOff = uOffset - RT_UOFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uDriverFeatures);
            switch(pVirtio->uDriverFeaturesSelect)
            {
                case 0:
                    memcpy(&pVirtio->uDriverFeatures, pv, cb);
                    LOG_COMMON_CFG_ACCESS(uDriverFeatures);
                    break;
                case 1:
                    /* Selector 1 writes the high 32 bits of the driver feature word. */
                    memcpy(((char *)&pVirtio->uDriverFeatures) + sizeof(uint32_t), pv, cb);
                    uIntraOff += 4;
                    LOG_COMMON_CFG_ACCESS(uDriverFeatures);
                    break;
                default:
                    LogFunc(("Guest wrote uDriverFeatures with out of range selector (%d), returning 0\n",
                             pVirtio->uDriverFeaturesSelect));
                    return VERR_ACCESS_DENIED;
            }
        }
        else /* Guest READ pCommonCfg->udriverFeatures */
        {
            uint32_t uIntraOff = uOffset - RT_UOFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uDriverFeatures);
            switch(pVirtio->uDriverFeaturesSelect)
            {
                case 0:
                    val = pVirtio->uDriverFeatures & 0xffffffff;
                    memcpy((void *)pv, (const void *)&val, cb);
                    LOG_COMMON_CFG_ACCESS(uDriverFeatures);
                    break;
                case 1:
                    val = (pVirtio->uDriverFeatures >> 32) & 0xffffffff;
                    uIntraOff += 4;
                    memcpy((void *)pv, (const void *)&val, cb);
                    LOG_COMMON_CFG_ACCESS(uDriverFeatures);
                    break;
                default:
                    LogFunc(("Guest read uDriverFeatures with out of range selector (%d), returning 0\n",
                             pVirtio->uDriverFeaturesSelect));
                    return VERR_ACCESS_DENIED;
            }
        }
    }
    else if (MATCH_COMMON_CFG(uNumQueues))
    {
        if (fWrite)
        {
            Log2Func(("Guest attempted to write readonly virtio_pci_common_cfg.num_queues\n"));
            return VERR_ACCESS_DENIED;
        }
        else
        {
            uint32_t uIntraOff = 0;
            *(uint16_t *)pv = VIRTQ_MAX_CNT;
            LOG_COMMON_CFG_ACCESS(uNumQueues);
        }
    }
    else if (MATCH_COMMON_CFG(uDeviceStatus))
    {
        if (fWrite) /* Guest WRITE pCommonCfg->uDeviceStatus */
        {
            pVirtio->uDeviceStatus = *(uint8_t *)pv;
            Log6Func(("Guest wrote uDeviceStatus ................ ("));
            virtioLogDeviceStatus(pVirtio->uDeviceStatus);
            Log6((")\n"));
            /* Writing 0 to device_status is the driver-initiated reset (VirtIO 1.0, 2.1). */
            if (pVirtio->uDeviceStatus == 0)
                virtioGuestResetted(pVirtio);
            /*
             * Notify client only if status actually changed from last time.
             */
            bool fOkayNow = pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK;
            bool fWasOkay = pVirtio->uPrevDeviceStatus & VIRTIO_STATUS_DRIVER_OK;
            if ((fOkayNow && !fWasOkay) || (!fOkayNow && fWasOkay))
                pVirtio->virtioCallbacks.pfnVirtioStatusChanged((VIRTIOHANDLE)pVirtio, pVirtio->pClientContext,
                                                                pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK);
            pVirtio->uPrevDeviceStatus = pVirtio->uDeviceStatus;
        }
        else /* Guest READ pCommonCfg->uDeviceStatus */
        {
            Log6Func(("Guest read  uDeviceStatus ................ ("));
            *(uint32_t *)pv = pVirtio->uDeviceStatus;
            virtioLogDeviceStatus(pVirtio->uDeviceStatus);
            Log6((")\n"));
        }
    }
    /* Remaining fields are handled generically by the accessor macros:
     * COMMON_CFG_ACCESSOR = read/write, _READONLY = read only,
     * _INDEXED = per-queue array indexed by the current queue selector. */
    else
    if (MATCH_COMMON_CFG(uMsixConfig))
        COMMON_CFG_ACCESSOR(uMsixConfig);
    else
    if (MATCH_COMMON_CFG(uDeviceFeaturesSelect))
        COMMON_CFG_ACCESSOR(uDeviceFeaturesSelect);
    else
    if (MATCH_COMMON_CFG(uDriverFeaturesSelect))
        COMMON_CFG_ACCESSOR(uDriverFeaturesSelect);
    else
    if (MATCH_COMMON_CFG(uConfigGeneration))
        COMMON_CFG_ACCESSOR_READONLY(uConfigGeneration);
    else
    if (MATCH_COMMON_CFG(uQueueSelect))
        COMMON_CFG_ACCESSOR(uQueueSelect);
    else
    if (MATCH_COMMON_CFG(uQueueSize))
        COMMON_CFG_ACCESSOR_INDEXED(uQueueSize, pVirtio->uQueueSelect);
    else
    if (MATCH_COMMON_CFG(uQueueMsixVector))
        COMMON_CFG_ACCESSOR_INDEXED(uQueueMsixVector, pVirtio->uQueueSelect);
    else
    if (MATCH_COMMON_CFG(uQueueEnable))
        COMMON_CFG_ACCESSOR_INDEXED(uQueueEnable, pVirtio->uQueueSelect);
    else
    if (MATCH_COMMON_CFG(uQueueNotifyOff))
        COMMON_CFG_ACCESSOR_INDEXED_READONLY(uQueueNotifyOff, pVirtio->uQueueSelect);
    else
    if (MATCH_COMMON_CFG(pGcPhysQueueDesc))
        COMMON_CFG_ACCESSOR_INDEXED(pGcPhysQueueDesc, pVirtio->uQueueSelect);
    else
    if (MATCH_COMMON_CFG(pGcPhysQueueAvail))
        COMMON_CFG_ACCESSOR_INDEXED(pGcPhysQueueAvail, pVirtio->uQueueSelect);
    else
    if (MATCH_COMMON_CFG(pGcPhysQueueUsed))
        COMMON_CFG_ACCESSOR_INDEXED(pGcPhysQueueUsed, pVirtio->uQueueSelect);
    else
    {
        Log2Func(("Bad guest %s access to virtio_pci_common_cfg: uOffset=%d, cb=%d\n",
                  fWrite ? "write" : "read ", uOffset, cb));
        rc = VERR_ACCESS_DENIED;
    }
    return rc;
}
730
731/**
732 * Memory mapped I/O Handler for PCI Capabilities read operations.
733 *
734 * @returns VBox status code.
735 *
736 * @param pDevIns The device instance.
737 * @param pvUser User argument.
738 * @param GCPhysAddr Physical address (in GC) where the read starts.
739 * @param pv Where to store the result.
740 * @param cb Number of bytes read.
741 */
742PDMBOTHCBDECL(int) virtioR3MmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
743{
744 RT_NOREF(pvUser);
745 PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);
746 int rc = VINF_SUCCESS;
747
748 MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysDeviceCap, pVirtio->pDeviceCap, fDevSpecific);
749 MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysCommonCfg, pVirtio->pCommonCfgCap, fCommonCfg);
750 MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysIsrCap, pVirtio->pIsrCap, fIsr);
751
752 if (fDevSpecific)
753 {
754 uint32_t uOffset = GCPhysAddr - pVirtio->pGcPhysDeviceCap;
755 /*
756 * Callback to client to manage device-specific configuration.
757 */
758 rc = pVirtio->virtioCallbacks.pfnVirtioDevCapRead(pDevIns, uOffset, pv, cb);
759
760 /*
761 * Additionally, anytime any part of the device-specific configuration (which our client maintains)
762 * is READ it needs to be checked to see if it changed since the last time any part was read, in
763 * order to maintain the config generation (see VirtIO 1.0 spec, section 4.1.4.3.1)
764 */
765 bool fDevSpecificFieldChanged = !!memcmp((char *)pVirtio->pDevSpecificCfg + uOffset,
766 (char *)pVirtio->pPrevDevSpecificCfg + uOffset, cb);
767
768 memcpy(pVirtio->pPrevDevSpecificCfg, pVirtio->pDevSpecificCfg, pVirtio->cbDevSpecificCfg);
769
770 if (pVirtio->fGenUpdatePending || fDevSpecificFieldChanged)
771 {
772 ++pVirtio->uConfigGeneration;
773 Log6Func(("Bumped cfg. generation to %d because %s%s\n",
774 pVirtio->uConfigGeneration,
775 fDevSpecificFieldChanged ? "<dev cfg changed> " : "",
776 pVirtio->fGenUpdatePending ? "<update was pending>" : ""));
777 pVirtio->fGenUpdatePending = false;
778 }
779 }
780 else
781 if (fCommonCfg)
782 {
783 uint32_t uOffset = GCPhysAddr - pVirtio->pGcPhysCommonCfg;
784 virtioCommonCfgAccessed(pVirtio, 0 /* fWrite */, uOffset, cb, pv);
785 }
786 else
787 if (fIsr && cb == sizeof(uint8_t))
788 {
789 *(uint8_t *)pv = pVirtio->uISR;
790 Log6Func(("Read and clear ISR\n"));
791 pVirtio->uISR = 0; /* VirtIO specification requires reads of ISR to clear it */
792 virtioLowerInterrupt(pVirtio);
793 }
794 else {
795 LogFunc(("Bad read access to mapped capabilities region:\n"
796 " pVirtio=%#p GCPhysAddr=%RGp cb=%u\n",
797 pVirtio, GCPhysAddr, pv, cb, pv, cb));
798 }
799 return rc;
800}
/**
 * Memory mapped I/O Handler for PCI Capabilities write operations.
 *
 * Routes the access to the device-specific config (via client callback),
 * the common config, the ISR register, or a queue-notify doorbell.
 *
 * @returns VBox status code.
 *
 * @param pDevIns The device instance.
 * @param pvUser User argument.
 * @param GCPhysAddr Physical address (in GC) where the write starts.
 * @param pv Where to fetch the result.
 * @param cb Number of bytes to write.
 */
PDMBOTHCBDECL(int) virtioR3MmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
{
    RT_NOREF(pvUser);
    PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);
    int rc = VINF_SUCCESS;

    /* Classify the address against each mapped capability region. */
    MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysDeviceCap, pVirtio->pDeviceCap, fDevSpecific);
    MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysCommonCfg, pVirtio->pCommonCfgCap, fCommonCfg);
    MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysIsrCap, pVirtio->pIsrCap, fIsr);
    MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysNotifyCap, pVirtio->pNotifyCap, fNotify);

    if (fDevSpecific)
    {
        uint32_t uOffset = GCPhysAddr - pVirtio->pGcPhysDeviceCap;
        /*
         * Pass this MMIO write access back to the client to handle
         */
        rc = pVirtio->virtioCallbacks.pfnVirtioDevCapWrite(pDevIns, uOffset, pv, cb);
    }
    else
    if (fCommonCfg)
    {
        uint32_t uOffset = GCPhysAddr - pVirtio->pGcPhysCommonCfg;
        virtioCommonCfgAccessed(pVirtio, 1 /* fWrite */, uOffset, cb, pv);
    }
    else
    if (fIsr && cb == sizeof(uint8_t))
    {
        pVirtio->uISR = *(uint8_t *)pv;
        Log6Func(("Setting uISR = 0x%02x (virtq interrupt: %d, dev confg interrupt: %d)\n",
                  pVirtio->uISR & 0xff,
                  pVirtio->uISR & VIRTIO_ISR_VIRTQ_INTERRUPT,
                  !!(pVirtio->uISR & VIRTIO_ISR_DEVICE_CONFIG)));
    }
    else
    /* This *should* be guest driver dropping index of a new descriptor in avail ring */
    if (fNotify && cb == sizeof(uint16_t))
    {
        /* Queue index is derived from which doorbell slot was hit; the written
         * value is the avail-ring index the guest just published. */
        uint32_t uNotifyBaseOffset = GCPhysAddr - pVirtio->pGcPhysNotifyCap;
        uint16_t qIdx = uNotifyBaseOffset / VIRTIO_NOTIFY_OFFSET_MULTIPLIER;
        uint16_t uAvailDescIdx = *(uint16_t *)pv;
        virtioQueueNotified(pVirtio, qIdx, uAvailDescIdx);
    }
    else
    {
        Log2Func(("Bad write access to mapped capabilities region:\n"
                  "  pVirtio=%#p GCPhysAddr=%RGp pv=%#p{%.*Rhxs} cb=%u\n",
                  pVirtio, GCPhysAddr, pv, cb, pv, cb));
    }
    return rc;
}
863
/**
 * @callback_method_impl{FNPCIIOREGIONMAP}
 *
 * Registers the MMIO handlers for the capabilities BAR and records the
 * guest-physical base addresses of each capability structure within it.
 */
static DECLCALLBACK(int) virtioR3Map(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t iRegion,
                                     RTGCPHYS GCPhysAddress, RTGCPHYS cb, PCIADDRESSSPACE enmType)
{
    RT_NOREF3(pPciDev, iRegion, enmType);
    PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);
    int rc = VINF_SUCCESS;

    Assert(cb >= 32);

    if (iRegion == VIRTIO_REGION_PCI_CAP)
    {
        /* We use the assigned size here, because we currently only support page aligned MMIO ranges. */
        rc = PDMDevHlpMMIORegister(pDevIns, GCPhysAddress, cb, NULL /*pvUser*/,
                                   IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
                                   virtioR3MmioWrite, virtioR3MmioRead,
                                   "virtio-scsi MMIO");

        if (RT_FAILURE(rc))
        {
            Log2Func(("virtio: PCI Capabilities failed to map GCPhysAddr=%RGp cb=%RGp, region=%d\n",
                      GCPhysAddress, cb, iRegion));
            return rc;
        }
        Log2Func(("virtio: PCI Capabilities mapped at GCPhysAddr=%RGp cb=%RGp, region=%d\n",
                  GCPhysAddress, cb, iRegion));
        /* Cache the guest-physical address of each capability structure so the
         * MMIO handlers can classify incoming accesses. */
        pVirtio->pGcPhysPciCapBase = GCPhysAddress;
        pVirtio->pGcPhysCommonCfg  = GCPhysAddress + pVirtio->pCommonCfgCap->uOffset;
        pVirtio->pGcPhysNotifyCap  = GCPhysAddress + pVirtio->pNotifyCap->pciCap.uOffset;
        pVirtio->pGcPhysIsrCap     = GCPhysAddress + pVirtio->pIsrCap->uOffset;
        /* Device-specific config region exists only when the client supplied one. */
        if (pVirtio->pPrevDevSpecificCfg)
            pVirtio->pGcPhysDeviceCap = GCPhysAddress + pVirtio->pDeviceCap->uOffset;
    }
    return rc;
}
901
/**
 * Callback function for reading from the PCI configuration space.
 *
 * @returns The register value.
 * @param pDevIns Pointer to the device instance the PCI device
 *        belongs to.
 * @param pPciDev Pointer to PCI device. Use pPciDev->pDevIns to get the device instance.
 * @param uAddress The configuration space register address. [0..4096]
 * @param cb The register size. [1,2,4]
 *
 * @remarks Called with the PDM lock held. The device lock is NOT take because
 *          that is very likely be a lock order violation.
 */
static DECLCALLBACK(uint32_t) virtioPciConfigRead(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
                                                  uint32_t uAddress, unsigned cb)
{
    PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);

    /* NOTE(review): this compares the config-space register address against the
     * host *virtual* address of the uPciCfgData member — presumably relying on
     * how pPciCfgCap is laid over the config-space shadow; verify this really
     * matches the register offset rather than always (or never) being true. */
    if (uAddress == (uint64_t)&pVirtio->pPciCfgCap->uPciCfgData)
    {
        /* VirtIO 1.0 spec section 4.1.4.7 describes a required alternative access capability
         * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space
         * (the virtio_pci_cfg_cap capability), and access data items. */
        uint32_t uLength = pVirtio->pPciCfgCap->pciCap.uLength;
        uint32_t uOffset = pVirtio->pPciCfgCap->pciCap.uOffset;
        uint8_t  uBar    = pVirtio->pPciCfgCap->pciCap.uBar;
        uint32_t pv      = 0;   /* result buffer filled by the MMIO read handler */
        if (uBar == VIRTIO_REGION_PCI_CAP)
            (void)virtioR3MmioRead(pDevIns, NULL, (RTGCPHYS)((uint32_t)pVirtio->pGcPhysPciCapBase + uOffset),
                                   &pv, uLength);
        else
        {
            Log2Func(("Guest read virtio_pci_cfg_cap.pci_cfg_data using unconfigured BAR. Ignoring"));
            return 0;
        }
        Log2Func(("virtio: Guest read  virtio_pci_cfg_cap.pci_cfg_data, bar=%d, offset=%d, length=%d, result=%d\n",
                  uBar, uOffset, uLength, pv));
        return pv;
    }
    /* Anything else goes to the saved original PCI config-space read handler. */
    return pVirtio->pfnPciConfigReadOld(pDevIns, pPciDev, uAddress, cb);
}
943
944/**
945 * Callback function for writing to the PCI configuration space.
946 *
947 * @returns VINF_SUCCESS or PDMDevHlpDBGFStop status.
948 *
949 * @param pDevIns Pointer to the device instance the PCI device
950 * belongs to.
951 * @param pPciDev Pointer to PCI device. Use pPciDev->pDevIns to get the device instance.
952 * @param uAddress The configuration space register address. [0..4096]
953 * @param u32Value The value that's being written. The number of bits actually used from
954 * this value is determined by the cb parameter.
955 * @param cb The register size. [1,2,4]
956 *
957 * @remarks Called with the PDM lock held. The device lock is NOT take because
958 * that is very likely be a lock order violation.
959 */
960static DECLCALLBACK(VBOXSTRICTRC) virtioPciConfigWrite(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
961 uint32_t uAddress, uint32_t u32Value, unsigned cb)
962{
963 PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);
964
965 if (uAddress == pVirtio->uPciCfgDataOff)
966 {
967 /* VirtIO 1.0 spec section 4.1.4.7 describes a required alternative access capability
968 * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space
969 * (the virtio_pci_cfg_cap capability), and access data items. */
970 uint32_t uLength = pVirtio->pPciCfgCap->pciCap.uLength;
971 uint32_t uOffset = pVirtio->pPciCfgCap->pciCap.uOffset;
972 uint8_t uBar = pVirtio->pPciCfgCap->pciCap.uBar;
973 if (uBar == VIRTIO_REGION_PCI_CAP)
974 (void)virtioR3MmioWrite(pDevIns, NULL, (RTGCPHYS)((uint32_t)pVirtio->pGcPhysPciCapBase + uOffset),
975 (void *)&u32Value, uLength);
976 else
977 {
978 Log2Func(("Guest wrote virtio_pci_cfg_cap.pci_cfg_data using unconfigured BAR. Ignoring"));
979 return VINF_SUCCESS;
980 }
981 Log2Func(("Guest wrote virtio_pci_cfg_cap.pci_cfg_data, bar=%d, offset=%x, length=%x, value=%d\n",
982 uBar, uOffset, uLength, u32Value));
983 return VINF_SUCCESS;
984 }
985 return pVirtio->pfnPciConfigWriteOld(pDevIns, pPciDev, uAddress, u32Value, cb);
986}
987
988/**
989 * Get VirtIO accepted host-side features
990 *
991 * @returns feature bits selected or 0 if selector out of range.
992 *
993 * @param pState Virtio state
994 */
995uint64_t virtioGetAcceptedFeatures(PVIRTIOSTATE pVirtio)
996{
997 return pVirtio->uDriverFeatures;
998}
999
1000/**
1001 * Destruct PCI-related part of device.
1002 *
1003 * We need to free non-VM resources only.
1004 *
1005 * @returns VBox status code.
1006 * @param pState The device state structure.
1007 */
1008int virtioDestruct(PVIRTIOSTATE pVirtio)
1009{
1010 RT_NOREF(pVirtio);
1011 Log(("%s Destroying PCI instance\n", INSTANCE(pVirtio)));
1012 return VINF_SUCCESS;
1013}
1014
1015/**
1016 * Setup PCI device controller and Virtio state
1017 *
1018 * @param pDevIns Device instance data
1019 * @param pClientContext Opaque client context (such as state struct, ...)
1020 * @param pVirtio Device State
1021 * @param pPciParams Values to populate industry standard PCI Configuration Space data structure
1022 * @param pcszInstance Device instance name (format-specifier)
1023 * @param uDevSpecificFeatures VirtIO device-specific features offered by client
1024 * @param devCapReadCallback Client handler to call upon guest read to device specific capabilities.
1025 * @param devCapWriteCallback Client handler to call upon guest write to device specific capabilities.
1026 * @param devStatusChangedCallback Client handler to call for major device status changes
1027 * @param queueNotifiedCallback Client handler for guest-to-host notifications that avail queue has ring data
1028 * @param ssmLiveExecCallback Client handler for SSM live exec
1029 * @param ssmSaveExecCallback Client handler for SSM save exec
1030 * @param ssmLoadExecCallback Client handler for SSM load exec
1031 * @param ssmLoadDoneCallback Client handler for SSM load done
1032 * @param cbDevSpecificCfg Size of virtio_pci_device_cap device-specific struct
1033 * @param pDevSpecificCfg Address of client's dev-specific configuration struct.
1034 */
1035int virtioConstruct(PPDMDEVINS pDevIns,
1036 void *pClientContext,
1037 VIRTIOHANDLE *phVirtio,
1038 PVIRTIOPCIPARAMS pPciParams,
1039 const char *pcszInstance,
1040 uint64_t uDevSpecificFeatures,
1041 PFNVIRTIODEVCAPREAD devCapReadCallback,
1042 PFNVIRTIODEVCAPWRITE devCapWriteCallback,
1043 PFNVIRTIOSTATUSCHANGED devStatusChangedCallback,
1044 PFNVIRTIOQUEUENOTIFIED queueNotifiedCallback,
1045 PFNSSMDEVLIVEEXEC ssmLiveExecCallback,
1046 PFNSSMDEVSAVEEXEC ssmSaveExecCallback,
1047 PFNSSMDEVLOADEXEC ssmLoadExecCallback,
1048 PFNSSMDEVLOADDONE ssmLoadDoneCallback,
1049 uint16_t cbDevSpecificCfg,
1050 void *pDevSpecificCfg)
1051{
1052
1053 int rc = VINF_SUCCESS;
1054
1055 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)RTMemAllocZ(sizeof(VIRTIOSTATE));
1056 if (!pVirtio)
1057 {
1058 PDMDEV_SET_ERROR(pDevIns, VERR_NO_MEMORY, N_("virtio: out of memory"));
1059 return VERR_NO_MEMORY;
1060 }
1061
1062 pVirtio->pClientContext = pClientContext;
1063
1064 /*
1065 * The host features offered include both device-specific features
1066 * and reserved feature bits (device independent)
1067 */
1068 pVirtio->uDeviceFeatures = VIRTIO_F_VERSION_1
1069 | VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED
1070 | uDevSpecificFeatures;
1071
1072 RTStrCopy(pVirtio->szInstance, sizeof(pVirtio->szInstance), pcszInstance);
1073
1074 pVirtio->pDevInsR3 = pDevIns;
1075 pVirtio->pDevInsR0 = PDMDEVINS_2_R0PTR(pDevIns);
1076 pVirtio->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
1077 pVirtio->uDeviceStatus = 0;
1078 pVirtio->cbDevSpecificCfg = cbDevSpecificCfg;
1079 pVirtio->pDevSpecificCfg = pDevSpecificCfg;
1080
1081 pVirtio->pPrevDevSpecificCfg = RTMemAllocZ(cbDevSpecificCfg);
1082 if (!pVirtio->pPrevDevSpecificCfg)
1083 {
1084 RTMemFree(pVirtio);
1085 PDMDEV_SET_ERROR(pDevIns, VERR_NO_MEMORY, N_("virtio: out of memory"));
1086 return VERR_NO_MEMORY;
1087 }
1088
1089 memcpy(pVirtio->pPrevDevSpecificCfg, pVirtio->pDevSpecificCfg, cbDevSpecificCfg);
1090 pVirtio->virtioCallbacks.pfnVirtioDevCapRead = devCapReadCallback;
1091 pVirtio->virtioCallbacks.pfnVirtioDevCapWrite = devCapWriteCallback;
1092 pVirtio->virtioCallbacks.pfnVirtioStatusChanged = devStatusChangedCallback;
1093 pVirtio->virtioCallbacks.pfnVirtioQueueNotified = queueNotifiedCallback;
1094 pVirtio->virtioCallbacks.pfnSSMDevLiveExec = ssmLiveExecCallback;
1095 pVirtio->virtioCallbacks.pfnSSMDevSaveExec = ssmSaveExecCallback;
1096 pVirtio->virtioCallbacks.pfnSSMDevLoadExec = ssmLoadExecCallback;
1097 pVirtio->virtioCallbacks.pfnSSMDevLoadDone = ssmLoadDoneCallback;
1098
1099
1100 /* Set PCI config registers (assume 32-bit mode) */
1101 PCIDevSetRevisionId (&pVirtio->dev, DEVICE_PCI_REVISION_ID_VIRTIO);
1102 PCIDevSetVendorId (&pVirtio->dev, DEVICE_PCI_VENDOR_ID_VIRTIO);
1103 PCIDevSetSubSystemVendorId (&pVirtio->dev, DEVICE_PCI_VENDOR_ID_VIRTIO);
1104 PCIDevSetDeviceId (&pVirtio->dev, pPciParams->uDeviceId);
1105 PCIDevSetClassBase (&pVirtio->dev, pPciParams->uClassBase);
1106 PCIDevSetClassSub (&pVirtio->dev, pPciParams->uClassSub);
1107 PCIDevSetClassProg (&pVirtio->dev, pPciParams->uClassProg);
1108 PCIDevSetSubSystemId (&pVirtio->dev, pPciParams->uSubsystemId);
1109 PCIDevSetInterruptLine (&pVirtio->dev, pPciParams->uInterruptLine);
1110 PCIDevSetInterruptPin (&pVirtio->dev, pPciParams->uInterruptPin);
1111
1112 /* Register PCI device */
1113 rc = PDMDevHlpPCIRegister(pDevIns, &pVirtio->dev);
1114 if (RT_FAILURE(rc))
1115 {
1116 RTMemFree(pVirtio);
1117 return PDMDEV_SET_ERROR(pDevIns, rc,
1118 N_("virtio: cannot register PCI Device")); /* can we put params in this error? */
1119 }
1120
1121 rc = PDMDevHlpSSMRegisterEx(pDevIns, VIRTIO_SAVEDSTATE_VERSION, sizeof(*pVirtio), NULL,
1122 NULL, virtioR3LiveExec, NULL, NULL, virtioR3SaveExec, NULL,
1123 NULL, virtioR3LoadExec, virtioR3LoadDone);
1124 if (RT_FAILURE(rc))
1125 {
1126 RTMemFree(pVirtio);
1127 return PDMDEV_SET_ERROR(pDevIns, rc,
1128 N_("virtio: cannot register SSM callbacks"));
1129 }
1130
1131 PDMDevHlpPCISetConfigCallbacks(pDevIns, &pVirtio->dev,
1132 virtioPciConfigRead, &pVirtio->pfnPciConfigReadOld,
1133 virtioPciConfigWrite, &pVirtio->pfnPciConfigWriteOld);
1134
1135
1136 /* Construct & map PCI vendor-specific capabilities for virtio host negotiation with guest driver */
1137
1138#if 0 && defined(VBOX_WITH_MSI_DEVICES) /* T.B.D. */
1139 uint8_t fMsiSupport = true;
1140#else
1141 uint8_t fMsiSupport = false;
1142#endif
1143
1144 /* The following capability mapped via VirtIO 1.0: struct virtio_pci_cfg_cap (VIRTIO_PCI_CFG_CAP_T)
1145 * as a mandatory but suboptimal alternative interface to host device capabilities, facilitating
1146 * access the memory of any BAR. If the guest uses it (the VirtIO driver on Linux doesn't),
1147 * Unlike Common, Notify, ISR and Device capabilities, it is accessed directly via PCI Config region.
1148 * therefore does not contribute to the capabilities region (BAR) the other capabilities use.
1149 */
1150#define CFGADDR2IDX(addr) ((uint64_t)addr - (uint64_t)&pVirtio->dev.abConfig)
1151
1152 PVIRTIO_PCI_CAP_T pCfg;
1153 uint32_t cbRegion = 0;
1154
1155 /* Common capability (VirtIO 1.0 spec, section 4.1.4.3) */
1156 pCfg = (PVIRTIO_PCI_CAP_T)&pVirtio->dev.abConfig[0x40];
1157 pCfg->uCfgType = VIRTIO_PCI_CAP_COMMON_CFG;
1158 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1159 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
1160 pCfg->uCapNext = CFGADDR2IDX(pCfg) + pCfg->uCapLen;
1161 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
1162 pCfg->uOffset = RT_ALIGN_32(0, 4); /* reminder, in case someone changes offset */
1163 pCfg->uLength = sizeof(VIRTIO_PCI_COMMON_CFG_T);
1164 cbRegion += pCfg->uLength;
1165 pVirtio->pCommonCfgCap = pCfg;
1166
1167 /*
1168 * Notify capability (VirtIO 1.0 spec, section 4.1.4.4). Note: uLength is based the choice
1169 * of this implementation that each queue's uQueueNotifyOff is set equal to (QueueSelect) ordinal
1170 * value of the queue */
1171 pCfg = (PVIRTIO_PCI_CAP_T)&pVirtio->dev.abConfig[pCfg->uCapNext];
1172 pCfg->uCfgType = VIRTIO_PCI_CAP_NOTIFY_CFG;
1173 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1174 pCfg->uCapLen = sizeof(VIRTIO_PCI_NOTIFY_CAP_T);
1175 pCfg->uCapNext = CFGADDR2IDX(pCfg) + pCfg->uCapLen;
1176 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
1177 pCfg->uOffset = pVirtio->pCommonCfgCap->uOffset + pVirtio->pCommonCfgCap->uLength;
1178 pCfg->uOffset = RT_ALIGN_32(pCfg->uOffset, 2);
1179 pCfg->uLength = VIRTQ_MAX_CNT * VIRTIO_NOTIFY_OFFSET_MULTIPLIER + 2; /* will change in VirtIO 1.1 */
1180 cbRegion += pCfg->uLength;
1181 pVirtio->pNotifyCap = (PVIRTIO_PCI_NOTIFY_CAP_T)pCfg;
1182 pVirtio->pNotifyCap->uNotifyOffMultiplier = VIRTIO_NOTIFY_OFFSET_MULTIPLIER;
1183
1184 /* ISR capability (VirtIO 1.0 spec, section 4.1.4.5)
1185 *
1186 * VirtIO 1.0 spec says 8-bit, unaligned in MMIO space. Example/diagram
1187 * of spec shows it as a 32-bit field with upper bits 'reserved'
1188 * Will take spec words more literally than the diagram for now.
1189 */
1190 pCfg = (PVIRTIO_PCI_CAP_T)&pVirtio->dev.abConfig[pCfg->uCapNext];
1191 pCfg->uCfgType = VIRTIO_PCI_CAP_ISR_CFG;
1192 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1193 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
1194 pCfg->uCapNext = CFGADDR2IDX(pCfg) + pCfg->uCapLen;
1195 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
1196 pCfg->uOffset = pVirtio->pNotifyCap->pciCap.uOffset + pVirtio->pNotifyCap->pciCap.uLength;
1197 pCfg->uLength = sizeof(uint8_t);
1198 cbRegion += pCfg->uLength;
1199 pVirtio->pIsrCap = pCfg;
1200
1201 /* PCI Cfg capability (VirtIO 1.0 spec, section 4.1.4.7)
1202 * This capability doesn't get page-MMIO mapped. Instead uBar, uOffset and uLength are intercepted
1203 * by trapping PCI configuration I/O and get modulated by consumers to locate fetch and read/write
1204 * values from any region. NOTE: The linux driver not only doesn't use this feature, it will not
1205 * even list it as present if uLength isn't non-zero and 4-byte-aligned as the linux driver is
1206 * initializing. */
1207
1208 pCfg = (PVIRTIO_PCI_CAP_T)&pVirtio->dev.abConfig[pCfg->uCapNext];
1209 pCfg->uCfgType = VIRTIO_PCI_CAP_PCI_CFG;
1210 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1211 pCfg->uCapLen = sizeof(VIRTIO_PCI_CFG_CAP_T);
1212 pCfg->uCapNext = (fMsiSupport || pVirtio->pDevSpecificCfg) ? CFGADDR2IDX(pCfg) + pCfg->uCapLen : 0;
1213 pCfg->uBar = 0;
1214 pCfg->uOffset = 0;
1215 pCfg->uLength = 0;
1216 cbRegion += pCfg->uLength;
1217 pVirtio->pPciCfgCap = (PVIRTIO_PCI_CFG_CAP_T)pCfg;
1218
1219 if (pVirtio->pDevSpecificCfg)
1220 {
1221 /* Following capability (via VirtIO 1.0, section 4.1.4.6). Client defines the
1222 * device-specific config fields struct and passes size to this constructor */
1223 pCfg = (PVIRTIO_PCI_CAP_T)&pVirtio->dev.abConfig[pCfg->uCapNext];
1224 pCfg->uCfgType = VIRTIO_PCI_CAP_DEVICE_CFG;
1225 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1226 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
1227 pCfg->uCapNext = fMsiSupport ? CFGADDR2IDX(pCfg) + pCfg->uCapLen : 0;
1228 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
1229 pCfg->uOffset = pVirtio->pIsrCap->uOffset + pVirtio->pIsrCap->uLength;
1230 pCfg->uOffset = RT_ALIGN_32(pCfg->uOffset, 4);
1231 pCfg->uLength = cbDevSpecificCfg;
1232 cbRegion += pCfg->uLength;
1233 pVirtio->pDeviceCap = pCfg;
1234 }
1235
1236 /* Set offset to first capability and enable PCI dev capabilities */
1237 PCIDevSetCapabilityList (&pVirtio->dev, 0x40);
1238 PCIDevSetStatus (&pVirtio->dev, VBOX_PCI_STATUS_CAP_LIST);
1239
1240 if (fMsiSupport)
1241 {
1242 PDMMSIREG aMsiReg;
1243 RT_ZERO(aMsiReg);
1244 aMsiReg.iMsixCapOffset = pCfg->uCapNext;
1245 aMsiReg.iMsixNextOffset = 0;
1246 aMsiReg.iMsixBar = 0;
1247 aMsiReg.cMsixVectors = 1;
1248 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &aMsiReg); /* see MsixR3init() */
1249 if (RT_FAILURE (rc))
1250 /* The following is moot, we need to flag no MSI-X support */
1251 PCIDevSetCapabilityList(&pVirtio->dev, 0x40);
1252 }
1253
1254 /* Linux drivers/virtio/virtio_pci_modern.c tries to map at least a page for the
1255 * 'unknown' device-specific capability without querying the capability to figure
1256 * out size, so pad with an extra page */
1257
1258 rc = PDMDevHlpPCIIORegionRegister(pDevIns, VIRTIO_REGION_PCI_CAP, RT_ALIGN_32(cbRegion + 0x1000, 0x1000),
1259 PCI_ADDRESS_SPACE_MEM, virtioR3Map);
1260 if (RT_FAILURE(rc))
1261 {
1262 RTMemFree(pVirtio->pPrevDevSpecificCfg);
1263 RTMemFree(pVirtio);
1264 return PDMDEV_SET_ERROR(pDevIns, rc,
1265 N_("virtio: cannot register PCI Capabilities address space"));
1266 }
1267 *phVirtio = (VIRTIOHANDLE)pVirtio;
1268 return rc;
1269}
1270
1271#ifdef VBOX_DEVICE_STRUCT_TESTCASE
1272# define virtioDumpState(x, s) do {} while (0)
1273#else
1274# ifdef DEBUG
1275
1276static void virtioDumpState(PVIRTIOSTATE pVirtio, const char *pcszCaller)
1277{
1278 Log2Func(("(called from %s)\n"
1279 " uDeviceFeatures = 0x%08x\n uDriverFeatures = 0x%08x\n"
1280 " uDeviceFeaturesSelect = 0x%04x\n uGuestFeaturesSelect = 0x%04x\n"
1281 " uDeviceStatus = 0x%02x\n uConfigGeneration = 0x%02x\n"
1282 " uQueueSelect = 0x%04x\n uNumQueues = 0x%04x\n"
1283 " uISR = 0x%02x\n fGenUpdatePending = 0x%02x\n"
1284 " uPciCfgDataOff = 0x%02x\n pGcPhysPciCapBase = %RGp\n"
1285 " pGcPhysCommonCfg = %RGp\n pGcPhysNotifyCap = %RGp\n"
1286 " pGcPhysIsrCap = %RGp\n pGcPhysDeviceCap = %RGp\n"
1287 " pDevSpecificCap = %p\n cbDevSpecificCap = 0x%04x\n"
1288 " pfnVirtioStatusChanged = %p\n pfnVirtioQueueNotified = %p\n"
1289 " pfnVirtioDevCapRead = %p\n pfnVirtioDevCapWrite = %p\n"
1290 " pfnSSMDevLiveExec = %p\n pfnSSMDevSaveExec = %p\n"
1291 " pfnSSMDevLoadExec = %p\n pfnSSMDevLoadDone = %p\n"
1292 " pfnPciConfigReadOld = %p\n pfnPciConfigWriteOld = %p\n",
1293 pcszCaller ? pcszCaller : "<unspecified>",
1294 pVirtio->uDeviceFeatures, pVirtio->uDriverFeatures, pVirtio->uDeviceFeaturesSelect,
1295 pVirtio->uDriverFeaturesSelect, pVirtio->uDeviceStatus, pVirtio->uConfigGeneration,
1296 pVirtio->uQueueSelect, pVirtio->uNumQueues, pVirtio->uISR, pVirtio->fGenUpdatePending,
1297 pVirtio->uPciCfgDataOff, pVirtio->pGcPhysPciCapBase, pVirtio->pGcPhysCommonCfg,
1298 pVirtio->pGcPhysNotifyCap, pVirtio->pGcPhysIsrCap, pVirtio->pGcPhysDeviceCap,
1299 pVirtio->pDevSpecificCfg, pVirtio->cbDevSpecificCfg, pVirtio->virtioCallbacks.pfnVirtioStatusChanged,
1300 pVirtio->virtioCallbacks.pfnVirtioQueueNotified, pVirtio->virtioCallbacks.pfnVirtioDevCapRead,
1301 pVirtio->virtioCallbacks.pfnVirtioDevCapWrite, pVirtio->virtioCallbacks.pfnSSMDevLiveExec,
1302 pVirtio->virtioCallbacks.pfnSSMDevSaveExec, pVirtio->virtioCallbacks.pfnSSMDevLoadExec,
1303 pVirtio->virtioCallbacks.pfnSSMDevLoadDone, pVirtio->pfnPciConfigReadOld,
1304 pVirtio->pfnPciConfigWriteOld
1305 ));
1306
1307 for (uint16_t i = 0; i < pVirtio->uNumQueues; i++)
1308 {
1309 Log2Func(("%s queue:\n",
1310 " virtqState[%u].uAvailIdx = %u\n virtqState[%u].uUsedIdx = %u\n"
1311 " uQueueSize[%u] = %u\n uQueueNotifyOff[%u] = %04x\n"
1312 " uQueueMsixVector[%u] = %04x\n uQueueEnable[%u] = %04x\n"
1313 " pGcPhysQueueDesc[%u] = %RGp\n pGcPhysQueueAvail[%u] = %RGp\n"
1314 " pGcPhysQueueUsed[%u] = %RGp\n",
1315 i, pVirtio->virtqState[i].szVirtqName, i, pVirtio->virtqState[i].uAvailIdx,
1316 i, pVirtio->virtqState[i].uUsedIdx, i, pVirtio->uQueueSize[i],
1317 i, pVirtio->uQueueNotifyOff[i],i, pVirtio->uQueueMsixVector[i],
1318 i, pVirtio->uQueueEnable[i], i, pVirtio->pGcPhysQueueDesc[i],
1319 i, pVirtio->pGcPhysQueueAvail[i], i, pVirtio->pGcPhysQueueUsed[i]
1320 ));
1321 }
1322}
1323# endif
1324#endif
1325
1326#ifdef IN_RING3
1327
 /**
  * @callback_method_impl{FNSSMDEVSAVEEXEC}
  *
  * Serializes the core virtio state (status, negotiated features, capability
  * locations and per-queue state), then hands off to the client device's own
  * save-exec callback.
  *
  * NOTE(review): rc is overwritten by every SSMR3Put* call below, so only the
  * status of the final client callback is actually returned; intermediate
  * failures are silently ignored.
  *
  * NOTE(review): host-side pointers (pDevSpecificCfg, all callback function
  * pointers, pfnPciConfigReadOld/WriteOld) are written into the saved state
  * here and read back in virtioR3LoadExec -- presumably only meaningful when
  * restoring into the same process image; confirm the intent.
  */
static DECLCALLBACK(int) virtioR3SaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
{
    PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);

    int rc = VINF_SUCCESS;
    virtioDumpState(pVirtio, "virtioSaveExec");

    /* Scalar device state.  The order here must match virtioR3LoadExec exactly. */
    rc = SSMR3PutBool(pSSM, pVirtio->fGenUpdatePending);
    rc = SSMR3PutU8(pSSM, pVirtio->uDeviceStatus);
    rc = SSMR3PutU8(pSSM, pVirtio->uConfigGeneration);
    rc = SSMR3PutU8(pSSM, pVirtio->uPciCfgDataOff);
    rc = SSMR3PutU8(pSSM, pVirtio->uISR);
    rc = SSMR3PutU16(pSSM, pVirtio->uQueueSelect);
    rc = SSMR3PutU32(pSSM, pVirtio->uDeviceFeaturesSelect);
    rc = SSMR3PutU32(pSSM, pVirtio->uDriverFeaturesSelect);
    rc = SSMR3PutU32(pSSM, pVirtio->uNumQueues);
    rc = SSMR3PutU32(pSSM, pVirtio->cbDevSpecificCfg);
    rc = SSMR3PutU64(pSSM, pVirtio->uDeviceFeatures);
    rc = SSMR3PutU64(pSSM, pVirtio->uDriverFeatures);
    /* Host pointers and callback addresses (see NOTE above). */
    rc = SSMR3PutU64(pSSM, (uint64_t)pVirtio->pDevSpecificCfg);
    rc = SSMR3PutU64(pSSM, (uint64_t)pVirtio->virtioCallbacks.pfnVirtioStatusChanged);
    rc = SSMR3PutU64(pSSM, (uint64_t)pVirtio->virtioCallbacks.pfnVirtioQueueNotified);
    rc = SSMR3PutU64(pSSM, (uint64_t)pVirtio->virtioCallbacks.pfnVirtioDevCapRead);
    rc = SSMR3PutU64(pSSM, (uint64_t)pVirtio->virtioCallbacks.pfnVirtioDevCapWrite);
    rc = SSMR3PutU64(pSSM, (uint64_t)pVirtio->virtioCallbacks.pfnSSMDevLiveExec);
    rc = SSMR3PutU64(pSSM, (uint64_t)pVirtio->virtioCallbacks.pfnSSMDevSaveExec);
    rc = SSMR3PutU64(pSSM, (uint64_t)pVirtio->virtioCallbacks.pfnSSMDevLoadExec);
    rc = SSMR3PutU64(pSSM, (uint64_t)pVirtio->virtioCallbacks.pfnSSMDevLoadDone);
    rc = SSMR3PutU64(pSSM, (uint64_t)pVirtio->pfnPciConfigReadOld);
    rc = SSMR3PutU64(pSSM, (uint64_t)pVirtio->pfnPciConfigWriteOld);
    /* Guest-physical locations of the mapped capability structures. */
    rc = SSMR3PutGCPhys(pSSM, pVirtio->pGcPhysCommonCfg);
    rc = SSMR3PutGCPhys(pSSM, pVirtio->pGcPhysNotifyCap);
    rc = SSMR3PutGCPhys(pSSM, pVirtio->pGcPhysIsrCap);
    rc = SSMR3PutGCPhys(pSSM, pVirtio->pGcPhysDeviceCap);
    rc = SSMR3PutGCPhys(pSSM, pVirtio->pGcPhysPciCapBase);

    /* Per-queue state: ring locations, notify/MSI-X config, shadow indices, name. */
    for (uint16_t i = 0; i < pVirtio->uNumQueues; i++)
    {
        rc = SSMR3PutGCPhys64(pSSM, pVirtio->pGcPhysQueueDesc[i]);
        rc = SSMR3PutGCPhys64(pSSM, pVirtio->pGcPhysQueueAvail[i]);
        rc = SSMR3PutGCPhys64(pSSM, pVirtio->pGcPhysQueueUsed[i]);
        rc = SSMR3PutU16(pSSM, pVirtio->uQueueNotifyOff[i]);
        rc = SSMR3PutU16(pSSM, pVirtio->uQueueMsixVector[i]);
        rc = SSMR3PutU16(pSSM, pVirtio->uQueueEnable[i]);
        rc = SSMR3PutU16(pSSM, pVirtio->uQueueSize[i]);
        rc = SSMR3PutU16(pSSM, pVirtio->virtqState[i].uAvailIdx);
        rc = SSMR3PutU16(pSSM, pVirtio->virtqState[i].uUsedIdx);
        rc = SSMR3PutMem(pSSM, pVirtio->virtqState[i].szVirtqName, 32);
    }

    /* Let the client device save its own state last. */
    rc = pVirtio->virtioCallbacks.pfnSSMDevSaveExec(pDevIns, pSSM);
    return rc;
}
1382
 /**
  * @callback_method_impl{FNSSMDEVLOADEXEC}
  *
  * Restores the core virtio state written by virtioR3SaveExec (field order must
  * match it exactly), then hands off to the client device's load-exec callback.
  * Core state is only present in the final pass; the client callback runs on
  * every pass.
  *
  * NOTE(review): rc is overwritten by every SSMR3Get* call, so intermediate
  * failures are silently ignored and only the client callback's status is
  * returned.
  *
  * NOTE(review): the callback function pointers and pfnPciConfigReadOld/WriteOld
  * are overwritten with pointer values taken from the saved state -- only valid
  * if the code is loaded at identical addresses; verify this is intended.
  */
static DECLCALLBACK(int) virtioR3LoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    RT_NOREF(uVersion);

    PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);

    int rc = VINF_SUCCESS;
    virtioDumpState(pVirtio, "virtioLoadExec");

    if (uPass == SSM_PASS_FINAL)
    {
        /* Scalar device state (same order as virtioR3SaveExec). */
        rc = SSMR3GetBool(pSSM, &pVirtio->fGenUpdatePending);
        rc = SSMR3GetU8(pSSM, &pVirtio->uDeviceStatus);
        rc = SSMR3GetU8(pSSM, &pVirtio->uConfigGeneration);
        rc = SSMR3GetU8(pSSM, &pVirtio->uPciCfgDataOff);
        rc = SSMR3GetU8(pSSM, &pVirtio->uISR);
        rc = SSMR3GetU16(pSSM, &pVirtio->uQueueSelect);
        rc = SSMR3GetU32(pSSM, &pVirtio->uDeviceFeaturesSelect);
        rc = SSMR3GetU32(pSSM, &pVirtio->uDriverFeaturesSelect);
        rc = SSMR3GetU32(pSSM, &pVirtio->uNumQueues);
        rc = SSMR3GetU32(pSSM, &pVirtio->cbDevSpecificCfg);
        rc = SSMR3GetU64(pSSM, &pVirtio->uDeviceFeatures);
        rc = SSMR3GetU64(pSSM, &pVirtio->uDriverFeatures);
        /* Host pointers and callback addresses (see NOTE above). */
        rc = SSMR3GetU64(pSSM, (uint64_t *)&pVirtio->pDevSpecificCfg);
        rc = SSMR3GetU64(pSSM, (uint64_t *)&pVirtio->virtioCallbacks.pfnVirtioStatusChanged);
        rc = SSMR3GetU64(pSSM, (uint64_t *)&pVirtio->virtioCallbacks.pfnVirtioQueueNotified);
        rc = SSMR3GetU64(pSSM, (uint64_t *)&pVirtio->virtioCallbacks.pfnVirtioDevCapRead);
        rc = SSMR3GetU64(pSSM, (uint64_t *)&pVirtio->virtioCallbacks.pfnVirtioDevCapWrite);
        rc = SSMR3GetU64(pSSM, (uint64_t *)&pVirtio->virtioCallbacks.pfnSSMDevLiveExec);
        rc = SSMR3GetU64(pSSM, (uint64_t *)&pVirtio->virtioCallbacks.pfnSSMDevSaveExec);
        rc = SSMR3GetU64(pSSM, (uint64_t *)&pVirtio->virtioCallbacks.pfnSSMDevLoadExec);
        rc = SSMR3GetU64(pSSM, (uint64_t *)&pVirtio->virtioCallbacks.pfnSSMDevLoadDone);
        rc = SSMR3GetU64(pSSM, (uint64_t *)&pVirtio->pfnPciConfigReadOld);
        rc = SSMR3GetU64(pSSM, (uint64_t *)&pVirtio->pfnPciConfigWriteOld);
        /* Guest-physical locations of the mapped capability structures. */
        rc = SSMR3GetGCPhys(pSSM, &pVirtio->pGcPhysCommonCfg);
        rc = SSMR3GetGCPhys(pSSM, &pVirtio->pGcPhysNotifyCap);
        rc = SSMR3GetGCPhys(pSSM, &pVirtio->pGcPhysIsrCap);
        rc = SSMR3GetGCPhys(pSSM, &pVirtio->pGcPhysDeviceCap);
        rc = SSMR3GetGCPhys(pSSM, &pVirtio->pGcPhysPciCapBase);

        /* Per-queue state: ring locations, notify/MSI-X config, shadow indices, name. */
        for (uint16_t i = 0; i < pVirtio->uNumQueues; i++)
        {
            rc = SSMR3GetGCPhys64(pSSM, &pVirtio->pGcPhysQueueDesc[i]);
            rc = SSMR3GetGCPhys64(pSSM, &pVirtio->pGcPhysQueueAvail[i]);
            rc = SSMR3GetGCPhys64(pSSM, &pVirtio->pGcPhysQueueUsed[i]);
            rc = SSMR3GetU16(pSSM, &pVirtio->uQueueNotifyOff[i]);
            rc = SSMR3GetU16(pSSM, &pVirtio->uQueueMsixVector[i]);
            rc = SSMR3GetU16(pSSM, &pVirtio->uQueueEnable[i]);
            rc = SSMR3GetU16(pSSM, &pVirtio->uQueueSize[i]);
            rc = SSMR3GetU16(pSSM, &pVirtio->virtqState[i].uAvailIdx);
            rc = SSMR3GetU16(pSSM, &pVirtio->virtqState[i].uUsedIdx);
            rc = SSMR3GetMem(pSSM, (void *)&pVirtio->virtqState[i].szVirtqName, 32);
        }
    }

    /* Let the client device restore its own state (runs on every pass). */
    rc = pVirtio->virtioCallbacks.pfnSSMDevLoadExec(pDevIns, pSSM, uVersion, uPass);

    return rc;
}
1443
1444/** @callback_method_impl{FNSSMDEVLOADDONE} */
1445static DECLCALLBACK(int) virtioR3LoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
1446{
1447 PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);
1448
1449 int rc = VINF_SUCCESS;
1450 virtioDumpState(pVirtio, "virtioLoadDone");
1451
1452 rc = pVirtio->virtioCallbacks.pfnSSMDevLoadDone(pDevIns, pSSM);
1453
1454 return rc;
1455}
1456
1457/** @callback_method_impl{FNSSMDEVLIVEEXEC} */
1458static DECLCALLBACK(int) virtioR3LiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass)
1459{
1460 PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);
1461
1462 int rc = VINF_SUCCESS;
1463 virtioDumpState(pVirtio, "virtioLiveExec");
1464
1465 rc = pVirtio->virtioCallbacks.pfnSSMDevLiveExec(pDevIns, pSSM, uPass);
1466
1467 return rc;
1468}
1469
1470 /**
1471 * Do a hex dump of a buffer
1472 *
1473 * @param pv Pointer to array to dump
1474 * @param cb Number of characters to dump
1475 * @param uBase Base address of offset addresses displayed
1476 * @param pszTitle Header line/title for the dump
1477 *
1478 */
1479 void virtioHexDump(uint8_t *pv, size_t cb, uint32_t uBase, const char *pszTitle)
1480 {
1481 if (pszTitle)
1482 Log(("%s [%d bytes]:\n", pszTitle, cb));
1483 for (uint32_t row = 0; row < RT_MAX(1, (cb / 16) + 1) && row * 16 < cb; row++)
1484 {
1485 Log(("%04x: ", row * 16 + uBase)); /* line address */
1486 for (uint8_t col = 0; col < 16; col++)
1487 {
1488 uint32_t idx = row * 16 + col;
1489 if (idx >= cb)
1490 Log(("-- %s", (col + 1) % 8 ? "" : " "));
1491 else
1492 Log(("%02x %s", pv[idx], (col + 1) % 8 ? "" : " "));
1493 }
1494 for (uint32_t idx = row * 16; idx < row * 16 + 16; idx++)
1495 Log(("%c", (idx >= cb) ? ' ' : (pv[idx] >= 0x20 && pv[idx] <= 0x7e ? pv[idx] : '.')));
1496 Log(("\n"));
1497 }
1498 Log(("\n"));
1499 }
1500
1501
1502/**
1503 * Formats the logging of a memory-mapped I/O input or output value
1504 *
1505 * @param pszFunc - To avoid displaying this function's name via __FUNCTION__ or Log2Func()
1506 * @param pszMember - Name of struct member
1507 * @param pv - Pointer to value
1508 * @param cb - Size of value
1509 * @param uOffset - Offset into member where value starts
1510 * @param fWrite - True if write I/O
1511 * @param fHasIndex - True if the member is indexed
1512 * @param idx - The index, if fHasIndex is true
1513 */
1514void virtioLogMappedIoValue(const char *pszFunc, const char *pszMember, size_t uMemberSize,
1515 const void *pv, uint32_t cb, uint32_t uOffset, bool fWrite,
1516 bool fHasIndex, uint32_t idx)
1517{
1518
1519#define FMTHEX(fmtout, val, cNybbles) \
1520 fmtout[cNybbles] = '\0'; \
1521 for (uint8_t i = 0; i < cNybbles; i++) \
1522 fmtout[(cNybbles - i) - 1] = "0123456789abcdef"[(val >> (i * 4)) & 0xf];
1523
1524#define MAX_STRING 64
1525 char pszIdx[MAX_STRING] = { 0 };
1526 char pszDepiction[MAX_STRING] = { 0 };
1527 char pszFormattedVal[MAX_STRING] = { 0 };
1528 if (fHasIndex)
1529 RTStrPrintf(pszIdx, sizeof(pszIdx), "[%d]", idx);
1530 if (cb == 1 || cb == 2 || cb == 4 || cb == 8)
1531 {
1532 /* manually padding with 0's instead of \b due to different impl of %x precision than printf() */
1533 uint64_t val = 0;
1534 memcpy((char *)&val, pv, cb);
1535 FMTHEX(pszFormattedVal, val, cb * 2);
1536 if (uOffset != 0 || cb != uMemberSize) /* display bounds if partial member access */
1537 RTStrPrintf(pszDepiction, sizeof(pszDepiction), "%s%s[%d:%d]",
1538 pszMember, pszIdx, uOffset, uOffset + cb - 1);
1539 else
1540 RTStrPrintf(pszDepiction, sizeof(pszDepiction), "%s%s", pszMember, pszIdx);
1541 RTStrPrintf(pszDepiction, sizeof(pszDepiction), "%-30s", pszDepiction);
1542 uint32_t first = 0;
1543 for (uint8_t i = 0; i < sizeof(pszDepiction); i++)
1544 if (pszDepiction[i] == ' ' && first++)
1545 pszDepiction[i] = '.';
1546 Log6Func(("%s: Guest %s %s 0x%s\n",
1547 pszFunc, fWrite ? "wrote" : "read ", pszDepiction, pszFormattedVal));
1548 }
1549 else /* odd number or oversized access, ... log inline hex-dump style */
1550 {
1551 Log6Func(("%s: Guest %s %s%s[%d:%d]: %.*Rhxs\n",
1552 pszFunc, fWrite ? "wrote" : "read ", pszMember,
1553 pszIdx, uOffset, uOffset + cb, cb, pv));
1554 }
1555}
1556
1557#endif /* IN_RING3 */
1558
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette