VirtualBox

source: vbox/trunk/src/VBox/Storage/VCICache.cpp@ 106297

Last change on this file since 106297 was 106061, checked in by vboxsync, 2 months ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 65.8 KB
1/* $Id: VCICache.cpp 106061 2024-09-16 14:03:52Z vboxsync $ */
2/** @file
3 * VCICacheCore - VirtualBox Cache Image, Core Code.
4 */
5
6/*
7 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_VD_RAW /** @todo logging group */
33#include <VBox/vd-cache-backend.h>
34#include <VBox/err.h>
35
36#include <VBox/log.h>
37#include <iprt/assert.h>
38#include <iprt/asm-mem.h>
39#include <iprt/asm.h>
40#include <iprt/mem.h>
41#include <iprt/file.h>
42
43#include "VDBackends.h"
44
45/*******************************************************************************
46* On disk data structures *
47*******************************************************************************/
48
49/** @note All structures which are written to the disk are written in camel case
50 * and packed. */
51
52/** Block size used internally. Because we cache sectors, the smallest unit we
53 * have to care about is 512 bytes. */
54#define VCI_BLOCK_SIZE 512
55
56/** Convert block number/size to byte offset/size. */
57#define VCI_BLOCK2BYTE(u) ((uint64_t)(u) << 9)
58
59/** Convert byte offset/size to block number/size. */
60#define VCI_BYTE2BLOCK(u) ((u) >> 9)
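/* For example, VCI_BLOCK2BYTE(3) yields 1536 and VCI_BYTE2BLOCK(1536) yields 3;
 * VCI_BYTE2BLOCK truncates values which are not multiples of VCI_BLOCK_SIZE, so
 * callers are expected to pass 512-byte aligned offsets and sizes. */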
61
62/**
63 * The VCI header - at the beginning of the file.
64 *
65 * All entries are stored in little endian order.
66 */
67#pragma pack(1)
68typedef struct VciHdr
69{
70 /** The signature to identify a cache image. */
71 uint32_t u32Signature;
72 /** Version of the layout of metadata in the cache. */
73 uint32_t u32Version;
74 /** Maximum size of the cache file in blocks.
75 * This includes all metadata. */
76 uint64_t cBlocksCache;
77 /** Flag indicating whether the cache was closed cleanly. */
78 uint8_t fUncleanShutdown;
79 /** Cache type. */
80 uint32_t u32CacheType;
81 /** Offset of the B+-Tree root in the image in blocks. */
82 uint64_t offTreeRoot;
83 /** Offset of the block allocation bitmap in blocks. */
84 uint64_t offBlkMap;
85 /** Size of the block allocation bitmap in blocks. */
86 uint32_t cBlkMap;
87 /** UUID of the image. */
88 RTUUID uuidImage;
89 /** Modification UUID for the cache. */
90 RTUUID uuidModification;
91 /** Reserved for future use. */
92 uint8_t abReserved[951];
93} VciHdr, *PVciHdr;
94#pragma pack()
95AssertCompileSize(VciHdr, 2 * VCI_BLOCK_SIZE);
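/* The fields before abReserved add up to 73 bytes, so the 951 reserved bytes pad
 * the packed header to exactly 1024 bytes, i.e. the two blocks asserted above. */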
96
97/** VCI signature to identify a valid image. */
98#define VCI_HDR_SIGNATURE UINT32_C(0x00494356) /* \0ICV */
99/** Current version we support. */
100#define VCI_HDR_VERSION UINT32_C(0x00000001)
101
102/** Value for an unclean cache shutdown. */
103#define VCI_HDR_UNCLEAN_SHUTDOWN UINT8_C(0x01)
104/** Value for a clean cache shutdown. */
105#define VCI_HDR_CLEAN_SHUTDOWN UINT8_C(0x00)
106
107/** Cache type: Dynamic image growing to the maximum value. */
108#define VCI_HDR_CACHE_TYPE_DYNAMIC UINT32_C(0x00000001)
109/** Cache type: Fixed image, space is preallocated. */
110#define VCI_HDR_CACHE_TYPE_FIXED UINT32_C(0x00000002)
111
112/**
113 * On disk representation of an extent describing a range of cached data.
114 *
115 * All entries are stored in little endian order.
116 */
117#pragma pack(1)
118typedef struct VciCacheExtent
119{
120 /** Block address of the previous extent in the LRU list. */
121 uint64_t u64ExtentPrev;
122 /** Block address of the next extent in the LRU list. */
123 uint64_t u64ExtentNext;
124 /** Flags (for compression, encryption etc.) - currently unused and should be always 0. */
125 uint8_t u8Flags;
126 /** Reserved */
127 uint8_t u8Reserved;
128 /** First block of cached data the extent represents. */
129 uint64_t u64BlockOffset;
130 /** Number of blocks the extent represents. */
131 uint32_t u32Blocks;
132 /** First block in the image where the data is stored. */
133 uint64_t u64BlockAddr;
134} VciCacheExtent, *PVciCacheExtent;
135#pragma pack()
136AssertCompileSize(VciCacheExtent, 38);
137
138/**
139 * On disk representation of an internal node.
140 *
141 * All entries are stored in little endian order.
142 */
143#pragma pack(1)
144typedef struct VciTreeNodeInternal
145{
146 /** First block of cached data the internal node represents. */
147 uint64_t u64BlockOffset;
148 /** Number of blocks the internal node represents. */
149 uint32_t u32Blocks;
150 /** Block address in the image where the next node in the tree is stored. */
151 uint64_t u64ChildAddr;
152} VciTreeNodeInternal, *PVciTreeNodeInternal;
153#pragma pack()
154AssertCompileSize(VciTreeNodeInternal, 20);
155
156/**
157 * On-disk representation of a node in the B+-Tree.
158 *
159 * All entries are stored in little endian order.
160 */
161#pragma pack(1)
162typedef struct VciTreeNode
163{
164 /** Type of the node (root, internal, leaf). */
165 uint8_t u8Type;
166 /** Data in the node. */
167 uint8_t au8Data[4095];
168} VciTreeNode, *PVciTreeNode;
169#pragma pack()
170AssertCompileSize(VciTreeNode, 8 * VCI_BLOCK_SIZE);
171
172/** Node type: Internal node containing links to other nodes (VciTreeNodeInternal). */
173#define VCI_TREE_NODE_TYPE_INTERNAL UINT8_C(0x01)
174/** Node type: Leaf of the tree (VciCacheExtent). */
175#define VCI_TREE_NODE_TYPE_LEAF UINT8_C(0x02)
176
177/** Number of cache extents described by one node. */
178#define VCI_TREE_EXTENTS_PER_NODE ((sizeof(VciTreeNode)-1) / sizeof(VciCacheExtent))
179/** Number of internal nodes managed by one tree node. */
180#define VCI_TREE_INTERNAL_NODES_PER_NODE ((sizeof(VciTreeNode)-1) / sizeof(VciTreeNodeInternal))
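/* With the 4096 byte node size above this evaluates to 107 extents per leaf
 * (4095 / 38) and 204 entries per internal node (4095 / 20). */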
181
182/**
183 * VCI block bitmap header.
184 *
185 * All entries are stored in little endian order.
186 */
187#pragma pack(1)
188typedef struct VciBlkMap
189{
190 /** Magic of the block bitmap. */
191 uint32_t u32Magic;
192 /** Version of the block bitmap. */
193 uint32_t u32Version;
194 /** Number of blocks this block map manages. */
195 uint64_t cBlocks;
196 /** Number of free blocks. */
197 uint64_t cBlocksFree;
198 /** Number of blocks allocated for metadata. */
199 uint64_t cBlocksAllocMeta;
200 /** Number of blocks allocated for actual cached data. */
201 uint64_t cBlocksAllocData;
202 /** Reserved for future use. */
203 uint8_t au8Reserved[472];
204} VciBlkMap, *PVciBlkMap;
205#pragma pack()
206AssertCompileSize(VciBlkMap, VCI_BLOCK_SIZE);
207
208/** The magic which identifies a block map. */
209#define VCI_BLKMAP_MAGIC UINT32_C(0x4b4c4256) /* KLBV */
210/** Current version. */
211#define VCI_BLKMAP_VERSION UINT32_C(0x00000001)
212
213/** Block bitmap entry */
214typedef uint8_t VciBlkMapEnt;
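/* Each bit in the on-disk bitmap describes one VCI_BLOCK_SIZE block, so the
 * bitmap data takes cBlocks / 8 bytes (see vciBlkMapCreate and vciBlkMapSave). */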
215
216
217/*********************************************************************************************************************************
218* Constants And Macros, Structures and Typedefs *
219*********************************************************************************************************************************/
220
221/**
222 * Block range descriptor.
223 */
224typedef struct VCIBLKRANGEDESC
225{
226 /** Previous entry in the list. */
227 struct VCIBLKRANGEDESC *pPrev;
228 /** Next entry in the list. */
229 struct VCIBLKRANGEDESC *pNext;
230 /** Start address of the range. */
231 uint64_t offAddrStart;
232 /** Number of blocks in the range. */
233 uint64_t cBlocks;
234 /** Flag whether the range is free or allocated. */
235 bool fFree;
236} VCIBLKRANGEDESC, *PVCIBLKRANGEDESC;
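/* In memory the bitmap is kept as a doubly linked list of these descriptors,
 * alternating between free and allocated runs; a bitmap beginning with
 * 0,0,1,1,1,0,... becomes the ranges {0,2,free}, {2,3,used}, {5,...,free}. */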
237
238/**
239 * Block map for the cache image - in memory structure.
240 */
241typedef struct VCIBLKMAP
242{
243 /** Number of blocks the map manages. */
244 uint64_t cBlocks;
245 /** Number of blocks allocated for metadata. */
246 uint64_t cBlocksAllocMeta;
247 /** Number of blocks allocated for actual cached data. */
248 uint64_t cBlocksAllocData;
249 /** Number of free blocks. */
250 uint64_t cBlocksFree;
251
252 /** Pointer to the head of the block range list. */
253 PVCIBLKRANGEDESC pRangesHead;
254 /** Pointer to the tail of the block range list. */
255 PVCIBLKRANGEDESC pRangesTail;
256
257} VCIBLKMAP;
258/** Pointer to a block map. */
259typedef VCIBLKMAP *PVCIBLKMAP;
260
261/**
262 * B+-Tree node header.
263 */
264typedef struct VCITREENODE
265{
266 /** Type of the node (VCI_TREE_NODE_TYPE_*). */
267 uint8_t u8Type;
268 /** Block address where the node is stored. */
269 uint64_t u64BlockAddr;
270 /** Pointer to the parent. */
271 struct VCITREENODE *pParent;
272} VCITREENODE, *PVCITREENODE;
273
274/**
275 * B+-Tree node pointer.
276 */
277typedef struct VCITREENODEPTR
278{
279 /** Flag whether the node is in memory or still on the disk. */
280 bool fInMemory;
281 /** Type dependent data. */
282 union
283 {
284 /** Pointer to an in-memory node. */
285 PVCITREENODE pNode;
286 /** Start block address of the node. */
287 uint64_t offAddrBlockNode;
288 } u;
289} VCITREENODEPTR, *PVCITREENODEPTR;
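/* Child pointers start out as on-disk block addresses (fInMemory = false) and are
 * replaced by in-memory pointers the first time the child node is loaded, see
 * vciTreeNodeImage2Host() and vciCacheExtentLookup(). */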
290
291/**
292 * Internal node.
293 */
294typedef struct VCINODEINTERNAL
295{
296 /** First block of cached data the internal node represents. */
297 uint64_t u64BlockOffset;
298 /** Number of blocks the internal node represents. */
299 uint32_t u32Blocks;
300 /** Pointer to the child node. */
301 VCITREENODEPTR PtrChild;
302} VCINODEINTERNAL, *PVCINODEINTERNAL;
303
304/**
305 * An in-memory internal B+-tree node.
306 */
307typedef struct VCITREENODEINT
308{
309 /** Node core. */
310 VCITREENODE Core;
311 /** Number of used nodes. */
312 unsigned cUsedNodes;
313 /** Array of internal nodes. */
314 VCINODEINTERNAL aIntNodes[VCI_TREE_INTERNAL_NODES_PER_NODE];
315} VCITREENODEINT, *PVCITREENODEINT;
316
317/**
318 * An in-memory cache extent.
319 */
320typedef struct VCICACHEEXTENT
321{
322 /** First block of cached data the extent represents. */
323 uint64_t u64BlockOffset;
324 /** Number of blocks the extent represents. */
325 uint32_t u32Blocks;
326 /** First block in the image where the data is stored. */
327 uint64_t u64BlockAddr;
328} VCICACHEEXTENT, *PVCICACHEEXTENT;
329
330/**
331 * An in-memory leaf B+-tree node.
332 */
333typedef struct VCITREENODELEAF
334{
335 /** Node core. */
336 VCITREENODE Core;
337 /** Next leaf node in the list. */
338 struct VCITREENODELEAF *pNext;
339 /** Number of used nodes. */
340 unsigned cUsedNodes;
341 /** The extents in the node. */
342 VCICACHEEXTENT aExtents[VCI_TREE_EXTENTS_PER_NODE];
343} VCITREENODELEAF, *PVCITREENODELEAF;
344
345/**
346 * VCI image data structure.
347 */
348typedef struct VCICACHE
349{
350 /** Image name. */
351 const char *pszFilename;
352 /** Storage handle. */
353 PVDIOSTORAGE pStorage;
354
355 /** Pointer to the per-disk VD interface list. */
356 PVDINTERFACE pVDIfsDisk;
357 /** Pointer to the per-image VD interface list. */
358 PVDINTERFACE pVDIfsImage;
359 /** Error interface. */
360 PVDINTERFACEERROR pIfError;
361 /** I/O interface. */
362 PVDINTERFACEIOINT pIfIo;
363
364 /** Open flags passed by VBoxHD layer. */
365 unsigned uOpenFlags;
366 /** Image flags defined during creation or determined during open. */
367 unsigned uImageFlags;
368 /** Total size of the image. */
369 uint64_t cbSize;
370
371 /** Offset of the B+-Tree in the image in bytes. */
372 uint64_t offTreeRoot;
373 /** Pointer to the root node of the B+-Tree. */
374 PVCITREENODE pRoot;
375 /** Offset to the block allocation bitmap in bytes. */
376 uint64_t offBlksBitmap;
377 /** Block map. */
378 PVCIBLKMAP pBlkMap;
379} VCICACHE, *PVCICACHE;
380
381/** No block free in bitmap error code. */
382#define VERR_VCI_NO_BLOCKS_FREE (-65536)
383
384/** Flags for the block map allocator. */
385#define VCIBLKMAP_ALLOC_DATA 0
386#define VCIBLKMAP_ALLOC_META RT_BIT(0)
387#define VCIBLKMAP_ALLOC_MASK 0x1
388
389
390/*********************************************************************************************************************************
391* Static Variables *
392*********************************************************************************************************************************/
393
394/** NULL-terminated array of supported file extensions. */
395static const char *const s_apszVciFileExtensions[] =
396{
397 "vci",
398 NULL
399};
400
401
402/*********************************************************************************************************************************
403* Internal Functions *
404*********************************************************************************************************************************/
405
406/**
407 * Internal. Flush image data to disk.
408 */
409static int vciFlushImage(PVCICACHE pCache)
410{
411 int rc = VINF_SUCCESS;
412
413 if ( pCache->pStorage
414 && !(pCache->uOpenFlags & VD_OPEN_FLAGS_READONLY))
415 {
416 rc = vdIfIoIntFileFlushSync(pCache->pIfIo, pCache->pStorage);
417 }
418
419 return rc;
420}
421
422/**
423 * Internal. Free all allocated space for representing an image except pCache,
424 * and optionally delete the image from disk.
425 */
426static int vciFreeImage(PVCICACHE pCache, bool fDelete)
427{
428 int rc = VINF_SUCCESS;
429
430 /* Freeing a never allocated image (e.g. because the open failed) is
431 * not signalled as an error. After all nothing bad happens. */
432 if (pCache)
433 {
434 if (pCache->pStorage)
435 {
436 /* No point in updating a file that is going to be deleted anyway. */
437 if (!fDelete)
438 vciFlushImage(pCache);
439
440 vdIfIoIntFileClose(pCache->pIfIo, pCache->pStorage);
441 pCache->pStorage = NULL;
442 }
443
444 if (fDelete && pCache->pszFilename)
445 vdIfIoIntFileDelete(pCache->pIfIo, pCache->pszFilename);
446 }
447
448 LogFlowFunc(("returns %Rrc\n", rc));
449 return rc;
450}
451
452/**
453 * Creates a new block map which can manage the given number of blocks.
454 *
455 * The size of the bitmap is aligned to the VCI block size.
456 *
457 * @returns VBox status code.
458 * @param cBlocks The number of blocks the bitmap can manage.
459 * @param ppBlkMap Where to store the pointer to the block bitmap.
460 * @param pcBlkMap Where to store the size of the block bitmap in blocks
461 * needed on the disk.
462 */
463static int vciBlkMapCreate(uint64_t cBlocks, PVCIBLKMAP *ppBlkMap, uint32_t *pcBlkMap)
464{
465 int rc = VINF_SUCCESS;
466 uint32_t cbBlkMap = RT_ALIGN_Z(cBlocks / sizeof(VciBlkMapEnt) / 8, VCI_BLOCK_SIZE);
467 PVCIBLKMAP pBlkMap = (PVCIBLKMAP)RTMemAllocZ(sizeof(VCIBLKMAP));
468 PVCIBLKRANGEDESC pFree = (PVCIBLKRANGEDESC)RTMemAllocZ(sizeof(VCIBLKRANGEDESC));
469
470 LogFlowFunc(("cBlocks=%u ppBlkMap=%#p pcBlkMap=%#p\n", cBlocks, ppBlkMap, pcBlkMap));
471
472 if (pBlkMap && pFree)
473 {
474 pBlkMap->cBlocks = cBlocks;
475 pBlkMap->cBlocksAllocMeta = 0;
476 pBlkMap->cBlocksAllocData = 0;
477 pBlkMap->cBlocksFree = cBlocks;
478
479 pFree->pPrev = NULL;
480 pFree->pNext = NULL;
481 pFree->offAddrStart = 0;
482 pFree->cBlocks = cBlocks;
483 pFree->fFree = true;
484
485 pBlkMap->pRangesHead = pFree;
486 pBlkMap->pRangesTail = pFree;
487
488 Assert(!((cbBlkMap + sizeof(VciBlkMap)) % VCI_BLOCK_SIZE));
489 *ppBlkMap = pBlkMap;
490 *pcBlkMap = VCI_BYTE2BLOCK(cbBlkMap + sizeof(VciBlkMap));
491 }
492 else
493 {
494 if (pBlkMap)
495 RTMemFree(pBlkMap);
496 if (pFree)
497 RTMemFree(pFree);
498
499 rc = VERR_NO_MEMORY;
500 }
501
502 LogFlowFunc(("returns rc=%Rrc cBlkMap=%u\n", rc, *pcBlkMap));
503 return rc;
504}
505
506#if 0 /** @todo unused vciBlkMapDestroy */
507/**
508 * Frees a block map.
509 *
510 * @param pBlkMap The block bitmap to destroy.
511 */
512static void vciBlkMapDestroy(PVCIBLKMAP pBlkMap)
513{
514 LogFlowFunc(("pBlkMap=%#p\n", pBlkMap));
515
516 PVCIBLKRANGEDESC pRangeCur = pBlkMap->pRangesHead;
517
518 while (pRangeCur)
519 {
520 PVCIBLKRANGEDESC pTmp = pRangeCur;
521
522 pRangeCur = pRangeCur->pNext; /* Advance first so the freed node is not dereferenced. */
523
524 RTMemFree(pTmp);
525 }
526
527 RTMemFree(pBlkMap);
528
529 LogFlowFunc(("returns\n"));
530}
531#endif
532
533/**
534 * Loads the block map from the specified medium and creates all necessary
535 * in memory structures to manage used and free blocks.
536 *
537 * @returns VBox status code.
538 * @param pStorage Storage handle to read the block bitmap from.
539 * @param offBlkMap Start of the block bitmap in blocks.
540 * @param cBlkMap Size of the block bitmap on the disk in blocks.
541 * @param ppBlkMap Where to store the block bitmap on success.
542 */
543static int vciBlkMapLoad(PVCICACHE pStorage, uint64_t offBlkMap, uint32_t cBlkMap, PVCIBLKMAP *ppBlkMap)
544{
545 int rc = VINF_SUCCESS;
546 VciBlkMap BlkMap;
547
548 LogFlowFunc(("pStorage=%#p offBlkMap=%llu cBlkMap=%u ppBlkMap=%#p\n",
549 pStorage, offBlkMap, cBlkMap, ppBlkMap));
550
551 if (cBlkMap >= VCI_BYTE2BLOCK(sizeof(VciBlkMap)))
552 {
553 cBlkMap -= VCI_BYTE2BLOCK(sizeof(VciBlkMap));
554
555 rc = vdIfIoIntFileReadSync(pStorage->pIfIo, pStorage->pStorage, offBlkMap,
556 &BlkMap, VCI_BYTE2BLOCK(sizeof(VciBlkMap)));
557 if (RT_SUCCESS(rc))
558 {
559 offBlkMap += VCI_BYTE2BLOCK(sizeof(VciBlkMap));
560
561 BlkMap.u32Magic = RT_LE2H_U32(BlkMap.u32Magic);
562 BlkMap.u32Version = RT_LE2H_U32(BlkMap.u32Version);
563 BlkMap.cBlocks = RT_LE2H_U64(BlkMap.cBlocks);
564 BlkMap.cBlocksFree = RT_LE2H_U64(BlkMap.cBlocksFree);
565 BlkMap.cBlocksAllocMeta = RT_LE2H_U64(BlkMap.cBlocksAllocMeta);
566 BlkMap.cBlocksAllocData = RT_LE2H_U64(BlkMap.cBlocksAllocData);
567
568 if ( BlkMap.u32Magic == VCI_BLKMAP_MAGIC
569 && BlkMap.u32Version == VCI_BLKMAP_VERSION
570 && BlkMap.cBlocks == BlkMap.cBlocksFree + BlkMap.cBlocksAllocMeta + BlkMap.cBlocksAllocData
571 && VCI_BYTE2BLOCK(BlkMap.cBlocks / 8) == cBlkMap)
572 {
573 PVCIBLKMAP pBlkMap = (PVCIBLKMAP)RTMemAllocZ(sizeof(VCIBLKMAP));
574 if (pBlkMap)
575 {
576 pBlkMap->cBlocks = BlkMap.cBlocks;
577 pBlkMap->cBlocksFree = BlkMap.cBlocksFree;
578 pBlkMap->cBlocksAllocMeta = BlkMap.cBlocksAllocMeta;
579 pBlkMap->cBlocksAllocData = BlkMap.cBlocksAllocData;
580
581 /* Load the bitmap and construct the range list. */
582 PVCIBLKRANGEDESC pRangeCur = (PVCIBLKRANGEDESC)RTMemAllocZ(sizeof(VCIBLKRANGEDESC));
583
584 if (pRangeCur)
585 {
586 uint8_t abBitmapBuffer[16 * _1K];
587 uint32_t cBlocksRead = 0;
588 uint64_t cBlocksLeft = VCI_BYTE2BLOCK(pBlkMap->cBlocks / 8);
589
590 cBlocksRead = RT_MIN(VCI_BYTE2BLOCK(sizeof(abBitmapBuffer)), cBlocksLeft);
591 rc = vdIfIoIntFileReadSync(pStorage->pIfIo, pStorage->pStorage,
592 offBlkMap, abBitmapBuffer,
593 cBlocksRead);
594
595 if (RT_SUCCESS(rc))
596 {
597 pRangeCur->fFree = !(abBitmapBuffer[0] & 0x01);
598 pRangeCur->offAddrStart = 0;
599 pRangeCur->cBlocks = 0;
600 pRangeCur->pNext = NULL;
601 pRangeCur->pPrev = NULL;
602 pBlkMap->pRangesHead = pRangeCur;
603 pBlkMap->pRangesTail = pRangeCur;
604 }
605 else
606 RTMemFree(pRangeCur);
607
608 while ( RT_SUCCESS(rc)
609 && cBlocksLeft)
610 {
611 int iBit = 0;
612 uint32_t cBits = VCI_BLOCK2BYTE(cBlocksRead) * 8;
613 uint32_t iBitPrev = 0xffffffff;
614
615 while (cBits)
616 {
617 if (pRangeCur->fFree)
618 {
619 /* Check for the first set bit. */
620 iBit = ASMBitNextSet(abBitmapBuffer, cBits, iBitPrev);
621 }
622 else
623 {
624 /* Check for the first free bit. */
625 iBit = ASMBitNextClear(abBitmapBuffer, cBits, iBitPrev);
626 }
627
628 if (iBit == -1)
629 {
630 /* No change. */
631 pRangeCur->cBlocks += cBits;
632 cBits = 0;
633 }
634 else
635 {
636 Assert((uint32_t)iBit < cBits);
637 pRangeCur->cBlocks += iBit;
638
639 /* Create a new range descriptor. */
640 PVCIBLKRANGEDESC pRangeNew = (PVCIBLKRANGEDESC)RTMemAllocZ(sizeof(VCIBLKRANGEDESC));
641 if (!pRangeNew)
642 {
643 rc = VERR_NO_MEMORY;
644 break;
645 }
646
647 pRangeNew->fFree = !pRangeCur->fFree;
648 pRangeNew->offAddrStart = pRangeCur->offAddrStart + pRangeCur->cBlocks;
649 pRangeNew->cBlocks = 0;
650 pRangeNew->pPrev = pRangeCur;
651 pRangeCur->pNext = pRangeNew;
652 pBlkMap->pRangesTail = pRangeNew;
653 pRangeCur = pRangeNew;
654 cBits -= iBit;
655 iBitPrev = iBit;
656 }
657 }
658
659 cBlocksLeft -= cBlocksRead;
660 offBlkMap += cBlocksRead;
661
662 if ( RT_SUCCESS(rc)
663 && cBlocksLeft)
664 {
665 /* Read next chunk. */
666 cBlocksRead = RT_MIN(VCI_BYTE2BLOCK(sizeof(abBitmapBuffer)), cBlocksLeft);
667 rc = vdIfIoIntFileReadSync(pStorage->pIfIo, pStorage->pStorage,
668 offBlkMap, abBitmapBuffer, cBlocksRead);
669 }
670 }
671 }
672 else
673 rc = VERR_NO_MEMORY;
674
675 if (RT_SUCCESS(rc))
676 {
677 *ppBlkMap = pBlkMap;
678 LogFlowFunc(("return success\n"));
679 return VINF_SUCCESS;
680 }
681
682 RTMemFree(pBlkMap);
683 }
684 else
685 rc = VERR_NO_MEMORY;
686 }
687 else
688 rc = VERR_VD_GEN_INVALID_HEADER;
689 }
690 else
691 rc = VERR_VD_GEN_INVALID_HEADER;
692 }
693 else
694 rc = VERR_VD_GEN_INVALID_HEADER;
695
696 LogFlowFunc(("returns rc=%Rrc\n", rc));
697 return rc;
698}
699
700/**
701 * Saves the block map in the cache image. All necessary on disk structures
702 * are written.
703 *
704 * @returns VBox status code.
705 * @param pBlkMap The block bitmap to save.
706 * @param pStorage Where the block bitmap should be written to.
707 * @param offBlkMap Start of the block bitmap in blocks.
708 * @param cBlkMap Size of the block bitmap on the disk in blocks.
709 */
710static int vciBlkMapSave(PVCIBLKMAP pBlkMap, PVCICACHE pStorage, uint64_t offBlkMap, uint32_t cBlkMap)
711{
712 int rc = VINF_SUCCESS;
713 VciBlkMap BlkMap;
714
715 LogFlowFunc(("pBlkMap=%#p pStorage=%#p offBlkMap=%llu cBlkMap=%u\n",
716 pBlkMap, pStorage, offBlkMap, cBlkMap));
717
718 /* Make sure the number of blocks allocated for us matches our expectations. */
719 if (VCI_BYTE2BLOCK(pBlkMap->cBlocks / 8) + VCI_BYTE2BLOCK(sizeof(VciBlkMap)) == cBlkMap)
720 {
721 /* Setup the header */
722 memset(&BlkMap, 0, sizeof(VciBlkMap));
723
724 BlkMap.u32Magic = RT_H2LE_U32(VCI_BLKMAP_MAGIC);
725 BlkMap.u32Version = RT_H2LE_U32(VCI_BLKMAP_VERSION);
726 BlkMap.cBlocks = RT_H2LE_U64(pBlkMap->cBlocks);
727 BlkMap.cBlocksFree = RT_H2LE_U64(pBlkMap->cBlocksFree);
728 BlkMap.cBlocksAllocMeta = RT_H2LE_U64(pBlkMap->cBlocksAllocMeta);
729 BlkMap.cBlocksAllocData = RT_H2LE_U64(pBlkMap->cBlocksAllocData);
730
731 rc = vdIfIoIntFileWriteSync(pStorage->pIfIo, pStorage->pStorage, offBlkMap,
732 &BlkMap, VCI_BYTE2BLOCK(sizeof(VciBlkMap)));
733 if (RT_SUCCESS(rc))
734 {
735 uint8_t abBitmapBuffer[16*_1K];
736 unsigned iBit = 0;
737 PVCIBLKRANGEDESC pCur = pBlkMap->pRangesHead;
738
739 offBlkMap += VCI_BYTE2BLOCK(sizeof(VciBlkMap));
740
741 /* Write the descriptor ranges. */
742 while (pCur)
743 {
744 uint64_t cBlocks = pCur->cBlocks;
745
746 while (cBlocks)
747 {
748 uint64_t cBlocksMax = RT_MIN(cBlocks, sizeof(abBitmapBuffer) * 8 - iBit);
749
750 if (pCur->fFree)
751 ASMBitClearRange(abBitmapBuffer, iBit, iBit + cBlocksMax);
752 else
753 ASMBitSetRange(abBitmapBuffer, iBit, iBit + cBlocksMax);
754
755 iBit += cBlocksMax;
756 cBlocks -= cBlocksMax;
757
758 if (iBit == sizeof(abBitmapBuffer) * 8)
759 {
760 /* Buffer is full, write to file and reset. */
761 rc = vdIfIoIntFileWriteSync(pStorage->pIfIo, pStorage->pStorage,
762 offBlkMap, abBitmapBuffer,
763 VCI_BYTE2BLOCK(sizeof(abBitmapBuffer)));
764 if (RT_FAILURE(rc))
765 break;
766
767 offBlkMap += VCI_BYTE2BLOCK(sizeof(abBitmapBuffer));
768 iBit = 0;
769 }
770 }
771
772 pCur = pCur->pNext;
773 }
774
775 Assert(iBit % 8 == 0);
776
777 if (RT_SUCCESS(rc) && iBit)
778 rc = vdIfIoIntFileWriteSync(pStorage->pIfIo, pStorage->pStorage,
779 offBlkMap, abBitmapBuffer, VCI_BYTE2BLOCK(iBit / 8));
780 }
781 }
782 else
783 rc = VERR_INTERNAL_ERROR; /** @todo Better error code. */
784
785 LogFlowFunc(("returns rc=%Rrc\n", rc));
786 return rc;
787}
788
789#if 0 /* unused */
790/**
791 * Finds the range block describing the given block address.
792 *
793 * @returns Pointer to the block range descriptor or NULL if none could be found.
794 * @param pBlkMap The block bitmap to search on.
795 * @param offBlockAddr The block address to search for.
796 */
797static PVCIBLKRANGEDESC vciBlkMapFindByBlock(PVCIBLKMAP pBlkMap, uint64_t offBlockAddr)
798{
799 PVCIBLKRANGEDESC pBlk = pBlkMap->pRangesHead;
800
801 while ( pBlk
802 && pBlk->offAddrStart < offBlockAddr)
803 pBlk = pBlk->pNext;
804
805 return pBlk;
806}
807#endif
808
809/**
810 * Allocates the given number of blocks in the bitmap and returns the start block address.
811 *
812 * @returns VBox status code.
813 * @param pBlkMap The block bitmap to allocate the blocks from.
814 * @param cBlocks How many blocks to allocate.
815 * @param fFlags Allocation flags, combination of VCIBLKMAP_ALLOC_*.
816 * @param poffBlockAddr Where to store the start address of the allocated region.
817 */
818static int vciBlkMapAllocate(PVCIBLKMAP pBlkMap, uint32_t cBlocks, uint32_t fFlags,
819 uint64_t *poffBlockAddr)
820{
821 PVCIBLKRANGEDESC pBestFit = NULL;
822 PVCIBLKRANGEDESC pCur = NULL;
823 int rc = VINF_SUCCESS;
824
825 LogFlowFunc(("pBlkMap=%#p cBlocks=%u poffBlockAddr=%#p\n",
826 pBlkMap, cBlocks, poffBlockAddr));
827
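/* Best-fit search: walk the whole range list and remember the smallest free
 * range that can still hold the request. */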
828 pCur = pBlkMap->pRangesHead;
829
830 while (pCur)
831 {
832 if ( pCur->fFree
833 && pCur->cBlocks >= cBlocks)
834 {
835 if ( !pBestFit
836 || pCur->cBlocks < pBestFit->cBlocks)
837 {
838 pBestFit = pCur;
839 /* Stop searching if the size matches exactly. */
840 if (pBestFit->cBlocks == cBlocks)
841 break;
842 }
843 }
844 pCur = pCur->pNext;
845 }
846
847 Assert(!pBestFit || pBestFit->fFree);
848
849 if (pBestFit)
850 {
851 pBestFit->fFree = false;
852 *poffBlockAddr = pBestFit->offAddrStart; /* Valid for exact fits as well as splits. */
853 if (pBestFit->cBlocks > cBlocks)
854 {
855 /* Create a new free block. */
856 PVCIBLKRANGEDESC pFree = (PVCIBLKRANGEDESC)RTMemAllocZ(sizeof(VCIBLKRANGEDESC));
857
858 if (pFree)
859 {
860 pFree->fFree = true;
861 pFree->cBlocks = pBestFit->cBlocks - cBlocks;
862 pBestFit->cBlocks -= pFree->cBlocks;
863 pFree->offAddrStart = pBestFit->offAddrStart + cBlocks;
864
865 /* Link into the list. */
866 pFree->pNext = pBestFit->pNext;
867 pBestFit->pNext = pFree;
868 pFree->pPrev = pBestFit;
869 if (!pFree->pNext)
870 pBlkMap->pRangesTail = pFree;
871
872
873 }
874 else
875 {
876 rc = VERR_NO_MEMORY;
877 pBestFit->fFree = true;
878 }
879 }
880 }
881 else
882 rc = VERR_VCI_NO_BLOCKS_FREE;
883
884 if (RT_SUCCESS(rc))
885 {
886 if ((fFlags & VCIBLKMAP_ALLOC_MASK) == VCIBLKMAP_ALLOC_DATA)
887 pBlkMap->cBlocksAllocData += cBlocks;
888 else
889 pBlkMap->cBlocksAllocMeta += cBlocks;
890
891 pBlkMap->cBlocksFree -= cBlocks;
892 }
893
894 LogFlowFunc(("returns rc=%Rrc offBlockAddr=%llu\n", rc, *poffBlockAddr));
895 return rc;
896}
897
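#if 0 /* Usage sketch, not built: how a caller would ask the block map for eight
       * metadata blocks. vciBlkMapAllocateExample is a hypothetical helper; the
       * real callers live in vciCreateImage(). */
static int vciBlkMapAllocateExample(PVCIBLKMAP pBlkMap)
{
    uint64_t offBlock = 0;
    int rc = vciBlkMapAllocate(pBlkMap, 8 /* cBlocks */, VCIBLKMAP_ALLOC_META, &offBlock);
    if (RT_SUCCESS(rc))
        LogFlowFunc(("allocated 8 blocks starting at block %llu\n", offBlock));
    return rc;
}
#endif
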
898#if 0 /* unused */
899/**
900 * Try to extend the space of an already allocated block.
901 *
902 * @returns VBox status code.
903 * @param pBlkMap The block bitmap to allocate the blocks from.
904 * @param cBlocksNew How many blocks the extended block should have.
905 * @param offBlockAddrOld The start address of the block to reallocate.
906 * @param poffBlockAddr Where to store the start address of the allocated region.
907 */
908static int vciBlkMapRealloc(PVCIBLKMAP pBlkMap, uint32_t cBlocksNew, uint64_t offBlockAddrOld,
909 uint64_t *poffBlockAddr)
910{
911 int rc = VINF_SUCCESS;
912
913 LogFlowFunc(("pBlkMap=%#p cBlocksNew=%u offBlockAddrOld=%llu poffBlockAddr=%#p\n",
914 pBlkMap, cBlocksNew, offBlockAddrOld, poffBlockAddr));
915
916 AssertMsgFailed(("Implement\n"));
917 RT_NOREF4(pBlkMap, cBlocksNew, offBlockAddrOld, poffBlockAddr);
918
919 LogFlowFunc(("returns rc=%Rrc offBlockAddr=%llu\n", rc, *poffBlockAddr));
920 return rc;
921}
922#endif /* unused */
923
924#if 0 /* unused */
925/**
926 * Frees a range of blocks.
927 *
928 * @param pBlkMap The block bitmap.
929 * @param offBlockAddr Address of the first block to free.
930 * @param cBlocks How many blocks to free.
931 * @param fFlags Allocation flags, combination of VCIBLKMAP_ALLOC_*.
932 */
933static void vciBlkMapFree(PVCIBLKMAP pBlkMap, uint64_t offBlockAddr, uint32_t cBlocks,
934 uint32_t fFlags)
935{
936 PVCIBLKRANGEDESC pBlk;
937 uint32_t cBlocksFreed = cBlocks; /* Remember the total for the accounting below. */
938 LogFlowFunc(("pBlkMap=%#p offBlockAddr=%llu cBlocks=%u\n",
939 pBlkMap, offBlockAddr, cBlocks));
940
941 while (cBlocks)
942 {
943 pBlk = vciBlkMapFindByBlock(pBlkMap, offBlockAddr);
944 AssertPtr(pBlk);
945
946 /* Easy case, the whole block is freed. */
947 if ( pBlk->offAddrStart == offBlockAddr
948 && pBlk->cBlocks <= cBlocks)
949 {
950 pBlk->fFree = true;
951 cBlocks -= pBlk->cBlocks;
952 offBlockAddr += pBlk->cBlocks;
953
954 /* Check if it is possible to merge free blocks. */
955 if ( pBlk->pPrev
956 && pBlk->pPrev->fFree)
957 {
958 PVCIBLKRANGEDESC pBlkPrev = pBlk->pPrev;
959
960 Assert(pBlkPrev->offAddrStart + pBlkPrev->cBlocks == pBlk->offAddrStart);
961 pBlkPrev->cBlocks += pBlk->cBlocks;
962 pBlkPrev->pNext = pBlk->pNext;
963 if (pBlk->pNext)
964 pBlk->pNext->pPrev = pBlkPrev;
965 else
966 pBlkMap->pRangesTail = pBlkPrev;
967
968 RTMemFree(pBlk);
969 pBlk = pBlkPrev;
970 }
971
972 /* Now the one to the right. */
973 if ( pBlk->pNext
974 && pBlk->pNext->fFree)
975 {
976 PVCIBLKRANGEDESC pBlkNext = pBlk->pNext;
977
978 Assert(pBlk->offAddrStart + pBlk->cBlocks == pBlkNext->offAddrStart);
979 pBlk->cBlocks += pBlkNext->cBlocks;
980 pBlk->pNext = pBlkNext->pNext;
981 if (pBlkNext->pNext)
982 pBlkNext->pNext->pPrev = pBlk;
983 else
984 pBlkMap->pRangesTail = pBlk;
985
986 RTMemFree(pBlkNext);
987 }
988 }
989 else
990 {
991 /* The block is intersecting. */
992 AssertMsgFailed(("TODO\n"));
993 }
994 }
995
996 if ((fFlags & VCIBLKMAP_ALLOC_MASK) == VCIBLKMAP_ALLOC_DATA)
997 pBlkMap->cBlocksAllocData -= cBlocksFreed;
998 else
999 pBlkMap->cBlocksAllocMeta -= cBlocksFreed;
1000
1001 pBlkMap->cBlocksFree += cBlocksFreed;
1002
1003 LogFlowFunc(("returns\n"));
1004}
1005#endif /* unused */
1006
1007/**
1008 * Converts a tree node from the image to the in memory structure.
1009 *
1010 * @returns Pointer to the in memory tree node.
1011 * @param offBlockAddrNode Block address of the node.
1012 * @param pNodeImage Pointer to the image representation of the node.
1013 */
1014static PVCITREENODE vciTreeNodeImage2Host(uint64_t offBlockAddrNode, PVciTreeNode pNodeImage)
1015{
1016 PVCITREENODE pNode = NULL;
1017
1018 if (pNodeImage->u8Type == VCI_TREE_NODE_TYPE_LEAF)
1019 {
1020 PVCITREENODELEAF pLeaf = (PVCITREENODELEAF)RTMemAllocZ(sizeof(VCITREENODELEAF));
1021
1022 if (pLeaf)
1023 {
1024 PVciCacheExtent pExtent = (PVciCacheExtent)&pNodeImage->au8Data[0];
1025
1026 pLeaf->Core.u8Type = VCI_TREE_NODE_TYPE_LEAF;
1027
1028 for (unsigned idx = 0; idx < RT_ELEMENTS(pLeaf->aExtents); idx++)
1029 {
1030 pLeaf->aExtents[idx].u64BlockOffset = RT_LE2H_U64(pExtent->u64BlockOffset);
1031 pLeaf->aExtents[idx].u32Blocks = RT_LE2H_U32(pExtent->u32Blocks);
1032 pLeaf->aExtents[idx].u64BlockAddr = RT_LE2H_U64(pExtent->u64BlockAddr);
1033 pExtent++;
1034
1035 if ( pLeaf->aExtents[idx].u32Blocks
1036 && pLeaf->aExtents[idx].u64BlockAddr)
1037 pLeaf->cUsedNodes++;
1038 }
1039
1040 pNode = &pLeaf->Core;
1041 }
1042 }
1043 else if (pNodeImage->u8Type == VCI_TREE_NODE_TYPE_INTERNAL)
1044 {
1045 PVCITREENODEINT pInt = (PVCITREENODEINT)RTMemAllocZ(sizeof(VCITREENODEINT));
1046
1047 if (pInt)
1048 {
1049 PVciTreeNodeInternal pIntImage = (PVciTreeNodeInternal)&pNodeImage->au8Data[0];
1050
1051 pInt->Core.u8Type = VCI_TREE_NODE_TYPE_INTERNAL;
1052
1053 for (unsigned idx = 0; idx < RT_ELEMENTS(pInt->aIntNodes); idx++)
1054 {
1055 pInt->aIntNodes[idx].u64BlockOffset = RT_LE2H_U64(pIntImage->u64BlockOffset);
1056 pInt->aIntNodes[idx].u32Blocks = RT_LE2H_U32(pIntImage->u32Blocks);
1057 pInt->aIntNodes[idx].PtrChild.fInMemory = false;
1058 pInt->aIntNodes[idx].PtrChild.u.offAddrBlockNode = RT_LE2H_U64(pIntImage->u64ChildAddr);
1059 pIntImage++;
1060
1061 if ( pInt->aIntNodes[idx].u32Blocks
1062 && pInt->aIntNodes[idx].PtrChild.u.offAddrBlockNode)
1063 pInt->cUsedNodes++;
1064 }
1065
1066 pNode = &pInt->Core;
1067 }
1068 }
1069 else
1070 AssertMsgFailed(("Invalid node type %d\n", pNodeImage->u8Type));
1071
1072 if (pNode)
1073 pNode->u64BlockAddr = offBlockAddrNode;
1074
1075 return pNode;
1076}
1077
1078/**
1079 * Looks up the cache extent for the given virtual block address.
1080 *
1081 * @returns Pointer to the cache extent or NULL if none could be found.
1082 * @param pCache The cache image instance.
1083 * @param offBlockOffset The block offset to search for.
1084 * @param ppNextBestFit Where to store the pointer to the next best fit
1085 * cache extent above offBlockOffset, if one exists. Optional.
1086 * This is always filled if possible, even if the function returns NULL.
1087 */
1088static PVCICACHEEXTENT vciCacheExtentLookup(PVCICACHE pCache, uint64_t offBlockOffset,
1089 PVCICACHEEXTENT *ppNextBestFit)
1090{
1091 int rc = VINF_SUCCESS;
1092 PVCICACHEEXTENT pExtent = NULL;
1093 PVCITREENODE pNodeCur = pCache->pRoot;
1094
1095 while ( RT_SUCCESS(rc)
1096 && pNodeCur
1097 && pNodeCur->u8Type != VCI_TREE_NODE_TYPE_LEAF)
1098 {
1099 PVCITREENODEINT pNodeInt = (PVCITREENODEINT)pNodeCur;
1100
1101 Assert(pNodeCur->u8Type == VCI_TREE_NODE_TYPE_INTERNAL);
1102
1103 /* Search for the correct internal node. */
1104 unsigned idxMin = 0;
1105 unsigned idxMax = pNodeInt->cUsedNodes;
1106 unsigned idxCur = pNodeInt->cUsedNodes / 2;
1107
1108 while (idxMin < idxMax)
1109 {
1110 PVCINODEINTERNAL pInt = &pNodeInt->aIntNodes[idxCur];
1111
1112 /* Determine the search direction. */
1113 if (offBlockOffset < pInt->u64BlockOffset)
1114 {
1115 /* Search left from the current extent. */
1116 idxMax = idxCur;
1117 }
1118 else if (offBlockOffset >= pInt->u64BlockOffset + pInt->u32Blocks)
1119 {
1120 /* Search right from the current extent. */
1121 idxMin = idxCur;
1122 }
1123 else
1124 {
1125 /* The block lies in the node, stop searching. */
1126 if (pInt->PtrChild.fInMemory)
1127 pNodeCur = pInt->PtrChild.u.pNode;
1128 else
1129 {
1130 PVCITREENODE pNodeNew;
1131 VciTreeNode NodeTree;
1132
1133 /* Read from disk and add to the tree. */
1134 rc = vdIfIoIntFileReadSync(pCache->pIfIo, pCache->pStorage,
1135 VCI_BLOCK2BYTE(pInt->PtrChild.u.offAddrBlockNode),
1136 &NodeTree, sizeof(NodeTree));
1137 AssertRC(rc);
1138
1139 pNodeNew = vciTreeNodeImage2Host(pInt->PtrChild.u.offAddrBlockNode, &NodeTree);
1140 if (pNodeNew)
1141 {
1142 /* Link to the parent. */
1143 pInt->PtrChild.fInMemory = true;
1144 pInt->PtrChild.u.pNode = pNodeNew;
1145 pNodeNew->pParent = pNodeCur;
1146 pNodeCur = pNodeNew;
1147 }
1148 else
1149 rc = VERR_NO_MEMORY;
1150 }
1151 break;
1152 }
1153
1154 idxCur = idxMin + (idxMax - idxMin) / 2;
1155 }
1156 }
1157
1158 if ( RT_SUCCESS(rc)
1159 && pNodeCur)
1160 {
1161 PVCITREENODELEAF pLeaf = (PVCITREENODELEAF)pNodeCur;
1162 Assert(pNodeCur->u8Type == VCI_TREE_NODE_TYPE_LEAF);
1163
1164 /* Search the range. */
1165 unsigned idxMin = 0;
1166 unsigned idxMax = pLeaf->cUsedNodes;
1167 unsigned idxCur = pLeaf->cUsedNodes / 2;
1168
1169 while (idxMin < idxMax)
1170 {
1171 PVCICACHEEXTENT pExtentCur = &pLeaf->aExtents[idxCur];
1172
1173 /* Determine the search direction. */
1174 if (offBlockOffset < pExtentCur->u64BlockOffset)
1175 {
1176 /* Search left from the current extent. */
1177 idxMax = idxCur;
1178 }
1179 else if (offBlockOffset >= pExtentCur->u64BlockOffset + pExtentCur->u32Blocks)
1180 {
1181 /* Search right from the current extent. */
1182 idxMin = idxCur;
1183 }
1184 else
1185 {
1186 /* We found the extent, stop searching. */
1187 pExtent = pExtentCur;
1188 break;
1189 }
1190
1191 idxCur = idxMin + (idxMax - idxMin) / 2;
1192 }
1193
1194 /* Get the next best fit extent if it exists. */
1195 if (ppNextBestFit)
1196 {
1197 if (idxCur < pLeaf->cUsedNodes - 1)
1198 *ppNextBestFit = &pLeaf->aExtents[idxCur + 1];
1199 else
1200 {
1201 /*
1202 * Go up the tree and find the best extent
1203 * in the leftmost tree of the child subtree to the right.
1204 */
1205 PVCITREENODEINT pInt = (PVCITREENODEINT)pLeaf->Core.pParent;
1206
1207 while (pInt)
1208 {
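/** @todo Not implemented yet: walk up to the parent and descend into the
 * leftmost leaf of the subtree to the right; until then *ppNextBestFit is
 * simply left untouched in this case. */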
1209
1210 }
1211 }
1212 }
1213 }
1214
1215 return pExtent;
1216}
1217
1218/**
1219 * Internal: Open an image, constructing all necessary data structures.
1220 */
1221static int vciOpenImage(PVCICACHE pCache, unsigned uOpenFlags)
1222{
1223 VciHdr Hdr;
1224 uint64_t cbFile;
1225 int rc;
1226
1227 pCache->uOpenFlags = uOpenFlags;
1228
1229 pCache->pIfError = VDIfErrorGet(pCache->pVDIfsDisk);
1230 pCache->pIfIo = VDIfIoIntGet(pCache->pVDIfsImage);
1231 AssertPtrReturn(pCache->pIfIo, VERR_INVALID_PARAMETER);
1232
1233 /*
1234 * Open the image.
1235 */
1236 rc = vdIfIoIntFileOpen(pCache->pIfIo, pCache->pszFilename,
1237 VDOpenFlagsToFileOpenFlags(uOpenFlags,
1238 false /* fCreate */),
1239 &pCache->pStorage);
1240 if (RT_FAILURE(rc))
1241 {
1242 /* Do NOT signal an appropriate error here, as the VD layer has the
1243 * choice of retrying the open if it failed. */
1244 goto out;
1245 }
1246
1247 rc = vdIfIoIntFileGetSize(pCache->pIfIo, pCache->pStorage, &cbFile);
1248 if (RT_FAILURE(rc) || cbFile < sizeof(VciHdr))
1249 {
1250 rc = VERR_VD_GEN_INVALID_HEADER;
1251 goto out;
1252 }
1253
1254 rc = vdIfIoIntFileReadSync(pCache->pIfIo, pCache->pStorage, 0, &Hdr,
1255 VCI_BYTE2BLOCK(sizeof(Hdr)));
1256 if (RT_FAILURE(rc))
1257 {
1258 rc = VERR_VD_GEN_INVALID_HEADER;
1259 goto out;
1260 }
1261
1262 Hdr.u32Signature = RT_LE2H_U32(Hdr.u32Signature);
1263 Hdr.u32Version = RT_LE2H_U32(Hdr.u32Version);
1264 Hdr.cBlocksCache = RT_LE2H_U64(Hdr.cBlocksCache);
1265 Hdr.u32CacheType = RT_LE2H_U32(Hdr.u32CacheType);
1266 Hdr.offTreeRoot = RT_LE2H_U64(Hdr.offTreeRoot);
1267 Hdr.offBlkMap = RT_LE2H_U64(Hdr.offBlkMap);
1268 Hdr.cBlkMap = RT_LE2H_U32(Hdr.cBlkMap);
1269
1270 if ( Hdr.u32Signature == VCI_HDR_SIGNATURE
1271 && Hdr.u32Version == VCI_HDR_VERSION)
1272 {
1273 pCache->offTreeRoot = Hdr.offTreeRoot;
1274 pCache->offBlksBitmap = Hdr.offBlkMap;
1275
1276 /* Load the block map. */
1277 rc = vciBlkMapLoad(pCache, pCache->offBlksBitmap, Hdr.cBlkMap, &pCache->pBlkMap);
1278 if (RT_SUCCESS(rc))
1279 {
1280 /* Load the first tree node. */
1281 VciTreeNode RootNode;
1282
1283 rc = vdIfIoIntFileReadSync(pCache->pIfIo, pCache->pStorage,
1284 pCache->offTreeRoot, &RootNode,
1285 VCI_BYTE2BLOCK(sizeof(VciTreeNode)));
1286 if (RT_SUCCESS(rc))
1287 {
1288 pCache->pRoot = vciTreeNodeImage2Host(pCache->offTreeRoot, &RootNode);
1289 if (!pCache->pRoot)
1290 rc = VERR_NO_MEMORY;
1291 }
1292 }
1293 }
1294 else
1295 rc = VERR_VD_GEN_INVALID_HEADER;
1296
1297out:
1298 if (RT_FAILURE(rc))
1299 vciFreeImage(pCache, false);
1300 return rc;
1301}
1302
1303/**
1304 * Internal: Create a vci image.
1305 */
1306static int vciCreateImage(PVCICACHE pCache, uint64_t cbSize,
1307 unsigned uImageFlags, const char *pszComment,
1308 unsigned uOpenFlags, PFNVDPROGRESS pfnProgress,
1309 void *pvUser, unsigned uPercentStart,
1310 unsigned uPercentSpan)
1311{
1312 RT_NOREF1(pszComment);
1313 VciHdr Hdr;
1314 VciTreeNode NodeRoot;
1315 int rc;
1316 uint64_t cBlocks = cbSize / VCI_BLOCK_SIZE; /* Size of the cache in blocks. */
1317
1318 pCache->uImageFlags = uImageFlags;
1319 pCache->uOpenFlags = uOpenFlags & ~VD_OPEN_FLAGS_READONLY;
1320
1321 pCache->pIfError = VDIfErrorGet(pCache->pVDIfsDisk);
1322 pCache->pIfIo = VDIfIoIntGet(pCache->pVDIfsImage);
1323 AssertPtrReturn(pCache->pIfIo, VERR_INVALID_PARAMETER);
1324
1325 if (uImageFlags & VD_IMAGE_FLAGS_DIFF)
1326 {
1327 rc = vdIfError(pCache->pIfError, VERR_VD_RAW_INVALID_TYPE, RT_SRC_POS, N_("VCI: cannot create diff image '%s'"), pCache->pszFilename);
1328 return rc;
1329 }
1330
1331 do
1332 {
1333 /* Create image file. */
1334 rc = vdIfIoIntFileOpen(pCache->pIfIo, pCache->pszFilename,
1335 VDOpenFlagsToFileOpenFlags(uOpenFlags & ~VD_OPEN_FLAGS_READONLY,
1336 true /* fCreate */),
1337 &pCache->pStorage);
1338 if (RT_FAILURE(rc))
1339 {
1340 rc = vdIfError(pCache->pIfError, rc, RT_SRC_POS, N_("VCI: cannot create image '%s'"), pCache->pszFilename);
1341 break;
1342 }
1343
1344 /* Allocate block bitmap. */
1345 uint32_t cBlkMap = 0;
1346 rc = vciBlkMapCreate(cBlocks, &pCache->pBlkMap, &cBlkMap);
1347 if (RT_FAILURE(rc))
1348 {
1349 rc = vdIfError(pCache->pIfError, rc, RT_SRC_POS, N_("VCI: cannot create block bitmap '%s'"), pCache->pszFilename);
1350 break;
1351 }
1352
1353 /*
1354 * Allocate space for the header in the block bitmap.
1355 * Because the block map is empty, the header has to start at block 0.
1356 */
1357 uint64_t offHdr = 0;
1358 rc = vciBlkMapAllocate(pCache->pBlkMap, VCI_BYTE2BLOCK(sizeof(VciHdr)), VCIBLKMAP_ALLOC_META, &offHdr);
1359 if (RT_FAILURE(rc))
1360 {
1361 rc = vdIfError(pCache->pIfError, rc, RT_SRC_POS, N_("VCI: cannot allocate space for header in block bitmap '%s'"), pCache->pszFilename);
1362 break;
1363 }
1364
1365 Assert(offHdr == 0);
1366
1367 /*
1368 * Allocate space for the block map itself.
1369 */
1370 uint64_t offBlkMap = 0;
1371 rc = vciBlkMapAllocate(pCache->pBlkMap, cBlkMap, VCIBLKMAP_ALLOC_META, &offBlkMap);
1372 if (RT_FAILURE(rc))
1373 {
1374 rc = vdIfError(pCache->pIfError, rc, RT_SRC_POS, N_("VCI: cannot allocate space for block map in block map '%s'"), pCache->pszFilename);
1375 break;
1376 }
1377
1378 /*
1379 * Allocate space for the tree root node.
1380 */
1381 uint64_t offTreeRoot = 0;
1382 rc = vciBlkMapAllocate(pCache->pBlkMap, VCI_BYTE2BLOCK(sizeof(VciTreeNode)), VCIBLKMAP_ALLOC_META, &offTreeRoot);
1383 if (RT_FAILURE(rc))
1384 {
1385 rc = vdIfError(pCache->pIfError, rc, RT_SRC_POS, N_("VCI: cannot allocate space for the tree root in block map '%s'"), pCache->pszFilename);
1386 break;
1387 }
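/* With an empty block map the three allocations above are deterministic: the
 * header occupies blocks 0..1, the block bitmap starts at block 2 and the
 * tree root follows directly after it. */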
1388
1389 /*
1390 * Allocate the in memory root node.
1391 */
1392 pCache->pRoot = (PVCITREENODE)RTMemAllocZ(sizeof(VCITREENODELEAF));
1393 if (!pCache->pRoot)
1394 {
1395 rc = vdIfError(pCache->pIfError, VERR_NO_MEMORY, RT_SRC_POS, N_("VCI: cannot allocate B+-Tree root pointer '%s'"), pCache->pszFilename);
1396 break;
1397 }
1398
1399 pCache->pRoot->u8Type = VCI_TREE_NODE_TYPE_LEAF;
1400 /* Rest remains 0 as the tree is still empty. */
1401
1402 /*
1403 * All the basic structures are set up and we know where to place them in
1404 * the image, so write them out now.
1405 */
1406
1407 /* Setup the header. */
1408 memset(&Hdr, 0, sizeof(VciHdr));
1409 Hdr.u32Signature = RT_H2LE_U32(VCI_HDR_SIGNATURE);
1410 Hdr.u32Version = RT_H2LE_U32(VCI_HDR_VERSION);
1411 Hdr.cBlocksCache = RT_H2LE_U64(cBlocks);
1412 Hdr.fUncleanShutdown = VCI_HDR_UNCLEAN_SHUTDOWN;
1413 Hdr.u32CacheType = uImageFlags & VD_IMAGE_FLAGS_FIXED
1414 ? RT_H2LE_U32(VCI_HDR_CACHE_TYPE_FIXED)
1415 : RT_H2LE_U32(VCI_HDR_CACHE_TYPE_DYNAMIC);
1416 Hdr.offTreeRoot = RT_H2LE_U64(offTreeRoot);
1417 Hdr.offBlkMap = RT_H2LE_U64(offBlkMap);
1418 Hdr.cBlkMap = RT_H2LE_U32(cBlkMap);
1419
1420 rc = vdIfIoIntFileWriteSync(pCache->pIfIo, pCache->pStorage, offHdr, &Hdr,
1421 VCI_BYTE2BLOCK(sizeof(VciHdr)));
1422 if (RT_FAILURE(rc))
1423 {
1424 rc = vdIfError(pCache->pIfError, rc, RT_SRC_POS, N_("VCI: cannot write header '%s'"), pCache->pszFilename);
1425 break;
1426 }
1427
1428 rc = vciBlkMapSave(pCache->pBlkMap, pCache, offBlkMap, cBlkMap);
1429 if (RT_FAILURE(rc))
1430 {
1431 rc = vdIfError(pCache->pIfError, rc, RT_SRC_POS, N_("VCI: cannot write block map '%s'"), pCache->pszFilename);
1432 break;
1433 }
1434
1435 /* Setup the root tree. */
1436 memset(&NodeRoot, 0, sizeof(VciTreeNode));
1437 NodeRoot.u8Type = VCI_TREE_NODE_TYPE_LEAF;
1438
1439 rc = vdIfIoIntFileWriteSync(pCache->pIfIo, pCache->pStorage, offTreeRoot,
1440 &NodeRoot, VCI_BYTE2BLOCK(sizeof(VciTreeNode)));
1441 if (RT_FAILURE(rc))
1442 {
1443 rc = vdIfError(pCache->pIfError, rc, RT_SRC_POS, N_("VCI: cannot write root node '%s'"), pCache->pszFilename);
1444 break;
1445 }
1446
1447 rc = vciFlushImage(pCache);
1448 if (RT_FAILURE(rc))
1449 {
1450 rc = vdIfError(pCache->pIfError, rc, RT_SRC_POS, N_("VCI: cannot flush '%s'"), pCache->pszFilename);
1451 break;
1452 }
1453
1454 pCache->cbSize = cbSize;
1455
1456 } while (0);
1457
1458 if (RT_SUCCESS(rc) && pfnProgress)
1459 pfnProgress(pvUser, uPercentStart + uPercentSpan);
1460
1461 if (RT_FAILURE(rc))
1462 vciFreeImage(pCache, rc != VERR_ALREADY_EXISTS);
1463 return rc;
1464}
1465
1466/** @copydoc VDCACHEBACKEND::pfnProbe */
1467static DECLCALLBACK(int) vciProbe(const char *pszFilename, PVDINTERFACE pVDIfsDisk,
1468 PVDINTERFACE pVDIfsImage)
1469{
1470 RT_NOREF1(pVDIfsDisk);
1471 VciHdr Hdr;
1472 PVDIOSTORAGE pStorage = NULL;
1473 uint64_t cbFile;
1474 int rc = VINF_SUCCESS;
1475
1476 LogFlowFunc(("pszFilename=\"%s\"\n", pszFilename));
1477
1478 PVDINTERFACEIOINT pIfIo = VDIfIoIntGet(pVDIfsImage);
1479 AssertPtrReturn(pIfIo, VERR_INVALID_PARAMETER);
1480
1481 rc = vdIfIoIntFileOpen(pIfIo, pszFilename,
1482 VDOpenFlagsToFileOpenFlags(VD_OPEN_FLAGS_READONLY,
1483 false /* fCreate */),
1484 &pStorage);
1485 if (RT_FAILURE(rc))
1486 goto out;
1487
1488 rc = vdIfIoIntFileGetSize(pIfIo, pStorage, &cbFile);
1489 if (RT_FAILURE(rc) || cbFile < sizeof(VciHdr))
1490 {
1491 rc = VERR_VD_GEN_INVALID_HEADER;
1492 goto out;
1493 }
1494
1495 rc = vdIfIoIntFileReadSync(pIfIo, pStorage, 0, &Hdr, sizeof(Hdr));
1496 if (RT_FAILURE(rc))
1497 {
1498 rc = VERR_VD_GEN_INVALID_HEADER;
1499 goto out;
1500 }
1501
1502 Hdr.u32Signature = RT_LE2H_U32(Hdr.u32Signature);
1503 Hdr.u32Version = RT_LE2H_U32(Hdr.u32Version);
1504 Hdr.cBlocksCache = RT_LE2H_U64(Hdr.cBlocksCache);
1505 Hdr.u32CacheType = RT_LE2H_U32(Hdr.u32CacheType);
1506 Hdr.offTreeRoot = RT_LE2H_U64(Hdr.offTreeRoot);
1507 Hdr.offBlkMap = RT_LE2H_U64(Hdr.offBlkMap);
1508 Hdr.cBlkMap = RT_LE2H_U32(Hdr.cBlkMap);
1509
1510 if ( Hdr.u32Signature == VCI_HDR_SIGNATURE
1511 && Hdr.u32Version == VCI_HDR_VERSION)
1512 rc = VINF_SUCCESS;
1513 else
1514 rc = VERR_VD_GEN_INVALID_HEADER;
1515
1516out:
1517 if (pStorage)
1518 vdIfIoIntFileClose(pIfIo, pStorage);
1519
1520 LogFlowFunc(("returns %Rrc\n", rc));
1521 return rc;
1522}
1523
1524/** @copydoc VDCACHEBACKEND::pfnOpen */
1525static DECLCALLBACK(int) vciOpen(const char *pszFilename, unsigned uOpenFlags,
1526 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
1527 void **ppBackendData)
1528{
1529 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p ppBackendData=%#p\n", pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, ppBackendData));
1530 int rc;
1531 PVCICACHE pCache;
1532
1533 /* Check open flags. All valid flags are supported. */
1534 if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
1535 {
1536 rc = VERR_INVALID_PARAMETER;
1537 goto out;
1538 }
1539
1540 /* Check remaining arguments. */
1541 if ( !RT_VALID_PTR(pszFilename)
1542 || !*pszFilename)
1543 {
1544 rc = VERR_INVALID_PARAMETER;
1545 goto out;
1546 }
1547
1548
1549 pCache = (PVCICACHE)RTMemAllocZ(sizeof(VCICACHE));
1550 if (!pCache)
1551 {
1552 rc = VERR_NO_MEMORY;
1553 goto out;
1554 }
1555 pCache->pszFilename = pszFilename;
1556 pCache->pStorage = NULL;
1557 pCache->pVDIfsDisk = pVDIfsDisk;
1558 pCache->pVDIfsImage = pVDIfsImage;
1559
1560 rc = vciOpenImage(pCache, uOpenFlags);
1561 if (RT_SUCCESS(rc))
1562 *ppBackendData = pCache;
1563 else
1564 RTMemFree(pCache);
1565
1566out:
1567 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
1568 return rc;
1569}
1570
1571/** @copydoc VDCACHEBACKEND::pfnCreate */
1572static DECLCALLBACK(int) vciCreate(const char *pszFilename, uint64_t cbSize,
1573 unsigned uImageFlags, const char *pszComment,
1574 PCRTUUID pUuid, unsigned uOpenFlags,
1575 unsigned uPercentStart, unsigned uPercentSpan,
1576 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
1577 PVDINTERFACE pVDIfsOperation, void **ppBackendData)
1578{
1579 RT_NOREF1(pUuid);
1580 LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p ppBackendData=%#p",
1581 pszFilename, cbSize, uImageFlags, pszComment, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, ppBackendData));
1582 int rc;
1583 PVCICACHE pCache;
1584
1585 PFNVDPROGRESS pfnProgress = NULL;
1586 void *pvUser = NULL;
1587 PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
1588 if (pIfProgress)
1589 {
1590 pfnProgress = pIfProgress->pfnProgress;
1591 pvUser = pIfProgress->Core.pvUser;
1592 }
1593
1594 /* Check open flags. All valid flags are supported. */
1595 if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
1596 {
1597 rc = VERR_INVALID_PARAMETER;
1598 goto out;
1599 }
1600
1601 /* Check remaining arguments. */
1602 if ( !RT_VALID_PTR(pszFilename)
1603 || !*pszFilename)
1604 {
1605 rc = VERR_INVALID_PARAMETER;
1606 goto out;
1607 }
1608
1609 pCache = (PVCICACHE)RTMemAllocZ(sizeof(VCICACHE));
1610 if (!pCache)
1611 {
1612 rc = VERR_NO_MEMORY;
1613 goto out;
1614 }
1615 pCache->pszFilename = pszFilename;
1616 pCache->pStorage = NULL;
1617 pCache->pVDIfsDisk = pVDIfsDisk;
1618 pCache->pVDIfsImage = pVDIfsImage;
1619
1620 rc = vciCreateImage(pCache, cbSize, uImageFlags, pszComment, uOpenFlags,
1621 pfnProgress, pvUser, uPercentStart, uPercentSpan);
1622 if (RT_SUCCESS(rc))
1623 {
1624 /* So far the image is opened in read/write mode. Make sure the
1625 * image is opened in read-only mode if the caller requested that. */
1626 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
1627 {
1628 vciFreeImage(pCache, false);
1629 rc = vciOpenImage(pCache, uOpenFlags);
1630 if (RT_FAILURE(rc))
1631 {
1632 RTMemFree(pCache);
1633 goto out;
1634 }
1635 }
1636 *ppBackendData = pCache;
1637 }
1638 else
1639 RTMemFree(pCache);
1640
1641out:
1642 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
1643 return rc;
1644}
1645
1646/** @copydoc VDCACHEBACKEND::pfnClose */
1647static DECLCALLBACK(int) vciClose(void *pBackendData, bool fDelete)
1648{
1649 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete));
1650 PVCICACHE pCache = (PVCICACHE)pBackendData;
1651 int rc;
1652
1653 rc = vciFreeImage(pCache, fDelete);
1654 RTMemFree(pCache);
1655
1656 LogFlowFunc(("returns %Rrc\n", rc));
1657 return rc;
1658}
1659
1660/** @copydoc VDCACHEBACKEND::pfnRead */
1661static DECLCALLBACK(int) vciRead(void *pBackendData, uint64_t uOffset, size_t cbToRead,
1662 PVDIOCTX pIoCtx, size_t *pcbActuallyRead)
1663{
1664 LogFlowFunc(("pBackendData=%#p uOffset=%llu cbToRead=%zu pIoCtx=%#p pcbActuallyRead=%#p\n",
1665 pBackendData, uOffset, cbToRead, pIoCtx, pcbActuallyRead));
1666 PVCICACHE pCache = (PVCICACHE)pBackendData;
1667 int rc = VINF_SUCCESS;
1668 PVCICACHEEXTENT pExtent;
1669 uint64_t cBlocksToRead = VCI_BYTE2BLOCK(cbToRead);
1670 uint64_t offBlockAddr = VCI_BYTE2BLOCK(uOffset);
1671
1672 AssertPtr(pCache);
1673 Assert(uOffset % 512 == 0);
1674 Assert(cbToRead % 512 == 0);
1675
1676 pExtent = vciCacheExtentLookup(pCache, offBlockAddr, NULL);
1677 if (pExtent)
1678 {
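/* Only the part of the request covered by this extent is read; pcbActuallyRead
 * below tells the caller how much of the request was satisfied. */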
1679 uint64_t offRead = offBlockAddr - pExtent->u64BlockOffset;
1680 cBlocksToRead = RT_MIN(cBlocksToRead, pExtent->u32Blocks - offRead);
1681
1682 rc = vdIfIoIntFileReadUser(pCache->pIfIo, pCache->pStorage,
1683 pExtent->u64BlockAddr + offRead,
1684 pIoCtx, cBlocksToRead);
1685 }
1686 else
1687 {
1688 /** @todo Use the next best fit extent to check whether cached data exists
1689 * later in the range and set pcbActuallyRead accordingly. */
1690 rc = VERR_VD_BLOCK_FREE;
1691 }
1692
1693 if (pcbActuallyRead)
1694 *pcbActuallyRead = VCI_BLOCK2BYTE(cBlocksToRead);
1695
1696 LogFlowFunc(("returns %Rrc\n", rc));
1697 return rc;
1698}
1699
1700/** @copydoc VDCACHEBACKEND::pfnWrite */
1701static DECLCALLBACK(int) vciWrite(void *pBackendData, uint64_t uOffset, size_t cbToWrite,
1702 PVDIOCTX pIoCtx, size_t *pcbWriteProcess)
1703{
1704 RT_NOREF5(pBackendData, uOffset, cbToWrite, pIoCtx, pcbWriteProcess);
1705 LogFlowFunc(("pBackendData=%#p uOffset=%llu cbToWrite=%zu pIoCtx=%#p pcbWriteProcess=%#p\n",
1706 pBackendData, uOffset, cbToWrite, pIoCtx, pcbWriteProcess));
1707 PVCICACHE pCache = (PVCICACHE)pBackendData;
1708 int rc = VINF_SUCCESS;
1709 uint64_t cBlocksToWrite = VCI_BYTE2BLOCK(cbToWrite);
1710 //uint64_t offBlockAddr = VCI_BYTE2BLOCK(uOffset);
1711
1712 AssertPtr(pCache); NOREF(pCache);
1713 Assert(uOffset % 512 == 0);
1714 Assert(cbToWrite % 512 == 0);
1715 while (cBlocksToWrite)
1716 {
1717
1718 }
1719
1720 *pcbWriteProcess = cbToWrite; /** @todo Implement. */
1721
1722 LogFlowFunc(("returns %Rrc\n", rc));
1723 return rc;
1724}
1725
1726/** @copydoc VDCACHEBACKEND::pfnFlush */
1727static DECLCALLBACK(int) vciFlush(void *pBackendData, PVDIOCTX pIoCtx)
1728{
1729 RT_NOREF1(pIoCtx);
1730 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
1731 PVCICACHE pCache = (PVCICACHE)pBackendData;
1732
1733 int rc = vciFlushImage(pCache);
1734 LogFlowFunc(("returns %Rrc\n", rc));
1735 return rc;
1736}
1737
1738/** @copydoc VDCACHEBACKEND::pfnGetVersion */
1739static DECLCALLBACK(unsigned) vciGetVersion(void *pBackendData)
1740{
1741 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
1742 PVCICACHE pCache = (PVCICACHE)pBackendData;
1743
1744 AssertPtr(pCache);
1745
1746 if (pCache)
1747 return 1;
1748 else
1749 return 0;
1750}
1751
1752/** @copydoc VDCACHEBACKEND::pfnGetSize */
1753static DECLCALLBACK(uint64_t) vciGetSize(void *pBackendData)
1754{
1755 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
1756 PVCICACHE pCache = (PVCICACHE)pBackendData;
1757 uint64_t cb = 0;
1758
1759 AssertPtr(pCache);
1760
1761 if (pCache && pCache->pStorage)
1762 cb = pCache->cbSize;
1763
1764 LogFlowFunc(("returns %llu\n", cb));
1765 return cb;
1766}
1767
1768/** @copydoc VDCACHEBACKEND::pfnGetFileSize */
1769static DECLCALLBACK(uint64_t) vciGetFileSize(void *pBackendData)
1770{
1771 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
1772 PVCICACHE pCache = (PVCICACHE)pBackendData;
1773 uint64_t cb = 0;
1774
1775 AssertPtr(pCache);
1776
1777 if (pCache)
1778 {
1779 uint64_t cbFile;
1780 if (pCache->pStorage)
1781 {
1782 int rc = vdIfIoIntFileGetSize(pCache->pIfIo, pCache->pStorage, &cbFile);
1783 if (RT_SUCCESS(rc))
1784 cb = cbFile;
1785 }
1786 }
1787
1788 LogFlowFunc(("returns %lld\n", cb));
1789 return cb;
1790}
1791
1792/** @copydoc VDCACHEBACKEND::pfnGetImageFlags */
1793static DECLCALLBACK(unsigned) vciGetImageFlags(void *pBackendData)
1794{
1795 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
1796 PVCICACHE pCache = (PVCICACHE)pBackendData;
1797 unsigned uImageFlags;
1798
1799 AssertPtr(pCache);
1800
1801 if (pCache)
1802 uImageFlags = pCache->uImageFlags;
1803 else
1804 uImageFlags = 0;
1805
1806 LogFlowFunc(("returns %#x\n", uImageFlags));
1807 return uImageFlags;
1808}
1809
1810/** @copydoc VDCACHEBACKEND::pfnGetOpenFlags */
1811static DECLCALLBACK(unsigned) vciGetOpenFlags(void *pBackendData)
1812{
1813 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
1814 PVCICACHE pCache = (PVCICACHE)pBackendData;
1815 unsigned uOpenFlags;
1816
1817 AssertPtr(pCache);
1818
1819 if (pCache)
1820 uOpenFlags = pCache->uOpenFlags;
1821 else
1822 uOpenFlags = 0;
1823
1824 LogFlowFunc(("returns %#x\n", uOpenFlags));
1825 return uOpenFlags;
1826}
1827
1828/** @copydoc VDCACHEBACKEND::pfnSetOpenFlags */
1829static DECLCALLBACK(int) vciSetOpenFlags(void *pBackendData, unsigned uOpenFlags)
1830{
1831 LogFlowFunc(("pBackendData=%#p\n uOpenFlags=%#x", pBackendData, uOpenFlags));
1832 PVCICACHE pCache = (PVCICACHE)pBackendData;
1833 int rc;
1834
1835 /* Image must be opened and the new flags must be valid. Only the read-only
1836 * and info flags are supported. */
1837 if (!pCache || (uOpenFlags & ~(VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO)))
1838 {
1839 rc = VERR_INVALID_PARAMETER;
1840 goto out;
1841 }
1842
1843 /* Implement this operation via reopening the image. */
1844 rc = vciFreeImage(pCache, false);
1845 if (RT_FAILURE(rc))
1846 goto out;
1847 rc = vciOpenImage(pCache, uOpenFlags);
1848
1849out:
1850 LogFlowFunc(("returns %Rrc\n", rc));
1851 return rc;
1852}
1853
1854/** @copydoc VDCACHEBACKEND::pfnGetComment */
1855static DECLCALLBACK(int) vciGetComment(void *pBackendData, char *pszComment,
1856 size_t cbComment)
1857{
1858 RT_NOREF2(pszComment, cbComment);
1859 LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment));
1860 PVCICACHE pCache = (PVCICACHE)pBackendData;
1861 int rc;
1862
1863 AssertPtr(pCache);
1864
1865 if (pCache)
1866 rc = VERR_NOT_SUPPORTED;
1867 else
1868 rc = VERR_VD_NOT_OPENED;
1869
1870 LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment));
1871 return rc;
1872}
1873
1874/** @copydoc VDCACHEBACKEND::pfnSetComment */
1875static DECLCALLBACK(int) vciSetComment(void *pBackendData, const char *pszComment)
1876{
1877 RT_NOREF1(pszComment);
1878 LogFlowFunc(("pBackendData=%#p pszComment=\"%s\"\n", pBackendData, pszComment));
1879 PVCICACHE pCache = (PVCICACHE)pBackendData;
1880 int rc;
1881
1882 AssertPtr(pCache);
1883
1884 if (pCache)
1885 {
1886 if (pCache->uOpenFlags & VD_OPEN_FLAGS_READONLY)
1887 rc = VERR_VD_IMAGE_READ_ONLY;
1888 else
1889 rc = VERR_NOT_SUPPORTED;
1890 }
1891 else
1892 rc = VERR_VD_NOT_OPENED;
1893
1894 LogFlowFunc(("returns %Rrc\n", rc));
1895 return rc;
1896}
1897
1898/** @copydoc VDCACHEBACKEND::pfnGetUuid */
1899static DECLCALLBACK(int) vciGetUuid(void *pBackendData, PRTUUID pUuid)
1900{
1901 RT_NOREF1(pUuid);
1902 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
1903 PVCICACHE pCache = (PVCICACHE)pBackendData;
1904 int rc;
1905
1906 AssertPtr(pCache);
1907
1908 if (pCache)
1909 rc = VERR_NOT_SUPPORTED;
1910 else
1911 rc = VERR_VD_NOT_OPENED;
1912
1913 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
1914 return rc;
1915}
1916
1917/** @copydoc VDCACHEBACKEND::pfnSetUuid */
1918static DECLCALLBACK(int) vciSetUuid(void *pBackendData, PCRTUUID pUuid)
1919{
1920 RT_NOREF1(pUuid);
1921 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
1922 PVCICACHE pCache = (PVCICACHE)pBackendData;
1923 int rc;
1924
1925 LogFlowFunc(("%RTuuid\n", pUuid));
1926 AssertPtr(pCache);
1927
1928 if (pCache)
1929 {
1930 if (!(pCache->uOpenFlags & VD_OPEN_FLAGS_READONLY))
1931 rc = VERR_NOT_SUPPORTED;
1932 else
1933 rc = VERR_VD_IMAGE_READ_ONLY;
1934 }
1935 else
1936 rc = VERR_VD_NOT_OPENED;
1937
1938 LogFlowFunc(("returns %Rrc\n", rc));
1939 return rc;
1940}
1941
1942/** @copydoc VDCACHEBACKEND::pfnGetModificationUuid */
1943static DECLCALLBACK(int) vciGetModificationUuid(void *pBackendData, PRTUUID pUuid)
1944{
1945 RT_NOREF1(pUuid);
1946 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
1947 PVCICACHE pCache = (PVCICACHE)pBackendData;
1948 int rc;
1949
1950 AssertPtr(pCache);
1951
1952 if (pCache)
1953 rc = VERR_NOT_SUPPORTED;
1954 else
1955 rc = VERR_VD_NOT_OPENED;
1956
1957 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
1958 return rc;
1959}
1960
1961/** @copydoc VDCACHEBACKEND::pfnSetModificationUuid */
1962static DECLCALLBACK(int) vciSetModificationUuid(void *pBackendData, PCRTUUID pUuid)
1963{
1964 RT_NOREF1(pUuid);
1965 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
1966 PVCICACHE pCache = (PVCICACHE)pBackendData;
1967 int rc;
1968
1969 AssertPtr(pCache);
1970
1971 if (pCache)
1972 {
1973 if (!(pCache->uOpenFlags & VD_OPEN_FLAGS_READONLY))
1974 rc = VERR_NOT_SUPPORTED;
1975 else
1976 rc = VERR_VD_IMAGE_READ_ONLY;
1977 }
1978 else
1979 rc = VERR_VD_NOT_OPENED;
1980
1981 LogFlowFunc(("returns %Rrc\n", rc));
1982 return rc;
1983}
1984
1985/** @copydoc VDCACHEBACKEND::pfnDump */
1986static DECLCALLBACK(void) vciDump(void *pBackendData)
1987{
1988 NOREF(pBackendData);
1989}
1990
1991
1992const VDCACHEBACKEND g_VciCacheBackend =
1993{
1994 /* u32Version */
1995 VD_CACHEBACKEND_VERSION,
1996 /* pszBackendName */
1997 "vci",
1998 /* uBackendCaps */
1999 VD_CAP_CREATE_FIXED | VD_CAP_CREATE_DYNAMIC | VD_CAP_FILE | VD_CAP_VFS,
2000 /* papszFileExtensions */
2001 s_apszVciFileExtensions,
2002 /* paConfigInfo */
2003 NULL,
2004 /* pfnProbe */
2005 vciProbe,
2006 /* pfnOpen */
2007 vciOpen,
2008 /* pfnCreate */
2009 vciCreate,
2010 /* pfnClose */
2011 vciClose,
2012 /* pfnRead */
2013 vciRead,
2014 /* pfnWrite */
2015 vciWrite,
2016 /* pfnFlush */
2017 vciFlush,
2018 /* pfnDiscard */
2019 NULL,
2020 /* pfnGetVersion */
2021 vciGetVersion,
2022 /* pfnGetSize */
2023 vciGetSize,
2024 /* pfnGetFileSize */
2025 vciGetFileSize,
2026 /* pfnGetImageFlags */
2027 vciGetImageFlags,
2028 /* pfnGetOpenFlags */
2029 vciGetOpenFlags,
2030 /* pfnSetOpenFlags */
2031 vciSetOpenFlags,
2032 /* pfnGetComment */
2033 vciGetComment,
2034 /* pfnSetComment */
2035 vciSetComment,
2036 /* pfnGetUuid */
2037 vciGetUuid,
2038 /* pfnSetUuid */
2039 vciSetUuid,
2040 /* pfnGetModificationUuid */
2041 vciGetModificationUuid,
2042 /* pfnSetModificationUuid */
2043 vciSetModificationUuid,
2044 /* pfnDump */
2045 vciDump,
2046 /* pfnComposeLocation */
2047 NULL,
2048 /* pfnComposeName */
2049 NULL,
2050 /* u32VersionEnd */
2051 VD_CACHEBACKEND_VERSION
2052};
2053