VirtualBox

source: vbox/trunk/src/VBox/ValidationKit/tests/storage/storagecfg.py@ 66244

Last change on this file since 66244 was 66244, checked in by vboxsync, 8 years ago

ValidationKit/tests/storage: Implement option to use a ramdisk for backing the disk images

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 19.8 KB
Line 
1# -*- coding: utf-8 -*-
2# $Id: storagecfg.py 66244 2017-03-24 12:14:06Z vboxsync $
3
4"""
5VirtualBox Validation Kit - Storage test configuration API.
6"""
7
8__copyright__ = \
9"""
10Copyright (C) 2016-2017 Oracle Corporation
11
12This file is part of VirtualBox Open Source Edition (OSE), as
13available from http://www.virtualbox.org. This file is free software;
14you can redistribute it and/or modify it under the terms of the GNU
15General Public License (GPL) as published by the Free Software
16Foundation, in version 2 as it comes in the "COPYING" file of the
17VirtualBox OSE distribution. VirtualBox OSE is distributed in the
18hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
19
20The contents of this file may alternatively be used under the terms
21of the Common Development and Distribution License Version 1.0
22(CDDL) only, as it comes in the "COPYING.CDDL" file of the
23VirtualBox OSE distribution, in which case the provisions of the
24CDDL are applicable instead of those of the GPL.
25
26You may elect to license modified versions of this file under the
27terms and conditions of either the GPL or the CDDL or both.
28"""
29__version__ = "$Revision: 66244 $"
30
31# Standard Python imports.
32import os;
33import re;
34
class StorageDisk(object):
    """
    A single disk device available for testing, tracking whether it is
    currently claimed by a pool and whether it is RAM backed.
    """

    def __init__(self, sPath, fRamDisk = False):
        self.sPath    = sPath;    # Device path of the disk.
        self.fUsed    = False;    # Claimed by a storage pool?
        self.fRamDisk = fRamDisk; # RAM backed (needs special teardown)?

    def getPath(self):
        """
        Return the disk path.
        """
        return self.sPath;

    def isUsed(self):
        """
        Returns whether the disk is currently in use.
        """
        return self.fUsed;

    def isRamDisk(self):
        """
        Returns whether the disk object has a RAM backing.
        """
        return self.fRamDisk;

    def setUsed(self, fUsed):
        """
        Sets the used flag for the disk, returning False if the disk was
        already claimed and True on any successful state change.
        """
        # Claiming an already claimed disk is an error; releasing is always fine.
        if fUsed and self.fUsed:
            return False;

        self.fUsed = fUsed;
        return True;
76
class StorageConfigOs(object):
    """
    Base class for a single host's OS storage configuration.
    """

    def _getDisksMatchingRegExpWithPath(self, sPath, sRegExp):
        """
        Returns a list of StorageDisk objects, one for each entry of sPath
        whose name matches the given regular expression and which still
        exists when checked.
        """
        oRegExp = re.compile(sRegExp);
        # Match on the base name but store the full path of the device.
        return [ StorageDisk(sPath + '/' + sEntry)
                 for sEntry in os.listdir(sPath)
                 if     oRegExp.match(os.path.basename(sEntry))
                    and os.path.exists(sPath + '/' + sEntry) ];
95
class StorageConfigOsSolaris(StorageConfigOs):
    """
    Class implementing the Solaris specifics for a storage configuration
    (ZFS pools/volumes and ramdiskadm backed RAM disks).
    """

    def __init__(self):
        StorageConfigOs.__init__(self);
        self.idxRamDisk = 0; # Monotonic counter used to generate unique ramdisk names.

    def _getActivePoolsStartingWith(self, oExec, sPoolIdStart):
        """
        Returns a list of pools starting with the given ID or None on failure.
        """
        lstPools = None;
        fRc, sOutput, _ = oExec.execBinary('zpool', ('list', '-H'));
        if fRc:
            lstPools = [];
            asPools = sOutput.splitlines();
            for sPool in asPools:
                if sPool.startswith(sPoolIdStart):
                    # Extract the whole name (first tab separated field) and add it to the list.
                    asItems = sPool.split('\t');
                    lstPools.append(asItems[0]);
        return lstPools;

    def _getActiveVolumesInPoolStartingWith(self, oExec, sPool, sVolumeIdStart):
        """
        Returns a list of active volumes for the given pool starting with the given
        identifier or None on failure.
        """
        lstVolumes = None;
        fRc, sOutput, _ = oExec.execBinary('zfs', ('list', '-H'));
        if fRc:
            lstVolumes = [];
            asVolumes = sOutput.splitlines();
            for sVolume in asVolumes:
                if sVolume.startswith(sPool + '/' + sVolumeIdStart):
                    # Extract the whole name (first tab separated field) and add it to the list.
                    asItems = sVolume.split('\t');
                    lstVolumes.append(asItems[0]);
        return lstVolumes;

    def getDisksMatchingRegExp(self, sRegExp):
        """
        Returns a list of disks matching the regular expression.
        """
        return self._getDisksMatchingRegExpWithPath('/dev/dsk', sRegExp);

    def getMntBase(self):
        """
        Returns the mountpoint base for the host.
        """
        return '/pools';

    def createStoragePool(self, oExec, sPool, asDisks, sRaidLvl):
        """
        Creates a new storage pool with the given disks and the given RAID level.
        Returns True on success, False on failure or unsupported RAID level.
        """
        sZPoolRaid = None;
        # Only raid5 (raidz) is supported on Solaris/ZFS here; None defaults to it.
        if sRaidLvl == 'raid5' or sRaidLvl is None:
            sZPoolRaid = 'raidz';

        fRc = True;
        if sZPoolRaid is not None:
            fRc = oExec.execBinaryNoStdOut('zpool', ('create', '-f', sPool, sZPoolRaid,) + tuple(asDisks));
        else:
            fRc = False;

        return fRc;

    def createVolume(self, oExec, sPool, sVol, sMountPoint, cbVol = None):
        """
        Creates and mounts a filesystem at the given mountpoint using the
        given pool and volume IDs.  cbVol, if given, limits the volume size.
        """
        fRc = True;
        if cbVol is not None:
            fRc = oExec.execBinaryNoStdOut('zfs', ('create', '-o', 'mountpoint='+sMountPoint, '-V', cbVol, sPool + '/' + sVol));
        else:
            fRc = oExec.execBinaryNoStdOut('zfs', ('create', '-o', 'mountpoint='+sMountPoint, sPool + '/' + sVol));

        return fRc;

    def destroyVolume(self, oExec, sPool, sVol):
        """
        Destroys the given volume.
        """
        fRc = oExec.execBinaryNoStdOut('zfs', ('destroy', sPool + '/' + sVol));
        return fRc;

    def destroyPool(self, oExec, sPool):
        """
        Destroys the given storage pool.
        """
        fRc = oExec.execBinaryNoStdOut('zpool', ('destroy', sPool));
        return fRc;

    def cleanupPoolsAndVolumes(self, oExec, sPoolIdStart, sVolIdStart):
        """
        Cleans up any pools and volumes starting with the name in the given
        parameters.  Returns False if any enumeration or destruction failed.
        """
        fRc = True;
        lstPools = self._getActivePoolsStartingWith(oExec, sPoolIdStart);
        if lstPools is not None:
            for sPool in lstPools:
                lstVolumes = self._getActiveVolumesInPoolStartingWith(oExec, sPool, sVolIdStart);
                if lstVolumes is not None:
                    # Destroy all the volumes first
                    for sVolume in lstVolumes:
                        fRc2 = oExec.execBinaryNoStdOut('zfs', ('destroy', sVolume));
                        if not fRc2:
                            fRc = fRc2;

                    # Destroy the pool
                    fRc2 = self.destroyPool(oExec, sPool);
                    if not fRc2:
                        fRc = fRc2;
                else:
                    fRc = False;
        else:
            fRc = False;

        return fRc;

    def createRamDisk(self, oExec, cbRamDisk):
        """
        Creates a RAM backed disk with the given size.
        Returns a StorageDisk object on success, None on failure.
        """
        oDisk = None;
        sRamDiskName = 'ramdisk%u' % (self.idxRamDisk,);
        # Fix: pass the arguments as one tuple like every other execBinary()
        # call in this file; they were previously passed as separate
        # positional parameters which doesn't match the executor interface.
        fRc, sOut, _ = oExec.execBinary('ramdiskadm', ('-a', sRamDiskName, str(cbRamDisk)));
        if fRc:
            self.idxRamDisk += 1;
            oDisk = StorageDisk(sOut.rstrip(), True);

        return oDisk;

    def destroyRamDisk(self, oExec, oDisk):
        """
        Destroys the given ramdisk object.
        """
        sRamDiskName = os.path.basename(oDisk.getPath());
        return oExec.execBinaryNoStdOut('ramdiskadm', ('-d', sRamDiskName));
240
class StorageConfigOsLinux(StorageConfigOs):
    """
    Class implementing the Linux specifics for a storage configuration
    (LVM/mdadm pools, sfdisk partitioning and zram backed RAM disks).
    """

    def __init__(self):
        StorageConfigOs.__init__(self);
        self.dSimplePools = { }; # Simple storage pools which don't use lvm (just one partition)
        self.dMounts = { }; # Pool/Volume to mountpoint mapping.

    def _getDmRaidLevelFromLvl(self, sRaidLvl):
        """
        Converts our raid level indicators to something mdadm can understand.
        """
        if sRaidLvl == 'raid5':
            return '5';
        elif sRaidLvl == 'raid1':
            return 'mirror';
        elif sRaidLvl == 'raid0' or sRaidLvl is None:
            return 'stripe';

        # Unknown levels fall back to striping.
        return 'stripe';

    def getDisksMatchingRegExp(self, sRegExp):
        """
        Returns a list of disks matching the regular expression.
        """
        return self._getDisksMatchingRegExpWithPath('/dev/', sRegExp);

    def getMntBase(self):
        """
        Returns the mountpoint base for the host.
        """
        return '/mnt';

    def createStoragePool(self, oExec, sPool, asDisks, sRaidLvl):
        """
        Creates a new storage pool with the given disks and the given RAID level.
        """
        fRc = True;
        if len(asDisks) == 1 and sRaidLvl is None:
            # Doesn't require LVM, put into the simple pools dictionary so we can
            # use it when creating a volume later.
            self.dSimplePools[sPool] = asDisks[0];
        else:
            # If a RAID is required use dm-raid first to create one.
            asLvmPvDisks = asDisks;
            fRc = oExec.execBinaryNoStdOut('mdadm', ('--create', '/dev/md0', '--assume-clean',
                                                     '--level=' + self._getDmRaidLevelFromLvl(sRaidLvl),
                                                     '--raid-devices=' + str(len(asDisks))) + tuple(asDisks));
            if fRc:
                # /dev/md0 is the only block device to use for our volume group.
                asLvmPvDisks = [ '/dev/md0' ];

            # Create a physical volume on every disk first.
            for sLvmPvDisk in asLvmPvDisks:
                fRc = oExec.execBinaryNoStdOut('pvcreate', (sLvmPvDisk, ));
                if not fRc:
                    break;

            if fRc:
                # Create volume group with all physical volumes included
                fRc = oExec.execBinaryNoStdOut('vgcreate', (sPool, ) + tuple(asLvmPvDisks));
        return fRc;

    def createVolume(self, oExec, sPool, sVol, sMountPoint, cbVol = None):
        """
        Creates and mounts a filesystem at the given mountpoint using the
        given pool and volume IDs.  cbVol, if given, is the volume size in bytes.
        """
        fRc = True;
        sBlkDev = None;
        if sPool in self.dSimplePools: # dict.has_key() was removed in Python 3.
            sDiskPath = self.dSimplePools.get(sPool);
            if sDiskPath.find('zram') != -1:
                # zram devices are not partitioned, use them directly.
                sBlkDev = sDiskPath;
            else:
                # Create a partition with the requested size
                sFdiskScript = ';\n'; # Single partition filling everything
                if cbVol is not None:
                    # Floor division: sfdisk wants a whole number of 512 byte sectors.
                    sFdiskScript = ',' + str(cbVol // 512) + '\n';
                fRc = oExec.execBinaryNoStdOut('sfdisk', ('--no-reread', '--wipe', 'always', '-q', '-f', sDiskPath), \
                                               sFdiskScript);
                if fRc:
                    # NVMe devices use a 'p' separator between device and partition number.
                    if sDiskPath.find('nvme') != -1:
                        sBlkDev = sDiskPath + 'p1';
                    else:
                        sBlkDev = sDiskPath + '1';
        else:
            if cbVol is None:
                fRc = oExec.execBinaryNoStdOut('lvcreate', ('-l', '100%FREE', '-n', sVol, sPool));
            else:
                fRc = oExec.execBinaryNoStdOut('lvcreate', ('-L', str(cbVol), '-n', sVol, sPool));
            if fRc:
                # Fix: the path separator after /dev/mapper was missing, producing
                # an invalid device path like /dev/mapperpool0-vol0.
                sBlkDev = '/dev/mapper/' + sPool + '-' + sVol;

        if fRc is True and sBlkDev is not None:
            # Create a filesystem and mount it
            fRc = oExec.execBinaryNoStdOut('mkfs.ext4', ('-F', '-F', sBlkDev,));
            fRc = fRc and oExec.mkDir(sMountPoint);
            fRc = fRc and oExec.execBinaryNoStdOut('mount', (sBlkDev, sMountPoint));
            if fRc:
                self.dMounts[sPool + '/' + sVol] = sMountPoint;
        return fRc;

    def destroyVolume(self, oExec, sPool, sVol):
        """
        Destroys the given volume.
        """
        # Unmount first
        sMountPoint = self.dMounts[sPool + '/' + sVol];
        fRc = oExec.execBinaryNoStdOut('umount', (sMountPoint,));
        self.dMounts.pop(sPool + '/' + sVol);
        oExec.rmDir(sMountPoint);
        if sPool in self.dSimplePools:
            # Wipe partition table
            sDiskPath = self.dSimplePools.get(sPool);
            if sDiskPath.find('zram') == -1:
                fRc = oExec.execBinaryNoStdOut('sfdisk', ('--no-reread', '--wipe', 'always', '-q', '-f', '--delete', \
                                               sDiskPath));
        else:
            fRc = oExec.execBinaryNoStdOut('lvremove', (sPool + '/' + sVol,));
        return fRc;

    def destroyPool(self, oExec, sPool):
        """
        Destroys the given storage pool.
        """
        fRc = True;
        if sPool in self.dSimplePools:
            # Simple pools have no LVM state, just forget about the disk.
            self.dSimplePools.pop(sPool);
        else:
            fRc = oExec.execBinaryNoStdOut('vgremove', (sPool,));
        return fRc;

    def cleanupPoolsAndVolumes(self, oExec, sPoolIdStart, sVolIdStart):
        """
        Cleans up any pools and volumes starting with the name in the given
        parameters.
        """
        # @todo: Needs implementation, for LVM based configs a similar approach can be used
        #        as for Solaris.
        _ = oExec;
        _ = sPoolIdStart;
        _ = sVolIdStart;
        return True;

    def createRamDisk(self, oExec, cbRamDisk):
        """
        Creates a RAM backed disk with the given size.
        Returns a StorageDisk object on success, None on failure.
        """
        # Make sure the ZRAM module is loaded.
        oDisk = None;
        fRc = oExec.execBinaryNoStdOut('modprobe', ('zram',));
        if fRc:
            fRc, sOut, _ = oExec.execBinary('zramctl', ('--raw', '-f', '-s', str(cbRamDisk)));
            if fRc:
                oDisk = StorageDisk(sOut.rstrip(), True);

        return oDisk;

    def destroyRamDisk(self, oExec, oDisk):
        """
        Destroys the given ramdisk object.
        """
        return oExec.execBinaryNoStdOut('zramctl', ('-r', oDisk.getPath()));
407
class StorageCfg(object):
    """
    Storage configuration helper class taking care of the different host OS.
    """

    def __init__(self, oExec, sTargetOs, oDiskCfg):
        self.oExec    = oExec;
        self.oStorOs  = None; # OS specific config backend, set below on success.
        self.lstDisks = [ ]; # List of disks present in the system.
        self.dPools   = { }; # Dictionary of storage pools.
        self.dVols    = { }; # Dictionary of volumes.
        self.iPoolId  = 0;
        self.iVolId   = 0;

        fRc = True;
        oStorOs = None;
        if sTargetOs == 'solaris':
            oStorOs = StorageConfigOsSolaris();
        elif sTargetOs == 'linux':
            oStorOs = StorageConfigOsLinux(); # pylint: disable=R0204
        else:
            fRc = False;

        if fRc:
            self.oStorOs = oStorOs;
            # basestring covers str and unicode on Python 2 but is gone in
            # Python 3 where plain str suffices.
            try:
                tStringTypes = basestring;      # pylint: disable=undefined-variable
            except NameError:
                tStringTypes = str;
            if isinstance(oDiskCfg, tStringTypes):
                self.lstDisks = oStorOs.getDisksMatchingRegExp(oDiskCfg);
            else:
                # Assume a list of disks and add.
                for sDisk in oDiskCfg:
                    self.lstDisks.append(StorageDisk(sDisk));

    def __del__(self):
        self.cleanup();

    def cleanup(self):
        """
        Cleans up any created storage configs.
        """

        # Destroy all volumes first.  Iterate over snapshots because the
        # destroy methods pop entries from the dictionaries while we loop.
        for sMountPoint in list(self.dVols.keys()):
            self.destroyVolume(sMountPoint);

        # Destroy all pools.
        for sPool in list(self.dPools.keys()):
            self.destroyStoragePool(sPool);

        self.dVols.clear();
        self.dPools.clear();
        self.iPoolId = 0;
        self.iVolId  = 0;

    def getRawDisk(self):
        """
        Returns a raw disk device from the list of free devices for use,
        or None if all disks are in use.
        """
        for oDisk in self.lstDisks:
            if oDisk.isUsed() is False:
                oDisk.setUsed(True);
                return oDisk.getPath();

        return None;

    def getUnusedDiskCount(self):
        """
        Returns the number of unused disks.
        """

        cDisksUnused = 0;
        for oDisk in self.lstDisks:
            if not oDisk.isUsed():
                cDisksUnused += 1;

        return cDisksUnused;

    def createStoragePool(self, cDisks = 0, sRaidLvl = None,
                          cbPool = None, fRamDisk = False):
        """
        Create a new storage pool, returning a (success, pool name) tuple.
        cDisks == 0 means use all currently unused disks.
        """
        lstDisks = [ ];
        fRc = True;
        sPool = None;

        if fRamDisk:
            oDisk = self.oStorOs.createRamDisk(self.oExec, cbPool);
            if oDisk is not None:
                lstDisks.append(oDisk);
                cDisks = 1;
        else:
            if cDisks == 0:
                cDisks = self.getUnusedDiskCount();

            for oDisk in self.lstDisks:
                if not oDisk.isUsed():
                    oDisk.setUsed(True);
                    lstDisks.append(oDisk);
                    if len(lstDisks) == cDisks:
                        break;

        # Enough drives to satisfy the request?
        if len(lstDisks) == cDisks:
            # Create a list of all device paths
            lstDiskPaths = [ ];
            for oDisk in lstDisks:
                lstDiskPaths.append(oDisk.getPath());

            # Find a name for the pool
            sPool = 'pool' + str(self.iPoolId);
            self.iPoolId += 1;

            fRc = self.oStorOs.createStoragePool(self.oExec, sPool, lstDiskPaths, sRaidLvl);
            if fRc:
                self.dPools[sPool] = lstDisks;
            else:
                self.iPoolId -= 1;
        else:
            fRc = False;

        # Cleanup in case of error.
        if not fRc:
            for oDisk in lstDisks:
                oDisk.setUsed(False);
                if oDisk.isRamDisk():
                    self.oStorOs.destroyRamDisk(self.oExec, oDisk);

        return fRc, sPool;

    def destroyStoragePool(self, sPool):
        """
        Destroys the storage pool with the given ID.
        """

        lstDisks = self.dPools.get(sPool);
        if lstDisks is not None:
            fRc = self.oStorOs.destroyPool(self.oExec, sPool);
            if fRc:
                # Mark disks as unused
                self.dPools.pop(sPool);
                for oDisk in lstDisks:
                    oDisk.setUsed(False);
                    if oDisk.isRamDisk():
                        self.oStorOs.destroyRamDisk(self.oExec, oDisk);
        else:
            fRc = False;

        return fRc;

    def createVolume(self, sPool, cbVol = None):
        """
        Creates a new volume from the given pool returning a
        (success, mountpoint) tuple.
        """

        fRc = True;
        sMountPoint = None;
        if sPool in self.dPools: # dict.has_key() was removed in Python 3.
            sVol = 'vol' + str(self.iVolId);
            sMountPoint = self.oStorOs.getMntBase() + '/' + sVol;
            self.iVolId += 1;
            fRc = self.oStorOs.createVolume(self.oExec, sPool, sVol, sMountPoint, cbVol);
            if fRc:
                self.dVols[sMountPoint] = (sVol, sPool);
            else:
                self.iVolId -= 1;
        else:
            fRc = False;

        return fRc, sMountPoint;

    def destroyVolume(self, sMountPoint):
        """
        Destroy the volume at the given mount point.
        """

        # Fix: supply a default so an unknown mount point doesn't raise a
        # TypeError when unpacking None; the check below handles it instead.
        sVol, sPool = self.dVols.get(sMountPoint, (None, None));
        fRc = True;
        if sVol is not None:
            fRc = self.oStorOs.destroyVolume(self.oExec, sPool, sVol);
            if fRc:
                self.dVols.pop(sMountPoint);
        else:
            fRc = False;

        return fRc;

    def mkDirOnVolume(self, sMountPoint, sDir, fMode = 0o700):
        """
        Creates a new directory on the volume pointed to by the given mount point.
        (fMode default unchanged; 0o700 spelling works on Python 2.6+ and 3.)
        """
        return self.oExec.mkDir(sMountPoint + '/' + sDir, fMode);

    def cleanupLeftovers(self):
        """
        Tries to cleanup any leftover pools and volumes from a failed previous run.
        """
        return self.oStorOs.cleanupPoolsAndVolumes(self.oExec, 'pool', 'vol');
604
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette