VirtualBox

source: vbox/trunk/src/VBox/ValidationKit/tests/storage/storagecfg.py@78351

Last change on this file since 78351 was 76553, checked in by vboxsync, 6 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 19.9 KB
Line 
1# -*- coding: utf-8 -*-
2# $Id: storagecfg.py 76553 2019-01-01 01:45:53Z vboxsync $
3
4"""
5VirtualBox Validation Kit - Storage test configuration API.
6"""
7
8__copyright__ = \
9"""
10Copyright (C) 2016-2019 Oracle Corporation
11
12This file is part of VirtualBox Open Source Edition (OSE), as
13available from http://www.virtualbox.org. This file is free software;
14you can redistribute it and/or modify it under the terms of the GNU
15General Public License (GPL) as published by the Free Software
16Foundation, in version 2 as it comes in the "COPYING" file of the
17VirtualBox OSE distribution. VirtualBox OSE is distributed in the
18hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
19
20The contents of this file may alternatively be used under the terms
21of the Common Development and Distribution License Version 1.0
22(CDDL) only, as it comes in the "COPYING.CDDL" file of the
23VirtualBox OSE distribution, in which case the provisions of the
24CDDL are applicable instead of those of the GPL.
25
26You may elect to license modified versions of this file under the
27terms and conditions of either the GPL or the CDDL or both.
28"""
29__version__ = "$Revision: 76553 $"
30
31# Standard Python imports.
32import os;
33import re;
34
35# Validation Kit imports.
36from common import utils;
37
38
39class StorageDisk(object):
40 """
41 Class representing a disk for testing.
42 """
43
44 def __init__(self, sPath, fRamDisk = False):
45 self.sPath = sPath;
46 self.fUsed = False;
47 self.fRamDisk = fRamDisk;
48
49 def getPath(self):
50 """
51 Return the disk path.
52 """
53 return self.sPath;
54
55 def isUsed(self):
56 """
57 Returns whether the disk is currently in use.
58 """
59 return self.fUsed;
60
61 def isRamDisk(self):
62 """
63 Returns whether the disk object has a RAM backing.
64 """
65 return self.fRamDisk;
66
67 def setUsed(self, fUsed):
68 """
69 Sets the used flag for the disk.
70 """
71 if fUsed:
72 if self.fUsed:
73 return False;
74
75 self.fUsed = True;
76 else:
77 self.fUsed = fUsed;
78
79 return True;
80
81class StorageConfigOs(object):
82 """
83 Base class for a single host's OS storage configuration.
84 """
85
86 def _getDisksMatchingRegExpWithPath(self, sPath, sRegExp):
87 """
88 Returns a list of disks under the given path matching the given regular expression.
89 """
90
91 lstDisks = [];
92 oRegExp = re.compile(sRegExp);
93 asFiles = os.listdir(sPath);
94 for sFile in asFiles:
95 if oRegExp.match(os.path.basename(sFile)) and os.path.exists(sPath + '/' + sFile):
96 lstDisks.append(StorageDisk(sPath + '/' + sFile));
97
98 return lstDisks;
99
100class StorageConfigOsSolaris(StorageConfigOs):
101 """
102 Class implementing the Solaris specifics for a storage configuration.
103 """
104
105 def __init__(self):
106 StorageConfigOs.__init__(self);
107 self.idxRamDisk = 0;
108
109 def _getActivePoolsStartingWith(self, oExec, sPoolIdStart):
110 """
111 Returns a list of pools starting with the given ID or None on failure.
112 """
113 lstPools = None;
114 fRc, sOutput, _ = oExec.execBinary('zpool', ('list', '-H'));
115 if fRc:
116 lstPools = [];
117 asPools = sOutput.splitlines();
118 for sPool in asPools:
119 if sPool.startswith(sPoolIdStart):
120 # Extract the whole name and add it to the list.
121 asItems = sPool.split('\t');
122 lstPools.append(asItems[0]);
123 return lstPools;
124
125 def _getActiveVolumesInPoolStartingWith(self, oExec, sPool, sVolumeIdStart):
126 """
127 Returns a list of active volumes for the given pool starting with the given
128 identifier or None on failure.
129 """
130 lstVolumes = None;
131 fRc, sOutput, _ = oExec.execBinary('zfs', ('list', '-H'));
132 if fRc:
133 lstVolumes = [];
134 asVolumes = sOutput.splitlines();
135 for sVolume in asVolumes:
136 if sVolume.startswith(sPool + '/' + sVolumeIdStart):
137 # Extract the whole name and add it to the list.
138 asItems = sVolume.split('\t');
139 lstVolumes.append(asItems[0]);
140 return lstVolumes;
141
142 def getDisksMatchingRegExp(self, sRegExp):
143 """
144 Returns a list of disks matching the regular expression.
145 """
146 return self._getDisksMatchingRegExpWithPath('/dev/dsk', sRegExp);
147
148 def getMntBase(self):
149 """
150 Returns the mountpoint base for the host.
151 """
152 return '/pools';
153
154 def createStoragePool(self, oExec, sPool, asDisks, sRaidLvl):
155 """
156 Creates a new storage pool with the given disks and the given RAID level.
157 """
158 sZPoolRaid = None;
159 if len(asDisks) > 1 and (sRaidLvl == 'raid5' or sRaidLvl is None):
160 sZPoolRaid = 'raidz';
161
162 fRc = True;
163 if sZPoolRaid is not None:
164 fRc = oExec.execBinaryNoStdOut('zpool', ('create', '-f', sPool, sZPoolRaid,) + tuple(asDisks));
165 else:
166 fRc = oExec.execBinaryNoStdOut('zpool', ('create', '-f', sPool,) + tuple(asDisks));
167
168 return fRc;
169
170 def createVolume(self, oExec, sPool, sVol, sMountPoint, cbVol = None):
171 """
172 Creates and mounts a filesystem at the given mountpoint using the
173 given pool and volume IDs.
174 """
175 fRc = True;
176 if cbVol is not None:
177 fRc = oExec.execBinaryNoStdOut('zfs', ('create', '-o', 'mountpoint='+sMountPoint, '-V', cbVol, sPool + '/' + sVol));
178 else:
179 fRc = oExec.execBinaryNoStdOut('zfs', ('create', '-o', 'mountpoint='+sMountPoint, sPool + '/' + sVol));
180
181 return fRc;
182
183 def destroyVolume(self, oExec, sPool, sVol):
184 """
185 Destroys the given volume.
186 """
187 fRc = oExec.execBinaryNoStdOut('zfs', ('destroy', sPool + '/' + sVol));
188 return fRc;
189
190 def destroyPool(self, oExec, sPool):
191 """
192 Destroys the given storage pool.
193 """
194 fRc = oExec.execBinaryNoStdOut('zpool', ('destroy', sPool));
195 return fRc;
196
197 def cleanupPoolsAndVolumes(self, oExec, sPoolIdStart, sVolIdStart):
198 """
199 Cleans up any pools and volumes whose names start with the given
200 prefixes.
201 """
202 fRc = True;
203 lstPools = self._getActivePoolsStartingWith(oExec, sPoolIdStart);
204 if lstPools is not None:
205 for sPool in lstPools:
206 lstVolumes = self._getActiveVolumesInPoolStartingWith(oExec, sPool, sVolIdStart);
207 if lstVolumes is not None:
208 # Destroy all the volumes first
209 for sVolume in lstVolumes:
210 fRc2 = oExec.execBinaryNoStdOut('zfs', ('destroy', sVolume));
211 if not fRc2:
212 fRc = fRc2;
213
214 # Destroy the pool
215 fRc2 = self.destroyPool(oExec, sPool);
216 if not fRc2:
217 fRc = fRc2;
218 else:
219 fRc = False;
220 else:
221 fRc = False;
222
223 return fRc;
224
225 def createRamDisk(self, oExec, cbRamDisk):
226 """
227 Creates a RAM backed disk with the given size.
228 """
229 oDisk = None;
230 sRamDiskName = 'ramdisk%u' % (self.idxRamDisk,);
231 fRc, _ , _ = oExec.execBinary('ramdiskadm', ('-a', sRamDiskName, str(cbRamDisk)));
232 if fRc:
233 self.idxRamDisk += 1;
234 oDisk = StorageDisk('/dev/ramdisk/%s' % (sRamDiskName, ), True);
235
236 return oDisk;
237
238 def destroyRamDisk(self, oExec, oDisk):
239 """
240 Destroys the given ramdisk object.
241 """
242 sRamDiskName = os.path.basename(oDisk.getPath());
243 return oExec.execBinaryNoStdOut('ramdiskadm', ('-d', sRamDiskName));
244
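# For illustration, the command sequence the Solaris implementation above issues
# for a two-disk raidz pool with a single volume looks roughly like this (the
# pool/volume names and disk paths are examples only; the actual names come from
# StorageCfg and the configured disk list):
#
#   zpool create -f pool0 raidz /dev/dsk/c0t1d0 /dev/dsk/c0t2d0
#   zfs create -o mountpoint=/pools/vol0 pool0/vol0
#   ...
#   zfs destroy pool0/vol0
#   zpool destroy pool0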
245class StorageConfigOsLinux(StorageConfigOs):
246 """
247 Class implementing the Linux specifics for a storage configuration.
248 """
249
250 def __init__(self):
251 StorageConfigOs.__init__(self);
252 self.dSimplePools = { }; # Simple storage pools which don't use LVM (just one partition)
253 self.dMounts = { }; # Pool/Volume to mountpoint mapping.
254
255 def _getDmRaidLevelFromLvl(self, sRaidLvl):
256 """
257 Converts our raid level indicators to something mdadm can understand.
258 """
259 if sRaidLvl == 'raid5':
260 return '5';
261 elif sRaidLvl == 'raid1':
262 return 'mirror';
263 elif sRaidLvl == 'raid0' or sRaidLvl is None:
264 return 'stripe';
265
266 return 'stripe';
267
268 def getDisksMatchingRegExp(self, sRegExp):
269 """
270 Returns a list of disks matching the regular expression.
271 """
272 return self._getDisksMatchingRegExpWithPath('/dev/', sRegExp);
273
274 def getMntBase(self):
275 """
276 Returns the mountpoint base for the host.
277 """
278 return '/mnt';
279
280 def createStoragePool(self, oExec, sPool, asDisks, sRaidLvl):
281 """
282 Creates a new storage pool with the given disks and the given RAID level.
283 """
284 fRc = True;
285 if len(asDisks) == 1 and sRaidLvl is None:
286 # Doesn't require LVM; put it into the simple pools dictionary so we can
287 # use it when creating a volume later.
288 self.dSimplePools[sPool] = asDisks[0];
289 else:
290 # If a RAID is required, use mdadm to create the array first.
291 asLvmPvDisks = asDisks;
292 fRc = oExec.execBinaryNoStdOut('mdadm', ('--create', '/dev/md0', '--assume-clean',
293 '--level=' + self._getDmRaidLevelFromLvl(sRaidLvl),
294 '--raid-devices=' + str(len(asDisks))) + tuple(asDisks));
295 if fRc:
296 # /dev/md0 is the only block device to use for our volume group.
297 asLvmPvDisks = [ '/dev/md0' ];
298
299 # Create a physical volume on every disk first.
300 for sLvmPvDisk in asLvmPvDisks:
301 fRc = oExec.execBinaryNoStdOut('pvcreate', (sLvmPvDisk, ));
302 if not fRc:
303 break;
304
305 if fRc:
306 # Create volume group with all physical volumes included
307 fRc = oExec.execBinaryNoStdOut('vgcreate', (sPool, ) + tuple(asLvmPvDisks));
308 return fRc;
309
310 def createVolume(self, oExec, sPool, sVol, sMountPoint, cbVol = None):
311 """
312 Creates and mounts a filesystem at the given mountpoint using the
313 given pool and volume IDs.
314 """
315 fRc = True;
316 sBlkDev = None;
317 if sPool in self.dSimplePools:
318 sDiskPath = self.dSimplePools.get(sPool);
319 if sDiskPath.find('zram') != -1:
320 sBlkDev = sDiskPath;
321 else:
322 # Create a partition with the requested size
323 sFdiskScript = ';\n'; # Single partition filling everything
324 if cbVol is not None:
325 sFdiskScript = ',' + str(cbVol // 512) + '\n'; # Get number of sectors
326 fRc = oExec.execBinaryNoStdOut('sfdisk', ('--no-reread', '--wipe', 'always', '-q', '-f', sDiskPath), \
327 sFdiskScript);
328 if fRc:
329 if sDiskPath.find('nvme') != -1:
330 sBlkDev = sDiskPath + 'p1';
331 else:
332 sBlkDev = sDiskPath + '1';
333 else:
334 if cbVol is None:
335 fRc = oExec.execBinaryNoStdOut('lvcreate', ('-l', '100%FREE', '-n', sVol, sPool));
336 else:
337 fRc = oExec.execBinaryNoStdOut('lvcreate', ('-L', str(cbVol), '-n', sVol, sPool));
338 if fRc:
339 sBlkDev = '/dev/mapper/' + sPool + '-' + sVol;
340
341 if fRc is True and sBlkDev is not None:
342 # Create a filesystem and mount it
343 fRc = oExec.execBinaryNoStdOut('mkfs.ext4', ('-F', '-F', sBlkDev,));
344 fRc = fRc and oExec.mkDir(sMountPoint);
345 fRc = fRc and oExec.execBinaryNoStdOut('mount', (sBlkDev, sMountPoint));
346 if fRc:
347 self.dMounts[sPool + '/' + sVol] = sMountPoint;
348 return fRc;
349
350 def destroyVolume(self, oExec, sPool, sVol):
351 """
352 Destroys the given volume.
353 """
354 # Unmount first
355 sMountPoint = self.dMounts[sPool + '/' + sVol];
356 fRc = oExec.execBinaryNoStdOut('umount', (sMountPoint,));
357 self.dMounts.pop(sPool + '/' + sVol);
358 oExec.rmDir(sMountPoint);
359 if sPool in self.dSimplePools:
360 # Wipe partition table
361 sDiskPath = self.dSimplePools.get(sPool);
362 if sDiskPath.find('zram') == -1:
363 fRc = oExec.execBinaryNoStdOut('sfdisk', ('--no-reread', '--wipe', 'always', '-q', '-f', '--delete', \
364 sDiskPath));
365 else:
366 fRc = oExec.execBinaryNoStdOut('lvremove', (sPool + '/' + sVol,));
367 return fRc;
368
369 def destroyPool(self, oExec, sPool):
370 """
371 Destroys the given storage pool.
372 """
373 fRc = True;
374 if sPool in self.dSimplePools:
375 self.dSimplePools.pop(sPool);
376 else:
377 fRc = oExec.execBinaryNoStdOut('vgremove', (sPool,));
378 return fRc;
379
380 def cleanupPoolsAndVolumes(self, oExec, sPoolIdStart, sVolIdStart):
381 """
382 Cleans up any pools and volumes whose names start with the given
383 prefixes.
384 """
385 # @todo: Needs implementation; for LVM-based configs a similar approach
386 # can be used as for Solaris.
387 _ = oExec;
388 _ = sPoolIdStart;
389 _ = sVolIdStart;
390 return True;
391
392 def createRamDisk(self, oExec, cbRamDisk):
393 """
394 Creates a RAM backed disk with the given size.
395 """
396 # Make sure the ZRAM module is loaded.
397 oDisk = None;
398 fRc = oExec.execBinaryNoStdOut('modprobe', ('zram',));
399 if fRc:
400 fRc, sOut, _ = oExec.execBinary('zramctl', ('--raw', '-f', '-s', str(cbRamDisk)));
401 if fRc:
402 oDisk = StorageDisk(sOut.rstrip(), True);
403
404 return oDisk;
405
406 def destroyRamDisk(self, oExec, oDisk):
407 """
408 Destroys the given ramdisk object.
409 """
410 return oExec.execBinaryNoStdOut('zramctl', ('-r', oDisk.getPath()));
411
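# For illustration, the command sequence the Linux implementation above issues
# for an LVM-backed pool (more than one disk, or an explicit RAID level) with a
# single volume looks roughly like this (device, pool and volume names are
# examples only):
#
#   mdadm --create /dev/md0 --assume-clean --level=stripe --raid-devices=2 /dev/sdb /dev/sdc
#   pvcreate /dev/md0
#   vgcreate pool0 /dev/md0
#   lvcreate -l 100%FREE -n vol0 pool0
#   mkfs.ext4 -F -F /dev/mapper/pool0-vol0
#   mount /dev/mapper/pool0-vol0 /mnt/vol0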
412class StorageCfg(object):
413 """
414 Storage configuration helper class taking care of the different host OS.
415 """
416
417 def __init__(self, oExec, sTargetOs, oDiskCfg):
418 self.oExec = oExec;
419 self.lstDisks = [ ]; # List of disks present in the system.
420 self.dPools = { }; # Dictionary of storage pools.
421 self.dVols = { }; # Dictionary of volumes.
422 self.iPoolId = 0;
423 self.iVolId = 0;
424
425 fRc = True;
426 oStorOs = None;
427 if sTargetOs == 'solaris':
428 oStorOs = StorageConfigOsSolaris();
429 elif sTargetOs == 'linux':
430 oStorOs = StorageConfigOsLinux(); # pylint: disable=R0204
431 else:
432 fRc = False;
433
434 if fRc:
435 self.oStorOs = oStorOs;
436 if utils.isString(oDiskCfg):
437 self.lstDisks = oStorOs.getDisksMatchingRegExp(oDiskCfg);
438 else:
439 # Assume a list of disks and add them.
440 for sDisk in oDiskCfg:
441 self.lstDisks.append(StorageDisk(sDisk));
442
443 def __del__(self):
444 self.cleanup();
445
446 def cleanup(self):
447 """
448 Cleans up any created storage configs.
449 """
450
451 # Destroy all volumes first.
452 for sMountPoint in list(self.dVols.keys()): # pylint: disable=C0201
453 self.destroyVolume(sMountPoint);
454
455 # Destroy all pools.
456 for sPool in list(self.dPools.keys()): # pylint: disable=C0201
457 self.destroyStoragePool(sPool);
458
459 self.dVols.clear();
460 self.dPools.clear();
461 self.iPoolId = 0;
462 self.iVolId = 0;
463
464 def getRawDisk(self):
465 """
466 Returns the path of an unused raw disk device and marks it as used, or None if no disk is free.
467 """
468 for oDisk in self.lstDisks:
469 if oDisk.isUsed() is False:
470 oDisk.setUsed(True);
471 return oDisk.getPath();
472
473 return None;
474
475 def getUnusedDiskCount(self):
476 """
477 Returns the number of unused disks.
478 """
479
480 cDisksUnused = 0;
481 for oDisk in self.lstDisks:
482 if not oDisk.isUsed():
483 cDisksUnused += 1;
484
485 return cDisksUnused;
486
487 def createStoragePool(self, cDisks = 0, sRaidLvl = None,
488 cbPool = None, fRamDisk = False):
489 """
490 Create a new storage pool
491 """
492 lstDisks = [ ];
493 fRc = True;
494 sPool = None;
495
496 if fRamDisk:
497 oDisk = self.oStorOs.createRamDisk(self.oExec, cbPool);
498 if oDisk is not None:
499 lstDisks.append(oDisk);
500 cDisks = 1;
501 else:
502 if cDisks == 0:
503 cDisks = self.getUnusedDiskCount();
504
505 for oDisk in self.lstDisks:
506 if not oDisk.isUsed():
507 oDisk.setUsed(True);
508 lstDisks.append(oDisk);
509 if len(lstDisks) == cDisks:
510 break;
511
512 # Enough drives to satisfy the request?
513 if len(lstDisks) == cDisks:
514 # Create a list of all device paths
515 lstDiskPaths = [ ];
516 for oDisk in lstDisks:
517 lstDiskPaths.append(oDisk.getPath());
518
519 # Find a name for the pool
520 sPool = 'pool' + str(self.iPoolId);
521 self.iPoolId += 1;
522
523 fRc = self.oStorOs.createStoragePool(self.oExec, sPool, lstDiskPaths, sRaidLvl);
524 if fRc:
525 self.dPools[sPool] = lstDisks;
526 else:
527 self.iPoolId -= 1;
528 else:
529 fRc = False;
530
531 # Cleanup in case of error.
532 if not fRc:
533 for oDisk in lstDisks:
534 oDisk.setUsed(False);
535 if oDisk.isRamDisk():
536 self.oStorOs.destroyRamDisk(self.oExec, oDisk);
537
538 return fRc, sPool;
539
540 def destroyStoragePool(self, sPool):
541 """
542 Destroys the storage pool with the given ID.
543 """
544
545 lstDisks = self.dPools.get(sPool);
546 if lstDisks is not None:
547 fRc = self.oStorOs.destroyPool(self.oExec, sPool);
548 if fRc:
549 # Mark disks as unused
550 self.dPools.pop(sPool);
551 for oDisk in lstDisks:
552 oDisk.setUsed(False);
553 if oDisk.isRamDisk():
554 self.oStorOs.destroyRamDisk(self.oExec, oDisk);
555 else:
556 fRc = False;
557
558 return fRc;
559
560 def createVolume(self, sPool, cbVol = None):
561 """
562 Creates a new volume in the given pool and returns the mount point.
563 """
564
565 fRc = True;
566 sMountPoint = None;
567 if sPool in self.dPools:
568 sVol = 'vol' + str(self.iVolId);
569 sMountPoint = self.oStorOs.getMntBase() + '/' + sVol;
570 self.iVolId += 1;
571 fRc = self.oStorOs.createVolume(self.oExec, sPool, sVol, sMountPoint, cbVol);
572 if fRc:
573 self.dVols[sMountPoint] = (sVol, sPool);
574 else:
575 self.iVolId -= 1;
576 else:
577 fRc = False;
578
579 return fRc, sMountPoint;
580
581 def destroyVolume(self, sMountPoint):
582 """
583 Destroy the volume at the given mount point.
584 """
585
586 sVol, sPool = self.dVols.get(sMountPoint, (None, None));
587 fRc = True;
588 if sVol is not None:
589 fRc = self.oStorOs.destroyVolume(self.oExec, sPool, sVol);
590 if fRc:
591 self.dVols.pop(sMountPoint);
592 else:
593 fRc = False;
594
595 return fRc;
596
597 def mkDirOnVolume(self, sMountPoint, sDir, fMode = 0o700):
598 """
599 Creates a new directory on the volume pointed to by the given mount point.
600 """
601 return self.oExec.mkDir(sMountPoint + '/' + sDir, fMode);
602
603 def cleanupLeftovers(self):
604 """
605 Tries to clean up any leftover pools and volumes from a failed previous run.
606 """
607 return self.oStorOs.cleanupPoolsAndVolumes(self.oExec, 'pool', 'vol');
608
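A minimal usage sketch of the StorageCfg class defined above (the executor object oExec is assumed to come from the test driver and to provide the execBinary/execBinaryNoStdOut/mkDir/rmDir methods used in this file; the disk regular expression and RAID level are examples only):

    oStorCfg = StorageCfg(oExec, 'linux', 'sd[b-z]$');
    oStorCfg.cleanupLeftovers();                       # Remove pools/volumes left over from a failed previous run.
    fRc, sPool = oStorCfg.createStoragePool(cDisks = 2, sRaidLvl = 'raid0');
    if fRc:
        fRc, sMountPoint = oStorCfg.createVolume(sPool);
    if fRc:
        fRc = oStorCfg.mkDirOnVolume(sMountPoint, 'tests');
        # ... run the actual storage tests against the new directory ...
    oStorCfg.cleanup();                                # Destroys volumes and pools and frees the disks again.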