VirtualBox

source: vbox/trunk/src/VBox/ValidationKit/tests/storage/storagecfg.py@ 65747

Last change on this file since 65747 was 65746, checked in by vboxsync, 8 years ago

ValidationKit/tdStorageBenchmark1: Fixes

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 17.5 KB
Line 
1# -*- coding: utf-8 -*-
2# $Id: storagecfg.py 65746 2017-02-10 18:40:34Z vboxsync $
3
4"""
5VirtualBox Validation Kit - Storage test configuration API.
6"""
7
8__copyright__ = \
9"""
10Copyright (C) 2016 Oracle Corporation
11
12This file is part of VirtualBox Open Source Edition (OSE), as
13available from http://www.virtualbox.org. This file is free software;
14you can redistribute it and/or modify it under the terms of the GNU
15General Public License (GPL) as published by the Free Software
16Foundation, in version 2 as it comes in the "COPYING" file of the
17VirtualBox OSE distribution. VirtualBox OSE is distributed in the
18hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
19
20The contents of this file may alternatively be used under the terms
21of the Common Development and Distribution License Version 1.0
22(CDDL) only, as it comes in the "COPYING.CDDL" file of the
23VirtualBox OSE distribution, in which case the provisions of the
24CDDL are applicable instead of those of the GPL.
25
26You may elect to license modified versions of this file under the
27terms and conditions of either the GPL or the CDDL or both.
28"""
29__version__ = "$Revision: 65746 $"
30
31# Standard Python imports.
32import os;
33import re;
34
class StorageDisk(object):
    """
    A single host disk which can be claimed for storage testing.
    """

    def __init__(self, sPath):
        self.sPath = sPath;  # Device path of the disk.
        self.fUsed = False;  # Set while the disk is claimed by a pool.

    def getPath(self):
        """
        Return the disk path.
        """
        return self.sPath;

    def isUsed(self):
        """
        Returns whether the disk is currently in use.
        """
        return self.fUsed;

    def setUsed(self, fUsed):
        """
        Sets the used flag for the disk.

        Returns False when trying to claim a disk that is already in use,
        True otherwise.
        """
        if not fUsed:
            # Releasing a disk always succeeds.
            self.fUsed = False;
            return True;

        # Claiming fails if somebody already holds the disk.
        if self.fUsed:
            return False;

        self.fUsed = True;
        return True;
69
class StorageConfigOs(object):
    """
    Base class for a single hosts OS storage configuration.
    """

    def _getDisksMatchingRegExpWithPath(self, sPath, sRegExp):
        """
        Returns a list of StorageDisk objects for every entry in sPath whose
        basename matches the given regular expression and which exists.
        """
        oRegExp = re.compile(sRegExp);
        return [StorageDisk(sPath + '/' + sFile)
                for sFile in os.listdir(sPath)
                if     oRegExp.match(os.path.basename(sFile))
                   and os.path.exists(sPath + '/' + sFile)];
88
class StorageConfigOsSolaris(StorageConfigOs):
    """
    Class implementing the Solaris specifics for a storage configuration.
    """

    def __init__(self):
        StorageConfigOs.__init__(self);

    def _getActivePoolsStartingWith(self, oExec, sPoolIdStart):
        """
        Returns a list of pools starting with the given ID or None on failure.
        """
        fRc, sOutput, _ = oExec.execBinary('zpool', ('list', '-H'));
        if not fRc:
            return None;
        # The pool name is the first tab separated item of each line.
        return [sLine.split('\t')[0]
                for sLine in sOutput.splitlines()
                if sLine.startswith(sPoolIdStart)];

    def _getActiveVolumesInPoolStartingWith(self, oExec, sPool, sVolumeIdStart):
        """
        Returns a list of active volumes for the given pool starting with the given
        identifier or None on failure.
        """
        fRc, sOutput, _ = oExec.execBinary('zfs', ('list', '-H'));
        if not fRc:
            return None;
        # The volume name is the first tab separated item of each line.
        sPrefix = sPool + '/' + sVolumeIdStart;
        return [sLine.split('\t')[0]
                for sLine in sOutput.splitlines()
                if sLine.startswith(sPrefix)];

    def getDisksMatchingRegExp(self, sRegExp):
        """
        Returns a list of disks matching the regular expression.
        """
        return self._getDisksMatchingRegExpWithPath('/dev/dsk', sRegExp);

    def getMntBase(self):
        """
        Returns the mountpoint base for the host.
        """
        return '/pools';

    def createStoragePool(self, oExec, sPool, asDisks, sRaidLvl):
        """
        Creates a new storage pool with the given disks and the given RAID level.
        Only 'raid5' (mapped to raidz) or no RAID level are supported here.
        """
        if sRaidLvl not in ('raid5', None):
            return False;
        return oExec.execBinaryNoStdOut('zpool', ('create', '-f', sPool, 'raidz',) + tuple(asDisks));

    def createVolume(self, oExec, sPool, sVol, sMountPoint, cbVol = None):
        """
        Creates and mounts a filesystem at the given mountpoint using the
        given pool and volume IDs.
        """
        asArgs = ['create', '-o', 'mountpoint=' + sMountPoint];
        if cbVol is not None:
            # Create a zvol of the requested size instead of a plain dataset.
            asArgs.extend(['-V', cbVol]);
        asArgs.append(sPool + '/' + sVol);
        return oExec.execBinaryNoStdOut('zfs', tuple(asArgs));

    def destroyVolume(self, oExec, sPool, sVol):
        """
        Destroys the given volume.
        """
        return oExec.execBinaryNoStdOut('zfs', ('destroy', sPool + '/' + sVol));

    def destroyPool(self, oExec, sPool):
        """
        Destroys the given storage pool.
        """
        return oExec.execBinaryNoStdOut('zpool', ('destroy', sPool));

    def cleanupPoolsAndVolumes(self, oExec, sPoolIdStart, sVolIdStart):
        """
        Cleans up any pools and volumes starting with the name in the given
        parameters.
        """
        lstPools = self._getActivePoolsStartingWith(oExec, sPoolIdStart);
        if lstPools is None:
            return False;

        fRc = True;
        for sPool in lstPools:
            lstVolumes = self._getActiveVolumesInPoolStartingWith(oExec, sPool, sVolIdStart);
            if lstVolumes is None:
                # Couldn't enumerate the volumes; leave the pool alone.
                fRc = False;
                continue;

            # The volumes have to go before the pool itself can be destroyed.
            for sVolume in lstVolumes:
                if not oExec.execBinaryNoStdOut('zfs', ('destroy', sVolume)):
                    fRc = False;

            if not self.destroyPool(oExec, sPool):
                fRc = False;

        return fRc;
212
class StorageConfigOsLinux(StorageConfigOs):
    """
    Class implementing the Linux specifics for a storage configuration.
    """

    def __init__(self):
        StorageConfigOs.__init__(self);
        self.dSimplePools = { }; # Simple storage pools which don't use lvm (just one partition)
        self.dMounts = { }; # Pool/Volume to mountpoint mapping.

    def _getDmRaidLevelFromLvl(self, sRaidLvl):
        """
        Converts our raid level indicators to something mdadm can understand.
        Unknown levels (and None) fall back to 'stripe'.
        """
        if sRaidLvl == 'raid5':
            return '5';
        if sRaidLvl == 'raid1':
            return 'mirror';
        # 'raid0', None and anything unrecognized map to a plain stripe set.
        return 'stripe';

    def getDisksMatchingRegExp(self, sRegExp):
        """
        Returns a list of disks matching the regular expression.
        """
        return self._getDisksMatchingRegExpWithPath('/dev/', sRegExp);

    def getMntBase(self):
        """
        Returns the mountpoint base for the host.
        """
        return '/mnt';

    def createStoragePool(self, oExec, sPool, asDisks, sRaidLvl):
        """
        Creates a new storage pool with the given disks and the given RAID level.

        A single disk without a RAID level is just remembered as a "simple"
        pool and partitioned later in createVolume(); everything else is built
        with mdadm (RAID set) and LVM (volume group).
        """
        if len(asDisks) == 1 and sRaidLvl is None:
            # Doesn't require LVM, put into the simple pools dictionary so we can
            # use it when creating a volume later.
            self.dSimplePools[sPool] = asDisks[0];
            return True;

        # If a RAID is required use dm-raid first to create one.
        # NOTE(review): the md node is hardcoded, so only one RAID set can
        # exist at a time — presumably intentional for the testboxes; confirm.
        asLvmPvDisks = asDisks;
        fRc = oExec.execBinaryNoStdOut('mdadm', ('--create', '/dev/md0', '--assume-clean',
                                                 '--level=' + self._getDmRaidLevelFromLvl(sRaidLvl),
                                                 '--raid-devices=' + str(len(asDisks))) + tuple(asDisks));
        if not fRc:
            # Bail out instead of silently falling back to using the raw disks
            # for the volume group (the old code masked this failure).
            return False;

        # /dev/md0 is the only block device to use for our volume group.
        asLvmPvDisks = [ '/dev/md0' ];

        # Create a physical volume on every disk first.
        for sLvmPvDisk in asLvmPvDisks:
            fRc = oExec.execBinaryNoStdOut('pvcreate', (sLvmPvDisk, ));
            if not fRc:
                break;

        if fRc:
            # Create volume group with all physical volumes included
            fRc = oExec.execBinaryNoStdOut('vgcreate', (sPool, ) + tuple(asLvmPvDisks));
        return fRc;

    def createVolume(self, oExec, sPool, sVol, sMountPoint, cbVol = None):
        """
        Creates and mounts an ext4 filesystem at the given mountpoint using the
        given pool and volume IDs.

        cbVol is the optional volume size in bytes; the whole pool is used
        when it is omitted.  Returns True on success.
        """
        fRc = True;
        sBlkDev = None;
        if sPool in self.dSimplePools:
            sDiskPath = self.dSimplePools.get(sPool);
            # Create a partition with the requested size
            sFdiskScript = ';\n'; # Single partition filling everything
            if cbVol is not None:
                sFdiskScript = ',' + str(cbVol // 512) + '\n'; # Get number of sectors
            fRc = oExec.execBinaryNoStdOut('sfdisk', ('--no-reread', '--wipe', 'always', '-q', '-f', sDiskPath), sFdiskScript);
            if fRc:
                # NVMe devices use a 'p' separator between the device name and
                # the partition number ('nvme0n1p1' vs 'sda1').
                if sDiskPath.find('nvme') != -1:
                    sBlkDev = sDiskPath + 'p1';
                else:
                    sBlkDev = sDiskPath + '1';
        else:
            if cbVol is None:
                fRc = oExec.execBinaryNoStdOut('lvcreate', ('-l', '100%FREE', '-n', sVol, sPool));
            else:
                fRc = oExec.execBinaryNoStdOut('lvcreate', ('-L', str(cbVol), '-n', sVol, sPool));
            if fRc:
                # LVM exposes the volume as /dev/mapper/<vg>-<lv>; the old code
                # was missing the path separator after 'mapper'.
                sBlkDev = '/dev/mapper/' + sPool + '-' + sVol;

        if fRc is True and sBlkDev is not None:
            # Create a filesystem and mount it
            fRc = oExec.execBinaryNoStdOut('mkfs.ext4', ('-F', '-F', sBlkDev,));
            fRc = fRc and oExec.mkDir(sMountPoint);
            fRc = fRc and oExec.execBinaryNoStdOut('mount', (sBlkDev, sMountPoint));
            if fRc:
                self.dMounts[sPool + '/' + sVol] = sMountPoint;
        return fRc;

    def destroyVolume(self, oExec, sPool, sVol):
        """
        Destroys the given volume: unmounts it, removes the mountpoint and
        deletes the backing partition (simple pool) or logical volume (LVM).
        """
        # Unmount first
        sMountPoint = self.dMounts[sPool + '/' + sVol];
        fRc = oExec.execBinaryNoStdOut('umount', (sMountPoint,));
        self.dMounts.pop(sPool + '/' + sVol);
        oExec.rmDir(sMountPoint);
        # Only touch the backing device when the unmount succeeded; the old
        # code overwrote fRc here and masked umount failures.
        if sPool in self.dSimplePools:
            # Wipe partition table
            sDiskPath = self.dSimplePools.get(sPool);
            fRc = fRc and oExec.execBinaryNoStdOut('sfdisk', ('--no-reread', '--wipe', 'always', '-q', '-f',
                                                              '--delete', sDiskPath));
        else:
            fRc = fRc and oExec.execBinaryNoStdOut('lvremove', (sPool + '/' + sVol,));
        return fRc;

    def destroyPool(self, oExec, sPool):
        """
        Destroys the given storage pool (removes the LVM volume group, or just
        forgets a simple pool).
        """
        fRc = True;
        if sPool in self.dSimplePools:
            self.dSimplePools.pop(sPool);
        else:
            fRc = oExec.execBinaryNoStdOut('vgremove', (sPool,));
        return fRc;

    def cleanupPoolsAndVolumes(self, oExec, sPoolIdStart, sVolIdStart):
        """
        Cleans up any pools and volumes starting with the name in the given
        parameters.
        """
        # @todo: Needs implementation, for LVM based configs a similar approach can be used
        # as for Solaris.
        _ = oExec;
        _ = sPoolIdStart;
        _ = sVolIdStart;
        return True;
353
class StorageCfg(object):
    """
    Storage configuration helper class taking care of the different host OS.
    """

    def __init__(self, oExec, sTargetOs, oDiskCfg):
        """
        Initializes the config and gathers the disks to use.

        oExec is the executor object used to run host commands, sTargetOs is
        the host OS identifier ('solaris' or 'linux'), and oDiskCfg is either
        a regular expression string matching disk device names or a list of
        disk device paths.
        """
        self.oExec = oExec;
        self.lstDisks = [ ]; # List of disks present in the system.
        self.dPools = { }; # Dictionary of storage pools.
        self.dVols = { }; # Dictionary of volumes.
        self.iPoolId = 0;
        self.iVolId = 0;

        fRc = True;
        oStorOs = None;
        if sTargetOs == 'solaris':
            oStorOs = StorageConfigOsSolaris();
        elif sTargetOs == 'linux':
            oStorOs = StorageConfigOsLinux(); # pylint: disable=R0204
        else:
            # Unsupported host OS; leave the config empty.
            fRc = False;

        if fRc:
            self.oStorOs = oStorOs;
            if isinstance(oDiskCfg, basestring):
                self.lstDisks = oStorOs.getDisksMatchingRegExp(oDiskCfg);
            else:
                # Assume a list of disks and add.
                for sDisk in oDiskCfg:
                    self.lstDisks.append(StorageDisk(sDisk));

    def __del__(self):
        self.cleanup();

    def cleanup(self):
        """
        Cleans up any created storage configs.
        """

        # Destroy all volumes first.  Iterate over a copy of the keys because
        # destroyVolume() pops entries from dVols while we iterate.
        for sMountPoint in list(self.dVols.keys()):
            self.destroyVolume(sMountPoint);

        # Destroy all pools.  Same copy trick: destroyStoragePool() mutates dPools.
        for sPool in list(self.dPools.keys()):
            self.destroyStoragePool(sPool);

        self.dVols.clear();
        self.dPools.clear();
        self.iPoolId = 0;
        self.iVolId = 0;

    def getRawDisk(self):
        """
        Returns a raw disk device from the list of free devices for use,
        marking it as used, or None when all disks are taken.
        """
        for oDisk in self.lstDisks:
            if oDisk.isUsed() is False:
                oDisk.setUsed(True);
                return oDisk.getPath();

        return None;

    def getUnusedDiskCount(self):
        """
        Returns the number of unused disks.
        """

        cDisksUnused = 0;
        for oDisk in self.lstDisks:
            if not oDisk.isUsed():
                cDisksUnused += 1;

        return cDisksUnused;

    def createStoragePool(self, cDisks = 0, sRaidLvl = None):
        """
        Create a new storage pool.

        cDisks is the number of disks to use (0 means all currently unused
        disks); sRaidLvl the RAID level indicator understood by the OS
        backend.  Returns a (success, pool name) tuple; on failure the
        claimed disks are released again.
        """
        lstDisks = [ ];
        fRc = True;
        sPool = None;

        if cDisks == 0:
            cDisks = self.getUnusedDiskCount();

        # Claim the requested number of unused disks.
        for oDisk in self.lstDisks:
            if not oDisk.isUsed():
                oDisk.setUsed(True);
                lstDisks.append(oDisk);
                if len(lstDisks) == cDisks:
                    break;

        # Enough drives to satisfy the request?
        if len(lstDisks) == cDisks:
            # Create a list of all device paths
            lstDiskPaths = [ ];
            for oDisk in lstDisks:
                lstDiskPaths.append(oDisk.getPath());

            # Find a name for the pool
            sPool = 'pool' + str(self.iPoolId);
            self.iPoolId += 1;

            fRc = self.oStorOs.createStoragePool(self.oExec, sPool, lstDiskPaths, sRaidLvl);
            if fRc:
                self.dPools[sPool] = lstDisks;
            else:
                # Give the pool ID back so the next attempt reuses it.
                self.iPoolId -= 1;
        else:
            fRc = False;

        # Cleanup in case of error.
        if not fRc:
            for oDisk in lstDisks:
                oDisk.setUsed(False);

        return fRc, sPool;

    def destroyStoragePool(self, sPool):
        """
        Destroys the storage pool with the given ID, releasing its disks.
        Returns False for an unknown pool ID.
        """

        lstDisks = self.dPools.get(sPool);
        if lstDisks is not None:
            fRc = self.oStorOs.destroyPool(self.oExec, sPool);
            if fRc:
                # Mark disks as unused
                self.dPools.pop(sPool);
                for oDisk in lstDisks:
                    oDisk.setUsed(False);
        else:
            fRc = False;

        return fRc;

    def createVolume(self, sPool, cbVol = None):
        """
        Creates a new volume from the given pool, returning a
        (success, mountpoint) tuple.  cbVol optionally limits the volume size.
        """

        fRc = True;
        sMountPoint = None;
        if sPool in self.dPools:
            sVol = 'vol' + str(self.iVolId);
            sMountPoint = self.oStorOs.getMntBase() + '/' + sVol;
            self.iVolId += 1;
            fRc = self.oStorOs.createVolume(self.oExec, sPool, sVol, sMountPoint, cbVol);
            if fRc:
                self.dVols[sMountPoint] = (sVol, sPool);
            else:
                # Give the volume ID back so the next attempt reuses it.
                self.iVolId -= 1;
        else:
            fRc = False;

        return fRc, sMountPoint;

    def destroyVolume(self, sMountPoint):
        """
        Destroy the volume at the given mount point.

        Returns False for an unknown mount point instead of raising (the old
        code unpacked dVols.get() unconditionally and hit a TypeError).
        """

        oVolInfo = self.dVols.get(sMountPoint);
        if oVolInfo is None:
            return False;

        sVol, sPool = oVolInfo;
        fRc = self.oStorOs.destroyVolume(self.oExec, sPool, sVol);
        if fRc:
            self.dVols.pop(sMountPoint);

        return fRc;

    def mkDirOnVolume(self, sMountPoint, sDir, fMode = 0o700):
        """
        Creates a new directory on the volume pointed to by the given mount point.
        """
        return self.oExec.mkDir(sMountPoint + '/' + sDir, fMode);

    def cleanupLeftovers(self):
        """
        Tries to cleanup any leftover pools and volumes from a failed previous run.
        """
        return self.oStorOs.cleanupPoolsAndVolumes(self.oExec, 'pool', 'vol');
539
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette