VirtualBox

source: vbox/trunk/src/VBox/ValidationKit/tests/storage/storagecfg.py@62136

Last change on this file since 62136 was 62118, checked in by vboxsync, 9 years ago

ValidationKit/tests/storage: More hacking on the storage benchmark testcase

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 14.3 KB
# -*- coding: utf-8 -*-
# $Id: storagecfg.py 62118 2016-07-07 16:16:44Z vboxsync $

"""
VirtualBox Validation Kit - Storage test configuration API.
"""

__copyright__ = \
"""
Copyright (C) 2016 Oracle Corporation

This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.

The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.

You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 62118 $"

# Standard Python imports.
import os;
import re;

class StorageDisk(object):
    """
    Class representing a disk for testing.
    """

    def __init__(self, sPath):
        self.sPath = sPath;
        self.fUsed = False;

    def getPath(self):
        """
        Return the disk path.
        """
        return self.sPath;

    def isUsed(self):
        """
        Returns whether the disk is currently in use.
        """
        return self.fUsed;

    def setUsed(self, fUsed):
        """
        Sets the used flag for the disk.
        """
        if fUsed:
            if self.fUsed:
                return False;

            self.fUsed = True;
        else:
            self.fUsed = fUsed;

        return True;

class StorageConfigOs(object):
    """
    Base class for a single host's OS storage configuration.
    """

    def _getDisksMatchingRegExpWithPath(self, sPath, sRegExp):
        """
        Returns a list of disks under the given path matching the given regular expression.
        """

        lstDisks = [];
        oRegExp = re.compile(sRegExp);
        asFiles = os.listdir(sPath);
        for sFile in asFiles:
            if oRegExp.match(os.path.basename(sFile)) and os.path.exists(sPath + '/' + sFile):
                lstDisks.append(StorageDisk(sPath + '/' + sFile));

        return lstDisks;

class StorageConfigOsSolaris(StorageConfigOs):
    """
    Class implementing the Solaris specifics for a storage configuration.
    """

    def __init__(self):
        StorageConfigOs.__init__(self);

    def getDisksMatchingRegExp(self, sRegExp):
        """
        Returns a list of disks matching the regular expression.
        """
        return self._getDisksMatchingRegExpWithPath('/dev/dsk', sRegExp);

    def getMntBase(self):
        """
        Returns the mountpoint base for the host.
        """
        return '/pools';

    def createStoragePool(self, oExec, sPool, asDisks, sRaidLvl):
        """
        Creates a new storage pool with the given disks and the given RAID level.
        """
        sZPoolRaid = None;
        if sRaidLvl == 'raid5' or sRaidLvl is None:
            sZPoolRaid = 'raidz';

        fRc = True;
        if sZPoolRaid is not None:
            fRc, _ = oExec.execBinary('zpool', ('create', '-f', sPool, sZPoolRaid,) + tuple(asDisks));
        else:
            fRc = False;

        return fRc;

    def createVolume(self, oExec, sPool, sVol, sMountPoint, cbVol = None):
        """
        Creates and mounts a filesystem at the given mountpoint using the
        given pool and volume IDs.
        """
        fRc = True;
        if cbVol is not None:
            fRc, _ = oExec.execBinary('zfs', ('create', '-o', 'mountpoint='+sMountPoint, '-V', str(cbVol), sPool + '/' + sVol));
        else:
            fRc, _ = oExec.execBinary('zfs', ('create', '-o', 'mountpoint='+sMountPoint, sPool + '/' + sVol));

        return fRc;

    def destroyVolume(self, oExec, sPool, sVol):
        """
        Destroys the given volume.
        """
        fRc, _ = oExec.execBinary('zfs', ('destroy', sPool + '/' + sVol));
        return fRc;

    def destroyPool(self, oExec, sPool):
        """
        Destroys the given storage pool.
        """
        fRc, _ = oExec.execBinary('zpool', ('destroy', sPool));
        return fRc;

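# Example (illustrative only, hypothetical disk names): with sPool = 'pool0',
# sRaidLvl = 'raid5' and three disks, the Solaris backend above boils down to
#     zpool create -f pool0 raidz c0t1d0s0 c0t2d0s0 c0t3d0s0
# and a later createVolume() without an explicit size to
#     zfs create -o mountpoint=/pools/vol0 pool0/vol0
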
class StorageConfigOsLinux(StorageConfigOs):
    """
    Class implementing the Linux specifics for a storage configuration.
    """

    def __init__(self):
        StorageConfigOs.__init__(self);
        self.dSimplePools = { }; # Simple storage pools which don't use lvm (just one partition)
        self.dMounts = { }; # Pool/Volume to mountpoint mapping.

    def _getDmRaidLevelFromLvl(self, sRaidLvl):
        """
        Converts our raid level indicators to something mdadm can understand.
        """
        if sRaidLvl == 'raid5':
            return '5';
        elif sRaidLvl == 'raid1':
            return 'mirror';
        elif sRaidLvl == 'raid0' or sRaidLvl is None:
            return 'stripe';

        return 'stripe';

    def getDisksMatchingRegExp(self, sRegExp):
        """
        Returns a list of disks matching the regular expression.
        """
        return self._getDisksMatchingRegExpWithPath('/dev/', sRegExp);

    def getMntBase(self):
        """
        Returns the mountpoint base for the host.
        """
        return '/media';

    def createStoragePool(self, oExec, sPool, asDisks, sRaidLvl):
        """
        Creates a new storage pool with the given disks and the given RAID level.
        """
        fRc = True;
        if len(asDisks) == 1 and sRaidLvl is None:
            # Doesn't require LVM, put into the simple pools dictionary so we can
            # use it when creating a volume later.
            self.dSimplePools[sPool] = asDisks[0];
        else:
            # If a RAID is required use mdadm to create one first.
            asLvmPvDisks = asDisks;
            fRc = oExec.execBinaryNoStdOut('mdadm', ('--create', '/dev/md0', '--assume-clean',
                                                     '--level=' + self._getDmRaidLevelFromLvl(sRaidLvl),
                                                     '--raid-devices=' + str(len(asDisks))) + tuple(asDisks));
            if fRc:
                # /dev/md0 is the only block device to use for our volume group.
                asLvmPvDisks = [ '/dev/md0' ];

            # Create a physical volume on every disk first.
            for sLvmPvDisk in asLvmPvDisks:
                fRc = oExec.execBinaryNoStdOut('pvcreate', (sLvmPvDisk, ));
                if not fRc:
                    break;

            if fRc:
                # Create volume group with all physical volumes included
                fRc = oExec.execBinaryNoStdOut('vgcreate', (sPool, ) + tuple(asLvmPvDisks));
        return fRc;

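    # Example (illustrative only, hypothetical disk names): for sPool = 'pool0',
    # sRaidLvl = 'raid5' and the disks /dev/sdb, /dev/sdc and /dev/sdd, the
    # method above issues roughly
    #     mdadm --create /dev/md0 --assume-clean --level=5 --raid-devices=3 /dev/sdb /dev/sdc /dev/sdd
    #     pvcreate /dev/md0
    #     vgcreate pool0 /dev/md0
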
    def createVolume(self, oExec, sPool, sVol, sMountPoint, cbVol = None):
        """
        Creates and mounts a filesystem at the given mountpoint using the
        given pool and volume IDs.
        """
        fRc = True;
        sBlkDev = None;
        if self.dSimplePools.has_key(sPool):
            sDiskPath = self.dSimplePools.get(sPool);
            # Create a partition with the requested size
            sFdiskScript = ';\n'; # Single partition filling everything
            if cbVol is not None:
                sFdiskScript = ',' + str(cbVol / 512) + '\n'; # Get number of sectors
            fRc, _ = oExec.execBinary('sfdisk', ('--no-reread', '--wipe', 'always', '-q', '-f', sDiskPath), sFdiskScript);
            if fRc:
                sBlkDev = sDiskPath + '1';
        else:
            if cbVol is None:
                fRc = oExec.execBinaryNoStdOut('lvcreate', ('-l', '100%FREE', '-n', sVol, sPool));
            else:
                fRc = oExec.execBinaryNoStdOut('lvcreate', ('-L', str(cbVol), '-n', sVol, sPool));
            if fRc:
                sBlkDev = '/dev/mapper/' + sPool + '-' + sVol;

        if fRc is True and sBlkDev is not None:
            # Create a filesystem and mount it (-F twice forces mkfs even if the device looks in use).
            fRc = oExec.execBinaryNoStdOut('mkfs.ext4', ('-F', '-F', sBlkDev,));
            fRc = fRc and oExec.mkDir(sMountPoint);
            fRc = fRc and oExec.execBinaryNoStdOut('mount', (sBlkDev, sMountPoint));
            if fRc:
                self.dMounts[sPool + '/' + sVol] = sMountPoint;
        return fRc;

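    # Example (illustrative only, hypothetical names): carving a 1 GiB volume out
    # of a simple single-disk pool backed by /dev/sdb amounts to
    #     sfdisk --no-reread --wipe always -q -f /dev/sdb      (stdin: ',2097152\n')
    #     mkfs.ext4 -F -F /dev/sdb1
    #     mount /dev/sdb1 /media/vol0
    # while the LVM path uses lvcreate and the /dev/mapper/pool0-vol0 device instead.
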
    def destroyVolume(self, oExec, sPool, sVol):
        """
        Destroys the given volume.
        """
        # Unmount first
        sMountPoint = self.dMounts[sPool + '/' + sVol];
        fRc, _ = oExec.execBinary('umount', (sMountPoint,));
        self.dMounts.pop(sPool + '/' + sVol);
        oExec.rmDir(sMountPoint);
        if self.dSimplePools.has_key(sPool):
            # Wipe partition table
            sDiskPath = self.dSimplePools.get(sPool);
            fRc = oExec.execBinaryNoStdOut('sfdisk', ('--no-reread', '--wipe', 'always', '-q', '-f', sDiskPath));
        else:
            fRc = oExec.execBinaryNoStdOut('lvremove', (sPool + '/' + sVol,));
        return fRc;

    def destroyPool(self, oExec, sPool):
        """
        Destroys the given storage pool.
        """
        fRc = True;
        if self.dSimplePools.has_key(sPool):
            self.dSimplePools.pop(sPool);
        else:
            fRc = oExec.execBinaryNoStdOut('vgremove', (sPool,));
        return fRc;

class StorageCfg(object):
    """
    Storage configuration helper class taking care of the different host OSes.
    """

    def __init__(self, oExec, sTargetOs, oDiskCfg):
        self.oExec = oExec;
        self.lstDisks = [ ]; # List of disks present in the system.
        self.dPools = { }; # Dictionary of storage pools.
        self.dVols = { }; # Dictionary of volumes.
        self.iPoolId = 0;
        self.iVolId = 0;

        fRc = True;
        oStorOs = None;
        if sTargetOs == 'solaris':
            oStorOs = StorageConfigOsSolaris();
        elif sTargetOs == 'linux':
            oStorOs = StorageConfigOsLinux(); # pylint: disable=R0204
        else:
            fRc = False;

        if fRc:
            self.oStorOs = oStorOs;
            if isinstance(oDiskCfg, basestring):
                self.lstDisks = oStorOs.getDisksMatchingRegExp(oDiskCfg);
            else:
                # Assume a list of disks and add them.
                for sDisk in oDiskCfg:
                    self.lstDisks.append(StorageDisk(sDisk));

    def __del__(self):
        self.cleanup();

    def cleanup(self):
        """
        Cleans up any created storage configs.
        """

        # Destroy all volumes first.
        for sMountPoint in self.dVols.keys():
            self.destroyVolume(sMountPoint);

        # Destroy all pools.
        for sPool in self.dPools.keys():
            self.destroyStoragePool(sPool);

        self.dVols.clear();
        self.dPools.clear();
        self.iPoolId = 0;
        self.iVolId = 0;

    def getRawDisk(self):
        """
        Returns a raw disk device from the list of free devices for use.
        """
        for oDisk in self.lstDisks:
            if oDisk.isUsed() is False:
                oDisk.setUsed(True);
                return oDisk.getPath();

        return None;

    def getUnusedDiskCount(self):
        """
        Returns the number of unused disks.
        """

        cDisksUnused = 0;
        for oDisk in self.lstDisks:
            if not oDisk.isUsed():
                cDisksUnused += 1;

        return cDisksUnused;

    def createStoragePool(self, cDisks = 0, sRaidLvl = None):
        """
        Create a new storage pool.
        """
        lstDisks = [ ];
        fRc = True;
        sPool = None;

        if cDisks == 0:
            cDisks = self.getUnusedDiskCount();

        for oDisk in self.lstDisks:
            if not oDisk.isUsed():
                oDisk.setUsed(True);
                lstDisks.append(oDisk);
                if len(lstDisks) == cDisks:
                    break;

        # Enough drives to satisfy the request?
        if len(lstDisks) == cDisks:
            # Create a list of all device paths
            lstDiskPaths = [ ];
            for oDisk in lstDisks:
                lstDiskPaths.append(oDisk.getPath());

            # Find a name for the pool
            sPool = 'pool' + str(self.iPoolId);
            self.iPoolId += 1;

            fRc = self.oStorOs.createStoragePool(self.oExec, sPool, lstDiskPaths, sRaidLvl);
            if fRc:
                self.dPools[sPool] = lstDisks;
            else:
                self.iPoolId -= 1;
        else:
            fRc = False;

        # Cleanup in case of error.
        if not fRc:
            for oDisk in lstDisks:
                oDisk.setUsed(False);

        return fRc, sPool;

    def destroyStoragePool(self, sPool):
        """
        Destroys the storage pool with the given ID.
        """

        lstDisks = self.dPools.get(sPool);
        if lstDisks is not None:
            fRc = self.oStorOs.destroyPool(self.oExec, sPool);
            if fRc:
                # Mark disks as unused
                self.dPools.pop(sPool);
                for oDisk in lstDisks:
                    oDisk.setUsed(False);
        else:
            fRc = False;

        return fRc;

    def createVolume(self, sPool, cbVol = None):
        """
        Creates a new volume from the given pool, returning the mountpoint.
        """

        fRc = True;
        sMountPoint = None;
        if self.dPools.has_key(sPool):
            sVol = 'vol' + str(self.iVolId);
            sMountPoint = self.oStorOs.getMntBase() + '/' + sVol;
            self.iVolId += 1;
            fRc = self.oStorOs.createVolume(self.oExec, sPool, sVol, sMountPoint, cbVol);
            if fRc:
                self.dVols[sMountPoint] = (sVol, sPool);
            else:
                self.iVolId -= 1;
        else:
            fRc = False;

        return fRc, sMountPoint;

    def destroyVolume(self, sMountPoint):
        """
        Destroy the volume at the given mount point.
        """

        sVol, sPool = self.dVols.get(sMountPoint, (None, None));
        fRc = True;
        if sVol is not None:
            fRc = self.oStorOs.destroyVolume(self.oExec, sPool, sVol);
            if fRc:
                self.dVols.pop(sMountPoint);
        else:
            fRc = False;

        return fRc;

    def mkDirOnVolume(self, sMountPoint, sDir, fMode = 0700):
        """
        Creates a new directory on the volume pointed to by the given mount point.
        """
        return self.oExec.mkDir(sMountPoint + '/' + sDir, fMode);

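# The sketch below is illustrative only and not part of the original file: it
# drives StorageCfg with a dummy executor that merely prints the commands it
# would run, so the call flow can be inspected without touching real disks.
# The executor interface (execBinary, execBinaryNoStdOut, mkDir, rmDir) mirrors
# the calls made throughout this module; the disk names are hypothetical.
if __name__ == '__main__':
    class DummyExec(object):
        """ Stand-in for the Validation Kit executor, printing instead of executing. """
        def execBinary(self, sExec, asArgs, sInput = None):
            print('%s %s' % (sExec, ' '.join(asArgs)));
            return True, '';

        def execBinaryNoStdOut(self, sExec, asArgs):
            print('%s %s' % (sExec, ' '.join(asArgs)));
            return True;

        def mkDir(self, sDir, fMode = 0700):
            print('mkdir %s (mode %o)' % (sDir, fMode));
            return True;

        def rmDir(self, sDir):
            print('rmdir %s' % (sDir,));
            return True;

    oStorCfg = StorageCfg(DummyExec(), 'linux', [ '/dev/sdb', '/dev/sdc' ]);
    fRc, sPool = oStorCfg.createStoragePool(cDisks = 2, sRaidLvl = 'raid0');
    if fRc:
        fRc, sMountPoint = oStorCfg.createVolume(sPool, 1024 * 1024 * 1024);
        print('Volume mounted at %s' % (sMountPoint,));
    oStorCfg.cleanup();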