VirtualBox

source: vbox/trunk/src/VBox/ValidationKit/tests/storage/storagecfg.py@62190

Last change on this file since 62190 was 62190, checked in by vboxsync, 8 years ago

ValidationKit/storage: Fixes in general and add support for NVMe controllers

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 14.4 KB
# -*- coding: utf-8 -*-
# $Id: storagecfg.py 62190 2016-07-12 12:31:05Z vboxsync $

"""
VirtualBox Validation Kit - Storage test configuration API.
"""

__copyright__ = \
"""
Copyright (C) 2016 Oracle Corporation

This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.

The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.

You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 62190 $"

# Standard Python imports.
import os;
import re;

class StorageDisk(object):
    """
    Class representing a disk for testing.
    """

    def __init__(self, sPath):
        self.sPath = sPath;
        self.fUsed = False;

    def getPath(self):
        """
        Return the disk path.
        """
        return self.sPath;

    def isUsed(self):
        """
        Returns whether the disk is currently in use.
        """
        return self.fUsed;

    def setUsed(self, fUsed):
        """
        Sets the used flag for the disk.
        """
        if fUsed:
            if self.fUsed:
                return False;

            self.fUsed = True;
        else:
            self.fUsed = fUsed;

        return True;

class StorageConfigOs(object):
    """
    Base class for a single host's OS storage configuration.
    """

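    # The pattern passed in here comes from whoever constructs StorageCfg (see below);
    # hypothetical examples would be 'sd[b-z]$' for whole disks under /dev on Linux or
    # 'c[0-9]+t[0-9]+d[0-9]+s2$' for slices under /dev/dsk on Solaris.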
    def _getDisksMatchingRegExpWithPath(self, sPath, sRegExp):
        """
        Returns a list of disks under the given path whose names match the given
        regular expression.
        """

        lstDisks = [];
        oRegExp = re.compile(sRegExp);
        asFiles = os.listdir(sPath);
        for sFile in asFiles:
            if oRegExp.match(os.path.basename(sFile)) and os.path.exists(sPath + '/' + sFile):
                lstDisks.append(StorageDisk(sPath + '/' + sFile));

        return lstDisks;

class StorageConfigOsSolaris(StorageConfigOs):
    """
    Class implementing the Solaris specifics for a storage configuration.
    """

    def __init__(self):
        StorageConfigOs.__init__(self);

    def getDisksMatchingRegExp(self, sRegExp):
        """
        Returns a list of disks matching the regular expression.
        """
        return self._getDisksMatchingRegExpWithPath('/dev/dsk', sRegExp);

    def getMntBase(self):
        """
        Returns the mountpoint base for the host.
        """
        return '/pools';

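    # Illustrative example (hypothetical names): with sPool = 'pool0', sRaidLvl = 'raid5'
    # and asDisks = ['/dev/dsk/c1t0d0', '/dev/dsk/c1t1d0', '/dev/dsk/c1t2d0'], the method
    # below issues roughly:
    #   zpool create -f pool0 raidz /dev/dsk/c1t0d0 /dev/dsk/c1t1d0 /dev/dsk/c1t2d0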
    def createStoragePool(self, oExec, sPool, asDisks, sRaidLvl):
        """
        Creates a new storage pool with the given disks and the given RAID level.
        """
        sZPoolRaid = None;
        if sRaidLvl == 'raid5' or sRaidLvl is None:
            sZPoolRaid = 'raidz';

        fRc = True;
        if sZPoolRaid is not None:
            fRc, _ = oExec.execBinary('zpool', ('create', '-f', sPool, sZPoolRaid,) + tuple(asDisks));
        else:
            fRc = False;

        return fRc;

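    # Illustrative example (hypothetical names): the method below runs roughly
    #   zfs create -o mountpoint=/pools/vol0 pool0/vol0
    # and, if cbVol is given, additionally passes '-V <cbVol>' to create a sized volume.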
    def createVolume(self, oExec, sPool, sVol, sMountPoint, cbVol = None):
        """
        Creates and mounts a filesystem at the given mountpoint using the
        given pool and volume IDs.
        """
        fRc = True;
        if cbVol is not None:
            fRc, _ = oExec.execBinary('zfs', ('create', '-o', 'mountpoint='+sMountPoint, '-V', str(cbVol), sPool + '/' + sVol));
        else:
            fRc, _ = oExec.execBinary('zfs', ('create', '-o', 'mountpoint='+sMountPoint, sPool + '/' + sVol));

        return fRc;

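    # The two methods below map directly to 'zfs destroy <pool>/<vol>' and
    # 'zpool destroy <pool>'.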
    def destroyVolume(self, oExec, sPool, sVol):
        """
        Destroys the given volume.
        """
        fRc, _ = oExec.execBinary('zfs', ('destroy', sPool + '/' + sVol));
        return fRc;

    def destroyPool(self, oExec, sPool):
        """
        Destroys the given storage pool.
        """
        fRc, _ = oExec.execBinary('zpool', ('destroy', sPool));
        return fRc;

class StorageConfigOsLinux(StorageConfigOs):
    """
    Class implementing the Linux specifics for a storage configuration.
    """

    def __init__(self):
        StorageConfigOs.__init__(self);
        self.dSimplePools = { }; # Simple storage pools which don't use LVM (just one partition).
        self.dMounts = { };      # Pool/Volume to mountpoint mapping.

    def _getDmRaidLevelFromLvl(self, sRaidLvl):
        """
        Converts our raid level indicators to something mdadm can understand.
        """
        if sRaidLvl == 'raid5':
            return '5';
        elif sRaidLvl == 'raid1':
            return 'mirror';
        elif sRaidLvl == 'raid0' or sRaidLvl is None:
            return 'stripe';

        return 'stripe';

    def getDisksMatchingRegExp(self, sRegExp):
        """
        Returns a list of disks matching the regular expression.
        """
        return self._getDisksMatchingRegExpWithPath('/dev/', sRegExp);

    def getMntBase(self):
        """
        Returns the mountpoint base for the host.
        """
        return '/mnt';

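    # Illustrative example (hypothetical device names): for three disks and
    # sRaidLvl = 'raid5', the method below runs roughly:
    #   mdadm --create /dev/md0 --assume-clean --level=5 --raid-devices=3 /dev/sdb /dev/sdc /dev/sdd
    #   pvcreate /dev/md0
    #   vgcreate pool0 /dev/md0
    # A single disk with no RAID level bypasses mdadm/LVM entirely and is tracked as a
    # "simple pool".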
    def createStoragePool(self, oExec, sPool, asDisks, sRaidLvl):
        """
        Creates a new storage pool with the given disks and the given RAID level.
        """
        fRc = True;
        if len(asDisks) == 1 and sRaidLvl is None:
            # Doesn't require LVM, put into the simple pools dictionary so we can
            # use it when creating a volume later.
            self.dSimplePools[sPool] = asDisks[0];
        else:
            # If a RAID is required use dm-raid first to create one.
            asLvmPvDisks = asDisks;
            fRc = oExec.execBinaryNoStdOut('mdadm', ('--create', '/dev/md0', '--assume-clean',
                                                     '--level=' + self._getDmRaidLevelFromLvl(sRaidLvl),
                                                     '--raid-devices=' + str(len(asDisks))) + tuple(asDisks));
            if fRc:
                # /dev/md0 is the only block device to use for our volume group.
                asLvmPvDisks = [ '/dev/md0' ];

            # Create a physical volume on every disk first.
            for sLvmPvDisk in asLvmPvDisks:
                fRc = oExec.execBinaryNoStdOut('pvcreate', (sLvmPvDisk, ));
                if not fRc:
                    break;

            if fRc:
                # Create volume group with all physical volumes included
                fRc = oExec.execBinaryNoStdOut('vgcreate', (sPool, ) + tuple(asLvmPvDisks));
        return fRc;

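    # Illustrative example (hypothetical names): for a simple pool backed by /dev/sdb the
    # method below partitions the disk with sfdisk (yielding /dev/sdb1, or /dev/nvme0n1p1
    # for NVMe devices), while for an LVM pool it creates a logical volume and uses
    # /dev/mapper/<pool>-<vol>; in both cases the block device then gets an ext4
    # filesystem and is mounted at sMountPoint.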
    def createVolume(self, oExec, sPool, sVol, sMountPoint, cbVol = None):
        """
        Creates and mounts a filesystem at the given mountpoint using the
        given pool and volume IDs.
        """
        fRc = True;
        sBlkDev = None;
        if self.dSimplePools.has_key(sPool):
            sDiskPath = self.dSimplePools.get(sPool);
            # Create a partition with the requested size
            sFdiskScript = ';\n'; # Single partition filling everything
            if cbVol is not None:
                sFdiskScript = ',' + str(cbVol / 512) + '\n'; # Get number of sectors
            fRc, _ = oExec.execBinary('sfdisk', ('--no-reread', '--wipe', 'always', '-q', '-f', sDiskPath), sFdiskScript);
            if fRc:
                if sDiskPath.find('nvme') != -1:
                    sBlkDev = sDiskPath + 'p1';
                else:
                    sBlkDev = sDiskPath + '1';
        else:
            if cbVol is None:
                fRc = oExec.execBinaryNoStdOut('lvcreate', ('-l', '100%FREE', '-n', sVol, sPool));
            else:
                fRc = oExec.execBinaryNoStdOut('lvcreate', ('-L', str(cbVol), '-n', sVol, sPool));
            if fRc:
                sBlkDev = '/dev/mapper/' + sPool + '-' + sVol;

        if fRc is True and sBlkDev is not None:
            # Create a filesystem and mount it
            fRc = oExec.execBinaryNoStdOut('mkfs.ext4', ('-F', '-F', sBlkDev,));
            fRc = fRc and oExec.mkDir(sMountPoint);
            fRc = fRc and oExec.execBinaryNoStdOut('mount', (sBlkDev, sMountPoint));
            if fRc:
                self.dMounts[sPool + '/' + sVol] = sMountPoint;
        return fRc;

    def destroyVolume(self, oExec, sPool, sVol):
        """
        Destroys the given volume.
        """
        # Unmount first
        sMountPoint = self.dMounts[sPool + '/' + sVol];
        fRc, _ = oExec.execBinary('umount', (sMountPoint,));
        self.dMounts.pop(sPool + '/' + sVol);
        oExec.rmDir(sMountPoint);
        if self.dSimplePools.has_key(sPool):
            # Wipe partition table
            sDiskPath = self.dSimplePools.get(sPool);
            fRc = oExec.execBinaryNoStdOut('sfdisk', ('--no-reread', '--wipe', 'always', '-q', '-f', sDiskPath));
        else:
            fRc = oExec.execBinaryNoStdOut('lvremove', (sPool + '/' + sVol,));
        return fRc;

    def destroyPool(self, oExec, sPool):
        """
        Destroys the given storage pool.
        """
        fRc = True;
        if self.dSimplePools.has_key(sPool):
            self.dSimplePools.pop(sPool);
        else:
            fRc = oExec.execBinaryNoStdOut('vgremove', (sPool,));
        return fRc;

class StorageCfg(object):
    """
    Storage configuration helper class taking care of the differences between host OSes.
    """

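    # Illustrative example (hypothetical arguments): oDiskCfg may be a regular expression
    # such as 'sd[b-z]$' to scan the host for matching disks, or an explicit list like
    # ['/dev/sdb', '/dev/sdc']; sTargetOs must be 'solaris' or 'linux'.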
    def __init__(self, oExec, sTargetOs, oDiskCfg):
        self.oExec = oExec;
        self.lstDisks = [ ]; # List of disks present in the system.
        self.dPools = { };   # Dictionary of storage pools.
        self.dVols = { };    # Dictionary of volumes.
        self.iPoolId = 0;
        self.iVolId = 0;

        fRc = True;
        oStorOs = None;
        if sTargetOs == 'solaris':
            oStorOs = StorageConfigOsSolaris();
        elif sTargetOs == 'linux':
            oStorOs = StorageConfigOsLinux(); # pylint: disable=R0204
        else:
            fRc = False;

        if fRc:
            self.oStorOs = oStorOs;
            if isinstance(oDiskCfg, basestring):
                self.lstDisks = oStorOs.getDisksMatchingRegExp(oDiskCfg);
            else:
                # Assume a list of disks and add.
                for sDisk in oDiskCfg:
                    self.lstDisks.append(StorageDisk(sDisk));

    def __del__(self):
        self.cleanup();

    def cleanup(self):
        """
        Cleans up any created storage configs.
        """

        # Destroy all volumes first.
        for sMountPoint in self.dVols.keys():
            self.destroyVolume(sMountPoint);

        # Destroy all pools.
        for sPool in self.dPools.keys():
            self.destroyStoragePool(sPool);

        self.dVols.clear();
        self.dPools.clear();
        self.iPoolId = 0;
        self.iVolId = 0;

    def getRawDisk(self):
        """
        Returns a raw disk device from the list of free devices for use.
        """
        for oDisk in self.lstDisks:
            if oDisk.isUsed() is False:
                oDisk.setUsed(True);
                return oDisk.getPath();

        return None;

    def getUnusedDiskCount(self):
        """
        Returns the number of unused disks.
        """

        cDisksUnused = 0;
        for oDisk in self.lstDisks:
            if not oDisk.isUsed():
                cDisksUnused += 1;

        return cDisksUnused;

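    # Illustrative example (hypothetical call): fRc, sPool = oStorCfg.createStoragePool(cDisks = 3, sRaidLvl = 'raid5');
    # Passing cDisks = 0 consumes all currently unused disks for the new pool.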
    def createStoragePool(self, cDisks = 0, sRaidLvl = None):
        """
        Create a new storage pool
        """
        lstDisks = [ ];
        fRc = True;
        sPool = None;

        if cDisks == 0:
            cDisks = self.getUnusedDiskCount();

        for oDisk in self.lstDisks:
            if not oDisk.isUsed():
                oDisk.setUsed(True);
                lstDisks.append(oDisk);
                if len(lstDisks) == cDisks:
                    break;

        # Enough drives to satisfy the request?
        if len(lstDisks) == cDisks:
            # Create a list of all device paths
            lstDiskPaths = [ ];
            for oDisk in lstDisks:
                lstDiskPaths.append(oDisk.getPath());

            # Find a name for the pool
            sPool = 'pool' + str(self.iPoolId);
            self.iPoolId += 1;

            fRc = self.oStorOs.createStoragePool(self.oExec, sPool, lstDiskPaths, sRaidLvl);
            if fRc:
                self.dPools[sPool] = lstDisks;
            else:
                self.iPoolId -= 1;
        else:
            fRc = False;

        # Cleanup in case of error.
        if not fRc:
            for oDisk in lstDisks:
                oDisk.setUsed(False);

        return fRc, sPool;

    def destroyStoragePool(self, sPool):
        """
        Destroys the storage pool with the given ID.
        """

        lstDisks = self.dPools.get(sPool);
        if lstDisks is not None:
            fRc = self.oStorOs.destroyPool(self.oExec, sPool);
            if fRc:
                # Mark disks as unused
                self.dPools.pop(sPool);
                for oDisk in lstDisks:
                    oDisk.setUsed(False);
        else:
            fRc = False;

        return fRc;

    def createVolume(self, sPool, cbVol = None):
        """
        Creates a new volume from the given pool returning the mountpoint.
        """

        fRc = True;
        sMountPoint = None;
        if self.dPools.has_key(sPool):
            sVol = 'vol' + str(self.iVolId);
            sMountPoint = self.oStorOs.getMntBase() + '/' + sVol;
            self.iVolId += 1;
            fRc = self.oStorOs.createVolume(self.oExec, sPool, sVol, sMountPoint, cbVol);
            if fRc:
                self.dVols[sMountPoint] = (sVol, sPool);
            else:
                self.iVolId -= 1;
        else:
            fRc = False;

        return fRc, sMountPoint;

    def destroyVolume(self, sMountPoint):
        """
        Destroy the volume at the given mount point.
        """

        sVol, sPool = self.dVols.get(sMountPoint);
        fRc = True;
        if sVol is not None:
            fRc = self.oStorOs.destroyVolume(self.oExec, sPool, sVol);
            if fRc:
                self.dVols.pop(sMountPoint);
        else:
            fRc = False;

        return fRc;

    def mkDirOnVolume(self, sMountPoint, sDir, fMode = 0700):
        """
        Creates a new directory on the volume pointed to by the given mount point.
        """
        return self.oExec.mkDir(sMountPoint + '/' + sDir, fMode);

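#
# Illustrative usage sketch (hypothetical; assumes the caller provides an executor object
# 'oExec' offering execBinary, execBinaryNoStdOut, mkDir and rmDir as used above):
#
#   oStorCfg = StorageCfg(oExec, 'linux', 'sd[b-z]$');
#   fRc, sPool = oStorCfg.createStoragePool(cDisks = 2, sRaidLvl = 'raid0');
#   if fRc:
#       fRc, sMountPoint = oStorCfg.createVolume(sPool);
#   if fRc:
#       fRc = oStorCfg.mkDirOnVolume(sMountPoint, 'test');
#   # ... run the actual storage tests against the directory ...
#   oStorCfg.cleanup();
#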