VirtualBox

source: vbox/trunk/src/VBox/ValidationKit/tests/storage/storagecfg.py

Last change on this file was 106061, checked in by vboxsync, 3 months ago

Copyright year updates by scm.

# -*- coding: utf-8 -*-
# $Id: storagecfg.py 106061 2024-09-16 14:03:52Z vboxsync $

"""
VirtualBox Validation Kit - Storage test configuration API.
"""

__copyright__ = \
"""
Copyright (C) 2016-2024 Oracle and/or its affiliates.

This file is part of VirtualBox base platform packages, as
available from https://www.virtualbox.org.

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation, in version 3 of the
License.

This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, see <https://www.gnu.org/licenses>.

The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL), a copy of it is provided in the "COPYING.CDDL" file included
in the VirtualBox distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.

You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.

SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
"""
__version__ = "$Revision: 106061 $"

# Standard Python imports.
import os;
import re;
from testdriver import reporter;


class StorageDisk(object):
    """
    Class representing a disk for testing.
    """

    def __init__(self, sPath, fRamDisk = False):
        self.sPath = sPath;
        self.fUsed = False;
        self.fRamDisk = fRamDisk;

    def getPath(self):
        """
        Return the disk path.
        """
        return self.sPath;

    def isUsed(self):
        """
        Returns whether the disk is currently in use.
        """
        return self.fUsed;

    def isRamDisk(self):
        """
        Returns whether the disk object has a RAM backing.
72 """
73 return self.fRamDisk;
74
75 def setUsed(self, fUsed):
76 """
77 Sets the used flag for the disk.
78 """
        if fUsed:
            if self.fUsed:
                return False;

            self.fUsed = True;
        else:
            self.fUsed = fUsed;

        return True;

class StorageConfigOs(object):
    """
    Base class for a single host OS storage configuration.
    """

    def _getDisksMatchingRegExpWithPath(self, sPath, sRegExp):
        """
        Returns a list of disks in the given path matching the given regular expression.
        """

        lstDisks = [];
        oRegExp = re.compile(sRegExp);
        asFiles = os.listdir(sPath);
        for sFile in asFiles:
            if oRegExp.match(os.path.basename(sFile)) and os.path.exists(sPath + '/' + sFile):
                lstDisks.append(StorageDisk(sPath + '/' + sFile));

        return lstDisks;

class StorageConfigOsSolaris(StorageConfigOs):
    """
    Class implementing the Solaris specifics for a storage configuration.
    """

    def __init__(self):
        StorageConfigOs.__init__(self);
        self.idxRamDisk = 0;

    def _getActivePoolsStartingWith(self, oExec, sPoolIdStart):
        """
        Returns a list of pools starting with the given ID or None on failure.
        """
        lstPools = None;
        fRc, sOutput, _ = oExec.execBinary('zpool', ('list', '-H'));
        if fRc:
            lstPools = [];
            asPools = re.sub("b'|'", "", sOutput).rstrip("\\n").split("\\n") # as sOutput could look like "b'blabla'"
            reporter.log('asPools: %s' % asPools) # plus delete excessive end-of-line
            for sPool in asPools:
                oMatchResult = re.match("%s[0-9]?[0-9]?" % sPoolIdStart, sPool) # either a re.Match object or None
                reporter.log('sPoolIdStart: %s, sPool: %s, oMatchResult: %s' % (sPoolIdStart, sPool, oMatchResult))
                if oMatchResult:
                    lstPools.append(oMatchResult.group(0));
        return lstPools;

    def _getActiveVolumesInPoolStartingWith(self, oExec, sPool, sVolumeIdStart):
        """
        Returns a list of active volumes for the given pool starting with the given
        identifier or None on failure.
        """
        lstVolumes = None;
        fRc, sOutput, _ = oExec.execBinary('zfs', ('list', '-H'));
        if fRc:
            lstVolumes = [];
            asVolumes = re.sub("b'|'", "", sOutput).rstrip("\\n").split("\\n") # as sOutput could look like "b'blabla'"
            reporter.log('asVolumes: %s' % asVolumes) # plus delete excessive end-of-line
            for sVolume in asVolumes:
                oMatchResult = re.match("%s/%s" % (sPool, sVolumeIdStart), sVolume) # either a re.Match object or None
                reporter.log('sPool: %s, sVolumeIdStart: %s, sVolume: %s, oMatchResult: %s' % (sPool, sVolumeIdStart,
                                                                                               sVolume, oMatchResult))
                if oMatchResult:
                    lstVolumes.append(oMatchResult.group(0));
        return lstVolumes;

    def getDisksMatchingRegExp(self, sRegExp):
        """
        Returns a list of disks matching the regular expression.
        """
        return self._getDisksMatchingRegExpWithPath('/dev/dsk', sRegExp);

    def getMntBase(self):
        """
        Returns the mountpoint base for the host.
        """
        return '/pools';

    def createStoragePool(self, oExec, sPool, asDisks, sRaidLvl):
        """
        Creates a new storage pool with the given disks and the given RAID level.
        """
        sZPoolRaid = None;
        if len(asDisks) > 1 and (sRaidLvl == 'raid5' or sRaidLvl is None):
            sZPoolRaid = 'raidz';

        fRc = True;
        if sZPoolRaid is not None:
            fRc = oExec.execBinaryNoStdOut('zpool', ('create', '-f', sPool, sZPoolRaid,) + tuple(asDisks));
        else:
            fRc = oExec.execBinaryNoStdOut('zpool', ('create', '-f', sPool,) + tuple(asDisks));

        return fRc;

    def createVolume(self, oExec, sPool, sVol, sMountPoint, cbVol = None):
        """
        Creates and mounts a filesystem at the given mountpoint using the
        given pool and volume IDs.
        """
        fRc = True;
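        # When a size is given, '-V' makes zfs create a fixed-size volume
        # (zvol) instead of a regular filesystem dataset.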
        if cbVol is not None:
            fRc = oExec.execBinaryNoStdOut('zfs', ('create', '-o', 'mountpoint='+sMountPoint, '-V', cbVol, sPool + '/' + sVol));
        else:
            fRc = oExec.execBinaryNoStdOut('zfs', ('create', '-o', 'mountpoint='+sMountPoint, sPool + '/' + sVol));

        # @todo Add proper parameters to set proper owner:group ownership, the testcase broke in r133060 for Solaris
        #       because creating directories is now done using the python mkdir API instead of calling 'sudo mkdir...'.
        #       No one noticed though because testboxstor1 went out of action before...
        #       Will get fixed as soon as I'm back home.
        if fRc:
            fRc = oExec.execBinaryNoStdOut('chmod', ('777', sMountPoint));

        return fRc;

    def destroyVolume(self, oExec, sPool, sVol):
        """
        Destroys the given volume.
        """
        fRc = oExec.execBinaryNoStdOut('zfs', ('destroy', sPool + '/' + sVol));
        return fRc;

    def destroyPool(self, oExec, sPool):
        """
        Destroys the given storage pool.
        """
        fRc, _, _ = oExec.execBinary('zpool', ('destroy', sPool)); # execBinary returns (fRc, sOutput, sError); keep only the status.
        return fRc;

    def cleanupPoolsAndVolumes(self, oExec, sPoolIdStart, sVolIdStart):
        """
        Cleans up any pools and volumes starting with the name in the given
        parameters.
        """
        reporter.log('cleanupPoolsAndVolumes starts');
        fRc = True;
        lstPools = self._getActivePoolsStartingWith(oExec, sPoolIdStart);
        reporter.log('lstPools: %s' % lstPools);
        if lstPools is not None:
            for sPool in lstPools:
                fRc2 = fRc3 = False # flags for volume and pool destruction results
                lstVolumes = self._getActiveVolumesInPoolStartingWith(oExec, sPool, sVolIdStart);
                reporter.log('lstVolumes: %s' % lstVolumes)
                if lstVolumes is not None:
                    # Destroy all the volumes first
                    reporter.log('destroying volumes starts');
                    for sVolume in lstVolumes:
                        fRc2, _, _ = oExec.execBinary('zfs', ('destroy', sVolume)); # keep only the status flag
                    reporter.log('destroying volumes ends');

                    # Destroy the pool
                    reporter.log('destroying pools starts');
                    fRc3 = self.destroyPool(oExec, sPool);
                    reporter.log('destroying pools ends');
                    if not (fRc2 or fRc3):
                        fRc = False;
                else:
                    fRc = False;
        reporter.log('cleanupPoolsAndVolumes is finished');
        return fRc;

    def createRamDisk(self, oExec, cbRamDisk):
        """
        Creates a RAM backed disk with the given size.
        """
        oDisk = None;
        sRamDiskName = 'ramdisk%u' % (self.idxRamDisk,);
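        # 'ramdiskadm -a <name> <size>' creates the backing device node
        # /dev/ramdisk/<name>.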
        fRc, _ , _ = oExec.execBinary('ramdiskadm', ('-a', sRamDiskName, str(cbRamDisk)));
        if fRc:
            self.idxRamDisk += 1;
            oDisk = StorageDisk('/dev/ramdisk/%s' % (sRamDiskName, ), True);

        return oDisk;

    def destroyRamDisk(self, oExec, oDisk):
        """
        Destroys the given ramdisk object.
        """
        sRamDiskName = os.path.basename(oDisk.getPath());
        return oExec.execBinaryNoStdOut('ramdiskadm', ('-d', sRamDiskName));

class StorageConfigOsLinux(StorageConfigOs):
    """
    Class implementing the Linux specifics for a storage configuration.
    """

    def __init__(self):
        StorageConfigOs.__init__(self);
        self.dSimplePools = { }; # Simple storage pools which don't use lvm (just one partition)
        self.dMounts = { }; # Pool/Volume to mountpoint mapping.

    def _getDmRaidLevelFromLvl(self, sRaidLvl):
        """
        Converts our raid level indicators to something mdadm can understand.
        """
        if sRaidLvl is None or sRaidLvl == 'raid0':
            return 'stripe';
        if sRaidLvl == 'raid5':
            return '5';
        if sRaidLvl == 'raid1':
            return 'mirror';
        return 'stripe';

    def getDisksMatchingRegExp(self, sRegExp):
        """
        Returns a list of disks matching the regular expression.
        """
        return self._getDisksMatchingRegExpWithPath('/dev/', sRegExp);

    def getMntBase(self):
        """
        Returns the mountpoint base for the host.
        """
        return '/mnt';

    def createStoragePool(self, oExec, sPool, asDisks, sRaidLvl):
        """
        Creates a new storage pool with the given disks and the given RAID level.
        """
        fRc = True;
        if len(asDisks) == 1 and sRaidLvl is None:
            # Doesn't require LVM, put into the simple pools dictionary so we can
            # use it when creating a volume later.
            self.dSimplePools[sPool] = asDisks[0];
        else:
            # If a RAID is required use dm-raid first to create one.
            asLvmPvDisks = asDisks;
            fRc = oExec.execBinaryNoStdOut('mdadm', ('--create', '/dev/md0', '--assume-clean',
                                                     '--level=' + self._getDmRaidLevelFromLvl(sRaidLvl),
                                                     '--raid-devices=' + str(len(asDisks))) + tuple(asDisks));
            if fRc:
                # /dev/md0 is the only block device to use for our volume group.
                asLvmPvDisks = [ '/dev/md0' ];

                # Create a physical volume on every disk first.
                for sLvmPvDisk in asLvmPvDisks:
                    fRc = oExec.execBinaryNoStdOut('pvcreate', (sLvmPvDisk, ));
                    if not fRc:
                        break;

                if fRc:
                    # Create volume group with all physical volumes included
                    fRc = oExec.execBinaryNoStdOut('vgcreate', (sPool, ) + tuple(asLvmPvDisks));
        return fRc;

    def createVolume(self, oExec, sPool, sVol, sMountPoint, cbVol = None):
        """
        Creates and mounts a filesystem at the given mountpoint using the
        given pool and volume IDs.
        """
        fRc = True;
        sBlkDev = None;
        if sPool in self.dSimplePools:
            sDiskPath = self.dSimplePools.get(sPool);
            if sDiskPath.find('zram') != -1:
                sBlkDev = sDiskPath;
            else:
                # Create a partition with the requested size
                sFdiskScript = ';\n'; # Single partition filling everything
                if cbVol is not None:
                    sFdiskScript = ',' + str(cbVol // 512) + '\n'; # Get number of sectors
                fRc, _, _ = oExec.execBinary('sfdisk', ('--no-reread', '--wipe', 'always', '-q', '-f', sDiskPath), \
                                             sFdiskScript);
                if fRc:
                    if sDiskPath.find('nvme') != -1:
                        sBlkDev = sDiskPath + 'p1';
                    else:
                        sBlkDev = sDiskPath + '1';
        else:
            if cbVol is None:
                fRc = oExec.execBinaryNoStdOut('lvcreate', ('-l', '100%FREE', '-n', sVol, sPool));
            else:
                fRc = oExec.execBinaryNoStdOut('lvcreate', ('-L', str(cbVol), '-n', sVol, sPool));
            if fRc:
                sBlkDev = '/dev/mapper/' + sPool + '-' + sVol;

        if fRc is True and sBlkDev is not None:
            # Create a filesystem and mount it
            fRc = oExec.execBinaryNoStdOut('mkfs.ext4', ('-F', '-F', sBlkDev,));
            fRc = fRc and oExec.mkDir(sMountPoint);
            fRc = fRc and oExec.execBinaryNoStdOut('mount', (sBlkDev, sMountPoint));
            if fRc:
                self.dMounts[sPool + '/' + sVol] = sMountPoint;
        return fRc;

    def destroyVolume(self, oExec, sPool, sVol):
        """
        Destroys the given volume.
        """
        # Unmount first
        sMountPoint = self.dMounts[sPool + '/' + sVol];
        fRc = oExec.execBinaryNoStdOut('umount', (sMountPoint,));
        self.dMounts.pop(sPool + '/' + sVol);
        oExec.rmDir(sMountPoint);
        if sPool in self.dSimplePools:
            # Wipe partition table
            sDiskPath = self.dSimplePools.get(sPool);
            if sDiskPath.find('zram') == -1:
                fRc = oExec.execBinaryNoStdOut('sfdisk', ('--no-reread', '--wipe', 'always', '-q', '-f', '--delete', \
                                                          sDiskPath));
        else:
            fRc = oExec.execBinaryNoStdOut('lvremove', (sPool + '/' + sVol,));
        return fRc;

    def destroyPool(self, oExec, sPool):
        """
        Destroys the given storage pool.
        """
        fRc = True;
        if sPool in self.dSimplePools:
            self.dSimplePools.pop(sPool);
        else:
            fRc = oExec.execBinaryNoStdOut('vgremove', (sPool,));
        return fRc;

    def cleanupPoolsAndVolumes(self, oExec, sPoolIdStart, sVolIdStart):
        """
        Cleans up any pools and volumes starting with the name in the given
        parameters.
        """
        # @todo: Needs implementation, for LVM based configs a similar approach can be used
        #        as for Solaris.
        _ = oExec;
        _ = sPoolIdStart;
        _ = sVolIdStart;
        return True;

    def createRamDisk(self, oExec, cbRamDisk):
        """
        Creates a RAM backed disk with the given size.
        """
        # Make sure the ZRAM module is loaded.
        oDisk = None;
        fRc = oExec.execBinaryNoStdOut('modprobe', ('zram',));
        if fRc:
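            # 'zramctl --find' allocates the first unused /dev/zramN device
            # and prints its path on stdout, which becomes the disk path.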
            fRc, sOut, _ = oExec.execBinary('zramctl', ('--raw', '-f', '-s', str(cbRamDisk)));
            if fRc:
                oDisk = StorageDisk(sOut.rstrip(), True);

        return oDisk;

    def destroyRamDisk(self, oExec, oDisk):
        """
        Destroys the given ramdisk object.
        """
        return oExec.execBinaryNoStdOut('zramctl', ('-r', oDisk.getPath()));

## @name Host disk config types.
## @{
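# How StorageCfg interprets DiskCfg.oDisks for each type: a fixed directory
# path (StaticDir), a regular expression matching device nodes (RegExp), or
# an explicit list of device paths (DiskList).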
g_ksDiskCfgStatic = 'StaticDir';
g_ksDiskCfgRegExp = 'RegExp';
g_ksDiskCfgList = 'DiskList';
## @}

class DiskCfg(object):
    """
    Host disk configuration.
    """

    def __init__(self, sTargetOs, sCfgType, oDisks):
        self.sTargetOs = sTargetOs;
        self.sCfgType = sCfgType;
        self.oDisks = oDisks;

    def getTargetOs(self):
        return self.sTargetOs;

    def getCfgType(self):
        return self.sCfgType;

    def isCfgStaticDir(self):
        return self.sCfgType == g_ksDiskCfgStatic;

    def isCfgRegExp(self):
        return self.sCfgType == g_ksDiskCfgRegExp;

    def isCfgList(self):
        return self.sCfgType == g_ksDiskCfgList;

    def getDisks(self):
        return self.oDisks;

class StorageCfg(object):
    """
    Storage configuration helper class taking care of the different host OS.
    """

    def __init__(self, oExec, oDiskCfg):
        self.oExec = oExec;
        self.lstDisks = [ ]; # List of disks present in the system.
        self.dPools = { }; # Dictionary of storage pools.
        self.dVols = { }; # Dictionary of volumes.
        self.iPoolId = 0;
        self.iVolId = 0;
        self.oDiskCfg = oDiskCfg;

        fRc = True;
        oStorOs = None;
        if oDiskCfg.getTargetOs() == 'solaris':
            oStorOs = StorageConfigOsSolaris();
        elif oDiskCfg.getTargetOs() == 'linux':
            oStorOs = StorageConfigOsLinux(); # pylint: disable=redefined-variable-type
        elif not oDiskCfg.isCfgStaticDir():
            # For unknown hosts only allow a static testing directory we don't care about setting up
            fRc = False;

        if fRc:
            self.oStorOs = oStorOs;
            if oDiskCfg.isCfgRegExp():
                self.lstDisks = oStorOs.getDisksMatchingRegExp(oDiskCfg.getDisks());
            elif oDiskCfg.isCfgList():
                # Assume a list of disks and add.
                for sDisk in oDiskCfg.getDisks():
                    self.lstDisks.append(StorageDisk(sDisk));
            elif oDiskCfg.isCfgStaticDir():
                if not os.path.exists(oDiskCfg.getDisks()):
                    self.oExec.mkDir(oDiskCfg.getDisks(), 0o700);

    def __del__(self):
        self.cleanup();
        self.oDiskCfg = None;

    def cleanup(self):
        """
        Cleans up any created storage configs.
        """

        if not self.oDiskCfg.isCfgStaticDir():
            # Destroy all volumes first.
            for sMountPoint in list(self.dVols.keys()): # pylint: disable=consider-iterating-dictionary
                self.destroyVolume(sMountPoint);

            # Destroy all pools.
            for sPool in list(self.dPools.keys()): # pylint: disable=consider-iterating-dictionary
                self.destroyStoragePool(sPool);

        self.dVols.clear();
        self.dPools.clear();
        self.iPoolId = 0;
        self.iVolId = 0;

    def getRawDisk(self):
        """
        Returns a raw disk device from the list of free devices for use.
        """

        for oDisk in self.lstDisks:
            if oDisk.isUsed() is False:
                oDisk.setUsed(True);
                return oDisk.getPath();

        return None;

    def getUnusedDiskCount(self):
        """
        Returns the number of unused disks.
        """

        cDisksUnused = 0;
        for oDisk in self.lstDisks:
            if not oDisk.isUsed():
                cDisksUnused += 1;

        return cDisksUnused;

    def createStoragePool(self, cDisks = 0, sRaidLvl = None,
                          cbPool = None, fRamDisk = False):
        """
        Creates a new storage pool, returning a (success, pool name) tuple.
        """
        lstDisks = [ ];
        fRc = True;
        sPool = None;

        if not self.oDiskCfg.isCfgStaticDir():
            if fRamDisk:
                oDisk = self.oStorOs.createRamDisk(self.oExec, cbPool);
                if oDisk is not None:
                    lstDisks.append(oDisk);
                    cDisks = 1;
            else:
                if cDisks == 0:
                    cDisks = self.getUnusedDiskCount();

                for oDisk in self.lstDisks:
                    if not oDisk.isUsed():
                        oDisk.setUsed(True);
                        lstDisks.append(oDisk);
                        if len(lstDisks) == cDisks:
                            break;

            # Enough drives to satisfy the request?
            if len(lstDisks) == cDisks:
                # Create a list of all device paths
                lstDiskPaths = [ ];
                for oDisk in lstDisks:
                    lstDiskPaths.append(oDisk.getPath());

                # Find a name for the pool
                sPool = 'pool' + str(self.iPoolId);
                self.iPoolId += 1;

                fRc = self.oStorOs.createStoragePool(self.oExec, sPool, lstDiskPaths, sRaidLvl);
                if fRc:
                    self.dPools[sPool] = lstDisks;
                else:
                    self.iPoolId -= 1;
            else:
                fRc = False;

            # Cleanup in case of error.
            if not fRc:
                for oDisk in lstDisks:
                    oDisk.setUsed(False);
                    if oDisk.isRamDisk():
                        self.oStorOs.destroyRamDisk(self.oExec, oDisk);
        else:
            sPool = 'StaticDummy';

        return fRc, sPool;

    def destroyStoragePool(self, sPool):
        """
        Destroys the storage pool with the given ID.
        """

        fRc = True;

        if not self.oDiskCfg.isCfgStaticDir():
            lstDisks = self.dPools.get(sPool);
            if lstDisks is not None:
                fRc = self.oStorOs.destroyPool(self.oExec, sPool);
                if fRc:
                    # Mark disks as unused
                    self.dPools.pop(sPool);
                    for oDisk in lstDisks:
                        oDisk.setUsed(False);
                        if oDisk.isRamDisk():
                            self.oStorOs.destroyRamDisk(self.oExec, oDisk);
            else:
                fRc = False;

        return fRc;

    def createVolume(self, sPool, cbVol = None):
        """
        Creates a new volume from the given pool returning the mountpoint.
        """

        fRc = True;
        sMountPoint = None;
        if not self.oDiskCfg.isCfgStaticDir():
            if sPool in self.dPools:
                sVol = 'vol' + str(self.iVolId);
                sMountPoint = self.oStorOs.getMntBase() + '/' + sVol;
                self.iVolId += 1;
                fRc = self.oStorOs.createVolume(self.oExec, sPool, sVol, sMountPoint, cbVol);
                if fRc:
                    self.dVols[sMountPoint] = (sVol, sPool);
                else:
                    self.iVolId -= 1;
            else:
                fRc = False;
        else:
            sMountPoint = self.oDiskCfg.getDisks();

        return fRc, sMountPoint;

    def destroyVolume(self, sMountPoint):
        """
        Destroy the volume at the given mount point.
        """

        fRc = True;
        if not self.oDiskCfg.isCfgStaticDir():
            sVol, sPool = self.dVols.get(sMountPoint, (None, None)); # avoid unpacking None for unknown mount points
            if sVol is not None:
                fRc = self.oStorOs.destroyVolume(self.oExec, sPool, sVol);
                if fRc:
                    self.dVols.pop(sMountPoint);
            else:
                fRc = False;

        return fRc;

    def mkDirOnVolume(self, sMountPoint, sDir, fMode = 0o700):
        """
        Creates a new directory on the volume pointed to by the given mount point.

        Returns success status.
        """
        return self.oExec.mkDir(os.path.join(sMountPoint, sDir), fMode);

    def cleanupLeftovers(self):
        """
        Tries to clean up any leftover pools and volumes from a failed previous run.
        """
        reporter.log('cleanupLeftovers starts');
        if not self.oDiskCfg.isCfgStaticDir():
            return self.oStorOs.cleanupPoolsAndVolumes(self.oExec, 'pool', 'vol');

        fRc = True;
        asDisks = self.oDiskCfg.getDisks();
        reporter.log("oDiskCfg.getDisks: %s" % asDisks);
        if os.path.exists(asDisks):
            reporter.log('os.listdir(asDisks): %s' % asDisks)
            for sEntry in os.listdir(asDisks):
                sPath = os.path.join(self.oDiskCfg.getDisks(), sEntry);
                reporter.log('path to sEntry: %s' % sPath)
                fRc = fRc and self.oExec.rmTree(sPath);
        reporter.log('cleanupLeftovers ends');
        return fRc;