VirtualBox

source: vbox/trunk/src/VBox/ValidationKit/tests/storage/storagecfg.py@ 103576

Last change on this file since 103576 was 103576, checked in by vboxsync, 9 months ago

ValKit, storagecfg.py, cleanupLeftovers() function fix, added a bit of cleaning for sOutput received from execBinary function; utils/TestExecServ/win/vboxtxs.reg, added key for disabling UAC

# -*- coding: utf-8 -*-
# $Id: storagecfg.py 103576 2024-02-26 17:02:26Z vboxsync $

"""
VirtualBox Validation Kit - Storage test configuration API.
"""

__copyright__ = \
"""
Copyright (C) 2016-2023 Oracle and/or its affiliates.

This file is part of VirtualBox base platform packages, as
available from https://www.virtualbox.org.

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation, in version 3 of the
License.

This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, see <https://www.gnu.org/licenses>.

The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL), a copy of it is provided in the "COPYING.CDDL" file included
in the VirtualBox distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.

You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.

SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
"""
__version__ = "$Revision: 103576 $"

# Standard Python imports.
import os;
import re;
from testdriver import reporter;


class StorageDisk(object):
    """
    Class representing a disk for testing.
    """

    def __init__(self, sPath, fRamDisk = False):
        self.sPath = sPath;
        self.fUsed = False;
        self.fRamDisk = fRamDisk;

    def getPath(self):
        """
        Return the disk path.
        """
        return self.sPath;

    def isUsed(self):
        """
        Returns whether the disk is currently in use.
        """
        return self.fUsed;

    def isRamDisk(self):
        """
        Returns whether the disk object has a RAM backing.
        """
        return self.fRamDisk;

    def setUsed(self, fUsed):
        """
        Sets the used flag for the disk.
        """
        if fUsed:
            if self.fUsed:
                return False;

            self.fUsed = True;
        else:
            self.fUsed = fUsed;

        return True;

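# Hypothetical usage sketch (not part of this module's API): reserve a disk
# for one test, use its path, then hand it back. setUsed(True) doubles as a
# test-and-set, returning False when the disk was already claimed.
#
#   oDisk = StorageDisk('/dev/sdb');
#   if oDisk.setUsed(True):
#       sPath = oDisk.getPath();    # -> '/dev/sdb'
#       # ... exercise the disk ...
#       oDisk.setUsed(False);       # release it again.
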
class StorageConfigOs(object):
    """
    Base class for a single host's OS storage configuration.
    """

    def _getDisksMatchingRegExpWithPath(self, sPath, sRegExp):
        """
        Returns a list of disks under the given path matching the given regular expression.
        """

        lstDisks = [];
        oRegExp = re.compile(sRegExp);
        asFiles = os.listdir(sPath);
        for sFile in asFiles:
            if oRegExp.match(os.path.basename(sFile)) and os.path.exists(sPath + '/' + sFile):
                lstDisks.append(StorageDisk(sPath + '/' + sFile));

        return lstDisks;

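# Example (illustrative only): on a Linux host exposing /dev/sdb, /dev/sdc and
# /dev/sdd, _getDisksMatchingRegExpWithPath('/dev', 'sd[b-z]') returns three
# StorageDisk objects for those nodes; non-matching entries (sda, nvme0n1, ...)
# are skipped.
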
class StorageConfigOsSolaris(StorageConfigOs):
    """
    Class implementing the Solaris specifics for a storage configuration.
    """

    def __init__(self):
        StorageConfigOs.__init__(self);
        self.idxRamDisk = 0;

    def _getActivePoolsStartingWith(self, oExec, sPoolIdStart):
        """
        Returns a list of pools starting with the given ID or None on failure.
        """
        lstPools = None;
        fRc, sOutput, _ = oExec.execBinary('zpool', ('list', '-H'));
        if fRc:
            lstPools = [];
            asPools = re.sub("b'|'", "", sOutput).rstrip("\\n").split("\\n"); # as sOutput could look like "b'blabla'"
            reporter.log('asPools: %s' % asPools);                            # plus delete excessive end-of-line
            for sPool in asPools:
                oMatchResult = re.match("%s[0-9]?[0-9]?" % sPoolIdStart, sPool); # either re.Match obj or None
                reporter.log('sPoolIdStart: %s, sPool: %s, oMatchResult: %s' % (sPoolIdStart, sPool, oMatchResult));
                if oMatchResult:
                    lstPools.append(oMatchResult.group(0));
        return lstPools;

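    # Parsing sketch (hedged): 'zpool list -H' prints one tab-separated line
    # per pool, name first, e.g. "pool0\t7.50G\t...". With sPoolIdStart='pool'
    # the regexp anchors at the start of the line and group(0) yields just the
    # matched pool name ('pool0'), so unrelated pools like 'rpool' are ignored.
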
    def _getActiveVolumesInPoolStartingWith(self, oExec, sPool, sVolumeIdStart):
        """
        Returns a list of active volumes for the given pool starting with the given
        identifier or None on failure.
        """
        lstVolumes = None;
        fRc, sOutput, _ = oExec.execBinary('zfs', ('list', '-H'));
        if fRc:
            lstVolumes = [];
            asVolumes = re.sub("b'|'", "", sOutput).rstrip("\\n").split("\\n"); # as sOutput could look like "b'blabla'"
            reporter.log('asVolumes: %s' % asVolumes);                          # plus delete excessive end-of-line
            for sVolume in asVolumes:
                oMatchResult = re.match("%s/%s" % (sPool, sVolumeIdStart), sVolume); # either re.Match obj or None
                reporter.log('sPool: %s, sVolumeIdStart: %s, sVolume: %s, oMatchResult: %s' % (sPool, sVolumeIdStart,
                                                                                               sVolume, oMatchResult));
                if oMatchResult:
                    lstVolumes.append(oMatchResult.group(0));
        return lstVolumes;

    def getDisksMatchingRegExp(self, sRegExp):
        """
        Returns a list of disks matching the regular expression.
        """
        return self._getDisksMatchingRegExpWithPath('/dev/dsk', sRegExp);

    def getMntBase(self):
        """
        Returns the mountpoint base for the host.
        """
        return '/pools';

    def createStoragePool(self, oExec, sPool, asDisks, sRaidLvl):
        """
        Creates a new storage pool with the given disks and the given RAID level.
        """
        sZPoolRaid = None;
        if len(asDisks) > 1 and (sRaidLvl == 'raid5' or sRaidLvl is None):
            sZPoolRaid = 'raidz';

        fRc = True;
        if sZPoolRaid is not None:
            fRc = oExec.execBinaryNoStdOut('zpool', ('create', '-f', sPool, sZPoolRaid,) + tuple(asDisks));
        else:
            fRc = oExec.execBinaryNoStdOut('zpool', ('create', '-f', sPool,) + tuple(asDisks));

        return fRc;

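    # Command sketch: for asDisks = ['c1t0d0', 'c1t1d0', 'c1t2d0'] and
    # sRaidLvl = 'raid5' the above boils down to
    #   zpool create -f <sPool> raidz c1t0d0 c1t1d0 c1t2d0
    # i.e. ZFS raidz stands in for RAID5; a single disk is passed through as-is.
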
    def createVolume(self, oExec, sPool, sVol, sMountPoint, cbVol = None):
        """
        Creates and mounts a filesystem at the given mountpoint using the
        given pool and volume IDs.
        """
        fRc = True;
        if cbVol is not None:
            fRc = oExec.execBinaryNoStdOut('zfs', ('create', '-o', 'mountpoint='+sMountPoint, '-V', cbVol, sPool + '/' + sVol));
        else:
            fRc = oExec.execBinaryNoStdOut('zfs', ('create', '-o', 'mountpoint='+sMountPoint, sPool + '/' + sVol));

        # @todo Add proper parameters to set proper owner:group ownership, the testcase broke in r133060 for Solaris
        #       because creating directories is now done using the python mkdir API instead of calling 'sudo mkdir...'.
        #       No one noticed though because testboxstor1 went out of action before...
        #       Will get fixed as soon as I'm back home.
        if fRc:
            fRc = oExec.execBinaryNoStdOut('chmod', ('777', sMountPoint));

        return fRc;

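    # Command sketch: with cbVol given this issues roughly
    #   zfs create -o mountpoint=<sMountPoint> -V <cbVol> <sPool>/<sVol>
    # (without cbVol the -V size argument is simply omitted), followed by a
    # blunt 'chmod 777 <sMountPoint>' until the ownership @todo above is fixed.
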
    def destroyVolume(self, oExec, sPool, sVol):
        """
        Destroys the given volume.
        """
        fRc = oExec.execBinaryNoStdOut('zfs', ('destroy', sPool + '/' + sVol));
        return fRc;

    def destroyPool(self, oExec, sPool):
        """
        Destroys the given storage pool.
        """
        fRc, _, _ = oExec.execBinary('zpool', ('destroy', sPool)); # execBinary returns a tuple; only the status flag matters here.
        return fRc;

    def cleanupPoolsAndVolumes(self, oExec, sPoolIdStart, sVolIdStart):
        """
        Cleans up any pools and volumes starting with the name in the given
        parameters.
        """
        reporter.log('cleanupPoolsAndVolumes starts');
        fRc = True;
        lstPools = self._getActivePoolsStartingWith(oExec, sPoolIdStart);
        reporter.log('lstPools: %s' % lstPools);
        if lstPools is not None:
            for sPool in lstPools:
                lstVolumes = self._getActiveVolumesInPoolStartingWith(oExec, sPool, sVolIdStart);
                reporter.log('lstVolumes: %s' % lstVolumes);
                if lstVolumes is not None:
                    # Destroy all the volumes first
                    reporter.log('destroying volumes starts');
                    for sVolume in lstVolumes:
                        fRc2, _, _ = oExec.execBinary('zfs', ('destroy', sVolume)); # execBinary returns a tuple; only the status flag matters here.
                        if not fRc2:
                            fRc = fRc2;
                    reporter.log('destroying volumes ends');

                    # Destroy the pool
                    reporter.log('destroying pools starts');
                    fRc2 = self.destroyPool(oExec, sPool);
                    reporter.log('destroying pools ends');
                    if not fRc2:
                        fRc = fRc2;
                else:
                    fRc = False;
        else:
            fRc = False;
        reporter.log('cleanupPoolsAndVolumes is finished');
        return fRc;

    def createRamDisk(self, oExec, cbRamDisk):
        """
        Creates a RAM backed disk with the given size.
        """
        oDisk = None;
        sRamDiskName = 'ramdisk%u' % (self.idxRamDisk,);
        fRc, _ , _ = oExec.execBinary('ramdiskadm', ('-a', sRamDiskName, str(cbRamDisk)));
        if fRc:
            self.idxRamDisk += 1;
            oDisk = StorageDisk('/dev/ramdisk/%s' % (sRamDiskName, ), True);

        return oDisk;

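    # Sketch: 'ramdiskadm -a ramdisk0 <bytes>' creates a RAM disk whose block
    # device appears as /dev/ramdisk/ramdisk0; that path is wrapped into the
    # returned StorageDisk, and destroyRamDisk() below removes it by name.
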
    def destroyRamDisk(self, oExec, oDisk):
        """
        Destroys the given ramdisk object.
        """
        sRamDiskName = os.path.basename(oDisk.getPath());
        return oExec.execBinaryNoStdOut('ramdiskadm', ('-d', sRamDiskName));

class StorageConfigOsLinux(StorageConfigOs):
    """
    Class implementing the Linux specifics for a storage configuration.
    """

    def __init__(self):
        StorageConfigOs.__init__(self);
        self.dSimplePools = { }; # Simple storage pools which don't use lvm (just one partition)
        self.dMounts = { };      # Pool/Volume to mountpoint mapping.

    def _getDmRaidLevelFromLvl(self, sRaidLvl):
        """
        Converts our raid level indicators to something mdadm can understand.
        """
        if sRaidLvl is None or sRaidLvl == 'raid0':
            return 'stripe';
        if sRaidLvl == 'raid5':
            return '5';
        if sRaidLvl == 'raid1':
            return 'mirror';
        return 'stripe';

    def getDisksMatchingRegExp(self, sRegExp):
        """
        Returns a list of disks matching the regular expression.
        """
        return self._getDisksMatchingRegExpWithPath('/dev/', sRegExp);

    def getMntBase(self):
        """
        Returns the mountpoint base for the host.
        """
        return '/mnt';

    def createStoragePool(self, oExec, sPool, asDisks, sRaidLvl):
        """
        Creates a new storage pool with the given disks and the given RAID level.
        """
        fRc = True;
        if len(asDisks) == 1 and sRaidLvl is None:
            # Doesn't require LVM, put into the simple pools dictionary so we can
            # use it when creating a volume later.
            self.dSimplePools[sPool] = asDisks[0];
        else:
            # If a RAID is required use dm-raid first to create one.
            asLvmPvDisks = asDisks;
            fRc = oExec.execBinaryNoStdOut('mdadm', ('--create', '/dev/md0', '--assume-clean',
                                                     '--level=' + self._getDmRaidLevelFromLvl(sRaidLvl),
                                                     '--raid-devices=' + str(len(asDisks))) + tuple(asDisks));
            if fRc:
                # /dev/md0 is the only block device to use for our volume group.
                asLvmPvDisks = [ '/dev/md0' ];

            # Create a physical volume on every disk first.
            for sLvmPvDisk in asLvmPvDisks:
                fRc = oExec.execBinaryNoStdOut('pvcreate', (sLvmPvDisk, ));
                if not fRc:
                    break;

            if fRc:
                # Create volume group with all physical volumes included
                fRc = oExec.execBinaryNoStdOut('vgcreate', (sPool, ) + tuple(asLvmPvDisks));
        return fRc;

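    # Command sketch: for asDisks = ['/dev/sdb', '/dev/sdc', '/dev/sdd'] and
    # sRaidLvl = 'raid5' the above runs roughly
    #   mdadm --create /dev/md0 --assume-clean --level=5 --raid-devices=3 /dev/sdb /dev/sdc /dev/sdd
    #   pvcreate /dev/md0
    #   vgcreate <sPool> /dev/md0
    # while a single disk with no RAID level skips mdadm/LVM entirely and is
    # just remembered in dSimplePools.
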
    def createVolume(self, oExec, sPool, sVol, sMountPoint, cbVol = None):
        """
        Creates and mounts a filesystem at the given mountpoint using the
        given pool and volume IDs.
        """
        fRc = True;
        sBlkDev = None;
        if sPool in self.dSimplePools:
            sDiskPath = self.dSimplePools.get(sPool);
            if sDiskPath.find('zram') != -1:
                sBlkDev = sDiskPath;
            else:
                # Create a partition with the requested size
                sFdiskScript = ';\n'; # Single partition filling everything
                if cbVol is not None:
                    sFdiskScript = ',' + str(cbVol // 512) + '\n'; # Get number of sectors
                fRc = oExec.execBinaryNoStdOut('sfdisk', ('--no-reread', '--wipe', 'always', '-q', '-f', sDiskPath), \
                                               sFdiskScript);
                if fRc:
                    if sDiskPath.find('nvme') != -1:
                        sBlkDev = sDiskPath + 'p1';
                    else:
                        sBlkDev = sDiskPath + '1';
        else:
            if cbVol is None:
                fRc = oExec.execBinaryNoStdOut('lvcreate', ('-l', '100%FREE', '-n', sVol, sPool));
            else:
                fRc = oExec.execBinaryNoStdOut('lvcreate', ('-L', str(cbVol), '-n', sVol, sPool));
            if fRc:
                sBlkDev = '/dev/mapper/' + sPool + '-' + sVol; # Device mapper node for <pool>-<volume>.

        if fRc is True and sBlkDev is not None:
            # Create a filesystem and mount it
            fRc = oExec.execBinaryNoStdOut('mkfs.ext4', ('-F', '-F', sBlkDev,));
            fRc = fRc and oExec.mkDir(sMountPoint);
            fRc = fRc and oExec.execBinaryNoStdOut('mount', (sBlkDev, sMountPoint));
            if fRc:
                self.dMounts[sPool + '/' + sVol] = sMountPoint;
        return fRc;

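    # sfdisk script sketch: a lone ';' line requests one partition covering the
    # whole disk with default settings, while ',<N>' requests one partition of
    # N 512-byte sectors at the default offset. The resulting partition node is
    # <disk>1, or <disk>p1 for NVMe devices.
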
    def destroyVolume(self, oExec, sPool, sVol):
        """
        Destroys the given volume.
        """
        # Unmount first
        sMountPoint = self.dMounts[sPool + '/' + sVol];
        fRc = oExec.execBinaryNoStdOut('umount', (sMountPoint,));
        self.dMounts.pop(sPool + '/' + sVol);
        oExec.rmDir(sMountPoint);
        if sPool in self.dSimplePools:
            # Wipe partition table
            sDiskPath = self.dSimplePools.get(sPool);
            if sDiskPath.find('zram') == -1:
                fRc = oExec.execBinaryNoStdOut('sfdisk', ('--no-reread', '--wipe', 'always', '-q', '-f', '--delete', \
                                                          sDiskPath));
        else:
            fRc = oExec.execBinaryNoStdOut('lvremove', (sPool + '/' + sVol,));
        return fRc;

    def destroyPool(self, oExec, sPool):
        """
        Destroys the given storage pool.
        """
        fRc = True;
        if sPool in self.dSimplePools:
            self.dSimplePools.pop(sPool);
        else:
            fRc = oExec.execBinaryNoStdOut('vgremove', (sPool,));
        return fRc;

    def cleanupPoolsAndVolumes(self, oExec, sPoolIdStart, sVolIdStart):
        """
        Cleans up any pools and volumes starting with the name in the given
        parameters.
        """
        # @todo: Needs implementation, for LVM based configs a similar approach can be used
        #        as for Solaris.
        _ = oExec;
        _ = sPoolIdStart;
        _ = sVolIdStart;
        return True;

    def createRamDisk(self, oExec, cbRamDisk):
        """
        Creates a RAM backed disk with the given size.
        """
        # Make sure the ZRAM module is loaded.
        oDisk = None;
        fRc = oExec.execBinaryNoStdOut('modprobe', ('zram',));
        if fRc:
            fRc, sOut, _ = oExec.execBinary('zramctl', ('--raw', '-f', '-s', str(cbRamDisk)));
            if fRc:
                oDisk = StorageDisk(sOut.rstrip(), True);

        return oDisk;

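    # Sketch: 'zramctl --raw -f -s <bytes>' grabs the first free /dev/zramN
    # device, sizes it and prints the device node (e.g. '/dev/zram0') on
    # stdout, which is the path wrapped into the returned StorageDisk.
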
    def destroyRamDisk(self, oExec, oDisk):
        """
        Destroys the given ramdisk object.
        """
        return oExec.execBinaryNoStdOut('zramctl', ('-r', oDisk.getPath()));

## @name Host disk config types.
## @{
g_ksDiskCfgStatic = 'StaticDir';
g_ksDiskCfgRegExp = 'RegExp';
g_ksDiskCfgList = 'DiskList';
## @}

class DiskCfg(object):
    """
    Host disk configuration.
    """

    def __init__(self, sTargetOs, sCfgType, oDisks):
        self.sTargetOs = sTargetOs;
        self.sCfgType = sCfgType;
        self.oDisks = oDisks;

    def getTargetOs(self):
        return self.sTargetOs;

    def getCfgType(self):
        return self.sCfgType;

    def isCfgStaticDir(self):
        return self.sCfgType == g_ksDiskCfgStatic;

    def isCfgRegExp(self):
        return self.sCfgType == g_ksDiskCfgRegExp;

    def isCfgList(self):
        return self.sCfgType == g_ksDiskCfgList;

    def getDisks(self):
        return self.oDisks;

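# Illustrative DiskCfg constructions for the three flavours (target OS
# strings, paths and patterns below are made up):
#   DiskCfg('linux',   g_ksDiskCfgRegExp, 'sd[b-z]');            # probe /dev by pattern
#   DiskCfg('solaris', g_ksDiskCfgList,   ['c1t0d0', 'c1t1d0']); # explicit disk list
#   DiskCfg('win',     g_ksDiskCfgStatic, 'C:\\Testdir');        # plain directory, no pools
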
class StorageCfg(object):
    """
    Storage configuration helper class taking care of the different host OS.
    """

    def __init__(self, oExec, oDiskCfg):
        self.oExec = oExec;
        self.lstDisks = [ ]; # List of disks present in the system.
        self.dPools = { };   # Dictionary of storage pools.
        self.dVols = { };    # Dictionary of volumes.
        self.iPoolId = 0;
        self.iVolId = 0;
        self.oDiskCfg = oDiskCfg;

        fRc = True;
        oStorOs = None;
        if oDiskCfg.getTargetOs() == 'solaris':
            oStorOs = StorageConfigOsSolaris();
        elif oDiskCfg.getTargetOs() == 'linux':
            oStorOs = StorageConfigOsLinux(); # pylint: disable=redefined-variable-type
        elif not oDiskCfg.isCfgStaticDir():
            # For unknown hosts we only allow a static testing directory which we don't have to set up.
            fRc = False;

        if fRc:
            self.oStorOs = oStorOs;
            if oDiskCfg.isCfgRegExp():
                self.lstDisks = oStorOs.getDisksMatchingRegExp(oDiskCfg.getDisks());
            elif oDiskCfg.isCfgList():
                # Assume a list of disks and add.
                for sDisk in oDiskCfg.getDisks():
                    self.lstDisks.append(StorageDisk(sDisk));
            elif oDiskCfg.isCfgStaticDir():
                if not os.path.exists(oDiskCfg.getDisks()):
                    self.oExec.mkDir(oDiskCfg.getDisks(), 0o700);

    def __del__(self):
        self.cleanup();
        self.oDiskCfg = None;

    def cleanup(self):
        """
        Cleans up any created storage configs.
        """

        if not self.oDiskCfg.isCfgStaticDir():
            # Destroy all volumes first.
            for sMountPoint in list(self.dVols.keys()): # pylint: disable=consider-iterating-dictionary
                self.destroyVolume(sMountPoint);

            # Destroy all pools.
            for sPool in list(self.dPools.keys()): # pylint: disable=consider-iterating-dictionary
                self.destroyStoragePool(sPool);

        self.dVols.clear();
        self.dPools.clear();
        self.iPoolId = 0;
        self.iVolId = 0;

    def getRawDisk(self):
        """
        Returns a raw disk device from the list of free devices for use.
        """

        for oDisk in self.lstDisks:
            if oDisk.isUsed() is False:
                oDisk.setUsed(True);
                return oDisk.getPath();

        return None;

    def getUnusedDiskCount(self):
        """
        Returns the number of unused disks.
        """

        cDisksUnused = 0;
        for oDisk in self.lstDisks:
            if not oDisk.isUsed():
                cDisksUnused += 1;

        return cDisksUnused;

    def createStoragePool(self, cDisks = 0, sRaidLvl = None,
                          cbPool = None, fRamDisk = False):
        """
        Creates a new storage pool.
        """
        lstDisks = [ ];
        fRc = True;
        sPool = None;

        if not self.oDiskCfg.isCfgStaticDir():
            if fRamDisk:
                oDisk = self.oStorOs.createRamDisk(self.oExec, cbPool);
                if oDisk is not None:
                    lstDisks.append(oDisk);
                    cDisks = 1;
            else:
                if cDisks == 0:
                    cDisks = self.getUnusedDiskCount();

                for oDisk in self.lstDisks:
                    if not oDisk.isUsed():
                        oDisk.setUsed(True);
                        lstDisks.append(oDisk);
                        if len(lstDisks) == cDisks:
                            break;

            # Enough drives to satisfy the request?
            if len(lstDisks) == cDisks:
                # Create a list of all device paths
                lstDiskPaths = [ ];
                for oDisk in lstDisks:
                    lstDiskPaths.append(oDisk.getPath());

                # Find a name for the pool
                sPool = 'pool' + str(self.iPoolId);
                self.iPoolId += 1;

                fRc = self.oStorOs.createStoragePool(self.oExec, sPool, lstDiskPaths, sRaidLvl);
                if fRc:
                    self.dPools[sPool] = lstDisks;
                else:
                    self.iPoolId -= 1;
            else:
                fRc = False;

            # Cleanup in case of error.
            if not fRc:
                for oDisk in lstDisks:
                    oDisk.setUsed(False);
                    if oDisk.isRamDisk():
                        self.oStorOs.destroyRamDisk(self.oExec, oDisk);
        else:
            sPool = 'StaticDummy';

        return fRc, sPool;

    def destroyStoragePool(self, sPool):
        """
        Destroys the storage pool with the given ID.
        """

        fRc = True;

        if not self.oDiskCfg.isCfgStaticDir():
            lstDisks = self.dPools.get(sPool);
            if lstDisks is not None:
                fRc = self.oStorOs.destroyPool(self.oExec, sPool);
                if fRc:
                    # Mark disks as unused
                    self.dPools.pop(sPool);
                    for oDisk in lstDisks:
                        oDisk.setUsed(False);
                        if oDisk.isRamDisk():
                            self.oStorOs.destroyRamDisk(self.oExec, oDisk);
            else:
                fRc = False;

        return fRc;

    def createVolume(self, sPool, cbVol = None):
        """
        Creates a new volume from the given pool returning the mountpoint.
        """

        fRc = True;
        sMountPoint = None;
        if not self.oDiskCfg.isCfgStaticDir():
            if sPool in self.dPools:
                sVol = 'vol' + str(self.iVolId);
                sMountPoint = self.oStorOs.getMntBase() + '/' + sVol;
                self.iVolId += 1;
                fRc = self.oStorOs.createVolume(self.oExec, sPool, sVol, sMountPoint, cbVol);
                if fRc:
                    self.dVols[sMountPoint] = (sVol, sPool);
                else:
                    self.iVolId -= 1;
            else:
                fRc = False;
        else:
            sMountPoint = self.oDiskCfg.getDisks();

        return fRc, sMountPoint;

    def destroyVolume(self, sMountPoint):
        """
        Destroy the volume at the given mount point.
        """

        fRc = True;
        if not self.oDiskCfg.isCfgStaticDir():
            sVol, sPool = self.dVols.get(sMountPoint);
            if sVol is not None:
                fRc = self.oStorOs.destroyVolume(self.oExec, sPool, sVol);
                if fRc:
                    self.dVols.pop(sMountPoint);
            else:
                fRc = False;

        return fRc;

    def mkDirOnVolume(self, sMountPoint, sDir, fMode = 0o700):
        """
        Creates a new directory on the volume pointed to by the given mount point.

        Returns success status.
        """
        return self.oExec.mkDir(os.path.join(sMountPoint, sDir), fMode);

    def cleanupLeftovers(self):
        """
        Tries to clean up any leftover pools and volumes from a failed previous run.
        """
        reporter.log('cleanupLeftovers starts');
        if not self.oDiskCfg.isCfgStaticDir():
            return self.oStorOs.cleanupPoolsAndVolumes(self.oExec, 'pool', 'vol');

        fRc = True;
        asDisks = self.oDiskCfg.getDisks();
        reporter.log("oDiskCfg.getDisks: %s" % asDisks);
        if os.path.exists(asDisks):
            reporter.log('os.listdir(asDisks): %s' % asDisks);
            for sEntry in os.listdir(asDisks):
                sPath = os.path.join(self.oDiskCfg.getDisks(), sEntry);
                reporter.log('path to sEntry: %s' % sPath);
                fRc = fRc and self.oExec.rmTree(sPath);
        reporter.log('cleanupLeftovers ends');
        return fRc;
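
# Hedged end-to-end usage sketch (oExec stands for the executor object the
# test driver supplies; the disk pattern and counts are made up):
#   oStorCfg = StorageCfg(oExec, DiskCfg('linux', g_ksDiskCfgRegExp, 'sd[b-z]'));
#   oStorCfg.cleanupLeftovers();                 # clear wreckage from earlier runs.
#   fRc, sPool = oStorCfg.createStoragePool(cDisks = 2, sRaidLvl = 'raid1');
#   if fRc:
#       fRc, sMountPoint = oStorCfg.createVolume(sPool);
#       # ... run the storage tests inside sMountPoint ...
#   oStorCfg.cleanup();                          # volumes first, then pools.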