VirtualBox

source: vbox/trunk/src/VBox/ValidationKit/tests/storage/storagecfg.py@ 103545

Last change on this file since 103545 was 103545, checked in by vboxsync, 12 months ago

storagecfg.py, fix _getActivePoolsStartingWith and _getActiveVolumesInPoolStartingWith, added more logging for cleanupLeftovers(), fixed typos

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 23.4 KB
Line 
1# -*- coding: utf-8 -*-
2# $Id: storagecfg.py 103545 2024-02-23 11:48:53Z vboxsync $
3
4"""
5VirtualBox Validation Kit - Storage test configuration API.
6"""
7
8__copyright__ = \
9"""
10Copyright (C) 2016-2023 Oracle and/or its affiliates.
11
12This file is part of VirtualBox base platform packages, as
13available from https://www.virtualbox.org.
14
15This program is free software; you can redistribute it and/or
16modify it under the terms of the GNU General Public License
17as published by the Free Software Foundation, in version 3 of the
18License.
19
20This program is distributed in the hope that it will be useful, but
21WITHOUT ANY WARRANTY; without even the implied warranty of
22MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23General Public License for more details.
24
25You should have received a copy of the GNU General Public License
26along with this program; if not, see <https://www.gnu.org/licenses>.
27
28The contents of this file may alternatively be used under the terms
29of the Common Development and Distribution License Version 1.0
30(CDDL), a copy of it is provided in the "COPYING.CDDL" file included
31in the VirtualBox distribution, in which case the provisions of the
32CDDL are applicable instead of those of the GPL.
33
34You may elect to license modified versions of this file under the
35terms and conditions of either the GPL or the CDDL or both.
36
37SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
38"""
39__version__ = "$Revision: 103545 $"
40
41# Standard Python imports.
42import os;
43import re;
44from testdriver import reporter;
45
46
class StorageDisk(object):
    """
    A single host disk available for storage testing.
    """

    def __init__(self, sPath, fRamDisk = False):
        self.sPath    = sPath;    # Device path of the disk.
        self.fUsed    = False;    # Whether the disk is currently claimed.
        self.fRamDisk = fRamDisk; # Whether the disk is RAM backed.

    def getPath(self):
        """
        Returns the device path of the disk.
        """
        return self.sPath;

    def isUsed(self):
        """
        Checks whether the disk is currently in use.
        """
        return self.fUsed;

    def isRamDisk(self):
        """
        Checks whether the disk object has a RAM backing.
        """
        return self.fRamDisk;

    def setUsed(self, fUsed):
        """
        Sets the used flag for the disk.

        Returns False when trying to claim a disk that is already in use,
        True otherwise.
        """
        # Claiming an already claimed disk is an error; releasing is always fine.
        if fUsed and self.fUsed:
            return False;
        self.fUsed = fUsed;
        return True;
88
class StorageConfigOs(object):
    """
    Base class for a single hosts OS storage configuration.
    """

    def _getDisksMatchingRegExpWithPath(self, sPath, sRegExp):
        """
        Returns a list of StorageDisk objects for the entries under sPath
        whose basename matches the given regular expression and which exist.
        """
        oRegExp  = re.compile(sRegExp);
        lstDisks = [];
        for sFile in os.listdir(sPath):
            sDiskPath = sPath + '/' + sFile;
            if oRegExp.match(os.path.basename(sFile)) and os.path.exists(sDiskPath):
                lstDisks.append(StorageDisk(sDiskPath));
        return lstDisks;
107
class StorageConfigOsSolaris(StorageConfigOs):
    """
    Class implementing the Solaris specifics for a storage configuration.
    """

    def __init__(self):
        StorageConfigOs.__init__(self);
        self.idxRamDisk = 0; # Running index used to generate unique ramdisk names.

    def _getActivePoolsStartingWith(self, oExec, sPoolIdStart):
        """
        Returns a list of pools starting with the given ID or None on failure.
        """
        lstPools = None;
        fRc, sOutput, _ = oExec.execBinary('zpool', ('list', '-H'));
        if fRc:
            lstPools = [];
            asPools = sOutput.splitlines();
            for sPool in asPools:
                # Pool names carry a decimal suffix (pool0 .. pool99), so match up to two digits.
                oMatchResult = re.match("%s[0-9]?[0-9]?" % sPoolIdStart, sPool); # either re.Match obj or None
                if oMatchResult:
                    lstPools.append(oMatchResult.group(0));
        return lstPools;

    def _getActiveVolumesInPoolStartingWith(self, oExec, sPool, sVolumeIdStart):
        """
        Returns a list of active volumes for the given pool starting with the given
        identifier or None on failure.
        """
        lstVolumes = None;
        fRc, sOutput, _ = oExec.execBinary('zfs', ('list', '-H'));
        if fRc:
            lstVolumes = [];
            asVolumes = sOutput.splitlines();
            for sVolume in asVolumes:
                oMatchResult = re.match("%s/%s" % (sPool, sVolumeIdStart), sVolume); # either re.Match obj or None
                if oMatchResult:
                    lstVolumes.append(oMatchResult.group(0));
        return lstVolumes;

    def getDisksMatchingRegExp(self, sRegExp):
        """
        Returns a list of disks matching the regular expression.
        """
        return self._getDisksMatchingRegExpWithPath('/dev/dsk', sRegExp);

    def getMntBase(self):
        """
        Returns the mountpoint base for the host.
        """
        return '/pools';

    def createStoragePool(self, oExec, sPool, asDisks, sRaidLvl):
        """
        Creates a new storage pool with the given disks and the given RAID level.
        """
        sZPoolRaid = None;
        # raidz needs at least two devices; 'raid5' (or unspecified) maps to it.
        if len(asDisks) > 1 and (sRaidLvl == 'raid5' or sRaidLvl is None):
            sZPoolRaid = 'raidz';

        fRc = True;
        if sZPoolRaid is not None:
            fRc = oExec.execBinaryNoStdOut('zpool', ('create', '-f', sPool, sZPoolRaid,) + tuple(asDisks));
        else:
            fRc = oExec.execBinaryNoStdOut('zpool', ('create', '-f', sPool,) + tuple(asDisks));

        return fRc;

    def createVolume(self, oExec, sPool, sVol, sMountPoint, cbVol = None):
        """
        Creates and mounts a filesystem at the given mountpoint using the
        given pool and volume IDs.
        """
        fRc = True;
        if cbVol is not None:
            fRc = oExec.execBinaryNoStdOut('zfs', ('create', '-o', 'mountpoint='+sMountPoint, '-V', cbVol, sPool + '/' + sVol));
        else:
            fRc = oExec.execBinaryNoStdOut('zfs', ('create', '-o', 'mountpoint='+sMountPoint, sPool + '/' + sVol));

        # @todo Add proper parameters to set proper owner:group ownership, the testcase broke in r133060 for Solaris
        #       because creating directories is now done using the python mkdir API instead of calling 'sudo mkdir...'.
        #       No one noticed though because testboxstor1 went out of action before...
        #       Will get fixed as soon as I'm back home.
        if fRc:
            fRc = oExec.execBinaryNoStdOut('chmod', ('777', sMountPoint));

        return fRc;

    def destroyVolume(self, oExec, sPool, sVol):
        """
        Destroys the given volume.
        """
        fRc = oExec.execBinaryNoStdOut('zfs', ('destroy', sPool + '/' + sVol));
        return fRc;

    def destroyPool(self, oExec, sPool):
        """
        Destroys the given storage pool.

        Returns a boolean status (not the raw execBinary() result).
        """
        # execBinary() returns a (fRc, sOutput, sError) tuple; unpack it so
        # callers get a plain boolean — the tuple itself would always be
        # truthy and mask failures in 'if fRc:' checks.
        fRc, _, _ = oExec.execBinary('zpool', ('destroy', sPool));
        return fRc;

    def cleanupPoolsAndVolumes(self, oExec, sPoolIdStart, sVolIdStart):
        """
        Cleans up any pools and volumes starting with the name in the given
        parameters.

        Returns True on success, False if enumerating or destroying anything failed.
        """
        reporter.log('cleanupPoolsAndVolumes starts');
        fRc = True;
        lstPools = self._getActivePoolsStartingWith(oExec, sPoolIdStart);
        reporter.log('lstPools: %s' % lstPools);
        if lstPools is not None:
            for sPool in lstPools:
                lstVolumes = self._getActiveVolumesInPoolStartingWith(oExec, sPool, sVolIdStart);
                reporter.log('lstVolumes: %s' % lstVolumes);
                if lstVolumes is not None:
                    # Destroy all the volumes first
                    reporter.log('destroying volumes starts');
                    for sVolume in lstVolumes:
                        # Unpack the (fRc, sOutput, sError) tuple; the tuple itself
                        # is always truthy and would hide a failed destroy.
                        fRc2, _, _ = oExec.execBinary('zfs', ('destroy', sVolume));
                        if not fRc2:
                            fRc = fRc2;
                    reporter.log('destroying volumes ends');

                    # Destroy the pool
                    reporter.log('destroying pools starts');
                    fRc2 = self.destroyPool(oExec, sPool);
                    reporter.log('destroying pools ends');
                    if not fRc2:
                        fRc = fRc2;
                else:
                    fRc = False;
        else:
            fRc = False;
        reporter.log('cleanupPoolsAndVolumes is finished');
        return fRc;

    def createRamDisk(self, oExec, cbRamDisk):
        """
        Creates a RAM backed disk with the given size.

        Returns a StorageDisk object on success, None on failure.
        """
        oDisk = None;
        sRamDiskName = 'ramdisk%u' % (self.idxRamDisk,);
        fRc, _ , _ = oExec.execBinary('ramdiskadm', ('-a', sRamDiskName, str(cbRamDisk)));
        if fRc:
            self.idxRamDisk += 1;
            oDisk = StorageDisk('/dev/ramdisk/%s' % (sRamDiskName, ), True);

        return oDisk;

    def destroyRamDisk(self, oExec, oDisk):
        """
        Destroys the given ramdisk object.
        """
        sRamDiskName = os.path.basename(oDisk.getPath());
        return oExec.execBinaryNoStdOut('ramdiskadm', ('-d', sRamDiskName));
264
class StorageConfigOsLinux(StorageConfigOs):
    """
    Class implementing the Linux specifics for a storage configuration.
    """

    def __init__(self):
        StorageConfigOs.__init__(self);
        self.dSimplePools = { }; # Simple storage pools which don't use lvm (just one partition)
        self.dMounts      = { }; # Pool/Volume to mountpoint mapping.

    def _getDmRaidLevelFromLvl(self, sRaidLvl):
        """
        Converts our raid level indicators to something mdadm can understand.
        """
        if sRaidLvl is None or sRaidLvl == 'raid0':
            return 'stripe';
        if sRaidLvl == 'raid5':
            return '5';
        if sRaidLvl == 'raid1':
            return 'mirror';
        return 'stripe';

    def getDisksMatchingRegExp(self, sRegExp):
        """
        Returns a list of disks matching the regular expression.
        """
        return self._getDisksMatchingRegExpWithPath('/dev/', sRegExp);

    def getMntBase(self):
        """
        Returns the mountpoint base for the host.
        """
        return '/mnt';

    def createStoragePool(self, oExec, sPool, asDisks, sRaidLvl):
        """
        Creates a new storage pool with the given disks and the given RAID level.
        """
        fRc = True;
        if len(asDisks) == 1 and sRaidLvl is None:
            # Doesn't require LVM, put into the simple pools dictionary so we can
            # use it when creating a volume later.
            self.dSimplePools[sPool] = asDisks[0];
        else:
            # If a RAID is required use dm-raid first to create one.
            asLvmPvDisks = asDisks;
            fRc = oExec.execBinaryNoStdOut('mdadm', ('--create', '/dev/md0', '--assume-clean',
                                                     '--level=' + self._getDmRaidLevelFromLvl(sRaidLvl),
                                                     '--raid-devices=' + str(len(asDisks))) + tuple(asDisks));
            if fRc:
                # /dev/md0 is the only block device to use for our volume group.
                asLvmPvDisks = [ '/dev/md0' ];

            # Create a physical volume on every disk first.
            for sLvmPvDisk in asLvmPvDisks:
                fRc = oExec.execBinaryNoStdOut('pvcreate', (sLvmPvDisk, ));
                if not fRc:
                    break;

            if fRc:
                # Create volume group with all physical volumes included
                fRc = oExec.execBinaryNoStdOut('vgcreate', (sPool, ) + tuple(asLvmPvDisks));
        return fRc;

    def createVolume(self, oExec, sPool, sVol, sMountPoint, cbVol = None):
        """
        Creates and mounts a filesystem at the given mountpoint using the
        given pool and volume IDs.
        """
        fRc = True;
        sBlkDev = None;
        if sPool in self.dSimplePools:
            sDiskPath = self.dSimplePools.get(sPool);
            if sDiskPath.find('zram') != -1:
                # zram devices are used as-is, no partitioning.
                sBlkDev = sDiskPath;
            else:
                # Create a partition with the requested size
                sFdiskScript = ';\n'; # Single partition filling everything
                if cbVol is not None:
                    sFdiskScript = ',' + str(cbVol // 512) + '\n'; # Get number of sectors
                fRc = oExec.execBinaryNoStdOut('sfdisk', ('--no-reread', '--wipe', 'always', '-q', '-f', sDiskPath), \
                                               sFdiskScript);
                if fRc:
                    # NVMe devices name their partitions with a 'p' infix (nvme0n1p1).
                    if sDiskPath.find('nvme') != -1:
                        sBlkDev = sDiskPath + 'p1';
                    else:
                        sBlkDev = sDiskPath + '1';
        else:
            if cbVol is None:
                fRc = oExec.execBinaryNoStdOut('lvcreate', ('-l', '100%FREE', '-n', sVol, sPool));
            else:
                fRc = oExec.execBinaryNoStdOut('lvcreate', ('-L', str(cbVol), '-n', sVol, sPool));
            if fRc:
                # LVM exposes logical volume <vg>/<lv> as /dev/mapper/<vg>-<lv>.
                # The original code was missing the slash after 'mapper' which
                # produced a nonexistent path like '/dev/mapperpool0-vol0'.
                sBlkDev = '/dev/mapper/' + sPool + '-' + sVol;

        if fRc is True and sBlkDev is not None:
            # Create a filesystem and mount it
            fRc = oExec.execBinaryNoStdOut('mkfs.ext4', ('-F', '-F', sBlkDev,));
            fRc = fRc and oExec.mkDir(sMountPoint);
            fRc = fRc and oExec.execBinaryNoStdOut('mount', (sBlkDev, sMountPoint));
            if fRc:
                self.dMounts[sPool + '/' + sVol] = sMountPoint;
        return fRc;

    def destroyVolume(self, oExec, sPool, sVol):
        """
        Destroys the given volume.
        """
        # Unmount first
        sMountPoint = self.dMounts[sPool + '/' + sVol];
        fRc = oExec.execBinaryNoStdOut('umount', (sMountPoint,));
        self.dMounts.pop(sPool + '/' + sVol);
        oExec.rmDir(sMountPoint);
        if sPool in self.dSimplePools:
            # Wipe partition table
            sDiskPath = self.dSimplePools.get(sPool);
            if sDiskPath.find('zram') == -1:
                fRc = oExec.execBinaryNoStdOut('sfdisk', ('--no-reread', '--wipe', 'always', '-q', '-f', '--delete', \
                                               sDiskPath));
        else:
            fRc = oExec.execBinaryNoStdOut('lvremove', (sPool + '/' + sVol,));
        return fRc;

    def destroyPool(self, oExec, sPool):
        """
        Destroys the given storage pool.
        """
        fRc = True;
        if sPool in self.dSimplePools:
            self.dSimplePools.pop(sPool);
        else:
            fRc = oExec.execBinaryNoStdOut('vgremove', (sPool,));
        return fRc;

    def cleanupPoolsAndVolumes(self, oExec, sPoolIdStart, sVolIdStart):
        """
        Cleans up any pools and volumes starting with the name in the given
        parameters.
        """
        # @todo: Needs implementation, for LVM based configs a similar approach can be used
        #        as for Solaris.
        _ = oExec;
        _ = sPoolIdStart;
        _ = sVolIdStart;
        return True;

    def createRamDisk(self, oExec, cbRamDisk):
        """
        Creates a RAM backed disk with the given size.

        Returns a StorageDisk object on success, None on failure.
        """
        # Make sure the ZRAM module is loaded.
        oDisk = None;
        fRc = oExec.execBinaryNoStdOut('modprobe', ('zram',));
        if fRc:
            fRc, sOut, _ = oExec.execBinary('zramctl', ('--raw', '-f', '-s', str(cbRamDisk)));
            if fRc:
                oDisk = StorageDisk(sOut.rstrip(), True);

        return oDisk;

    def destroyRamDisk(self, oExec, oDisk):
        """
        Destroys the given ramdisk object.
        """
        return oExec.execBinaryNoStdOut('zramctl', ('-r', oDisk.getPath()));
430
## @name Host disk config types.
## @{
g_ksDiskCfgStatic = 'StaticDir'; # Use a fixed directory; no pool/volume management.
g_ksDiskCfgRegExp = 'RegExp';    # Discover disks by matching device names against a regular expression.
g_ksDiskCfgList = 'DiskList';    # Explicit list of disk device paths.
## @}
437
class DiskCfg(object):
    """
    Host disk configuration.
    """

    def __init__(self, sTargetOs, sCfgType, oDisks):
        self.sTargetOs = sTargetOs; # Target host OS identifier.
        self.sCfgType  = sCfgType;  # One of the g_ksDiskCfg* type constants.
        self.oDisks    = oDisks;    # Directory path, regexp string or disk list.

    def getTargetOs(self):
        """ Returns the target OS identifier. """
        return self.sTargetOs;

    def getCfgType(self):
        """ Returns the raw config type string. """
        return self.sCfgType;

    def isCfgStaticDir(self):
        """ Checks whether this is a static directory configuration. """
        return self.sCfgType == g_ksDiskCfgStatic;

    def isCfgRegExp(self):
        """ Checks whether this is a regular expression configuration. """
        return self.sCfgType == g_ksDiskCfgRegExp;

    def isCfgList(self):
        """ Checks whether this is a disk list configuration. """
        return self.sCfgType == g_ksDiskCfgList;

    def getDisks(self):
        """ Returns the disk configuration object (dir path, regexp or list). """
        return self.oDisks;
465
class StorageCfg(object):
    """
    Storage configuration helper class taking care of the different host OS.
    """

    def __init__(self, oExec, oDiskCfg):
        """
        Sets up the disk list from the given disk configuration.

        oExec is the executor used to run configuration commands,
        oDiskCfg a DiskCfg instance describing how to find the disks.
        """
        self.oExec    = oExec;
        self.lstDisks = [ ]; # List of disks present in the system.
        self.dPools   = { }; # Dictionary of storage pools.
        self.dVols    = { }; # Dictionary of volumes: mountpoint -> (vol, pool).
        self.iPoolId  = 0;
        self.iVolId   = 0;
        self.oDiskCfg = oDiskCfg;

        fRc = True;
        oStorOs = None;
        if oDiskCfg.getTargetOs() == 'solaris':
            oStorOs = StorageConfigOsSolaris();
        elif oDiskCfg.getTargetOs() == 'linux':
            oStorOs = StorageConfigOsLinux(); # pylint: disable=redefined-variable-type
        elif not oDiskCfg.isCfgStaticDir():
            # For unknown hosts only allow a static testing directory we don't care about setting up
            fRc = False;

        if fRc:
            self.oStorOs = oStorOs;
            if oDiskCfg.isCfgRegExp():
                self.lstDisks = oStorOs.getDisksMatchingRegExp(oDiskCfg.getDisks());
            elif oDiskCfg.isCfgList():
                # Assume a list of disks and add.
                for sDisk in oDiskCfg.getDisks():
                    self.lstDisks.append(StorageDisk(sDisk));
            elif oDiskCfg.isCfgStaticDir():
                if not os.path.exists(oDiskCfg.getDisks()):
                    self.oExec.mkDir(oDiskCfg.getDisks(), 0o700);

    def __del__(self):
        self.cleanup();
        self.oDiskCfg = None;

    def cleanup(self):
        """
        Cleans up any created storage configs.
        """
        if not self.oDiskCfg.isCfgStaticDir():
            # Destroy all volumes first.
            for sMountPoint in list(self.dVols.keys()): # pylint: disable=consider-iterating-dictionary
                self.destroyVolume(sMountPoint);

            # Destroy all pools.
            for sPool in list(self.dPools.keys()): # pylint: disable=consider-iterating-dictionary
                self.destroyStoragePool(sPool);

        self.dVols.clear();
        self.dPools.clear();
        self.iPoolId = 0;
        self.iVolId  = 0;

    def getRawDisk(self):
        """
        Returns a raw disk device from the list of free devices for use,
        marking it as used; None when no disk is free.
        """
        for oDisk in self.lstDisks:
            if oDisk.isUsed() is False:
                oDisk.setUsed(True);
                return oDisk.getPath();

        return None;

    def getUnusedDiskCount(self):
        """
        Returns the number of unused disks.
        """
        cDisksUnused = 0;
        for oDisk in self.lstDisks:
            if not oDisk.isUsed():
                cDisksUnused += 1;

        return cDisksUnused;

    def createStoragePool(self, cDisks = 0, sRaidLvl = None,
                          cbPool = None, fRamDisk = False):
        """
        Create a new storage pool.

        cDisks is the number of disks to use (0 means all currently unused),
        sRaidLvl the RAID level ('raid0', 'raid1', 'raid5' or None), cbPool
        the RAM disk size when fRamDisk is set.

        Returns a (success, pool name) tuple.
        """
        lstDisks = [ ];
        fRc = True;
        sPool = None;

        if not self.oDiskCfg.isCfgStaticDir():
            if fRamDisk:
                oDisk = self.oStorOs.createRamDisk(self.oExec, cbPool);
                if oDisk is not None:
                    lstDisks.append(oDisk);
                    cDisks = 1;
            else:
                if cDisks == 0:
                    cDisks = self.getUnusedDiskCount();

                for oDisk in self.lstDisks:
                    if not oDisk.isUsed():
                        oDisk.setUsed(True);
                        lstDisks.append(oDisk);
                        if len(lstDisks) == cDisks:
                            break;

            # Enough drives to satisfy the request?  lstDisks must be non-empty
            # as well: a failed RAM disk creation or zero free disks would
            # otherwise pass the count check (0 == 0) and attempt to create a
            # pool without any backing device.
            if lstDisks and len(lstDisks) == cDisks:
                # Create a list of all device paths
                lstDiskPaths = [ ];
                for oDisk in lstDisks:
                    lstDiskPaths.append(oDisk.getPath());

                # Find a name for the pool
                sPool = 'pool' + str(self.iPoolId);
                self.iPoolId += 1;

                fRc = self.oStorOs.createStoragePool(self.oExec, sPool, lstDiskPaths, sRaidLvl);
                if fRc:
                    self.dPools[sPool] = lstDisks;
                else:
                    self.iPoolId -= 1;
            else:
                fRc = False;

            # Cleanup in case of error.
            if not fRc:
                for oDisk in lstDisks:
                    oDisk.setUsed(False);
                    if oDisk.isRamDisk():
                        self.oStorOs.destroyRamDisk(self.oExec, oDisk);
        else:
            sPool = 'StaticDummy';

        return fRc, sPool;

    def destroyStoragePool(self, sPool):
        """
        Destroys the storage pool with the given ID, releasing its disks.
        """
        fRc = True;

        if not self.oDiskCfg.isCfgStaticDir():
            lstDisks = self.dPools.get(sPool);
            if lstDisks is not None:
                fRc = self.oStorOs.destroyPool(self.oExec, sPool);
                if fRc:
                    # Mark disks as unused
                    self.dPools.pop(sPool);
                    for oDisk in lstDisks:
                        oDisk.setUsed(False);
                        if oDisk.isRamDisk():
                            self.oStorOs.destroyRamDisk(self.oExec, oDisk);
            else:
                fRc = False;

        return fRc;

    def createVolume(self, sPool, cbVol = None):
        """
        Creates a new volume from the given pool.

        Returns a (success, mountpoint) tuple.
        """
        fRc = True;
        sMountPoint = None;
        if not self.oDiskCfg.isCfgStaticDir():
            if sPool in self.dPools:
                sVol = 'vol' + str(self.iVolId);
                sMountPoint = self.oStorOs.getMntBase() + '/' + sVol;
                self.iVolId += 1;
                fRc = self.oStorOs.createVolume(self.oExec, sPool, sVol, sMountPoint, cbVol);
                if fRc:
                    self.dVols[sMountPoint] = (sVol, sPool);
                else:
                    self.iVolId -= 1;
            else:
                fRc = False;
        else:
            sMountPoint = self.oDiskCfg.getDisks();

        return fRc, sMountPoint;

    def destroyVolume(self, sMountPoint):
        """
        Destroy the volume at the given mount point.

        Returns False for an unknown mount point.
        """
        fRc = True;
        if not self.oDiskCfg.isCfgStaticDir():
            # Supply a default so an unknown mount point doesn't blow up with a
            # TypeError when unpacking None; the guard below then handles it.
            sVol, sPool = self.dVols.get(sMountPoint, (None, None));
            if sVol is not None:
                fRc = self.oStorOs.destroyVolume(self.oExec, sPool, sVol);
                if fRc:
                    self.dVols.pop(sMountPoint);
            else:
                fRc = False;

        return fRc;

    def mkDirOnVolume(self, sMountPoint, sDir, fMode = 0o700):
        """
        Creates a new directory on the volume pointed to by the given mount point.

        Returns success status.
        """
        return self.oExec.mkDir(os.path.join(sMountPoint, sDir), fMode);

    def cleanupLeftovers(self):
        """
        Tries to clean up any leftover pools and volumes from a failed previous run.
        """
        reporter.log('cleanupLeftovers starts');
        fRc = True;
        if not self.oDiskCfg.isCfgStaticDir():
            fRc = self.oStorOs.cleanupPoolsAndVolumes(self.oExec, 'pool', 'vol');
        else:
            asDisks = self.oDiskCfg.getDisks();
            reporter.log("oDiskCfg.getDisks: %s" % asDisks);
            if os.path.exists(asDisks):
                for sEntry in os.listdir(asDisks):
                    fRc = fRc and self.oExec.rmTree(os.path.join(asDisks, sEntry));
        # Log the end marker on both paths (the original early return skipped it).
        reporter.log('cleanupLeftovers ends');
        return fRc;
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette