5 # The contents of this file are subject to the terms of the
6 # Common Development and Distribution License (the "License").
7 # You may not use this file except in compliance with the License.
9 # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 # or http://www.opensolaris.org/os/licensing.
11 # See the License for the specific language governing permissions
12 # and limitations under the License.
14 # When distributing Covered Code, include this CDDL HEADER in each
15 # file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 # If applicable, add the following below this CDDL HEADER, with the
17 # fields enclosed by brackets "[]" replaced with your own identifying
18 # information: Portions Copyright [yyyy] [name of copyright owner]
26 from bisect import insort, bisect_left, bisect_right
# Absolute paths to the external commands this module shells out to.
# PFCMD (pfexec) is prepended to zfs invocations that need elevated
# privileges (snapshot create/destroy, hold/release, property writes).
32 # Commonly used command paths
33 PFCMD = "/usr/bin/pfexec"
34 ZFSCMD = "/usr/sbin/zfs"
35 ZPOOLCMD = "/usr/sbin/zpool"
# NOTE(review): this view is a sampled extraction -- several original
# lines (list initialisations, if/else guards, try: lines, returns and
# some backslash continuations) are elided between the lines shown.
38 class Datasets(Exception):
40 Container class for all zfs datasets. Maintains a centralised
41 list of datasets (generated on demand) and accessor methods.
42 Also allows clients to notify when a refresh might be necessary.
# NOTE(review): inheriting from Exception looks unintentional for a
# container class -- confirm no caller raises Datasets before changing.
44 # Class wide instead of per-instance in order to avoid duplication
# The class-wide caches (filesystems, volumes, snapshots -- declared on
# lines elided from this view) are built lazily by the list_* methods
# below and invalidated via refresh_snapshots().
49 # Mutex locks to prevent concurrent writes to above class wide
# snapshotslock is deliberately public: ZPool.list_snapshots() and
# ReadWritableDataset.list_snapshots() also take it.
51 _filesystemslock = threading.Lock()
52 _volumeslock = threading.Lock()
53 snapshotslock = threading.Lock()
# Snapshot every dataset tagged com.sun:auto-snapshot[:<tag>]=true,
# using "zfs snapshot -r" where a whole subtree is included and single
# snapshots where an excluded child forbids recursion.
55 def create_auto_snapshot_set(self, label, tag = None):
57 Create a complete set of snapshots as if this were
58 for a standard zfs-auto-snapshot operation.
62 A label to apply to the snapshot name. Cannot be None.
64 A string indicating one of the standard auto-snapshot schedules
65 tags to check (eg. "frequent" will map to the tag:
66 com.sun:auto-snapshot:frequent). If specified as a zfs property
67 on a zfs dataset, the property corresponding to the tag will
68 override the wildcard property: "com.sun:auto-snapshot"
# Working lists (everything/included/excluded/single/recursive/
# finalrecursive) are initialised on lines elided from this view.
78 # Get auto-snap property in two passes. First with the schedule
79 # specific tag override value, then with the general property value
80 cmd = [ZFSCMD, "list", "-H", "-t", "filesystem,volume",
81 "-o", "name,com.sun:auto-snapshot", "-s", "name"]
# Pass 1: schedule-specific override property (only when tag given).
83 overrideprop = "com.sun:auto-snapshot:" + tag
84 scmd = [ZFSCMD, "list", "-H", "-t", "filesystem,volume",
85 "-o", "name," + overrideprop, "-s", "name"]
86 outdata,errdata = util.run_command(scmd)
87 for line in outdata.rstrip().split('\n'):
89 # Skip over unset values.
92 # Add to everything list. This is used later
93 # for identifying parents/children of a given
94 # filesystem or volume.
95 everything.append(line[0])
97 included.append(line[0])
98 elif line[1] == "false":
99 excluded.append(line[0])
# Pass 2: the wildcard com.sun:auto-snapshot property.
100 # Now use the general property. If no value
101 # was set in the first pass, we set it here.
102 outdata,errdata = util.run_command(cmd)
103 for line in outdata.rstrip().split('\n'):
# 'everything' is kept name-sorted so membership can be tested with
# bisect_right instead of a linear scan.
105 idx = bisect_right(everything, line[0])
106 if len(everything) == 0 or \
107 everything[idx-1] != line[0]:
108 # Dataset is neither included nor excluded so far
111 everything.insert(idx, line[0])
112 if line[1] == "true":
113 included.insert(0, line[0])
114 elif line[1] == "false":
115 excluded.append(line[0])
117 # Now figure out what can be recursively snapshotted and what
118 # must be singly snapshotted. Single snapshot restrictions apply
119 # to those datasets who have a child in the excluded list.
120 # 'included' is sorted in reverse alphabetical order.
121 for datasetname in included:
122 excludedchild = False
123 idx = bisect_right(everything, datasetname)
# Children are the names that sort immediately after the parent and
# share it as a prefix (name.find(...) == 0 is a prefix test).
124 children = [name for name in everything[idx:] if \
125 name.find(datasetname) == 0]
126 for child in children:
127 idx = bisect_left(excluded, child)
128 if idx < len(excluded) and excluded[idx] == child:
# An excluded descendant forces a non-recursive snapshot of the parent.
130 single.append(datasetname)
132 if excludedchild == False:
133 # We want recursive list sorted in alphabetical order
134 # so insert instead of append to the list.
135 # Also, remove all children from the recursive
136 # list, as they are covered by the parent
137 recursive = [x for x in recursive if x not in children]
138 recursive.insert(0, datasetname)
# Collapse the recursive list: drop entries whose parent is already
# scheduled for a recursive snapshot.
140 for datasetname in recursive:
141 parts = datasetname.rsplit('/', 1)
143 if parent == datasetname:
144 # Root filesystem of the Zpool, so
145 # this can't be inherited and must be
147 finalrecursive.append(datasetname)
149 idx = bisect_right(recursive, parent)
150 if len(recursive) > 0 and \
151 recursive[idx-1] == parent:
152 # Parent already marked for recursive snapshot: so skip
155 finalrecursive.append(datasetname)
# Finally perform the snapshots: recursive first, then singles.
157 for name in finalrecursive:
158 dataset = ReadWritableDataset(name)
159 dataset.create_snapshot(label, True)
161 dataset = ReadWritableDataset(name)
162 dataset.create_snapshot(label, False)
164 def list_auto_snapshot_sets(self, tag = None):
166 Returns a list of zfs filesystems and volumes tagged with
167 the "com.sun:auto-snapshot" property set to "true", either
168 set locally or inherited. Snapshots are excluded from the
173 A string indicating one of the standard auto-snapshot schedules
174 tags to check (eg. "frequent" will map to the tag:
175 com.sun:auto-snapshot:frequent). If specified as a zfs property
176 on a zfs dataset, the property corresponding to the tag will
177 override the wildcard property: "com.sun:auto-snapshot"
180 #Get auto-snap property in two passes. First with the global
181 #value, then overriding with the label/schedule specific value
186 cmd = [ZFSCMD, "list", "-H", "-t", "filesystem,volume",
187 "-o", "name,com.sun:auto-snapshot", "-s", "name"]
# Schedule-specific property takes precedence over the wildcard one.
189 overrideprop = "com.sun:auto-snapshot:" + tag
190 scmd = [ZFSCMD, "list", "-H", "-t", "filesystem,volume",
191 "-o", "name," + overrideprop, "-s", "name"]
192 outdata,errdata = util.run_command(scmd)
193 for line in outdata.rstrip().split('\n'):
195 if line[1] == "true":
196 included.append(line[0])
197 elif line[1] == "false":
198 excluded.append(line[0])
199 outdata,errdata = util.run_command(cmd)
200 for line in outdata.rstrip().split('\n'):
202 # Only set values that aren't already set. Don't override
# list.index() raises ValueError when absent -- the elided try/except
# lines use that to detect "not yet categorised" datasets.
204 included.index(line[0])
208 excluded.index(line[0])
211 # Dataset is not listed in either list.
212 if line[1] == "true":
213 included.append(line[0])
216 def list_filesystems(self, pattern = None):
218 List pattern matching filesystems sorted by name.
221 pattern -- Filter according to pattern (default None)
224 # Need to first ensure no other thread is trying to
225 # build this list at the same time.
226 Datasets._filesystemslock.acquire()
227 if Datasets.filesystems == None:
228 Datasets.filesystems = []
229 cmd = [ZFSCMD, "-t", "filesystem", \
230 "-o", "name,mountpoint", "-s", "name"]
232 p = subprocess.Popen(cmd,
233 stdout=subprocess.PIPE,
234 stderr=subprocess.PIPE,
236 outdata,errdata = p.communicate()
238 except OSError, message:
239 raise RuntimeError, "%s subprocess error:\n %s" % \
# Release the lock before raising so later callers don't deadlock.
242 Datasets._filesystemslock.release()
243 raise RuntimeError, '%s failed with exit code %d\n%s' % \
244 (str(cmd), err, errdata)
245 for line in outdata.rstrip().split('\n'):
246 line = line.rstrip().split()
247 Datasets.filesystems.append([line[0], line[1]])
248 Datasets._filesystemslock.release()
# pattern == None path: return a shallow copy of the full cached list.
251 filesystems = Datasets.filesystems[:]
253 # Regular expression pattern to match "pattern" parameter.
254 regexpattern = ".*%s.*" % pattern
255 patternobj = re.compile(regexpattern)
257 for fsname,fsmountpoint in Datasets.filesystems:
258 patternmatchobj = re.match(patternobj, fsname)
259 if patternmatchobj != None:
# FIXME: list.append() takes exactly one argument -- this raises
# TypeError whenever a pattern match succeeds. Presumably should be
# filesystems.append([fsname, fsmountpoint]) to mirror the cache shape.
260 filesystems.append(fsname, fsmountpoint)
263 def list_volumes(self, pattern = None):
265 List pattern matching volumes sorted by name.
268 pattern -- Filter according to pattern (default None)
271 Datasets._volumeslock.acquire()
272 if Datasets.volumes == None:
273 Datasets.volumes = []
274 cmd = [ZFSCMD, "list", "-H", "-t", "volume", \
275 "-o", "name", "-s", "name"]
277 p = subprocess.Popen(cmd,
278 stdout=subprocess.PIPE,
279 stderr=subprocess.PIPE,
281 outdata,errdata = p.communicate()
283 except OSError, message:
284 raise RuntimeError, "%s subprocess error:\n %s" % \
# Release the lock before raising so later callers don't deadlock.
287 Datasets._volumeslock.release()
288 raise RuntimeError, '%s failed with exit code %d\n%s' % \
289 (str(cmd), err, errdata)
290 for line in outdata.rstrip().split('\n'):
291 Datasets.volumes.append(line.rstrip())
292 Datasets._volumeslock.release()
# pattern == None path: shallow copy of the full cached list.
295 volumes = Datasets.volumes[:]
297 # Regular expression pattern to match "pattern" parameter.
298 regexpattern = ".*%s.*" % pattern
299 patternobj = re.compile(regexpattern)
301 for volname in Datasets.volumes:
302 patternmatchobj = re.match(patternobj, volname)
303 if patternmatchobj != None:
304 volumes.append(volname)
307 def list_snapshots(self, pattern = None):
309 List pattern matching snapshots sorted by creation date.
313 pattern -- Filter according to pattern (default None)
316 Datasets.snapshotslock.acquire()
317 if Datasets.snapshots == None:
318 Datasets.snapshots = []
# "zfs get -p creation" emits <numeric-time> <dataset-name> pairs for
# every dataset; snapshot lines are picked out by the "@" below.
320 cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value,name", "creation"]
322 p = subprocess.Popen(cmd,
323 stdout=subprocess.PIPE,
324 stderr=subprocess.PIPE,
326 outdata,errdata = p.communicate()
328 except OSError, message:
329 Datasets.snapshotslock.release()
330 raise RuntimeError, "%s subprocess error:\n %s" % \
333 Datasets.snapshotslock.release()
334 raise RuntimeError, '%s failed with exit code %d\n%s' % \
335 (str(cmd), err, errdata)
336 for dataset in outdata.rstrip().split('\n'):
337 if re.search("@", dataset):
# insort keeps 'snaps' ordered by the numeric creation time (the
# first element of each [time, name] pair).
338 insort(snaps, dataset.split())
# Cache entries are [name, creation-time] pairs, oldest first.
340 Datasets.snapshots.append([snap[1], long(snap[0])])
342 snapshots = Datasets.snapshots[:]
344 # Regular expression pattern to match "pattern" parameter.
345 regexpattern = ".*@.*%s" % pattern
346 patternobj = re.compile(regexpattern)
348 for snapname,snaptime in Datasets.snapshots:
349 patternmatchobj = re.match(patternobj, snapname)
350 if patternmatchobj != None:
351 snapshots.append([snapname, snaptime])
352 Datasets.snapshotslock.release()
355 def list_cloned_snapshots(self):
357 Returns a list of snapshots that have cloned filesystems
359 Snapshots with cloned filesystems can not be destroyed
360 unless dependent cloned filesystems are first destroyed.
# Every dataset's "origin" property names the snapshot it was cloned
# from ("-" when not a clone); collect the distinct origins.
362 cmd = [ZFSCMD, "list", "-H", "-o", "origin"]
363 outdata,errdata = util.run_command(cmd)
365 for line in outdata.rstrip().split('\n'):
366 details = line.rstrip()
# result.index() raising ValueError (handled on elided lines) is used
# as the "not seen yet" test before appending.
369 result.index(details)
371 result.append(details)
374 def list_held_snapshots(self):
376 Returns a list of snapshots that have a "userrefs"
377 property value of greater than 0. Result list is
378 sorted in order of creation time. Oldest listed first.
380 cmd = [ZFSCMD, "list", "-H",
383 "-o", "userrefs,name"]
384 outdata,errdata = util.run_command(cmd)
386 for line in outdata.rstrip().split('\n'):
387 details = line.split()
# userrefs is printed first; anything non-zero means a user hold.
388 if details[0] != "0":
389 result.append(details[1])
392 def refresh_snapshots(self):
394 Should be called when snapshots have been created or deleted
395 and a rescan should be performed. Rescan gets deferred until
396 next invocation of zfs.Dataset.list_snapshots()
399 # This is a little sub-optimal because we should be able to modify
400 # the snapshot list in place in some situations and regenerate the
401 # snapshot list without calling out to zfs(1m). But on the
402 # pro side, we will pick up any new snapshots since the last
403 # scan that we would be otherwise unaware of.
404 Datasets.snapshotslock.acquire()
# Setting the cache to None makes the next list_snapshots() rebuild it.
405 Datasets.snapshots = None
406 Datasets.snapshotslock.release()
# NOTE(review): the "class ZPool:" statement and parts of several
# method bodies are elided from this sampled view.
411 Base class for ZFS storage pool objects
# self.name assignment (elided) precedes these; health is queried once
# at construction time and cached for the object's lifetime.
413 def __init__(self, name):
415 self.health = self.__get_health()
416 self.__datasets = Datasets()
# Per-pool lazy caches for the list_* accessors below.
417 self.__filesystems = None
418 self.__volumes = None
419 self.__snapshots = None
421 def __get_health(self):
423 Returns pool health status: 'ONLINE', 'DEGRADED' or 'FAULTED'
425 cmd = [ZPOOLCMD, "list", "-H", "-o", "health", self.name]
426 outdata,errdata = util.run_command(cmd)
427 result = outdata.rstrip()
430 def get_capacity(self):
432 Returns the percentage of total pool storage in use.
433 Calculated based on the "used" and "available" properties
434 of the pool's top-level filesystem because the values account
435 for reservations and quotas of children in their calculations,
436 giving a more practical indication of how much capacity is used
# Raises ZPoolFaultedError when the pool is FAULTED -- its space
# properties cannot be queried in that state.
439 if self.health == "FAULTED":
440 raise ZPoolFaultedError("Can not determine capacity of zpool: %s" \
441 "because it is in a FAULTED state" \
444 cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value", \
445 "used,available", self.name]
446 outdata,errdata = util.run_command(cmd)
# -p gives raw byte counts, one value per line: used first, then avail.
447 _used,_available = outdata.rstrip().split('\n')
449 available = float(_available)
450 return 100.0 * used/(used + available)
452 def get_available_size(self):
454 How much unused space is available for use on this Zpool.
457 # zpool(1) doesn't report available space in
458 # units suitable for calculations but zfs(1)
459 # can so use it to find the value for the
460 # filesystem matching the pool.
461 # The root filesystem of the pool is simply
463 poolfs = Filesystem(self.name)
464 avail = poolfs.get_available_size()
467 def get_used_size(self):
469 How much space is in use on this Zpool.
472 # Same as ZPool.get_available_size(): zpool(1)
473 # doesn't generate suitable output so use
474 # zfs(1) on the toplevel filesystem
475 if self.health == "FAULTED":
476 raise ZPoolFaultedError("Can not determine used size of zpool: %s" \
477 "because it is in a FAULTED state" \
479 poolfs = Filesystem(self.name)
480 used = poolfs.get_used_size()
483 def list_filesystems(self):
485 Return a list of filesystems on this Zpool.
486 List is sorted by name.
# Built once then served from self.__filesystems on later calls.
488 if self.__filesystems == None:
490 # Provides pre-sorted filesystem list
491 for fsname,fsmountpoint in self.__datasets.list_filesystems():
# re.match anchors at the start, so this keeps names under this pool.
492 if re.match(self.name, fsname):
493 result.append([fsname, fsmountpoint])
494 self.__filesystems = result
495 return self.__filesystems
497 def list_volumes(self):
499 Return a list of volumes (zvol) on this Zpool
500 List is sorted by name
501 Built lazily and cached in self.__volumes.
502 if self.__volumes == None:
504 regexpattern = "^%s" % self.name
505 patternobj = re.compile(regexpattern)
506 for volname in self.__datasets.list_volumes():
507 patternmatchobj = re.match(patternobj, volname)
508 if patternmatchobj != None:
509 result.append(volname)
511 self.__volumes = result
512 return self.__volumes
514 def list_auto_snapshot_sets(self, tag = None):
516 Returns a list of zfs filesystems and volumes tagged with
517 the "com.sun:auto-snapshot" property set to "true", either
518 set locally or inherited. Snapshots are excluded from the
519 returned result. Results are not sorted.
523 A string indicating one of the standard auto-snapshot schedules
524 tags to check (eg. "frequent" will map to the tag:
525 com.sun:auto-snapshot:frequent). If specified as a zfs property
526 on a zfs dataset, the property corresponding to the tag will
527 override the wildcard property: "com.sun:auto-snapshot"
# Delegate to the system-wide list, then keep only this pool's entries.
531 allsets = self.__datasets.list_auto_snapshot_sets(tag)
532 if len(allsets) == 0:
535 regexpattern = "^%s" % self.name
536 patternobj = re.compile(regexpattern)
537 for datasetname in allsets:
538 patternmatchobj = re.match(patternobj, datasetname)
539 if patternmatchobj != None:
540 result.append(datasetname)
543 def list_snapshots(self, pattern = None):
545 List pattern matching snapshots sorted by creation date.
549 pattern -- Filter according to pattern (default None)
551 # If there isn't a list of snapshots for this dataset
552 # already, create it now and store it in order to save
553 # time later for potential future invocations.
554 Datasets.snapshotslock.acquire()
# If the global cache was invalidated, drop the per-pool cache too.
555 if Datasets.snapshots == None:
556 self.__snapshots = None
557 Datasets.snapshotslock.release()
558 if self.__snapshots == None:
560 regexpattern = "^%s.*@" % self.name
561 patternobj = re.compile(regexpattern)
562 for snapname,snaptime in self.__datasets.list_snapshots():
563 patternmatchobj = re.match(patternobj, snapname)
564 if patternmatchobj != None:
565 result.append([snapname, snaptime])
566 # Results already sorted by creation time
567 self.__snapshots = result
# pattern == None path returns the cached list directly.
569 return self.__snapshots
572 regexpattern = "^%s.*@.*%s" % (self.name, pattern)
573 patternobj = re.compile(regexpattern)
574 for snapname,snaptime in self.__snapshots:
575 patternmatchobj = re.match(patternobj, snapname)
576 if patternmatchobj != None:
577 snapshots.append([snapname, snaptime])
# __str__ (def line elided): human-readable pool summary; size figures
# are skipped when the pool is FAULTED (see except clause below).
581 return_string = "ZPool name: " + self.name
582 return_string = return_string + "\n\tHealth: " + self.health
584 return_string = return_string + \
586 str(self.get_used_size()/BYTESPERMB) + "Mb"
587 return_string = return_string + \
588 "\n\tAvailable: " + \
589 str(self.get_available_size()/BYTESPERMB) + "Mb"
590 return_string = return_string + \
592 str(self.get_capacity()) + "%"
593 except ZPoolFaultedError:
598 class ReadableDataset:
600 Base class for Filesystem, Volume and Snapshot classes
601 Provides methods for read only operations common to all.
# self.name assignment (elided) precedes these two attributes.
603 def __init__(self, name, creation = None):
# Creation time is lazily fetched by get_creation_time() when the
# caller did not supply it.
605 self.__creationTime = creation
606 self.datasets = Datasets()
# __str__ body (def line elided).
609 return_string = "ReadableDataset name: " + self.name + "\n"
612 def get_creation_time(self):
# Query and memoise the numeric creation timestamp on first use.
613 if self.__creationTime == None:
614 cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value", "creation",
616 outdata,errdata = util.run_command(cmd)
617 self.__creationTime = long(outdata.rstrip())
618 return self.__creationTime
# exists() (def line elided):
622 Returns True if the dataset is still existent on the system.
625 # Test existence of the dataset by checking the output of a
626 # simple zfs get command on the snapshot
627 cmd = [ZFSCMD, "get", "-H", "-o", "name", "type", self.name]
629 p = subprocess.Popen(cmd,
630 stdout=subprocess.PIPE,
631 stderr=subprocess.PIPE,
633 outdata,errdata = p.communicate()
635 except OSError, message:
636 raise RuntimeError, "%s subprocess error:\n %s" % \
637 (command, str(message))
# FIXME: 'command' above is undefined in this scope -- the local is
# named 'cmd', so this error path itself raises NameError.
# zfs get echoes the dataset name back when it exists.
642 result = outdata.rstrip()
643 if result == self.name:
648 def get_used_size(self):
# "used" in raw bytes (-p) for this dataset.
649 cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value", "used", self.name]
650 outdata,errdata = util.run_command(cmd)
651 return long(outdata.rstrip())
# Read a user property; with local=True only a locally-set value is
# reported (-s local), otherwise inherited values are included too.
653 def get_user_property(self, prop, local=False):
655 cmd = [ZFSCMD, "get", "-s", "local", "-H", "-o", "value", prop, self.name]
657 cmd = [ZFSCMD, "get", "-H", "-o", "value", prop, self.name]
658 outdata,errdata = util.run_command(cmd)
659 return outdata.rstrip()
# Property writes go through pfexec for the required privileges.
661 def set_user_property(self, prop, value):
662 cmd = [PFCMD, ZFSCMD, "set", "%s=%s" % (prop, value), self.name]
663 outdata,errdata = util.run_command(cmd)
# "zfs inherit" clears a locally-set property value.
665 def unset_user_property(self, prop):
666 cmd = [PFCMD, ZFSCMD, "inherit", prop, self.name]
667 outdata,errdata = util.run_command(cmd)
669 class Snapshot(ReadableDataset):
671 ZFS Snapshot object class.
672 Provides information and operations specific to ZFS snapshots
674 def __init__(self, name, creation = None):
677 name -- Name of the ZFS snapshot
678 creation -- Creation time of the snapshot if known (Default None)
680 ReadableDataset.__init__(self, name, creation)
# fsname is the dataset part before "@", snaplabel the part after.
681 self.fsname, self.snaplabel = self.__split_snapshot_name()
682 self.poolname = self.__get_pool_name()
# Pool name is the first path component of the parent dataset name
# (return statement elided from this view).
684 def __get_pool_name(self):
685 name = self.fsname.split("/", 1)
688 def __split_snapshot_name(self):
689 name = self.name.split("@", 1)
690 # Make sure this is really a snapshot and not a
691 # filesystem otherwise a filesystem could get
692 # destroyed instead of a snapshot. That would be
# If there was no "@", split() returns the whole name unchanged.
694 if name[0] == self.name:
695 raise SnapshotError("\'%s\' is not a valid snapshot name" \
697 return name[0],name[1]
699 def get_referenced_size(self):
701 How much unique storage space is used by this snapshot.
704 cmd = [ZFSCMD, "get", "-H", "-p", \
705 "-o", "value", "referenced", \
707 outdata,errdata = util.run_command(cmd)
708 return long(outdata.rstrip())
710 def list_children(self):
711 """Returns a recursive list of child snapshots of this snapshot"""
# Recursive (-r) snapshot listing under the parent filesystem, then
# filtered to names carrying this snapshot's label.
713 "list", "-t", "snapshot", "-H", "-r", "-o", "name",
715 outdata,errdata = util.run_command(cmd)
717 for line in outdata.rstrip().split('\n'):
718 if re.search("@%s" % (self.snaplabel), line) and \
723 def has_clones(self):
724 """Returns True if the snapshot has any dependent clones"""
# A clone lists this snapshot as its "origin" property.
725 cmd = [ZFSCMD, "list", "-H", "-o", "origin,name"]
726 outdata,errdata = util.run_command(cmd)
727 for line in outdata.rstrip().split('\n'):
728 details = line.rstrip().split()
729 if details[0] == self.name and \
734 def destroy(self, deferred=True):
736 Permanently remove this snapshot from the filesystem
737 Performs deferred destruction by default.
738 ("zfs destroy -d" defers removal until holds/clones are gone).
739 # Be sure it genuinely exists before trying to destroy it
740 if self.exists() == False:
742 if deferred == False:
743 cmd = [PFCMD, ZFSCMD, "destroy", self.name]
745 cmd = [PFCMD, ZFSCMD, "destroy", "-d", self.name]
747 outdata,errdata = util.run_command(cmd)
748 # Clear the global snapshot cache so that a rescan will be
749 # triggered on the next call to Datasets.list_snapshots()
750 self.datasets.refresh_snapshots()
# hold() (def line elided):
754 Place a hold on the snapshot with the specified "tag" string.
756 # FIXME - fails if hold is already held
757 # Be sure it genuinely exists before trying to place a hold
758 if self.exists() == False:
761 cmd = [PFCMD, ZFSCMD, "hold", tag, self.name]
762 outdata,errdata = util.run_command(cmd)
# holds() (def line elided):
766 Returns a list of user hold tags for this snapshot
768 cmd = [ZFSCMD, "holds", self.name]
770 outdata,errdata = util.run_command(cmd)
772 for line in outdata.rstrip().split('\n'):
775 # The first line heading columns are NAME TAG TIMESTAMP
776 # Filter that line out.
778 if (line[0] != "NAME" and line[1] != "TAG"):
779 results.append(line[1])
# NOTE(review): stray trailing comma in the signature below is legal
# but looks like a typo.
782 def release(self, tag,):
784 Release the hold on the snapshot with the specified "tag" string.
786 # FIXME raises exception if no hold exists.
787 # Be sure it genuinely exists before trying to destroy it
788 if self.exists() == False:
791 cmd = [PFCMD, ZFSCMD, "release", tag, self.name]
793 outdata,errdata = util.run_command(cmd)
794 # Releasing the snapshot might cause it get automatically
796 # Clear the global snapshot cache so that a rescan will be
797 # triggered on the next call to Datasets.list_snapshots()
798 self.datasets.refresh_snapshots()
# __str__ body (def line elided): multi-line summary of the snapshot.
802 return_string = "Snapshot name: " + self.name
803 return_string = return_string + "\n\tCreation time: " \
804 + str(self.get_creation_time())
805 return_string = return_string + "\n\tUsed Size: " \
806 + str(self.get_used_size())
807 return_string = return_string + "\n\tReferenced Size: " \
808 + str(self.get_referenced_size())
812 class ReadWritableDataset(ReadableDataset):
814 Base class for ZFS filesystems and volumes.
815 Provides methods for operations and properties
816 common to both filesystems and volumes.
818 def __init__(self, name, creation = None):
819 ReadableDataset.__init__(self, name, creation)
# Lazy per-dataset snapshot cache used by list_snapshots() below.
820 self.__snapshots = None
# __str__ body (def line elided).
823 return_string = "ReadWritableDataset name: " + self.name + "\n"
826 def get_auto_snap(self, schedule = None):
# NOTE(review): both commands below query the same wildcard property
# "com.sun:auto-snapshot"; the schedule branch presumably intended
# "com.sun:auto-snapshot:<schedule>" -- confirm before changing.
828 cmd = [ZFSCMD, "get", "-H", "-o", "value", \
829 "com.sun:auto-snapshot", self.name]
830 cmd = [ZFSCMD, "get", "-H", "-o", "value", \
831 "com.sun:auto-snapshot", self.name]
832 outdata,errdata = util.run_command(cmd)
833 if outdata.rstrip() == "true":
# "available" in raw bytes (-p) for this dataset.
838 def get_available_size(self):
839 cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value", "available", \
841 outdata,errdata = util.run_command(cmd)
842 return long(outdata.rstrip())
844 def create_snapshot(self, snaplabel, recursive = False):
846 Create a snapshot for the ReadWritable dataset using the supplied
851 A string to use as the snapshot label.
852 The bit that comes after the "@" part of the snapshot
855 Recursively snapshot children of this dataset.
# pfexec is required for snapshot creation privileges.
858 cmd = [PFCMD, ZFSCMD, "snapshot"]
859 if recursive == True:
861 cmd.append("%s@%s" % (self.name, snaplabel))
862 outdata,errdata = util.run_command(cmd, False)
# Invalidate the global cache so the new snapshot is picked up.
865 self.datasets.refresh_snapshots()
# Returns direct and indirect descendants of this dataset by name.
867 def list_children(self):
869 # Note, if more dataset types ever come around they will
870 # need to be added to the filesystem,volume args below.
871 # Not for the foreseeable future though.
872 cmd = [ZFSCMD, "list", "-H", "-r", "-t", "filesystem,volume",
873 "-o", "name", self.name]
874 outdata,errdata = util.run_command(cmd)
876 for line in outdata.rstrip().split('\n'):
# -r includes the dataset itself; skip it so only children remain.
877 if line.rstrip() != self.name:
878 result.append(line.rstrip())
882 def list_snapshots(self, pattern = None):
884 List pattern matching snapshots sorted by creation date.
888 pattern -- Filter according to pattern (default None)
890 # If there isn't a list of snapshots for this dataset
891 # already, create it now and store it in order to save
892 # time later for potential future invocations.
893 Datasets.snapshotslock.acquire()
# If the global cache was invalidated, drop the local cache too.
894 if Datasets.snapshots == None:
895 self.__snapshots = None
896 Datasets.snapshotslock.release()
897 if self.__snapshots == None:
899 regexpattern = "^%s@" % self.name
900 patternobj = re.compile(regexpattern)
901 for snapname,snaptime in self.datasets.list_snapshots():
902 patternmatchobj = re.match(patternobj, snapname)
903 if patternmatchobj != None:
904 result.append([snapname, snaptime])
905 # Results already sorted by creation time
906 self.__snapshots = result
# pattern == None path returns [name, time] pairs from the cache.
908 return self.__snapshots
911 regexpattern = "^%s@.*%s" % (self.name, pattern)
912 patternobj = re.compile(regexpattern)
913 for snapname,snaptime in self.__snapshots:
914 patternmatchobj = re.match(patternobj, snapname)
915 if patternmatchobj != None:
# NOTE(review): the pattern path appends bare names while the cached
# path returns [name, time] pairs -- inconsistent shapes; confirm
# which one callers expect.
916 snapshots.append(snapname)
# inherit=True clears the local property so the parent value applies;
# otherwise the wildcard auto-snapshot property is set explicitly
# (value computed from 'include' on elided lines).
919 def set_auto_snap(self, include, inherit = False):
921 self.unset_user_property("com.sun:auto-snapshot")
927 self.set_user_property("com.sun:auto-snapshot", value)
932 class Filesystem(ReadWritableDataset):
933 """ZFS Filesystem class"""
934 def __init__(self, name, mountpoint = None):
935 ReadWritableDataset.__init__(self, name)
# Mountpoint is fetched lazily by get_mountpoint() when not supplied.
936 self.__mountpoint = mountpoint
# __str__ body (def line elided).
939 return_string = "Filesystem name: " + self.name + \
940 "\n\tMountpoint: " + self.get_mountpoint() + \
941 "\n\tMounted: " + str(self.is_mounted()) + \
942 "\n\tAuto snap: " + str(self.get_auto_snap())
945 def get_mountpoint(self):
# Query once, then serve the memoised value.
946 if (self.__mountpoint == None):
947 cmd = [ZFSCMD, "get", "-H", "-o", "value", "mountpoint", \
949 outdata,errdata = util.run_command(cmd)
950 result = outdata.rstrip()
951 self.__mountpoint = result
952 return self.__mountpoint
# Reads the boolean "mounted" property (return lines elided).
954 def is_mounted(self):
955 cmd = [ZFSCMD, "get", "-H", "-o", "value", "mounted", \
957 outdata,errdata = util.run_command(cmd)
958 result = outdata.rstrip()
# Overrides the parent: filesystems only, volumes excluded (-t).
964 def list_children(self):
965 cmd = [ZFSCMD, "list", "-H", "-r", "-t", "filesystem", "-o", "name",
967 outdata,errdata = util.run_command(cmd)
969 for line in outdata.rstrip().split('\n'):
# -r includes the dataset itself; skip it so only children remain.
970 if line.rstrip() != self.name:
971 result.append(line.rstrip())
975 class Volume(ReadWritableDataset):
978 This is basically just a stub and does nothing
979 unique from ReadWritableDataset parent class.
981 def __init__(self, name):
982 ReadWritableDataset.__init__(self, name)
# __str__ body (def line elided).
985 return_string = "Volume name: " + self.name + "\n"
# Module exception hierarchy: ZFSError is the common base.
989 class ZFSError(Exception):
990 """Generic base class for ZPoolFaultedError and SnapshotError
993 msg -- explanation of the error
# self.msg assignment and the __str__ def line are elided from this
# view; repr(self.msg) below is the __str__ body.
995 def __init__(self, msg):
998 return repr(self.msg)
1001 class ZPoolFaultedError(ZFSError):
1002 """Exception raised for queries made against ZPools that
1003 are in a FAULTED state
1006 msg -- explanation of the error
1008 def __init__(self, msg):
1009 ZFSError.__init__(self, msg)
1012 class SnapshotError(ZFSError):
1013 """Exception raised for invalid snapshot names provided to
1014 Snapshot() constructor.
1017 msg -- explanation of the error
1019 def __init__(self, msg):
1020 ZFSError.__init__(self, msg)
# list_zpools() (def line and result initialisation/return elided):
1024 """Returns a list of all zpools on the system"""
1026 cmd = [ZPOOLCMD, "list", "-H", "-o", "name"]
1027 outdata,errdata = util.run_command(cmd)
# One pool name per line of zpool list output.
1028 for line in outdata.rstrip().split('\n'):
1029 result.append(line.rstrip())
# Smoke-test driver: print every pool, its filesystems/volumes and
# their snapshots. Python 2 print statements; ZPool construction line
# is elided from this view.
1033 if __name__ == "__main__":
1034 for zpool in list_zpools():
1037 for filesys,mountpoint in pool.list_filesystems():
1038 fs = Filesystem(filesys, mountpoint)
1040 print "\tSnapshots:"
# list_snapshots() yields [name, creation-time] pairs.
1041 for snapshot, snaptime in fs.list_snapshots():
1042 snap = Snapshot(snapshot, snaptime)
1043 print "\t\t" + snap.name
1045 for volname in pool.list_volumes():
1046 vol = Volume(volname)
1048 print "\tSnapshots:"
1049 for snapshot, snaptime in vol.list_snapshots():
1050 snap = Snapshot(snapshot, snaptime)
1051 print "\t\t" + snap.name