# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
import re
import threading
from bisect import insort, bisect_left, bisect_right

# Project-local helper providing run_command(); referenced throughout
# this module (restored: the import line was lost in the mangled paste).
import util
# Commonly used command paths
PFCMD = "/usr/bin/pfexec"
ZFSCMD = "/usr/sbin/zfs"
ZPOOLCMD = "/usr/sbin/zpool"

# Bytes in one megabyte; used by ZPool.__str__ to report sizes in Mb.
BYTESPERMB = 1048576
class Datasets(Exception):
    """
    Container class for all zfs datasets. Maintains a centralised
    list of datasets (generated on demand) and accessor methods.
    Also allows clients to notify when a refresh might be necessary.
    """
    # Class wide instead of per-instance in order to avoid duplication
    # of the cached dataset lists across instances.
    filesystems = None   # cached [[name, mountpoint], ...] or None
    volumes = None       # cached [name, ...] or None
    snapshots = None     # cached [[name, creationtime], ...] or None

    # Mutex locks to prevent concurrent writes to above class wide
    # dataset lists.
    _filesystemslock = threading.Lock()
    _volumeslock = threading.Lock()
    snapshotslock = threading.Lock()

    def create_auto_snapshot_set(self, label, tag=None):
        """
        Create a complete set of snapshots as if this were
        for a standard zfs-auto-snapshot operation.

        Keyword arguments:
        label:
            A label to apply to the snapshot name. Cannot be None.
        tag:
            A string indicating one of the standard auto-snapshot schedule
            tags to check (eg. "frequent" will map to the tag:
            com.sun:auto-snapshot:frequent). If specified as a zfs property
            on a zfs dataset, the property corresponding to the tag will
            override the wildcard property: "com.sun:auto-snapshot"
            Default value = None
        """
        everything = []
        included = []
        excluded = []
        single = []
        recursive = []
        finalrecursive = []

        # Get auto-snap property in two passes. First with the schedule
        # specific tag override value, then with the general property value.
        cmd = [ZFSCMD, "list", "-H", "-t", "filesystem,volume",
               "-o", "name,com.sun:auto-snapshot", "-s", "name"]
        if tag:
            overrideprop = "com.sun:auto-snapshot:" + tag
            scmd = [ZFSCMD, "list", "-H", "-t", "filesystem,volume",
                    "-o", "name," + overrideprop, "-s", "name"]
            outdata, errdata = util.run_command(scmd)
            for line in outdata.rstrip().split('\n'):
                line = line.split()
                # Skip over unset values.
                if line[1] == "-":
                    continue
                # Add to everything list. This is used later
                # for identifying parents/children of a given
                # filesystem or volume.
                everything.append(line[0])
                if line[1] == "true":
                    included.append(line[0])
                elif line[1] == "false":
                    excluded.append(line[0])

        # Now use the general property. If no value
        # was set in the first pass, we set it here.
        outdata, errdata = util.run_command(cmd)
        for line in outdata.rstrip().split('\n'):
            line = line.split()
            idx = bisect_right(everything, line[0])
            if len(everything) == 0 or \
               everything[idx - 1] != line[0]:
                # Dataset is neither included nor excluded so far.
                if line[1] == "-":
                    continue
                everything.insert(idx, line[0])
                if line[1] == "true":
                    # Prepend so 'included' stays in reverse
                    # alphabetical order (children before parents).
                    included.insert(0, line[0])
                elif line[1] == "false":
                    excluded.append(line[0])

        # Now figure out what can be recursively snapshotted and what
        # must be singly snapshotted. Single snapshot restrictions apply
        # to those datasets who have a child in the excluded list.
        # 'included' is sorted in reverse alphabetical order.
        for datasetname in included:
            excludedchild = False
            idx = bisect_right(everything, datasetname)
            children = [name for name in everything[idx:]
                        if name.find(datasetname) == 0]
            for child in children:
                idx = bisect_left(excluded, child)
                if idx < len(excluded) and excluded[idx] == child:
                    excludedchild = True
                    single.append(datasetname)
                    break
            if excludedchild == False:
                # We want recursive list sorted in alphabetical order
                # so insert instead of append to the list.
                # Also, remove all children from the recursive
                # list, as they are covered by the parent.
                recursive = [x for x in recursive if x not in children]
                recursive.insert(0, datasetname)

        for datasetname in recursive:
            parts = datasetname.rsplit('/', 1)
            parent = parts[0]
            if parent == datasetname:
                # Root filesystem of the Zpool, so
                # this can't be inherited and must be
                # set locally.
                finalrecursive.append(datasetname)
                continue
            idx = bisect_right(recursive, parent)
            if len(recursive) > 0 and \
               recursive[idx - 1] == parent:
                # Parent already marked for recursive snapshot: so skip.
                continue
            else:
                finalrecursive.append(datasetname)

        for name in finalrecursive:
            dataset = ReadWritableDataset(name)
            dataset.create_snapshot(label, True)
        for name in single:
            dataset = ReadWritableDataset(name)
            dataset.create_snapshot(label, False)

    def list_auto_snapshot_sets(self, tag=None):
        """
        Returns a list of zfs filesystems and volumes tagged with
        the "com.sun:auto-snapshot" property set to "true", either
        set locally or inherited. Snapshots are excluded from the
        returned result.

        Keyword Arguments:
        tag:
            A string indicating one of the standard auto-snapshot schedule
            tags to check (eg. "frequent" will map to the tag:
            com.sun:auto-snapshot:frequent). If specified as a zfs property
            on a zfs dataset, the property corresponding to the tag will
            override the wildcard property: "com.sun:auto-snapshot"
            Default value = None
        """
        # Get auto-snap property in two passes. First with the
        # label/schedule specific value, then with the global value.
        included = []
        excluded = []

        cmd = [ZFSCMD, "list", "-H", "-t", "filesystem,volume",
               "-o", "name,com.sun:auto-snapshot", "-s", "name"]
        if tag:
            overrideprop = "com.sun:auto-snapshot:" + tag
            scmd = [ZFSCMD, "list", "-H", "-t", "filesystem,volume",
                    "-o", "name," + overrideprop, "-s", "name"]
            outdata, errdata = util.run_command(scmd)
            for line in outdata.rstrip().split('\n'):
                line = line.split()
                if line[1] == "true":
                    included.append(line[0])
                elif line[1] == "false":
                    excluded.append(line[0])
        outdata, errdata = util.run_command(cmd)
        for line in outdata.rstrip().split('\n'):
            line = line.split()
            # Only set values that aren't already set. Don't override
            # a schedule specific value with the general one.
            if line[0] in included or line[0] in excluded:
                continue
            # Dataset is not listed in either list.
            if line[1] == "true":
                included.append(line[0])
        return included

    def list_filesystems(self, pattern=None):
        """
        List pattern matching filesystems sorted by name.

        Keyword arguments:
        pattern -- Filter according to pattern (default None)
        """
        filesystems = []
        # Need to first ensure no other thread is trying to
        # build this list at the same time.
        Datasets._filesystemslock.acquire()
        try:
            if Datasets.filesystems is None:
                Datasets.filesystems = []
                cmd = [ZFSCMD, "list", "-H", "-t", "filesystem",
                       "-o", "name,mountpoint", "-s", "name"]
                try:
                    outdata, errdata = util.run_command(cmd, True)
                except OSError as message:
                    raise RuntimeError("%s subprocess error:\n %s"
                                       % (cmd, str(message)))
        finally:
            # Always release the lock, including on error paths; the
            # original leaked the lock when the subprocess raised OSError.
            Datasets._filesystemslock.release()
        for line in outdata.rstrip().split('\n'):
            line = line.rstrip().split()
            Datasets.filesystems.append([line[0], line[1]])

        if pattern is None:
            filesystems = Datasets.filesystems[:]
        else:
            # Regular expression pattern to match "pattern" parameter.
            regexpattern = ".*%s.*" % pattern
            patternobj = re.compile(regexpattern)
            for fsname, fsmountpoint in Datasets.filesystems:
                if patternobj.match(fsname) is not None:
                    # Bug fix: original passed two arguments to
                    # list.append(); store the pair as one list.
                    filesystems.append([fsname, fsmountpoint])
        return filesystems

    def list_volumes(self, pattern=None):
        """
        List pattern matching volumes sorted by name.

        Keyword arguments:
        pattern -- Filter according to pattern (default None)
        """
        volumes = []
        Datasets._volumeslock.acquire()
        try:
            if Datasets.volumes is None:
                Datasets.volumes = []
                cmd = [ZFSCMD, "list", "-H", "-t", "volume",
                       "-o", "name", "-s", "name"]
                try:
                    outdata, errdata = util.run_command(cmd, True)
                except RuntimeError as message:
                    raise RuntimeError(str(message))
                for line in outdata.rstrip().split('\n'):
                    Datasets.volumes.append(line.rstrip())
        finally:
            Datasets._volumeslock.release()

        if pattern is None:
            volumes = Datasets.volumes[:]
        else:
            # Regular expression pattern to match "pattern" parameter.
            regexpattern = ".*%s.*" % pattern
            patternobj = re.compile(regexpattern)
            for volname in Datasets.volumes:
                if patternobj.match(volname) is not None:
                    volumes.append(volname)
        return volumes

    def list_snapshots(self, pattern=None):
        """
        List pattern matching snapshots sorted by creation date.
        Oldest listed first.

        Keyword arguments:
        pattern -- Filter according to pattern (default None)
        """
        snapshots = []
        Datasets.snapshotslock.acquire()
        try:
            if Datasets.snapshots is None:
                Datasets.snapshots = []
                snaps = []
                cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value,name",
                       "creation"]
                try:
                    outdata, errdata = util.run_command(cmd, True)
                except RuntimeError as message:
                    raise RuntimeError(str(message))
                for dataset in outdata.rstrip().split('\n'):
                    if re.search("@", dataset):
                        # Output rows are "<creation> <name>"; insort
                        # keeps 'snaps' ordered by creation time.
                        insort(snaps, dataset.split())
                for snap in snaps:
                    Datasets.snapshots.append([snap[1], int(snap[0])])
            if pattern is None:
                snapshots = Datasets.snapshots[:]
            else:
                # Regular expression pattern to match "pattern" parameter.
                regexpattern = ".*@.*%s" % pattern
                patternobj = re.compile(regexpattern)
                for snapname, snaptime in Datasets.snapshots:
                    if patternobj.match(snapname) is not None:
                        snapshots.append([snapname, snaptime])
        finally:
            Datasets.snapshotslock.release()
        return snapshots

    def list_cloned_snapshots(self):
        """
        Returns a list of snapshots that have cloned filesystems
        dependent on them.
        Snapshots with cloned filesystems can not be destroyed
        unless dependent cloned filesystems are first destroyed.
        """
        cmd = [ZFSCMD, "list", "-H", "-o", "origin"]
        outdata, errdata = util.run_command(cmd)
        result = []
        for line in outdata.rstrip().split('\n'):
            details = line.rstrip()
            # "-" means the dataset is not a clone.
            if details != "-" and details not in result:
                result.append(details)
        return result

    def list_held_snapshots(self):
        """
        Returns a list of snapshots that have a "userrefs"
        property value of greater than 0. Result list is
        sorted in order of creation time. Oldest listed first.
        """
        cmd = [ZFSCMD, "list", "-H",
               "-t", "snapshot",
               "-s", "creation",
               "-o", "userrefs,name"]
        outdata, errdata = util.run_command(cmd)
        result = []
        for line in outdata.rstrip().split('\n'):
            details = line.split()
            if details[0] != "0":
                result.append(details[1])
        return result

    def refresh_snapshots(self):
        """
        Should be called when snapshots have been created or deleted
        and a rescan should be performed. Rescan gets deferred until
        next invocation of zfs.Datasets.list_snapshots()
        """
        # This is a little sub-optimal because we should be able to modify
        # the snapshot list in place in some situations and regenerate the
        # snapshot list without calling out to zfs(1m). But on the
        # pro side, we will pick up any new snapshots since the last
        # scan that we would be otherwise unaware of.
        Datasets.snapshotslock.acquire()
        Datasets.snapshots = None
        Datasets.snapshotslock.release()
class ZPool:
    """
    Base class for ZFS storage pool objects
    """
    def __init__(self, name):
        self.name = name
        self.health = self.__get_health()
        self.__datasets = Datasets()
        # Lazily built caches for the list_* accessors below.
        self.__filesystems = None
        self.__volumes = None
        self.__snapshots = None

    def __get_health(self):
        """
        Returns pool health status: 'ONLINE', 'DEGRADED' or 'FAULTED'
        """
        cmd = [ZPOOLCMD, "list", "-H", "-o", "health", self.name]
        outdata, errdata = util.run_command(cmd)
        result = outdata.rstrip()
        return result

    def get_capacity(self):
        """
        Returns the percentage of total pool storage in use.
        Calculated based on the "used" and "available" properties
        of the pool's top-level filesystem because the values account
        for reservations and quotas of children in their calculations,
        giving a more practical indication of how much capacity is used.

        Raises ZPoolFaultedError if the pool is in a FAULTED state.
        """
        if self.health == "FAULTED":
            # Bug fix: original format string lacked the space between
            # the pool name and "because".
            raise ZPoolFaultedError("Can not determine capacity of "
                                    "zpool: %s because it is in a "
                                    "FAULTED state" % (self.name))
        cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value",
               "used,available", self.name]
        outdata, errdata = util.run_command(cmd)
        _used, _available = outdata.rstrip().split('\n')
        used = float(_used)
        available = float(_available)
        return 100.0 * used / (used + available)

    def get_available_size(self):
        """
        How much unused space is available for use on this Zpool.
        Answer in bytes.
        """
        # zpool(1) doesn't report available space in
        # units suitable for calulations but zfs(1)
        # can so use it to find the value for the
        # filesystem matching the pool.
        # The root filesystem of the pool is simply
        # the pool name.
        poolfs = Filesystem(self.name)
        avail = poolfs.get_available_size()
        return avail

    def get_used_size(self):
        """
        How much space is in use on this Zpool.
        Answer in bytes.

        Raises ZPoolFaultedError if the pool is in a FAULTED state.
        """
        # Same as ZPool.get_available_size(): zpool(1)
        # doesn't generate suitable output so use
        # zfs(1) on the toplevel filesystem.
        if self.health == "FAULTED":
            raise ZPoolFaultedError("Can not determine used size of "
                                    "zpool: %s because it is in a "
                                    "FAULTED state" % (self.name))
        poolfs = Filesystem(self.name)
        used = poolfs.get_used_size()
        return used

    def list_filesystems(self):
        """
        Return a list of filesystems on this Zpool.
        List is sorted by name.
        """
        if self.__filesystems is None:
            result = []
            # Datasets.list_filesystems() provides a pre-sorted list.
            for fsname, fsmountpoint in self.__datasets.list_filesystems():
                if re.match(self.name, fsname):
                    result.append([fsname, fsmountpoint])
            self.__filesystems = result
        return self.__filesystems

    def list_volumes(self):
        """
        Return a list of volumes (zvol) on this Zpool.
        List is sorted by name.
        """
        if self.__volumes is None:
            result = []
            regexpattern = "^%s" % self.name
            patternobj = re.compile(regexpattern)
            for volname in self.__datasets.list_volumes():
                if patternobj.match(volname) is not None:
                    result.append(volname)
            self.__volumes = result
        return self.__volumes

    def list_auto_snapshot_sets(self, tag=None):
        """
        Returns a list of zfs filesystems and volumes tagged with
        the "com.sun:auto-snapshot" property set to "true", either
        set locally or inherited. Snapshots are excluded from the
        returned result. Results are not sorted.

        Keyword Arguments:
        tag:
            A string indicating one of the standard auto-snapshot schedule
            tags to check (eg. "frequent" will map to the tag:
            com.sun:auto-snapshot:frequent). If specified as a zfs property
            on a zfs dataset, the property corresponding to the tag will
            override the wildcard property: "com.sun:auto-snapshot"
            Default value = None
        """
        result = []
        allsets = self.__datasets.list_auto_snapshot_sets(tag)
        if len(allsets) == 0:
            return result

        regexpattern = "^%s" % self.name
        patternobj = re.compile(regexpattern)
        for datasetname in allsets:
            if patternobj.match(datasetname) is not None:
                result.append(datasetname)
        return result

    def list_snapshots(self, pattern=None):
        """
        List pattern matching snapshots sorted by creation date.
        Oldest listed first.

        Keyword arguments:
        pattern -- Filter according to pattern (default None)
        """
        # If there isn't a list of snapshots for this dataset
        # already, create it now and store it in order to save
        # time later for potential future invocations.
        Datasets.snapshotslock.acquire()
        if Datasets.snapshots is None:
            # Global cache was invalidated; drop our local cache too.
            self.__snapshots = None
        Datasets.snapshotslock.release()
        if self.__snapshots is None:
            result = []
            regexpattern = "^%s.*@" % self.name
            patternobj = re.compile(regexpattern)
            for snapname, snaptime in self.__datasets.list_snapshots():
                if patternobj.match(snapname) is not None:
                    result.append([snapname, snaptime])
            # Results already sorted by creation time.
            self.__snapshots = result
        if pattern is None:
            return self.__snapshots

        snapshots = []
        regexpattern = "^%s.*@.*%s" % (self.name, pattern)
        patternobj = re.compile(regexpattern)
        for snapname, snaptime in self.__snapshots:
            if patternobj.match(snapname) is not None:
                snapshots.append([snapname, snaptime])
        return snapshots

    def __str__(self):
        return_string = "ZPool name: " + self.name
        return_string = return_string + "\n\tHealth: " + self.health
        try:
            # Use floor division to preserve the original Python 2
            # integer-division output format.
            return_string = return_string + \
                            "\n\tUsed: " + \
                            str(self.get_used_size() // BYTESPERMB) + "Mb"
            return_string = return_string + \
                            "\n\tAvailable: " + \
                            str(self.get_available_size() // BYTESPERMB) + "Mb"
            return_string = return_string + \
                            "\n\tCapacity: " + \
                            str(self.get_capacity()) + "%"
        except ZPoolFaultedError:
            # Sizes are unobtainable for a FAULTED pool; report
            # name and health only.
            pass
        return return_string
class ReadableDataset:
    """
    Base class for Filesystem, Volume and Snapshot classes
    Provides methods for read only operations common to all.
    """
    def __init__(self, name, creation=None):
        self.name = name
        # Lazily fetched on first call to get_creation_time().
        self.__creationTime = creation
        self.datasets = Datasets()

    def __str__(self):
        return_string = "ReadableDataset name: " + self.name + "\n"
        return return_string

    def get_creation_time(self):
        """Returns the dataset's creation time (zfs "creation" property,
        numeric form)."""
        if self.__creationTime is None:
            cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value", "creation",
                   self.name]
            outdata, errdata = util.run_command(cmd)
            self.__creationTime = int(outdata.rstrip())
        return self.__creationTime

    def exists(self):
        """
        Returns True if the dataset is still existent on the system.
        False otherwise.
        """
        # Test existance of the dataset by checking the output of a
        # simple zfs get command on the dataset.
        cmd = [ZFSCMD, "get", "-H", "-o", "name", "type", self.name]
        try:
            outdata, errdata = util.run_command(cmd)
        except RuntimeError as message:
            raise RuntimeError(str(message))
        result = outdata.rstrip()
        return result == self.name

    def get_used_size(self):
        """Returns the dataset's "used" property value in bytes."""
        cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value", "used", self.name]
        outdata, errdata = util.run_command(cmd)
        return int(outdata.rstrip())

    def get_user_property(self, prop, local=False):
        """Returns the value of a user property; with local=True only a
        locally set (non-inherited) value is reported."""
        if local:
            cmd = [ZFSCMD, "get", "-s", "local", "-H", "-o", "value",
                   prop, self.name]
        else:
            cmd = [ZFSCMD, "get", "-H", "-o", "value", prop, self.name]
        outdata, errdata = util.run_command(cmd)
        return outdata.rstrip()

    def set_user_property(self, prop, value):
        """Sets the given user property on this dataset."""
        cmd = [ZFSCMD, "set", "%s=%s" % (prop, value), self.name]
        outdata, errdata = util.run_command(cmd)

    def unset_user_property(self, prop):
        """Clears the property so it reverts to its inherited value."""
        cmd = [ZFSCMD, "inherit", prop, self.name]
        outdata, errdata = util.run_command(cmd)
class Snapshot(ReadableDataset):
    """
    ZFS Snapshot object class.
    Provides information and operations specific to ZFS snapshots.
    """
    def __init__(self, name, creation=None):
        """
        Keyword arguments:
        name -- Name of the ZFS snapshot
        creation -- Creation time of the snapshot if known (Default None)
        """
        ReadableDataset.__init__(self, name, creation)
        self.fsname, self.snaplabel = self.__split_snapshot_name()
        self.poolname = self.__get_pool_name()

    def __get_pool_name(self):
        """Returns the pool component of the snapshot's dataset name."""
        name = self.fsname.split("/", 1)
        return name[0]

    def __split_snapshot_name(self):
        """Splits the snapshot name into (dataset, label) at the '@'."""
        name = self.name.split("@", 1)
        # Make sure this is really a snapshot and not a
        # filesystem otherwise a filesystem could get
        # destroyed instead of a snapshot. That would be
        # really bad.
        if name[0] == self.name:
            raise SnapshotError("\'%s\' is not a valid snapshot name"
                                % (self.name))
        return name[0], name[1]

    def get_referenced_size(self):
        """
        How much unique storage space is used by this snapshot.
        Answer in bytes.
        """
        cmd = [ZFSCMD, "get", "-H", "-p",
               "-o", "value", "referenced",
               self.name]
        outdata, errdata = util.run_command(cmd)
        return int(outdata.rstrip())

    def list_children(self):
        """Returns a recursive list of child snapshots of this snapshot"""
        cmd = [ZFSCMD,
               "list", "-t", "snapshot", "-H", "-r", "-o", "name",
               self.fsname]
        outdata, errdata = util.run_command(cmd)
        result = []
        for line in outdata.rstrip().split('\n'):
            # Same label, but not this snapshot itself.
            if re.search("@%s" % (self.snaplabel), line) and \
               line != self.name:
                result.append(line)
        return result

    def has_clones(self):
        """Returns True if the snapshot has any dependent clones"""
        cmd = [ZFSCMD, "list", "-H", "-o", "origin,name"]
        outdata, errdata = util.run_command(cmd)
        for line in outdata.rstrip().split('\n'):
            details = line.rstrip().split()
            if details[0] == self.name and \
               details[1] != '-':
                return True
        return False

    def destroy(self, deferred=True):
        """
        Permanently remove this snapshot from the filesystem
        Performs deferred destruction by default.
        """
        # Be sure it genuinely exists before trying to destroy it.
        if self.exists() == False:
            return
        if deferred == False:
            cmd = [ZFSCMD, "destroy", self.name]
        else:
            cmd = [ZFSCMD, "destroy", "-d", self.name]
        outdata, errdata = util.run_command(cmd)
        # Clear the global snapshot cache so that a rescan will be
        # triggered on the next call to Datasets.list_snapshots()
        self.datasets.refresh_snapshots()

    def hold(self, tag):
        """
        Place a hold on the snapshot with the specified "tag" string.
        """
        # FIXME - fails if hold is already held
        # Be sure it genuinely exists before trying to place a hold.
        if self.exists() == False:
            return
        cmd = [ZFSCMD, "hold", tag, self.name]
        outdata, errdata = util.run_command(cmd)

    def holds(self):
        """
        Returns a list of user hold tags for this snapshot
        """
        cmd = [ZFSCMD, "holds", self.name]
        results = []
        outdata, errdata = util.run_command(cmd)
        for line in outdata.rstrip().split('\n'):
            line = line.split()
            # The first line heading columns are NAME TAG TIMESTAMP
            # Filter that line out.
            if (line[0] != "NAME" and line[1] != "TAG"):
                results.append(line[1])
        return results

    def release(self, tag):
        """
        Release the hold on the snapshot with the specified "tag" string.
        """
        # FIXME raises exception if no hold exists.
        # Be sure it genuinely exists before trying to release it.
        if self.exists() == False:
            return
        cmd = [ZFSCMD, "release", tag, self.name]
        outdata, errdata = util.run_command(cmd)
        # Releasing the snapshot might cause it get automatically
        # destroyed by zfs (if it was deferred-destroyed while held).
        # Clear the global snapshot cache so that a rescan will be
        # triggered on the next call to Datasets.list_snapshots()
        self.datasets.refresh_snapshots()

    def __str__(self):
        return_string = "Snapshot name: " + self.name
        return_string = return_string + "\n\tCreation time: " \
                        + str(self.get_creation_time())
        return_string = return_string + "\n\tUsed Size: " \
                        + str(self.get_used_size())
        return_string = return_string + "\n\tReferenced Size: " \
                        + str(self.get_referenced_size())
        return return_string
class ReadWritableDataset(ReadableDataset):
    """
    Base class for ZFS filesystems and volumes.
    Provides methods for operations and properties
    common to both filesystems and volumes.
    """
    def __init__(self, name, creation=None):
        ReadableDataset.__init__(self, name, creation)
        # Lazily built cache for list_snapshots().
        self.__snapshots = None

    def __str__(self):
        return_string = "ReadWritableDataset name: " + self.name + "\n"
        return return_string

    def get_auto_snap(self, schedule=None):
        """Returns True if the com.sun:auto-snapshot property is "true"."""
        # NOTE(review): the original built the identical command in both
        # branches, so 'schedule' is effectively ignored — TODO confirm
        # whether a schedule specific property lookup was intended.
        if schedule:
            cmd = [ZFSCMD, "get", "-H", "-o", "value",
                   "com.sun:auto-snapshot", self.name]
        cmd = [ZFSCMD, "get", "-H", "-o", "value",
               "com.sun:auto-snapshot", self.name]
        outdata, errdata = util.run_command(cmd)
        return outdata.rstrip() == "true"

    def get_available_size(self):
        """Returns the dataset's "available" property value in bytes."""
        cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value", "available",
               self.name]
        outdata, errdata = util.run_command(cmd)
        return int(outdata.rstrip())

    def create_snapshot(self, snaplabel, recursive=False):
        """
        Create a snapshot for the ReadWritable dataset using the supplied
        snapshot label.

        Keyword Arguments:
        snaplabel:
            A string to use as the snapshot label.
            The bit that comes after the "@" part of the snapshot
            name.
        recursive:
            Recursively snapshot children of this dataset.
            Default = False
        """
        cmd = [ZFSCMD, "snapshot"]
        if recursive == True:
            cmd.append("-r")
        cmd.append("%s@%s" % (self.name, snaplabel))
        outdata, errdata = util.run_command(cmd, False)
        # Clear the global snapshot cache so that a rescan will be
        # triggered on the next call to Datasets.list_snapshots()
        self.datasets.refresh_snapshots()

    def list_children(self):
        """Returns a recursive list of child filesystems and volumes."""
        # Note, if more dataset types ever come around they will
        # need to be added to the filsystem,volume args below.
        # Not for the forseeable future though.
        cmd = [ZFSCMD, "list", "-H", "-r", "-t", "filesystem,volume",
               "-o", "name", self.name]
        outdata, errdata = util.run_command(cmd)
        result = []
        for line in outdata.rstrip().split('\n'):
            if line.rstrip() != self.name:
                result.append(line.rstrip())
        return result

    def list_snapshots(self, pattern=None):
        """
        List pattern matching snapshots sorted by creation date.
        Oldest listed first.

        Keyword arguments:
        pattern -- Filter according to pattern (default None)
        """
        # If there isn't a list of snapshots for this dataset
        # already, create it now and store it in order to save
        # time later for potential future invocations.
        Datasets.snapshotslock.acquire()
        if Datasets.snapshots is None:
            # Global cache was invalidated; drop our local cache too.
            self.__snapshots = None
        Datasets.snapshotslock.release()
        if self.__snapshots is None:
            result = []
            regexpattern = "^%s@" % self.name
            patternobj = re.compile(regexpattern)
            for snapname, snaptime in self.datasets.list_snapshots():
                if patternobj.match(snapname) is not None:
                    result.append([snapname, snaptime])
            # Results already sorted by creation time.
            self.__snapshots = result
        if pattern is None:
            return self.__snapshots

        snapshots = []
        regexpattern = "^%s@.*%s" % (self.name, pattern)
        patternobj = re.compile(regexpattern)
        for snapname, snaptime in self.__snapshots:
            if patternobj.match(snapname) is not None:
                # Consistency fix: return [name, time] pairs like the
                # unfiltered path and ZPool.list_snapshots() do; the
                # original appended the bare name here.
                snapshots.append([snapname, snaptime])
        return snapshots

    def set_auto_snap(self, include, inherit=False):
        """Set (or, with inherit=True, clear) the com.sun:auto-snapshot
        property on this dataset."""
        if inherit == True:
            self.unset_user_property("com.sun:auto-snapshot")
        else:
            if include == True:
                value = "true"
            else:
                value = "false"
            self.set_user_property("com.sun:auto-snapshot", value)
        return
class Filesystem(ReadWritableDataset):
    """ZFS Filesystem class"""
    def __init__(self, name, mountpoint=None):
        ReadWritableDataset.__init__(self, name)
        # Lazily fetched on first call to get_mountpoint().
        self.__mountpoint = mountpoint

    def __str__(self):
        return_string = "Filesystem name: " + self.name + \
                        "\n\tMountpoint: " + self.get_mountpoint() + \
                        "\n\tMounted: " + str(self.is_mounted()) + \
                        "\n\tAuto snap: " + str(self.get_auto_snap())
        return return_string

    def get_mountpoint(self):
        """Returns the filesystem's mountpoint property value."""
        if self.__mountpoint is None:
            cmd = [ZFSCMD, "get", "-H", "-o", "value", "mountpoint",
                   self.name]
            outdata, errdata = util.run_command(cmd)
            result = outdata.rstrip()
            self.__mountpoint = result
        return self.__mountpoint

    def is_mounted(self):
        """Returns True if the filesystem is currently mounted."""
        cmd = [ZFSCMD, "get", "-H", "-o", "value", "mounted",
               self.name]
        outdata, errdata = util.run_command(cmd)
        result = outdata.rstrip()
        return result == "yes"

    def list_children(self):
        """Returns a recursive list of child filesystems."""
        cmd = [ZFSCMD, "list", "-H", "-r", "-t", "filesystem", "-o", "name",
               self.name]
        outdata, errdata = util.run_command(cmd)
        result = []
        for line in outdata.rstrip().split('\n'):
            if line.rstrip() != self.name:
                result.append(line.rstrip())
        return result
class Volume(ReadWritableDataset):
    """
    ZFS Volume class.
    This is basically just a stub and does nothing
    unique from ReadWritableDataset parent class.
    """
    def __init__(self, name):
        ReadWritableDataset.__init__(self, name)

    def __str__(self):
        return_string = "Volume name: " + self.name + "\n"
        return return_string
class ZFSError(Exception):
    """Generic base class for ZPoolFaultedError and SnapshotError

    Attributes:
        msg -- explanation of the error
    """
    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return repr(self.msg)
class ZPoolFaultedError(ZFSError):
    """Exception raised for queries made against ZPools that
    are in a FAULTED state

    Attributes:
        msg -- explanation of the error
    """
    def __init__(self, msg):
        ZFSError.__init__(self, msg)
class SnapshotError(ZFSError):
    """Exception raised for invalid snapshot names provided to
    Snapshot() constructor.

    Attributes:
        msg -- explanation of the error
    """
    def __init__(self, msg):
        ZFSError.__init__(self, msg)
def list_zpools():
    """Returns a list of all zpools on the system"""
    result = []
    cmd = [ZPOOLCMD, "list", "-H", "-o", "name"]
    outdata, errdata = util.run_command(cmd)
    for line in outdata.rstrip().split('\n'):
        result.append(line.rstrip())
    return result
if __name__ == "__main__":
    # Simple smoke test: walk every pool on the system and print its
    # filesystems, volumes and their snapshots.
    for zpool in list_zpools():
        pool = ZPool(zpool)
        print(pool)
        for filesys, mountpoint in pool.list_filesystems():
            fs = Filesystem(filesys, mountpoint)
            print(fs)
            print("\tSnapshots:")
            for snapshot, snaptime in fs.list_snapshots():
                snap = Snapshot(snapshot, snaptime)
                print("\t\t" + snap.name)
        for volname in pool.list_volumes():
            vol = Volume(volname)
            print(vol)
            print("\tSnapshots:")
            for snapshot, snaptime in vol.list_snapshots():
                snap = Snapshot(snapshot, snaptime)
                print("\t\t" + snap.name)